xref: /openbmc/linux/fs/ext4/inode.c (revision 97795d2a5b8d3c8dc4365d4bd3404191840453ba)
1ac27a0ecSDave Kleikamp /*
2617ba13bSMingming Cao  *  linux/fs/ext4/inode.c
3ac27a0ecSDave Kleikamp  *
4ac27a0ecSDave Kleikamp  * Copyright (C) 1992, 1993, 1994, 1995
5ac27a0ecSDave Kleikamp  * Remy Card (card@masi.ibp.fr)
6ac27a0ecSDave Kleikamp  * Laboratoire MASI - Institut Blaise Pascal
7ac27a0ecSDave Kleikamp  * Universite Pierre et Marie Curie (Paris VI)
8ac27a0ecSDave Kleikamp  *
9ac27a0ecSDave Kleikamp  *  from
10ac27a0ecSDave Kleikamp  *
11ac27a0ecSDave Kleikamp  *  linux/fs/minix/inode.c
12ac27a0ecSDave Kleikamp  *
13ac27a0ecSDave Kleikamp  *  Copyright (C) 1991, 1992  Linus Torvalds
14ac27a0ecSDave Kleikamp  *
15ac27a0ecSDave Kleikamp  *  64-bit file support on 64-bit platforms by Jakub Jelinek
16ac27a0ecSDave Kleikamp  *	(jj@sunsite.ms.mff.cuni.cz)
17ac27a0ecSDave Kleikamp  *
18617ba13bSMingming Cao  *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
19ac27a0ecSDave Kleikamp  */
20ac27a0ecSDave Kleikamp 
21ac27a0ecSDave Kleikamp #include <linux/fs.h>
22ac27a0ecSDave Kleikamp #include <linux/time.h>
23dab291afSMingming Cao #include <linux/jbd2.h>
24ac27a0ecSDave Kleikamp #include <linux/highuid.h>
25ac27a0ecSDave Kleikamp #include <linux/pagemap.h>
26ac27a0ecSDave Kleikamp #include <linux/quotaops.h>
27ac27a0ecSDave Kleikamp #include <linux/string.h>
28ac27a0ecSDave Kleikamp #include <linux/buffer_head.h>
29ac27a0ecSDave Kleikamp #include <linux/writeback.h>
3064769240SAlex Tomas #include <linux/pagevec.h>
31ac27a0ecSDave Kleikamp #include <linux/mpage.h>
32e83c1397SDuane Griffin #include <linux/namei.h>
33ac27a0ecSDave Kleikamp #include <linux/uio.h>
34ac27a0ecSDave Kleikamp #include <linux/bio.h>
354c0425ffSMingming Cao #include <linux/workqueue.h>
36744692dcSJiaying Zhang #include <linux/kernel.h>
376db26ffcSAndrew Morton #include <linux/printk.h>
385a0e3ad6STejun Heo #include <linux/slab.h>
39a8901d34STheodore Ts'o #include <linux/ratelimit.h>
409bffad1eSTheodore Ts'o 
413dcf5451SChristoph Hellwig #include "ext4_jbd2.h"
42ac27a0ecSDave Kleikamp #include "xattr.h"
43ac27a0ecSDave Kleikamp #include "acl.h"
449f125d64STheodore Ts'o #include "truncate.h"
45ac27a0ecSDave Kleikamp 
469bffad1eSTheodore Ts'o #include <trace/events/ext4.h>
479bffad1eSTheodore Ts'o 
48a1d6cc56SAneesh Kumar K.V #define MPAGE_DA_EXTENT_TAIL 0x01
49a1d6cc56SAneesh Kumar K.V 
50814525f4SDarrick J. Wong static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
51814525f4SDarrick J. Wong 			      struct ext4_inode_info *ei)
52814525f4SDarrick J. Wong {
53814525f4SDarrick J. Wong 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
54814525f4SDarrick J. Wong 	__u16 csum_lo;
55814525f4SDarrick J. Wong 	__u16 csum_hi = 0;
56814525f4SDarrick J. Wong 	__u32 csum;
57814525f4SDarrick J. Wong 
58814525f4SDarrick J. Wong 	csum_lo = raw->i_checksum_lo;
59814525f4SDarrick J. Wong 	raw->i_checksum_lo = 0;
60814525f4SDarrick J. Wong 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
61814525f4SDarrick J. Wong 	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
62814525f4SDarrick J. Wong 		csum_hi = raw->i_checksum_hi;
63814525f4SDarrick J. Wong 		raw->i_checksum_hi = 0;
64814525f4SDarrick J. Wong 	}
65814525f4SDarrick J. Wong 
66814525f4SDarrick J. Wong 	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
67814525f4SDarrick J. Wong 			   EXT4_INODE_SIZE(inode->i_sb));
68814525f4SDarrick J. Wong 
69814525f4SDarrick J. Wong 	raw->i_checksum_lo = csum_lo;
70814525f4SDarrick J. Wong 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
71814525f4SDarrick J. Wong 	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
72814525f4SDarrick J. Wong 		raw->i_checksum_hi = csum_hi;
73814525f4SDarrick J. Wong 
74814525f4SDarrick J. Wong 	return csum;
75814525f4SDarrick J. Wong }
76814525f4SDarrick J. Wong 
77814525f4SDarrick J. Wong static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
78814525f4SDarrick J. Wong 				  struct ext4_inode_info *ei)
79814525f4SDarrick J. Wong {
80814525f4SDarrick J. Wong 	__u32 provided, calculated;
81814525f4SDarrick J. Wong 
82814525f4SDarrick J. Wong 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
83814525f4SDarrick J. Wong 	    cpu_to_le32(EXT4_OS_LINUX) ||
84814525f4SDarrick J. Wong 	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
85814525f4SDarrick J. Wong 		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
86814525f4SDarrick J. Wong 		return 1;
87814525f4SDarrick J. Wong 
88814525f4SDarrick J. Wong 	provided = le16_to_cpu(raw->i_checksum_lo);
89814525f4SDarrick J. Wong 	calculated = ext4_inode_csum(inode, raw, ei);
90814525f4SDarrick J. Wong 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
91814525f4SDarrick J. Wong 	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
92814525f4SDarrick J. Wong 		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
93814525f4SDarrick J. Wong 	else
94814525f4SDarrick J. Wong 		calculated &= 0xFFFF;
95814525f4SDarrick J. Wong 
96814525f4SDarrick J. Wong 	return provided == calculated;
97814525f4SDarrick J. Wong }
98814525f4SDarrick J. Wong 
99814525f4SDarrick J. Wong static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
100814525f4SDarrick J. Wong 				struct ext4_inode_info *ei)
101814525f4SDarrick J. Wong {
102814525f4SDarrick J. Wong 	__u32 csum;
103814525f4SDarrick J. Wong 
104814525f4SDarrick J. Wong 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
105814525f4SDarrick J. Wong 	    cpu_to_le32(EXT4_OS_LINUX) ||
106814525f4SDarrick J. Wong 	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
107814525f4SDarrick J. Wong 		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
108814525f4SDarrick J. Wong 		return;
109814525f4SDarrick J. Wong 
110814525f4SDarrick J. Wong 	csum = ext4_inode_csum(inode, raw, ei);
111814525f4SDarrick J. Wong 	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
112814525f4SDarrick J. Wong 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
113814525f4SDarrick J. Wong 	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
114814525f4SDarrick J. Wong 		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
115814525f4SDarrick J. Wong }
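/*
 * [Editor's illustration; not part of the original file.]  The three
 * helpers above split one 32-bit checksum across two 16-bit on-disk
 * fields.  For a hypothetical csum value of 0xA1B2C3D4:
 *
 *	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);   yields 0xC3D4
 *	raw->i_checksum_hi = cpu_to_le16(csum >> 16);      yields 0xA1B2
 *
 * i_checksum_hi exists only in large inodes with room for it; on a
 * 128-byte (EXT4_GOOD_OLD_INODE_SIZE) inode, ext4_inode_csum_verify()
 * masks the computed value with 0xFFFF and compares just the low half.
 */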
116814525f4SDarrick J. Wong 
117678aaf48SJan Kara static inline int ext4_begin_ordered_truncate(struct inode *inode,
118678aaf48SJan Kara 					      loff_t new_size)
119678aaf48SJan Kara {
1207ff9c073STheodore Ts'o 	trace_ext4_begin_ordered_truncate(inode, new_size);
1218aefcd55STheodore Ts'o 	/*
1228aefcd55STheodore Ts'o 	 * If jinode is zero, then we never opened the file for
1238aefcd55STheodore Ts'o 	 * writing, so there's no need to call
1248aefcd55STheodore Ts'o 	 * jbd2_journal_begin_ordered_truncate() since there's no
1258aefcd55STheodore Ts'o 	 * jbd2_journal_begin_ordered_truncate() since there are no
1268aefcd55STheodore Ts'o 	 */
1278aefcd55STheodore Ts'o 	if (!EXT4_I(inode)->jinode)
1288aefcd55STheodore Ts'o 		return 0;
1298aefcd55STheodore Ts'o 	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
1308aefcd55STheodore Ts'o 						   EXT4_I(inode)->jinode,
131678aaf48SJan Kara 						   new_size);
132678aaf48SJan Kara }
133678aaf48SJan Kara 
13464769240SAlex Tomas static void ext4_invalidatepage(struct page *page, unsigned long offset);
135cb20d518STheodore Ts'o static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
136cb20d518STheodore Ts'o 				   struct buffer_head *bh_result, int create);
137cb20d518STheodore Ts'o static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
138cb20d518STheodore Ts'o static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
139cb20d518STheodore Ts'o static int __ext4_journalled_writepage(struct page *page, unsigned int len);
140cb20d518STheodore Ts'o static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
1415f163cc7SEric Sandeen static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
1425f163cc7SEric Sandeen 		struct inode *inode, struct page *page, loff_t from,
1435f163cc7SEric Sandeen 		loff_t length, int flags);
14464769240SAlex Tomas 
145ac27a0ecSDave Kleikamp /*
146ac27a0ecSDave Kleikamp  * Test whether an inode is a fast symlink.
147ac27a0ecSDave Kleikamp  */
148617ba13bSMingming Cao static int ext4_inode_is_fast_symlink(struct inode *inode)
149ac27a0ecSDave Kleikamp {
150617ba13bSMingming Cao 	int ea_blocks = EXT4_I(inode)->i_file_acl ?
151ac27a0ecSDave Kleikamp 		(inode->i_sb->s_blocksize >> 9) : 0;
152ac27a0ecSDave Kleikamp 
153ac27a0ecSDave Kleikamp 	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
154ac27a0ecSDave Kleikamp }
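/*
 * [Editor's worked example; the numbers are hypothetical.]  A fast
 * symlink stores its target directly in the inode's i_data area and so
 * owns no data blocks.  With a 4096-byte blocksize, an inode that also
 * carries an external xattr block has i_file_acl != 0 and i_blocks == 8
 * (one block counted in 512-byte sectors: 4096 >> 9).  The check above
 * still classifies it as fast, since i_blocks - ea_blocks == 0.
 */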
155ac27a0ecSDave Kleikamp 
156ac27a0ecSDave Kleikamp /*
157ac27a0ecSDave Kleikamp  * Restart the transaction associated with *handle.  This does a commit,
158ac27a0ecSDave Kleikamp  * so before we call here everything must be consistently dirtied against
159ac27a0ecSDave Kleikamp  * this transaction.
160ac27a0ecSDave Kleikamp  */
161487caeefSJan Kara int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
162487caeefSJan Kara 				 int nblocks)
163ac27a0ecSDave Kleikamp {
164487caeefSJan Kara 	int ret;
165487caeefSJan Kara 
166487caeefSJan Kara 	/*
167e35fd660STheodore Ts'o 	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
168487caeefSJan Kara 	 * moment, get_block can be called only for blocks inside i_size since
169487caeefSJan Kara 	 * page cache has already been dropped and writes are blocked by
170487caeefSJan Kara 	 * i_mutex. So we can safely drop the i_data_sem here.
171487caeefSJan Kara 	 */
1720390131bSFrank Mayhar 	BUG_ON(EXT4_JOURNAL(inode) == NULL);
173ac27a0ecSDave Kleikamp 	jbd_debug(2, "restarting handle %p\n", handle);
174487caeefSJan Kara 	up_write(&EXT4_I(inode)->i_data_sem);
1758e8eaabeSAmir Goldstein 	ret = ext4_journal_restart(handle, nblocks);
176487caeefSJan Kara 	down_write(&EXT4_I(inode)->i_data_sem);
177fa5d1113SAneesh Kumar K.V 	ext4_discard_preallocations(inode);
178487caeefSJan Kara 
179487caeefSJan Kara 	return ret;
180ac27a0ecSDave Kleikamp }
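/*
 * [Editor's sketch of a typical caller; 'needed' is hypothetical and
 * this snippet is not part of the original file.]  Truncate paths use
 * this helper when a handle runs low on credits partway through:
 *
 *	if (!ext4_handle_has_enough_credits(handle, needed)) {
 *		err = ext4_truncate_restart_trans(handle, inode, needed);
 *		if (err)
 *			goto out;	  could not restart; abort work
 *	}
 *
 * The caller must hold i_data_sem for writing, since the helper drops
 * and re-takes it around ext4_journal_restart(), and any block state
 * derived before the call should be revalidated afterwards.
 */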
181ac27a0ecSDave Kleikamp 
182ac27a0ecSDave Kleikamp /*
183ac27a0ecSDave Kleikamp  * Called at the last iput() if i_nlink is zero.
184ac27a0ecSDave Kleikamp  */
1850930fcc1SAl Viro void ext4_evict_inode(struct inode *inode)
186ac27a0ecSDave Kleikamp {
187ac27a0ecSDave Kleikamp 	handle_t *handle;
188bc965ab3STheodore Ts'o 	int err;
189ac27a0ecSDave Kleikamp 
1907ff9c073STheodore Ts'o 	trace_ext4_evict_inode(inode);
1912581fdc8SJiaying Zhang 
1922581fdc8SJiaying Zhang 	ext4_ioend_wait(inode);
1932581fdc8SJiaying Zhang 
1940930fcc1SAl Viro 	if (inode->i_nlink) {
1952d859db3SJan Kara 		/*
1962d859db3SJan Kara 		 * When journalling data, dirty buffers are tracked only in the
1972d859db3SJan Kara 		 * journal. So although mm thinks everything is clean and
1982d859db3SJan Kara 		 * ready for reaping, the inode might still have some pages to
1992d859db3SJan Kara 		 * write in the running transaction or waiting to be
2002d859db3SJan Kara 		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
2012d859db3SJan Kara 		 * (via truncate_inode_pages()) to discard these buffers can
2022d859db3SJan Kara 		 * cause data loss. Also, even if we did not discard these
2032d859db3SJan Kara 		 * buffers, we would have no way to find them after the inode
2042d859db3SJan Kara 		 * is reaped, and thus the user could see stale data when
2052d859db3SJan Kara 		 * reading them before the transaction is checkpointed. So be
2062d859db3SJan Kara 		 * careful and force everything to disk here... We use
2072d859db3SJan Kara 		 * ei->i_datasync_tid to store the newest transaction
2082d859db3SJan Kara 		 * containing inode's data.
2092d859db3SJan Kara 		 *
2102d859db3SJan Kara 		 * Note that directories do not have this problem because they
2112d859db3SJan Kara 		 * don't use page cache.
2122d859db3SJan Kara 		 */
2132d859db3SJan Kara 		if (ext4_should_journal_data(inode) &&
2142d859db3SJan Kara 		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
2152d859db3SJan Kara 			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
2162d859db3SJan Kara 			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
2172d859db3SJan Kara 
2182d859db3SJan Kara 			jbd2_log_start_commit(journal, commit_tid);
2192d859db3SJan Kara 			jbd2_log_wait_commit(journal, commit_tid);
2202d859db3SJan Kara 			filemap_write_and_wait(&inode->i_data);
2212d859db3SJan Kara 		}
2220930fcc1SAl Viro 		truncate_inode_pages(&inode->i_data, 0);
2230930fcc1SAl Viro 		goto no_delete;
2240930fcc1SAl Viro 	}
2250930fcc1SAl Viro 
226907f4554SChristoph Hellwig 	if (!is_bad_inode(inode))
227871a2931SChristoph Hellwig 		dquot_initialize(inode);
228907f4554SChristoph Hellwig 
229678aaf48SJan Kara 	if (ext4_should_order_data(inode))
230678aaf48SJan Kara 		ext4_begin_ordered_truncate(inode, 0);
231ac27a0ecSDave Kleikamp 	truncate_inode_pages(&inode->i_data, 0);
232ac27a0ecSDave Kleikamp 
233ac27a0ecSDave Kleikamp 	if (is_bad_inode(inode))
234ac27a0ecSDave Kleikamp 		goto no_delete;
235ac27a0ecSDave Kleikamp 
2369f125d64STheodore Ts'o 	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
237ac27a0ecSDave Kleikamp 	if (IS_ERR(handle)) {
238bc965ab3STheodore Ts'o 		ext4_std_error(inode->i_sb, PTR_ERR(handle));
239ac27a0ecSDave Kleikamp 		/*
240ac27a0ecSDave Kleikamp 		 * If we're going to skip the normal cleanup, we still need to
241ac27a0ecSDave Kleikamp 		 * make sure that the in-core orphan linked list is properly
242ac27a0ecSDave Kleikamp 		 * cleaned up.
243ac27a0ecSDave Kleikamp 		 */
244617ba13bSMingming Cao 		ext4_orphan_del(NULL, inode);
245ac27a0ecSDave Kleikamp 		goto no_delete;
246ac27a0ecSDave Kleikamp 	}
247ac27a0ecSDave Kleikamp 
248ac27a0ecSDave Kleikamp 	if (IS_SYNC(inode))
2490390131bSFrank Mayhar 		ext4_handle_sync(handle);
250ac27a0ecSDave Kleikamp 	inode->i_size = 0;
251bc965ab3STheodore Ts'o 	err = ext4_mark_inode_dirty(handle, inode);
252bc965ab3STheodore Ts'o 	if (err) {
25312062dddSEric Sandeen 		ext4_warning(inode->i_sb,
254bc965ab3STheodore Ts'o 			     "couldn't mark inode dirty (err %d)", err);
255bc965ab3STheodore Ts'o 		goto stop_handle;
256bc965ab3STheodore Ts'o 	}
257ac27a0ecSDave Kleikamp 	if (inode->i_blocks)
258617ba13bSMingming Cao 		ext4_truncate(inode);
259bc965ab3STheodore Ts'o 
260bc965ab3STheodore Ts'o 	/*
261bc965ab3STheodore Ts'o 	 * ext4_ext_truncate() doesn't reserve any slop when it
262bc965ab3STheodore Ts'o 	 * restarts journal transactions; therefore there may not be
263bc965ab3STheodore Ts'o 	 * enough credits left in the handle to remove the inode from
264bc965ab3STheodore Ts'o 	 * the orphan list and set the dtime field.
265bc965ab3STheodore Ts'o 	 */
2660390131bSFrank Mayhar 	if (!ext4_handle_has_enough_credits(handle, 3)) {
267bc965ab3STheodore Ts'o 		err = ext4_journal_extend(handle, 3);
268bc965ab3STheodore Ts'o 		if (err > 0)
269bc965ab3STheodore Ts'o 			err = ext4_journal_restart(handle, 3);
270bc965ab3STheodore Ts'o 		if (err != 0) {
27112062dddSEric Sandeen 			ext4_warning(inode->i_sb,
272bc965ab3STheodore Ts'o 				     "couldn't extend journal (err %d)", err);
273bc965ab3STheodore Ts'o 		stop_handle:
274bc965ab3STheodore Ts'o 			ext4_journal_stop(handle);
27545388219STheodore Ts'o 			ext4_orphan_del(NULL, inode);
276bc965ab3STheodore Ts'o 			goto no_delete;
277bc965ab3STheodore Ts'o 		}
278bc965ab3STheodore Ts'o 	}
279bc965ab3STheodore Ts'o 
280ac27a0ecSDave Kleikamp 	/*
281617ba13bSMingming Cao 	 * Kill off the orphan record which ext4_truncate created.
282ac27a0ecSDave Kleikamp 	 * AKPM: I think this can be inside the above `if'.
283617ba13bSMingming Cao 	 * Note that ext4_orphan_del() has to be able to cope with the
284ac27a0ecSDave Kleikamp 	 * deletion of a non-existent orphan - this is because we don't
285617ba13bSMingming Cao 	 * know if ext4_truncate() actually created an orphan record.
286ac27a0ecSDave Kleikamp 	 * (Well, we could do this if we need to, but heck - it works)
287ac27a0ecSDave Kleikamp 	 */
288617ba13bSMingming Cao 	ext4_orphan_del(handle, inode);
289617ba13bSMingming Cao 	EXT4_I(inode)->i_dtime	= get_seconds();
290ac27a0ecSDave Kleikamp 
291ac27a0ecSDave Kleikamp 	/*
292ac27a0ecSDave Kleikamp 	 * One subtle ordering requirement: if anything has gone wrong
293ac27a0ecSDave Kleikamp 	 * (transaction abort, IO errors, whatever), then we can still
294ac27a0ecSDave Kleikamp 	 * do these next steps (the fs will already have been marked as
295ac27a0ecSDave Kleikamp 	 * having errors), but we can't free the inode if the mark_dirty
296ac27a0ecSDave Kleikamp 	 * fails.
297ac27a0ecSDave Kleikamp 	 */
298617ba13bSMingming Cao 	if (ext4_mark_inode_dirty(handle, inode))
299ac27a0ecSDave Kleikamp 		/* If that failed, just do the required in-core inode clear. */
3000930fcc1SAl Viro 		ext4_clear_inode(inode);
301ac27a0ecSDave Kleikamp 	else
302617ba13bSMingming Cao 		ext4_free_inode(handle, inode);
303617ba13bSMingming Cao 	ext4_journal_stop(handle);
304ac27a0ecSDave Kleikamp 	return;
305ac27a0ecSDave Kleikamp no_delete:
3060930fcc1SAl Viro 	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
307ac27a0ecSDave Kleikamp }
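/*
 * [Editor's note on the extend-or-restart pattern above; the credit
 * breakdown is an assumption, not from the original file.]
 * ext4_journal_extend() tries to grow the running handle in place and
 * returns > 0 when the current transaction has no more room, in which
 * case ext4_journal_restart() commits it and starts a fresh handle:
 *
 *	if (!ext4_handle_has_enough_credits(handle, 3)) {
 *		err = ext4_journal_extend(handle, 3);
 *		if (err > 0)
 *			err = ext4_journal_restart(handle, 3);
 *	}
 *
 * The 3 credits cover the remaining tail work (orphan-list removal and
 * the dtime/inode update), presumably with a block of slack.
 */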
308ac27a0ecSDave Kleikamp 
309a9e7f447SDmitry Monakhov #ifdef CONFIG_QUOTA
310a9e7f447SDmitry Monakhov qsize_t *ext4_get_reserved_space(struct inode *inode)
31160e58e0fSMingming Cao {
312a9e7f447SDmitry Monakhov 	return &EXT4_I(inode)->i_reserved_quota;
31360e58e0fSMingming Cao }
314a9e7f447SDmitry Monakhov #endif
3159d0be502STheodore Ts'o 
31612219aeaSAneesh Kumar K.V /*
31712219aeaSAneesh Kumar K.V  * Calculate the number of metadata blocks we need to reserve
3189d0be502STheodore Ts'o  * in order to allocate a block located at @lblock
31912219aeaSAneesh Kumar K.V  */
32001f49d0bSTheodore Ts'o static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
32112219aeaSAneesh Kumar K.V {
32212e9b892SDmitry Monakhov 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3239d0be502STheodore Ts'o 		return ext4_ext_calc_metadata_amount(inode, lblock);
32412219aeaSAneesh Kumar K.V 
3258bb2b247SAmir Goldstein 	return ext4_ind_calc_metadata_amount(inode, lblock);
32612219aeaSAneesh Kumar K.V }
32712219aeaSAneesh Kumar K.V 
3280637c6f4STheodore Ts'o /*
3290637c6f4STheodore Ts'o  * Called with i_data_sem down, which is important since we can call
3300637c6f4STheodore Ts'o  * ext4_discard_preallocations() from here.
3310637c6f4STheodore Ts'o  */
3325f634d06SAneesh Kumar K.V void ext4_da_update_reserve_space(struct inode *inode,
3335f634d06SAneesh Kumar K.V 					int used, int quota_claim)
33412219aeaSAneesh Kumar K.V {
33512219aeaSAneesh Kumar K.V 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3360637c6f4STheodore Ts'o 	struct ext4_inode_info *ei = EXT4_I(inode);
33712219aeaSAneesh Kumar K.V 
3380637c6f4STheodore Ts'o 	spin_lock(&ei->i_block_reservation_lock);
339d8990240SAditya Kali 	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
3400637c6f4STheodore Ts'o 	if (unlikely(used > ei->i_reserved_data_blocks)) {
3410637c6f4STheodore Ts'o 		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
3421084f252STheodore Ts'o 			 "with only %d reserved data blocks",
3430637c6f4STheodore Ts'o 			 __func__, inode->i_ino, used,
3440637c6f4STheodore Ts'o 			 ei->i_reserved_data_blocks);
3450637c6f4STheodore Ts'o 		WARN_ON(1);
3460637c6f4STheodore Ts'o 		used = ei->i_reserved_data_blocks;
3476bc6e63fSAneesh Kumar K.V 	}
34812219aeaSAneesh Kumar K.V 
349*97795d2aSBrian Foster 	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
350*97795d2aSBrian Foster 		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
351*97795d2aSBrian Foster 			 "with only %d reserved metadata blocks\n", __func__,
352*97795d2aSBrian Foster 			 inode->i_ino, ei->i_allocated_meta_blocks,
353*97795d2aSBrian Foster 			 ei->i_reserved_meta_blocks);
354*97795d2aSBrian Foster 		WARN_ON(1);
355*97795d2aSBrian Foster 		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
356*97795d2aSBrian Foster 	}
357*97795d2aSBrian Foster 
3580637c6f4STheodore Ts'o 	/* Update per-inode reservations */
3590637c6f4STheodore Ts'o 	ei->i_reserved_data_blocks -= used;
3600637c6f4STheodore Ts'o 	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
36157042651STheodore Ts'o 	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
36272b8ab9dSEric Sandeen 			   used + ei->i_allocated_meta_blocks);
3630637c6f4STheodore Ts'o 	ei->i_allocated_meta_blocks = 0;
3640637c6f4STheodore Ts'o 
3650637c6f4STheodore Ts'o 	if (ei->i_reserved_data_blocks == 0) {
3660637c6f4STheodore Ts'o 		/*
3670637c6f4STheodore Ts'o 		 * We can release all of the reserved metadata blocks
3680637c6f4STheodore Ts'o 		 * only when we have written all of the delayed
3690637c6f4STheodore Ts'o 		 * allocation blocks.
3700637c6f4STheodore Ts'o 		 */
37157042651STheodore Ts'o 		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
37272b8ab9dSEric Sandeen 				   ei->i_reserved_meta_blocks);
373ee5f4d9cSTheodore Ts'o 		ei->i_reserved_meta_blocks = 0;
3749d0be502STheodore Ts'o 		ei->i_da_metadata_calc_len = 0;
3750637c6f4STheodore Ts'o 	}
37612219aeaSAneesh Kumar K.V 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
37760e58e0fSMingming Cao 
37872b8ab9dSEric Sandeen 	/* Update quota subsystem for data blocks */
37972b8ab9dSEric Sandeen 	if (quota_claim)
3807b415bf6SAditya Kali 		dquot_claim_block(inode, EXT4_C2B(sbi, used));
38172b8ab9dSEric Sandeen 	else {
3825f634d06SAneesh Kumar K.V 		/*
3835f634d06SAneesh Kumar K.V 		 * We did fallocate at an offset that was already delayed
3845f634d06SAneesh Kumar K.V 		 * allocated. So on delayed-allocation writeback we should
38572b8ab9dSEric Sandeen 		 * not re-claim the quota for fallocated blocks.
3865f634d06SAneesh Kumar K.V 		 */
3877b415bf6SAditya Kali 		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
3885f634d06SAneesh Kumar K.V 	}
389d6014301SAneesh Kumar K.V 
390d6014301SAneesh Kumar K.V 	/*
391d6014301SAneesh Kumar K.V 	 * If we have done all the pending block allocations and if
392d6014301SAneesh Kumar K.V 	 * there aren't any writers on the inode, we can discard the
393d6014301SAneesh Kumar K.V 	 * inode's preallocations.
394d6014301SAneesh Kumar K.V 	 */
3950637c6f4STheodore Ts'o 	if ((ei->i_reserved_data_blocks == 0) &&
3960637c6f4STheodore Ts'o 	    (atomic_read(&inode->i_writecount) == 0))
397d6014301SAneesh Kumar K.V 		ext4_discard_preallocations(inode);
39812219aeaSAneesh Kumar K.V }
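/*
 * [Editor's worked example; all numbers are hypothetical.]  Suppose a
 * delalloc write reserved 8 data blocks and 2 metadata blocks, and
 * writeback then maps all 8 while consuming one metadata block
 * (used == 8, i_allocated_meta_blocks == 1).  The function above does:
 *
 *	i_reserved_data_blocks:   8 - 8          becomes 0
 *	i_reserved_meta_blocks:   2 - 1          becomes 1
 *	s_dirtyclusters_counter:  -= (8 + 1)
 *
 * and, because i_reserved_data_blocks reached zero, the leftover
 * metadata reservation is released too (a further -= 1) before the
 * quota for the 8 data blocks is claimed (or, for fallocated blocks,
 * the quota reservation is released instead).
 */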
39912219aeaSAneesh Kumar K.V 
400e29136f8STheodore Ts'o static int __check_block_validity(struct inode *inode, const char *func,
401c398eda0STheodore Ts'o 				unsigned int line,
40224676da4STheodore Ts'o 				struct ext4_map_blocks *map)
4036fd058f7STheodore Ts'o {
40424676da4STheodore Ts'o 	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
40524676da4STheodore Ts'o 				   map->m_len)) {
406c398eda0STheodore Ts'o 		ext4_error_inode(inode, func, line, map->m_pblk,
407c398eda0STheodore Ts'o 				 "lblock %lu mapped to illegal pblock "
40824676da4STheodore Ts'o 				 "(length %d)", (unsigned long) map->m_lblk,
409c398eda0STheodore Ts'o 				 map->m_len);
4106fd058f7STheodore Ts'o 		return -EIO;
4116fd058f7STheodore Ts'o 	}
4126fd058f7STheodore Ts'o 	return 0;
4136fd058f7STheodore Ts'o }
4146fd058f7STheodore Ts'o 
415e29136f8STheodore Ts'o #define check_block_validity(inode, map)	\
416c398eda0STheodore Ts'o 	__check_block_validity((inode), __func__, __LINE__, (map))
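/*
 * [Editor's note; the snippet mirrors the use in ext4_map_blocks()
 * below.]  The macro wrapper exists so that __func__ and __LINE__
 * expand at the call site:
 *
 *	int ret = check_block_validity(inode, map);
 *	if (ret != 0)
 *		return ret;	  -EIO: mapping points outside the fs
 *
 * the ext4_error_inode() report then names the caller, not the helper.
 */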
417e29136f8STheodore Ts'o 
418f5ab0d1fSMingming Cao /*
4191f94533dSTheodore Ts'o  * Return the number of contiguous dirty pages in a given inode
4201f94533dSTheodore Ts'o  * starting at page frame idx.
42155138e0bSTheodore Ts'o  */
42255138e0bSTheodore Ts'o static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
42355138e0bSTheodore Ts'o 				    unsigned int max_pages)
42455138e0bSTheodore Ts'o {
42555138e0bSTheodore Ts'o 	struct address_space *mapping = inode->i_mapping;
42655138e0bSTheodore Ts'o 	pgoff_t	index;
42755138e0bSTheodore Ts'o 	struct pagevec pvec;
42855138e0bSTheodore Ts'o 	pgoff_t num = 0;
42955138e0bSTheodore Ts'o 	int i, nr_pages, done = 0;
43055138e0bSTheodore Ts'o 
43155138e0bSTheodore Ts'o 	if (max_pages == 0)
43255138e0bSTheodore Ts'o 		return 0;
43355138e0bSTheodore Ts'o 	pagevec_init(&pvec, 0);
43455138e0bSTheodore Ts'o 	while (!done) {
43555138e0bSTheodore Ts'o 		index = idx;
43655138e0bSTheodore Ts'o 		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
43755138e0bSTheodore Ts'o 					      PAGECACHE_TAG_DIRTY,
43855138e0bSTheodore Ts'o 					      (pgoff_t)PAGEVEC_SIZE);
43955138e0bSTheodore Ts'o 		if (nr_pages == 0)
44055138e0bSTheodore Ts'o 			break;
44155138e0bSTheodore Ts'o 		for (i = 0; i < nr_pages; i++) {
44255138e0bSTheodore Ts'o 			struct page *page = pvec.pages[i];
44355138e0bSTheodore Ts'o 			struct buffer_head *bh, *head;
44455138e0bSTheodore Ts'o 
44555138e0bSTheodore Ts'o 			lock_page(page);
44655138e0bSTheodore Ts'o 			if (unlikely(page->mapping != mapping) ||
44755138e0bSTheodore Ts'o 			    !PageDirty(page) ||
44855138e0bSTheodore Ts'o 			    PageWriteback(page) ||
44955138e0bSTheodore Ts'o 			    page->index != idx) {
45055138e0bSTheodore Ts'o 				done = 1;
45155138e0bSTheodore Ts'o 				unlock_page(page);
45255138e0bSTheodore Ts'o 				break;
45355138e0bSTheodore Ts'o 			}
4541f94533dSTheodore Ts'o 			if (page_has_buffers(page)) {
4551f94533dSTheodore Ts'o 				bh = head = page_buffers(page);
45655138e0bSTheodore Ts'o 				do {
45755138e0bSTheodore Ts'o 					if (!buffer_delay(bh) &&
4581f94533dSTheodore Ts'o 					    !buffer_unwritten(bh))
45955138e0bSTheodore Ts'o 						done = 1;
4601f94533dSTheodore Ts'o 					bh = bh->b_this_page;
4611f94533dSTheodore Ts'o 				} while (!done && (bh != head));
46255138e0bSTheodore Ts'o 			}
46355138e0bSTheodore Ts'o 			unlock_page(page);
46455138e0bSTheodore Ts'o 			if (done)
46555138e0bSTheodore Ts'o 				break;
46655138e0bSTheodore Ts'o 			idx++;
46755138e0bSTheodore Ts'o 			num++;
468659c6009SEric Sandeen 			if (num >= max_pages) {
469659c6009SEric Sandeen 				done = 1;
47055138e0bSTheodore Ts'o 				break;
47155138e0bSTheodore Ts'o 			}
472659c6009SEric Sandeen 		}
47355138e0bSTheodore Ts'o 		pagevec_release(&pvec);
47455138e0bSTheodore Ts'o 	}
47555138e0bSTheodore Ts'o 	return num;
47655138e0bSTheodore Ts'o }
47755138e0bSTheodore Ts'o 
47855138e0bSTheodore Ts'o /*
4795356f261SAditya Kali  * Sets the BH_Da_Mapped bit on the buffer heads corresponding to the given map.
4805356f261SAditya Kali  */
4815356f261SAditya Kali static void set_buffers_da_mapped(struct inode *inode,
4825356f261SAditya Kali 				   struct ext4_map_blocks *map)
4835356f261SAditya Kali {
4845356f261SAditya Kali 	struct address_space *mapping = inode->i_mapping;
4855356f261SAditya Kali 	struct pagevec pvec;
4865356f261SAditya Kali 	int i, nr_pages;
4875356f261SAditya Kali 	pgoff_t index, end;
4885356f261SAditya Kali 
4895356f261SAditya Kali 	index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
4905356f261SAditya Kali 	end = (map->m_lblk + map->m_len - 1) >>
4915356f261SAditya Kali 		(PAGE_CACHE_SHIFT - inode->i_blkbits);
4925356f261SAditya Kali 
4935356f261SAditya Kali 	pagevec_init(&pvec, 0);
4945356f261SAditya Kali 	while (index <= end) {
4955356f261SAditya Kali 		nr_pages = pagevec_lookup(&pvec, mapping, index,
4965356f261SAditya Kali 					  min(end - index + 1,
4975356f261SAditya Kali 					      (pgoff_t)PAGEVEC_SIZE));
4985356f261SAditya Kali 		if (nr_pages == 0)
4995356f261SAditya Kali 			break;
5005356f261SAditya Kali 		for (i = 0; i < nr_pages; i++) {
5015356f261SAditya Kali 			struct page *page = pvec.pages[i];
5025356f261SAditya Kali 			struct buffer_head *bh, *head;
5035356f261SAditya Kali 
5045356f261SAditya Kali 			if (unlikely(page->mapping != mapping) ||
5055356f261SAditya Kali 			    !PageDirty(page))
5065356f261SAditya Kali 				break;
5075356f261SAditya Kali 
5085356f261SAditya Kali 			if (page_has_buffers(page)) {
5095356f261SAditya Kali 				bh = head = page_buffers(page);
5105356f261SAditya Kali 				do {
5115356f261SAditya Kali 					set_buffer_da_mapped(bh);
5125356f261SAditya Kali 					bh = bh->b_this_page;
5135356f261SAditya Kali 				} while (bh != head);
5145356f261SAditya Kali 			}
5155356f261SAditya Kali 			index++;
5165356f261SAditya Kali 		}
5175356f261SAditya Kali 		pagevec_release(&pvec);
5185356f261SAditya Kali 	}
5195356f261SAditya Kali }
5205356f261SAditya Kali 
5215356f261SAditya Kali /*
522e35fd660STheodore Ts'o  * The ext4_map_blocks() function tries to look up the requested blocks,
5232b2d6d01STheodore Ts'o  * and returns immediately if the blocks are already mapped.
524f5ab0d1fSMingming Cao  *
525f5ab0d1fSMingming Cao  * Otherwise it takes the write lock of i_data_sem, allocates blocks,
526f5ab0d1fSMingming Cao  * stores the allocated blocks in the result buffer head, and marks it
527f5ab0d1fSMingming Cao  * mapped.
528f5ab0d1fSMingming Cao  *
529e35fd660STheodore Ts'o  * If the file is extent-based, it will call ext4_ext_map_blocks();
530e35fd660STheodore Ts'o  * otherwise, it calls ext4_ind_map_blocks() to handle indirect-mapped
531f5ab0d1fSMingming Cao  * files.
532f5ab0d1fSMingming Cao  *
533f5ab0d1fSMingming Cao  * On success, it returns the number of blocks mapped or allocated.
534f5ab0d1fSMingming Cao  * If create==0 and the blocks are pre-allocated and uninitialized,
535f5ab0d1fSMingming Cao  * the result buffer head is unmapped. If create==1, it will make sure
536f5ab0d1fSMingming Cao  * the buffer head is mapped.
537f5ab0d1fSMingming Cao  *
538f5ab0d1fSMingming Cao  * It returns 0 if a plain lookup failed (blocks have not been allocated);
539df3ab170STao Ma  * in that case, the buffer head is unmapped.
540f5ab0d1fSMingming Cao  *
541f5ab0d1fSMingming Cao  * It returns the error in case of allocation failure.
542f5ab0d1fSMingming Cao  */
543e35fd660STheodore Ts'o int ext4_map_blocks(handle_t *handle, struct inode *inode,
544e35fd660STheodore Ts'o 		    struct ext4_map_blocks *map, int flags)
5450e855ac8SAneesh Kumar K.V {
5460e855ac8SAneesh Kumar K.V 	int retval;
547f5ab0d1fSMingming Cao 
548e35fd660STheodore Ts'o 	map->m_flags = 0;
549e35fd660STheodore Ts'o 	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
550e35fd660STheodore Ts'o 		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
551e35fd660STheodore Ts'o 		  (unsigned long) map->m_lblk);
5524df3d265SAneesh Kumar K.V 	/*
553b920c755STheodore Ts'o 	 * Try to see if we can get the block without requesting a new
554b920c755STheodore Ts'o 	 * file system block.
5554df3d265SAneesh Kumar K.V 	 */
556729f52c6SZheng Liu 	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
5570e855ac8SAneesh Kumar K.V 		down_read((&EXT4_I(inode)->i_data_sem));
55812e9b892SDmitry Monakhov 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
559a4e5d88bSDmitry Monakhov 		retval = ext4_ext_map_blocks(handle, inode, map, flags &
560a4e5d88bSDmitry Monakhov 					     EXT4_GET_BLOCKS_KEEP_SIZE);
5614df3d265SAneesh Kumar K.V 	} else {
562a4e5d88bSDmitry Monakhov 		retval = ext4_ind_map_blocks(handle, inode, map, flags &
563a4e5d88bSDmitry Monakhov 					     EXT4_GET_BLOCKS_KEEP_SIZE);
5640e855ac8SAneesh Kumar K.V 	}
565729f52c6SZheng Liu 	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
5664df3d265SAneesh Kumar K.V 		up_read((&EXT4_I(inode)->i_data_sem));
567f5ab0d1fSMingming Cao 
568e35fd660STheodore Ts'o 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
569e29136f8STheodore Ts'o 		int ret = check_block_validity(inode, map);
5706fd058f7STheodore Ts'o 		if (ret != 0)
5716fd058f7STheodore Ts'o 			return ret;
5726fd058f7STheodore Ts'o 	}
5736fd058f7STheodore Ts'o 
574f5ab0d1fSMingming Cao 	/* If it is only a block(s) look up */
575c2177057STheodore Ts'o 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
5764df3d265SAneesh Kumar K.V 		return retval;
5774df3d265SAneesh Kumar K.V 
5784df3d265SAneesh Kumar K.V 	/*
579f5ab0d1fSMingming Cao 	 * Return if the blocks have already been allocated.
580f5ab0d1fSMingming Cao 	 *
581f5ab0d1fSMingming Cao 	 * Note that if blocks have been preallocated,
582df3ab170STao Ma 	 * ext4_ext_get_block() returns with create = 0
583f5ab0d1fSMingming Cao 	 * and the buffer head unmapped.
584f5ab0d1fSMingming Cao 	 */
585e35fd660STheodore Ts'o 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
586f5ab0d1fSMingming Cao 		return retval;
587f5ab0d1fSMingming Cao 
588f5ab0d1fSMingming Cao 	/*
5892a8964d6SAneesh Kumar K.V 	 * When we call get_blocks without the create flag, the
5902a8964d6SAneesh Kumar K.V 	 * BH_Unwritten flag could have gotten set if the blocks
5912a8964d6SAneesh Kumar K.V 	 * requested were part of an uninitialized extent.  We need to
5922a8964d6SAneesh Kumar K.V 	 * clear this flag now that we are committed to convert all or
5932a8964d6SAneesh Kumar K.V 	 * part of the uninitialized extent to be an initialized
5942a8964d6SAneesh Kumar K.V 	 * extent.  This is because we need to avoid the combination
5952a8964d6SAneesh Kumar K.V 	 * of BH_Unwritten and BH_Mapped flags being simultaneously
5962a8964d6SAneesh Kumar K.V 	 * set on the buffer_head.
5972a8964d6SAneesh Kumar K.V 	 */
598e35fd660STheodore Ts'o 	map->m_flags &= ~EXT4_MAP_UNWRITTEN;
5992a8964d6SAneesh Kumar K.V 
6002a8964d6SAneesh Kumar K.V 	/*
601f5ab0d1fSMingming Cao 	 * New block allocation and/or writing to an uninitialized extent
602f5ab0d1fSMingming Cao 	 * will possibly result in updating i_data, so we take
603f5ab0d1fSMingming Cao 	 * the write lock of i_data_sem and call get_blocks()
604f5ab0d1fSMingming Cao 	 * with the create == 1 flag.
6054df3d265SAneesh Kumar K.V 	 */
6064df3d265SAneesh Kumar K.V 	down_write((&EXT4_I(inode)->i_data_sem));
607d2a17637SMingming Cao 
608d2a17637SMingming Cao 	/*
609d2a17637SMingming Cao 	 * If the caller is from the delayed allocation writeout path,
610d2a17637SMingming Cao 	 * we have already reserved fs blocks for allocation, so
611d2a17637SMingming Cao 	 * let the underlying get_block() function know to
612d2a17637SMingming Cao 	 * avoid double accounting.
613d2a17637SMingming Cao 	 */
614c2177057STheodore Ts'o 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
615f2321097STheodore Ts'o 		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
6164df3d265SAneesh Kumar K.V 	/*
6174df3d265SAneesh Kumar K.V 	 * We need to re-check the extents flag here because migration
6184df3d265SAneesh Kumar K.V 	 * could have changed the inode type in between.
6194df3d265SAneesh Kumar K.V 	 */
62012e9b892SDmitry Monakhov 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
621e35fd660STheodore Ts'o 		retval = ext4_ext_map_blocks(handle, inode, map, flags);
6220e855ac8SAneesh Kumar K.V 	} else {
623e35fd660STheodore Ts'o 		retval = ext4_ind_map_blocks(handle, inode, map, flags);
624267e4db9SAneesh Kumar K.V 
625e35fd660STheodore Ts'o 		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
626267e4db9SAneesh Kumar K.V 			/*
627267e4db9SAneesh Kumar K.V 			 * We allocated new blocks which will result in
628267e4db9SAneesh Kumar K.V 			 * i_data's format changing.  Force the migrate
629267e4db9SAneesh Kumar K.V 			 * to fail by clearing migrate flags
630267e4db9SAneesh Kumar K.V 			 */
63119f5fb7aSTheodore Ts'o 			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
632267e4db9SAneesh Kumar K.V 		}
6332ac3b6e0STheodore Ts'o 
634d2a17637SMingming Cao 		/*
6352ac3b6e0STheodore Ts'o 		 * Update reserved data/metadata blocks after a successful
6365f634d06SAneesh Kumar K.V 		 * block allocation which had been deferred till now. We don't
6375f634d06SAneesh Kumar K.V 		 * support fallocate for non-extent files, so we can update
6385f634d06SAneesh Kumar K.V 		 * the reserved space here.
639d2a17637SMingming Cao 		 */
6405f634d06SAneesh Kumar K.V 		if ((retval > 0) &&
6411296cc85SAneesh Kumar K.V 			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
6425f634d06SAneesh Kumar K.V 			ext4_da_update_reserve_space(inode, retval, 1);
6435f634d06SAneesh Kumar K.V 	}
6445356f261SAditya Kali 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
645f2321097STheodore Ts'o 		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
646d2a17637SMingming Cao 
6475356f261SAditya Kali 		/* If we have successfully mapped the delayed allocated blocks,
6485356f261SAditya Kali 	 * set the BH_Da_Mapped bit on them. It's important to do this
6495356f261SAditya Kali 		 * under the protection of i_data_sem.
6505356f261SAditya Kali 		 */
6515356f261SAditya Kali 		if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
6525356f261SAditya Kali 			set_buffers_da_mapped(inode, map);
6535356f261SAditya Kali 	}
6545356f261SAditya Kali 
6550e855ac8SAneesh Kumar K.V 	up_write((&EXT4_I(inode)->i_data_sem));
656e35fd660STheodore Ts'o 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
657e29136f8STheodore Ts'o 		int ret = check_block_validity(inode, map);
6586fd058f7STheodore Ts'o 		if (ret != 0)
6596fd058f7STheodore Ts'o 			return ret;
6606fd058f7STheodore Ts'o 	}
6610e855ac8SAneesh Kumar K.V 	return retval;
6620e855ac8SAneesh Kumar K.V }
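/*
 * [Editor's usage sketch; not part of the original file, and 'lblk'
 * and 'len' are hypothetical.]  A read-side lookup that must not
 * allocate:
 *
 *	struct ext4_map_blocks map;
 *	int ret;
 *
 *	map.m_lblk = lblk;
 *	map.m_len = len;
 *	ret = ext4_map_blocks(NULL, inode, &map, 0);
 *	if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
 *		...;	  'ret' contiguous blocks start at map.m_pblk
 *	else if (ret == 0)
 *		...;	  hole: nothing is allocated at lblk
 *	else
 *		...;	  error, e.g. -EIO from a corrupt mapping
 *
 * Passing EXT4_GET_BLOCKS_CREATE in 'flags' (with a started handle)
 * switches to the allocating path under the write lock of i_data_sem.
 */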
6630e855ac8SAneesh Kumar K.V 
664f3bd1f3fSMingming Cao /* Maximum number of blocks we map for direct IO at once. */
665f3bd1f3fSMingming Cao #define DIO_MAX_BLOCKS 4096
666f3bd1f3fSMingming Cao 
6672ed88685STheodore Ts'o static int _ext4_get_block(struct inode *inode, sector_t iblock,
6682ed88685STheodore Ts'o 			   struct buffer_head *bh, int flags)
669ac27a0ecSDave Kleikamp {
6703e4fdaf8SDmitriy Monakhov 	handle_t *handle = ext4_journal_current_handle();
6712ed88685STheodore Ts'o 	struct ext4_map_blocks map;
6727fb5409dSJan Kara 	int ret = 0, started = 0;
673f3bd1f3fSMingming Cao 	int dio_credits;
674ac27a0ecSDave Kleikamp 
6752ed88685STheodore Ts'o 	map.m_lblk = iblock;
6762ed88685STheodore Ts'o 	map.m_len = bh->b_size >> inode->i_blkbits;
6772ed88685STheodore Ts'o 
6782ed88685STheodore Ts'o 	if (flags && !handle) {
6797fb5409dSJan Kara 		/* Direct IO write... */
6802ed88685STheodore Ts'o 		if (map.m_len > DIO_MAX_BLOCKS)
6812ed88685STheodore Ts'o 			map.m_len = DIO_MAX_BLOCKS;
6822ed88685STheodore Ts'o 		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
683f3bd1f3fSMingming Cao 		handle = ext4_journal_start(inode, dio_credits);
6847fb5409dSJan Kara 		if (IS_ERR(handle)) {
685ac27a0ecSDave Kleikamp 			ret = PTR_ERR(handle);
6862ed88685STheodore Ts'o 			return ret;
6877fb5409dSJan Kara 		}
6887fb5409dSJan Kara 		started = 1;
689ac27a0ecSDave Kleikamp 	}
690ac27a0ecSDave Kleikamp 
6912ed88685STheodore Ts'o 	ret = ext4_map_blocks(handle, inode, &map, flags);
692ac27a0ecSDave Kleikamp 	if (ret > 0) {
6932ed88685STheodore Ts'o 		map_bh(bh, inode->i_sb, map.m_pblk);
6942ed88685STheodore Ts'o 		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
6952ed88685STheodore Ts'o 		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
696ac27a0ecSDave Kleikamp 		ret = 0;
697ac27a0ecSDave Kleikamp 	}
6987fb5409dSJan Kara 	if (started)
6997fb5409dSJan Kara 		ext4_journal_stop(handle);
700ac27a0ecSDave Kleikamp 	return ret;
701ac27a0ecSDave Kleikamp }
702ac27a0ecSDave Kleikamp 
7032ed88685STheodore Ts'o int ext4_get_block(struct inode *inode, sector_t iblock,
7042ed88685STheodore Ts'o 		   struct buffer_head *bh, int create)
7052ed88685STheodore Ts'o {
7062ed88685STheodore Ts'o 	return _ext4_get_block(inode, iblock, bh,
7072ed88685STheodore Ts'o 			       create ? EXT4_GET_BLOCKS_CREATE : 0);
7082ed88685STheodore Ts'o }
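/*
 * [Editor's note; the call below is an illustrative sketch.]  These
 * wrappers adapt ext4_map_blocks() to the get_block_t signature used
 * by the generic buffer/page helpers, e.g.:
 *
 *	err = block_read_full_page(page, ext4_get_block);
 *
 * On success the mapping is folded back into the buffer_head: map_bh()
 * fills in b_bdev/b_blocknr, the EXT4_MAP_* bits are copied into
 * bh->b_state, and b_size is widened to cover map.m_len blocks so
 * direct I/O can consume up to DIO_MAX_BLOCKS per call.
 */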
7092ed88685STheodore Ts'o 
710ac27a0ecSDave Kleikamp /*
711ac27a0ecSDave Kleikamp  * `handle' can be NULL if create is zero
712ac27a0ecSDave Kleikamp  */
713617ba13bSMingming Cao struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
714725d26d3SAneesh Kumar K.V 				ext4_lblk_t block, int create, int *errp)
715ac27a0ecSDave Kleikamp {
7162ed88685STheodore Ts'o 	struct ext4_map_blocks map;
7172ed88685STheodore Ts'o 	struct buffer_head *bh;
718ac27a0ecSDave Kleikamp 	int fatal = 0, err;
719ac27a0ecSDave Kleikamp 
720ac27a0ecSDave Kleikamp 	J_ASSERT(handle != NULL || create == 0);
721ac27a0ecSDave Kleikamp 
7222ed88685STheodore Ts'o 	map.m_lblk = block;
7232ed88685STheodore Ts'o 	map.m_len = 1;
7242ed88685STheodore Ts'o 	err = ext4_map_blocks(handle, inode, &map,
7252ed88685STheodore Ts'o 			      create ? EXT4_GET_BLOCKS_CREATE : 0);
7262ed88685STheodore Ts'o 
7272ed88685STheodore Ts'o 	if (err < 0)
728ac27a0ecSDave Kleikamp 		*errp = err;
7292ed88685STheodore Ts'o 	if (err <= 0)
7302ed88685STheodore Ts'o 		return NULL;
7312ed88685STheodore Ts'o 	*errp = 0;
7322ed88685STheodore Ts'o 
7332ed88685STheodore Ts'o 	bh = sb_getblk(inode->i_sb, map.m_pblk);
734ac27a0ecSDave Kleikamp 	if (!bh) {
735ac27a0ecSDave Kleikamp 		*errp = -EIO;
7362ed88685STheodore Ts'o 		return NULL;
737ac27a0ecSDave Kleikamp 	}
7382ed88685STheodore Ts'o 	if (map.m_flags & EXT4_MAP_NEW) {
739ac27a0ecSDave Kleikamp 		J_ASSERT(create != 0);
740ac39849dSAneesh Kumar K.V 		J_ASSERT(handle != NULL);
741ac27a0ecSDave Kleikamp 
742ac27a0ecSDave Kleikamp 		/*
743ac27a0ecSDave Kleikamp 		 * Now that we do not always journal data, we should
744ac27a0ecSDave Kleikamp 		 * keep in mind whether this should always journal the
745ac27a0ecSDave Kleikamp 		 * new buffer as metadata.  For now, regular file
746617ba13bSMingming Cao 		 * writes use ext4_get_block instead, so it's not a
747ac27a0ecSDave Kleikamp 		 * problem.
748ac27a0ecSDave Kleikamp 		 */
749ac27a0ecSDave Kleikamp 		lock_buffer(bh);
750ac27a0ecSDave Kleikamp 		BUFFER_TRACE(bh, "call get_create_access");
751617ba13bSMingming Cao 		fatal = ext4_journal_get_create_access(handle, bh);
752ac27a0ecSDave Kleikamp 		if (!fatal && !buffer_uptodate(bh)) {
753ac27a0ecSDave Kleikamp 			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
754ac27a0ecSDave Kleikamp 			set_buffer_uptodate(bh);
755ac27a0ecSDave Kleikamp 		}
756ac27a0ecSDave Kleikamp 		unlock_buffer(bh);
7570390131bSFrank Mayhar 		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
7580390131bSFrank Mayhar 		err = ext4_handle_dirty_metadata(handle, inode, bh);
759ac27a0ecSDave Kleikamp 		if (!fatal)
760ac27a0ecSDave Kleikamp 			fatal = err;
761ac27a0ecSDave Kleikamp 	} else {
762ac27a0ecSDave Kleikamp 		BUFFER_TRACE(bh, "not a new buffer");
763ac27a0ecSDave Kleikamp 	}
764ac27a0ecSDave Kleikamp 	if (fatal) {
765ac27a0ecSDave Kleikamp 		*errp = fatal;
766ac27a0ecSDave Kleikamp 		brelse(bh);
767ac27a0ecSDave Kleikamp 		bh = NULL;
768ac27a0ecSDave Kleikamp 	}
769ac27a0ecSDave Kleikamp 	return bh;
770ac27a0ecSDave Kleikamp }
771ac27a0ecSDave Kleikamp 
772617ba13bSMingming Cao struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
773725d26d3SAneesh Kumar K.V 			       ext4_lblk_t block, int create, int *err)
774ac27a0ecSDave Kleikamp {
775ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
776ac27a0ecSDave Kleikamp 
777617ba13bSMingming Cao 	bh = ext4_getblk(handle, inode, block, create, err);
778ac27a0ecSDave Kleikamp 	if (!bh)
779ac27a0ecSDave Kleikamp 		return bh;
780ac27a0ecSDave Kleikamp 	if (buffer_uptodate(bh))
781ac27a0ecSDave Kleikamp 		return bh;
78265299a3bSChristoph Hellwig 	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
783ac27a0ecSDave Kleikamp 	wait_on_buffer(bh);
784ac27a0ecSDave Kleikamp 	if (buffer_uptodate(bh))
785ac27a0ecSDave Kleikamp 		return bh;
786ac27a0ecSDave Kleikamp 	put_bh(bh);
787ac27a0ecSDave Kleikamp 	*err = -EIO;
788ac27a0ecSDave Kleikamp 	return NULL;
789ac27a0ecSDave Kleikamp }
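/*
 * [Editor's usage sketch; hypothetical caller, not part of the
 * original file.]  Metadata readers fetch logical block 'n' roughly
 * like this:
 *
 *	int err = 0;
 *	struct buffer_head *bh = ext4_bread(NULL, inode, n, 0, &err);
 *
 *	if (!bh)
 *		return err;	  0 for a hole, < 0 for a lookup/IO error
 *	... use bh->b_data ...
 *	brelse(bh);
 *
 * Unlike ext4_getblk(), ext4_bread() also submits the read (flagged
 * REQ_META|REQ_PRIO) and waits, so a non-NULL return is uptodate.
 */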
790ac27a0ecSDave Kleikamp 
791ac27a0ecSDave Kleikamp static int walk_page_buffers(handle_t *handle,
792ac27a0ecSDave Kleikamp 			     struct buffer_head *head,
793ac27a0ecSDave Kleikamp 			     unsigned from,
794ac27a0ecSDave Kleikamp 			     unsigned to,
795ac27a0ecSDave Kleikamp 			     int *partial,
796ac27a0ecSDave Kleikamp 			     int (*fn)(handle_t *handle,
797ac27a0ecSDave Kleikamp 				       struct buffer_head *bh))
798ac27a0ecSDave Kleikamp {
799ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
800ac27a0ecSDave Kleikamp 	unsigned block_start, block_end;
801ac27a0ecSDave Kleikamp 	unsigned blocksize = head->b_size;
802ac27a0ecSDave Kleikamp 	int err, ret = 0;
803ac27a0ecSDave Kleikamp 	struct buffer_head *next;
804ac27a0ecSDave Kleikamp 
805ac27a0ecSDave Kleikamp 	for (bh = head, block_start = 0;
806ac27a0ecSDave Kleikamp 	     ret == 0 && (bh != head || !block_start);
807de9a55b8STheodore Ts'o 	     block_start = block_end, bh = next) {
808ac27a0ecSDave Kleikamp 		next = bh->b_this_page;
809ac27a0ecSDave Kleikamp 		block_end = block_start + blocksize;
810ac27a0ecSDave Kleikamp 		if (block_end <= from || block_start >= to) {
811ac27a0ecSDave Kleikamp 			if (partial && !buffer_uptodate(bh))
812ac27a0ecSDave Kleikamp 				*partial = 1;
813ac27a0ecSDave Kleikamp 			continue;
814ac27a0ecSDave Kleikamp 		}
815ac27a0ecSDave Kleikamp 		err = (*fn)(handle, bh);
816ac27a0ecSDave Kleikamp 		if (!ret)
817ac27a0ecSDave Kleikamp 			ret = err;
818ac27a0ecSDave Kleikamp 	}
819ac27a0ecSDave Kleikamp 	return ret;
820ac27a0ecSDave Kleikamp }
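/*
 * [Editor's note; the call below mirrors the actual use in
 * ext4_write_begin() further down.]  The iterator applies 'fn' to each
 * buffer_head overlapping [from, to):
 *
 *	ret = walk_page_buffers(handle, page_buffers(page),
 *				from, to, NULL,
 *				do_journal_get_write_access);
 *
 * Buffers wholly outside the range are skipped, and '*partial' is set
 * if any skipped buffer was not uptodate, so the caller knows the page
 * as a whole cannot be marked uptodate.
 */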
821ac27a0ecSDave Kleikamp 
822ac27a0ecSDave Kleikamp /*
823ac27a0ecSDave Kleikamp  * To preserve ordering, it is essential that the hole instantiation and
824ac27a0ecSDave Kleikamp  * the data write be encapsulated in a single transaction.  We cannot
825617ba13bSMingming Cao  * close off a transaction and start a new one between the ext4_get_block()
826dab291afSMingming Cao  * and the commit_write().  So doing the jbd2_journal_start at the start of
827ac27a0ecSDave Kleikamp  * prepare_write() is the right place.
828ac27a0ecSDave Kleikamp  *
829617ba13bSMingming Cao  * Also, this function can nest inside ext4_writepage() ->
830617ba13bSMingming Cao  * block_write_full_page(). In that case, we *know* that ext4_writepage()
831ac27a0ecSDave Kleikamp  * has generated enough buffer credits to do the whole page.  So we won't
832ac27a0ecSDave Kleikamp  * block on the journal in that case, which is good, because the caller may
833ac27a0ecSDave Kleikamp  * be PF_MEMALLOC.
834ac27a0ecSDave Kleikamp  *
835617ba13bSMingming Cao  * By accident, ext4 can be reentered when a transaction is open via
836ac27a0ecSDave Kleikamp  * quota file writes.  If we were to commit the transaction while thus
837ac27a0ecSDave Kleikamp  * reentered, there can be a deadlock - we would be holding a quota
838ac27a0ecSDave Kleikamp  * lock, and the commit would never complete if another thread had a
839ac27a0ecSDave Kleikamp  * transaction open and was blocking on the quota lock - a ranking
840ac27a0ecSDave Kleikamp  * violation.
841ac27a0ecSDave Kleikamp  *
842dab291afSMingming Cao  * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
843ac27a0ecSDave Kleikamp  * will _not_ run commit under these circumstances because handle->h_ref
844ac27a0ecSDave Kleikamp  * is elevated.  We'll still have enough credits for the tiny quotafile
845ac27a0ecSDave Kleikamp  * write.
846ac27a0ecSDave Kleikamp  */
847ac27a0ecSDave Kleikamp static int do_journal_get_write_access(handle_t *handle,
848ac27a0ecSDave Kleikamp 				       struct buffer_head *bh)
849ac27a0ecSDave Kleikamp {
85056d35a4cSJan Kara 	int dirty = buffer_dirty(bh);
85156d35a4cSJan Kara 	int ret;
85256d35a4cSJan Kara 
853ac27a0ecSDave Kleikamp 	if (!buffer_mapped(bh) || buffer_freed(bh))
854ac27a0ecSDave Kleikamp 		return 0;
85556d35a4cSJan Kara 	/*
856ebdec241SChristoph Hellwig 	 * __block_write_begin() could have dirtied some buffers. Clear
85756d35a4cSJan Kara 	 * the dirty bit as jbd2_journal_get_write_access() could complain
85856d35a4cSJan Kara 	 * otherwise about fs integrity issues. Setting of the dirty bit
859ebdec241SChristoph Hellwig 	 * by __block_write_begin() isn't a real problem here as we clear
86056d35a4cSJan Kara 	 * the bit before releasing a page lock and thus writeback cannot
86156d35a4cSJan Kara 	 * ever write the buffer.
86256d35a4cSJan Kara 	 */
86356d35a4cSJan Kara 	if (dirty)
86456d35a4cSJan Kara 		clear_buffer_dirty(bh);
86556d35a4cSJan Kara 	ret = ext4_journal_get_write_access(handle, bh);
86656d35a4cSJan Kara 	if (!ret && dirty)
86756d35a4cSJan Kara 		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
86856d35a4cSJan Kara 	return ret;
869ac27a0ecSDave Kleikamp }
870ac27a0ecSDave Kleikamp 
871744692dcSJiaying Zhang static int ext4_get_block_write(struct inode *inode, sector_t iblock,
872744692dcSJiaying Zhang 		   struct buffer_head *bh_result, int create);
873bfc1af65SNick Piggin static int ext4_write_begin(struct file *file, struct address_space *mapping,
874bfc1af65SNick Piggin 			    loff_t pos, unsigned len, unsigned flags,
875bfc1af65SNick Piggin 			    struct page **pagep, void **fsdata)
876ac27a0ecSDave Kleikamp {
877bfc1af65SNick Piggin 	struct inode *inode = mapping->host;
8781938a150SAneesh Kumar K.V 	int ret, needed_blocks;
879ac27a0ecSDave Kleikamp 	handle_t *handle;
880ac27a0ecSDave Kleikamp 	int retries = 0;
881bfc1af65SNick Piggin 	struct page *page;
882bfc1af65SNick Piggin 	pgoff_t index;
883bfc1af65SNick Piggin 	unsigned from, to;
884bfc1af65SNick Piggin 
8859bffad1eSTheodore Ts'o 	trace_ext4_write_begin(inode, pos, len, flags);
8861938a150SAneesh Kumar K.V 	/*
8871938a150SAneesh Kumar K.V 	 * Reserve one more block for addition to the orphan list in case
8881938a150SAneesh Kumar K.V 	 * we allocate blocks but the write fails for some reason.
8891938a150SAneesh Kumar K.V 	 */
8901938a150SAneesh Kumar K.V 	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
891bfc1af65SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
892bfc1af65SNick Piggin 	from = pos & (PAGE_CACHE_SIZE - 1);
893bfc1af65SNick Piggin 	to = from + len;
894ac27a0ecSDave Kleikamp 
895ac27a0ecSDave Kleikamp retry:
896617ba13bSMingming Cao 	handle = ext4_journal_start(inode, needed_blocks);
8977479d2b9SAndrew Morton 	if (IS_ERR(handle)) {
8987479d2b9SAndrew Morton 		ret = PTR_ERR(handle);
8997479d2b9SAndrew Morton 		goto out;
9007479d2b9SAndrew Morton 	}
901ac27a0ecSDave Kleikamp 
902ebd3610bSJan Kara 	/* We cannot recurse into the filesystem as the transaction is already
903ebd3610bSJan Kara 	 * started */
904ebd3610bSJan Kara 	flags |= AOP_FLAG_NOFS;
905ebd3610bSJan Kara 
90654566b2cSNick Piggin 	page = grab_cache_page_write_begin(mapping, index, flags);
907cf108bcaSJan Kara 	if (!page) {
908cf108bcaSJan Kara 		ext4_journal_stop(handle);
909cf108bcaSJan Kara 		ret = -ENOMEM;
910cf108bcaSJan Kara 		goto out;
911cf108bcaSJan Kara 	}
912cf108bcaSJan Kara 	*pagep = page;
913cf108bcaSJan Kara 
914744692dcSJiaying Zhang 	if (ext4_should_dioread_nolock(inode))
9156e1db88dSChristoph Hellwig 		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
916744692dcSJiaying Zhang 	else
9176e1db88dSChristoph Hellwig 		ret = __block_write_begin(page, pos, len, ext4_get_block);
918bfc1af65SNick Piggin 
919bfc1af65SNick Piggin 	if (!ret && ext4_should_journal_data(inode)) {
920ac27a0ecSDave Kleikamp 		ret = walk_page_buffers(handle, page_buffers(page),
921ac27a0ecSDave Kleikamp 				from, to, NULL, do_journal_get_write_access);
922b46be050SAndrey Savochkin 	}
923bfc1af65SNick Piggin 
924bfc1af65SNick Piggin 	if (ret) {
925bfc1af65SNick Piggin 		unlock_page(page);
926bfc1af65SNick Piggin 		page_cache_release(page);
927ae4d5372SAneesh Kumar K.V 		/*
9286e1db88dSChristoph Hellwig 		 * __block_write_begin may have instantiated a few blocks
929ae4d5372SAneesh Kumar K.V 		 * outside i_size.  Trim these off again. We don't need
930ae4d5372SAneesh Kumar K.V 		 * i_size_read() because we hold i_mutex.
9311938a150SAneesh Kumar K.V 		 *
9321938a150SAneesh Kumar K.V 		 * Add the inode to the orphan list in case we crash before
9331938a150SAneesh Kumar K.V 		 * the truncate finishes.
934ae4d5372SAneesh Kumar K.V 		 */
935ffacfa7aSJan Kara 		if (pos + len > inode->i_size && ext4_can_truncate(inode))
9361938a150SAneesh Kumar K.V 			ext4_orphan_add(handle, inode);
9371938a150SAneesh Kumar K.V 
9381938a150SAneesh Kumar K.V 		ext4_journal_stop(handle);
9391938a150SAneesh Kumar K.V 		if (pos + len > inode->i_size) {
940b9a4207dSJan Kara 			ext4_truncate_failed_write(inode);
9411938a150SAneesh Kumar K.V 			/*
942ffacfa7aSJan Kara 			 * If truncate failed early the inode might
9431938a150SAneesh Kumar K.V 			 * still be on the orphan list; we need to
9441938a150SAneesh Kumar K.V 			 * make sure the inode is removed from the
9451938a150SAneesh Kumar K.V 			 * orphan list in that case.
9461938a150SAneesh Kumar K.V 			 */
9471938a150SAneesh Kumar K.V 			if (inode->i_nlink)
9481938a150SAneesh Kumar K.V 				ext4_orphan_del(NULL, inode);
9491938a150SAneesh Kumar K.V 		}
950bfc1af65SNick Piggin 	}
951bfc1af65SNick Piggin 
952617ba13bSMingming Cao 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
953ac27a0ecSDave Kleikamp 		goto retry;
9547479d2b9SAndrew Morton out:
955ac27a0ecSDave Kleikamp 	return ret;
956ac27a0ecSDave Kleikamp }
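/*
 * [Editor's note; a simplified sketch of the generic caller, not part
 * of the original file.]  The VFS pairs this with ->write_end: for
 * each segment of a buffered write, generic_perform_write() roughly
 * does
 *
 *	a_ops->write_begin(file, mapping, pos, len, flags,
 *			   &page, &fsdata);
 *	... copy user data into the locked page ...
 *	a_ops->write_end(file, mapping, pos, len, copied, page, fsdata);
 *
 * which is why the error paths above must stop the handle and clean up
 * any orphan-list entry themselves: ->write_end is never called when
 * ->write_begin fails.
 */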
957ac27a0ecSDave Kleikamp 
958bfc1af65SNick Piggin /* For write_end() in data=journal mode */
959bfc1af65SNick Piggin static int write_end_fn(handle_t *handle, struct buffer_head *bh)
960ac27a0ecSDave Kleikamp {
961ac27a0ecSDave Kleikamp 	if (!buffer_mapped(bh) || buffer_freed(bh))
962ac27a0ecSDave Kleikamp 		return 0;
963ac27a0ecSDave Kleikamp 	set_buffer_uptodate(bh);
9640390131bSFrank Mayhar 	return ext4_handle_dirty_metadata(handle, NULL, bh);
965ac27a0ecSDave Kleikamp }
966ac27a0ecSDave Kleikamp 
967f8514083SAneesh Kumar K.V static int ext4_generic_write_end(struct file *file,
968f8514083SAneesh Kumar K.V 				  struct address_space *mapping,
969f8514083SAneesh Kumar K.V 				  loff_t pos, unsigned len, unsigned copied,
970f8514083SAneesh Kumar K.V 				  struct page *page, void *fsdata)
971f8514083SAneesh Kumar K.V {
972f8514083SAneesh Kumar K.V 	int i_size_changed = 0;
973f8514083SAneesh Kumar K.V 	struct inode *inode = mapping->host;
974f8514083SAneesh Kumar K.V 	handle_t *handle = ext4_journal_current_handle();
975f8514083SAneesh Kumar K.V 
976f8514083SAneesh Kumar K.V 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
977f8514083SAneesh Kumar K.V 
978f8514083SAneesh Kumar K.V 	/*
979f8514083SAneesh Kumar K.V 	 * No need to use i_size_read() here, the i_size
980f8514083SAneesh Kumar K.V 	 * cannot change under us because we hold i_mutex.
981f8514083SAneesh Kumar K.V 	 *
982f8514083SAneesh Kumar K.V 	 * But it's important to update i_size while still holding page lock:
983f8514083SAneesh Kumar K.V 	 * page writeout could otherwise come in and zero beyond i_size.
984f8514083SAneesh Kumar K.V 	 */
985f8514083SAneesh Kumar K.V 	if (pos + copied > inode->i_size) {
986f8514083SAneesh Kumar K.V 		i_size_write(inode, pos + copied);
987f8514083SAneesh Kumar K.V 		i_size_changed = 1;
988f8514083SAneesh Kumar K.V 	}
989f8514083SAneesh Kumar K.V 
990f8514083SAneesh Kumar K.V 	if (pos + copied >  EXT4_I(inode)->i_disksize) {
991f8514083SAneesh Kumar K.V 		/* We need to mark inode dirty even if
992f8514083SAneesh Kumar K.V 		 * new_i_size is less than inode->i_size
993f8514083SAneesh Kumar K.V 		 * but greater than i_disksize (hint: delalloc).
994f8514083SAneesh Kumar K.V 		 */
995f8514083SAneesh Kumar K.V 		ext4_update_i_disksize(inode, (pos + copied));
996f8514083SAneesh Kumar K.V 		i_size_changed = 1;
997f8514083SAneesh Kumar K.V 	}
998f8514083SAneesh Kumar K.V 	unlock_page(page);
999f8514083SAneesh Kumar K.V 	page_cache_release(page);
1000f8514083SAneesh Kumar K.V 
1001f8514083SAneesh Kumar K.V 	/*
1002f8514083SAneesh Kumar K.V 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
1003f8514083SAneesh Kumar K.V 	 * lengthens the time the page lock is held. Second, it forces lock
1004f8514083SAneesh Kumar K.V 	 * ordering of page lock and transaction start for journaling
1005f8514083SAneesh Kumar K.V 	 * filesystems.
1006f8514083SAneesh Kumar K.V 	 */
1007f8514083SAneesh Kumar K.V 	if (i_size_changed)
1008f8514083SAneesh Kumar K.V 		ext4_mark_inode_dirty(handle, inode);
1009f8514083SAneesh Kumar K.V 
1010f8514083SAneesh Kumar K.V 	return copied;
1011f8514083SAneesh Kumar K.V }
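/*
 * [Editor's worked example; the offsets are hypothetical.]  With
 * delayed allocation, i_disksize can lag i_size.  If i_size ==
 * i_disksize == 100 and a write copies 50 bytes at pos 100, both
 * branches above fire: i_size becomes 150 and i_disksize is pushed to
 * 150 with it.  If an earlier delalloc write had already set i_size to
 * 150 while i_disksize was still 100, a copy ending at offset 120
 * triggers only the second branch (pos + copied > i_disksize), which
 * is the delalloc hint mentioned in the comment above.
 */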
1012f8514083SAneesh Kumar K.V 
1013ac27a0ecSDave Kleikamp /*
1014ac27a0ecSDave Kleikamp  * We need to pick up the new inode size which generic_commit_write gave us.
1015ac27a0ecSDave Kleikamp  * `file' can be NULL - e.g., when called from page_symlink().
1016ac27a0ecSDave Kleikamp  *
1017617ba13bSMingming Cao  * ext4 never places buffers on inode->i_mapping->private_list; metadata
1018ac27a0ecSDave Kleikamp  * buffers are managed internally.
1019ac27a0ecSDave Kleikamp  */
1020bfc1af65SNick Piggin static int ext4_ordered_write_end(struct file *file,
1021bfc1af65SNick Piggin 				  struct address_space *mapping,
1022bfc1af65SNick Piggin 				  loff_t pos, unsigned len, unsigned copied,
1023bfc1af65SNick Piggin 				  struct page *page, void *fsdata)
1024ac27a0ecSDave Kleikamp {
1025617ba13bSMingming Cao 	handle_t *handle = ext4_journal_current_handle();
1026cf108bcaSJan Kara 	struct inode *inode = mapping->host;
1027ac27a0ecSDave Kleikamp 	int ret = 0, ret2;
1028ac27a0ecSDave Kleikamp 
10299bffad1eSTheodore Ts'o 	trace_ext4_ordered_write_end(inode, pos, len, copied);
1030678aaf48SJan Kara 	ret = ext4_jbd2_file_inode(handle, inode);
1031ac27a0ecSDave Kleikamp 
1032ac27a0ecSDave Kleikamp 	if (ret == 0) {
1033f8514083SAneesh Kumar K.V 		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1034bfc1af65SNick Piggin 							page, fsdata);
1035f8a87d89SRoel Kluin 		copied = ret2;
1036ffacfa7aSJan Kara 		if (pos + len > inode->i_size && ext4_can_truncate(inode))
1037f8514083SAneesh Kumar K.V 			/* If we have allocated more blocks and copied
1038f8514083SAneesh Kumar K.V 			 * less, we will have blocks allocated outside
1039f8514083SAneesh Kumar K.V 			 * inode->i_size. So truncate them.
1040f8514083SAneesh Kumar K.V 			 */
1041f8514083SAneesh Kumar K.V 			ext4_orphan_add(handle, inode);
1042f8a87d89SRoel Kluin 		if (ret2 < 0)
1043f8a87d89SRoel Kluin 			ret = ret2;
104409e0834fSAkira Fujita 	} else {
104509e0834fSAkira Fujita 		unlock_page(page);
104609e0834fSAkira Fujita 		page_cache_release(page);
1047ac27a0ecSDave Kleikamp 	}
104809e0834fSAkira Fujita 
1049617ba13bSMingming Cao 	ret2 = ext4_journal_stop(handle);
1050ac27a0ecSDave Kleikamp 	if (!ret)
1051ac27a0ecSDave Kleikamp 		ret = ret2;
1052bfc1af65SNick Piggin 
1053f8514083SAneesh Kumar K.V 	if (pos + len > inode->i_size) {
1054b9a4207dSJan Kara 		ext4_truncate_failed_write(inode);
1055f8514083SAneesh Kumar K.V 		/*
1056ffacfa7aSJan Kara 		 * If truncate failed early the inode might still be
1057f8514083SAneesh Kumar K.V 		 * on the orphan list; we need to make sure the inode
1058f8514083SAneesh Kumar K.V 		 * is removed from the orphan list in that case.
1059f8514083SAneesh Kumar K.V 		 */
1060f8514083SAneesh Kumar K.V 		if (inode->i_nlink)
1061f8514083SAneesh Kumar K.V 			ext4_orphan_del(NULL, inode);
1062f8514083SAneesh Kumar K.V 	}
1063f8514083SAneesh Kumar K.V 
1064f8514083SAneesh Kumar K.V 
1065bfc1af65SNick Piggin 	return ret ? ret : copied;
1066ac27a0ecSDave Kleikamp }
1067ac27a0ecSDave Kleikamp 
1068bfc1af65SNick Piggin static int ext4_writeback_write_end(struct file *file,
1069bfc1af65SNick Piggin 				    struct address_space *mapping,
1070bfc1af65SNick Piggin 				    loff_t pos, unsigned len, unsigned copied,
1071bfc1af65SNick Piggin 				    struct page *page, void *fsdata)
1072ac27a0ecSDave Kleikamp {
1073617ba13bSMingming Cao 	handle_t *handle = ext4_journal_current_handle();
1074cf108bcaSJan Kara 	struct inode *inode = mapping->host;
1075ac27a0ecSDave Kleikamp 	int ret = 0, ret2;
1076ac27a0ecSDave Kleikamp 
10779bffad1eSTheodore Ts'o 	trace_ext4_writeback_write_end(inode, pos, len, copied);
1078f8514083SAneesh Kumar K.V 	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1079bfc1af65SNick Piggin 							page, fsdata);
1080f8a87d89SRoel Kluin 	copied = ret2;
1081ffacfa7aSJan Kara 	if (pos + len > inode->i_size && ext4_can_truncate(inode))
1082f8514083SAneesh Kumar K.V 	/* If we have allocated more blocks and copied
1083f8514083SAneesh Kumar K.V 	 * less, we will have blocks allocated outside
1084f8514083SAneesh Kumar K.V 	 * inode->i_size. So truncate them.
1085f8514083SAneesh Kumar K.V 		 */
1086f8514083SAneesh Kumar K.V 		ext4_orphan_add(handle, inode);
1087f8514083SAneesh Kumar K.V 
1088f8a87d89SRoel Kluin 	if (ret2 < 0)
1089f8a87d89SRoel Kluin 		ret = ret2;
1090ac27a0ecSDave Kleikamp 
1091617ba13bSMingming Cao 	ret2 = ext4_journal_stop(handle);
1092ac27a0ecSDave Kleikamp 	if (!ret)
1093ac27a0ecSDave Kleikamp 		ret = ret2;
1094bfc1af65SNick Piggin 
1095f8514083SAneesh Kumar K.V 	if (pos + len > inode->i_size) {
1096b9a4207dSJan Kara 		ext4_truncate_failed_write(inode);
1097f8514083SAneesh Kumar K.V 		/*
1098ffacfa7aSJan Kara 		 * If truncate failed early the inode might still be
1099f8514083SAneesh Kumar K.V 		 * on the orphan list; we need to make sure the inode
1100f8514083SAneesh Kumar K.V 		 * is removed from the orphan list in that case.
1101f8514083SAneesh Kumar K.V 		 */
1102f8514083SAneesh Kumar K.V 		if (inode->i_nlink)
1103f8514083SAneesh Kumar K.V 			ext4_orphan_del(NULL, inode);
1104f8514083SAneesh Kumar K.V 	}
1105f8514083SAneesh Kumar K.V 
1106bfc1af65SNick Piggin 	return ret ? ret : copied;
1107ac27a0ecSDave Kleikamp }
1108ac27a0ecSDave Kleikamp 
1109bfc1af65SNick Piggin static int ext4_journalled_write_end(struct file *file,
1110bfc1af65SNick Piggin 				     struct address_space *mapping,
1111bfc1af65SNick Piggin 				     loff_t pos, unsigned len, unsigned copied,
1112bfc1af65SNick Piggin 				     struct page *page, void *fsdata)
1113ac27a0ecSDave Kleikamp {
1114617ba13bSMingming Cao 	handle_t *handle = ext4_journal_current_handle();
1115bfc1af65SNick Piggin 	struct inode *inode = mapping->host;
1116ac27a0ecSDave Kleikamp 	int ret = 0, ret2;
1117ac27a0ecSDave Kleikamp 	int partial = 0;
1118bfc1af65SNick Piggin 	unsigned from, to;
1119cf17fea6SAneesh Kumar K.V 	loff_t new_i_size;
1120ac27a0ecSDave Kleikamp 
11219bffad1eSTheodore Ts'o 	trace_ext4_journalled_write_end(inode, pos, len, copied);
1122bfc1af65SNick Piggin 	from = pos & (PAGE_CACHE_SIZE - 1);
1123bfc1af65SNick Piggin 	to = from + len;
1124bfc1af65SNick Piggin 
1125441c8508SCurt Wohlgemuth 	BUG_ON(!ext4_handle_valid(handle));
1126441c8508SCurt Wohlgemuth 
1127bfc1af65SNick Piggin 	if (copied < len) {
1128bfc1af65SNick Piggin 		if (!PageUptodate(page))
1129bfc1af65SNick Piggin 			copied = 0;
1130bfc1af65SNick Piggin 		page_zero_new_buffers(page, from+copied, to);
1131bfc1af65SNick Piggin 	}
1132ac27a0ecSDave Kleikamp 
1133ac27a0ecSDave Kleikamp 	ret = walk_page_buffers(handle, page_buffers(page), from,
1134bfc1af65SNick Piggin 				to, &partial, write_end_fn);
1135ac27a0ecSDave Kleikamp 	if (!partial)
1136ac27a0ecSDave Kleikamp 		SetPageUptodate(page);
1137cf17fea6SAneesh Kumar K.V 	new_i_size = pos + copied;
1138cf17fea6SAneesh Kumar K.V 	if (new_i_size > inode->i_size)
1139bfc1af65SNick Piggin 		i_size_write(inode, pos+copied);
114019f5fb7aSTheodore Ts'o 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
11412d859db3SJan Kara 	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1142cf17fea6SAneesh Kumar K.V 	if (new_i_size > EXT4_I(inode)->i_disksize) {
1143cf17fea6SAneesh Kumar K.V 		ext4_update_i_disksize(inode, new_i_size);
1144617ba13bSMingming Cao 		ret2 = ext4_mark_inode_dirty(handle, inode);
1145ac27a0ecSDave Kleikamp 		if (!ret)
1146ac27a0ecSDave Kleikamp 			ret = ret2;
1147ac27a0ecSDave Kleikamp 	}
1148bfc1af65SNick Piggin 
1149cf108bcaSJan Kara 	unlock_page(page);
1150f8514083SAneesh Kumar K.V 	page_cache_release(page);
1151ffacfa7aSJan Kara 	if (pos + len > inode->i_size && ext4_can_truncate(inode))
1152f8514083SAneesh Kumar K.V 	/* If we have allocated more blocks and copied
1153f8514083SAneesh Kumar K.V 	 * less, we will have blocks allocated outside
1154f8514083SAneesh Kumar K.V 	 * inode->i_size. So truncate them.
1155f8514083SAneesh Kumar K.V 		 */
1156f8514083SAneesh Kumar K.V 		ext4_orphan_add(handle, inode);
1157f8514083SAneesh Kumar K.V 
1158617ba13bSMingming Cao 	ret2 = ext4_journal_stop(handle);
1159ac27a0ecSDave Kleikamp 	if (!ret)
1160ac27a0ecSDave Kleikamp 		ret = ret2;
1161f8514083SAneesh Kumar K.V 	if (pos + len > inode->i_size) {
1162b9a4207dSJan Kara 		ext4_truncate_failed_write(inode);
1163f8514083SAneesh Kumar K.V 		/*
1164ffacfa7aSJan Kara 		 * If truncate failed early the inode might still be
1165f8514083SAneesh Kumar K.V 		 * on the orphan list; we need to make sure the inode
1166f8514083SAneesh Kumar K.V 		 * is removed from the orphan list in that case.
1167f8514083SAneesh Kumar K.V 		 */
1168f8514083SAneesh Kumar K.V 		if (inode->i_nlink)
1169f8514083SAneesh Kumar K.V 			ext4_orphan_del(NULL, inode);
1170f8514083SAneesh Kumar K.V 	}
1171bfc1af65SNick Piggin 
1172bfc1af65SNick Piggin 	return ret ? ret : copied;
1173ac27a0ecSDave Kleikamp }
1174d2a17637SMingming Cao 
11759d0be502STheodore Ts'o /*
11767b415bf6SAditya Kali  * Reserve a single cluster located at lblock
11779d0be502STheodore Ts'o  */
117801f49d0bSTheodore Ts'o static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
1179d2a17637SMingming Cao {
1180030ba6bcSAneesh Kumar K.V 	int retries = 0;
1181d2a17637SMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
11820637c6f4STheodore Ts'o 	struct ext4_inode_info *ei = EXT4_I(inode);
11837b415bf6SAditya Kali 	unsigned int md_needed;
11845dd4056dSChristoph Hellwig 	int ret;
1185d2a17637SMingming Cao 
1186d2a17637SMingming Cao 	/*
1187d2a17637SMingming Cao 	 * Recalculate the amount of metadata blocks to reserve
1188d2a17637SMingming Cao 	 * in order to allocate nrblocks;
1189d2a17637SMingming Cao 	 * the worst case is one extent per block.
1190d2a17637SMingming Cao 	 */
1191030ba6bcSAneesh Kumar K.V repeat:
11920637c6f4STheodore Ts'o 	spin_lock(&ei->i_block_reservation_lock);
11937b415bf6SAditya Kali 	md_needed = EXT4_NUM_B2C(sbi,
11947b415bf6SAditya Kali 				 ext4_calc_metadata_amount(inode, lblock));
1195f8ec9d68STheodore Ts'o 	trace_ext4_da_reserve_space(inode, md_needed);
11960637c6f4STheodore Ts'o 	spin_unlock(&ei->i_block_reservation_lock);
1197d2a17637SMingming Cao 
119860e58e0fSMingming Cao 	/*
119972b8ab9dSEric Sandeen 	 * We will charge metadata quota at writeout time; this saves
120072b8ab9dSEric Sandeen 	 * us from metadata over-estimation, though we may go over by
120172b8ab9dSEric Sandeen 	 * a small amount in the end.  Here we just reserve for data.
120260e58e0fSMingming Cao 	 */
12037b415bf6SAditya Kali 	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
12045dd4056dSChristoph Hellwig 	if (ret)
12055dd4056dSChristoph Hellwig 		return ret;
120672b8ab9dSEric Sandeen 	/*
120772b8ab9dSEric Sandeen 	 * We do still charge estimated metadata to the sb though;
120872b8ab9dSEric Sandeen 	 * we cannot afford to run out of free blocks.
120972b8ab9dSEric Sandeen 	 */
1210e7d5f315STheodore Ts'o 	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
12117b415bf6SAditya Kali 		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
1212030ba6bcSAneesh Kumar K.V 		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1213030ba6bcSAneesh Kumar K.V 			yield();
1214030ba6bcSAneesh Kumar K.V 			goto repeat;
1215030ba6bcSAneesh Kumar K.V 		}
1216d2a17637SMingming Cao 		return -ENOSPC;
1217d2a17637SMingming Cao 	}
12180637c6f4STheodore Ts'o 	spin_lock(&ei->i_block_reservation_lock);
12199d0be502STheodore Ts'o 	ei->i_reserved_data_blocks++;
12200637c6f4STheodore Ts'o 	ei->i_reserved_meta_blocks += md_needed;
12210637c6f4STheodore Ts'o 	spin_unlock(&ei->i_block_reservation_lock);
122239bc680aSDmitry Monakhov 
1223d2a17637SMingming Cao 	return 0;       /* success */
1224d2a17637SMingming Cao }
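/*
 * Illustrative sketch (not from the ext4 source): the reserve/claim/retry
 * shape of ext4_da_reserve_space() above, reduced to userspace.
 * claim_free() and should_retry() are hypothetical stand-ins for
 * ext4_claim_free_clusters() and ext4_should_retry_alloc(); the bounded
 * retry policy shown here is an assumption, not the kernel's.
 */
#include <errno.h>
#include <sched.h>
#include <stdbool.h>

static bool claim_free(int nclusters)
{
	(void)nclusters;
	return true;			/* pretend the claim succeeds */
}

static bool should_retry(int *retries)
{
	return (*retries)++ < 3;	/* toy bounded-retry policy */
}

static int reserve_one_cluster(void)
{
	int retries = 0;

repeat:
	if (!claim_free(1)) {
		if (should_retry(&retries)) {
			sched_yield();	/* mirrors yield() above */
			goto repeat;
		}
		return -ENOSPC;		/* give up, as the original does */
	}
	return 0;			/* success */
}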
1225d2a17637SMingming Cao 
122612219aeaSAneesh Kumar K.V static void ext4_da_release_space(struct inode *inode, int to_free)
1227d2a17637SMingming Cao {
1228d2a17637SMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
12290637c6f4STheodore Ts'o 	struct ext4_inode_info *ei = EXT4_I(inode);
1230d2a17637SMingming Cao 
1231cd213226SMingming Cao 	if (!to_free)
1232cd213226SMingming Cao 		return;		/* Nothing to release, exit */
1233cd213226SMingming Cao 
1234d2a17637SMingming Cao 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1235cd213226SMingming Cao 
12365a58ec87SLi Zefan 	trace_ext4_da_release_space(inode, to_free);
12370637c6f4STheodore Ts'o 	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1238cd213226SMingming Cao 		/*
12390637c6f4STheodore Ts'o 		 * If there aren't enough reserved blocks, then the
12400637c6f4STheodore Ts'o 		 * counter is messed up somewhere.  Since this
12410637c6f4STheodore Ts'o 		 * function is called from the invalidatepage path, it's
12420637c6f4STheodore Ts'o 		 * harmless to return without any action.
1243cd213226SMingming Cao 		 */
12440637c6f4STheodore Ts'o 		ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
12450637c6f4STheodore Ts'o 			 "ino %lu, to_free %d with only %d reserved "
12461084f252STheodore Ts'o 			 "data blocks", inode->i_ino, to_free,
12470637c6f4STheodore Ts'o 			 ei->i_reserved_data_blocks);
12480637c6f4STheodore Ts'o 		WARN_ON(1);
12490637c6f4STheodore Ts'o 		to_free = ei->i_reserved_data_blocks;
12500637c6f4STheodore Ts'o 	}
12510637c6f4STheodore Ts'o 	ei->i_reserved_data_blocks -= to_free;
12520637c6f4STheodore Ts'o 
12530637c6f4STheodore Ts'o 	if (ei->i_reserved_data_blocks == 0) {
12540637c6f4STheodore Ts'o 		/*
12550637c6f4STheodore Ts'o 		 * We can release all of the reserved metadata blocks
12560637c6f4STheodore Ts'o 		 * only when we have written all of the delayed
12570637c6f4STheodore Ts'o 		 * allocation blocks.
12587b415bf6SAditya Kali 		 * Note that in case of bigalloc, i_reserved_meta_blocks,
12597b415bf6SAditya Kali 		 * i_reserved_data_blocks, etc. refer to the number of clusters.
12600637c6f4STheodore Ts'o 		 */
126157042651STheodore Ts'o 		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
126272b8ab9dSEric Sandeen 				   ei->i_reserved_meta_blocks);
1263ee5f4d9cSTheodore Ts'o 		ei->i_reserved_meta_blocks = 0;
12649d0be502STheodore Ts'o 		ei->i_da_metadata_calc_len = 0;
1265cd213226SMingming Cao 	}
1266cd213226SMingming Cao 
126772b8ab9dSEric Sandeen 	/* update fs dirty data blocks counter */
126857042651STheodore Ts'o 	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
1269d2a17637SMingming Cao 
1270d2a17637SMingming Cao 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
127160e58e0fSMingming Cao 
12727b415bf6SAditya Kali 	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
1273d2a17637SMingming Cao }
1274d2a17637SMingming Cao 
1275d2a17637SMingming Cao static void ext4_da_page_release_reservation(struct page *page,
1276d2a17637SMingming Cao 					     unsigned long offset)
1277d2a17637SMingming Cao {
1278d2a17637SMingming Cao 	int to_release = 0;
1279d2a17637SMingming Cao 	struct buffer_head *head, *bh;
1280d2a17637SMingming Cao 	unsigned int curr_off = 0;
12817b415bf6SAditya Kali 	struct inode *inode = page->mapping->host;
12827b415bf6SAditya Kali 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
12837b415bf6SAditya Kali 	int num_clusters;
1284d2a17637SMingming Cao 
1285d2a17637SMingming Cao 	head = page_buffers(page);
1286d2a17637SMingming Cao 	bh = head;
1287d2a17637SMingming Cao 	do {
1288d2a17637SMingming Cao 		unsigned int next_off = curr_off + bh->b_size;
1289d2a17637SMingming Cao 
1290d2a17637SMingming Cao 		if ((offset <= curr_off) && (buffer_delay(bh))) {
1291d2a17637SMingming Cao 			to_release++;
1292d2a17637SMingming Cao 			clear_buffer_delay(bh);
12935356f261SAditya Kali 			clear_buffer_da_mapped(bh);
1294d2a17637SMingming Cao 		}
1295d2a17637SMingming Cao 		curr_off = next_off;
1296d2a17637SMingming Cao 	} while ((bh = bh->b_this_page) != head);
12977b415bf6SAditya Kali 
12987b415bf6SAditya Kali 	/* If we have released all the blocks belonging to a cluster, then we
12997b415bf6SAditya Kali 	 * need to release the reserved space for that cluster. */
13007b415bf6SAditya Kali 	num_clusters = EXT4_NUM_B2C(sbi, to_release);
13017b415bf6SAditya Kali 	while (num_clusters > 0) {
13027b415bf6SAditya Kali 		ext4_fsblk_t lblk;
13037b415bf6SAditya Kali 		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
13047b415bf6SAditya Kali 			((num_clusters - 1) << sbi->s_cluster_bits);
13057b415bf6SAditya Kali 		if (sbi->s_cluster_ratio == 1 ||
13067b415bf6SAditya Kali 		    !ext4_find_delalloc_cluster(inode, lblk, 1))
13077b415bf6SAditya Kali 			ext4_da_release_space(inode, 1);
13087b415bf6SAditya Kali 
13097b415bf6SAditya Kali 		num_clusters--;
13107b415bf6SAditya Kali 	}
1311d2a17637SMingming Cao }
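/*
 * Worked example (a sketch with assumed parameters, not from the source):
 * the lblk arithmetic in the loop above, with 4 KiB pages
 * (PAGE_CACHE_SHIFT = 12), 1 KiB blocks (i_blkbits = 10) and a cluster
 * ratio of 4 (s_cluster_bits = 2).  Releasing 8 delayed blocks on page
 * index 5 probes one representative block per cluster: lblk 24, then 20.
 */
#include <stdio.h>

int main(void)
{
	unsigned long index = 5;			/* page->index (assumed) */
	int page_shift = 12, blkbits = 10, cluster_bits = 2;
	int to_release = 8;				/* delayed blocks on the page */
	int num_clusters = to_release >> cluster_bits;	/* EXT4_NUM_B2C() == 2 */

	while (num_clusters > 0) {
		unsigned long lblk = (index << (page_shift - blkbits)) +
			((unsigned long)(num_clusters - 1) << cluster_bits);
		printf("probe cluster via lblk %lu\n", lblk);	/* 24, then 20 */
		num_clusters--;
	}
	return 0;
}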
1312ac27a0ecSDave Kleikamp 
1313ac27a0ecSDave Kleikamp /*
131464769240SAlex Tomas  * Delayed allocation stuff
131564769240SAlex Tomas  */
131664769240SAlex Tomas 
131764769240SAlex Tomas /*
131864769240SAlex Tomas  * mpage_da_submit_io - walks through an extent of pages and tries to write
1319a1d6cc56SAneesh Kumar K.V  * them with the writepage() callback
132064769240SAlex Tomas  *
132164769240SAlex Tomas  * @mpd->inode: inode
132264769240SAlex Tomas  * @mpd->first_page: first page of the extent
132364769240SAlex Tomas  * @mpd->next_page: page after the last page of the extent
132464769240SAlex Tomas  *
132564769240SAlex Tomas  * By the time mpage_da_submit_io() is called we expect all blocks
132664769240SAlex Tomas  * to be allocated. This may be wrong if the allocation failed.
132764769240SAlex Tomas  *
132864769240SAlex Tomas  * As pages are already locked by write_cache_pages(), we can't use it
132964769240SAlex Tomas  */
13301de3e3dfSTheodore Ts'o static int mpage_da_submit_io(struct mpage_da_data *mpd,
13311de3e3dfSTheodore Ts'o 			      struct ext4_map_blocks *map)
133264769240SAlex Tomas {
1333791b7f08SAneesh Kumar K.V 	struct pagevec pvec;
1334791b7f08SAneesh Kumar K.V 	unsigned long index, end;
1335791b7f08SAneesh Kumar K.V 	int ret = 0, err, nr_pages, i;
1336791b7f08SAneesh Kumar K.V 	struct inode *inode = mpd->inode;
1337791b7f08SAneesh Kumar K.V 	struct address_space *mapping = inode->i_mapping;
1338cb20d518STheodore Ts'o 	loff_t size = i_size_read(inode);
13393ecdb3a1STheodore Ts'o 	unsigned int len, block_start;
13403ecdb3a1STheodore Ts'o 	struct buffer_head *bh, *page_bufs = NULL;
1341cb20d518STheodore Ts'o 	int journal_data = ext4_should_journal_data(inode);
13421de3e3dfSTheodore Ts'o 	sector_t pblock = 0, cur_logical = 0;
1343bd2d0210STheodore Ts'o 	struct ext4_io_submit io_submit;
134464769240SAlex Tomas 
134564769240SAlex Tomas 	BUG_ON(mpd->next_page <= mpd->first_page);
1346bd2d0210STheodore Ts'o 	memset(&io_submit, 0, sizeof(io_submit));
1347791b7f08SAneesh Kumar K.V 	/*
1348791b7f08SAneesh Kumar K.V 	 * We need to start from the first_page to the next_page - 1
1349791b7f08SAneesh Kumar K.V 	 * to make sure we also write the mapped dirty buffer_heads.
13508dc207c0STheodore Ts'o 	 * If we look at mpd->b_blocknr we would only be looking
1351791b7f08SAneesh Kumar K.V 	 * at the currently mapped buffer_heads.
1352791b7f08SAneesh Kumar K.V 	 */
135364769240SAlex Tomas 	index = mpd->first_page;
135464769240SAlex Tomas 	end = mpd->next_page - 1;
135564769240SAlex Tomas 
1356791b7f08SAneesh Kumar K.V 	pagevec_init(&pvec, 0);
135764769240SAlex Tomas 	while (index <= end) {
1358791b7f08SAneesh Kumar K.V 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
135964769240SAlex Tomas 		if (nr_pages == 0)
136064769240SAlex Tomas 			break;
136164769240SAlex Tomas 		for (i = 0; i < nr_pages; i++) {
136297498956STheodore Ts'o 			int commit_write = 0, skip_page = 0;
136364769240SAlex Tomas 			struct page *page = pvec.pages[i];
136464769240SAlex Tomas 
1365791b7f08SAneesh Kumar K.V 			index = page->index;
1366791b7f08SAneesh Kumar K.V 			if (index > end)
1367791b7f08SAneesh Kumar K.V 				break;
1368cb20d518STheodore Ts'o 
1369cb20d518STheodore Ts'o 			if (index == size >> PAGE_CACHE_SHIFT)
1370cb20d518STheodore Ts'o 				len = size & ~PAGE_CACHE_MASK;
1371cb20d518STheodore Ts'o 			else
1372cb20d518STheodore Ts'o 				len = PAGE_CACHE_SIZE;
13731de3e3dfSTheodore Ts'o 			if (map) {
13741de3e3dfSTheodore Ts'o 				cur_logical = index << (PAGE_CACHE_SHIFT -
13751de3e3dfSTheodore Ts'o 							inode->i_blkbits);
13761de3e3dfSTheodore Ts'o 				pblock = map->m_pblk + (cur_logical -
13771de3e3dfSTheodore Ts'o 							map->m_lblk);
13781de3e3dfSTheodore Ts'o 			}
1379791b7f08SAneesh Kumar K.V 			index++;
1380791b7f08SAneesh Kumar K.V 
1381791b7f08SAneesh Kumar K.V 			BUG_ON(!PageLocked(page));
1382791b7f08SAneesh Kumar K.V 			BUG_ON(PageWriteback(page));
1383791b7f08SAneesh Kumar K.V 
138422208dedSAneesh Kumar K.V 			/*
1385cb20d518STheodore Ts'o 			 * If the page does not have buffers (for
1386cb20d518STheodore Ts'o 			 * whatever reason), try to create them using
1387a107e5a3STheodore Ts'o 			 * __block_write_begin.  If this fails,
138897498956STheodore Ts'o 			 * skip the page and move on.
138922208dedSAneesh Kumar K.V 			 */
1390cb20d518STheodore Ts'o 			if (!page_has_buffers(page)) {
1391a107e5a3STheodore Ts'o 				if (__block_write_begin(page, 0, len,
1392cb20d518STheodore Ts'o 						noalloc_get_block_write)) {
139397498956STheodore Ts'o 				skip_page:
1394cb20d518STheodore Ts'o 					unlock_page(page);
1395cb20d518STheodore Ts'o 					continue;
1396cb20d518STheodore Ts'o 				}
1397cb20d518STheodore Ts'o 				commit_write = 1;
1398cb20d518STheodore Ts'o 			}
13993ecdb3a1STheodore Ts'o 
14003ecdb3a1STheodore Ts'o 			bh = page_bufs = page_buffers(page);
14013ecdb3a1STheodore Ts'o 			block_start = 0;
14023ecdb3a1STheodore Ts'o 			do {
14031de3e3dfSTheodore Ts'o 				if (!bh)
140497498956STheodore Ts'o 					goto skip_page;
14051de3e3dfSTheodore Ts'o 				if (map && (cur_logical >= map->m_lblk) &&
14061de3e3dfSTheodore Ts'o 				    (cur_logical <= (map->m_lblk +
14071de3e3dfSTheodore Ts'o 						     (map->m_len - 1)))) {
14081de3e3dfSTheodore Ts'o 					if (buffer_delay(bh)) {
14091de3e3dfSTheodore Ts'o 						clear_buffer_delay(bh);
14101de3e3dfSTheodore Ts'o 						bh->b_blocknr = pblock;
14111de3e3dfSTheodore Ts'o 					}
14125356f261SAditya Kali 					if (buffer_da_mapped(bh))
14135356f261SAditya Kali 						clear_buffer_da_mapped(bh);
14141de3e3dfSTheodore Ts'o 					if (buffer_unwritten(bh) ||
14151de3e3dfSTheodore Ts'o 					    buffer_mapped(bh))
14161de3e3dfSTheodore Ts'o 						BUG_ON(bh->b_blocknr != pblock);
14171de3e3dfSTheodore Ts'o 					if (map->m_flags & EXT4_MAP_UNINIT)
14181de3e3dfSTheodore Ts'o 						set_buffer_uninit(bh);
14191de3e3dfSTheodore Ts'o 					clear_buffer_unwritten(bh);
14201de3e3dfSTheodore Ts'o 				}
14211de3e3dfSTheodore Ts'o 
142213a79a47SYongqiang Yang 				/*
142313a79a47SYongqiang Yang 				 * Skip the page if block allocation was
142413a79a47SYongqiang Yang 				 * undone and the block is dirty.
142513a79a47SYongqiang Yang 				 */
142613a79a47SYongqiang Yang 				if (ext4_bh_delay_or_unwritten(NULL, bh))
142797498956STheodore Ts'o 					skip_page = 1;
14283ecdb3a1STheodore Ts'o 				bh = bh->b_this_page;
14293ecdb3a1STheodore Ts'o 				block_start += bh->b_size;
14301de3e3dfSTheodore Ts'o 				cur_logical++;
14311de3e3dfSTheodore Ts'o 				pblock++;
14321de3e3dfSTheodore Ts'o 			} while (bh != page_bufs);
14331de3e3dfSTheodore Ts'o 
143497498956STheodore Ts'o 			if (skip_page)
143597498956STheodore Ts'o 				goto skip_page;
1436cb20d518STheodore Ts'o 
1437cb20d518STheodore Ts'o 			if (commit_write)
1438cb20d518STheodore Ts'o 				/* mark the buffer_heads as dirty & uptodate */
1439cb20d518STheodore Ts'o 				block_commit_write(page, 0, len);
1440cb20d518STheodore Ts'o 
144197498956STheodore Ts'o 			clear_page_dirty_for_io(page);
1442bd2d0210STheodore Ts'o 			/*
1443bd2d0210STheodore Ts'o 			 * Delalloc doesn't support data journalling,
1444bd2d0210STheodore Ts'o 			 * but eventually maybe we'll lift this
1445bd2d0210STheodore Ts'o 			 * restriction.
1446bd2d0210STheodore Ts'o 			 */
1447bd2d0210STheodore Ts'o 			if (unlikely(journal_data && PageChecked(page)))
1448cb20d518STheodore Ts'o 				err = __ext4_journalled_writepage(page, len);
14491449032bSTheodore Ts'o 			else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT))
1450bd2d0210STheodore Ts'o 				err = ext4_bio_write_page(&io_submit, page,
1451bd2d0210STheodore Ts'o 							  len, mpd->wbc);
14529dd75f1fSTheodore Ts'o 			else if (buffer_uninit(page_bufs)) {
14539dd75f1fSTheodore Ts'o 				ext4_set_bh_endio(page_bufs, inode);
14549dd75f1fSTheodore Ts'o 				err = block_write_full_page_endio(page,
14559dd75f1fSTheodore Ts'o 					noalloc_get_block_write,
14569dd75f1fSTheodore Ts'o 					mpd->wbc, ext4_end_io_buffer_write);
14579dd75f1fSTheodore Ts'o 			} else
14581449032bSTheodore Ts'o 				err = block_write_full_page(page,
14591449032bSTheodore Ts'o 					noalloc_get_block_write, mpd->wbc);
1460cb20d518STheodore Ts'o 
1461cb20d518STheodore Ts'o 			if (!err)
1462a1d6cc56SAneesh Kumar K.V 				mpd->pages_written++;
146364769240SAlex Tomas 			/*
146464769240SAlex Tomas 			 * In error case, we have to continue because
146564769240SAlex Tomas 			 * remaining pages are still locked
146664769240SAlex Tomas 			 */
146764769240SAlex Tomas 			if (ret == 0)
146864769240SAlex Tomas 				ret = err;
146964769240SAlex Tomas 		}
147064769240SAlex Tomas 		pagevec_release(&pvec);
147164769240SAlex Tomas 	}
1472bd2d0210STheodore Ts'o 	ext4_io_submit(&io_submit);
147364769240SAlex Tomas 	return ret;
147464769240SAlex Tomas }
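/*
 * Worked example (a sketch with assumed numbers, not from the source): the
 * logical-to-physical fixup used in the loop above.  If the map says
 * logical blocks [m_lblk, m_lblk + m_len) start at physical block m_pblk,
 * then a buffer at cur_logical lands at m_pblk + (cur_logical - m_lblk).
 */
#include <assert.h>

int main(void)
{
	unsigned long m_lblk = 100, m_len = 16, m_pblk = 5000;	/* assumed map */
	unsigned long cur_logical = 105;			/* inside the map */
	unsigned long pblock;

	assert(cur_logical >= m_lblk && cur_logical < m_lblk + m_len);
	pblock = m_pblk + (cur_logical - m_lblk);
	assert(pblock == 5005);
	return 0;
}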
147564769240SAlex Tomas 
1476c7f5938aSCurt Wohlgemuth static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
1477c4a0c46eSAneesh Kumar K.V {
1478c4a0c46eSAneesh Kumar K.V 	int nr_pages, i;
1479c4a0c46eSAneesh Kumar K.V 	pgoff_t index, end;
1480c4a0c46eSAneesh Kumar K.V 	struct pagevec pvec;
1481c4a0c46eSAneesh Kumar K.V 	struct inode *inode = mpd->inode;
1482c4a0c46eSAneesh Kumar K.V 	struct address_space *mapping = inode->i_mapping;
1483c4a0c46eSAneesh Kumar K.V 
1484c7f5938aSCurt Wohlgemuth 	index = mpd->first_page;
1485c7f5938aSCurt Wohlgemuth 	end   = mpd->next_page - 1;
1486c4a0c46eSAneesh Kumar K.V 	while (index <= end) {
1487c4a0c46eSAneesh Kumar K.V 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1488c4a0c46eSAneesh Kumar K.V 		if (nr_pages == 0)
1489c4a0c46eSAneesh Kumar K.V 			break;
1490c4a0c46eSAneesh Kumar K.V 		for (i = 0; i < nr_pages; i++) {
1491c4a0c46eSAneesh Kumar K.V 			struct page *page = pvec.pages[i];
14929b1d0998SJan Kara 			if (page->index > end)
1493c4a0c46eSAneesh Kumar K.V 				break;
1494c4a0c46eSAneesh Kumar K.V 			BUG_ON(!PageLocked(page));
1495c4a0c46eSAneesh Kumar K.V 			BUG_ON(PageWriteback(page));
1496c4a0c46eSAneesh Kumar K.V 			block_invalidatepage(page, 0);
1497c4a0c46eSAneesh Kumar K.V 			ClearPageUptodate(page);
1498c4a0c46eSAneesh Kumar K.V 			unlock_page(page);
1499c4a0c46eSAneesh Kumar K.V 		}
15009b1d0998SJan Kara 		index = pvec.pages[nr_pages - 1]->index + 1;
15019b1d0998SJan Kara 		pagevec_release(&pvec);
1502c4a0c46eSAneesh Kumar K.V 	}
1503c4a0c46eSAneesh Kumar K.V 	return;
1504c4a0c46eSAneesh Kumar K.V }
1505c4a0c46eSAneesh Kumar K.V 
1506df22291fSAneesh Kumar K.V static void ext4_print_free_blocks(struct inode *inode)
1507df22291fSAneesh Kumar K.V {
1508df22291fSAneesh Kumar K.V 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
150992b97816STheodore Ts'o 	struct super_block *sb = inode->i_sb;
151092b97816STheodore Ts'o 
151192b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
15125dee5437STheodore Ts'o 	       EXT4_C2B(EXT4_SB(inode->i_sb),
15135dee5437STheodore Ts'o 			ext4_count_free_clusters(inode->i_sb)));
151492b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
151592b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
151657042651STheodore Ts'o 	       (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
151757042651STheodore Ts'o 		percpu_counter_sum(&sbi->s_freeclusters_counter)));
151892b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
15197b415bf6SAditya Kali 	       (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
15207b415bf6SAditya Kali 		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
152192b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "Block reservation details");
152292b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
1523df22291fSAneesh Kumar K.V 		 EXT4_I(inode)->i_reserved_data_blocks);
152492b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
1525df22291fSAneesh Kumar K.V 	       EXT4_I(inode)->i_reserved_meta_blocks);
1526df22291fSAneesh Kumar K.V 	return;
1527df22291fSAneesh Kumar K.V }
1528df22291fSAneesh Kumar K.V 
1529b920c755STheodore Ts'o /*
15305a87b7a5STheodore Ts'o  * mpage_da_map_and_submit - go through the given space, map the blocks
15315a87b7a5STheodore Ts'o  *       if necessary, and then submit them for I/O
153264769240SAlex Tomas  *
15338dc207c0STheodore Ts'o  * @mpd - bh describing space
153464769240SAlex Tomas  *
153564769240SAlex Tomas  * The function skips space we know is already mapped to disk blocks.
153664769240SAlex Tomas  *
153764769240SAlex Tomas  */
15385a87b7a5STheodore Ts'o static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
153964769240SAlex Tomas {
15402ac3b6e0STheodore Ts'o 	int err, blks, get_blocks_flags;
15411de3e3dfSTheodore Ts'o 	struct ext4_map_blocks map, *mapp = NULL;
15422fa3cdfbSTheodore Ts'o 	sector_t next = mpd->b_blocknr;
15432fa3cdfbSTheodore Ts'o 	unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
15442fa3cdfbSTheodore Ts'o 	loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
15452fa3cdfbSTheodore Ts'o 	handle_t *handle = NULL;
154664769240SAlex Tomas 
154764769240SAlex Tomas 	/*
15485a87b7a5STheodore Ts'o 	 * If the blocks are mapped already, or we couldn't accumulate
15495a87b7a5STheodore Ts'o 	 * any blocks, then proceed immediately to the submission stage.
155064769240SAlex Tomas 	 */
15515a87b7a5STheodore Ts'o 	if ((mpd->b_size == 0) ||
15525a87b7a5STheodore Ts'o 	    ((mpd->b_state  & (1 << BH_Mapped)) &&
155329fa89d0SAneesh Kumar K.V 	     !(mpd->b_state & (1 << BH_Delay)) &&
15545a87b7a5STheodore Ts'o 	     !(mpd->b_state & (1 << BH_Unwritten))))
15555a87b7a5STheodore Ts'o 		goto submit_io;
15562fa3cdfbSTheodore Ts'o 
15572fa3cdfbSTheodore Ts'o 	handle = ext4_journal_current_handle();
15582fa3cdfbSTheodore Ts'o 	BUG_ON(!handle);
15592fa3cdfbSTheodore Ts'o 
156079ffab34SAneesh Kumar K.V 	/*
156179e83036SEric Sandeen 	 * Call ext4_map_blocks() to allocate any delayed allocation
15622ac3b6e0STheodore Ts'o 	 * blocks, or to convert an uninitialized extent to be
15632ac3b6e0STheodore Ts'o 	 * initialized (in the case where we have written into
15642ac3b6e0STheodore Ts'o 	 * one or more preallocated blocks).
15652ac3b6e0STheodore Ts'o 	 *
15662ac3b6e0STheodore Ts'o 	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
15672ac3b6e0STheodore Ts'o 	 * indicate that we are on the delayed allocation path.  This
15682ac3b6e0STheodore Ts'o 	 * affects functions in many different parts of the allocation
15692ac3b6e0STheodore Ts'o 	 * call path.  This flag exists primarily because we don't
157079e83036SEric Sandeen 	 * want to change *many* call functions, so ext4_map_blocks()
1571f2321097STheodore Ts'o 	 * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
15722ac3b6e0STheodore Ts'o 	 * inode's allocation semaphore is taken.
15732ac3b6e0STheodore Ts'o 	 *
15742ac3b6e0STheodore Ts'o 	 * If the blocks in question were delalloc blocks, set
15752ac3b6e0STheodore Ts'o 	 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
15762ac3b6e0STheodore Ts'o 	 * variables are updated after the blocks have been allocated.
157779ffab34SAneesh Kumar K.V 	 */
15782ed88685STheodore Ts'o 	map.m_lblk = next;
15792ed88685STheodore Ts'o 	map.m_len = max_blocks;
15801296cc85SAneesh Kumar K.V 	get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
1581744692dcSJiaying Zhang 	if (ext4_should_dioread_nolock(mpd->inode))
1582744692dcSJiaying Zhang 		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
15832ac3b6e0STheodore Ts'o 	if (mpd->b_state & (1 << BH_Delay))
15841296cc85SAneesh Kumar K.V 		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
15851296cc85SAneesh Kumar K.V 
15862ed88685STheodore Ts'o 	blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
15872fa3cdfbSTheodore Ts'o 	if (blks < 0) {
1588e3570639SEric Sandeen 		struct super_block *sb = mpd->inode->i_sb;
1589e3570639SEric Sandeen 
15902fa3cdfbSTheodore Ts'o 		err = blks;
1591ed5bde0bSTheodore Ts'o 		/*
15925a87b7a5STheodore Ts'o 		 * If get_block returns EAGAIN or ENOSPC and there
159397498956STheodore Ts'o 		 * appear to be free blocks, we will just let
159497498956STheodore Ts'o 		 * mpage_da_submit_io() unlock all of the pages.
1595c4a0c46eSAneesh Kumar K.V 		 */
1596c4a0c46eSAneesh Kumar K.V 		if (err == -EAGAIN)
15975a87b7a5STheodore Ts'o 			goto submit_io;
1598df22291fSAneesh Kumar K.V 
15995dee5437STheodore Ts'o 		if (err == -ENOSPC && ext4_count_free_clusters(sb)) {
1600df22291fSAneesh Kumar K.V 			mpd->retval = err;
16015a87b7a5STheodore Ts'o 			goto submit_io;
1602df22291fSAneesh Kumar K.V 		}
1603df22291fSAneesh Kumar K.V 
1604c4a0c46eSAneesh Kumar K.V 		/*
1605ed5bde0bSTheodore Ts'o 		 * A get_block failure will cause us to loop in
1606ed5bde0bSTheodore Ts'o 		 * writepages, because a_ops->writepage won't be able
1607ed5bde0bSTheodore Ts'o 		 * to make progress. The page will be redirtied by
1608ed5bde0bSTheodore Ts'o 		 * writepage and writepages will again try to write
1609ed5bde0bSTheodore Ts'o 		 * the same page.
1610c4a0c46eSAneesh Kumar K.V 		 */
1611e3570639SEric Sandeen 		if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
1612e3570639SEric Sandeen 			ext4_msg(sb, KERN_CRIT,
1613e3570639SEric Sandeen 				 "delayed block allocation failed for inode %lu "
1614e3570639SEric Sandeen 				 "at logical offset %llu with max blocks %zd "
1615e3570639SEric Sandeen 				 "with error %d", mpd->inode->i_ino,
1616c4a0c46eSAneesh Kumar K.V 				 (unsigned long long) next,
16178dc207c0STheodore Ts'o 				 mpd->b_size >> mpd->inode->i_blkbits, err);
1618e3570639SEric Sandeen 			ext4_msg(sb, KERN_CRIT,
1619e3570639SEric Sandeen 				"This should not happen!! Data will be lost\n");
1620e3570639SEric Sandeen 			if (err == -ENOSPC)
1621df22291fSAneesh Kumar K.V 				ext4_print_free_blocks(mpd->inode);
1622030ba6bcSAneesh Kumar K.V 		}
16232fa3cdfbSTheodore Ts'o 		/* invalidate all the pages */
1624c7f5938aSCurt Wohlgemuth 		ext4_da_block_invalidatepages(mpd);
1625e0fd9b90SCurt Wohlgemuth 
1626e0fd9b90SCurt Wohlgemuth 		/* Mark this page range as having been completed */
1627e0fd9b90SCurt Wohlgemuth 		mpd->io_done = 1;
16285a87b7a5STheodore Ts'o 		return;
1629c4a0c46eSAneesh Kumar K.V 	}
16302fa3cdfbSTheodore Ts'o 	BUG_ON(blks == 0);
16312fa3cdfbSTheodore Ts'o 
16321de3e3dfSTheodore Ts'o 	mapp = &map;
16332ed88685STheodore Ts'o 	if (map.m_flags & EXT4_MAP_NEW) {
16342ed88685STheodore Ts'o 		struct block_device *bdev = mpd->inode->i_sb->s_bdev;
16352ed88685STheodore Ts'o 		int i;
163664769240SAlex Tomas 
16372ed88685STheodore Ts'o 		for (i = 0; i < map.m_len; i++)
16382ed88685STheodore Ts'o 			unmap_underlying_metadata(bdev, map.m_pblk + i);
163964769240SAlex Tomas 
16402fa3cdfbSTheodore Ts'o 		if (ext4_should_order_data(mpd->inode)) {
16412fa3cdfbSTheodore Ts'o 			err = ext4_jbd2_file_inode(handle, mpd->inode);
16428de49e67SKazuya Mio 			if (err) {
1643decbd919STheodore Ts'o 				/* Only if the journal is aborted */
16448de49e67SKazuya Mio 				mpd->retval = err;
16458de49e67SKazuya Mio 				goto submit_io;
16468de49e67SKazuya Mio 			}
16472fa3cdfbSTheodore Ts'o 		}
16482fa3cdfbSTheodore Ts'o 	}
16492fa3cdfbSTheodore Ts'o 
16502fa3cdfbSTheodore Ts'o 	/*
165103f5d8bcSJan Kara 	 * Update on-disk size along with block allocation.
16522fa3cdfbSTheodore Ts'o 	 */
16532fa3cdfbSTheodore Ts'o 	disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
16542fa3cdfbSTheodore Ts'o 	if (disksize > i_size_read(mpd->inode))
16552fa3cdfbSTheodore Ts'o 		disksize = i_size_read(mpd->inode);
16562fa3cdfbSTheodore Ts'o 	if (disksize > EXT4_I(mpd->inode)->i_disksize) {
16572fa3cdfbSTheodore Ts'o 		ext4_update_i_disksize(mpd->inode, disksize);
16585a87b7a5STheodore Ts'o 		err = ext4_mark_inode_dirty(handle, mpd->inode);
16595a87b7a5STheodore Ts'o 		if (err)
16605a87b7a5STheodore Ts'o 			ext4_error(mpd->inode->i_sb,
16615a87b7a5STheodore Ts'o 				   "Failed to mark inode %lu dirty",
16625a87b7a5STheodore Ts'o 				   mpd->inode->i_ino);
16632fa3cdfbSTheodore Ts'o 	}
16642fa3cdfbSTheodore Ts'o 
16655a87b7a5STheodore Ts'o submit_io:
16661de3e3dfSTheodore Ts'o 	mpage_da_submit_io(mpd, mapp);
16675a87b7a5STheodore Ts'o 	mpd->io_done = 1;
166864769240SAlex Tomas }
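/*
 * Worked example (a sketch with assumed values, not from the source): the
 * disksize computation near the end of mpage_da_map_and_submit() above,
 * with 4 KiB blocks (i_blkbits = 12), blks = 3 blocks mapped starting at
 * logical block next = 10, and an in-core i_size of 50000 bytes.
 */
#include <stdio.h>

int main(void)
{
	long long next = 10, blks = 3, blkbits = 12;
	long long i_size = 50000;			/* assumed i_size_read() */
	long long disksize = (next + blks) << blkbits;	/* (10 + 3) << 12 = 53248 */

	if (disksize > i_size)
		disksize = i_size;		/* clamp: never past i_size */
	printf("disksize = %lld\n", disksize);	/* prints 50000 */
	return 0;
}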
166964769240SAlex Tomas 
1670bf068ee2SAneesh Kumar K.V #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
1671bf068ee2SAneesh Kumar K.V 		(1 << BH_Delay) | (1 << BH_Unwritten))
167264769240SAlex Tomas 
167364769240SAlex Tomas /*
167464769240SAlex Tomas  * mpage_add_bh_to_extent - try to add one more block to the extent of blocks
167564769240SAlex Tomas  *
167664769240SAlex Tomas  * @mpd->lbh - extent of blocks
167764769240SAlex Tomas  * @logical - logical number of the block in the file
167864769240SAlex Tomas  * @bh - bh of the block (used to access block's state)
167964769240SAlex Tomas  *
168064769240SAlex Tomas  * The function is used to collect contiguous blocks in the same state.
168164769240SAlex Tomas  */
168264769240SAlex Tomas static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
16838dc207c0STheodore Ts'o 				   sector_t logical, size_t b_size,
16848dc207c0STheodore Ts'o 				   unsigned long b_state)
168564769240SAlex Tomas {
168664769240SAlex Tomas 	sector_t next;
16878dc207c0STheodore Ts'o 	int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
168864769240SAlex Tomas 
1689c445e3e0SEric Sandeen 	/*
1690c445e3e0SEric Sandeen 	 * XXX Don't go larger than mballoc is willing to allocate
1691c445e3e0SEric Sandeen 	 * This is a stopgap solution.  We eventually need to fold
1692c445e3e0SEric Sandeen 	 * mpage_da_submit_io() into this function and then call
169379e83036SEric Sandeen 	 * ext4_map_blocks() multiple times in a loop
1694c445e3e0SEric Sandeen 	 */
1695c445e3e0SEric Sandeen 	if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
1696c445e3e0SEric Sandeen 		goto flush_it;
1697c445e3e0SEric Sandeen 
1698525f4ed8SMingming Cao 	/* check if the reserved journal credits might overflow */
169912e9b892SDmitry Monakhov 	if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
1700525f4ed8SMingming Cao 		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
1701525f4ed8SMingming Cao 			/*
1702525f4ed8SMingming Cao 			 * With non-extent format we are limited by the journal
1703525f4ed8SMingming Cao 			 * credit available.  Total credit needed to insert
1704525f4ed8SMingming Cao 			 * nrblocks contiguous blocks is dependent on the
1705525f4ed8SMingming Cao 			 * nrblocks.  So limit nrblocks.
1706525f4ed8SMingming Cao 			 */
1707525f4ed8SMingming Cao 			goto flush_it;
1708525f4ed8SMingming Cao 		} else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
1709525f4ed8SMingming Cao 				EXT4_MAX_TRANS_DATA) {
1710525f4ed8SMingming Cao 			/*
1711525f4ed8SMingming Cao 			 * Adding the new buffer_head would make it cross the
1712525f4ed8SMingming Cao 			 * allowed limit for which we have journal credit
1713525f4ed8SMingming Cao 			 * reserved. So limit the new bh->b_size
1714525f4ed8SMingming Cao 			 */
1715525f4ed8SMingming Cao 			b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
1716525f4ed8SMingming Cao 						mpd->inode->i_blkbits;
1717525f4ed8SMingming Cao 			/* we will do mpage_da_submit_io in the next loop */
1718525f4ed8SMingming Cao 		}
1719525f4ed8SMingming Cao 	}
172064769240SAlex Tomas 	/*
172164769240SAlex Tomas 	 * First block in the extent
172264769240SAlex Tomas 	 */
17238dc207c0STheodore Ts'o 	if (mpd->b_size == 0) {
17248dc207c0STheodore Ts'o 		mpd->b_blocknr = logical;
17258dc207c0STheodore Ts'o 		mpd->b_size = b_size;
17268dc207c0STheodore Ts'o 		mpd->b_state = b_state & BH_FLAGS;
172764769240SAlex Tomas 		return;
172864769240SAlex Tomas 	}
172964769240SAlex Tomas 
17308dc207c0STheodore Ts'o 	next = mpd->b_blocknr + nrblocks;
173164769240SAlex Tomas 	/*
173264769240SAlex Tomas 	 * Can we merge the block to our big extent?
173364769240SAlex Tomas 	 */
17348dc207c0STheodore Ts'o 	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
17358dc207c0STheodore Ts'o 		mpd->b_size += b_size;
173664769240SAlex Tomas 		return;
173764769240SAlex Tomas 	}
173864769240SAlex Tomas 
1739525f4ed8SMingming Cao flush_it:
174064769240SAlex Tomas 	/*
174164769240SAlex Tomas 	 * We couldn't merge the block to our extent, so we
174264769240SAlex Tomas 	 * need to flush current  extent and start new one
174364769240SAlex Tomas 	 */
17445a87b7a5STheodore Ts'o 	mpage_da_map_and_submit(mpd);
1745a1d6cc56SAneesh Kumar K.V 	return;
174664769240SAlex Tomas }
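/*
 * Illustrative sketch (not from the ext4 source): the accumulate-or-flush
 * logic of mpage_add_bh_to_extent() above, reduced to a userspace model
 * that tracks the extent in blocks rather than bytes.  toy_flush() is a
 * hypothetical stand-in for mpage_da_map_and_submit(); the journal-credit
 * limit checks are omitted for brevity.
 */
struct toy_extent {
	unsigned long start;		/* first logical block (b_blocknr) */
	unsigned long nrblocks;		/* extent length in blocks */
	unsigned int state;		/* BH_FLAGS-masked buffer state */
};

static void toy_flush(struct toy_extent *ext)
{
	/* would map and submit the extent here, then start over */
	ext->nrblocks = 0;
}

static void toy_add_block(struct toy_extent *ext, unsigned long logical,
			  unsigned int state)
{
	if (ext->nrblocks == 0) {	/* first block of a new extent */
		ext->start = logical;
		ext->nrblocks = 1;
		ext->state = state;
		return;
	}
	/* contiguous and same state: merge into the running extent */
	if (logical == ext->start + ext->nrblocks && state == ext->state) {
		ext->nrblocks++;
		return;
	}
	toy_flush(ext);			/* can't merge: flush... */
	ext->start = logical;		/* ...and start a new extent */
	ext->nrblocks = 1;
	ext->state = state;
}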
174764769240SAlex Tomas 
1748c364b22cSAneesh Kumar K.V static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
174929fa89d0SAneesh Kumar K.V {
1750c364b22cSAneesh Kumar K.V 	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
175129fa89d0SAneesh Kumar K.V }
175229fa89d0SAneesh Kumar K.V 
175364769240SAlex Tomas /*
17545356f261SAditya Kali  * This function grabs code from the very beginning of
17555356f261SAditya Kali  * ext4_map_blocks, but assumes that the caller is on the delayed write
17565356f261SAditya Kali  * path. This function looks up the requested blocks and sets the
17575356f261SAditya Kali  * buffer delay bit under the protection of i_data_sem.
17585356f261SAditya Kali  */
17595356f261SAditya Kali static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
17605356f261SAditya Kali 			      struct ext4_map_blocks *map,
17615356f261SAditya Kali 			      struct buffer_head *bh)
17625356f261SAditya Kali {
17635356f261SAditya Kali 	int retval;
17645356f261SAditya Kali 	sector_t invalid_block = ~((sector_t) 0xffff);
17655356f261SAditya Kali 
17665356f261SAditya Kali 	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
17675356f261SAditya Kali 		invalid_block = ~0;
17685356f261SAditya Kali 
17695356f261SAditya Kali 	map->m_flags = 0;
17705356f261SAditya Kali 	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
17715356f261SAditya Kali 		  "logical block %lu\n", inode->i_ino, map->m_len,
17725356f261SAditya Kali 		  (unsigned long) map->m_lblk);
17735356f261SAditya Kali 	/*
17745356f261SAditya Kali 	 * Try to see if we can get the block without requesting a new
17755356f261SAditya Kali 	 * file system block.
17765356f261SAditya Kali 	 */
17775356f261SAditya Kali 	down_read((&EXT4_I(inode)->i_data_sem));
17785356f261SAditya Kali 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
17795356f261SAditya Kali 		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
17805356f261SAditya Kali 	else
17815356f261SAditya Kali 		retval = ext4_ind_map_blocks(NULL, inode, map, 0);
17825356f261SAditya Kali 
17835356f261SAditya Kali 	if (retval == 0) {
17845356f261SAditya Kali 		/*
17855356f261SAditya Kali 		 * XXX: __block_prepare_write() unmaps the passed block;
17865356f261SAditya Kali 		 * is it OK?
17875356f261SAditya Kali 		 */
17885356f261SAditya Kali 		/* If the block was allocated from a previously allocated
17895356f261SAditya Kali 		 * cluster, then we don't need to reserve it again. */
17905356f261SAditya Kali 		if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
17915356f261SAditya Kali 			retval = ext4_da_reserve_space(inode, iblock);
17925356f261SAditya Kali 			if (retval)
17935356f261SAditya Kali 				/* not enough space to reserve */
17945356f261SAditya Kali 				goto out_unlock;
17955356f261SAditya Kali 		}
17965356f261SAditya Kali 
17975356f261SAditya Kali 		/* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
17985356f261SAditya Kali 		 * and it should not appear on the bh->b_state.
17995356f261SAditya Kali 		 */
18005356f261SAditya Kali 		map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
18015356f261SAditya Kali 
18025356f261SAditya Kali 		map_bh(bh, inode->i_sb, invalid_block);
18035356f261SAditya Kali 		set_buffer_new(bh);
18045356f261SAditya Kali 		set_buffer_delay(bh);
18055356f261SAditya Kali 	}
18065356f261SAditya Kali 
18075356f261SAditya Kali out_unlock:
18085356f261SAditya Kali 	up_read((&EXT4_I(inode)->i_data_sem));
18095356f261SAditya Kali 
18105356f261SAditya Kali 	return retval;
18115356f261SAditya Kali }
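/*
 * Worked example (a sketch, not from the source): the invalid_block
 * sentinel computed at the top of ext4_da_map_blocks() above.
 * ~((sector_t) 0xffff) is 0xffffffffffff0000 with a 64-bit sector_t --
 * far past any plausible block count -- and ~0 is used instead if the
 * filesystem is somehow larger.  The block count below is an assumption.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t invalid_block = ~((uint64_t) 0xffff);	/* 0xffffffffffff0000 */
	uint64_t fs_blocks = 1ULL << 32;		/* assumed block count */

	if (invalid_block < fs_blocks)		/* pick a safer sentinel */
		invalid_block = ~(uint64_t) 0;
	printf("delayed-buffer sentinel: %#llx\n",
	       (unsigned long long) invalid_block);
	return 0;
}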
18125356f261SAditya Kali 
18135356f261SAditya Kali /*
1814b920c755STheodore Ts'o  * This is a special get_blocks_t callback which is used by
1815b920c755STheodore Ts'o  * ext4_da_write_begin().  It will either return a mapped block or
1816b920c755STheodore Ts'o  * reserve space for a single block.
181729fa89d0SAneesh Kumar K.V  *
181829fa89d0SAneesh Kumar K.V  * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
181929fa89d0SAneesh Kumar K.V  * For a delayed buffer_head we have BH_Mapped, BH_New and BH_Delay set.
182029fa89d0SAneesh Kumar K.V  * We also have b_blocknr = -1 and b_bdev initialized properly.
182129fa89d0SAneesh Kumar K.V  *
182229fa89d0SAneesh Kumar K.V  * For an unwritten buffer_head we have BH_Mapped, BH_New and BH_Unwritten set.
182329fa89d0SAneesh Kumar K.V  * We also have b_blocknr = the physical block mapping the unwritten extent and b_bdev
182464769240SAlex Tomas  */
182564769240SAlex Tomas static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
18262ed88685STheodore Ts'o 				  struct buffer_head *bh, int create)
182764769240SAlex Tomas {
18282ed88685STheodore Ts'o 	struct ext4_map_blocks map;
182964769240SAlex Tomas 	int ret = 0;
183064769240SAlex Tomas 
183164769240SAlex Tomas 	BUG_ON(create == 0);
18322ed88685STheodore Ts'o 	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
18332ed88685STheodore Ts'o 
18342ed88685STheodore Ts'o 	map.m_lblk = iblock;
18352ed88685STheodore Ts'o 	map.m_len = 1;
183664769240SAlex Tomas 
183764769240SAlex Tomas 	/*
183864769240SAlex Tomas 	 * First, we need to know whether the block is already allocated;
183964769240SAlex Tomas 	 * preallocated blocks are unmapped but should be treated
184064769240SAlex Tomas 	 * the same as allocated blocks.
184164769240SAlex Tomas 	 */
18425356f261SAditya Kali 	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
18435356f261SAditya Kali 	if (ret <= 0)
18442ed88685STheodore Ts'o 		return ret;
184564769240SAlex Tomas 
18462ed88685STheodore Ts'o 	map_bh(bh, inode->i_sb, map.m_pblk);
18472ed88685STheodore Ts'o 	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
18482ed88685STheodore Ts'o 
18492ed88685STheodore Ts'o 	if (buffer_unwritten(bh)) {
18502ed88685STheodore Ts'o 		/* A delayed write to unwritten bh should be marked
18512ed88685STheodore Ts'o 		 * new and mapped.  Mapped ensures that we don't do
18522ed88685STheodore Ts'o 		 * get_block multiple times when we write to the same
18532ed88685STheodore Ts'o 		 * offset and new ensures that we do proper zero out
18542ed88685STheodore Ts'o 		 * for partial write.
18552ed88685STheodore Ts'o 		 */
18562ed88685STheodore Ts'o 		set_buffer_new(bh);
1857c8205636STheodore Ts'o 		set_buffer_mapped(bh);
18582ed88685STheodore Ts'o 	}
18592ed88685STheodore Ts'o 	return 0;
186064769240SAlex Tomas }
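/*
 * Illustrative sketch (not from the ext4 source): the two buffer-head
 * shapes described in the comment before ext4_da_get_block_prep() above,
 * modeled with plain flag bits.  The struct and flag values are stand-ins,
 * not the kernel's.
 */
#include <stdio.h>

enum { TOY_MAPPED = 1, TOY_NEW = 2, TOY_DELAY = 4, TOY_UNWRITTEN = 8 };

struct toy_bhead { unsigned int flags; long long blocknr; };

int main(void)
{
	/* delayed write: no physical block yet, sentinel block number */
	struct toy_bhead delayed = { TOY_MAPPED | TOY_NEW | TOY_DELAY, -1 };
	/* unwritten: physical block known, contents not yet initialized */
	struct toy_bhead unwritten = { TOY_MAPPED | TOY_NEW | TOY_UNWRITTEN, 5005 };

	printf("delayed:   flags=%#x blocknr=%lld\n",
	       delayed.flags, delayed.blocknr);
	printf("unwritten: flags=%#x blocknr=%lld\n",
	       unwritten.flags, unwritten.blocknr);
	return 0;
}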
186161628a3fSMingming Cao 
1862b920c755STheodore Ts'o /*
1863b920c755STheodore Ts'o  * This function is used as a standard get_block_t callback function
1864b920c755STheodore Ts'o  * when there is no desire to allocate any blocks.  It is used as a
1865ebdec241SChristoph Hellwig  * callback function for block_write_begin() and block_write_full_page().
1866206f7ab4SChristoph Hellwig  * These functions should only try to map a single block at a time.
1867b920c755STheodore Ts'o  *
1868b920c755STheodore Ts'o  * Since this function doesn't do block allocations even if the caller
1869b920c755STheodore Ts'o  * requests it by passing in create=1, it is critically important that
1870b920c755STheodore Ts'o  * any caller checks to make sure that any buffer heads returned
1871b920c755STheodore Ts'o  * by this function are either all already mapped or marked for
1872206f7ab4SChristoph Hellwig  * delayed allocation before calling block_write_full_page().  Otherwise,
1873206f7ab4SChristoph Hellwig  * b_blocknr could be left uninitialized, and the page write functions will
1874206f7ab4SChristoph Hellwig  * be taken by surprise.
1875b920c755STheodore Ts'o  */
1876b920c755STheodore Ts'o static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
1877f0e6c985SAneesh Kumar K.V 				   struct buffer_head *bh_result, int create)
1878f0e6c985SAneesh Kumar K.V {
1879a2dc52b5STheodore Ts'o 	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
18802ed88685STheodore Ts'o 	return _ext4_get_block(inode, iblock, bh_result, 0);
188161628a3fSMingming Cao }
188261628a3fSMingming Cao 
188362e086beSAneesh Kumar K.V static int bget_one(handle_t *handle, struct buffer_head *bh)
188462e086beSAneesh Kumar K.V {
188562e086beSAneesh Kumar K.V 	get_bh(bh);
188662e086beSAneesh Kumar K.V 	return 0;
188762e086beSAneesh Kumar K.V }
188862e086beSAneesh Kumar K.V 
188962e086beSAneesh Kumar K.V static int bput_one(handle_t *handle, struct buffer_head *bh)
189062e086beSAneesh Kumar K.V {
189162e086beSAneesh Kumar K.V 	put_bh(bh);
189262e086beSAneesh Kumar K.V 	return 0;
189362e086beSAneesh Kumar K.V }
189462e086beSAneesh Kumar K.V 
189562e086beSAneesh Kumar K.V static int __ext4_journalled_writepage(struct page *page,
189662e086beSAneesh Kumar K.V 				       unsigned int len)
189762e086beSAneesh Kumar K.V {
189862e086beSAneesh Kumar K.V 	struct address_space *mapping = page->mapping;
189962e086beSAneesh Kumar K.V 	struct inode *inode = mapping->host;
190062e086beSAneesh Kumar K.V 	struct buffer_head *page_bufs;
190162e086beSAneesh Kumar K.V 	handle_t *handle = NULL;
190262e086beSAneesh Kumar K.V 	int ret = 0;
190362e086beSAneesh Kumar K.V 	int err;
190462e086beSAneesh Kumar K.V 
1905cb20d518STheodore Ts'o 	ClearPageChecked(page);
190662e086beSAneesh Kumar K.V 	page_bufs = page_buffers(page);
190762e086beSAneesh Kumar K.V 	BUG_ON(!page_bufs);
190862e086beSAneesh Kumar K.V 	walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
190962e086beSAneesh Kumar K.V 	/* As soon as we unlock the page, it can go away, but we have
191062e086beSAneesh Kumar K.V 	 * references to buffers so we are safe */
191162e086beSAneesh Kumar K.V 	unlock_page(page);
191262e086beSAneesh Kumar K.V 
191362e086beSAneesh Kumar K.V 	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
191462e086beSAneesh Kumar K.V 	if (IS_ERR(handle)) {
191562e086beSAneesh Kumar K.V 		ret = PTR_ERR(handle);
191662e086beSAneesh Kumar K.V 		goto out;
191762e086beSAneesh Kumar K.V 	}
191862e086beSAneesh Kumar K.V 
1919441c8508SCurt Wohlgemuth 	BUG_ON(!ext4_handle_valid(handle));
1920441c8508SCurt Wohlgemuth 
192162e086beSAneesh Kumar K.V 	ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
192262e086beSAneesh Kumar K.V 				do_journal_get_write_access);
192362e086beSAneesh Kumar K.V 
192462e086beSAneesh Kumar K.V 	err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
192562e086beSAneesh Kumar K.V 				write_end_fn);
192662e086beSAneesh Kumar K.V 	if (ret == 0)
192762e086beSAneesh Kumar K.V 		ret = err;
19282d859db3SJan Kara 	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
192962e086beSAneesh Kumar K.V 	err = ext4_journal_stop(handle);
193062e086beSAneesh Kumar K.V 	if (!ret)
193162e086beSAneesh Kumar K.V 		ret = err;
193262e086beSAneesh Kumar K.V 
193362e086beSAneesh Kumar K.V 	walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
193419f5fb7aSTheodore Ts'o 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
193562e086beSAneesh Kumar K.V out:
193662e086beSAneesh Kumar K.V 	return ret;
193762e086beSAneesh Kumar K.V }
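/*
 * Illustrative sketch (not from the ext4 source): the
 * walk-the-buffers-with-a-callback pattern that
 * __ext4_journalled_writepage() above relies on (walk_page_buffers() with
 * bget_one/bput_one and friends), modeled on a plain circular list.  The
 * types are stand-ins for struct buffer_head.
 */
struct toy_buffer { struct toy_buffer *b_this_page; int refcount; };

static int toy_bget(struct toy_buffer *bh) { bh->refcount++; return 0; }
static int toy_bput(struct toy_buffer *bh) { bh->refcount--; return 0; }

/* apply fn to every buffer on the page's circular list; first error wins */
static int toy_walk_page_buffers(struct toy_buffer *head,
				 int (*fn)(struct toy_buffer *))
{
	struct toy_buffer *bh = head;
	int ret = 0, err;

	do {
		err = fn(bh);
		if (!ret)
			ret = err;
		bh = bh->b_this_page;
	} while (bh != head);
	return ret;
}

/* usage: toy_walk_page_buffers(head, toy_bget) pins every buffer on the
 * page, mirroring the bget_one walk before unlock_page() above; the
 * matching toy_bput walk drops the references afterwards. */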
193862e086beSAneesh Kumar K.V 
1939744692dcSJiaying Zhang static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
1940744692dcSJiaying Zhang static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
1941744692dcSJiaying Zhang 
194261628a3fSMingming Cao /*
194343ce1d23SAneesh Kumar K.V  * Note that we don't need to start a transaction unless we're journaling data
194443ce1d23SAneesh Kumar K.V  * because we should have holes filled from ext4_page_mkwrite(). We even don't
194543ce1d23SAneesh Kumar K.V  * because we should have holes filled from ext4_page_mkwrite(). We don't even
194643ce1d23SAneesh Kumar K.V  * we are writing back data added by write(), the inode is already there and if
194743ce1d23SAneesh Kumar K.V  * we are writing back data modified via mmap(), no one guarantees in which
194843ce1d23SAneesh Kumar K.V  * transaction the data will hit the disk. In case we are journaling data, we
194943ce1d23SAneesh Kumar K.V  * cannot start transaction directly because transaction start ranks above page
195043ce1d23SAneesh Kumar K.V  * lock so we have to do some magic.
195143ce1d23SAneesh Kumar K.V  *
1952b920c755STheodore Ts'o  * This function can get called via...
1953b920c755STheodore Ts'o  *   - ext4_da_writepages after taking page lock (have journal handle)
1954b920c755STheodore Ts'o  *   - journal_submit_inode_data_buffers (no journal handle)
1955b920c755STheodore Ts'o  *   - shrink_page_list via pdflush (no journal handle)
1956b920c755STheodore Ts'o  *   - grab_page_cache when doing write_begin (have journal handle)
195743ce1d23SAneesh Kumar K.V  *
195843ce1d23SAneesh Kumar K.V  * We don't do any block allocation in this function. If we have page with
195943ce1d23SAneesh Kumar K.V  * multiple blocks we need to write those buffer_heads that are mapped. This
196043ce1d23SAneesh Kumar K.V  * is important for mmaped based write. So if we do with blocksize 1K
196143ce1d23SAneesh Kumar K.V  * truncate(f, 1024);
196243ce1d23SAneesh Kumar K.V  * a = mmap(f, 0, 4096);
196343ce1d23SAneesh Kumar K.V  * a[0] = 'a';
196443ce1d23SAneesh Kumar K.V  * truncate(f, 4096);
196543ce1d23SAneesh Kumar K.V  * we have the first buffer_head in the page mapped via the page_mkwrite
196690802ed9SPaul Bolle  * callback, but the other buffer_heads would be unmapped but dirty (the
196743ce1d23SAneesh Kumar K.V  * dirtying is done via do_wp_page). So writepage should write the first
196843ce1d23SAneesh Kumar K.V  * block. If we modify the mmap area beyond 1024 we will again get a page_fault and the
196943ce1d23SAneesh Kumar K.V  * page_mkwrite callback will do the block allocation and mark the
197043ce1d23SAneesh Kumar K.V  * buffer_heads mapped.
197143ce1d23SAneesh Kumar K.V  *
197243ce1d23SAneesh Kumar K.V  * We redirty the page if we have any buffer_heads that is either delay or
197343ce1d23SAneesh Kumar K.V  * We redirty the page if we have any buffer_heads in the page that are
197443ce1d23SAneesh Kumar K.V  * either delayed or unwritten.
197543ce1d23SAneesh Kumar K.V  * We can get recursively called as show below.
197643ce1d23SAneesh Kumar K.V  * We can get recursively called as shown below.
197743ce1d23SAneesh Kumar K.V  *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
197843ce1d23SAneesh Kumar K.V  *		ext4_writepage()
197943ce1d23SAneesh Kumar K.V  *
198043ce1d23SAneesh Kumar K.V  * But since we don't do any block allocation we should not deadlock.
198143ce1d23SAneesh Kumar K.V  * Page also have the dirty flag cleared so we don't get recurive page_lock.
198261628a3fSMingming Cao  * The page also has the dirty flag cleared, so we don't get a recursive page_lock.
198343ce1d23SAneesh Kumar K.V static int ext4_writepage(struct page *page,
198464769240SAlex Tomas 			  struct writeback_control *wbc)
198564769240SAlex Tomas {
1986a42afc5fSTheodore Ts'o 	int ret = 0, commit_write = 0;
198761628a3fSMingming Cao 	loff_t size;
1988498e5f24STheodore Ts'o 	unsigned int len;
1989744692dcSJiaying Zhang 	struct buffer_head *page_bufs = NULL;
199061628a3fSMingming Cao 	struct inode *inode = page->mapping->host;
199164769240SAlex Tomas 
1992a9c667f8SLukas Czerner 	trace_ext4_writepage(page);
199361628a3fSMingming Cao 	size = i_size_read(inode);
199461628a3fSMingming Cao 	if (page->index == size >> PAGE_CACHE_SHIFT)
199561628a3fSMingming Cao 		len = size & ~PAGE_CACHE_MASK;
199661628a3fSMingming Cao 	else
199761628a3fSMingming Cao 		len = PAGE_CACHE_SIZE;
199861628a3fSMingming Cao 
1999a42afc5fSTheodore Ts'o 	/*
2000a42afc5fSTheodore Ts'o 	 * If the page does not have buffers (for whatever reason),
2001a107e5a3STheodore Ts'o 	 * try to create them using __block_write_begin.  If this
2002a42afc5fSTheodore Ts'o 	 * fails, redirty the page and move on.
2003a42afc5fSTheodore Ts'o 	 */
2004b1142e8fSTheodore Ts'o 	if (!page_has_buffers(page)) {
2005a107e5a3STheodore Ts'o 		if (__block_write_begin(page, 0, len,
2006a42afc5fSTheodore Ts'o 					noalloc_get_block_write)) {
2007a42afc5fSTheodore Ts'o 		redirty_page:
2008a42afc5fSTheodore Ts'o 			redirty_page_for_writepage(wbc, page);
2009a42afc5fSTheodore Ts'o 			unlock_page(page);
2010a42afc5fSTheodore Ts'o 			return 0;
2011a42afc5fSTheodore Ts'o 		}
2012a42afc5fSTheodore Ts'o 		commit_write = 1;
2013a42afc5fSTheodore Ts'o 	}
2014f0e6c985SAneesh Kumar K.V 	page_bufs = page_buffers(page);
2015f0e6c985SAneesh Kumar K.V 	if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2016c364b22cSAneesh Kumar K.V 			      ext4_bh_delay_or_unwritten)) {
201761628a3fSMingming Cao 		/*
2018b1142e8fSTheodore Ts'o 		 * We don't want to do block allocation, so redirty
2019b1142e8fSTheodore Ts'o 		 * the page and return.  We may reach here when we do
2020b1142e8fSTheodore Ts'o 		 * a journal commit via journal_submit_inode_data_buffers.
2021966dbde2SMel Gorman 		 * We can also reach here via shrink_page_list but it
2022966dbde2SMel Gorman 		 * should never be for direct reclaim so warn if that
2023966dbde2SMel Gorman 		 * happens.
2024f0e6c985SAneesh Kumar K.V 		 */
2025966dbde2SMel Gorman 		WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
2026966dbde2SMel Gorman 								PF_MEMALLOC);
2027a42afc5fSTheodore Ts'o 		goto redirty_page;
2028f0e6c985SAneesh Kumar K.V 	}
2029a42afc5fSTheodore Ts'o 	if (commit_write)
2030ed9b3e33SAneesh Kumar K.V 		/* now mark the buffer_heads as dirty and uptodate */
2031b767e78aSAneesh Kumar K.V 		block_commit_write(page, 0, len);
203264769240SAlex Tomas 
2033cb20d518STheodore Ts'o 	if (PageChecked(page) && ext4_should_journal_data(inode))
203443ce1d23SAneesh Kumar K.V 		/*
203543ce1d23SAneesh Kumar K.V 		 * It's mmapped pagecache.  Add buffers and journal it.  There
203643ce1d23SAneesh Kumar K.V 		 * doesn't seem much point in redirtying the page here.
203743ce1d23SAneesh Kumar K.V 		 */
20383f0ca309SWu Fengguang 		return __ext4_journalled_writepage(page, len);
203943ce1d23SAneesh Kumar K.V 
2040a42afc5fSTheodore Ts'o 	if (buffer_uninit(page_bufs)) {
2041744692dcSJiaying Zhang 		ext4_set_bh_endio(page_bufs, inode);
2042744692dcSJiaying Zhang 		ret = block_write_full_page_endio(page, noalloc_get_block_write,
2043744692dcSJiaying Zhang 					    wbc, ext4_end_io_buffer_write);
2044744692dcSJiaying Zhang 	} else
2045b920c755STheodore Ts'o 		ret = block_write_full_page(page, noalloc_get_block_write,
2046f0e6c985SAneesh Kumar K.V 					    wbc);
204764769240SAlex Tomas 
204864769240SAlex Tomas 	return ret;
204964769240SAlex Tomas }
205064769240SAlex Tomas 
205161628a3fSMingming Cao /*
2052525f4ed8SMingming Cao  * This is called via ext4_da_writepages() to
205325985edcSLucas De Marchi  * calculate the total number of credits to reserve to fit
2054525f4ed8SMingming Cao  * a single extent allocation into a single transaction;
2055525f4ed8SMingming Cao  * ext4_da_writepages() will loop calling this before
2056525f4ed8SMingming Cao  * each block allocation.
205761628a3fSMingming Cao  */
2058525f4ed8SMingming Cao 
2059525f4ed8SMingming Cao static int ext4_da_writepages_trans_blocks(struct inode *inode)
2060525f4ed8SMingming Cao {
2061525f4ed8SMingming Cao 	int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2062525f4ed8SMingming Cao 
2063525f4ed8SMingming Cao 	/*
2064525f4ed8SMingming Cao 	 * With the non-extent format, the journal credits needed to
2065525f4ed8SMingming Cao 	 * insert nrblocks contiguous blocks depend on the
2066525f4ed8SMingming Cao 	 * number of contiguous blocks, so we limit the
2067525f4ed8SMingming Cao 	 * number of contiguous blocks to a sane value.
2068525f4ed8SMingming Cao 	 */
206912e9b892SDmitry Monakhov 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
2070525f4ed8SMingming Cao 	    (max_blocks > EXT4_MAX_TRANS_DATA))
2071525f4ed8SMingming Cao 		max_blocks = EXT4_MAX_TRANS_DATA;
2072525f4ed8SMingming Cao 
2073525f4ed8SMingming Cao 	return ext4_chunk_trans_blocks(inode, max_blocks);
2074525f4ed8SMingming Cao }
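
/*
 * A rough illustration, assuming the usual definition of
 * EXT4_MAX_TRANS_DATA (64 blocks) in ext4_jbd2.h: a non-extent inode
 * with 10000 reserved data blocks is clamped to 64 blocks here, and
 * ext4_chunk_trans_blocks() then converts that block count into the
 * journal credits to reserve.  Extent-mapped inodes are not clamped,
 * since a single extent can describe the whole reserved range.
 */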
207561628a3fSMingming Cao 
20768e48dcfbSTheodore Ts'o /*
20778e48dcfbSTheodore Ts'o  * write_cache_pages_da - walk the list of dirty pages of the given
20788eb9e5ceSTheodore Ts'o  * address space and accumulate pages that need writing, and call
2079168fc022STheodore Ts'o  * mpage_da_map_and_submit to map a single contiguous memory region
2080168fc022STheodore Ts'o  * and then write them.
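 *
 * Returns 0 when the scan stops (range or nr_to_write exhausted), or
 * MPAGE_DA_EXTENT_TAIL once a complete extent has been accumulated
 * and handed off for mapping and submission.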
20818e48dcfbSTheodore Ts'o  */
20828e48dcfbSTheodore Ts'o static int write_cache_pages_da(struct address_space *mapping,
20838e48dcfbSTheodore Ts'o 				struct writeback_control *wbc,
208472f84e65SEric Sandeen 				struct mpage_da_data *mpd,
208572f84e65SEric Sandeen 				pgoff_t *done_index)
20868e48dcfbSTheodore Ts'o {
20878eb9e5ceSTheodore Ts'o 	struct buffer_head	*bh, *head;
2088168fc022STheodore Ts'o 	struct inode		*inode = mapping->host;
20898e48dcfbSTheodore Ts'o 	struct pagevec		pvec;
20904f01b02cSTheodore Ts'o 	unsigned int		nr_pages;
20914f01b02cSTheodore Ts'o 	sector_t		logical;
20924f01b02cSTheodore Ts'o 	pgoff_t			index, end;
20938e48dcfbSTheodore Ts'o 	long			nr_to_write = wbc->nr_to_write;
20944f01b02cSTheodore Ts'o 	int			i, tag, ret = 0;
20958e48dcfbSTheodore Ts'o 
2096168fc022STheodore Ts'o 	memset(mpd, 0, sizeof(struct mpage_da_data));
2097168fc022STheodore Ts'o 	mpd->wbc = wbc;
2098168fc022STheodore Ts'o 	mpd->inode = inode;
20998e48dcfbSTheodore Ts'o 	pagevec_init(&pvec, 0);
21008e48dcfbSTheodore Ts'o 	index = wbc->range_start >> PAGE_CACHE_SHIFT;
21018e48dcfbSTheodore Ts'o 	end = wbc->range_end >> PAGE_CACHE_SHIFT;
21028e48dcfbSTheodore Ts'o 
21036e6938b6SWu Fengguang 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
21045b41d924SEric Sandeen 		tag = PAGECACHE_TAG_TOWRITE;
21055b41d924SEric Sandeen 	else
21065b41d924SEric Sandeen 		tag = PAGECACHE_TAG_DIRTY;
21075b41d924SEric Sandeen 
210872f84e65SEric Sandeen 	*done_index = index;
21094f01b02cSTheodore Ts'o 	while (index <= end) {
21105b41d924SEric Sandeen 		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
21118e48dcfbSTheodore Ts'o 			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
21128e48dcfbSTheodore Ts'o 		if (nr_pages == 0)
21134f01b02cSTheodore Ts'o 			return 0;
21148e48dcfbSTheodore Ts'o 
21158e48dcfbSTheodore Ts'o 		for (i = 0; i < nr_pages; i++) {
21168e48dcfbSTheodore Ts'o 			struct page *page = pvec.pages[i];
21178e48dcfbSTheodore Ts'o 
21188e48dcfbSTheodore Ts'o 			/*
21198e48dcfbSTheodore Ts'o 			 * At this point, the page may be truncated or
21208e48dcfbSTheodore Ts'o 			 * invalidated (changing page->mapping to NULL), or
21218e48dcfbSTheodore Ts'o 			 * even swizzled back from swapper_space to tmpfs file
21228e48dcfbSTheodore Ts'o 			 * mapping. However, page->index will not change
21238e48dcfbSTheodore Ts'o 			 * because we have a reference on the page.
21248e48dcfbSTheodore Ts'o 			 */
21254f01b02cSTheodore Ts'o 			if (page->index > end)
21264f01b02cSTheodore Ts'o 				goto out;
21278e48dcfbSTheodore Ts'o 
212872f84e65SEric Sandeen 			*done_index = page->index + 1;
212972f84e65SEric Sandeen 
213078aaced3STheodore Ts'o 			/*
213178aaced3STheodore Ts'o 			 * If we can't merge this page, and we have
213378aaced3STheodore Ts'o 			 * accumulated a contiguous region, write it out.
213378aaced3STheodore Ts'o 			 */
213478aaced3STheodore Ts'o 			if ((mpd->next_page != page->index) &&
213578aaced3STheodore Ts'o 			    (mpd->next_page != mpd->first_page)) {
213678aaced3STheodore Ts'o 				mpage_da_map_and_submit(mpd);
213778aaced3STheodore Ts'o 				goto ret_extent_tail;
213878aaced3STheodore Ts'o 			}
213978aaced3STheodore Ts'o 
21408e48dcfbSTheodore Ts'o 			lock_page(page);
21418e48dcfbSTheodore Ts'o 
21428e48dcfbSTheodore Ts'o 			/*
21434f01b02cSTheodore Ts'o 			 * If the page is no longer dirty, or its
21444f01b02cSTheodore Ts'o 			 * mapping no longer corresponds to inode we
21454f01b02cSTheodore Ts'o 			 * mapping no longer corresponds to the inode we
21464f01b02cSTheodore Ts'o 			 * truncated or invalidated), or the page is
21474f01b02cSTheodore Ts'o 			 * already under writeback and we are not
21484f01b02cSTheodore Ts'o 			 * doing a data integrity writeback, skip the page
21498e48dcfbSTheodore Ts'o 			 */
21504f01b02cSTheodore Ts'o 			if (!PageDirty(page) ||
21514f01b02cSTheodore Ts'o 			    (PageWriteback(page) &&
21524f01b02cSTheodore Ts'o 			     (wbc->sync_mode == WB_SYNC_NONE)) ||
21534f01b02cSTheodore Ts'o 			    unlikely(page->mapping != mapping)) {
21548e48dcfbSTheodore Ts'o 				unlock_page(page);
21558e48dcfbSTheodore Ts'o 				continue;
21568e48dcfbSTheodore Ts'o 			}
21578e48dcfbSTheodore Ts'o 
21588e48dcfbSTheodore Ts'o 			wait_on_page_writeback(page);
21598e48dcfbSTheodore Ts'o 			BUG_ON(PageWriteback(page));
21608e48dcfbSTheodore Ts'o 
2161168fc022STheodore Ts'o 			if (mpd->next_page != page->index)
21628eb9e5ceSTheodore Ts'o 				mpd->first_page = page->index;
21638eb9e5ceSTheodore Ts'o 			mpd->next_page = page->index + 1;
21648eb9e5ceSTheodore Ts'o 			logical = (sector_t) page->index <<
21658eb9e5ceSTheodore Ts'o 				(PAGE_CACHE_SHIFT - inode->i_blkbits);
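			/*
			 * Illustration, assuming 4K pages: with a 4K block
			 * size the shift above is 0, so the logical block
			 * number equals the page index; with 1K blocks the
			 * shift is 2 and each page covers four logical blocks.
			 */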
21668eb9e5ceSTheodore Ts'o 
21678eb9e5ceSTheodore Ts'o 			if (!page_has_buffers(page)) {
21684f01b02cSTheodore Ts'o 				mpage_add_bh_to_extent(mpd, logical,
21694f01b02cSTheodore Ts'o 						       PAGE_CACHE_SIZE,
21708eb9e5ceSTheodore Ts'o 						       (1 << BH_Dirty) | (1 << BH_Uptodate));
21714f01b02cSTheodore Ts'o 				if (mpd->io_done)
21724f01b02cSTheodore Ts'o 					goto ret_extent_tail;
21738e48dcfbSTheodore Ts'o 			} else {
21748eb9e5ceSTheodore Ts'o 				/*
21754f01b02cSTheodore Ts'o 				 * Page with regular buffer heads,
21764f01b02cSTheodore Ts'o 				 * just add all dirty ones
21778eb9e5ceSTheodore Ts'o 				 */
21788eb9e5ceSTheodore Ts'o 				head = page_buffers(page);
21798eb9e5ceSTheodore Ts'o 				bh = head;
21808eb9e5ceSTheodore Ts'o 				do {
21818eb9e5ceSTheodore Ts'o 					BUG_ON(buffer_locked(bh));
21828eb9e5ceSTheodore Ts'o 					/*
21838eb9e5ceSTheodore Ts'o 					 * We need to try to allocate
21848eb9e5ceSTheodore Ts'o 					 * unmapped blocks in the same page.
21858eb9e5ceSTheodore Ts'o 					 * Otherwise we won't make progress
21868eb9e5ceSTheodore Ts'o 					 * with the page in ext4_writepage
21878eb9e5ceSTheodore Ts'o 					 */
21888eb9e5ceSTheodore Ts'o 					if (ext4_bh_delay_or_unwritten(NULL, bh)) {
21898eb9e5ceSTheodore Ts'o 						mpage_add_bh_to_extent(mpd, logical,
21908eb9e5ceSTheodore Ts'o 								       bh->b_size,
21918eb9e5ceSTheodore Ts'o 								       bh->b_state);
21924f01b02cSTheodore Ts'o 						if (mpd->io_done)
21934f01b02cSTheodore Ts'o 							goto ret_extent_tail;
21948eb9e5ceSTheodore Ts'o 					} else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
21958eb9e5ceSTheodore Ts'o 						/*
21964f01b02cSTheodore Ts'o 						 * mapped dirty buffer. We need
21974f01b02cSTheodore Ts'o 						 * to update the b_state
21984f01b02cSTheodore Ts'o 						 * because we look at b_state
21994f01b02cSTheodore Ts'o 						 * in mpage_da_map_blocks.  We
22004f01b02cSTheodore Ts'o 						 * don't update b_size because
22014f01b02cSTheodore Ts'o 						 * if we find an unmapped
22024f01b02cSTheodore Ts'o 						 * buffer_head later we need to
22034f01b02cSTheodore Ts'o 						 * use the b_state flag of that
22044f01b02cSTheodore Ts'o 						 * buffer_head.
22058eb9e5ceSTheodore Ts'o 						 */
22068eb9e5ceSTheodore Ts'o 						if (mpd->b_size == 0)
22078eb9e5ceSTheodore Ts'o 							mpd->b_state = bh->b_state & BH_FLAGS;
22088e48dcfbSTheodore Ts'o 					}
22098eb9e5ceSTheodore Ts'o 					logical++;
22108eb9e5ceSTheodore Ts'o 				} while ((bh = bh->b_this_page) != head);
22118e48dcfbSTheodore Ts'o 			}
22128e48dcfbSTheodore Ts'o 
22138e48dcfbSTheodore Ts'o 			if (nr_to_write > 0) {
22148e48dcfbSTheodore Ts'o 				nr_to_write--;
22158e48dcfbSTheodore Ts'o 				if (nr_to_write == 0 &&
22164f01b02cSTheodore Ts'o 				    wbc->sync_mode == WB_SYNC_NONE)
22178e48dcfbSTheodore Ts'o 					/*
22188e48dcfbSTheodore Ts'o 					 * We stop writing back only if we are
22198e48dcfbSTheodore Ts'o 					 * not doing integrity sync. In case of
22208e48dcfbSTheodore Ts'o 					 * integrity sync we have to keep going
22218e48dcfbSTheodore Ts'o 					 * because someone may be concurrently
22228e48dcfbSTheodore Ts'o 					 * dirtying pages, and we might have
22238e48dcfbSTheodore Ts'o 					 * synced a lot of newly appeared dirty
22248e48dcfbSTheodore Ts'o 					 * pages, but have not synced all of the
22258e48dcfbSTheodore Ts'o 					 * old dirty pages.
22268e48dcfbSTheodore Ts'o 					 */
22274f01b02cSTheodore Ts'o 					goto out;
22288e48dcfbSTheodore Ts'o 			}
22298e48dcfbSTheodore Ts'o 		}
22308e48dcfbSTheodore Ts'o 		pagevec_release(&pvec);
22318e48dcfbSTheodore Ts'o 		cond_resched();
22328e48dcfbSTheodore Ts'o 	}
22334f01b02cSTheodore Ts'o 	return 0;
22344f01b02cSTheodore Ts'o ret_extent_tail:
22354f01b02cSTheodore Ts'o 	ret = MPAGE_DA_EXTENT_TAIL;
22368eb9e5ceSTheodore Ts'o out:
22378eb9e5ceSTheodore Ts'o 	pagevec_release(&pvec);
22388eb9e5ceSTheodore Ts'o 	cond_resched();
22398e48dcfbSTheodore Ts'o 	return ret;
22408e48dcfbSTheodore Ts'o }
22418e48dcfbSTheodore Ts'o 
22428e48dcfbSTheodore Ts'o 
224364769240SAlex Tomas static int ext4_da_writepages(struct address_space *mapping,
224464769240SAlex Tomas 			      struct writeback_control *wbc)
224564769240SAlex Tomas {
224622208dedSAneesh Kumar K.V 	pgoff_t	index;
224722208dedSAneesh Kumar K.V 	int range_whole = 0;
224861628a3fSMingming Cao 	handle_t *handle = NULL;
2249df22291fSAneesh Kumar K.V 	struct mpage_da_data mpd;
22505e745b04SAneesh Kumar K.V 	struct inode *inode = mapping->host;
2251498e5f24STheodore Ts'o 	int pages_written = 0;
225255138e0bSTheodore Ts'o 	unsigned int max_pages;
22532acf2c26SAneesh Kumar K.V 	int range_cyclic, cycled = 1, io_done = 0;
225455138e0bSTheodore Ts'o 	int needed_blocks, ret = 0;
225555138e0bSTheodore Ts'o 	long desired_nr_to_write, nr_to_writebump = 0;
2256de89de6eSTheodore Ts'o 	loff_t range_start = wbc->range_start;
22575e745b04SAneesh Kumar K.V 	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
225872f84e65SEric Sandeen 	pgoff_t done_index = 0;
22595b41d924SEric Sandeen 	pgoff_t end;
22601bce63d1SShaohua Li 	struct blk_plug plug;
226161628a3fSMingming Cao 
22629bffad1eSTheodore Ts'o 	trace_ext4_da_writepages(inode, wbc);
2263ba80b101STheodore Ts'o 
226461628a3fSMingming Cao 	/*
226561628a3fSMingming Cao 	 * No pages to write? This is mainly a kludge to avoid starting
226661628a3fSMingming Cao 	 * a transaction for special inodes like the journal inode on last iput()
226761628a3fSMingming Cao 	 * because that could violate lock ordering on umount
226861628a3fSMingming Cao 	 */
2269a1d6cc56SAneesh Kumar K.V 	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
227061628a3fSMingming Cao 		return 0;
22712a21e37eSTheodore Ts'o 
22722a21e37eSTheodore Ts'o 	/*
22732a21e37eSTheodore Ts'o 	 * If the filesystem has aborted, it is read-only, so return
22742a21e37eSTheodore Ts'o 	 * right away instead of dumping stack traces later on that
22752a21e37eSTheodore Ts'o 	 * will obscure the real source of the problem.  We test
22764ab2f15bSTheodore Ts'o 	 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
22772a21e37eSTheodore Ts'o 	 * the latter could be true if the filesystem is mounted
22782a21e37eSTheodore Ts'o 	 * read-only, and in that case, ext4_da_writepages should
22792a21e37eSTheodore Ts'o 	 * *never* be called, so if that ever happens, we would want
22802a21e37eSTheodore Ts'o 	 * the stack trace.
22812a21e37eSTheodore Ts'o 	 */
22824ab2f15bSTheodore Ts'o 	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
22832a21e37eSTheodore Ts'o 		return -EROFS;
22842a21e37eSTheodore Ts'o 
228522208dedSAneesh Kumar K.V 	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
228622208dedSAneesh Kumar K.V 		range_whole = 1;
228761628a3fSMingming Cao 
22882acf2c26SAneesh Kumar K.V 	range_cyclic = wbc->range_cyclic;
22892acf2c26SAneesh Kumar K.V 	if (wbc->range_cyclic) {
229022208dedSAneesh Kumar K.V 		index = mapping->writeback_index;
22912acf2c26SAneesh Kumar K.V 		if (index)
22922acf2c26SAneesh Kumar K.V 			cycled = 0;
22932acf2c26SAneesh Kumar K.V 		wbc->range_start = index << PAGE_CACHE_SHIFT;
22942acf2c26SAneesh Kumar K.V 		wbc->range_end  = LLONG_MAX;
22952acf2c26SAneesh Kumar K.V 		wbc->range_cyclic = 0;
22965b41d924SEric Sandeen 		end = -1;
22975b41d924SEric Sandeen 	} else {
229822208dedSAneesh Kumar K.V 		index = wbc->range_start >> PAGE_CACHE_SHIFT;
22995b41d924SEric Sandeen 		end = wbc->range_end >> PAGE_CACHE_SHIFT;
23005b41d924SEric Sandeen 	}
2301a1d6cc56SAneesh Kumar K.V 
230255138e0bSTheodore Ts'o 	/*
230355138e0bSTheodore Ts'o 	 * This works around two forms of stupidity.  The first is in
230455138e0bSTheodore Ts'o 	 * the writeback code, which caps the maximum number of pages
230555138e0bSTheodore Ts'o  * written to 1024 pages.  This is wrong on multiple
230655138e0bSTheodore Ts'o  * levels; different architectures have different page sizes,
230755138e0bSTheodore Ts'o 	 * which changes the maximum amount of data which gets
230855138e0bSTheodore Ts'o 	 * written.  Secondly, 4 megabytes is way too small.  XFS
230955138e0bSTheodore Ts'o 	 * forces this value to be 16 megabytes by multiplying
231055138e0bSTheodore Ts'o  * the nr_to_write parameter by four, and then relies on its
231155138e0bSTheodore Ts'o 	 * allocator to allocate larger extents to make them
231255138e0bSTheodore Ts'o 	 * contiguous.  Unfortunately this brings us to the second
231355138e0bSTheodore Ts'o 	 * stupidity, which is that ext4's mballoc code only allocates
231455138e0bSTheodore Ts'o 	 * at most 2048 blocks.  So we force contiguous writes up to
231555138e0bSTheodore Ts'o 	 * the number of dirty blocks in the inode, or
231655138e0bSTheodore Ts'o  * sbi->s_max_writeback_mb_bump, whichever is smaller.
231755138e0bSTheodore Ts'o 	 */
231855138e0bSTheodore Ts'o 	max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
2319b443e733SEric Sandeen 	if (!range_cyclic && range_whole) {
2320b443e733SEric Sandeen 		if (wbc->nr_to_write == LONG_MAX)
2321b443e733SEric Sandeen 			desired_nr_to_write = wbc->nr_to_write;
232255138e0bSTheodore Ts'o 		else
2323b443e733SEric Sandeen 			desired_nr_to_write = wbc->nr_to_write * 8;
2324b443e733SEric Sandeen 	} else
232555138e0bSTheodore Ts'o 		desired_nr_to_write = ext4_num_dirty_pages(inode, index,
232655138e0bSTheodore Ts'o 							   max_pages);
232755138e0bSTheodore Ts'o 	if (desired_nr_to_write > max_pages)
232855138e0bSTheodore Ts'o 		desired_nr_to_write = max_pages;
232955138e0bSTheodore Ts'o 
233055138e0bSTheodore Ts'o 	if (wbc->nr_to_write < desired_nr_to_write) {
233155138e0bSTheodore Ts'o 		nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
233255138e0bSTheodore Ts'o 		wbc->nr_to_write = desired_nr_to_write;
233355138e0bSTheodore Ts'o 	}
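	/*
	 * A worked example, assuming 4K pages and the default
	 * s_max_writeback_mb_bump of 128 MB: max_pages = 128 << 8 = 32768.
	 * A background writeback that asked for 1024 pages on a heavily
	 * dirtied file can thus have nr_to_write bumped up to 32768 pages
	 * before the allocation loop below starts.
	 */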
233455138e0bSTheodore Ts'o 
23352acf2c26SAneesh Kumar K.V retry:
23366e6938b6SWu Fengguang 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
23375b41d924SEric Sandeen 		tag_pages_for_writeback(mapping, index, end);
23385b41d924SEric Sandeen 
23391bce63d1SShaohua Li 	blk_start_plug(&plug);
234022208dedSAneesh Kumar K.V 	while (!ret && wbc->nr_to_write > 0) {
2341a1d6cc56SAneesh Kumar K.V 
2342a1d6cc56SAneesh Kumar K.V 		/*
2343a1d6cc56SAneesh Kumar K.V 		 * We insert one extent at a time, so we need the
2344a1d6cc56SAneesh Kumar K.V 		 * credits required for a single extent allocation.
2345a1d6cc56SAneesh Kumar K.V 		 * Journalled data mode is currently not supported
2346a1d6cc56SAneesh Kumar K.V 		 * by delalloc.
2347a1d6cc56SAneesh Kumar K.V 		 */
2348a1d6cc56SAneesh Kumar K.V 		BUG_ON(ext4_should_journal_data(inode));
2349525f4ed8SMingming Cao 		needed_blocks = ext4_da_writepages_trans_blocks(inode);
2350a1d6cc56SAneesh Kumar K.V 
235161628a3fSMingming Cao 		/* start a new transaction*/
235261628a3fSMingming Cao 		handle = ext4_journal_start(inode, needed_blocks);
235361628a3fSMingming Cao 		if (IS_ERR(handle)) {
235461628a3fSMingming Cao 			ret = PTR_ERR(handle);
23551693918eSTheodore Ts'o 			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2356fbe845ddSCurt Wohlgemuth 			       "%ld pages, ino %lu; err %d", __func__,
2357a1d6cc56SAneesh Kumar K.V 				wbc->nr_to_write, inode->i_ino, ret);
23583c1fcb2cSNamjae Jeon 			blk_finish_plug(&plug);
235961628a3fSMingming Cao 			goto out_writepages;
236061628a3fSMingming Cao 		}
2361f63e6005STheodore Ts'o 
2362f63e6005STheodore Ts'o 		/*
23638eb9e5ceSTheodore Ts'o 		 * Now call write_cache_pages_da() to find the next
2364f63e6005STheodore Ts'o 		 * contiguous region of logical blocks that need
23658eb9e5ceSTheodore Ts'o 		 * blocks to be allocated by ext4 and submit them.
2366f63e6005STheodore Ts'o 		 */
236772f84e65SEric Sandeen 		ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
2368f63e6005STheodore Ts'o 		/*
2369af901ca1SAndré Goddard Rosa 		 * If we have a contiguous extent of pages and we
2370f63e6005STheodore Ts'o 		 * haven't done the I/O yet, map the blocks and submit
2371f63e6005STheodore Ts'o 		 * them for I/O.
2372f63e6005STheodore Ts'o 		 */
2373f63e6005STheodore Ts'o 		if (!mpd.io_done && mpd.next_page != mpd.first_page) {
23745a87b7a5STheodore Ts'o 			mpage_da_map_and_submit(&mpd);
2375f63e6005STheodore Ts'o 			ret = MPAGE_DA_EXTENT_TAIL;
2376f63e6005STheodore Ts'o 		}
2377b3a3ca8cSTheodore Ts'o 		trace_ext4_da_write_pages(inode, &mpd);
2378f63e6005STheodore Ts'o 		wbc->nr_to_write -= mpd.pages_written;
2379df22291fSAneesh Kumar K.V 
238061628a3fSMingming Cao 		ext4_journal_stop(handle);
2381df22291fSAneesh Kumar K.V 
23828f64b32eSEric Sandeen 		if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
238322208dedSAneesh Kumar K.V 			/* commit the transaction which would
238422208dedSAneesh Kumar K.V 			 * free blocks released in the transaction
238522208dedSAneesh Kumar K.V 			 * and try again
238622208dedSAneesh Kumar K.V 			 */
2387df22291fSAneesh Kumar K.V 			jbd2_journal_force_commit_nested(sbi->s_journal);
238822208dedSAneesh Kumar K.V 			ret = 0;
238922208dedSAneesh Kumar K.V 		} else if (ret == MPAGE_DA_EXTENT_TAIL) {
2390a1d6cc56SAneesh Kumar K.V 			/*
23918de49e67SKazuya Mio 			 * Got one extent; now try with the rest of the pages.
23928de49e67SKazuya Mio 			 * If mpd.retval is set to -EIO, the journal is aborted,
23938de49e67SKazuya Mio 			 * so we don't need to write any more.
2394a1d6cc56SAneesh Kumar K.V 			 */
239522208dedSAneesh Kumar K.V 			pages_written += mpd.pages_written;
23968de49e67SKazuya Mio 			ret = mpd.retval;
23972acf2c26SAneesh Kumar K.V 			io_done = 1;
239822208dedSAneesh Kumar K.V 		} else if (wbc->nr_to_write)
239961628a3fSMingming Cao 			/*
240061628a3fSMingming Cao 			 * There is no more writeout needed,
240161628a3fSMingming Cao 			 * or we requested a nonblocking writeout
240261628a3fSMingming Cao 			 * and found the device congested.
240361628a3fSMingming Cao 			 */
240461628a3fSMingming Cao 			break;
240561628a3fSMingming Cao 	}
24061bce63d1SShaohua Li 	blk_finish_plug(&plug);
24072acf2c26SAneesh Kumar K.V 	if (!io_done && !cycled) {
24082acf2c26SAneesh Kumar K.V 		cycled = 1;
24092acf2c26SAneesh Kumar K.V 		index = 0;
24102acf2c26SAneesh Kumar K.V 		wbc->range_start = index << PAGE_CACHE_SHIFT;
24112acf2c26SAneesh Kumar K.V 		wbc->range_end  = mapping->writeback_index - 1;
24122acf2c26SAneesh Kumar K.V 		goto retry;
24132acf2c26SAneesh Kumar K.V 	}
241461628a3fSMingming Cao 
241522208dedSAneesh Kumar K.V 	/* Update index */
24162acf2c26SAneesh Kumar K.V 	wbc->range_cyclic = range_cyclic;
241722208dedSAneesh Kumar K.V 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
241822208dedSAneesh Kumar K.V 		/*
241922208dedSAneesh Kumar K.V 		 * set the writeback_index so that range_cyclic
242022208dedSAneesh Kumar K.V 		 * mode will write it back later
242122208dedSAneesh Kumar K.V 		 */
242272f84e65SEric Sandeen 		mapping->writeback_index = done_index;
2423a1d6cc56SAneesh Kumar K.V 
242461628a3fSMingming Cao out_writepages:
242522208dedSAneesh Kumar K.V 	wbc->nr_to_write -= nr_to_writebump;
2426de89de6eSTheodore Ts'o 	wbc->range_start = range_start;
24279bffad1eSTheodore Ts'o 	trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
242861628a3fSMingming Cao 	return ret;
242964769240SAlex Tomas }
243064769240SAlex Tomas 
243179f0be8dSAneesh Kumar K.V #define FALL_BACK_TO_NONDELALLOC 1
243279f0be8dSAneesh Kumar K.V static int ext4_nonda_switch(struct super_block *sb)
243379f0be8dSAneesh Kumar K.V {
243479f0be8dSAneesh Kumar K.V 	s64 free_blocks, dirty_blocks;
243579f0be8dSAneesh Kumar K.V 	struct ext4_sb_info *sbi = EXT4_SB(sb);
243679f0be8dSAneesh Kumar K.V 
243779f0be8dSAneesh Kumar K.V 	/*
243879f0be8dSAneesh Kumar K.V 	 * Switch to non-delalloc mode if we are running low
243979f0be8dSAneesh Kumar K.V 	 * on free blocks. The free block accounting via percpu
2440179f7ebfSEric Dumazet 	 * counters can get slightly wrong, with up to percpu_counter_batch
244179f0be8dSAneesh Kumar K.V 	 * accumulated on each CPU without updating the global counters.
244279f0be8dSAneesh Kumar K.V 	 * Delalloc needs accurate free block accounting, so switch
244379f0be8dSAneesh Kumar K.V 	 * to non-delalloc when we are near the error range.
244479f0be8dSAneesh Kumar K.V 	 */
244557042651STheodore Ts'o 	free_blocks  = EXT4_C2B(sbi,
244657042651STheodore Ts'o 		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
244757042651STheodore Ts'o 	dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
244879f0be8dSAneesh Kumar K.V 	if (2 * free_blocks < 3 * dirty_blocks ||
2449df55c99dSTheodore Ts'o 		free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
245079f0be8dSAneesh Kumar K.V 		/*
2451c8afb446SEric Sandeen 		 * The free block count is less than 150% of the dirty blocks,
2452c8afb446SEric Sandeen 		 * or the free blocks are below the watermark.
245379f0be8dSAneesh Kumar K.V 		 */
245479f0be8dSAneesh Kumar K.V 		return 1;
245579f0be8dSAneesh Kumar K.V 	}
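	/*
	 * Worked example with hypothetical numbers (and assuming a small
	 * EXT4_FREECLUSTERS_WATERMARK): free_blocks = 290, dirty_blocks = 200
	 * gives 2 * 290 = 580 < 3 * 200 = 600, so we switch to non-delalloc.
	 * With free_blocks = 350 neither test above fires, but since
	 * 350 < 2 * 200 = 400 the check below still kicks off background
	 * writeback to push delalloc blocks out.
	 */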
2456c8afb446SEric Sandeen 	/*
2457c8afb446SEric Sandeen 	 * Even if we don't switch but are nearing capacity,
2458c8afb446SEric Sandeen 	 * start pushing delalloc when 1/2 of free blocks are dirty.
2459c8afb446SEric Sandeen 	 */
2460c8afb446SEric Sandeen 	if (free_blocks < 2 * dirty_blocks)
24610e175a18SCurt Wohlgemuth 		writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);
2462c8afb446SEric Sandeen 
246379f0be8dSAneesh Kumar K.V 	return 0;
246479f0be8dSAneesh Kumar K.V }
246579f0be8dSAneesh Kumar K.V 
246664769240SAlex Tomas static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
246764769240SAlex Tomas 			       loff_t pos, unsigned len, unsigned flags,
246864769240SAlex Tomas 			       struct page **pagep, void **fsdata)
246964769240SAlex Tomas {
247072b8ab9dSEric Sandeen 	int ret, retries = 0;
247164769240SAlex Tomas 	struct page *page;
247264769240SAlex Tomas 	pgoff_t index;
247364769240SAlex Tomas 	struct inode *inode = mapping->host;
247464769240SAlex Tomas 	handle_t *handle;
247564769240SAlex Tomas 
247664769240SAlex Tomas 	index = pos >> PAGE_CACHE_SHIFT;
247779f0be8dSAneesh Kumar K.V 
247879f0be8dSAneesh Kumar K.V 	if (ext4_nonda_switch(inode->i_sb)) {
247979f0be8dSAneesh Kumar K.V 		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
248079f0be8dSAneesh Kumar K.V 		return ext4_write_begin(file, mapping, pos,
248179f0be8dSAneesh Kumar K.V 					len, flags, pagep, fsdata);
248279f0be8dSAneesh Kumar K.V 	}
248379f0be8dSAneesh Kumar K.V 	*fsdata = (void *)0;
24849bffad1eSTheodore Ts'o 	trace_ext4_da_write_begin(inode, pos, len, flags);
2485d2a17637SMingming Cao retry:
248664769240SAlex Tomas 	/*
248764769240SAlex Tomas 	 * With delayed allocation, we don't log the i_disksize update
248864769240SAlex Tomas 	 * if there is delayed block allocation. But we still need
248964769240SAlex Tomas 	 * to journal the i_disksize update if the write goes to the end
249064769240SAlex Tomas 	 * of a file which has an already mapped buffer.
249164769240SAlex Tomas 	 */
249264769240SAlex Tomas 	handle = ext4_journal_start(inode, 1);
249364769240SAlex Tomas 	if (IS_ERR(handle)) {
249464769240SAlex Tomas 		ret = PTR_ERR(handle);
249564769240SAlex Tomas 		goto out;
249664769240SAlex Tomas 	}
2497ebd3610bSJan Kara 	/* We cannot recurse into the filesystem as the transaction is already
2498ebd3610bSJan Kara 	 * started */
2499ebd3610bSJan Kara 	flags |= AOP_FLAG_NOFS;
250064769240SAlex Tomas 
250154566b2cSNick Piggin 	page = grab_cache_page_write_begin(mapping, index, flags);
2502d5a0d4f7SEric Sandeen 	if (!page) {
2503d5a0d4f7SEric Sandeen 		ext4_journal_stop(handle);
2504d5a0d4f7SEric Sandeen 		ret = -ENOMEM;
2505d5a0d4f7SEric Sandeen 		goto out;
2506d5a0d4f7SEric Sandeen 	}
250764769240SAlex Tomas 	*pagep = page;
250864769240SAlex Tomas 
25096e1db88dSChristoph Hellwig 	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
251064769240SAlex Tomas 	if (ret < 0) {
251164769240SAlex Tomas 		unlock_page(page);
251264769240SAlex Tomas 		ext4_journal_stop(handle);
251364769240SAlex Tomas 		page_cache_release(page);
2514ae4d5372SAneesh Kumar K.V 		/*
2515ae4d5372SAneesh Kumar K.V 		 * block_write_begin may have instantiated a few blocks
2516ae4d5372SAneesh Kumar K.V 		 * outside i_size.  Trim these off again. Don't need
2517ae4d5372SAneesh Kumar K.V 		 * i_size_read because we hold i_mutex.
2518ae4d5372SAneesh Kumar K.V 		 */
2519ae4d5372SAneesh Kumar K.V 		if (pos + len > inode->i_size)
2520b9a4207dSJan Kara 			ext4_truncate_failed_write(inode);
252164769240SAlex Tomas 	}
252264769240SAlex Tomas 
2523d2a17637SMingming Cao 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2524d2a17637SMingming Cao 		goto retry;
252564769240SAlex Tomas out:
252664769240SAlex Tomas 	return ret;
252764769240SAlex Tomas }
252864769240SAlex Tomas 
2529632eaeabSMingming Cao /*
2530632eaeabSMingming Cao  * Check if we should update i_disksize
2531632eaeabSMingming Cao  * when a write to the end of the file does not require block allocation
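 * (i.e. the buffer covering the last written byte is already mapped
 * and is neither delayed nor unwritten)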
2532632eaeabSMingming Cao  */
2533632eaeabSMingming Cao static int ext4_da_should_update_i_disksize(struct page *page,
2534632eaeabSMingming Cao 					    unsigned long offset)
2535632eaeabSMingming Cao {
2536632eaeabSMingming Cao 	struct buffer_head *bh;
2537632eaeabSMingming Cao 	struct inode *inode = page->mapping->host;
2538632eaeabSMingming Cao 	unsigned int idx;
2539632eaeabSMingming Cao 	int i;
2540632eaeabSMingming Cao 
2541632eaeabSMingming Cao 	bh = page_buffers(page);
2542632eaeabSMingming Cao 	idx = offset >> inode->i_blkbits;
2543632eaeabSMingming Cao 
2544632eaeabSMingming Cao 	for (i = 0; i < idx; i++)
2545632eaeabSMingming Cao 		bh = bh->b_this_page;
2546632eaeabSMingming Cao 
254729fa89d0SAneesh Kumar K.V 	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
2548632eaeabSMingming Cao 		return 0;
2549632eaeabSMingming Cao 	return 1;
2550632eaeabSMingming Cao }
2551632eaeabSMingming Cao 
255264769240SAlex Tomas static int ext4_da_write_end(struct file *file,
255364769240SAlex Tomas 			     struct address_space *mapping,
255464769240SAlex Tomas 			     loff_t pos, unsigned len, unsigned copied,
255564769240SAlex Tomas 			     struct page *page, void *fsdata)
255664769240SAlex Tomas {
255764769240SAlex Tomas 	struct inode *inode = mapping->host;
255864769240SAlex Tomas 	int ret = 0, ret2;
255964769240SAlex Tomas 	handle_t *handle = ext4_journal_current_handle();
256064769240SAlex Tomas 	loff_t new_i_size;
2561632eaeabSMingming Cao 	unsigned long start, end;
256279f0be8dSAneesh Kumar K.V 	int write_mode = (int)(unsigned long)fsdata;
256379f0be8dSAneesh Kumar K.V 
256479f0be8dSAneesh Kumar K.V 	if (write_mode == FALL_BACK_TO_NONDELALLOC) {
25653d2b1582SLukas Czerner 		switch (ext4_inode_journal_mode(inode)) {
25663d2b1582SLukas Czerner 		case EXT4_INODE_ORDERED_DATA_MODE:
256779f0be8dSAneesh Kumar K.V 			return ext4_ordered_write_end(file, mapping, pos,
256879f0be8dSAneesh Kumar K.V 					len, copied, page, fsdata);
25693d2b1582SLukas Czerner 		case EXT4_INODE_WRITEBACK_DATA_MODE:
257079f0be8dSAneesh Kumar K.V 			return ext4_writeback_write_end(file, mapping, pos,
257179f0be8dSAneesh Kumar K.V 					len, copied, page, fsdata);
25723d2b1582SLukas Czerner 		default:
257379f0be8dSAneesh Kumar K.V 			BUG();
257479f0be8dSAneesh Kumar K.V 		}
257579f0be8dSAneesh Kumar K.V 	}
2576632eaeabSMingming Cao 
25779bffad1eSTheodore Ts'o 	trace_ext4_da_write_end(inode, pos, len, copied);
2578632eaeabSMingming Cao 	start = pos & (PAGE_CACHE_SIZE - 1);
2579632eaeabSMingming Cao 	end = start + copied - 1;
258064769240SAlex Tomas 
258164769240SAlex Tomas 	/*
258264769240SAlex Tomas 	 * generic_write_end() will run mark_inode_dirty() if i_size
258364769240SAlex Tomas 	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
258464769240SAlex Tomas 	 * into that.
258564769240SAlex Tomas 	 */
258664769240SAlex Tomas 
258764769240SAlex Tomas 	new_i_size = pos + copied;
2588ea51d132SAndrea Arcangeli 	if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
2589632eaeabSMingming Cao 		if (ext4_da_should_update_i_disksize(page, end)) {
2590632eaeabSMingming Cao 			down_write(&EXT4_I(inode)->i_data_sem);
2591632eaeabSMingming Cao 			if (new_i_size > EXT4_I(inode)->i_disksize) {
259264769240SAlex Tomas 				/*
2593632eaeabSMingming Cao 				 * Updating i_disksize when extending file
2594632eaeabSMingming Cao 				 * without needing block allocation
259564769240SAlex Tomas 				 */
259664769240SAlex Tomas 				if (ext4_should_order_data(inode))
2597632eaeabSMingming Cao 					ret = ext4_jbd2_file_inode(handle,
2598632eaeabSMingming Cao 								   inode);
259964769240SAlex Tomas 
260064769240SAlex Tomas 				EXT4_I(inode)->i_disksize = new_i_size;
260164769240SAlex Tomas 			}
2602632eaeabSMingming Cao 			up_write(&EXT4_I(inode)->i_data_sem);
2603cf17fea6SAneesh Kumar K.V 			/* We need to mark the inode dirty even if
2604cf17fea6SAneesh Kumar K.V 			 * new_i_size is less than inode->i_size
2605cf17fea6SAneesh Kumar K.V 			 * but greater than i_disksize (hint: delalloc).
2606cf17fea6SAneesh Kumar K.V 			 */
2607cf17fea6SAneesh Kumar K.V 			ext4_mark_inode_dirty(handle, inode);
2608632eaeabSMingming Cao 		}
2609632eaeabSMingming Cao 	}
261064769240SAlex Tomas 	ret2 = generic_write_end(file, mapping, pos, len, copied,
261164769240SAlex Tomas 							page, fsdata);
261264769240SAlex Tomas 	copied = ret2;
261364769240SAlex Tomas 	if (ret2 < 0)
261464769240SAlex Tomas 		ret = ret2;
261564769240SAlex Tomas 	ret2 = ext4_journal_stop(handle);
261664769240SAlex Tomas 	if (!ret)
261764769240SAlex Tomas 		ret = ret2;
261864769240SAlex Tomas 
261964769240SAlex Tomas 	return ret ? ret : copied;
262064769240SAlex Tomas }
262164769240SAlex Tomas 
262264769240SAlex Tomas static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
262364769240SAlex Tomas {
262464769240SAlex Tomas 	/*
262564769240SAlex Tomas 	 * Drop reserved blocks
262664769240SAlex Tomas 	 */
262764769240SAlex Tomas 	BUG_ON(!PageLocked(page));
262864769240SAlex Tomas 	if (!page_has_buffers(page))
262964769240SAlex Tomas 		goto out;
263064769240SAlex Tomas 
2631d2a17637SMingming Cao 	ext4_da_page_release_reservation(page, offset);
263264769240SAlex Tomas 
263364769240SAlex Tomas out:
263464769240SAlex Tomas 	ext4_invalidatepage(page, offset);
263564769240SAlex Tomas 
263664769240SAlex Tomas 	return;
263764769240SAlex Tomas }
263864769240SAlex Tomas 
2639ccd2506bSTheodore Ts'o /*
2640ccd2506bSTheodore Ts'o  * Force all delayed allocation blocks to be allocated for a given inode.
2641ccd2506bSTheodore Ts'o  */
2642ccd2506bSTheodore Ts'o int ext4_alloc_da_blocks(struct inode *inode)
2643ccd2506bSTheodore Ts'o {
2644fb40ba0dSTheodore Ts'o 	trace_ext4_alloc_da_blocks(inode);
2645fb40ba0dSTheodore Ts'o 
2646ccd2506bSTheodore Ts'o 	if (!EXT4_I(inode)->i_reserved_data_blocks &&
2647ccd2506bSTheodore Ts'o 	    !EXT4_I(inode)->i_reserved_meta_blocks)
2648ccd2506bSTheodore Ts'o 		return 0;
2649ccd2506bSTheodore Ts'o 
2650ccd2506bSTheodore Ts'o 	/*
2651ccd2506bSTheodore Ts'o 	 * We do something simple for now.  The filemap_flush() will
2652ccd2506bSTheodore Ts'o 	 * also start triggering a write of the data blocks, which is
2653ccd2506bSTheodore Ts'o 	 * not strictly speaking necessary (and for users of
2654ccd2506bSTheodore Ts'o 	 * laptop_mode, not even desirable).  However, to do otherwise
2655ccd2506bSTheodore Ts'o 	 * would require replicating code paths in:
2656ccd2506bSTheodore Ts'o 	 *
2657ccd2506bSTheodore Ts'o 	 * ext4_da_writepages() ->
2658ccd2506bSTheodore Ts'o 	 *    write_cache_pages() ---> (via passed in callback function)
2659ccd2506bSTheodore Ts'o 	 *        __mpage_da_writepage() -->
2660ccd2506bSTheodore Ts'o 	 *           mpage_add_bh_to_extent()
2661ccd2506bSTheodore Ts'o 	 *           mpage_da_map_blocks()
2662ccd2506bSTheodore Ts'o 	 *
2663ccd2506bSTheodore Ts'o 	 * The problem is that write_cache_pages(), located in
2664ccd2506bSTheodore Ts'o 	 * mm/page-writeback.c, marks pages clean in preparation for
2665ccd2506bSTheodore Ts'o 	 * doing I/O, which is not desirable if we're not planning on
2666ccd2506bSTheodore Ts'o 	 * doing I/O at all.
2667ccd2506bSTheodore Ts'o 	 *
2668ccd2506bSTheodore Ts'o 	 * We could call write_cache_pages(), and then redirty all of
2669380cf090SWu Fengguang 	 * the pages by calling redirty_page_for_writepage() but that
2670ccd2506bSTheodore Ts'o 	 * would be ugly in the extreme.  So instead we would need to
2671ccd2506bSTheodore Ts'o 	 * replicate parts of the code in the above functions,
267225985edcSLucas De Marchi 	 * simplifying them because we wouldn't actually intend to
2673ccd2506bSTheodore Ts'o 	 * write out the pages, but rather only collect contiguous
2674ccd2506bSTheodore Ts'o 	 * logical block extents, call the multi-block allocator, and
2675ccd2506bSTheodore Ts'o 	 * then update the buffer heads with the block allocations.
2676ccd2506bSTheodore Ts'o 	 *
2677ccd2506bSTheodore Ts'o 	 * For now, though, we'll cheat by calling filemap_flush(),
2678ccd2506bSTheodore Ts'o 	 * which will map the blocks, and start the I/O, but not
2679ccd2506bSTheodore Ts'o 	 * actually wait for the I/O to complete.
2680ccd2506bSTheodore Ts'o 	 */
2681ccd2506bSTheodore Ts'o 	return filemap_flush(inode->i_mapping);
2682ccd2506bSTheodore Ts'o }
268364769240SAlex Tomas 
268464769240SAlex Tomas /*
2685ac27a0ecSDave Kleikamp  * bmap() is special.  It gets used by applications such as lilo and by
2686ac27a0ecSDave Kleikamp  * the swapper to find the on-disk block of a specific piece of data.
2687ac27a0ecSDave Kleikamp  *
2688ac27a0ecSDave Kleikamp  * Naturally, this is dangerous if the block concerned is still in the
2689617ba13bSMingming Cao  * journal.  If somebody makes a swapfile on an ext4 data-journaling
2690ac27a0ecSDave Kleikamp  * filesystem and enables swap, then they may get a nasty shock when the
2691ac27a0ecSDave Kleikamp  * data getting swapped to that swapfile suddenly gets overwritten by
2692ac27a0ecSDave Kleikamp  * the original zeros written out previously to the journal and
2693ac27a0ecSDave Kleikamp  * awaiting writeback in the kernel's buffer cache.
2694ac27a0ecSDave Kleikamp  *
2695ac27a0ecSDave Kleikamp  * So, if we see any bmap calls here on a modified, data-journaled file,
2696ac27a0ecSDave Kleikamp  * take extra steps to flush any blocks which might be in the cache.
2697ac27a0ecSDave Kleikamp  */
2698617ba13bSMingming Cao static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2699ac27a0ecSDave Kleikamp {
2700ac27a0ecSDave Kleikamp 	struct inode *inode = mapping->host;
2701ac27a0ecSDave Kleikamp 	journal_t *journal;
2702ac27a0ecSDave Kleikamp 	int err;
2703ac27a0ecSDave Kleikamp 
270464769240SAlex Tomas 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
270564769240SAlex Tomas 			test_opt(inode->i_sb, DELALLOC)) {
270664769240SAlex Tomas 		/*
270764769240SAlex Tomas 		 * With delalloc we want to sync the file
270864769240SAlex Tomas 		 * so that we can make sure we allocate
270964769240SAlex Tomas 		 * blocks for the file
271064769240SAlex Tomas 		 */
271164769240SAlex Tomas 		filemap_write_and_wait(mapping);
271264769240SAlex Tomas 	}
271364769240SAlex Tomas 
271419f5fb7aSTheodore Ts'o 	if (EXT4_JOURNAL(inode) &&
271519f5fb7aSTheodore Ts'o 	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
2716ac27a0ecSDave Kleikamp 		/*
2717ac27a0ecSDave Kleikamp 		 * This is a REALLY heavyweight approach, but the use of
2718ac27a0ecSDave Kleikamp 		 * bmap on dirty files is expected to be extremely rare:
2719ac27a0ecSDave Kleikamp 		 * only if we run lilo or swapon on a freshly made file
2720ac27a0ecSDave Kleikamp 		 * do we expect this to happen.
2721ac27a0ecSDave Kleikamp 		 *
2722ac27a0ecSDave Kleikamp 		 * (bmap requires CAP_SYS_RAWIO so this does not
2723ac27a0ecSDave Kleikamp 		 * represent an unprivileged user DOS attack --- we'd be
2724ac27a0ecSDave Kleikamp 		 * in trouble if mortal users could trigger this path at
2725ac27a0ecSDave Kleikamp 		 * will.)
2726ac27a0ecSDave Kleikamp 		 *
2727617ba13bSMingming Cao 		 * NB. EXT4_STATE_JDATA is not set on files other than
2728ac27a0ecSDave Kleikamp 		 * regular files.  If somebody wants to bmap a directory
2729ac27a0ecSDave Kleikamp 		 * or symlink and gets confused because the buffer
2730ac27a0ecSDave Kleikamp 		 * hasn't yet been flushed to disk, they deserve
2731ac27a0ecSDave Kleikamp 		 * everything they get.
2732ac27a0ecSDave Kleikamp 		 */
2733ac27a0ecSDave Kleikamp 
273419f5fb7aSTheodore Ts'o 		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
2735617ba13bSMingming Cao 		journal = EXT4_JOURNAL(inode);
2736dab291afSMingming Cao 		jbd2_journal_lock_updates(journal);
2737dab291afSMingming Cao 		err = jbd2_journal_flush(journal);
2738dab291afSMingming Cao 		jbd2_journal_unlock_updates(journal);
2739ac27a0ecSDave Kleikamp 
2740ac27a0ecSDave Kleikamp 		if (err)
2741ac27a0ecSDave Kleikamp 			return 0;
2742ac27a0ecSDave Kleikamp 	}
2743ac27a0ecSDave Kleikamp 
2744617ba13bSMingming Cao 	return generic_block_bmap(mapping, block, ext4_get_block);
2745ac27a0ecSDave Kleikamp }
2746ac27a0ecSDave Kleikamp 
2747617ba13bSMingming Cao static int ext4_readpage(struct file *file, struct page *page)
2748ac27a0ecSDave Kleikamp {
27490562e0baSJiaying Zhang 	trace_ext4_readpage(page);
2750617ba13bSMingming Cao 	return mpage_readpage(page, ext4_get_block);
2751ac27a0ecSDave Kleikamp }
2752ac27a0ecSDave Kleikamp 
2753ac27a0ecSDave Kleikamp static int
2754617ba13bSMingming Cao ext4_readpages(struct file *file, struct address_space *mapping,
2755ac27a0ecSDave Kleikamp 		struct list_head *pages, unsigned nr_pages)
2756ac27a0ecSDave Kleikamp {
2757617ba13bSMingming Cao 	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
2758ac27a0ecSDave Kleikamp }
2759ac27a0ecSDave Kleikamp 
2760744692dcSJiaying Zhang static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
2761744692dcSJiaying Zhang {
2762744692dcSJiaying Zhang 	struct buffer_head *head, *bh;
2763744692dcSJiaying Zhang 	unsigned int curr_off = 0;
2764744692dcSJiaying Zhang 
2765744692dcSJiaying Zhang 	if (!page_has_buffers(page))
2766744692dcSJiaying Zhang 		return;
2767744692dcSJiaying Zhang 	head = bh = page_buffers(page);
2768744692dcSJiaying Zhang 	do {
2769744692dcSJiaying Zhang 		if (offset <= curr_off && test_clear_buffer_uninit(bh)
2770744692dcSJiaying Zhang 					&& bh->b_private) {
2771744692dcSJiaying Zhang 			ext4_free_io_end(bh->b_private);
2772744692dcSJiaying Zhang 			bh->b_private = NULL;
2773744692dcSJiaying Zhang 			bh->b_end_io = NULL;
2774744692dcSJiaying Zhang 		}
2775744692dcSJiaying Zhang 		curr_off = curr_off + bh->b_size;
2776744692dcSJiaying Zhang 		bh = bh->b_this_page;
2777744692dcSJiaying Zhang 	} while (bh != head);
2778744692dcSJiaying Zhang }
2779744692dcSJiaying Zhang 
2780617ba13bSMingming Cao static void ext4_invalidatepage(struct page *page, unsigned long offset)
2781ac27a0ecSDave Kleikamp {
2782617ba13bSMingming Cao 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2783ac27a0ecSDave Kleikamp 
27840562e0baSJiaying Zhang 	trace_ext4_invalidatepage(page, offset);
27850562e0baSJiaying Zhang 
2786ac27a0ecSDave Kleikamp 	/*
2787744692dcSJiaying Zhang 	 * free any io_end structure allocated for buffers to be discarded
2788744692dcSJiaying Zhang 	 */
2789744692dcSJiaying Zhang 	if (ext4_should_dioread_nolock(page->mapping->host))
2790744692dcSJiaying Zhang 		ext4_invalidatepage_free_endio(page, offset);
2791744692dcSJiaying Zhang 	/*
2792ac27a0ecSDave Kleikamp 	 * If it's a full truncate we just forget about the pending dirtying
2793ac27a0ecSDave Kleikamp 	 */
2794ac27a0ecSDave Kleikamp 	if (offset == 0)
2795ac27a0ecSDave Kleikamp 		ClearPageChecked(page);
2796ac27a0ecSDave Kleikamp 
27970390131bSFrank Mayhar 	if (journal)
2798dab291afSMingming Cao 		jbd2_journal_invalidatepage(journal, page, offset);
27990390131bSFrank Mayhar 	else
28000390131bSFrank Mayhar 		block_invalidatepage(page, offset);
2801ac27a0ecSDave Kleikamp }
2802ac27a0ecSDave Kleikamp 
2803617ba13bSMingming Cao static int ext4_releasepage(struct page *page, gfp_t wait)
2804ac27a0ecSDave Kleikamp {
2805617ba13bSMingming Cao 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2806ac27a0ecSDave Kleikamp 
28070562e0baSJiaying Zhang 	trace_ext4_releasepage(page);
28080562e0baSJiaying Zhang 
2809ac27a0ecSDave Kleikamp 	WARN_ON(PageChecked(page));
2810ac27a0ecSDave Kleikamp 	if (!page_has_buffers(page))
2811ac27a0ecSDave Kleikamp 		return 0;
28120390131bSFrank Mayhar 	if (journal)
2813dab291afSMingming Cao 		return jbd2_journal_try_to_free_buffers(journal, page, wait);
28140390131bSFrank Mayhar 	else
28150390131bSFrank Mayhar 		return try_to_free_buffers(page);
2816ac27a0ecSDave Kleikamp }
2817ac27a0ecSDave Kleikamp 
2818ac27a0ecSDave Kleikamp /*
28192ed88685STheodore Ts'o  * ext4_get_block used when preparing for a DIO write or buffer write.
28202ed88685STheodore Ts'o  * We allocate an uninitialized extent if blocks haven't been allocated.
28212ed88685STheodore Ts'o  * The extent will be converted to initialized after the IO is complete.
28222ed88685STheodore Ts'o  */
2823c7064ef1SJiaying Zhang static int ext4_get_block_write(struct inode *inode, sector_t iblock,
28244c0425ffSMingming Cao 		   struct buffer_head *bh_result, int create)
28254c0425ffSMingming Cao {
2826c7064ef1SJiaying Zhang 	ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
28278d5d02e6SMingming Cao 		   inode->i_ino, create);
28282ed88685STheodore Ts'o 	return _ext4_get_block(inode, iblock, bh_result,
28292ed88685STheodore Ts'o 			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
28304c0425ffSMingming Cao }
28314c0425ffSMingming Cao 
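/*
 * Like ext4_get_block_write(), but used on the overwrite DIO path, where
 * the caller already holds i_data_sem for reading (see ext4_ext_direct_IO()
 * below).  The flags are forced to EXT4_GET_BLOCKS_NO_LOCK; since the write
 * is an overwrite, the blocks are expected to be mapped already and no
 * allocation is requested here.
 */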
2832729f52c6SZheng Liu static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
2833729f52c6SZheng Liu 		   struct buffer_head *bh_result, int flags)
2834729f52c6SZheng Liu {
2835729f52c6SZheng Liu 	handle_t *handle = ext4_journal_current_handle();
2836729f52c6SZheng Liu 	struct ext4_map_blocks map;
2837729f52c6SZheng Liu 	int ret = 0;
2838729f52c6SZheng Liu 
2839729f52c6SZheng Liu 	ext4_debug("ext4_get_block_write_nolock: inode %lu, flag %d\n",
2840729f52c6SZheng Liu 		   inode->i_ino, flags);
2841729f52c6SZheng Liu 
2842729f52c6SZheng Liu 	flags = EXT4_GET_BLOCKS_NO_LOCK;
2843729f52c6SZheng Liu 
2844729f52c6SZheng Liu 	map.m_lblk = iblock;
2845729f52c6SZheng Liu 	map.m_len = bh_result->b_size >> inode->i_blkbits;
2846729f52c6SZheng Liu 
2847729f52c6SZheng Liu 	ret = ext4_map_blocks(handle, inode, &map, flags);
2848729f52c6SZheng Liu 	if (ret > 0) {
2849729f52c6SZheng Liu 		map_bh(bh_result, inode->i_sb, map.m_pblk);
2850729f52c6SZheng Liu 		bh_result->b_state = (bh_result->b_state & ~EXT4_MAP_FLAGS) |
2851729f52c6SZheng Liu 					map.m_flags;
2852729f52c6SZheng Liu 		bh_result->b_size = inode->i_sb->s_blocksize * map.m_len;
2853729f52c6SZheng Liu 		ret = 0;
2854729f52c6SZheng Liu 	}
2855729f52c6SZheng Liu 	return ret;
2856729f52c6SZheng Liu }
2857729f52c6SZheng Liu 
28584c0425ffSMingming Cao static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
2859552ef802SChristoph Hellwig 			    ssize_t size, void *private, int ret,
2860552ef802SChristoph Hellwig 			    bool is_async)
28614c0425ffSMingming Cao {
286272c5052dSChristoph Hellwig 	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
28634c0425ffSMingming Cao 	ext4_io_end_t *io_end = iocb->private;
28644c0425ffSMingming Cao 	struct workqueue_struct *wq;
2865744692dcSJiaying Zhang 	unsigned long flags;
2866744692dcSJiaying Zhang 	struct ext4_inode_info *ei;
28674c0425ffSMingming Cao 
28684b70df18SMingming 	/* if not async direct IO or dio with 0 bytes write, just return */
28694b70df18SMingming 	if (!io_end || !size)
2870552ef802SChristoph Hellwig 		goto out;
28714b70df18SMingming 
28728d5d02e6SMingming Cao 	ext_debug("ext4_end_io_dio(): io_end 0x%p "
2873ace36ad4SJoe Perches 		  "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
28748d5d02e6SMingming Cao 		  iocb->private, io_end->inode->i_ino, iocb, offset,
28758d5d02e6SMingming Cao 		  size);
28768d5d02e6SMingming Cao 
2877b5a7e970STheodore Ts'o 	iocb->private = NULL;
2878b5a7e970STheodore Ts'o 
28798d5d02e6SMingming Cao 	/* if not aio dio with unwritten extents, just free io and return */
2880bd2d0210STheodore Ts'o 	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
28818d5d02e6SMingming Cao 		ext4_free_io_end(io_end);
28825b3ff237Sjiayingz@google.com (Jiaying Zhang) out:
28835b3ff237Sjiayingz@google.com (Jiaying Zhang) 		if (is_async)
28845b3ff237Sjiayingz@google.com (Jiaying Zhang) 			aio_complete(iocb, ret, 0);
288572c5052dSChristoph Hellwig 		inode_dio_done(inode);
28865b3ff237Sjiayingz@google.com (Jiaying Zhang) 		return;
28878d5d02e6SMingming Cao 	}
28888d5d02e6SMingming Cao 
28894c0425ffSMingming Cao 	io_end->offset = offset;
28904c0425ffSMingming Cao 	io_end->size = size;
28915b3ff237Sjiayingz@google.com (Jiaying Zhang) 	if (is_async) {
28925b3ff237Sjiayingz@google.com (Jiaying Zhang) 		io_end->iocb = iocb;
28935b3ff237Sjiayingz@google.com (Jiaying Zhang) 		io_end->result = ret;
28945b3ff237Sjiayingz@google.com (Jiaying Zhang) 	}
28954c0425ffSMingming Cao 	wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
28964c0425ffSMingming Cao 
28978d5d02e6SMingming Cao 	/* Add the io_end to the per-inode completed aio dio list */
2898744692dcSJiaying Zhang 	ei = EXT4_I(io_end->inode);
2899744692dcSJiaying Zhang 	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
2900744692dcSJiaying Zhang 	list_add_tail(&io_end->list, &ei->i_completed_io_list);
2901744692dcSJiaying Zhang 	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
2902c999af2bSEric Sandeen 
2903c999af2bSEric Sandeen 	/* queue the work to convert unwritten extents to written */
29044c81f045STejun Heo 	queue_work(wq, &io_end->work);
29054c0425ffSMingming Cao }
2906c7064ef1SJiaying Zhang 
2907744692dcSJiaying Zhang static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
2908744692dcSJiaying Zhang {
2909744692dcSJiaying Zhang 	ext4_io_end_t *io_end = bh->b_private;
2910744692dcSJiaying Zhang 	struct workqueue_struct *wq;
2911744692dcSJiaying Zhang 	struct inode *inode;
2912744692dcSJiaying Zhang 	unsigned long flags;
2913744692dcSJiaying Zhang 
2914744692dcSJiaying Zhang 	if (!test_clear_buffer_uninit(bh) || !io_end)
2915744692dcSJiaying Zhang 		goto out;
2916744692dcSJiaying Zhang 
2917744692dcSJiaying Zhang 	if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
291892b97816STheodore Ts'o 		ext4_msg(io_end->inode->i_sb, KERN_INFO,
291992b97816STheodore Ts'o 			 "sb umounted, discard end_io request for inode %lu",
2920744692dcSJiaying Zhang 			 io_end->inode->i_ino);
2921744692dcSJiaying Zhang 		ext4_free_io_end(io_end);
2922744692dcSJiaying Zhang 		goto out;
2923744692dcSJiaying Zhang 	}
2924744692dcSJiaying Zhang 
292532c80b32STao Ma 	/*
292632c80b32STao Ma 	 * It may be over-defensive here to check EXT4_IO_END_UNWRITTEN now,
292732c80b32STao Ma 	 * but being more careful is always safe for future changes.
292832c80b32STao Ma 	 */
2929744692dcSJiaying Zhang 	inode = io_end->inode;
29300edeb71dSTao Ma 	ext4_set_io_unwritten_flag(inode, io_end);
2931744692dcSJiaying Zhang 
2932744692dcSJiaying Zhang 	/* Add the io_end to the per-inode completed io list */
2933744692dcSJiaying Zhang 	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
2934744692dcSJiaying Zhang 	list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
2935744692dcSJiaying Zhang 	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
2936744692dcSJiaying Zhang 
2937744692dcSJiaying Zhang 	wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
2938744692dcSJiaying Zhang 	/* queue the work to convert unwritten extents to written */
2939744692dcSJiaying Zhang 	queue_work(wq, &io_end->work);
2940744692dcSJiaying Zhang out:
2941744692dcSJiaying Zhang 	bh->b_private = NULL;
2942744692dcSJiaying Zhang 	bh->b_end_io = NULL;
2943744692dcSJiaying Zhang 	clear_buffer_uninit(bh);
2944744692dcSJiaying Zhang 	end_buffer_async_write(bh, uptodate);
2945744692dcSJiaying Zhang }
2946744692dcSJiaying Zhang 
2947744692dcSJiaying Zhang static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
2948744692dcSJiaying Zhang {
2949744692dcSJiaying Zhang 	ext4_io_end_t *io_end;
2950744692dcSJiaying Zhang 	struct page *page = bh->b_page;
2951744692dcSJiaying Zhang 	loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
2952744692dcSJiaying Zhang 	size_t size = bh->b_size;
2953744692dcSJiaying Zhang 
2954744692dcSJiaying Zhang retry:
2955744692dcSJiaying Zhang 	io_end = ext4_init_io_end(inode, GFP_ATOMIC);
2956744692dcSJiaying Zhang 	if (!io_end) {
29576db26ffcSAndrew Morton 		pr_warn_ratelimited("%s: allocation fail\n", __func__);
2958744692dcSJiaying Zhang 		schedule();
2959744692dcSJiaying Zhang 		goto retry;
2960744692dcSJiaying Zhang 	}
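	/*
	 * Note: the io_end is allocated with GFP_ATOMIC, and on allocation
	 * failure this path does not fail the write: it schedule()s and
	 * retries until the allocation succeeds.
	 */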
2961744692dcSJiaying Zhang 	io_end->offset = offset;
2962744692dcSJiaying Zhang 	io_end->size = size;
2963744692dcSJiaying Zhang 	/*
2964744692dcSJiaying Zhang 	 * We need to hold a reference to the page to make sure it
2965744692dcSJiaying Zhang 	 * doesn't get evicted before ext4_end_io_work() has a chance
2966744692dcSJiaying Zhang 	 * to convert the extent from unwritten to written.
2967744692dcSJiaying Zhang 	 */
2968744692dcSJiaying Zhang 	io_end->page = page;
2969744692dcSJiaying Zhang 	get_page(io_end->page);
2970744692dcSJiaying Zhang 
2971744692dcSJiaying Zhang 	bh->b_private = io_end;
2972744692dcSJiaying Zhang 	bh->b_end_io = ext4_end_io_buffer_write;
2973744692dcSJiaying Zhang 	return 0;
2974744692dcSJiaying Zhang }
2975744692dcSJiaying Zhang 
29764c0425ffSMingming Cao /*
29774c0425ffSMingming Cao  * For ext4 extent files, ext4 will do direct-io writes to holes,
29784c0425ffSMingming Cao  * preallocated extents, and writes that extend the file; no need to
29794c0425ffSMingming Cao  * fall back to buffered IO.
29804c0425ffSMingming Cao  *
2981b595076aSUwe Kleine-König  * For holes, we fallocate those blocks and mark them as uninitialized.
29824c0425ffSMingming Cao  * If those blocks were preallocated, we make sure they are split, but
2983b595076aSUwe Kleine-König  * still keep the range to write as uninitialized.
29844c0425ffSMingming Cao  *
29858d5d02e6SMingming Cao  * The unwritten extents will be converted to written when DIO is completed.
29868d5d02e6SMingming Cao  * For async direct IO, since the IO may still be pending when we return, we
298725985edcSLucas De Marchi  * set up an end_io callback function, which will do the conversion
29888d5d02e6SMingming Cao  * when the async direct IO is completed.
29894c0425ffSMingming Cao  *
29904c0425ffSMingming Cao  * If the O_DIRECT write will extend the file then add this inode to the
29914c0425ffSMingming Cao  * orphan list.  So recovery will truncate it back to the original size
29924c0425ffSMingming Cao  * if the machine crashes during the write.
29934c0425ffSMingming Cao  *
29944c0425ffSMingming Cao  */
29954c0425ffSMingming Cao static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
29964c0425ffSMingming Cao 			      const struct iovec *iov, loff_t offset,
29974c0425ffSMingming Cao 			      unsigned long nr_segs)
29984c0425ffSMingming Cao {
29994c0425ffSMingming Cao 	struct file *file = iocb->ki_filp;
30004c0425ffSMingming Cao 	struct inode *inode = file->f_mapping->host;
30014c0425ffSMingming Cao 	ssize_t ret;
30024c0425ffSMingming Cao 	size_t count = iov_length(iov, nr_segs);
30034c0425ffSMingming Cao 
30044c0425ffSMingming Cao 	loff_t final_size = offset + count;
30054c0425ffSMingming Cao 	if (rw == WRITE && final_size <= inode->i_size) {
3006729f52c6SZheng Liu 		int overwrite = 0;
3007729f52c6SZheng Liu 
30084bd809dbSZheng Liu 		BUG_ON(iocb->private == NULL);
30094bd809dbSZheng Liu 
30104bd809dbSZheng Liu 		/* If we do an overwrite dio, i_mutex locking can be released */
30114bd809dbSZheng Liu 		overwrite = *((int *)iocb->private);
30124bd809dbSZheng Liu 
30134bd809dbSZheng Liu 		if (overwrite) {
30144bd809dbSZheng Liu 			down_read(&EXT4_I(inode)->i_data_sem);
30154bd809dbSZheng Liu 			mutex_unlock(&inode->i_mutex);
30164bd809dbSZheng Liu 		}
30174bd809dbSZheng Liu 
30184c0425ffSMingming Cao 		/*
30198d5d02e6SMingming Cao 		 * We could do a direct write to holes and fallocated extents.
30208d5d02e6SMingming Cao 		 *
30218d5d02e6SMingming Cao 		 * Allocated blocks to fill the hole are marked as uninitialized
302225985edcSLucas De Marchi 		 * to prevent a parallel buffered read from exposing the stale
30234c0425ffSMingming Cao 		 * data before DIO completes the data IO.
30248d5d02e6SMingming Cao 		 *
30258d5d02e6SMingming Cao 		 * As to previously fallocated extents, ext4 get_block
30264c0425ffSMingming Cao 		 * will simply mark the buffer mapped but still
30274c0425ffSMingming Cao 		 * keep the extents uninitialized.
30288d5d02e6SMingming Cao 		 *
30298d5d02e6SMingming Cao 		 * For the non-AIO case, we will convert those unwritten extents
30308d5d02e6SMingming Cao 		 * to written after returning from blockdev_direct_IO.
30314c0425ffSMingming Cao 		 *
30328d5d02e6SMingming Cao 		 * For async DIO, the conversion needs to be deferred until
30338d5d02e6SMingming Cao 		 * the IO is completed. The ext4 end_io callback function
30348d5d02e6SMingming Cao 		 * will be called to take care of the conversion work.
30358d5d02e6SMingming Cao 		 * Here, for the async case, we allocate an io_end structure to
30368d5d02e6SMingming Cao 		 * hook to the iocb.
30374c0425ffSMingming Cao 		 */
30388d5d02e6SMingming Cao 		iocb->private = NULL;
30398d5d02e6SMingming Cao 		EXT4_I(inode)->cur_aio_dio = NULL;
30408d5d02e6SMingming Cao 		if (!is_sync_kiocb(iocb)) {
3041266991b1SJeff Moyer 			ext4_io_end_t *io_end =
3042266991b1SJeff Moyer 				ext4_init_io_end(inode, GFP_NOFS);
30434bd809dbSZheng Liu 			if (!io_end) {
30444bd809dbSZheng Liu 				ret = -ENOMEM;
30454bd809dbSZheng Liu 				goto retake_lock;
30464bd809dbSZheng Liu 			}
3047266991b1SJeff Moyer 			io_end->flag |= EXT4_IO_END_DIRECT;
3048266991b1SJeff Moyer 			iocb->private = io_end;
30498d5d02e6SMingming Cao 			/*
30508d5d02e6SMingming Cao 			 * We save the io structure for the current async
305179e83036SEric Sandeen 			 * direct IO, so that a later ext4_map_blocks()
30528d5d02e6SMingming Cao 			 * can flag in the io structure whether there
30538d5d02e6SMingming Cao 			 * are unwritten extents that need to be converted
30548d5d02e6SMingming Cao 			 * when the IO is completed.
30558d5d02e6SMingming Cao 			 */
30568d5d02e6SMingming Cao 			EXT4_I(inode)->cur_aio_dio = iocb->private;
30578d5d02e6SMingming Cao 		}
30588d5d02e6SMingming Cao 
3059729f52c6SZheng Liu 		if (overwrite)
3060729f52c6SZheng Liu 			ret = __blockdev_direct_IO(rw, iocb, inode,
3061729f52c6SZheng Liu 						 inode->i_sb->s_bdev, iov,
3062729f52c6SZheng Liu 						 offset, nr_segs,
3063729f52c6SZheng Liu 						 ext4_get_block_write_nolock,
3064729f52c6SZheng Liu 						 ext4_end_io_dio,
3065729f52c6SZheng Liu 						 NULL,
3066729f52c6SZheng Liu 						 0);
3067729f52c6SZheng Liu 		else
3068aacfc19cSChristoph Hellwig 			ret = __blockdev_direct_IO(rw, iocb, inode,
30694c0425ffSMingming Cao 						 inode->i_sb->s_bdev, iov,
30704c0425ffSMingming Cao 						 offset, nr_segs,
3071c7064ef1SJiaying Zhang 						 ext4_get_block_write,
3072aacfc19cSChristoph Hellwig 						 ext4_end_io_dio,
3073aacfc19cSChristoph Hellwig 						 NULL,
307493ef8541SJeff Moyer 						 DIO_LOCKING);
30758d5d02e6SMingming Cao 		if (iocb->private)
30768d5d02e6SMingming Cao 			EXT4_I(inode)->cur_aio_dio = NULL;
30778d5d02e6SMingming Cao 		/*
30788d5d02e6SMingming Cao 		 * The io_end structure takes a reference to the inode;
30798d5d02e6SMingming Cao 		 * that structure needs to be destroyed and the
30808d5d02e6SMingming Cao 		 * reference to the inode needs to be dropped when the IO
30818d5d02e6SMingming Cao 		 * is complete, even for a 0-byte write or a failed write.
30828d5d02e6SMingming Cao 		 *
30838d5d02e6SMingming Cao 		 * In the successful AIO DIO case, the io_end structure will be
30848d5d02e6SMingming Cao 		 * destroyed and the reference to the inode will be dropped
30858d5d02e6SMingming Cao 		 * after the end_io callback function is called.
30868d5d02e6SMingming Cao 		 *
30878d5d02e6SMingming Cao 		 * In the 0-byte write or error case, since the
30888d5d02e6SMingming Cao 		 * VFS direct IO code won't invoke the end_io callback,
30898d5d02e6SMingming Cao 		 * we need to free the io_end structure here.
30908d5d02e6SMingming Cao 		 */
30918d5d02e6SMingming Cao 		if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
30928d5d02e6SMingming Cao 			ext4_free_io_end(iocb->private);
30938d5d02e6SMingming Cao 			iocb->private = NULL;
3094729f52c6SZheng Liu 		} else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
30955f524950SMingming 						EXT4_STATE_DIO_UNWRITTEN)) {
3096109f5565SMingming 			int err;
30978d5d02e6SMingming Cao 			/*
30988d5d02e6SMingming Cao 			 * For the non-AIO case, since the IO is already
309925985edcSLucas De Marchi 			 * completed, we can do the conversion right here.
31008d5d02e6SMingming Cao 			 */
3101109f5565SMingming 			err = ext4_convert_unwritten_extents(inode,
31028d5d02e6SMingming Cao 							     offset, ret);
3103109f5565SMingming 			if (err < 0)
3104109f5565SMingming 				ret = err;
310519f5fb7aSTheodore Ts'o 			ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3106109f5565SMingming 		}
31074bd809dbSZheng Liu 
31084bd809dbSZheng Liu 	retake_lock:
31094bd809dbSZheng Liu 		/* take the i_mutex lock again if we did an overwrite dio */
31104bd809dbSZheng Liu 		if (overwrite) {
31114bd809dbSZheng Liu 			up_read(&EXT4_I(inode)->i_data_sem);
31124bd809dbSZheng Liu 			mutex_lock(&inode->i_mutex);
31134bd809dbSZheng Liu 		}
31144bd809dbSZheng Liu 
31154c0425ffSMingming Cao 		return ret;
31164c0425ffSMingming Cao 	}
31178d5d02e6SMingming Cao 
31188d5d02e6SMingming Cao 	/* For writes extending the end of file, we fall back to the old way */
31194c0425ffSMingming Cao 	return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
31204c0425ffSMingming Cao }
31214c0425ffSMingming Cao 
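/*
 * A standalone userspace sketch (not part of this file, compiled
 * separately) of driving the path above: an O_DIRECT write whose
 * buffer, offset and length are aligned to the logical block size.
 * The file name and the 4096-byte alignment are illustrative
 * assumptions.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd = open("testfile", O_WRONLY | O_CREAT | O_DIRECT, 0644);

	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 0xaa, 4096);
	/* extends the file: final_size > i_size, falls back to the old way */
	if (pwrite(fd, buf, 4096, 0) != 4096)
		return 1;
	/* pure overwrite: final_size <= i_size, takes the fast path above */
	if (pwrite(fd, buf, 4096, 0) != 4096)
		return 1;
	free(buf);
	return close(fd);
}
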
31224c0425ffSMingming Cao static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
31234c0425ffSMingming Cao 			      const struct iovec *iov, loff_t offset,
31244c0425ffSMingming Cao 			      unsigned long nr_segs)
31254c0425ffSMingming Cao {
31264c0425ffSMingming Cao 	struct file *file = iocb->ki_filp;
31274c0425ffSMingming Cao 	struct inode *inode = file->f_mapping->host;
31280562e0baSJiaying Zhang 	ssize_t ret;
31294c0425ffSMingming Cao 
313084ebd795STheodore Ts'o 	/*
313184ebd795STheodore Ts'o 	 * If we are doing data journalling we don't support O_DIRECT
313284ebd795STheodore Ts'o 	 */
313384ebd795STheodore Ts'o 	if (ext4_should_journal_data(inode))
313484ebd795STheodore Ts'o 		return 0;
313584ebd795STheodore Ts'o 
31360562e0baSJiaying Zhang 	trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
313712e9b892SDmitry Monakhov 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
31380562e0baSJiaying Zhang 		ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
31390562e0baSJiaying Zhang 	else
31400562e0baSJiaying Zhang 		ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
31410562e0baSJiaying Zhang 	trace_ext4_direct_IO_exit(inode, offset,
31420562e0baSJiaying Zhang 				iov_length(iov, nr_segs), rw, ret);
31430562e0baSJiaying Zhang 	return ret;
31444c0425ffSMingming Cao }
31454c0425ffSMingming Cao 
3146ac27a0ecSDave Kleikamp /*
3147617ba13bSMingming Cao  * Pages can be marked dirty completely asynchronously from ext4's journalling
3148ac27a0ecSDave Kleikamp  * activity, by filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
3149ac27a0ecSDave Kleikamp  * much here because ->set_page_dirty is called under VFS locks.  The page is
3150ac27a0ecSDave Kleikamp  * not necessarily locked.
3151ac27a0ecSDave Kleikamp  *
3152ac27a0ecSDave Kleikamp  * We cannot just dirty the page and leave attached buffers clean, because the
3153ac27a0ecSDave Kleikamp  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
3154ac27a0ecSDave Kleikamp  * or jbddirty because all the journalling code will explode.
3155ac27a0ecSDave Kleikamp  *
3156ac27a0ecSDave Kleikamp  * So what we do is to mark the page "pending dirty" and next time writepage
3157ac27a0ecSDave Kleikamp  * is called, propagate that into the buffers appropriately.
3158ac27a0ecSDave Kleikamp  */
3159617ba13bSMingming Cao static int ext4_journalled_set_page_dirty(struct page *page)
3160ac27a0ecSDave Kleikamp {
3161ac27a0ecSDave Kleikamp 	SetPageChecked(page);
3162ac27a0ecSDave Kleikamp 	return __set_page_dirty_nobuffers(page);
3163ac27a0ecSDave Kleikamp }
3164ac27a0ecSDave Kleikamp 
3165617ba13bSMingming Cao static const struct address_space_operations ext4_ordered_aops = {
3166617ba13bSMingming Cao 	.readpage		= ext4_readpage,
3167617ba13bSMingming Cao 	.readpages		= ext4_readpages,
316843ce1d23SAneesh Kumar K.V 	.writepage		= ext4_writepage,
3169bfc1af65SNick Piggin 	.write_begin		= ext4_write_begin,
3170bfc1af65SNick Piggin 	.write_end		= ext4_ordered_write_end,
3171617ba13bSMingming Cao 	.bmap			= ext4_bmap,
3172617ba13bSMingming Cao 	.invalidatepage		= ext4_invalidatepage,
3173617ba13bSMingming Cao 	.releasepage		= ext4_releasepage,
3174617ba13bSMingming Cao 	.direct_IO		= ext4_direct_IO,
3175ac27a0ecSDave Kleikamp 	.migratepage		= buffer_migrate_page,
31768ab22b9aSHisashi Hifumi 	.is_partially_uptodate  = block_is_partially_uptodate,
3177aa261f54SAndi Kleen 	.error_remove_page	= generic_error_remove_page,
3178ac27a0ecSDave Kleikamp };
3179ac27a0ecSDave Kleikamp 
3180617ba13bSMingming Cao static const struct address_space_operations ext4_writeback_aops = {
3181617ba13bSMingming Cao 	.readpage		= ext4_readpage,
3182617ba13bSMingming Cao 	.readpages		= ext4_readpages,
318343ce1d23SAneesh Kumar K.V 	.writepage		= ext4_writepage,
3184bfc1af65SNick Piggin 	.write_begin		= ext4_write_begin,
3185bfc1af65SNick Piggin 	.write_end		= ext4_writeback_write_end,
3186617ba13bSMingming Cao 	.bmap			= ext4_bmap,
3187617ba13bSMingming Cao 	.invalidatepage		= ext4_invalidatepage,
3188617ba13bSMingming Cao 	.releasepage		= ext4_releasepage,
3189617ba13bSMingming Cao 	.direct_IO		= ext4_direct_IO,
3190ac27a0ecSDave Kleikamp 	.migratepage		= buffer_migrate_page,
31918ab22b9aSHisashi Hifumi 	.is_partially_uptodate  = block_is_partially_uptodate,
3192aa261f54SAndi Kleen 	.error_remove_page	= generic_error_remove_page,
3193ac27a0ecSDave Kleikamp };
3194ac27a0ecSDave Kleikamp 
3195617ba13bSMingming Cao static const struct address_space_operations ext4_journalled_aops = {
3196617ba13bSMingming Cao 	.readpage		= ext4_readpage,
3197617ba13bSMingming Cao 	.readpages		= ext4_readpages,
319843ce1d23SAneesh Kumar K.V 	.writepage		= ext4_writepage,
3199bfc1af65SNick Piggin 	.write_begin		= ext4_write_begin,
3200bfc1af65SNick Piggin 	.write_end		= ext4_journalled_write_end,
3201617ba13bSMingming Cao 	.set_page_dirty		= ext4_journalled_set_page_dirty,
3202617ba13bSMingming Cao 	.bmap			= ext4_bmap,
3203617ba13bSMingming Cao 	.invalidatepage		= ext4_invalidatepage,
3204617ba13bSMingming Cao 	.releasepage		= ext4_releasepage,
320584ebd795STheodore Ts'o 	.direct_IO		= ext4_direct_IO,
32068ab22b9aSHisashi Hifumi 	.is_partially_uptodate  = block_is_partially_uptodate,
3207aa261f54SAndi Kleen 	.error_remove_page	= generic_error_remove_page,
3208ac27a0ecSDave Kleikamp };
3209ac27a0ecSDave Kleikamp 
321064769240SAlex Tomas static const struct address_space_operations ext4_da_aops = {
321164769240SAlex Tomas 	.readpage		= ext4_readpage,
321264769240SAlex Tomas 	.readpages		= ext4_readpages,
321343ce1d23SAneesh Kumar K.V 	.writepage		= ext4_writepage,
321464769240SAlex Tomas 	.writepages		= ext4_da_writepages,
321564769240SAlex Tomas 	.write_begin		= ext4_da_write_begin,
321664769240SAlex Tomas 	.write_end		= ext4_da_write_end,
321764769240SAlex Tomas 	.bmap			= ext4_bmap,
321864769240SAlex Tomas 	.invalidatepage		= ext4_da_invalidatepage,
321964769240SAlex Tomas 	.releasepage		= ext4_releasepage,
322064769240SAlex Tomas 	.direct_IO		= ext4_direct_IO,
322164769240SAlex Tomas 	.migratepage		= buffer_migrate_page,
32228ab22b9aSHisashi Hifumi 	.is_partially_uptodate  = block_is_partially_uptodate,
3223aa261f54SAndi Kleen 	.error_remove_page	= generic_error_remove_page,
322464769240SAlex Tomas };
322564769240SAlex Tomas 
3226617ba13bSMingming Cao void ext4_set_aops(struct inode *inode)
3227ac27a0ecSDave Kleikamp {
32283d2b1582SLukas Czerner 	switch (ext4_inode_journal_mode(inode)) {
32293d2b1582SLukas Czerner 	case EXT4_INODE_ORDERED_DATA_MODE:
32303d2b1582SLukas Czerner 		if (test_opt(inode->i_sb, DELALLOC))
3231cd1aac32SAneesh Kumar K.V 			inode->i_mapping->a_ops = &ext4_da_aops;
3232ac27a0ecSDave Kleikamp 		else
32333d2b1582SLukas Czerner 			inode->i_mapping->a_ops = &ext4_ordered_aops;
32343d2b1582SLukas Czerner 		break;
32353d2b1582SLukas Czerner 	case EXT4_INODE_WRITEBACK_DATA_MODE:
32363d2b1582SLukas Czerner 		if (test_opt(inode->i_sb, DELALLOC))
32373d2b1582SLukas Czerner 			inode->i_mapping->a_ops = &ext4_da_aops;
32383d2b1582SLukas Czerner 		else
32393d2b1582SLukas Czerner 			inode->i_mapping->a_ops = &ext4_writeback_aops;
32403d2b1582SLukas Czerner 		break;
32413d2b1582SLukas Czerner 	case EXT4_INODE_JOURNAL_DATA_MODE:
3242617ba13bSMingming Cao 		inode->i_mapping->a_ops = &ext4_journalled_aops;
32433d2b1582SLukas Czerner 		break;
32443d2b1582SLukas Czerner 	default:
32453d2b1582SLukas Czerner 		BUG();
32463d2b1582SLukas Czerner 	}
3247ac27a0ecSDave Kleikamp }
3248ac27a0ecSDave Kleikamp 
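/*
 * A standalone userspace sketch (not part of this file): one of the
 * inputs to the switch above -- the per-inode data journalling flag
 * (the mount-wide data= option is the other) -- can be inspected with
 * the FS_IOC_GETFLAGS ioctl; FS_JOURNAL_DATA_FL corresponds to
 * EXT4_INODE_JOURNAL_DATA.  The file name is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	int flags, fd = open("testfile", O_RDONLY);

	if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
		return 1;
	printf("data journalling: %s\n",
	       (flags & FS_JOURNAL_DATA_FL) ? "on" : "off");
	return close(fd);
}
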
32494e96b2dbSAllison Henderson 
32504e96b2dbSAllison Henderson /*
32514e96b2dbSAllison Henderson  * ext4_discard_partial_page_buffers()
32524e96b2dbSAllison Henderson  * Wrapper function for ext4_discard_partial_page_buffers_no_lock.
32534e96b2dbSAllison Henderson  * This function finds and locks the page containing the offset
32544e96b2dbSAllison Henderson  * "from" and passes it to ext4_discard_partial_page_buffers_no_lock.
32554e96b2dbSAllison Henderson  * Calling functions that already have the page locked should call
32564e96b2dbSAllison Henderson  * ext4_discard_partial_page_buffers_no_lock directly.
32574e96b2dbSAllison Henderson  */
32584e96b2dbSAllison Henderson int ext4_discard_partial_page_buffers(handle_t *handle,
32594e96b2dbSAllison Henderson 		struct address_space *mapping, loff_t from,
32604e96b2dbSAllison Henderson 		loff_t length, int flags)
32614e96b2dbSAllison Henderson {
32624e96b2dbSAllison Henderson 	struct inode *inode = mapping->host;
32634e96b2dbSAllison Henderson 	struct page *page;
32644e96b2dbSAllison Henderson 	int err = 0;
32654e96b2dbSAllison Henderson 
32664e96b2dbSAllison Henderson 	page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
32674e96b2dbSAllison Henderson 				   mapping_gfp_mask(mapping) & ~__GFP_FS);
32684e96b2dbSAllison Henderson 	if (!page)
32695129d05fSYongqiang Yang 		return -ENOMEM;
32704e96b2dbSAllison Henderson 
32714e96b2dbSAllison Henderson 	err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page,
32724e96b2dbSAllison Henderson 		from, length, flags);
32734e96b2dbSAllison Henderson 
32744e96b2dbSAllison Henderson 	unlock_page(page);
32754e96b2dbSAllison Henderson 	page_cache_release(page);
32764e96b2dbSAllison Henderson 	return err;
32774e96b2dbSAllison Henderson }
32784e96b2dbSAllison Henderson 
32794e96b2dbSAllison Henderson /*
32804e96b2dbSAllison Henderson  * ext4_discard_partial_page_buffers_no_lock()
32814e96b2dbSAllison Henderson  * Zeros a page range of length 'length' starting from offset 'from'.
32824e96b2dbSAllison Henderson  * Buffer heads that correspond to the block aligned regions of the
32834e96b2dbSAllison Henderson  * zeroed range will be unmapped.  Non-block-aligned regions
32844e96b2dbSAllison Henderson  * will have the corresponding buffer head mapped if needed so that
32854e96b2dbSAllison Henderson  * that region of the page can be updated with the partial zero out.
32864e96b2dbSAllison Henderson  *
32874e96b2dbSAllison Henderson  * This function assumes that the page has already been locked.  The
32884e96b2dbSAllison Henderson  * range to be discarded must be contained within the given page.
32894e96b2dbSAllison Henderson  * If the specified range exceeds the end of the page it will be shortened
32904e96b2dbSAllison Henderson  * to the end of the page that corresponds to 'from'.  This function is
32914e96b2dbSAllison Henderson  * appropriate for updating a page and its buffer heads to be unmapped and
32924e96b2dbSAllison Henderson  * zeroed for blocks that have been either released, or are going to be
32934e96b2dbSAllison Henderson  * released.
32944e96b2dbSAllison Henderson  *
32954e96b2dbSAllison Henderson  * handle: The journal handle
32964e96b2dbSAllison Henderson  * inode:  The file's inode
32974e96b2dbSAllison Henderson  * page:   A locked page that contains the offset "from"
32984e96b2dbSAllison Henderson  * from:   The starting byte offset (from the beginning of the file)
32994e96b2dbSAllison Henderson  *         to begin discarding
33004e96b2dbSAllison Henderson  * length: The number of bytes to discard
33014e96b2dbSAllison Henderson  * flags:  Optional flags that may be used:
33024e96b2dbSAllison Henderson  *
33034e96b2dbSAllison Henderson  *         EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED
33044e96b2dbSAllison Henderson  *         Only zero the regions of the page whose buffer heads
33054e96b2dbSAllison Henderson  *         have already been unmapped.  This flag is appropriate
33064e96b2dbSAllison Henderson  *         for updating the contents of a page whose blocks may
33074e96b2dbSAllison Henderson  *         have already been released, and we only want to zero
33084e96b2dbSAllison Henderson  *         out the regions that correspond to those released blocks.
33094e96b2dbSAllison Henderson  *
33104e96b2dbSAllison Henderson  * Returns zero on success or negative on failure.
33114e96b2dbSAllison Henderson  */
33125f163cc7SEric Sandeen static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
33134e96b2dbSAllison Henderson 		struct inode *inode, struct page *page, loff_t from,
33144e96b2dbSAllison Henderson 		loff_t length, int flags)
33154e96b2dbSAllison Henderson {
33164e96b2dbSAllison Henderson 	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
33174e96b2dbSAllison Henderson 	unsigned int offset = from & (PAGE_CACHE_SIZE-1);
33184e96b2dbSAllison Henderson 	unsigned int blocksize, max, pos;
33194e96b2dbSAllison Henderson 	ext4_lblk_t iblock;
33204e96b2dbSAllison Henderson 	struct buffer_head *bh;
33214e96b2dbSAllison Henderson 	int err = 0;
33224e96b2dbSAllison Henderson 
33234e96b2dbSAllison Henderson 	blocksize = inode->i_sb->s_blocksize;
33244e96b2dbSAllison Henderson 	max = PAGE_CACHE_SIZE - offset;
33254e96b2dbSAllison Henderson 
33264e96b2dbSAllison Henderson 	if (index != page->index)
33274e96b2dbSAllison Henderson 		return -EINVAL;
33284e96b2dbSAllison Henderson 
33294e96b2dbSAllison Henderson 	/*
33304e96b2dbSAllison Henderson 	 * Clamp 'length' so that the range does not extend
33314e96b2dbSAllison Henderson 	 * past the end of the page
33324e96b2dbSAllison Henderson 	 */
33334e96b2dbSAllison Henderson 	if (length > max || length < 0)
33344e96b2dbSAllison Henderson 		length = max;
33354e96b2dbSAllison Henderson 
33364e96b2dbSAllison Henderson 	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
33374e96b2dbSAllison Henderson 
3338093e6e36SYongqiang Yang 	if (!page_has_buffers(page))
33394e96b2dbSAllison Henderson 		create_empty_buffers(page, blocksize, 0);
33404e96b2dbSAllison Henderson 
33414e96b2dbSAllison Henderson 	/* Find the buffer that contains "offset" */
33424e96b2dbSAllison Henderson 	bh = page_buffers(page);
33434e96b2dbSAllison Henderson 	pos = blocksize;
33444e96b2dbSAllison Henderson 	while (offset >= pos) {
33454e96b2dbSAllison Henderson 		bh = bh->b_this_page;
33464e96b2dbSAllison Henderson 		iblock++;
33474e96b2dbSAllison Henderson 		pos += blocksize;
33484e96b2dbSAllison Henderson 	}
33494e96b2dbSAllison Henderson 
33504e96b2dbSAllison Henderson 	pos = offset;
33514e96b2dbSAllison Henderson 	while (pos < offset + length) {
3352e260daf2SYongqiang Yang 		unsigned int end_of_block, range_to_discard;
3353e260daf2SYongqiang Yang 
33544e96b2dbSAllison Henderson 		err = 0;
33554e96b2dbSAllison Henderson 
33564e96b2dbSAllison Henderson 		/* The length of space left to zero and unmap */
33574e96b2dbSAllison Henderson 		range_to_discard = offset + length - pos;
33584e96b2dbSAllison Henderson 
33594e96b2dbSAllison Henderson 		/* The length of space until the end of the block */
33604e96b2dbSAllison Henderson 		end_of_block = blocksize - (pos & (blocksize-1));
33614e96b2dbSAllison Henderson 
33624e96b2dbSAllison Henderson 		/*
33634e96b2dbSAllison Henderson 		 * Do not unmap or zero past end of block
33644e96b2dbSAllison Henderson 		 * for this buffer head
33654e96b2dbSAllison Henderson 		 */
33664e96b2dbSAllison Henderson 		if (range_to_discard > end_of_block)
33674e96b2dbSAllison Henderson 			range_to_discard = end_of_block;
33684e96b2dbSAllison Henderson 
33704e96b2dbSAllison Henderson 		/*
33714e96b2dbSAllison Henderson 		 * Skip this buffer head if we are only zeroing unmapped
33724e96b2dbSAllison Henderson 		 * regions of the page
33734e96b2dbSAllison Henderson 		 */
33744e96b2dbSAllison Henderson 		if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
33754e96b2dbSAllison Henderson 			buffer_mapped(bh))
33764e96b2dbSAllison Henderson 				goto next;
33774e96b2dbSAllison Henderson 
33784e96b2dbSAllison Henderson 		/* If the range is block aligned, unmap */
33794e96b2dbSAllison Henderson 		if (range_to_discard == blocksize) {
33804e96b2dbSAllison Henderson 			clear_buffer_dirty(bh);
33814e96b2dbSAllison Henderson 			bh->b_bdev = NULL;
33824e96b2dbSAllison Henderson 			clear_buffer_mapped(bh);
33834e96b2dbSAllison Henderson 			clear_buffer_req(bh);
33844e96b2dbSAllison Henderson 			clear_buffer_new(bh);
33854e96b2dbSAllison Henderson 			clear_buffer_delay(bh);
33864e96b2dbSAllison Henderson 			clear_buffer_unwritten(bh);
33874e96b2dbSAllison Henderson 			clear_buffer_uptodate(bh);
33884e96b2dbSAllison Henderson 			zero_user(page, pos, range_to_discard);
33894e96b2dbSAllison Henderson 			BUFFER_TRACE(bh, "Buffer discarded");
33904e96b2dbSAllison Henderson 			goto next;
33914e96b2dbSAllison Henderson 		}
33924e96b2dbSAllison Henderson 
33934e96b2dbSAllison Henderson 		/*
33944e96b2dbSAllison Henderson 		 * If this block is not completely contained in the range
33954e96b2dbSAllison Henderson 		 * to be discarded, then it is not going to be released. Because
33964e96b2dbSAllison Henderson 		 * we need to keep this block, we need to make sure this part
33974e96b2dbSAllison Henderson 		 * of the page is uptodate before we modify it by writing
33984e96b2dbSAllison Henderson 		 * partial zeros on it.
33994e96b2dbSAllison Henderson 		 */
34004e96b2dbSAllison Henderson 		if (!buffer_mapped(bh)) {
34014e96b2dbSAllison Henderson 			/*
34024e96b2dbSAllison Henderson 			 * Buffer head must be mapped before we can read
34034e96b2dbSAllison Henderson 			 * from the block
34044e96b2dbSAllison Henderson 			 */
34054e96b2dbSAllison Henderson 			BUFFER_TRACE(bh, "unmapped");
34064e96b2dbSAllison Henderson 			ext4_get_block(inode, iblock, bh, 0);
34074e96b2dbSAllison Henderson 			/* unmapped? It's a hole - nothing to do */
34084e96b2dbSAllison Henderson 			if (!buffer_mapped(bh)) {
34094e96b2dbSAllison Henderson 				BUFFER_TRACE(bh, "still unmapped");
34104e96b2dbSAllison Henderson 				goto next;
34114e96b2dbSAllison Henderson 			}
34124e96b2dbSAllison Henderson 		}
34134e96b2dbSAllison Henderson 
34144e96b2dbSAllison Henderson 		/* Ok, it's mapped. Make sure it's up-to-date */
34154e96b2dbSAllison Henderson 		if (PageUptodate(page))
34164e96b2dbSAllison Henderson 			set_buffer_uptodate(bh);
34174e96b2dbSAllison Henderson 
34184e96b2dbSAllison Henderson 		if (!buffer_uptodate(bh)) {
34194e96b2dbSAllison Henderson 			err = -EIO;
34204e96b2dbSAllison Henderson 			ll_rw_block(READ, 1, &bh);
34214e96b2dbSAllison Henderson 			wait_on_buffer(bh);
34224e96b2dbSAllison Henderson 			/* Uhhuh. Read error. Complain and punt.*/
34234e96b2dbSAllison Henderson 			if (!buffer_uptodate(bh))
34244e96b2dbSAllison Henderson 				goto next;
34254e96b2dbSAllison Henderson 		}
34264e96b2dbSAllison Henderson 
34274e96b2dbSAllison Henderson 		if (ext4_should_journal_data(inode)) {
34284e96b2dbSAllison Henderson 			BUFFER_TRACE(bh, "get write access");
34294e96b2dbSAllison Henderson 			err = ext4_journal_get_write_access(handle, bh);
34304e96b2dbSAllison Henderson 			if (err)
34314e96b2dbSAllison Henderson 				goto next;
34324e96b2dbSAllison Henderson 		}
34334e96b2dbSAllison Henderson 
34344e96b2dbSAllison Henderson 		zero_user(page, pos, range_to_discard);
34354e96b2dbSAllison Henderson 
34364e96b2dbSAllison Henderson 		err = 0;
34374e96b2dbSAllison Henderson 		if (ext4_should_journal_data(inode)) {
34384e96b2dbSAllison Henderson 			err = ext4_handle_dirty_metadata(handle, inode, bh);
3439decbd919STheodore Ts'o 		} else
34404e96b2dbSAllison Henderson 			mark_buffer_dirty(bh);
34414e96b2dbSAllison Henderson 
34424e96b2dbSAllison Henderson 		BUFFER_TRACE(bh, "Partial buffer zeroed");
34434e96b2dbSAllison Henderson next:
34444e96b2dbSAllison Henderson 		bh = bh->b_this_page;
34454e96b2dbSAllison Henderson 		iblock++;
34464e96b2dbSAllison Henderson 		pos += range_to_discard;
34474e96b2dbSAllison Henderson 	}
34484e96b2dbSAllison Henderson 
34494e96b2dbSAllison Henderson 	return err;
34504e96b2dbSAllison Henderson }
34514e96b2dbSAllison Henderson 
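/*
 * A standalone sketch (not part of this file) of the walk above: carve
 * a byte range inside a page into per-block chunks, unmapping fully
 * covered blocks and only zeroing partial ones.  The sizes and the
 * sample range are assumptions.
 */
#include <stdio.h>

int main(void)
{
	unsigned int blocksize = 1024, page_size = 4096;
	unsigned int offset = 700, length = 2000;	/* sample range */
	unsigned int max = page_size - offset, pos;

	if (length > max)	/* clamp to the end of the page */
		length = max;
	for (pos = offset; pos < offset + length; ) {
		unsigned int range = offset + length - pos;
		unsigned int end_of_block =
			blocksize - (pos & (blocksize - 1));

		if (range > end_of_block)
			range = end_of_block;
		printf("%s [%u, %u)\n",
		       range == blocksize ? "unmap" : "zero ",
		       pos, pos + range);
		pos += range;
	}
	return 0;
}
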
345291ef4cafSDuane Griffin int ext4_can_truncate(struct inode *inode)
345391ef4cafSDuane Griffin {
345491ef4cafSDuane Griffin 	if (S_ISREG(inode->i_mode))
345591ef4cafSDuane Griffin 		return 1;
345691ef4cafSDuane Griffin 	if (S_ISDIR(inode->i_mode))
345791ef4cafSDuane Griffin 		return 1;
345891ef4cafSDuane Griffin 	if (S_ISLNK(inode->i_mode))
345991ef4cafSDuane Griffin 		return !ext4_inode_is_fast_symlink(inode);
346091ef4cafSDuane Griffin 	return 0;
346191ef4cafSDuane Griffin }
346291ef4cafSDuane Griffin 
3463ac27a0ecSDave Kleikamp /*
3464a4bb6b64SAllison Henderson  * ext4_punch_hole: punches a hole in a file by releasing the blocks
3465a4bb6b64SAllison Henderson  * associated with the given offset and length
3466a4bb6b64SAllison Henderson  *
3467a4bb6b64SAllison Henderson  * @file:   The file handle
3468a4bb6b64SAllison Henderson  * @offset: The offset where the hole will begin
3469a4bb6b64SAllison Henderson  * @length: The length of the hole
3470a4bb6b64SAllison Henderson  *
3471a4bb6b64SAllison Henderson  * Returns: 0 on success or negative on failure
3472a4bb6b64SAllison Henderson  */
3473a4bb6b64SAllison Henderson 
3474a4bb6b64SAllison Henderson int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
3475a4bb6b64SAllison Henderson {
3476a4bb6b64SAllison Henderson 	struct inode *inode = file->f_path.dentry->d_inode;
3477a4bb6b64SAllison Henderson 	if (!S_ISREG(inode->i_mode))
347873355192SAllison Henderson 		return -EOPNOTSUPP;
3479a4bb6b64SAllison Henderson 
3480a4bb6b64SAllison Henderson 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
3481a4bb6b64SAllison Henderson 		/* TODO: Add support for non extent hole punching */
348273355192SAllison Henderson 		return -EOPNOTSUPP;
3483a4bb6b64SAllison Henderson 	}
3484a4bb6b64SAllison Henderson 
3485bab08ab9STheodore Ts'o 	if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) {
3486bab08ab9STheodore Ts'o 		/* TODO: Add support for bigalloc file systems */
348773355192SAllison Henderson 		return -EOPNOTSUPP;
3488bab08ab9STheodore Ts'o 	}
3489bab08ab9STheodore Ts'o 
3490a4bb6b64SAllison Henderson 	return ext4_ext_punch_hole(file, offset, length);
3491a4bb6b64SAllison Henderson }
3492a4bb6b64SAllison Henderson 
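/*
 * A standalone userspace sketch (not part of this file): hole punching
 * is normally reached through fallocate(2) with FALLOC_FL_PUNCH_HOLE,
 * which must be combined with FALLOC_FL_KEEP_SIZE.  The file name and
 * the offsets are assumptions.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
	int fd = open("testfile", O_WRONLY);

	if (fd < 0)
		return 1;
	/* release the blocks backing a 64KiB range at offset 1MiB */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      1024 * 1024, 64 * 1024) < 0)
		return 1;
	return close(fd);
}
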
3493a4bb6b64SAllison Henderson /*
3494617ba13bSMingming Cao  * ext4_truncate()
3495ac27a0ecSDave Kleikamp  *
3496617ba13bSMingming Cao  * We block out ext4_get_block() block instantiations across the entire
3497617ba13bSMingming Cao  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
3498ac27a0ecSDave Kleikamp  * simultaneously on behalf of the same inode.
3499ac27a0ecSDave Kleikamp  *
350042b2aa86SJustin P. Mattock  * As we work through the truncate and commit bits of it to the journal there
3501ac27a0ecSDave Kleikamp  * is one core guiding principle: the file's tree must always be consistent on
3502ac27a0ecSDave Kleikamp  * disk.  We must be able to restart the truncate after a crash.
3503ac27a0ecSDave Kleikamp  *
3504ac27a0ecSDave Kleikamp  * The file's tree may be transiently inconsistent in memory (although it
3505ac27a0ecSDave Kleikamp  * probably isn't), but whenever we close off and commit a journal transaction,
3506ac27a0ecSDave Kleikamp  * the contents of (the filesystem + the journal) must be consistent and
3507ac27a0ecSDave Kleikamp  * restartable.  It's pretty simple, really: bottom up, right to left (although
3508ac27a0ecSDave Kleikamp  * left-to-right works OK too).
3509ac27a0ecSDave Kleikamp  *
3510ac27a0ecSDave Kleikamp  * Note that at recovery time, journal replay occurs *before* the restart of
3511ac27a0ecSDave Kleikamp  * truncate against the orphan inode list.
3512ac27a0ecSDave Kleikamp  *
3513ac27a0ecSDave Kleikamp  * The committed inode has the new, desired i_size (which is the same as
3514617ba13bSMingming Cao  * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
3515ac27a0ecSDave Kleikamp  * that this inode's truncate did not complete and it will again call
3516617ba13bSMingming Cao  * ext4_truncate() to have another go.  So there will be instantiated blocks
3517617ba13bSMingming Cao  * to the right of the truncation point in a crashed ext4 filesystem.  But
3518ac27a0ecSDave Kleikamp  * that's fine - as long as they are linked from the inode, the post-crash
3519617ba13bSMingming Cao  * ext4_truncate() run will find them and release them.
3520ac27a0ecSDave Kleikamp  */
3521617ba13bSMingming Cao void ext4_truncate(struct inode *inode)
3522ac27a0ecSDave Kleikamp {
35230562e0baSJiaying Zhang 	trace_ext4_truncate_enter(inode);
35240562e0baSJiaying Zhang 
352591ef4cafSDuane Griffin 	if (!ext4_can_truncate(inode))
3526ac27a0ecSDave Kleikamp 		return;
3527ac27a0ecSDave Kleikamp 
352812e9b892SDmitry Monakhov 	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3529c8d46e41SJiaying Zhang 
35305534fb5bSTheodore Ts'o 	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
353119f5fb7aSTheodore Ts'o 		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
35327d8f9f7dSTheodore Ts'o 
3533ff9893dcSAmir Goldstein 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3534cf108bcaSJan Kara 		ext4_ext_truncate(inode);
3535ff9893dcSAmir Goldstein 	else
3536ff9893dcSAmir Goldstein 		ext4_ind_truncate(inode);
3537a86c6181SAlex Tomas 
35380562e0baSJiaying Zhang 	trace_ext4_truncate_exit(inode);
3539ac27a0ecSDave Kleikamp }
3540ac27a0ecSDave Kleikamp 
3541ac27a0ecSDave Kleikamp /*
3542617ba13bSMingming Cao  * ext4_get_inode_loc returns with an extra refcount against the inode's
3543ac27a0ecSDave Kleikamp  * underlying buffer_head on success. If 'in_mem' is true, we have all
3544ac27a0ecSDave Kleikamp  * data in memory that is needed to recreate the on-disk version of this
3545ac27a0ecSDave Kleikamp  * inode.
3546ac27a0ecSDave Kleikamp  */
3547617ba13bSMingming Cao static int __ext4_get_inode_loc(struct inode *inode,
3548617ba13bSMingming Cao 				struct ext4_iloc *iloc, int in_mem)
3549ac27a0ecSDave Kleikamp {
3550240799cdSTheodore Ts'o 	struct ext4_group_desc	*gdp;
3551ac27a0ecSDave Kleikamp 	struct buffer_head	*bh;
3552240799cdSTheodore Ts'o 	struct super_block	*sb = inode->i_sb;
3553240799cdSTheodore Ts'o 	ext4_fsblk_t		block;
3554240799cdSTheodore Ts'o 	int			inodes_per_block, inode_offset;
3555ac27a0ecSDave Kleikamp 
35563a06d778SAneesh Kumar K.V 	iloc->bh = NULL;
3557240799cdSTheodore Ts'o 	if (!ext4_valid_inum(sb, inode->i_ino))
3558ac27a0ecSDave Kleikamp 		return -EIO;
3559ac27a0ecSDave Kleikamp 
3560240799cdSTheodore Ts'o 	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
3561240799cdSTheodore Ts'o 	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
3562240799cdSTheodore Ts'o 	if (!gdp)
3563240799cdSTheodore Ts'o 		return -EIO;
3564240799cdSTheodore Ts'o 
3565240799cdSTheodore Ts'o 	/*
3566240799cdSTheodore Ts'o 	 * Figure out the offset within the block group inode table
3567240799cdSTheodore Ts'o 	 */
356800d09882STao Ma 	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
3569240799cdSTheodore Ts'o 	inode_offset = ((inode->i_ino - 1) %
3570240799cdSTheodore Ts'o 			EXT4_INODES_PER_GROUP(sb));
3571240799cdSTheodore Ts'o 	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
3572240799cdSTheodore Ts'o 	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
3573240799cdSTheodore Ts'o 
3574240799cdSTheodore Ts'o 	bh = sb_getblk(sb, block);
3575ac27a0ecSDave Kleikamp 	if (!bh) {
3576c398eda0STheodore Ts'o 		EXT4_ERROR_INODE_BLOCK(inode, block,
3577c398eda0STheodore Ts'o 				       "unable to read itable block");
3578ac27a0ecSDave Kleikamp 		return -EIO;
3579ac27a0ecSDave Kleikamp 	}
3580ac27a0ecSDave Kleikamp 	if (!buffer_uptodate(bh)) {
3581ac27a0ecSDave Kleikamp 		lock_buffer(bh);
35829c83a923SHidehiro Kawai 
35839c83a923SHidehiro Kawai 		/*
35849c83a923SHidehiro Kawai 		 * If the buffer has the write error flag, we have failed
35859c83a923SHidehiro Kawai 		 * to write out another inode in the same block.  In this
35869c83a923SHidehiro Kawai 		 * case, we don't have to read the block because we may
35879c83a923SHidehiro Kawai 		 * read the old inode data successfully.
35889c83a923SHidehiro Kawai 		 */
35899c83a923SHidehiro Kawai 		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
35909c83a923SHidehiro Kawai 			set_buffer_uptodate(bh);
35919c83a923SHidehiro Kawai 
3592ac27a0ecSDave Kleikamp 		if (buffer_uptodate(bh)) {
3593ac27a0ecSDave Kleikamp 			/* someone brought it uptodate while we waited */
3594ac27a0ecSDave Kleikamp 			unlock_buffer(bh);
3595ac27a0ecSDave Kleikamp 			goto has_buffer;
3596ac27a0ecSDave Kleikamp 		}
3597ac27a0ecSDave Kleikamp 
3598ac27a0ecSDave Kleikamp 		/*
3599ac27a0ecSDave Kleikamp 		 * If we have all information of the inode in memory and this
3600ac27a0ecSDave Kleikamp 		 * is the only valid inode in the block, we need not read the
3601ac27a0ecSDave Kleikamp 		 * block.
3602ac27a0ecSDave Kleikamp 		 */
3603ac27a0ecSDave Kleikamp 		if (in_mem) {
3604ac27a0ecSDave Kleikamp 			struct buffer_head *bitmap_bh;
3605240799cdSTheodore Ts'o 			int i, start;
3606ac27a0ecSDave Kleikamp 
3607240799cdSTheodore Ts'o 			start = inode_offset & ~(inodes_per_block - 1);
3608ac27a0ecSDave Kleikamp 
3609ac27a0ecSDave Kleikamp 			/* Is the inode bitmap in cache? */
3610240799cdSTheodore Ts'o 			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
3611ac27a0ecSDave Kleikamp 			if (!bitmap_bh)
3612ac27a0ecSDave Kleikamp 				goto make_io;
3613ac27a0ecSDave Kleikamp 
3614ac27a0ecSDave Kleikamp 			/*
3615ac27a0ecSDave Kleikamp 			 * If the inode bitmap isn't in cache then the
3616ac27a0ecSDave Kleikamp 			 * optimisation may end up performing two reads instead
3617ac27a0ecSDave Kleikamp 			 * of one, so skip it.
3618ac27a0ecSDave Kleikamp 			 */
3619ac27a0ecSDave Kleikamp 			if (!buffer_uptodate(bitmap_bh)) {
3620ac27a0ecSDave Kleikamp 				brelse(bitmap_bh);
3621ac27a0ecSDave Kleikamp 				goto make_io;
3622ac27a0ecSDave Kleikamp 			}
3623240799cdSTheodore Ts'o 			for (i = start; i < start + inodes_per_block; i++) {
3624ac27a0ecSDave Kleikamp 				if (i == inode_offset)
3625ac27a0ecSDave Kleikamp 					continue;
3626617ba13bSMingming Cao 				if (ext4_test_bit(i, bitmap_bh->b_data))
3627ac27a0ecSDave Kleikamp 					break;
3628ac27a0ecSDave Kleikamp 			}
3629ac27a0ecSDave Kleikamp 			brelse(bitmap_bh);
3630240799cdSTheodore Ts'o 			if (i == start + inodes_per_block) {
3631ac27a0ecSDave Kleikamp 				/* all other inodes are free, so skip I/O */
3632ac27a0ecSDave Kleikamp 				memset(bh->b_data, 0, bh->b_size);
3633ac27a0ecSDave Kleikamp 				set_buffer_uptodate(bh);
3634ac27a0ecSDave Kleikamp 				unlock_buffer(bh);
3635ac27a0ecSDave Kleikamp 				goto has_buffer;
3636ac27a0ecSDave Kleikamp 			}
3637ac27a0ecSDave Kleikamp 		}
3638ac27a0ecSDave Kleikamp 
3639ac27a0ecSDave Kleikamp make_io:
3640ac27a0ecSDave Kleikamp 		/*
3641240799cdSTheodore Ts'o 		 * If we need to do any I/O, try to pre-readahead extra
3642240799cdSTheodore Ts'o 		 * blocks from the inode table.
3643240799cdSTheodore Ts'o 		 */
3644240799cdSTheodore Ts'o 		if (EXT4_SB(sb)->s_inode_readahead_blks) {
3645240799cdSTheodore Ts'o 			ext4_fsblk_t b, end, table;
3646240799cdSTheodore Ts'o 			unsigned num;
3647240799cdSTheodore Ts'o 
3648240799cdSTheodore Ts'o 			table = ext4_inode_table(sb, gdp);
3649b713a5ecSTheodore Ts'o 			/* s_inode_readahead_blks is always a power of 2 */
3650240799cdSTheodore Ts'o 			b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
3651240799cdSTheodore Ts'o 			if (table > b)
3652240799cdSTheodore Ts'o 				b = table;
3653240799cdSTheodore Ts'o 			end = b + EXT4_SB(sb)->s_inode_readahead_blks;
3654240799cdSTheodore Ts'o 			num = EXT4_INODES_PER_GROUP(sb);
3655feb0ab32SDarrick J. Wong 			if (ext4_has_group_desc_csum(sb))
3656560671a0SAneesh Kumar K.V 				num -= ext4_itable_unused_count(sb, gdp);
3657240799cdSTheodore Ts'o 			table += num / inodes_per_block;
3658240799cdSTheodore Ts'o 			if (end > table)
3659240799cdSTheodore Ts'o 				end = table;
3660240799cdSTheodore Ts'o 			while (b <= end)
3661240799cdSTheodore Ts'o 				sb_breadahead(sb, b++);
3662240799cdSTheodore Ts'o 		}
3663240799cdSTheodore Ts'o 
3664240799cdSTheodore Ts'o 		/*
3665ac27a0ecSDave Kleikamp 		 * There are other valid inodes in the buffer, this inode
3666ac27a0ecSDave Kleikamp 		 * has in-inode xattrs, or we don't have this inode in memory.
3667ac27a0ecSDave Kleikamp 		 * Read the block from disk.
3668ac27a0ecSDave Kleikamp 		 */
36690562e0baSJiaying Zhang 		trace_ext4_load_inode(inode);
3670ac27a0ecSDave Kleikamp 		get_bh(bh);
3671ac27a0ecSDave Kleikamp 		bh->b_end_io = end_buffer_read_sync;
367265299a3bSChristoph Hellwig 		submit_bh(READ | REQ_META | REQ_PRIO, bh);
3673ac27a0ecSDave Kleikamp 		wait_on_buffer(bh);
3674ac27a0ecSDave Kleikamp 		if (!buffer_uptodate(bh)) {
3675c398eda0STheodore Ts'o 			EXT4_ERROR_INODE_BLOCK(inode, block,
3676c398eda0STheodore Ts'o 					       "unable to read itable block");
3677ac27a0ecSDave Kleikamp 			brelse(bh);
3678ac27a0ecSDave Kleikamp 			return -EIO;
3679ac27a0ecSDave Kleikamp 		}
3680ac27a0ecSDave Kleikamp 	}
3681ac27a0ecSDave Kleikamp has_buffer:
3682ac27a0ecSDave Kleikamp 	iloc->bh = bh;
3683ac27a0ecSDave Kleikamp 	return 0;
3684ac27a0ecSDave Kleikamp }
3685ac27a0ecSDave Kleikamp 
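/*
 * A standalone sketch (not part of this file) of the location
 * arithmetic above: map an inode number to its block group, the block
 * within that group's inode table, and the byte offset inside that
 * block.  The geometry constants are assumptions for a filesystem
 * with 4KiB blocks and 256-byte inodes.
 */
#include <stdio.h>

int main(void)
{
	unsigned long inodes_per_group = 8192;		/* assumed */
	unsigned int inode_size = 256, block_size = 4096;
	unsigned int inodes_per_block = block_size / inode_size;
	unsigned long ino = 12345;	/* inode numbers start at 1 */

	unsigned long group = (ino - 1) / inodes_per_group;
	unsigned long index = (ino - 1) % inodes_per_group;

	printf("group %lu, itable block %lu, offset %u\n",
	       group, index / inodes_per_block,
	       (unsigned int)((index % inodes_per_block) * inode_size));
	return 0;
}
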
3686617ba13bSMingming Cao int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
3687ac27a0ecSDave Kleikamp {
3688ac27a0ecSDave Kleikamp 	/* We have all inode data except xattrs in memory here. */
3689617ba13bSMingming Cao 	return __ext4_get_inode_loc(inode, iloc,
369019f5fb7aSTheodore Ts'o 		!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
3691ac27a0ecSDave Kleikamp }
3692ac27a0ecSDave Kleikamp 
3693617ba13bSMingming Cao void ext4_set_inode_flags(struct inode *inode)
3694ac27a0ecSDave Kleikamp {
3695617ba13bSMingming Cao 	unsigned int flags = EXT4_I(inode)->i_flags;
3696ac27a0ecSDave Kleikamp 
3697ac27a0ecSDave Kleikamp 	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
3698617ba13bSMingming Cao 	if (flags & EXT4_SYNC_FL)
3699ac27a0ecSDave Kleikamp 		inode->i_flags |= S_SYNC;
3700617ba13bSMingming Cao 	if (flags & EXT4_APPEND_FL)
3701ac27a0ecSDave Kleikamp 		inode->i_flags |= S_APPEND;
3702617ba13bSMingming Cao 	if (flags & EXT4_IMMUTABLE_FL)
3703ac27a0ecSDave Kleikamp 		inode->i_flags |= S_IMMUTABLE;
3704617ba13bSMingming Cao 	if (flags & EXT4_NOATIME_FL)
3705ac27a0ecSDave Kleikamp 		inode->i_flags |= S_NOATIME;
3706617ba13bSMingming Cao 	if (flags & EXT4_DIRSYNC_FL)
3707ac27a0ecSDave Kleikamp 		inode->i_flags |= S_DIRSYNC;
3708ac27a0ecSDave Kleikamp }
3709ac27a0ecSDave Kleikamp 
3710ff9ddf7eSJan Kara /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
3711ff9ddf7eSJan Kara void ext4_get_inode_flags(struct ext4_inode_info *ei)
3712ff9ddf7eSJan Kara {
371384a8dce2SDmitry Monakhov 	unsigned int vfs_fl;
371484a8dce2SDmitry Monakhov 	unsigned long old_fl, new_fl;
3715ff9ddf7eSJan Kara 
371684a8dce2SDmitry Monakhov 	do {
371784a8dce2SDmitry Monakhov 		vfs_fl = ei->vfs_inode.i_flags;
371884a8dce2SDmitry Monakhov 		old_fl = ei->i_flags;
371984a8dce2SDmitry Monakhov 		new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
372084a8dce2SDmitry Monakhov 				EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
372184a8dce2SDmitry Monakhov 				EXT4_DIRSYNC_FL);
372284a8dce2SDmitry Monakhov 		if (vfs_fl & S_SYNC)
372384a8dce2SDmitry Monakhov 			new_fl |= EXT4_SYNC_FL;
372484a8dce2SDmitry Monakhov 		if (vfs_fl & S_APPEND)
372584a8dce2SDmitry Monakhov 			new_fl |= EXT4_APPEND_FL;
372684a8dce2SDmitry Monakhov 		if (vfs_fl & S_IMMUTABLE)
372784a8dce2SDmitry Monakhov 			new_fl |= EXT4_IMMUTABLE_FL;
372884a8dce2SDmitry Monakhov 		if (vfs_fl & S_NOATIME)
372984a8dce2SDmitry Monakhov 			new_fl |= EXT4_NOATIME_FL;
373084a8dce2SDmitry Monakhov 		if (vfs_fl & S_DIRSYNC)
373184a8dce2SDmitry Monakhov 			new_fl |= EXT4_DIRSYNC_FL;
373284a8dce2SDmitry Monakhov 	} while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
3733ff9ddf7eSJan Kara }
3734de9a55b8STheodore Ts'o 
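/*
 * A standalone sketch (not part of this file) of the lockless pattern
 * above: recompute a flags word from a snapshot and retry with
 * compare-and-swap until no concurrent writer got in between.  The
 * flag values and names are illustrative.
 */
#include <stdatomic.h>
#include <stdio.h>

#define DEMO_SYNC_FL	0x1	/* assumed flag bits */
#define DEMO_APPEND_FL	0x2

static _Atomic unsigned long demo_i_flags = DEMO_APPEND_FL;

static void demo_propagate(int vfs_sync)
{
	unsigned long old_fl, new_fl;

	do {
		old_fl = atomic_load(&demo_i_flags);
		new_fl = old_fl & ~DEMO_SYNC_FL;
		if (vfs_sync)
			new_fl |= DEMO_SYNC_FL;
	} while (!atomic_compare_exchange_weak(&demo_i_flags,
					       &old_fl, new_fl));
}

int main(void)
{
	demo_propagate(1);
	printf("flags = %#lx\n", (unsigned long)atomic_load(&demo_i_flags));
	return 0;
}
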
37350fc1b451SAneesh Kumar K.V static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
37360fc1b451SAneesh Kumar K.V 				  struct ext4_inode_info *ei)
37370fc1b451SAneesh Kumar K.V {
37380fc1b451SAneesh Kumar K.V 	blkcnt_t i_blocks;
37398180a562SAneesh Kumar K.V 	struct inode *inode = &(ei->vfs_inode);
37408180a562SAneesh Kumar K.V 	struct super_block *sb = inode->i_sb;
37410fc1b451SAneesh Kumar K.V 
37420fc1b451SAneesh Kumar K.V 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
37430fc1b451SAneesh Kumar K.V 				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
37440fc1b451SAneesh Kumar K.V 		/* we are using combined 48 bit field */
37450fc1b451SAneesh Kumar K.V 		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
37460fc1b451SAneesh Kumar K.V 					le32_to_cpu(raw_inode->i_blocks_lo);
374707a03824STheodore Ts'o 		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
37488180a562SAneesh Kumar K.V 			/* i_blocks is stored in filesystem block units */
37498180a562SAneesh Kumar K.V 			return i_blocks << (inode->i_blkbits - 9);
37508180a562SAneesh Kumar K.V 		} else {
37510fc1b451SAneesh Kumar K.V 			return i_blocks;
37528180a562SAneesh Kumar K.V 		}
37530fc1b451SAneesh Kumar K.V 	} else {
37540fc1b451SAneesh Kumar K.V 		return le32_to_cpu(raw_inode->i_blocks_lo);
37550fc1b451SAneesh Kumar K.V 	}
37560fc1b451SAneesh Kumar K.V }
3757ff9ddf7eSJan Kara 
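/*
 * A standalone sketch (not part of this file) of reassembling the
 * 48-bit count above from its on-disk lo/hi halves, including the
 * huge-file case where the count is stored in filesystem blocks
 * rather than 512-byte sectors.  The sample values are assumptions.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t blocks_lo = 0x89abcdef;	/* sample on-disk halves */
	uint16_t blocks_hi = 0x0123;
	int huge_file = 1, blkbits = 12;	/* 4KiB blocks */

	uint64_t i_blocks = ((uint64_t)blocks_hi << 32) | blocks_lo;

	if (huge_file)	/* scale fs blocks to 512-byte sectors */
		i_blocks <<= blkbits - 9;
	printf("i_blocks = %llu sectors\n", (unsigned long long)i_blocks);
	return 0;
}
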
37581d1fe1eeSDavid Howells struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
3759ac27a0ecSDave Kleikamp {
3760617ba13bSMingming Cao 	struct ext4_iloc iloc;
3761617ba13bSMingming Cao 	struct ext4_inode *raw_inode;
37621d1fe1eeSDavid Howells 	struct ext4_inode_info *ei;
37631d1fe1eeSDavid Howells 	struct inode *inode;
3764b436b9beSJan Kara 	journal_t *journal = EXT4_SB(sb)->s_journal;
37651d1fe1eeSDavid Howells 	long ret;
3766ac27a0ecSDave Kleikamp 	int block;
376708cefc7aSEric W. Biederman 	uid_t i_uid;
376808cefc7aSEric W. Biederman 	gid_t i_gid;
3769ac27a0ecSDave Kleikamp 
37701d1fe1eeSDavid Howells 	inode = iget_locked(sb, ino);
37711d1fe1eeSDavid Howells 	if (!inode)
37721d1fe1eeSDavid Howells 		return ERR_PTR(-ENOMEM);
37731d1fe1eeSDavid Howells 	if (!(inode->i_state & I_NEW))
37741d1fe1eeSDavid Howells 		return inode;
37751d1fe1eeSDavid Howells 
37761d1fe1eeSDavid Howells 	ei = EXT4_I(inode);
37777dc57615SPeter Huewe 	iloc.bh = NULL;
3778ac27a0ecSDave Kleikamp 
37791d1fe1eeSDavid Howells 	ret = __ext4_get_inode_loc(inode, &iloc, 0);
37801d1fe1eeSDavid Howells 	if (ret < 0)
3781ac27a0ecSDave Kleikamp 		goto bad_inode;
3782617ba13bSMingming Cao 	raw_inode = ext4_raw_inode(&iloc);
3783814525f4SDarrick J. Wong 
3784814525f4SDarrick J. Wong 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3785814525f4SDarrick J. Wong 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
3786814525f4SDarrick J. Wong 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
3787814525f4SDarrick J. Wong 		    EXT4_INODE_SIZE(inode->i_sb)) {
3788814525f4SDarrick J. Wong 			EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
3789814525f4SDarrick J. Wong 				EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
3790814525f4SDarrick J. Wong 				EXT4_INODE_SIZE(inode->i_sb));
3791814525f4SDarrick J. Wong 			ret = -EIO;
3792814525f4SDarrick J. Wong 			goto bad_inode;
3793814525f4SDarrick J. Wong 		}
3794814525f4SDarrick J. Wong 	} else
3795814525f4SDarrick J. Wong 		ei->i_extra_isize = 0;
3796814525f4SDarrick J. Wong 
3797814525f4SDarrick J. Wong 	/* Precompute checksum seed for inode metadata */
3798814525f4SDarrick J. Wong 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3799814525f4SDarrick J. Wong 			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
3800814525f4SDarrick J. Wong 		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3801814525f4SDarrick J. Wong 		__u32 csum;
3802814525f4SDarrick J. Wong 		__le32 inum = cpu_to_le32(inode->i_ino);
3803814525f4SDarrick J. Wong 		__le32 gen = raw_inode->i_generation;
3804814525f4SDarrick J. Wong 		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
3805814525f4SDarrick J. Wong 				   sizeof(inum));
3806814525f4SDarrick J. Wong 		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
3807814525f4SDarrick J. Wong 					      sizeof(gen));
3808814525f4SDarrick J. Wong 	}
3809814525f4SDarrick J. Wong 
3810814525f4SDarrick J. Wong 	if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
3811814525f4SDarrick J. Wong 		EXT4_ERROR_INODE(inode, "checksum invalid");
3812814525f4SDarrick J. Wong 		ret = -EIO;
3813814525f4SDarrick J. Wong 		goto bad_inode;
3814814525f4SDarrick J. Wong 	}
3815814525f4SDarrick J. Wong 
3816ac27a0ecSDave Kleikamp 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
381708cefc7aSEric W. Biederman 	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
381808cefc7aSEric W. Biederman 	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
3819ac27a0ecSDave Kleikamp 	if (!(test_opt(inode->i_sb, NO_UID32))) {
382008cefc7aSEric W. Biederman 		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
382108cefc7aSEric W. Biederman 		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
3822ac27a0ecSDave Kleikamp 	}
382308cefc7aSEric W. Biederman 	i_uid_write(inode, i_uid);
382408cefc7aSEric W. Biederman 	i_gid_write(inode, i_gid);
3825bfe86848SMiklos Szeredi 	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
3826ac27a0ecSDave Kleikamp 
3827353eb83cSTheodore Ts'o 	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
3828ac27a0ecSDave Kleikamp 	ei->i_dir_start_lookup = 0;
3829ac27a0ecSDave Kleikamp 	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
3830ac27a0ecSDave Kleikamp 	/* We now have enough fields to check if the inode was active or not.
3831ac27a0ecSDave Kleikamp 	 * This is needed because nfsd might try to access dead inodes;
3832ac27a0ecSDave Kleikamp 	 * the test is the same one that e2fsck uses.
3833ac27a0ecSDave Kleikamp 	 * NeilBrown 1999oct15
3834ac27a0ecSDave Kleikamp 	 */
3835ac27a0ecSDave Kleikamp 	if (inode->i_nlink == 0) {
3836ac27a0ecSDave Kleikamp 		if (inode->i_mode == 0 ||
3837617ba13bSMingming Cao 		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
3838ac27a0ecSDave Kleikamp 			/* this inode is deleted */
38391d1fe1eeSDavid Howells 			ret = -ESTALE;
3840ac27a0ecSDave Kleikamp 			goto bad_inode;
3841ac27a0ecSDave Kleikamp 		}
3842ac27a0ecSDave Kleikamp 		/* The only unlinked inodes we let through here have
3843ac27a0ecSDave Kleikamp 		 * valid i_mode and are being read by the orphan
3844ac27a0ecSDave Kleikamp 		 * recovery code: that's fine, we're about to complete
3845ac27a0ecSDave Kleikamp 		 * the process of deleting those. */
3846ac27a0ecSDave Kleikamp 	}
3847ac27a0ecSDave Kleikamp 	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
38480fc1b451SAneesh Kumar K.V 	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
38497973c0c1SAneesh Kumar K.V 	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
3850a9e81742STheodore Ts'o 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
3851a1ddeb7eSBadari Pulavarty 		ei->i_file_acl |=
3852a1ddeb7eSBadari Pulavarty 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
3853a48380f7SAneesh Kumar K.V 	inode->i_size = ext4_isize(raw_inode);
3854ac27a0ecSDave Kleikamp 	ei->i_disksize = inode->i_size;
3855a9e7f447SDmitry Monakhov #ifdef CONFIG_QUOTA
3856a9e7f447SDmitry Monakhov 	ei->i_reserved_quota = 0;
3857a9e7f447SDmitry Monakhov #endif
3858ac27a0ecSDave Kleikamp 	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
3859ac27a0ecSDave Kleikamp 	ei->i_block_group = iloc.block_group;
3860a4912123STheodore Ts'o 	ei->i_last_alloc_group = ~0;
3861ac27a0ecSDave Kleikamp 	/*
3862ac27a0ecSDave Kleikamp 	 * NOTE! The in-memory inode i_data array is in little-endian order
3863ac27a0ecSDave Kleikamp 	 * even on big-endian machines: we do NOT byteswap the block numbers!
3864ac27a0ecSDave Kleikamp 	 */
3865617ba13bSMingming Cao 	for (block = 0; block < EXT4_N_BLOCKS; block++)
3866ac27a0ecSDave Kleikamp 		ei->i_data[block] = raw_inode->i_block[block];
3867ac27a0ecSDave Kleikamp 	INIT_LIST_HEAD(&ei->i_orphan);
3868ac27a0ecSDave Kleikamp 
3869b436b9beSJan Kara 	/*
3870b436b9beSJan Kara 	 * Set transaction id's of transactions that have to be committed
3871b436b9beSJan Kara 	 * to finish f[data]sync. We set them to currently running transaction
3872b436b9beSJan Kara 	 * as we cannot be sure that the inode or some of its metadata isn't
3873b436b9beSJan Kara 	 * part of the transaction - the inode could have been reclaimed and
3874b436b9beSJan Kara 	 * now it is reread from disk.
3875b436b9beSJan Kara 	 */
3876b436b9beSJan Kara 	if (journal) {
3877b436b9beSJan Kara 		transaction_t *transaction;
3878b436b9beSJan Kara 		tid_t tid;
3879b436b9beSJan Kara 
3880a931da6aSTheodore Ts'o 		read_lock(&journal->j_state_lock);
3881b436b9beSJan Kara 		if (journal->j_running_transaction)
3882b436b9beSJan Kara 			transaction = journal->j_running_transaction;
3883b436b9beSJan Kara 		else
3884b436b9beSJan Kara 			transaction = journal->j_committing_transaction;
3885b436b9beSJan Kara 		if (transaction)
3886b436b9beSJan Kara 			tid = transaction->t_tid;
3887b436b9beSJan Kara 		else
3888b436b9beSJan Kara 			tid = journal->j_commit_sequence;
3889a931da6aSTheodore Ts'o 		read_unlock(&journal->j_state_lock);
3890b436b9beSJan Kara 		ei->i_sync_tid = tid;
3891b436b9beSJan Kara 		ei->i_datasync_tid = tid;
3892b436b9beSJan Kara 	}
3893b436b9beSJan Kara 
38940040d987SEric Sandeen 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3895ac27a0ecSDave Kleikamp 		if (ei->i_extra_isize == 0) {
3896ac27a0ecSDave Kleikamp 			/* The extra space is currently unused. Use it. */
3897617ba13bSMingming Cao 			ei->i_extra_isize = sizeof(struct ext4_inode) -
3898617ba13bSMingming Cao 					    EXT4_GOOD_OLD_INODE_SIZE;
3899ac27a0ecSDave Kleikamp 		} else {
3900ac27a0ecSDave Kleikamp 			__le32 *magic = (void *)raw_inode +
3901617ba13bSMingming Cao 					EXT4_GOOD_OLD_INODE_SIZE +
3902ac27a0ecSDave Kleikamp 					ei->i_extra_isize;
3903617ba13bSMingming Cao 			if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
390419f5fb7aSTheodore Ts'o 				ext4_set_inode_state(inode, EXT4_STATE_XATTR);
3905ac27a0ecSDave Kleikamp 		}
3906814525f4SDarrick J. Wong 	}
3907ac27a0ecSDave Kleikamp 
3908ef7f3835SKalpak Shah 	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
3909ef7f3835SKalpak Shah 	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
3910ef7f3835SKalpak Shah 	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
3911ef7f3835SKalpak Shah 	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
3912ef7f3835SKalpak Shah 
391325ec56b5SJean Noel Cordenner 	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
391425ec56b5SJean Noel Cordenner 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
391525ec56b5SJean Noel Cordenner 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
391625ec56b5SJean Noel Cordenner 			inode->i_version |=
391725ec56b5SJean Noel Cordenner 			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
391825ec56b5SJean Noel Cordenner 	}
391925ec56b5SJean Noel Cordenner 
3920c4b5a614STheodore Ts'o 	ret = 0;
3921485c26ecSTheodore Ts'o 	if (ei->i_file_acl &&
39221032988cSTheodore Ts'o 	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
392324676da4STheodore Ts'o 		EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
392424676da4STheodore Ts'o 				 ei->i_file_acl);
3925485c26ecSTheodore Ts'o 		ret = -EIO;
3926485c26ecSTheodore Ts'o 		goto bad_inode;
392707a03824STheodore Ts'o 	} else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
3928c4b5a614STheodore Ts'o 		if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3929c4b5a614STheodore Ts'o 		    (S_ISLNK(inode->i_mode) &&
3930c4b5a614STheodore Ts'o 		     !ext4_inode_is_fast_symlink(inode)))
39317a262f7cSAneesh Kumar K.V 			/* Validate extent which is part of inode */
39327a262f7cSAneesh Kumar K.V 			ret = ext4_ext_check_inode(inode);
3933fe2c8191SThiemo Nagel 	} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3934fe2c8191SThiemo Nagel 		   (S_ISLNK(inode->i_mode) &&
3935fe2c8191SThiemo Nagel 		    !ext4_inode_is_fast_symlink(inode))) {
3936fe2c8191SThiemo Nagel 		/* Validate block references which are part of inode */
39371f7d1e77STheodore Ts'o 		ret = ext4_ind_check_inode(inode);
3938fe2c8191SThiemo Nagel 	}
3939567f3e9aSTheodore Ts'o 	if (ret)
39407a262f7cSAneesh Kumar K.V 		goto bad_inode;
39417a262f7cSAneesh Kumar K.V 
3942ac27a0ecSDave Kleikamp 	if (S_ISREG(inode->i_mode)) {
3943617ba13bSMingming Cao 		inode->i_op = &ext4_file_inode_operations;
3944617ba13bSMingming Cao 		inode->i_fop = &ext4_file_operations;
3945617ba13bSMingming Cao 		ext4_set_aops(inode);
3946ac27a0ecSDave Kleikamp 	} else if (S_ISDIR(inode->i_mode)) {
3947617ba13bSMingming Cao 		inode->i_op = &ext4_dir_inode_operations;
3948617ba13bSMingming Cao 		inode->i_fop = &ext4_dir_operations;
3949ac27a0ecSDave Kleikamp 	} else if (S_ISLNK(inode->i_mode)) {
3950e83c1397SDuane Griffin 		if (ext4_inode_is_fast_symlink(inode)) {
3951617ba13bSMingming Cao 			inode->i_op = &ext4_fast_symlink_inode_operations;
3952e83c1397SDuane Griffin 			nd_terminate_link(ei->i_data, inode->i_size,
3953e83c1397SDuane Griffin 				sizeof(ei->i_data) - 1);
3954e83c1397SDuane Griffin 		} else {
3955617ba13bSMingming Cao 			inode->i_op = &ext4_symlink_inode_operations;
3956617ba13bSMingming Cao 			ext4_set_aops(inode);
3957ac27a0ecSDave Kleikamp 		}
3958563bdd61STheodore Ts'o 	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
3959563bdd61STheodore Ts'o 	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
3960617ba13bSMingming Cao 		inode->i_op = &ext4_special_inode_operations;
3961ac27a0ecSDave Kleikamp 		if (raw_inode->i_block[0])
3962ac27a0ecSDave Kleikamp 			init_special_inode(inode, inode->i_mode,
3963ac27a0ecSDave Kleikamp 			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
3964ac27a0ecSDave Kleikamp 		else
3965ac27a0ecSDave Kleikamp 			init_special_inode(inode, inode->i_mode,
3966ac27a0ecSDave Kleikamp 			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
3967563bdd61STheodore Ts'o 	} else {
3968563bdd61STheodore Ts'o 		ret = -EIO;
396924676da4STheodore Ts'o 		EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
3970563bdd61STheodore Ts'o 		goto bad_inode;
3971ac27a0ecSDave Kleikamp 	}
3972ac27a0ecSDave Kleikamp 	brelse(iloc.bh);
3973617ba13bSMingming Cao 	ext4_set_inode_flags(inode);
39741d1fe1eeSDavid Howells 	unlock_new_inode(inode);
39751d1fe1eeSDavid Howells 	return inode;
3976ac27a0ecSDave Kleikamp 
3977ac27a0ecSDave Kleikamp bad_inode:
3978567f3e9aSTheodore Ts'o 	brelse(iloc.bh);
39791d1fe1eeSDavid Howells 	iget_failed(inode);
39801d1fe1eeSDavid Howells 	return ERR_PTR(ret);
3981ac27a0ecSDave Kleikamp }
3982ac27a0ecSDave Kleikamp 
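/*
 * A standalone sketch (not part of this file) of the per-inode
 * checksum seed computed in ext4_iget() above: crc32c folded over the
 * inode number and then the generation, starting from the filesystem
 * seed.  Bitwise crc32c with the reflected polynomial 0x82F63B78;
 * assumes a little-endian host so the in-memory layout matches the
 * on-disk __le32 fields.  The sample inputs are assumptions.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	uint32_t fs_seed = 0xdeadbeef, inum = 12345, gen = 7;
	uint32_t csum = crc32c(fs_seed, &inum, sizeof(inum));

	csum = crc32c(csum, &gen, sizeof(gen));
	printf("i_csum_seed = %#x\n", csum);
	return 0;
}
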
39830fc1b451SAneesh Kumar K.V static int ext4_inode_blocks_set(handle_t *handle,
39840fc1b451SAneesh Kumar K.V 				struct ext4_inode *raw_inode,
39850fc1b451SAneesh Kumar K.V 				struct ext4_inode_info *ei)
39860fc1b451SAneesh Kumar K.V {
39870fc1b451SAneesh Kumar K.V 	struct inode *inode = &(ei->vfs_inode);
39880fc1b451SAneesh Kumar K.V 	u64 i_blocks = inode->i_blocks;
39890fc1b451SAneesh Kumar K.V 	struct super_block *sb = inode->i_sb;
39900fc1b451SAneesh Kumar K.V 
39910fc1b451SAneesh Kumar K.V 	if (i_blocks <= ~0U) {
39920fc1b451SAneesh Kumar K.V 		/*
39930fc1b451SAneesh Kumar K.V 		 * i_blocks can be represented in a 32 bit variable
39940fc1b451SAneesh Kumar K.V 		 * as a multiple of 512 bytes
39950fc1b451SAneesh Kumar K.V 		 */
39968180a562SAneesh Kumar K.V 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
39970fc1b451SAneesh Kumar K.V 		raw_inode->i_blocks_high = 0;
399884a8dce2SDmitry Monakhov 		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
3999f287a1a5STheodore Ts'o 		return 0;
4000f287a1a5STheodore Ts'o 	}
4001f287a1a5STheodore Ts'o 	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
4002f287a1a5STheodore Ts'o 		return -EFBIG;
4003f287a1a5STheodore Ts'o 
4004f287a1a5STheodore Ts'o 	if (i_blocks <= 0xffffffffffffULL) {
40050fc1b451SAneesh Kumar K.V 		/*
40060fc1b451SAneesh Kumar K.V 		 * i_blocks can be represented in a 48 bit variable
40070fc1b451SAneesh Kumar K.V 		 * as a multiple of 512 bytes
40080fc1b451SAneesh Kumar K.V 		 */
40098180a562SAneesh Kumar K.V 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
40100fc1b451SAneesh Kumar K.V 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
401184a8dce2SDmitry Monakhov 		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
40120fc1b451SAneesh Kumar K.V 	} else {
401384a8dce2SDmitry Monakhov 		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
40148180a562SAneesh Kumar K.V 		/* i_blocks is stored in filesystem block units */
40158180a562SAneesh Kumar K.V 		i_blocks = i_blocks >> (inode->i_blkbits - 9);
40168180a562SAneesh Kumar K.V 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
40178180a562SAneesh Kumar K.V 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
40180fc1b451SAneesh Kumar K.V 	}
4019f287a1a5STheodore Ts'o 	return 0;
40200fc1b451SAneesh Kumar K.V }
40210fc1b451SAneesh Kumar K.V 
4022ac27a0ecSDave Kleikamp /*
4023ac27a0ecSDave Kleikamp  * Post the struct inode info into an on-disk inode location in the
4024ac27a0ecSDave Kleikamp  * buffer-cache.  This gobbles the caller's reference to the
4025ac27a0ecSDave Kleikamp  * buffer_head in the inode location struct.
4026ac27a0ecSDave Kleikamp  *
4027ac27a0ecSDave Kleikamp  * The caller must have write access to iloc->bh.
4028ac27a0ecSDave Kleikamp  */
4029617ba13bSMingming Cao static int ext4_do_update_inode(handle_t *handle,
4030ac27a0ecSDave Kleikamp 				struct inode *inode,
4031830156c7SFrank Mayhar 				struct ext4_iloc *iloc)
4032ac27a0ecSDave Kleikamp {
4033617ba13bSMingming Cao 	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
4034617ba13bSMingming Cao 	struct ext4_inode_info *ei = EXT4_I(inode);
4035ac27a0ecSDave Kleikamp 	struct buffer_head *bh = iloc->bh;
4036ac27a0ecSDave Kleikamp 	int err = 0, rc, block;
403708cefc7aSEric W. Biederman 	uid_t i_uid;
403808cefc7aSEric W. Biederman 	gid_t i_gid;
4039ac27a0ecSDave Kleikamp 
4040ac27a0ecSDave Kleikamp 	/* For fields not tracked in the in-memory inode,
4041ac27a0ecSDave Kleikamp 	 * initialise them to zero for new inodes. */
404219f5fb7aSTheodore Ts'o 	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
4043617ba13bSMingming Cao 		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
4044ac27a0ecSDave Kleikamp 
4045ff9ddf7eSJan Kara 	ext4_get_inode_flags(ei);
4046ac27a0ecSDave Kleikamp 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
404708cefc7aSEric W. Biederman 	i_uid = i_uid_read(inode);
404808cefc7aSEric W. Biederman 	i_gid = i_gid_read(inode);
4049ac27a0ecSDave Kleikamp 	if (!(test_opt(inode->i_sb, NO_UID32))) {
405008cefc7aSEric W. Biederman 		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
405108cefc7aSEric W. Biederman 		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
4052ac27a0ecSDave Kleikamp /*
4053ac27a0ecSDave Kleikamp  * Fix up interoperability with old kernels. Otherwise, old inodes get
4054ac27a0ecSDave Kleikamp  * re-used with the upper 16 bits of the uid/gid intact
4055ac27a0ecSDave Kleikamp  */
4056ac27a0ecSDave Kleikamp 		if (!ei->i_dtime) {
4057ac27a0ecSDave Kleikamp 			raw_inode->i_uid_high =
405808cefc7aSEric W. Biederman 				cpu_to_le16(high_16_bits(i_uid));
4059ac27a0ecSDave Kleikamp 			raw_inode->i_gid_high =
406008cefc7aSEric W. Biederman 				cpu_to_le16(high_16_bits(i_gid));
4061ac27a0ecSDave Kleikamp 		} else {
4062ac27a0ecSDave Kleikamp 			raw_inode->i_uid_high = 0;
4063ac27a0ecSDave Kleikamp 			raw_inode->i_gid_high = 0;
4064ac27a0ecSDave Kleikamp 		}
4065ac27a0ecSDave Kleikamp 	} else {
406608cefc7aSEric W. Biederman 		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
406708cefc7aSEric W. Biederman 		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
4068ac27a0ecSDave Kleikamp 		raw_inode->i_uid_high = 0;
4069ac27a0ecSDave Kleikamp 		raw_inode->i_gid_high = 0;
4070ac27a0ecSDave Kleikamp 	}
4071ac27a0ecSDave Kleikamp 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4072ef7f3835SKalpak Shah 
4073ef7f3835SKalpak Shah 	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4074ef7f3835SKalpak Shah 	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4075ef7f3835SKalpak Shah 	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4076ef7f3835SKalpak Shah 	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4077ef7f3835SKalpak Shah 
40780fc1b451SAneesh Kumar K.V 	if (ext4_inode_blocks_set(handle, raw_inode, ei))
40790fc1b451SAneesh Kumar K.V 		goto out_brelse;
4080ac27a0ecSDave Kleikamp 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4081353eb83cSTheodore Ts'o 	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
40829b8f1f01SMingming Cao 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
40839b8f1f01SMingming Cao 	    cpu_to_le32(EXT4_OS_HURD))
4084a1ddeb7eSBadari Pulavarty 		raw_inode->i_file_acl_high =
4085a1ddeb7eSBadari Pulavarty 			cpu_to_le16(ei->i_file_acl >> 32);
40867973c0c1SAneesh Kumar K.V 	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4087a48380f7SAneesh Kumar K.V 	ext4_isize_set(raw_inode, ei->i_disksize);
4088ac27a0ecSDave Kleikamp 	if (ei->i_disksize > 0x7fffffffULL) {
4089ac27a0ecSDave Kleikamp 		struct super_block *sb = inode->i_sb;
4090617ba13bSMingming Cao 		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
4091617ba13bSMingming Cao 				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
4092617ba13bSMingming Cao 				EXT4_SB(sb)->s_es->s_rev_level ==
4093617ba13bSMingming Cao 				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
4094ac27a0ecSDave Kleikamp 			/* If this is the first large file
4095ac27a0ecSDave Kleikamp 			 * created, add a flag to the superblock.
4096ac27a0ecSDave Kleikamp 			 */
4097617ba13bSMingming Cao 			err = ext4_journal_get_write_access(handle,
4098617ba13bSMingming Cao 					EXT4_SB(sb)->s_sbh);
4099ac27a0ecSDave Kleikamp 			if (err)
4100ac27a0ecSDave Kleikamp 				goto out_brelse;
4101617ba13bSMingming Cao 			ext4_update_dynamic_rev(sb);
4102617ba13bSMingming Cao 			EXT4_SET_RO_COMPAT_FEATURE(sb,
4103617ba13bSMingming Cao 					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
41040390131bSFrank Mayhar 			ext4_handle_sync(handle);
4105b50924c2SArtem Bityutskiy 			err = ext4_handle_dirty_super(handle, sb);
4106ac27a0ecSDave Kleikamp 		}
4107ac27a0ecSDave Kleikamp 	}
4108ac27a0ecSDave Kleikamp 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4109ac27a0ecSDave Kleikamp 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4110ac27a0ecSDave Kleikamp 		if (old_valid_dev(inode->i_rdev)) {
4111ac27a0ecSDave Kleikamp 			raw_inode->i_block[0] =
4112ac27a0ecSDave Kleikamp 				cpu_to_le32(old_encode_dev(inode->i_rdev));
4113ac27a0ecSDave Kleikamp 			raw_inode->i_block[1] = 0;
4114ac27a0ecSDave Kleikamp 		} else {
4115ac27a0ecSDave Kleikamp 			raw_inode->i_block[0] = 0;
4116ac27a0ecSDave Kleikamp 			raw_inode->i_block[1] =
4117ac27a0ecSDave Kleikamp 				cpu_to_le32(new_encode_dev(inode->i_rdev));
4118ac27a0ecSDave Kleikamp 			raw_inode->i_block[2] = 0;
4119ac27a0ecSDave Kleikamp 		}
4120de9a55b8STheodore Ts'o 	} else
4121de9a55b8STheodore Ts'o 		for (block = 0; block < EXT4_N_BLOCKS; block++)
4122ac27a0ecSDave Kleikamp 			raw_inode->i_block[block] = ei->i_data[block];
4123ac27a0ecSDave Kleikamp 
412425ec56b5SJean Noel Cordenner 	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
412525ec56b5SJean Noel Cordenner 	if (ei->i_extra_isize) {
412625ec56b5SJean Noel Cordenner 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
412725ec56b5SJean Noel Cordenner 			raw_inode->i_version_hi =
412825ec56b5SJean Noel Cordenner 			cpu_to_le32(inode->i_version >> 32);
4129ac27a0ecSDave Kleikamp 		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
413025ec56b5SJean Noel Cordenner 	}
413125ec56b5SJean Noel Cordenner 
4132814525f4SDarrick J. Wong 	ext4_inode_csum_set(inode, raw_inode, ei);
4133814525f4SDarrick J. Wong 
41340390131bSFrank Mayhar 	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
413573b50c1cSCurt Wohlgemuth 	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
4136ac27a0ecSDave Kleikamp 	if (!err)
4137ac27a0ecSDave Kleikamp 		err = rc;
413819f5fb7aSTheodore Ts'o 	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
4139ac27a0ecSDave Kleikamp 
4140b436b9beSJan Kara 	ext4_update_inode_fsync_trans(handle, inode, 0);
4141ac27a0ecSDave Kleikamp out_brelse:
4142ac27a0ecSDave Kleikamp 	brelse(bh);
4143617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4144ac27a0ecSDave Kleikamp 	return err;
4145ac27a0ecSDave Kleikamp }
4146ac27a0ecSDave Kleikamp 
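/*
 * Hedged illustration of the uid/gid split above: the on-disk inode keeps
 * the legacy 16-bit id in i_uid_low/i_gid_low and the remainder in
 * i_uid_high/i_gid_high, so old 16-bit-uid kernels still read a sane
 * (truncated) owner.  The concrete value below is made up for the example.
 */
#if 0
	/* i_uid == 100000 == 0x186a0 */
	raw_inode->i_uid_low  = cpu_to_le16(100000 & 0xffff);	/* 0x86a0 */
	raw_inode->i_uid_high = cpu_to_le16(100000 >> 16);	/* 0x0001 */
#endif
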
4147ac27a0ecSDave Kleikamp /*
4148617ba13bSMingming Cao  * ext4_write_inode()
4149ac27a0ecSDave Kleikamp  *
4150ac27a0ecSDave Kleikamp  * We are called from a few places:
4151ac27a0ecSDave Kleikamp  *
4152ac27a0ecSDave Kleikamp  * - Within generic_file_write() for O_SYNC files.
4153ac27a0ecSDave Kleikamp  *   Here, there will be no transaction running. We wait for any running
4154ac27a0ecSDave Kleikamp  *   transaction to commit.
4155ac27a0ecSDave Kleikamp  *
4156ac27a0ecSDave Kleikamp  * - Within sys_sync(), kupdate and such.
4157ac27a0ecSDave Kleikamp  *   We wait on commit, if told to.
4158ac27a0ecSDave Kleikamp  *
4159ac27a0ecSDave Kleikamp  * - Within prune_icache() (PF_MEMALLOC == true)
4160ac27a0ecSDave Kleikamp  *   Here we simply return.  We can't afford to block kswapd on the
4161ac27a0ecSDave Kleikamp  *   journal commit.
4162ac27a0ecSDave Kleikamp  *
4163ac27a0ecSDave Kleikamp  * In all cases it is actually safe for us to return without doing anything,
4164ac27a0ecSDave Kleikamp  * because the inode has been copied into a raw inode buffer in
4165617ba13bSMingming Cao  * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
4166ac27a0ecSDave Kleikamp  * knfsd.
4167ac27a0ecSDave Kleikamp  *
4168ac27a0ecSDave Kleikamp  * Note that we are absolutely dependent upon all inode dirtiers doing the
4169ac27a0ecSDave Kleikamp  * right thing: they *must* call mark_inode_dirty() after dirtying info in
4170ac27a0ecSDave Kleikamp  * which we are interested.
4171ac27a0ecSDave Kleikamp  *
4172ac27a0ecSDave Kleikamp  * It would be a bug for them to not do this.  The code:
4173ac27a0ecSDave Kleikamp  *
4174ac27a0ecSDave Kleikamp  *	mark_inode_dirty(inode)
4175ac27a0ecSDave Kleikamp  *	stuff();
4176ac27a0ecSDave Kleikamp  *	inode->i_size = expr;
4177ac27a0ecSDave Kleikamp  *
4178ac27a0ecSDave Kleikamp  * is in error because a kswapd-driven write_inode() could occur while
4179ac27a0ecSDave Kleikamp  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
4180ac27a0ecSDave Kleikamp  * will no longer be on the superblock's dirty inode list.
4181ac27a0ecSDave Kleikamp  */
4182a9185b41SChristoph Hellwig int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
4183ac27a0ecSDave Kleikamp {
418491ac6f43SFrank Mayhar 	int err;
418591ac6f43SFrank Mayhar 
4186ac27a0ecSDave Kleikamp 	if (current->flags & PF_MEMALLOC)
4187ac27a0ecSDave Kleikamp 		return 0;
4188ac27a0ecSDave Kleikamp 
418991ac6f43SFrank Mayhar 	if (EXT4_SB(inode->i_sb)->s_journal) {
4190617ba13bSMingming Cao 		if (ext4_journal_current_handle()) {
4191b38bd33aSMingming Cao 			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4192ac27a0ecSDave Kleikamp 			dump_stack();
4193ac27a0ecSDave Kleikamp 			return -EIO;
4194ac27a0ecSDave Kleikamp 		}
4195ac27a0ecSDave Kleikamp 
4196a9185b41SChristoph Hellwig 		if (wbc->sync_mode != WB_SYNC_ALL)
4197ac27a0ecSDave Kleikamp 			return 0;
4198ac27a0ecSDave Kleikamp 
419991ac6f43SFrank Mayhar 		err = ext4_force_commit(inode->i_sb);
420091ac6f43SFrank Mayhar 	} else {
420191ac6f43SFrank Mayhar 		struct ext4_iloc iloc;
420291ac6f43SFrank Mayhar 
42038b472d73SCurt Wohlgemuth 		err = __ext4_get_inode_loc(inode, &iloc, 0);
420491ac6f43SFrank Mayhar 		if (err)
420591ac6f43SFrank Mayhar 			return err;
4206a9185b41SChristoph Hellwig 		if (wbc->sync_mode == WB_SYNC_ALL)
4207830156c7SFrank Mayhar 			sync_dirty_buffer(iloc.bh);
4208830156c7SFrank Mayhar 		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
4209c398eda0STheodore Ts'o 			EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
4210c398eda0STheodore Ts'o 					 "IO error syncing inode");
4211830156c7SFrank Mayhar 			err = -EIO;
4212830156c7SFrank Mayhar 		}
4213fd2dd9fbSCurt Wohlgemuth 		brelse(iloc.bh);
421491ac6f43SFrank Mayhar 	}
421591ac6f43SFrank Mayhar 	return err;
4216ac27a0ecSDave Kleikamp }
4217ac27a0ecSDave Kleikamp 
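/*
 * Rough caller sketch (assumed writeback context, not code from this
 * file): the writeback layer ends up invoking ->write_inode() roughly
 * like this, which is why ext4_write_inode() must bail out under
 * PF_MEMALLOC and honour wbc->sync_mode.
 */
#if 0
	struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL };
	int err = ext4_write_inode(inode, &wbc);
#endif
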
4218ac27a0ecSDave Kleikamp /*
4219617ba13bSMingming Cao  * ext4_setattr()
4220ac27a0ecSDave Kleikamp  *
4221ac27a0ecSDave Kleikamp  * Called from notify_change.
4222ac27a0ecSDave Kleikamp  *
4223ac27a0ecSDave Kleikamp  * We want to trap VFS attempts to truncate the file as soon as
4224ac27a0ecSDave Kleikamp  * possible.  In particular, we want to make sure that when the VFS
4225ac27a0ecSDave Kleikamp  * shrinks i_size, we put the inode on the orphan list and modify
4226ac27a0ecSDave Kleikamp  * i_disksize immediately, so that during the subsequent flushing of
4227ac27a0ecSDave Kleikamp  * dirty pages and freeing of disk blocks, we can guarantee that any
4228ac27a0ecSDave Kleikamp  * commit will leave the blocks being flushed in an unused state on
4229ac27a0ecSDave Kleikamp  * disk.  (On recovery, the inode will get truncated and the blocks will
4230ac27a0ecSDave Kleikamp  * be freed, so we have a strong guarantee that no future commit will
4231ac27a0ecSDave Kleikamp  * leave these blocks visible to the user.)
4232ac27a0ecSDave Kleikamp  *
4233678aaf48SJan Kara  * Another thing we have to ensure is that if we are in ordered mode
4234678aaf48SJan Kara  * and the inode is still attached to the committing transaction, we must
4235678aaf48SJan Kara  * start writeout of all the dirty pages which are being truncated.
4236678aaf48SJan Kara  * This way we are sure that all the data written in the previous
4237678aaf48SJan Kara  * transaction are already on disk (truncate waits for pages under
4238678aaf48SJan Kara  * writeback).
4239678aaf48SJan Kara  *
4240678aaf48SJan Kara  * Called with inode->i_mutex down.
4241ac27a0ecSDave Kleikamp  */
4242617ba13bSMingming Cao int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4243ac27a0ecSDave Kleikamp {
4244ac27a0ecSDave Kleikamp 	struct inode *inode = dentry->d_inode;
4245ac27a0ecSDave Kleikamp 	int error, rc = 0;
42463d287de3SDmitry Monakhov 	int orphan = 0;
4247ac27a0ecSDave Kleikamp 	const unsigned int ia_valid = attr->ia_valid;
4248ac27a0ecSDave Kleikamp 
4249ac27a0ecSDave Kleikamp 	error = inode_change_ok(inode, attr);
4250ac27a0ecSDave Kleikamp 	if (error)
4251ac27a0ecSDave Kleikamp 		return error;
4252ac27a0ecSDave Kleikamp 
425312755627SDmitry Monakhov 	if (is_quota_modification(inode, attr))
4254871a2931SChristoph Hellwig 		dquot_initialize(inode);
425508cefc7aSEric W. Biederman 	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
425608cefc7aSEric W. Biederman 	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
4257ac27a0ecSDave Kleikamp 		handle_t *handle;
4258ac27a0ecSDave Kleikamp 
4259ac27a0ecSDave Kleikamp 		/* (user+group)*(old+new) structure, inode write (sb,
4260ac27a0ecSDave Kleikamp 		 * inode block, ? - but truncate inode update has it) */
42615aca07ebSDmitry Monakhov 		handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
4262194074acSDmitry Monakhov 					EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
4263ac27a0ecSDave Kleikamp 		if (IS_ERR(handle)) {
4264ac27a0ecSDave Kleikamp 			error = PTR_ERR(handle);
4265ac27a0ecSDave Kleikamp 			goto err_out;
4266ac27a0ecSDave Kleikamp 		}
4267b43fa828SChristoph Hellwig 		error = dquot_transfer(inode, attr);
4268ac27a0ecSDave Kleikamp 		if (error) {
4269617ba13bSMingming Cao 			ext4_journal_stop(handle);
4270ac27a0ecSDave Kleikamp 			return error;
4271ac27a0ecSDave Kleikamp 		}
4272ac27a0ecSDave Kleikamp 		/* Update corresponding info in inode so that everything is in
4273ac27a0ecSDave Kleikamp 		 * one transaction */
4274ac27a0ecSDave Kleikamp 		if (attr->ia_valid & ATTR_UID)
4275ac27a0ecSDave Kleikamp 			inode->i_uid = attr->ia_uid;
4276ac27a0ecSDave Kleikamp 		if (attr->ia_valid & ATTR_GID)
4277ac27a0ecSDave Kleikamp 			inode->i_gid = attr->ia_gid;
4278617ba13bSMingming Cao 		error = ext4_mark_inode_dirty(handle, inode);
4279617ba13bSMingming Cao 		ext4_journal_stop(handle);
4280ac27a0ecSDave Kleikamp 	}
4281ac27a0ecSDave Kleikamp 
4282e2b46574SEric Sandeen 	if (attr->ia_valid & ATTR_SIZE) {
4283562c72aaSChristoph Hellwig 		inode_dio_wait(inode);
4284562c72aaSChristoph Hellwig 
428512e9b892SDmitry Monakhov 		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4286e2b46574SEric Sandeen 			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4287e2b46574SEric Sandeen 
42880c095c7fSTheodore Ts'o 			if (attr->ia_size > sbi->s_bitmap_maxbytes)
42890c095c7fSTheodore Ts'o 				return -EFBIG;
4290e2b46574SEric Sandeen 		}
4291e2b46574SEric Sandeen 	}
4292e2b46574SEric Sandeen 
4293ac27a0ecSDave Kleikamp 	if (S_ISREG(inode->i_mode) &&
4294c8d46e41SJiaying Zhang 	    attr->ia_valid & ATTR_SIZE &&
4295072bd7eaSTheodore Ts'o 	    (attr->ia_size < inode->i_size)) {
4296ac27a0ecSDave Kleikamp 		handle_t *handle;
4297ac27a0ecSDave Kleikamp 
4298617ba13bSMingming Cao 		handle = ext4_journal_start(inode, 3);
4299ac27a0ecSDave Kleikamp 		if (IS_ERR(handle)) {
4300ac27a0ecSDave Kleikamp 			error = PTR_ERR(handle);
4301ac27a0ecSDave Kleikamp 			goto err_out;
4302ac27a0ecSDave Kleikamp 		}
43033d287de3SDmitry Monakhov 		if (ext4_handle_valid(handle)) {
4304617ba13bSMingming Cao 			error = ext4_orphan_add(handle, inode);
43053d287de3SDmitry Monakhov 			orphan = 1;
43063d287de3SDmitry Monakhov 		}
4307617ba13bSMingming Cao 		EXT4_I(inode)->i_disksize = attr->ia_size;
4308617ba13bSMingming Cao 		rc = ext4_mark_inode_dirty(handle, inode);
4309ac27a0ecSDave Kleikamp 		if (!error)
4310ac27a0ecSDave Kleikamp 			error = rc;
4311617ba13bSMingming Cao 		ext4_journal_stop(handle);
4312678aaf48SJan Kara 
4313678aaf48SJan Kara 		if (ext4_should_order_data(inode)) {
4314678aaf48SJan Kara 			error = ext4_begin_ordered_truncate(inode,
4315678aaf48SJan Kara 							    attr->ia_size);
4316678aaf48SJan Kara 			if (error) {
4317678aaf48SJan Kara 				/* Do as much error cleanup as possible */
4318678aaf48SJan Kara 				handle = ext4_journal_start(inode, 3);
4319678aaf48SJan Kara 				if (IS_ERR(handle)) {
4320678aaf48SJan Kara 					ext4_orphan_del(NULL, inode);
4321678aaf48SJan Kara 					goto err_out;
4322678aaf48SJan Kara 				}
4323678aaf48SJan Kara 				ext4_orphan_del(handle, inode);
43243d287de3SDmitry Monakhov 				orphan = 0;
4325678aaf48SJan Kara 				ext4_journal_stop(handle);
4326678aaf48SJan Kara 				goto err_out;
4327678aaf48SJan Kara 			}
4328678aaf48SJan Kara 		}
4329ac27a0ecSDave Kleikamp 	}
4330ac27a0ecSDave Kleikamp 
4331072bd7eaSTheodore Ts'o 	if (attr->ia_valid & ATTR_SIZE) {
4332afcff5d8SLukas Czerner 		if (attr->ia_size != i_size_read(inode))
4333072bd7eaSTheodore Ts'o 			truncate_setsize(inode, attr->ia_size);
4334072bd7eaSTheodore Ts'o 		ext4_truncate(inode);
4335072bd7eaSTheodore Ts'o 	}
4336ac27a0ecSDave Kleikamp 
43371025774cSChristoph Hellwig 	if (!rc) {
43381025774cSChristoph Hellwig 		setattr_copy(inode, attr);
43391025774cSChristoph Hellwig 		mark_inode_dirty(inode);
43401025774cSChristoph Hellwig 	}
43411025774cSChristoph Hellwig 
43421025774cSChristoph Hellwig 	/*
43431025774cSChristoph Hellwig 	 * If the call to ext4_truncate failed to get a transaction handle at
43441025774cSChristoph Hellwig 	 * all, we need to clean up the in-core orphan list manually.
43451025774cSChristoph Hellwig 	 */
43463d287de3SDmitry Monakhov 	if (orphan && inode->i_nlink)
4347617ba13bSMingming Cao 		ext4_orphan_del(NULL, inode);
4348ac27a0ecSDave Kleikamp 
4349ac27a0ecSDave Kleikamp 	if (!rc && (ia_valid & ATTR_MODE))
4350617ba13bSMingming Cao 		rc = ext4_acl_chmod(inode);
4351ac27a0ecSDave Kleikamp 
4352ac27a0ecSDave Kleikamp err_out:
4353617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, error);
4354ac27a0ecSDave Kleikamp 	if (!error)
4355ac27a0ecSDave Kleikamp 		error = rc;
4356ac27a0ecSDave Kleikamp 	return error;
4357ac27a0ecSDave Kleikamp }
4358ac27a0ecSDave Kleikamp 
43593e3398a0SMingming Cao int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
43603e3398a0SMingming Cao 		 struct kstat *stat)
43613e3398a0SMingming Cao {
43623e3398a0SMingming Cao 	struct inode *inode;
43633e3398a0SMingming Cao 	unsigned long delalloc_blocks;
43643e3398a0SMingming Cao 
43653e3398a0SMingming Cao 	inode = dentry->d_inode;
43663e3398a0SMingming Cao 	generic_fillattr(inode, stat);
43673e3398a0SMingming Cao 
43683e3398a0SMingming Cao 	/*
43693e3398a0SMingming Cao 	 * We can't update i_blocks if the block allocation is delayed;
43703e3398a0SMingming Cao 	 * otherwise, if the system crashes before the real block
43713e3398a0SMingming Cao 	 * allocation is done, i_blocks will be inconsistent with the
43723e3398a0SMingming Cao 	 * on-disk file blocks.
43733e3398a0SMingming Cao 	 * We always keep i_blocks updated together with the real
43743e3398a0SMingming Cao 	 * allocation. But to avoid confusing users, stat
43753e3398a0SMingming Cao 	 * will return a block count that includes the delayed allocation
43763e3398a0SMingming Cao 	 * blocks for this file.
43773e3398a0SMingming Cao 	 */
437896607551STao Ma 	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
437996607551STao Ma 				EXT4_I(inode)->i_reserved_data_blocks);
43803e3398a0SMingming Cao 
43813e3398a0SMingming Cao 	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
43823e3398a0SMingming Cao 	return 0;
43833e3398a0SMingming Cao }
4384ac27a0ecSDave Kleikamp 
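/*
 * Worked example for the conversion above, assuming 4K filesystem blocks
 * (s_blocksize_bits == 12): three reserved delalloc blocks contribute
 * (3 << 12) >> 9 == 24 512-byte sectors to stat->blocks.
 */
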
4385a02908f1SMingming Cao static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4386a02908f1SMingming Cao {
438712e9b892SDmitry Monakhov 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
43888bb2b247SAmir Goldstein 		return ext4_ind_trans_blocks(inode, nrblocks, chunk);
4389ac51d837STheodore Ts'o 	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
4390a02908f1SMingming Cao }
4391ac51d837STheodore Ts'o 
4392a02908f1SMingming Cao /*
4393a02908f1SMingming Cao  * Account for index blocks, block group bitmaps and block group
4394a02908f1SMingming Cao  * descriptor blocks if we modify data blocks and index blocks.  In the
4395a02908f1SMingming Cao  * worst case, the index blocks are spread over different block groups.
4396a02908f1SMingming Cao  *
4397a02908f1SMingming Cao  * If data blocks are discontiguous, they may spread over
4398af901ca1SAndré Goddard Rosa  * different block groups too. If they are contiguous, with flexbg,
4399a02908f1SMingming Cao  * they could still cross a block group boundary.
4400a02908f1SMingming Cao  *
4401a02908f1SMingming Cao  * Also account for superblock, inode, quota and xattr blocks
4402a02908f1SMingming Cao  */
44031f109d5aSTheodore Ts'o static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4404a02908f1SMingming Cao {
44058df9675fSTheodore Ts'o 	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
44068df9675fSTheodore Ts'o 	int gdpblocks;
4407a02908f1SMingming Cao 	int idxblocks;
4408a02908f1SMingming Cao 	int ret = 0;
4409a02908f1SMingming Cao 
4410a02908f1SMingming Cao 	/*
4411a02908f1SMingming Cao 	 * How many index blocks do we need to touch to modify nrblocks?
4412a02908f1SMingming Cao 	 * The "Chunk" flag indicates whether the nrblocks are
4413a02908f1SMingming Cao 	 * physically contiguous on disk.
4414a02908f1SMingming Cao 	 *
4415a02908f1SMingming Cao 	 * Direct IO and fallocate call get_block to allocate
4416a02908f1SMingming Cao 	 * a single extent at a time, so they can set the "Chunk" flag.
4417a02908f1SMingming Cao 	 */
4418a02908f1SMingming Cao 	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
4419a02908f1SMingming Cao 
4420a02908f1SMingming Cao 	ret = idxblocks;
4421a02908f1SMingming Cao 
4422a02908f1SMingming Cao 	/*
4423a02908f1SMingming Cao 	 * Now let's see how many group bitmaps and group descriptors need
4424a02908f1SMingming Cao 	 * to be accounted for.
4425a02908f1SMingming Cao 	 */
4426a02908f1SMingming Cao 	groups = idxblocks;
4427a02908f1SMingming Cao 	if (chunk)
4428a02908f1SMingming Cao 		groups += 1;
4429ac27a0ecSDave Kleikamp 	else
4430a02908f1SMingming Cao 		groups += nrblocks;
4431ac27a0ecSDave Kleikamp 
4432a02908f1SMingming Cao 	gdpblocks = groups;
44338df9675fSTheodore Ts'o 	if (groups > ngroups)
44348df9675fSTheodore Ts'o 		groups = ngroups;
4435a02908f1SMingming Cao 	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4436a02908f1SMingming Cao 		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4437a02908f1SMingming Cao 
4438a02908f1SMingming Cao 	/* bitmaps and block group descriptor blocks */
4439a02908f1SMingming Cao 	ret += groups + gdpblocks;
4440a02908f1SMingming Cao 
4441a02908f1SMingming Cao 	/* Blocks for super block, inode, quota and xattr blocks */
4442a02908f1SMingming Cao 	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
4443ac27a0ecSDave Kleikamp 
4444ac27a0ecSDave Kleikamp 	return ret;
4445ac27a0ecSDave Kleikamp }
4446ac27a0ecSDave Kleikamp 
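/*
 * Worked example with made-up numbers: nrblocks == 4, chunk == 1 and
 * idxblocks == 3 on a filesystem with many block groups but only two
 * group descriptor blocks.  Then groups == idxblocks + 1 == 4,
 * gdpblocks == 2, and the function returns
 * 3 + (4 + 2) + EXT4_META_TRANS_BLOCKS(sb) credits.
 */
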
4447ac27a0ecSDave Kleikamp /*
444825985edcSLucas De Marchi  * Calculate the total number of credits to reserve to fit
4449f3bd1f3fSMingming Cao  * the modification of a single page into a single transaction,
4450f3bd1f3fSMingming Cao  * which may include multiple chunks of block allocations.
4451a02908f1SMingming Cao  *
4452525f4ed8SMingming Cao  * This could be called via ext4_write_begin()
4453a02908f1SMingming Cao  *
4454525f4ed8SMingming Cao  * We need to consider the worst case, when
4455a02908f1SMingming Cao  * one new block is allocated per extent.
4456a02908f1SMingming Cao  */
4457a02908f1SMingming Cao int ext4_writepage_trans_blocks(struct inode *inode)
4458a02908f1SMingming Cao {
4459a02908f1SMingming Cao 	int bpp = ext4_journal_blocks_per_page(inode);
4460a02908f1SMingming Cao 	int ret;
4461a02908f1SMingming Cao 
4462a02908f1SMingming Cao 	ret = ext4_meta_trans_blocks(inode, bpp, 0);
4463a02908f1SMingming Cao 
4464a02908f1SMingming Cao 	/* Account for data blocks for journalled mode */
4465a02908f1SMingming Cao 	if (ext4_should_journal_data(inode))
4466a02908f1SMingming Cao 		ret += bpp;
4467a02908f1SMingming Cao 	return ret;
4468a02908f1SMingming Cao }
4469f3bd1f3fSMingming Cao 
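/*
 * Typical caller pattern (see ext4_page_mkwrite() at the end of this
 * file): the result sizes the handle needed to write one page.
 */
#if 0
	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
#endif
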
4470f3bd1f3fSMingming Cao /*
4471f3bd1f3fSMingming Cao  * Calculate the journal credits for a chunk of data modification.
4472f3bd1f3fSMingming Cao  *
4473f3bd1f3fSMingming Cao  * This is called from DIO, fallocate, or anything else that calls
447479e83036SEric Sandeen  * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
4475f3bd1f3fSMingming Cao  *
4476f3bd1f3fSMingming Cao  * journal buffers for data blocks are not included here, as DIO
4477f3bd1f3fSMingming Cao  * and fallocate do not need to journal data buffers.
4478f3bd1f3fSMingming Cao  */
4479f3bd1f3fSMingming Cao int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
4480f3bd1f3fSMingming Cao {
4481f3bd1f3fSMingming Cao 	return ext4_meta_trans_blocks(inode, nrblocks, 1);
4482f3bd1f3fSMingming Cao }
4483f3bd1f3fSMingming Cao 
4484a02908f1SMingming Cao /*
4485617ba13bSMingming Cao  * The caller must have previously called ext4_reserve_inode_write().
4486ac27a0ecSDave Kleikamp  * Given this, we know that the caller already has write access to iloc->bh.
4487ac27a0ecSDave Kleikamp  */
4488617ba13bSMingming Cao int ext4_mark_iloc_dirty(handle_t *handle,
4489617ba13bSMingming Cao 			 struct inode *inode, struct ext4_iloc *iloc)
4490ac27a0ecSDave Kleikamp {
4491ac27a0ecSDave Kleikamp 	int err = 0;
4492ac27a0ecSDave Kleikamp 
4493c64db50eSTheodore Ts'o 	if (IS_I_VERSION(inode))
449425ec56b5SJean Noel Cordenner 		inode_inc_iversion(inode);
449525ec56b5SJean Noel Cordenner 
4496ac27a0ecSDave Kleikamp 	/* ext4_do_update_inode() consumes one bh->b_count */
4497ac27a0ecSDave Kleikamp 	get_bh(iloc->bh);
4498ac27a0ecSDave Kleikamp 
4499dab291afSMingming Cao 	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
4500830156c7SFrank Mayhar 	err = ext4_do_update_inode(handle, inode, iloc);
4501ac27a0ecSDave Kleikamp 	put_bh(iloc->bh);
4502ac27a0ecSDave Kleikamp 	return err;
4503ac27a0ecSDave Kleikamp }
4504ac27a0ecSDave Kleikamp 
4505ac27a0ecSDave Kleikamp /*
4506ac27a0ecSDave Kleikamp  * On success, we end up with an outstanding reference count against
4507ac27a0ecSDave Kleikamp  * iloc->bh.  This _must_ be cleaned up later.
4508ac27a0ecSDave Kleikamp  */
4509ac27a0ecSDave Kleikamp 
4510ac27a0ecSDave Kleikamp int
4511617ba13bSMingming Cao ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
4512617ba13bSMingming Cao 			 struct ext4_iloc *iloc)
4513ac27a0ecSDave Kleikamp {
45140390131bSFrank Mayhar 	int err;
45150390131bSFrank Mayhar 
4516617ba13bSMingming Cao 	err = ext4_get_inode_loc(inode, iloc);
4517ac27a0ecSDave Kleikamp 	if (!err) {
4518ac27a0ecSDave Kleikamp 		BUFFER_TRACE(iloc->bh, "get_write_access");
4519617ba13bSMingming Cao 		err = ext4_journal_get_write_access(handle, iloc->bh);
4520ac27a0ecSDave Kleikamp 		if (err) {
4521ac27a0ecSDave Kleikamp 			brelse(iloc->bh);
4522ac27a0ecSDave Kleikamp 			iloc->bh = NULL;
4523ac27a0ecSDave Kleikamp 		}
4524ac27a0ecSDave Kleikamp 	}
4525617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4526ac27a0ecSDave Kleikamp 	return err;
4527ac27a0ecSDave Kleikamp }
4528ac27a0ecSDave Kleikamp 
4529ac27a0ecSDave Kleikamp /*
45306dd4ee7cSKalpak Shah  * Expand an inode by new_extra_isize bytes.
45316dd4ee7cSKalpak Shah  * Returns 0 on success or negative error number on failure.
45326dd4ee7cSKalpak Shah  */
45331d03ec98SAneesh Kumar K.V static int ext4_expand_extra_isize(struct inode *inode,
45341d03ec98SAneesh Kumar K.V 				   unsigned int new_extra_isize,
45351d03ec98SAneesh Kumar K.V 				   struct ext4_iloc iloc,
45361d03ec98SAneesh Kumar K.V 				   handle_t *handle)
45376dd4ee7cSKalpak Shah {
45386dd4ee7cSKalpak Shah 	struct ext4_inode *raw_inode;
45396dd4ee7cSKalpak Shah 	struct ext4_xattr_ibody_header *header;
45406dd4ee7cSKalpak Shah 
45416dd4ee7cSKalpak Shah 	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
45426dd4ee7cSKalpak Shah 		return 0;
45436dd4ee7cSKalpak Shah 
45446dd4ee7cSKalpak Shah 	raw_inode = ext4_raw_inode(&iloc);
45456dd4ee7cSKalpak Shah 
45466dd4ee7cSKalpak Shah 	header = IHDR(inode, raw_inode);
45476dd4ee7cSKalpak Shah 
45486dd4ee7cSKalpak Shah 	/* No extended attributes present */
454919f5fb7aSTheodore Ts'o 	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
45506dd4ee7cSKalpak Shah 	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
45516dd4ee7cSKalpak Shah 		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
45526dd4ee7cSKalpak Shah 			new_extra_isize);
45536dd4ee7cSKalpak Shah 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
45546dd4ee7cSKalpak Shah 		return 0;
45556dd4ee7cSKalpak Shah 	}
45566dd4ee7cSKalpak Shah 
45576dd4ee7cSKalpak Shah 	/* try to expand with EAs present */
45586dd4ee7cSKalpak Shah 	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
45596dd4ee7cSKalpak Shah 					  raw_inode, handle);
45606dd4ee7cSKalpak Shah }
45616dd4ee7cSKalpak Shah 
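/*
 * Layout note for the two cases above: bytes past EXT4_GOOD_OLD_INODE_SIZE
 * (128) are either free space, which can simply be zeroed and claimed, or
 * an in-inode xattr area (identified by EXT4_XATTR_MAGIC), which
 * ext4_expand_extra_isize_ea() must first shift out of the way.
 */
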
45626dd4ee7cSKalpak Shah /*
4563ac27a0ecSDave Kleikamp  * What we do here is to mark the in-core inode as clean with respect to inode
4564ac27a0ecSDave Kleikamp  * dirtiness (it may still be data-dirty).
4565ac27a0ecSDave Kleikamp  * This means that the in-core inode may be reaped by prune_icache
4566ac27a0ecSDave Kleikamp  * without having to perform any I/O.  This is a very good thing,
4567ac27a0ecSDave Kleikamp  * because *any* task may call prune_icache - even ones which
4568ac27a0ecSDave Kleikamp  * have a transaction open against a different journal.
4569ac27a0ecSDave Kleikamp  *
4570ac27a0ecSDave Kleikamp  * Is this cheating?  Not really.  Sure, we haven't written the
4571ac27a0ecSDave Kleikamp  * inode out, but prune_icache isn't a user-visible syncing function.
4572ac27a0ecSDave Kleikamp  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
4573ac27a0ecSDave Kleikamp  * we start and wait on commits.
4574ac27a0ecSDave Kleikamp  *
4575ac27a0ecSDave Kleikamp  * Is this efficient/effective?  Well, we're being nice to the system
4576ac27a0ecSDave Kleikamp  * by cleaning up our inodes proactively so they can be reaped
4577ac27a0ecSDave Kleikamp  * without I/O.  But we are potentially leaving up to five seconds'
4578ac27a0ecSDave Kleikamp  * worth of inodes floating about which prune_icache wants us to
4579ac27a0ecSDave Kleikamp  * write out.  One way to fix that would be to get prune_icache()
4580ac27a0ecSDave Kleikamp  * to do a write_super() to free up some memory.  It has the desired
4581ac27a0ecSDave Kleikamp  * effect.
4582ac27a0ecSDave Kleikamp  */
4583617ba13bSMingming Cao int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4584ac27a0ecSDave Kleikamp {
4585617ba13bSMingming Cao 	struct ext4_iloc iloc;
45866dd4ee7cSKalpak Shah 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
45876dd4ee7cSKalpak Shah 	static unsigned int mnt_count;
45886dd4ee7cSKalpak Shah 	int err, ret;
4589ac27a0ecSDave Kleikamp 
4590ac27a0ecSDave Kleikamp 	might_sleep();
45917ff9c073STheodore Ts'o 	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
4592617ba13bSMingming Cao 	err = ext4_reserve_inode_write(handle, inode, &iloc);
45930390131bSFrank Mayhar 	if (ext4_handle_valid(handle) &&
45940390131bSFrank Mayhar 	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
459519f5fb7aSTheodore Ts'o 	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
45966dd4ee7cSKalpak Shah 		/*
45976dd4ee7cSKalpak Shah 		 * We need extra buffer credits since we may write into the EA block
45986dd4ee7cSKalpak Shah 		 * with this same handle. If journal_extend fails, then it will
45996dd4ee7cSKalpak Shah 		 * only result in a minor loss of functionality for that inode.
46006dd4ee7cSKalpak Shah 		 * If this is felt to be critical, then e2fsck should be run to
46016dd4ee7cSKalpak Shah 		 * force a large enough s_min_extra_isize.
46026dd4ee7cSKalpak Shah 		 */
46036dd4ee7cSKalpak Shah 		if ((jbd2_journal_extend(handle,
46046dd4ee7cSKalpak Shah 			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
46056dd4ee7cSKalpak Shah 			ret = ext4_expand_extra_isize(inode,
46066dd4ee7cSKalpak Shah 						      sbi->s_want_extra_isize,
46076dd4ee7cSKalpak Shah 						      iloc, handle);
46086dd4ee7cSKalpak Shah 			if (ret) {
460919f5fb7aSTheodore Ts'o 				ext4_set_inode_state(inode,
461019f5fb7aSTheodore Ts'o 						     EXT4_STATE_NO_EXPAND);
4611c1bddad9SAneesh Kumar K.V 				if (mnt_count !=
4612c1bddad9SAneesh Kumar K.V 					le16_to_cpu(sbi->s_es->s_mnt_count)) {
461312062dddSEric Sandeen 					ext4_warning(inode->i_sb,
46146dd4ee7cSKalpak Shah 					"Unable to expand inode %lu. Delete"
46156dd4ee7cSKalpak Shah 					" some EAs or run e2fsck.",
46166dd4ee7cSKalpak Shah 					inode->i_ino);
4617c1bddad9SAneesh Kumar K.V 					mnt_count =
4618c1bddad9SAneesh Kumar K.V 					  le16_to_cpu(sbi->s_es->s_mnt_count);
46196dd4ee7cSKalpak Shah 				}
46206dd4ee7cSKalpak Shah 			}
46216dd4ee7cSKalpak Shah 		}
46226dd4ee7cSKalpak Shah 	}
4623ac27a0ecSDave Kleikamp 	if (!err)
4624617ba13bSMingming Cao 		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
4625ac27a0ecSDave Kleikamp 	return err;
4626ac27a0ecSDave Kleikamp }
4627ac27a0ecSDave Kleikamp 
4628ac27a0ecSDave Kleikamp /*
4629617ba13bSMingming Cao  * ext4_dirty_inode() is called from __mark_inode_dirty()
4630ac27a0ecSDave Kleikamp  *
4631ac27a0ecSDave Kleikamp  * We're really interested in the case where a file is being extended.
4632ac27a0ecSDave Kleikamp  * i_size has been changed by generic_commit_write() and we thus need
4633ac27a0ecSDave Kleikamp  * to include the updated inode in the current transaction.
4634ac27a0ecSDave Kleikamp  *
46355dd4056dSChristoph Hellwig  * Also, dquot_alloc_block() will always dirty the inode when blocks
4636ac27a0ecSDave Kleikamp  * are allocated to the file.
4637ac27a0ecSDave Kleikamp  *
4638ac27a0ecSDave Kleikamp  * If the inode is marked synchronous, we don't honour that here - doing
4639ac27a0ecSDave Kleikamp  * so would cause a commit on atime updates, which we don't bother doing.
4640ac27a0ecSDave Kleikamp  * We handle synchronous inodes at the highest possible level.
4641ac27a0ecSDave Kleikamp  */
4642aa385729SChristoph Hellwig void ext4_dirty_inode(struct inode *inode, int flags)
4643ac27a0ecSDave Kleikamp {
4644ac27a0ecSDave Kleikamp 	handle_t *handle;
4645ac27a0ecSDave Kleikamp 
4646617ba13bSMingming Cao 	handle = ext4_journal_start(inode, 2);
4647ac27a0ecSDave Kleikamp 	if (IS_ERR(handle))
4648ac27a0ecSDave Kleikamp 		goto out;
4649f3dc272fSCurt Wohlgemuth 
4650617ba13bSMingming Cao 	ext4_mark_inode_dirty(handle, inode);
4651f3dc272fSCurt Wohlgemuth 
4652617ba13bSMingming Cao 	ext4_journal_stop(handle);
4653ac27a0ecSDave Kleikamp out:
4654ac27a0ecSDave Kleikamp 	return;
4655ac27a0ecSDave Kleikamp }
4656ac27a0ecSDave Kleikamp 
4657ac27a0ecSDave Kleikamp #if 0
4658ac27a0ecSDave Kleikamp /*
4659ac27a0ecSDave Kleikamp  * Bind an inode's backing buffer_head into this transaction, to prevent
4660ac27a0ecSDave Kleikamp  * it from being flushed to disk early.  Unlike
4661617ba13bSMingming Cao  * ext4_reserve_inode_write, this leaves behind no bh reference and
4662ac27a0ecSDave Kleikamp  * returns no iloc structure, so the caller needs to repeat the iloc
4663ac27a0ecSDave Kleikamp  * lookup to mark the inode dirty later.
4664ac27a0ecSDave Kleikamp  */
4665617ba13bSMingming Cao static int ext4_pin_inode(handle_t *handle, struct inode *inode)
4666ac27a0ecSDave Kleikamp {
4667617ba13bSMingming Cao 	struct ext4_iloc iloc;
4668ac27a0ecSDave Kleikamp 
4669ac27a0ecSDave Kleikamp 	int err = 0;
4670ac27a0ecSDave Kleikamp 	if (handle) {
4671617ba13bSMingming Cao 		err = ext4_get_inode_loc(inode, &iloc);
4672ac27a0ecSDave Kleikamp 		if (!err) {
4673ac27a0ecSDave Kleikamp 			BUFFER_TRACE(iloc.bh, "get_write_access");
4674dab291afSMingming Cao 			err = jbd2_journal_get_write_access(handle, iloc.bh);
4675ac27a0ecSDave Kleikamp 			if (!err)
46760390131bSFrank Mayhar 				err = ext4_handle_dirty_metadata(handle,
467773b50c1cSCurt Wohlgemuth 								 NULL,
4678ac27a0ecSDave Kleikamp 								 iloc.bh);
4679ac27a0ecSDave Kleikamp 			brelse(iloc.bh);
4680ac27a0ecSDave Kleikamp 		}
4681ac27a0ecSDave Kleikamp 	}
4682617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4683ac27a0ecSDave Kleikamp 	return err;
4684ac27a0ecSDave Kleikamp }
4685ac27a0ecSDave Kleikamp #endif
4686ac27a0ecSDave Kleikamp 
4687617ba13bSMingming Cao int ext4_change_inode_journal_flag(struct inode *inode, int val)
4688ac27a0ecSDave Kleikamp {
4689ac27a0ecSDave Kleikamp 	journal_t *journal;
4690ac27a0ecSDave Kleikamp 	handle_t *handle;
4691ac27a0ecSDave Kleikamp 	int err;
4692ac27a0ecSDave Kleikamp 
4693ac27a0ecSDave Kleikamp 	/*
4694ac27a0ecSDave Kleikamp 	 * We have to be very careful here: changing a data block's
4695ac27a0ecSDave Kleikamp 	 * journaling status dynamically is dangerous.  If we write a
4696ac27a0ecSDave Kleikamp 	 * data block to the journal, change the status and then delete
4697ac27a0ecSDave Kleikamp 	 * that block, we risk forgetting to revoke the old log record
4698ac27a0ecSDave Kleikamp 	 * from the journal and so a subsequent replay can corrupt data.
4699ac27a0ecSDave Kleikamp 	 * So, first we make sure that the journal is empty and that
4700ac27a0ecSDave Kleikamp 	 * nobody is changing anything.
4701ac27a0ecSDave Kleikamp 	 */
4702ac27a0ecSDave Kleikamp 
4703617ba13bSMingming Cao 	journal = EXT4_JOURNAL(inode);
47040390131bSFrank Mayhar 	if (!journal)
47050390131bSFrank Mayhar 		return 0;
4706d699594dSDave Hansen 	if (is_journal_aborted(journal))
4707ac27a0ecSDave Kleikamp 		return -EROFS;
47082aff57b0SYongqiang Yang 	/* We have to allocate physical blocks for delalloc blocks
47092aff57b0SYongqiang Yang 	 * before flushing the journal; otherwise delalloc blocks cannot
47102aff57b0SYongqiang Yang 	 * be allocated any more.  Worse, a truncate on delalloc blocks
47112aff57b0SYongqiang Yang 	 * could trigger a BUG by flushing delalloc blocks in the journal.
47122aff57b0SYongqiang Yang 	 * There are no delalloc blocks in non-journal data mode.
47132aff57b0SYongqiang Yang 	 */
47142aff57b0SYongqiang Yang 	if (val && test_opt(inode->i_sb, DELALLOC)) {
47152aff57b0SYongqiang Yang 		err = ext4_alloc_da_blocks(inode);
47162aff57b0SYongqiang Yang 		if (err < 0)
47172aff57b0SYongqiang Yang 			return err;
47182aff57b0SYongqiang Yang 	}
4719ac27a0ecSDave Kleikamp 
4720dab291afSMingming Cao 	jbd2_journal_lock_updates(journal);
4721ac27a0ecSDave Kleikamp 
4722ac27a0ecSDave Kleikamp 	/*
4723ac27a0ecSDave Kleikamp 	 * OK, there are no updates running now, and all cached data is
4724ac27a0ecSDave Kleikamp 	 * synced to disk.  We are now in a completely consistent state
4725ac27a0ecSDave Kleikamp 	 * which doesn't have anything in the journal, and we know that
4726ac27a0ecSDave Kleikamp 	 * no filesystem updates are running, so it is safe to modify
4727ac27a0ecSDave Kleikamp 	 * the inode's in-core data-journaling state flag now.
4728ac27a0ecSDave Kleikamp 	 */
4729ac27a0ecSDave Kleikamp 
4730ac27a0ecSDave Kleikamp 	if (val)
473112e9b892SDmitry Monakhov 		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
47325872ddaaSYongqiang Yang 	else {
47335872ddaaSYongqiang Yang 		jbd2_journal_flush(journal);
473412e9b892SDmitry Monakhov 		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
47355872ddaaSYongqiang Yang 	}
4736617ba13bSMingming Cao 	ext4_set_aops(inode);
4737ac27a0ecSDave Kleikamp 
4738dab291afSMingming Cao 	jbd2_journal_unlock_updates(journal);
4739ac27a0ecSDave Kleikamp 
4740ac27a0ecSDave Kleikamp 	/* Finally we can mark the inode as dirty. */
4741ac27a0ecSDave Kleikamp 
4742617ba13bSMingming Cao 	handle = ext4_journal_start(inode, 1);
4743ac27a0ecSDave Kleikamp 	if (IS_ERR(handle))
4744ac27a0ecSDave Kleikamp 		return PTR_ERR(handle);
4745ac27a0ecSDave Kleikamp 
4746617ba13bSMingming Cao 	err = ext4_mark_inode_dirty(handle, inode);
47470390131bSFrank Mayhar 	ext4_handle_sync(handle);
4748617ba13bSMingming Cao 	ext4_journal_stop(handle);
4749617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4750ac27a0ecSDave Kleikamp 
4751ac27a0ecSDave Kleikamp 	return err;
4752ac27a0ecSDave Kleikamp }
47532e9ee850SAneesh Kumar K.V 
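/*
 * Caller sketch (assumption: the FS_IOC_SETFLAGS ioctl path, which is
 * where the journalled-data flag is normally toggled):
 */
#if 0
	err = ext4_change_inode_journal_flag(inode,
					     flags & EXT4_JOURNAL_DATA_FL);
#endif
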
47542e9ee850SAneesh Kumar K.V static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
47552e9ee850SAneesh Kumar K.V {
47562e9ee850SAneesh Kumar K.V 	return !buffer_mapped(bh);
47572e9ee850SAneesh Kumar K.V }
47582e9ee850SAneesh Kumar K.V 
4759c2ec175cSNick Piggin int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
47602e9ee850SAneesh Kumar K.V {
4761c2ec175cSNick Piggin 	struct page *page = vmf->page;
47622e9ee850SAneesh Kumar K.V 	loff_t size;
47632e9ee850SAneesh Kumar K.V 	unsigned long len;
47649ea7df53SJan Kara 	int ret;
47652e9ee850SAneesh Kumar K.V 	struct file *file = vma->vm_file;
47662e9ee850SAneesh Kumar K.V 	struct inode *inode = file->f_path.dentry->d_inode;
47672e9ee850SAneesh Kumar K.V 	struct address_space *mapping = inode->i_mapping;
47689ea7df53SJan Kara 	handle_t *handle;
47699ea7df53SJan Kara 	get_block_t *get_block;
47709ea7df53SJan Kara 	int retries = 0;
47712e9ee850SAneesh Kumar K.V 
47722e9ee850SAneesh Kumar K.V 	/*
47739ea7df53SJan Kara 	 * This check is racy but catches the common case. We rely on
47749ea7df53SJan Kara 	 * __block_page_mkwrite() to do a reliable check.
47752e9ee850SAneesh Kumar K.V 	 */
47769ea7df53SJan Kara 	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
47779ea7df53SJan Kara 	/* Delalloc case is easy... */
47789ea7df53SJan Kara 	if (test_opt(inode->i_sb, DELALLOC) &&
47799ea7df53SJan Kara 	    !ext4_should_journal_data(inode) &&
47809ea7df53SJan Kara 	    !ext4_nonda_switch(inode->i_sb)) {
47819ea7df53SJan Kara 		do {
47829ea7df53SJan Kara 			ret = __block_page_mkwrite(vma, vmf,
47839ea7df53SJan Kara 						   ext4_da_get_block_prep);
47849ea7df53SJan Kara 		} while (ret == -ENOSPC &&
47859ea7df53SJan Kara 		       ext4_should_retry_alloc(inode->i_sb, &retries));
47869ea7df53SJan Kara 		goto out_ret;
47872e9ee850SAneesh Kumar K.V 	}
47880e499890SDarrick J. Wong 
47890e499890SDarrick J. Wong 	lock_page(page);
47909ea7df53SJan Kara 	size = i_size_read(inode);
47919ea7df53SJan Kara 	/* Page got truncated from under us? */
47929ea7df53SJan Kara 	if (page->mapping != mapping || page_offset(page) > size) {
47939ea7df53SJan Kara 		unlock_page(page);
47949ea7df53SJan Kara 		ret = VM_FAULT_NOPAGE;
47959ea7df53SJan Kara 		goto out;
47960e499890SDarrick J. Wong 	}
47972e9ee850SAneesh Kumar K.V 
47982e9ee850SAneesh Kumar K.V 	if (page->index == size >> PAGE_CACHE_SHIFT)
47992e9ee850SAneesh Kumar K.V 		len = size & ~PAGE_CACHE_MASK;
48002e9ee850SAneesh Kumar K.V 	else
48012e9ee850SAneesh Kumar K.V 		len = PAGE_CACHE_SIZE;
4802a827eaffSAneesh Kumar K.V 	/*
48039ea7df53SJan Kara 	 * Return if we have all the buffers mapped. This avoids the need to do
48049ea7df53SJan Kara 	 * journal_start/journal_stop which can block and take a long time.
4805a827eaffSAneesh Kumar K.V 	 */
48062e9ee850SAneesh Kumar K.V 	if (page_has_buffers(page)) {
48072e9ee850SAneesh Kumar K.V 		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
4808a827eaffSAneesh Kumar K.V 					ext4_bh_unmapped)) {
48099ea7df53SJan Kara 			/* Wait so that we don't change page under IO */
48109ea7df53SJan Kara 			wait_on_page_writeback(page);
48119ea7df53SJan Kara 			ret = VM_FAULT_LOCKED;
48129ea7df53SJan Kara 			goto out;
48132e9ee850SAneesh Kumar K.V 		}
4814a827eaffSAneesh Kumar K.V 	}
4815a827eaffSAneesh Kumar K.V 	unlock_page(page);
48169ea7df53SJan Kara 	/* OK, we need to fill the hole... */
48179ea7df53SJan Kara 	if (ext4_should_dioread_nolock(inode))
48189ea7df53SJan Kara 		get_block = ext4_get_block_write;
48199ea7df53SJan Kara 	else
48209ea7df53SJan Kara 		get_block = ext4_get_block;
48219ea7df53SJan Kara retry_alloc:
48229ea7df53SJan Kara 	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
48239ea7df53SJan Kara 	if (IS_ERR(handle)) {
4824c2ec175cSNick Piggin 		ret = VM_FAULT_SIGBUS;
48259ea7df53SJan Kara 		goto out;
48269ea7df53SJan Kara 	}
48279ea7df53SJan Kara 	ret = __block_page_mkwrite(vma, vmf, get_block);
48289ea7df53SJan Kara 	if (!ret && ext4_should_journal_data(inode)) {
48299ea7df53SJan Kara 		if (walk_page_buffers(handle, page_buffers(page), 0,
48309ea7df53SJan Kara 			  PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
48319ea7df53SJan Kara 			unlock_page(page);
48329ea7df53SJan Kara 			ret = VM_FAULT_SIGBUS;
4833fcbb5515SYongqiang Yang 			ext4_journal_stop(handle);
48349ea7df53SJan Kara 			goto out;
48359ea7df53SJan Kara 		}
48369ea7df53SJan Kara 		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
48379ea7df53SJan Kara 	}
48389ea7df53SJan Kara 	ext4_journal_stop(handle);
48399ea7df53SJan Kara 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
48409ea7df53SJan Kara 		goto retry_alloc;
48419ea7df53SJan Kara out_ret:
48429ea7df53SJan Kara 	ret = block_page_mkwrite_return(ret);
48439ea7df53SJan Kara out:
48442e9ee850SAneesh Kumar K.V 	return ret;
48452e9ee850SAneesh Kumar K.V }
4846