xref: /openbmc/linux/fs/ext4/inode.c (revision f3b59291a69d0b734be1fc8be489fef2dd846d3d)
1ac27a0ecSDave Kleikamp /*
2617ba13bSMingming Cao  *  linux/fs/ext4/inode.c
3ac27a0ecSDave Kleikamp  *
4ac27a0ecSDave Kleikamp  * Copyright (C) 1992, 1993, 1994, 1995
5ac27a0ecSDave Kleikamp  * Remy Card (card@masi.ibp.fr)
6ac27a0ecSDave Kleikamp  * Laboratoire MASI - Institut Blaise Pascal
7ac27a0ecSDave Kleikamp  * Universite Pierre et Marie Curie (Paris VI)
8ac27a0ecSDave Kleikamp  *
9ac27a0ecSDave Kleikamp  *  from
10ac27a0ecSDave Kleikamp  *
11ac27a0ecSDave Kleikamp  *  linux/fs/minix/inode.c
12ac27a0ecSDave Kleikamp  *
13ac27a0ecSDave Kleikamp  *  Copyright (C) 1991, 1992  Linus Torvalds
14ac27a0ecSDave Kleikamp  *
15ac27a0ecSDave Kleikamp  *  64-bit file support on 64-bit platforms by Jakub Jelinek
16ac27a0ecSDave Kleikamp  *	(jj@sunsite.ms.mff.cuni.cz)
17ac27a0ecSDave Kleikamp  *
18617ba13bSMingming Cao  *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
19ac27a0ecSDave Kleikamp  */
20ac27a0ecSDave Kleikamp 
21ac27a0ecSDave Kleikamp #include <linux/fs.h>
22ac27a0ecSDave Kleikamp #include <linux/time.h>
23dab291afSMingming Cao #include <linux/jbd2.h>
24ac27a0ecSDave Kleikamp #include <linux/highuid.h>
25ac27a0ecSDave Kleikamp #include <linux/pagemap.h>
26ac27a0ecSDave Kleikamp #include <linux/quotaops.h>
27ac27a0ecSDave Kleikamp #include <linux/string.h>
28ac27a0ecSDave Kleikamp #include <linux/buffer_head.h>
29ac27a0ecSDave Kleikamp #include <linux/writeback.h>
3064769240SAlex Tomas #include <linux/pagevec.h>
31ac27a0ecSDave Kleikamp #include <linux/mpage.h>
32e83c1397SDuane Griffin #include <linux/namei.h>
33ac27a0ecSDave Kleikamp #include <linux/uio.h>
34ac27a0ecSDave Kleikamp #include <linux/bio.h>
354c0425ffSMingming Cao #include <linux/workqueue.h>
36744692dcSJiaying Zhang #include <linux/kernel.h>
376db26ffcSAndrew Morton #include <linux/printk.h>
385a0e3ad6STejun Heo #include <linux/slab.h>
39a8901d34STheodore Ts'o #include <linux/ratelimit.h>
409bffad1eSTheodore Ts'o 
413dcf5451SChristoph Hellwig #include "ext4_jbd2.h"
42ac27a0ecSDave Kleikamp #include "xattr.h"
43ac27a0ecSDave Kleikamp #include "acl.h"
449f125d64STheodore Ts'o #include "truncate.h"
45ac27a0ecSDave Kleikamp 
469bffad1eSTheodore Ts'o #include <trace/events/ext4.h>
479bffad1eSTheodore Ts'o 
48a1d6cc56SAneesh Kumar K.V #define MPAGE_DA_EXTENT_TAIL 0x01
49a1d6cc56SAneesh Kumar K.V 
50814525f4SDarrick J. Wong static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
51814525f4SDarrick J. Wong 			      struct ext4_inode_info *ei)
52814525f4SDarrick J. Wong {
53814525f4SDarrick J. Wong 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
54814525f4SDarrick J. Wong 	__u16 csum_lo;
55814525f4SDarrick J. Wong 	__u16 csum_hi = 0;
56814525f4SDarrick J. Wong 	__u32 csum;
57814525f4SDarrick J. Wong 
58814525f4SDarrick J. Wong 	csum_lo = raw->i_checksum_lo;
59814525f4SDarrick J. Wong 	raw->i_checksum_lo = 0;
60814525f4SDarrick J. Wong 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
61814525f4SDarrick J. Wong 	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
62814525f4SDarrick J. Wong 		csum_hi = raw->i_checksum_hi;
63814525f4SDarrick J. Wong 		raw->i_checksum_hi = 0;
64814525f4SDarrick J. Wong 	}
65814525f4SDarrick J. Wong 
66814525f4SDarrick J. Wong 	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
67814525f4SDarrick J. Wong 			   EXT4_INODE_SIZE(inode->i_sb));
68814525f4SDarrick J. Wong 
69814525f4SDarrick J. Wong 	raw->i_checksum_lo = csum_lo;
70814525f4SDarrick J. Wong 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
71814525f4SDarrick J. Wong 	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
72814525f4SDarrick J. Wong 		raw->i_checksum_hi = csum_hi;
73814525f4SDarrick J. Wong 
74814525f4SDarrick J. Wong 	return csum;
75814525f4SDarrick J. Wong }
76814525f4SDarrick J. Wong 
77814525f4SDarrick J. Wong static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
78814525f4SDarrick J. Wong 				  struct ext4_inode_info *ei)
79814525f4SDarrick J. Wong {
80814525f4SDarrick J. Wong 	__u32 provided, calculated;
81814525f4SDarrick J. Wong 
82814525f4SDarrick J. Wong 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
83814525f4SDarrick J. Wong 	    cpu_to_le32(EXT4_OS_LINUX) ||
84814525f4SDarrick J. Wong 	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
85814525f4SDarrick J. Wong 		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
86814525f4SDarrick J. Wong 		return 1;
87814525f4SDarrick J. Wong 
88814525f4SDarrick J. Wong 	provided = le16_to_cpu(raw->i_checksum_lo);
89814525f4SDarrick J. Wong 	calculated = ext4_inode_csum(inode, raw, ei);
90814525f4SDarrick J. Wong 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
91814525f4SDarrick J. Wong 	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
92814525f4SDarrick J. Wong 		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
93814525f4SDarrick J. Wong 	else
94814525f4SDarrick J. Wong 		calculated &= 0xFFFF;
95814525f4SDarrick J. Wong 
96814525f4SDarrick J. Wong 	return provided == calculated;
97814525f4SDarrick J. Wong }
98814525f4SDarrick J. Wong 
99814525f4SDarrick J. Wong static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
100814525f4SDarrick J. Wong 				struct ext4_inode_info *ei)
101814525f4SDarrick J. Wong {
102814525f4SDarrick J. Wong 	__u32 csum;
103814525f4SDarrick J. Wong 
104814525f4SDarrick J. Wong 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
105814525f4SDarrick J. Wong 	    cpu_to_le32(EXT4_OS_LINUX) ||
106814525f4SDarrick J. Wong 	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
107814525f4SDarrick J. Wong 		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
108814525f4SDarrick J. Wong 		return;
109814525f4SDarrick J. Wong 
110814525f4SDarrick J. Wong 	csum = ext4_inode_csum(inode, raw, ei);
111814525f4SDarrick J. Wong 	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
112814525f4SDarrick J. Wong 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
113814525f4SDarrick J. Wong 	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
114814525f4SDarrick J. Wong 		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
115814525f4SDarrick J. Wong }
116814525f4SDarrick J. Wong 
117678aaf48SJan Kara static inline int ext4_begin_ordered_truncate(struct inode *inode,
118678aaf48SJan Kara 					      loff_t new_size)
119678aaf48SJan Kara {
1207ff9c073STheodore Ts'o 	trace_ext4_begin_ordered_truncate(inode, new_size);
1218aefcd55STheodore Ts'o 	/*
1228aefcd55STheodore Ts'o 	 * If jinode is zero, then we never opened the file for
1238aefcd55STheodore Ts'o 	 * writing, so there's no need to call
1248aefcd55STheodore Ts'o 	 * jbd2_journal_begin_ordered_truncate() since there's no
1258aefcd55STheodore Ts'o 	 * outstanding writes we need to flush.
1268aefcd55STheodore Ts'o 	 */
1278aefcd55STheodore Ts'o 	if (!EXT4_I(inode)->jinode)
1288aefcd55STheodore Ts'o 		return 0;
1298aefcd55STheodore Ts'o 	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
1308aefcd55STheodore Ts'o 						   EXT4_I(inode)->jinode,
131678aaf48SJan Kara 						   new_size);
132678aaf48SJan Kara }
133678aaf48SJan Kara 
13464769240SAlex Tomas static void ext4_invalidatepage(struct page *page, unsigned long offset);
135cb20d518STheodore Ts'o static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
136cb20d518STheodore Ts'o 				   struct buffer_head *bh_result, int create);
137cb20d518STheodore Ts'o static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
138cb20d518STheodore Ts'o static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
139cb20d518STheodore Ts'o static int __ext4_journalled_writepage(struct page *page, unsigned int len);
140cb20d518STheodore Ts'o static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
1415f163cc7SEric Sandeen static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
1425f163cc7SEric Sandeen 		struct inode *inode, struct page *page, loff_t from,
1435f163cc7SEric Sandeen 		loff_t length, int flags);
14464769240SAlex Tomas 
145ac27a0ecSDave Kleikamp /*
146ac27a0ecSDave Kleikamp  * Test whether an inode is a fast symlink.
147ac27a0ecSDave Kleikamp  */
148617ba13bSMingming Cao static int ext4_inode_is_fast_symlink(struct inode *inode)
149ac27a0ecSDave Kleikamp {
150617ba13bSMingming Cao 	int ea_blocks = EXT4_I(inode)->i_file_acl ?
151ac27a0ecSDave Kleikamp 		(inode->i_sb->s_blocksize >> 9) : 0;
152ac27a0ecSDave Kleikamp 
153ac27a0ecSDave Kleikamp 	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
154ac27a0ecSDave Kleikamp }
155ac27a0ecSDave Kleikamp 
156ac27a0ecSDave Kleikamp /*
157ac27a0ecSDave Kleikamp  * Restart the transaction associated with *handle.  This does a commit,
158ac27a0ecSDave Kleikamp  * so before we call here everything must be consistently dirtied against
159ac27a0ecSDave Kleikamp  * this transaction.
160ac27a0ecSDave Kleikamp  */
161487caeefSJan Kara int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
162487caeefSJan Kara 				 int nblocks)
163ac27a0ecSDave Kleikamp {
164487caeefSJan Kara 	int ret;
165487caeefSJan Kara 
166487caeefSJan Kara 	/*
167e35fd660STheodore Ts'o 	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
168487caeefSJan Kara 	 * moment, get_block can be called only for blocks inside i_size since
169487caeefSJan Kara 	 * page cache has been already dropped and writes are blocked by
170487caeefSJan Kara 	 * i_mutex. So we can safely drop the i_data_sem here.
171487caeefSJan Kara 	 */
1720390131bSFrank Mayhar 	BUG_ON(EXT4_JOURNAL(inode) == NULL);
173ac27a0ecSDave Kleikamp 	jbd_debug(2, "restarting handle %p\n", handle);
174487caeefSJan Kara 	up_write(&EXT4_I(inode)->i_data_sem);
1758e8eaabeSAmir Goldstein 	ret = ext4_journal_restart(handle, nblocks);
176487caeefSJan Kara 	down_write(&EXT4_I(inode)->i_data_sem);
177fa5d1113SAneesh Kumar K.V 	ext4_discard_preallocations(inode);
178487caeefSJan Kara 
179487caeefSJan Kara 	return ret;
180ac27a0ecSDave Kleikamp }
181ac27a0ecSDave Kleikamp 
182ac27a0ecSDave Kleikamp /*
183ac27a0ecSDave Kleikamp  * Called at the last iput() if i_nlink is zero.
184ac27a0ecSDave Kleikamp  */
1850930fcc1SAl Viro void ext4_evict_inode(struct inode *inode)
186ac27a0ecSDave Kleikamp {
187ac27a0ecSDave Kleikamp 	handle_t *handle;
188bc965ab3STheodore Ts'o 	int err;
189ac27a0ecSDave Kleikamp 
1907ff9c073STheodore Ts'o 	trace_ext4_evict_inode(inode);
1912581fdc8SJiaying Zhang 
1922581fdc8SJiaying Zhang 	ext4_ioend_wait(inode);
1932581fdc8SJiaying Zhang 
1940930fcc1SAl Viro 	if (inode->i_nlink) {
1952d859db3SJan Kara 		/*
1962d859db3SJan Kara 		 * When journalling data dirty buffers are tracked only in the
1972d859db3SJan Kara 		 * journal. So although mm thinks everything is clean and
1982d859db3SJan Kara 		 * ready for reaping the inode might still have some pages to
1992d859db3SJan Kara 		 * write in the running transaction or waiting to be
2002d859db3SJan Kara 		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
2012d859db3SJan Kara 		 * (via truncate_inode_pages()) to discard these buffers can
2022d859db3SJan Kara 		 * cause data loss. Also even if we did not discard these
2032d859db3SJan Kara 		 * buffers, we would have no way to find them after the inode
2042d859db3SJan Kara 		 * is reaped and thus user could see stale data if he tries to
2052d859db3SJan Kara 		 * read them before the transaction is checkpointed. So be
2062d859db3SJan Kara 		 * careful and force everything to disk here... We use
2072d859db3SJan Kara 		 * ei->i_datasync_tid to store the newest transaction
2082d859db3SJan Kara 		 * containing inode's data.
2092d859db3SJan Kara 		 *
2102d859db3SJan Kara 		 * Note that directories do not have this problem because they
2112d859db3SJan Kara 		 * don't use page cache.
2122d859db3SJan Kara 		 */
2132d859db3SJan Kara 		if (ext4_should_journal_data(inode) &&
2142d859db3SJan Kara 		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
2152d859db3SJan Kara 			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
2162d859db3SJan Kara 			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
2172d859db3SJan Kara 
2182d859db3SJan Kara 			jbd2_log_start_commit(journal, commit_tid);
2192d859db3SJan Kara 			jbd2_log_wait_commit(journal, commit_tid);
2202d859db3SJan Kara 			filemap_write_and_wait(&inode->i_data);
2212d859db3SJan Kara 		}
2220930fcc1SAl Viro 		truncate_inode_pages(&inode->i_data, 0);
2230930fcc1SAl Viro 		goto no_delete;
2240930fcc1SAl Viro 	}
2250930fcc1SAl Viro 
226907f4554SChristoph Hellwig 	if (!is_bad_inode(inode))
227871a2931SChristoph Hellwig 		dquot_initialize(inode);
228907f4554SChristoph Hellwig 
229678aaf48SJan Kara 	if (ext4_should_order_data(inode))
230678aaf48SJan Kara 		ext4_begin_ordered_truncate(inode, 0);
231ac27a0ecSDave Kleikamp 	truncate_inode_pages(&inode->i_data, 0);
232ac27a0ecSDave Kleikamp 
233ac27a0ecSDave Kleikamp 	if (is_bad_inode(inode))
234ac27a0ecSDave Kleikamp 		goto no_delete;
235ac27a0ecSDave Kleikamp 
2368e8ad8a5SJan Kara 	/*
2378e8ad8a5SJan Kara 	 * Protect us against freezing - iput() caller didn't have to have any
2388e8ad8a5SJan Kara 	 * protection against it
2398e8ad8a5SJan Kara 	 */
2408e8ad8a5SJan Kara 	sb_start_intwrite(inode->i_sb);
2419f125d64STheodore Ts'o 	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
242ac27a0ecSDave Kleikamp 	if (IS_ERR(handle)) {
243bc965ab3STheodore Ts'o 		ext4_std_error(inode->i_sb, PTR_ERR(handle));
244ac27a0ecSDave Kleikamp 		/*
245ac27a0ecSDave Kleikamp 		 * If we're going to skip the normal cleanup, we still need to
246ac27a0ecSDave Kleikamp 		 * make sure that the in-core orphan linked list is properly
247ac27a0ecSDave Kleikamp 		 * cleaned up.
248ac27a0ecSDave Kleikamp 		 */
249617ba13bSMingming Cao 		ext4_orphan_del(NULL, inode);
2508e8ad8a5SJan Kara 		sb_end_intwrite(inode->i_sb);
251ac27a0ecSDave Kleikamp 		goto no_delete;
252ac27a0ecSDave Kleikamp 	}
253ac27a0ecSDave Kleikamp 
254ac27a0ecSDave Kleikamp 	if (IS_SYNC(inode))
2550390131bSFrank Mayhar 		ext4_handle_sync(handle);
256ac27a0ecSDave Kleikamp 	inode->i_size = 0;
257bc965ab3STheodore Ts'o 	err = ext4_mark_inode_dirty(handle, inode);
258bc965ab3STheodore Ts'o 	if (err) {
25912062dddSEric Sandeen 		ext4_warning(inode->i_sb,
260bc965ab3STheodore Ts'o 			     "couldn't mark inode dirty (err %d)", err);
261bc965ab3STheodore Ts'o 		goto stop_handle;
262bc965ab3STheodore Ts'o 	}
263ac27a0ecSDave Kleikamp 	if (inode->i_blocks)
264617ba13bSMingming Cao 		ext4_truncate(inode);
265bc965ab3STheodore Ts'o 
266bc965ab3STheodore Ts'o 	/*
267bc965ab3STheodore Ts'o 	 * ext4_ext_truncate() doesn't reserve any slop when it
268bc965ab3STheodore Ts'o 	 * restarts journal transactions; therefore there may not be
269bc965ab3STheodore Ts'o 	 * enough credits left in the handle to remove the inode from
270bc965ab3STheodore Ts'o 	 * the orphan list and set the dtime field.
271bc965ab3STheodore Ts'o 	 */
2720390131bSFrank Mayhar 	if (!ext4_handle_has_enough_credits(handle, 3)) {
273bc965ab3STheodore Ts'o 		err = ext4_journal_extend(handle, 3);
274bc965ab3STheodore Ts'o 		if (err > 0)
275bc965ab3STheodore Ts'o 			err = ext4_journal_restart(handle, 3);
276bc965ab3STheodore Ts'o 		if (err != 0) {
27712062dddSEric Sandeen 			ext4_warning(inode->i_sb,
278bc965ab3STheodore Ts'o 				     "couldn't extend journal (err %d)", err);
279bc965ab3STheodore Ts'o 		stop_handle:
280bc965ab3STheodore Ts'o 			ext4_journal_stop(handle);
28145388219STheodore Ts'o 			ext4_orphan_del(NULL, inode);
2828e8ad8a5SJan Kara 			sb_end_intwrite(inode->i_sb);
283bc965ab3STheodore Ts'o 			goto no_delete;
284bc965ab3STheodore Ts'o 		}
285bc965ab3STheodore Ts'o 	}
286bc965ab3STheodore Ts'o 
287ac27a0ecSDave Kleikamp 	/*
288617ba13bSMingming Cao 	 * Kill off the orphan record which ext4_truncate created.
289ac27a0ecSDave Kleikamp 	 * AKPM: I think this can be inside the above `if'.
290617ba13bSMingming Cao 	 * Note that ext4_orphan_del() has to be able to cope with the
291ac27a0ecSDave Kleikamp 	 * deletion of a non-existent orphan - this is because we don't
292617ba13bSMingming Cao 	 * know if ext4_truncate() actually created an orphan record.
293ac27a0ecSDave Kleikamp 	 * (Well, we could do this if we need to, but heck - it works)
294ac27a0ecSDave Kleikamp 	 */
295617ba13bSMingming Cao 	ext4_orphan_del(handle, inode);
296617ba13bSMingming Cao 	EXT4_I(inode)->i_dtime	= get_seconds();
297ac27a0ecSDave Kleikamp 
298ac27a0ecSDave Kleikamp 	/*
299ac27a0ecSDave Kleikamp 	 * One subtle ordering requirement: if anything has gone wrong
300ac27a0ecSDave Kleikamp 	 * (transaction abort, IO errors, whatever), then we can still
301ac27a0ecSDave Kleikamp 	 * do these next steps (the fs will already have been marked as
302ac27a0ecSDave Kleikamp 	 * having errors), but we can't free the inode if the mark_dirty
303ac27a0ecSDave Kleikamp 	 * fails.
304ac27a0ecSDave Kleikamp 	 */
305617ba13bSMingming Cao 	if (ext4_mark_inode_dirty(handle, inode))
306ac27a0ecSDave Kleikamp 		/* If that failed, just do the required in-core inode clear. */
3070930fcc1SAl Viro 		ext4_clear_inode(inode);
308ac27a0ecSDave Kleikamp 	else
309617ba13bSMingming Cao 		ext4_free_inode(handle, inode);
310617ba13bSMingming Cao 	ext4_journal_stop(handle);
3118e8ad8a5SJan Kara 	sb_end_intwrite(inode->i_sb);
312ac27a0ecSDave Kleikamp 	return;
313ac27a0ecSDave Kleikamp no_delete:
3140930fcc1SAl Viro 	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
315ac27a0ecSDave Kleikamp }
316ac27a0ecSDave Kleikamp 
317a9e7f447SDmitry Monakhov #ifdef CONFIG_QUOTA
318a9e7f447SDmitry Monakhov qsize_t *ext4_get_reserved_space(struct inode *inode)
31960e58e0fSMingming Cao {
320a9e7f447SDmitry Monakhov 	return &EXT4_I(inode)->i_reserved_quota;
32160e58e0fSMingming Cao }
322a9e7f447SDmitry Monakhov #endif
3239d0be502STheodore Ts'o 
32412219aeaSAneesh Kumar K.V /*
32512219aeaSAneesh Kumar K.V  * Calculate the number of metadata blocks need to reserve
3269d0be502STheodore Ts'o  * to allocate a block located at @lblock
32712219aeaSAneesh Kumar K.V  */
32801f49d0bSTheodore Ts'o static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
32912219aeaSAneesh Kumar K.V {
33012e9b892SDmitry Monakhov 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3319d0be502STheodore Ts'o 		return ext4_ext_calc_metadata_amount(inode, lblock);
33212219aeaSAneesh Kumar K.V 
3338bb2b247SAmir Goldstein 	return ext4_ind_calc_metadata_amount(inode, lblock);
33412219aeaSAneesh Kumar K.V }
33512219aeaSAneesh Kumar K.V 
3360637c6f4STheodore Ts'o /*
3370637c6f4STheodore Ts'o  * Called with i_data_sem down, which is important since we can call
3380637c6f4STheodore Ts'o  * ext4_discard_preallocations() from here.
3390637c6f4STheodore Ts'o  */
3405f634d06SAneesh Kumar K.V void ext4_da_update_reserve_space(struct inode *inode,
3415f634d06SAneesh Kumar K.V 					int used, int quota_claim)
34212219aeaSAneesh Kumar K.V {
34312219aeaSAneesh Kumar K.V 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3440637c6f4STheodore Ts'o 	struct ext4_inode_info *ei = EXT4_I(inode);
34512219aeaSAneesh Kumar K.V 
3460637c6f4STheodore Ts'o 	spin_lock(&ei->i_block_reservation_lock);
347d8990240SAditya Kali 	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
3480637c6f4STheodore Ts'o 	if (unlikely(used > ei->i_reserved_data_blocks)) {
3490637c6f4STheodore Ts'o 		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
3501084f252STheodore Ts'o 			 "with only %d reserved data blocks",
3510637c6f4STheodore Ts'o 			 __func__, inode->i_ino, used,
3520637c6f4STheodore Ts'o 			 ei->i_reserved_data_blocks);
3530637c6f4STheodore Ts'o 		WARN_ON(1);
3540637c6f4STheodore Ts'o 		used = ei->i_reserved_data_blocks;
3556bc6e63fSAneesh Kumar K.V 	}
35612219aeaSAneesh Kumar K.V 
35797795d2aSBrian Foster 	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
35897795d2aSBrian Foster 		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
35997795d2aSBrian Foster 			 "with only %d reserved metadata blocks\n", __func__,
36097795d2aSBrian Foster 			 inode->i_ino, ei->i_allocated_meta_blocks,
36197795d2aSBrian Foster 			 ei->i_reserved_meta_blocks);
36297795d2aSBrian Foster 		WARN_ON(1);
36397795d2aSBrian Foster 		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
36497795d2aSBrian Foster 	}
36597795d2aSBrian Foster 
3660637c6f4STheodore Ts'o 	/* Update per-inode reservations */
3670637c6f4STheodore Ts'o 	ei->i_reserved_data_blocks -= used;
3680637c6f4STheodore Ts'o 	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
36957042651STheodore Ts'o 	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
37072b8ab9dSEric Sandeen 			   used + ei->i_allocated_meta_blocks);
3710637c6f4STheodore Ts'o 	ei->i_allocated_meta_blocks = 0;
3720637c6f4STheodore Ts'o 
3730637c6f4STheodore Ts'o 	if (ei->i_reserved_data_blocks == 0) {
3740637c6f4STheodore Ts'o 		/*
3750637c6f4STheodore Ts'o 		 * We can release all of the reserved metadata blocks
3760637c6f4STheodore Ts'o 		 * only when we have written all of the delayed
3770637c6f4STheodore Ts'o 		 * allocation blocks.
3780637c6f4STheodore Ts'o 		 */
37957042651STheodore Ts'o 		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
38072b8ab9dSEric Sandeen 				   ei->i_reserved_meta_blocks);
381ee5f4d9cSTheodore Ts'o 		ei->i_reserved_meta_blocks = 0;
3829d0be502STheodore Ts'o 		ei->i_da_metadata_calc_len = 0;
3830637c6f4STheodore Ts'o 	}
38412219aeaSAneesh Kumar K.V 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
38560e58e0fSMingming Cao 
38672b8ab9dSEric Sandeen 	/* Update quota subsystem for data blocks */
38772b8ab9dSEric Sandeen 	if (quota_claim)
3887b415bf6SAditya Kali 		dquot_claim_block(inode, EXT4_C2B(sbi, used));
38972b8ab9dSEric Sandeen 	else {
3905f634d06SAneesh Kumar K.V 		/*
3915f634d06SAneesh Kumar K.V 		 * We did fallocate with an offset that is already delayed
3925f634d06SAneesh Kumar K.V 		 * allocated. So on delayed allocated writeback we should
39372b8ab9dSEric Sandeen 		 * not re-claim the quota for fallocated blocks.
3945f634d06SAneesh Kumar K.V 		 */
3957b415bf6SAditya Kali 		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
3965f634d06SAneesh Kumar K.V 	}
397d6014301SAneesh Kumar K.V 
398d6014301SAneesh Kumar K.V 	/*
399d6014301SAneesh Kumar K.V 	 * If we have done all the pending block allocations and if
400d6014301SAneesh Kumar K.V 	 * there aren't any writers on the inode, we can discard the
401d6014301SAneesh Kumar K.V 	 * inode's preallocations.
402d6014301SAneesh Kumar K.V 	 */
4030637c6f4STheodore Ts'o 	if ((ei->i_reserved_data_blocks == 0) &&
4040637c6f4STheodore Ts'o 	    (atomic_read(&inode->i_writecount) == 0))
405d6014301SAneesh Kumar K.V 		ext4_discard_preallocations(inode);
40612219aeaSAneesh Kumar K.V }
40712219aeaSAneesh Kumar K.V 
408e29136f8STheodore Ts'o static int __check_block_validity(struct inode *inode, const char *func,
409c398eda0STheodore Ts'o 				unsigned int line,
41024676da4STheodore Ts'o 				struct ext4_map_blocks *map)
4116fd058f7STheodore Ts'o {
41224676da4STheodore Ts'o 	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
41324676da4STheodore Ts'o 				   map->m_len)) {
414c398eda0STheodore Ts'o 		ext4_error_inode(inode, func, line, map->m_pblk,
415c398eda0STheodore Ts'o 				 "lblock %lu mapped to illegal pblock "
41624676da4STheodore Ts'o 				 "(length %d)", (unsigned long) map->m_lblk,
417c398eda0STheodore Ts'o 				 map->m_len);
4186fd058f7STheodore Ts'o 		return -EIO;
4196fd058f7STheodore Ts'o 	}
4206fd058f7STheodore Ts'o 	return 0;
4216fd058f7STheodore Ts'o }
4226fd058f7STheodore Ts'o 
423e29136f8STheodore Ts'o #define check_block_validity(inode, map)	\
424c398eda0STheodore Ts'o 	__check_block_validity((inode), __func__, __LINE__, (map))
425e29136f8STheodore Ts'o 
426f5ab0d1fSMingming Cao /*
4271f94533dSTheodore Ts'o  * Return the number of contiguous dirty pages in a given inode
4281f94533dSTheodore Ts'o  * starting at page frame idx.
42955138e0bSTheodore Ts'o  */
43055138e0bSTheodore Ts'o static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
43155138e0bSTheodore Ts'o 				    unsigned int max_pages)
43255138e0bSTheodore Ts'o {
43355138e0bSTheodore Ts'o 	struct address_space *mapping = inode->i_mapping;
43455138e0bSTheodore Ts'o 	pgoff_t	index;
43555138e0bSTheodore Ts'o 	struct pagevec pvec;
43655138e0bSTheodore Ts'o 	pgoff_t num = 0;
43755138e0bSTheodore Ts'o 	int i, nr_pages, done = 0;
43855138e0bSTheodore Ts'o 
43955138e0bSTheodore Ts'o 	if (max_pages == 0)
44055138e0bSTheodore Ts'o 		return 0;
44155138e0bSTheodore Ts'o 	pagevec_init(&pvec, 0);
44255138e0bSTheodore Ts'o 	while (!done) {
44355138e0bSTheodore Ts'o 		index = idx;
44455138e0bSTheodore Ts'o 		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
44555138e0bSTheodore Ts'o 					      PAGECACHE_TAG_DIRTY,
44655138e0bSTheodore Ts'o 					      (pgoff_t)PAGEVEC_SIZE);
44755138e0bSTheodore Ts'o 		if (nr_pages == 0)
44855138e0bSTheodore Ts'o 			break;
44955138e0bSTheodore Ts'o 		for (i = 0; i < nr_pages; i++) {
45055138e0bSTheodore Ts'o 			struct page *page = pvec.pages[i];
45155138e0bSTheodore Ts'o 			struct buffer_head *bh, *head;
45255138e0bSTheodore Ts'o 
45355138e0bSTheodore Ts'o 			lock_page(page);
45455138e0bSTheodore Ts'o 			if (unlikely(page->mapping != mapping) ||
45555138e0bSTheodore Ts'o 			    !PageDirty(page) ||
45655138e0bSTheodore Ts'o 			    PageWriteback(page) ||
45755138e0bSTheodore Ts'o 			    page->index != idx) {
45855138e0bSTheodore Ts'o 				done = 1;
45955138e0bSTheodore Ts'o 				unlock_page(page);
46055138e0bSTheodore Ts'o 				break;
46155138e0bSTheodore Ts'o 			}
4621f94533dSTheodore Ts'o 			if (page_has_buffers(page)) {
4631f94533dSTheodore Ts'o 				bh = head = page_buffers(page);
46455138e0bSTheodore Ts'o 				do {
46555138e0bSTheodore Ts'o 					if (!buffer_delay(bh) &&
4661f94533dSTheodore Ts'o 					    !buffer_unwritten(bh))
46755138e0bSTheodore Ts'o 						done = 1;
4681f94533dSTheodore Ts'o 					bh = bh->b_this_page;
4691f94533dSTheodore Ts'o 				} while (!done && (bh != head));
47055138e0bSTheodore Ts'o 			}
47155138e0bSTheodore Ts'o 			unlock_page(page);
47255138e0bSTheodore Ts'o 			if (done)
47355138e0bSTheodore Ts'o 				break;
47455138e0bSTheodore Ts'o 			idx++;
47555138e0bSTheodore Ts'o 			num++;
476659c6009SEric Sandeen 			if (num >= max_pages) {
477659c6009SEric Sandeen 				done = 1;
47855138e0bSTheodore Ts'o 				break;
47955138e0bSTheodore Ts'o 			}
480659c6009SEric Sandeen 		}
48155138e0bSTheodore Ts'o 		pagevec_release(&pvec);
48255138e0bSTheodore Ts'o 	}
48355138e0bSTheodore Ts'o 	return num;
48455138e0bSTheodore Ts'o }
48555138e0bSTheodore Ts'o 
48655138e0bSTheodore Ts'o /*
487e35fd660STheodore Ts'o  * The ext4_map_blocks() function tries to look up the requested blocks,
4882b2d6d01STheodore Ts'o  * and returns if the blocks are already mapped.
489f5ab0d1fSMingming Cao  *
490f5ab0d1fSMingming Cao  * Otherwise it takes the write lock of the i_data_sem and allocate blocks
491f5ab0d1fSMingming Cao  * and store the allocated blocks in the result buffer head and mark it
492f5ab0d1fSMingming Cao  * mapped.
493f5ab0d1fSMingming Cao  *
494e35fd660STheodore Ts'o  * If file type is extents based, it will call ext4_ext_map_blocks(),
495e35fd660STheodore Ts'o  * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
496f5ab0d1fSMingming Cao  * based files
497f5ab0d1fSMingming Cao  *
498f5ab0d1fSMingming Cao  * On success, it returns the number of blocks being mapped or allocate.
499f5ab0d1fSMingming Cao  * if create==0 and the blocks are pre-allocated and uninitialized block,
500f5ab0d1fSMingming Cao  * the result buffer head is unmapped. If the create ==1, it will make sure
501f5ab0d1fSMingming Cao  * the buffer head is mapped.
502f5ab0d1fSMingming Cao  *
503f5ab0d1fSMingming Cao  * It returns 0 if plain look up failed (blocks have not been allocated), in
504df3ab170STao Ma  * that case, buffer head is unmapped
505f5ab0d1fSMingming Cao  *
506f5ab0d1fSMingming Cao  * It returns the error in case of allocation failure.
507f5ab0d1fSMingming Cao  */
508e35fd660STheodore Ts'o int ext4_map_blocks(handle_t *handle, struct inode *inode,
509e35fd660STheodore Ts'o 		    struct ext4_map_blocks *map, int flags)
5100e855ac8SAneesh Kumar K.V {
5110e855ac8SAneesh Kumar K.V 	int retval;
512f5ab0d1fSMingming Cao 
513e35fd660STheodore Ts'o 	map->m_flags = 0;
514e35fd660STheodore Ts'o 	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
515e35fd660STheodore Ts'o 		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
516e35fd660STheodore Ts'o 		  (unsigned long) map->m_lblk);
5174df3d265SAneesh Kumar K.V 	/*
518b920c755STheodore Ts'o 	 * Try to see if we can get the block without requesting a new
519b920c755STheodore Ts'o 	 * file system block.
5204df3d265SAneesh Kumar K.V 	 */
521729f52c6SZheng Liu 	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
5220e855ac8SAneesh Kumar K.V 		down_read((&EXT4_I(inode)->i_data_sem));
52312e9b892SDmitry Monakhov 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
524a4e5d88bSDmitry Monakhov 		retval = ext4_ext_map_blocks(handle, inode, map, flags &
525a4e5d88bSDmitry Monakhov 					     EXT4_GET_BLOCKS_KEEP_SIZE);
5264df3d265SAneesh Kumar K.V 	} else {
527a4e5d88bSDmitry Monakhov 		retval = ext4_ind_map_blocks(handle, inode, map, flags &
528a4e5d88bSDmitry Monakhov 					     EXT4_GET_BLOCKS_KEEP_SIZE);
5290e855ac8SAneesh Kumar K.V 	}
530729f52c6SZheng Liu 	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
5314df3d265SAneesh Kumar K.V 		up_read((&EXT4_I(inode)->i_data_sem));
532f5ab0d1fSMingming Cao 
533e35fd660STheodore Ts'o 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
53451865fdaSZheng Liu 		int ret;
53551865fdaSZheng Liu 		if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
53651865fdaSZheng Liu 			/* delayed alloc may be allocated by fallocate and
53751865fdaSZheng Liu 			 * coverted to initialized by directIO.
53851865fdaSZheng Liu 			 * we need to handle delayed extent here.
53951865fdaSZheng Liu 			 */
54051865fdaSZheng Liu 			down_write((&EXT4_I(inode)->i_data_sem));
54151865fdaSZheng Liu 			goto delayed_mapped;
54251865fdaSZheng Liu 		}
54351865fdaSZheng Liu 		ret = check_block_validity(inode, map);
5446fd058f7STheodore Ts'o 		if (ret != 0)
5456fd058f7STheodore Ts'o 			return ret;
5466fd058f7STheodore Ts'o 	}
5476fd058f7STheodore Ts'o 
548f5ab0d1fSMingming Cao 	/* If it is only a block(s) look up */
549c2177057STheodore Ts'o 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
5504df3d265SAneesh Kumar K.V 		return retval;
5514df3d265SAneesh Kumar K.V 
5524df3d265SAneesh Kumar K.V 	/*
553f5ab0d1fSMingming Cao 	 * Returns if the blocks have already allocated
554f5ab0d1fSMingming Cao 	 *
555f5ab0d1fSMingming Cao 	 * Note that if blocks have been preallocated
556df3ab170STao Ma 	 * ext4_ext_get_block() returns the create = 0
557f5ab0d1fSMingming Cao 	 * with buffer head unmapped.
558f5ab0d1fSMingming Cao 	 */
559e35fd660STheodore Ts'o 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
560f5ab0d1fSMingming Cao 		return retval;
561f5ab0d1fSMingming Cao 
562f5ab0d1fSMingming Cao 	/*
5632a8964d6SAneesh Kumar K.V 	 * When we call get_blocks without the create flag, the
5642a8964d6SAneesh Kumar K.V 	 * BH_Unwritten flag could have gotten set if the blocks
5652a8964d6SAneesh Kumar K.V 	 * requested were part of a uninitialized extent.  We need to
5662a8964d6SAneesh Kumar K.V 	 * clear this flag now that we are committed to convert all or
5672a8964d6SAneesh Kumar K.V 	 * part of the uninitialized extent to be an initialized
5682a8964d6SAneesh Kumar K.V 	 * extent.  This is because we need to avoid the combination
5692a8964d6SAneesh Kumar K.V 	 * of BH_Unwritten and BH_Mapped flags being simultaneously
5702a8964d6SAneesh Kumar K.V 	 * set on the buffer_head.
5712a8964d6SAneesh Kumar K.V 	 */
572e35fd660STheodore Ts'o 	map->m_flags &= ~EXT4_MAP_UNWRITTEN;
5732a8964d6SAneesh Kumar K.V 
5742a8964d6SAneesh Kumar K.V 	/*
575f5ab0d1fSMingming Cao 	 * New blocks allocate and/or writing to uninitialized extent
576f5ab0d1fSMingming Cao 	 * will possibly result in updating i_data, so we take
577f5ab0d1fSMingming Cao 	 * the write lock of i_data_sem, and call get_blocks()
578f5ab0d1fSMingming Cao 	 * with create == 1 flag.
5794df3d265SAneesh Kumar K.V 	 */
5804df3d265SAneesh Kumar K.V 	down_write((&EXT4_I(inode)->i_data_sem));
581d2a17637SMingming Cao 
582d2a17637SMingming Cao 	/*
583d2a17637SMingming Cao 	 * if the caller is from delayed allocation writeout path
584d2a17637SMingming Cao 	 * we have already reserved fs blocks for allocation
585d2a17637SMingming Cao 	 * let the underlying get_block() function know to
586d2a17637SMingming Cao 	 * avoid double accounting
587d2a17637SMingming Cao 	 */
588c2177057STheodore Ts'o 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
589f2321097STheodore Ts'o 		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
5904df3d265SAneesh Kumar K.V 	/*
5914df3d265SAneesh Kumar K.V 	 * We need to check for EXT4 here because migrate
5924df3d265SAneesh Kumar K.V 	 * could have changed the inode type in between
5934df3d265SAneesh Kumar K.V 	 */
59412e9b892SDmitry Monakhov 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
595e35fd660STheodore Ts'o 		retval = ext4_ext_map_blocks(handle, inode, map, flags);
5960e855ac8SAneesh Kumar K.V 	} else {
597e35fd660STheodore Ts'o 		retval = ext4_ind_map_blocks(handle, inode, map, flags);
598267e4db9SAneesh Kumar K.V 
599e35fd660STheodore Ts'o 		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
600267e4db9SAneesh Kumar K.V 			/*
601267e4db9SAneesh Kumar K.V 			 * We allocated new blocks which will result in
602267e4db9SAneesh Kumar K.V 			 * i_data's format changing.  Force the migrate
603267e4db9SAneesh Kumar K.V 			 * to fail by clearing migrate flags
604267e4db9SAneesh Kumar K.V 			 */
60519f5fb7aSTheodore Ts'o 			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
606267e4db9SAneesh Kumar K.V 		}
6072ac3b6e0STheodore Ts'o 
608d2a17637SMingming Cao 		/*
6092ac3b6e0STheodore Ts'o 		 * Update reserved blocks/metadata blocks after successful
6105f634d06SAneesh Kumar K.V 		 * block allocation which had been deferred till now. We don't
6115f634d06SAneesh Kumar K.V 		 * support fallocate for non extent files. So we can update
6125f634d06SAneesh Kumar K.V 		 * reserve space here.
613d2a17637SMingming Cao 		 */
6145f634d06SAneesh Kumar K.V 		if ((retval > 0) &&
6151296cc85SAneesh Kumar K.V 			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
6165f634d06SAneesh Kumar K.V 			ext4_da_update_reserve_space(inode, retval, 1);
6175f634d06SAneesh Kumar K.V 	}
6185356f261SAditya Kali 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
619f2321097STheodore Ts'o 		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
620d2a17637SMingming Cao 
62151865fdaSZheng Liu 		if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
62251865fdaSZheng Liu 			int ret;
62351865fdaSZheng Liu delayed_mapped:
62451865fdaSZheng Liu 			/* delayed allocation blocks has been allocated */
62551865fdaSZheng Liu 			ret = ext4_es_remove_extent(inode, map->m_lblk,
62651865fdaSZheng Liu 						    map->m_len);
62751865fdaSZheng Liu 			if (ret < 0)
62851865fdaSZheng Liu 				retval = ret;
62951865fdaSZheng Liu 		}
6305356f261SAditya Kali 	}
6315356f261SAditya Kali 
6320e855ac8SAneesh Kumar K.V 	up_write((&EXT4_I(inode)->i_data_sem));
633e35fd660STheodore Ts'o 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
634e29136f8STheodore Ts'o 		int ret = check_block_validity(inode, map);
6356fd058f7STheodore Ts'o 		if (ret != 0)
6366fd058f7STheodore Ts'o 			return ret;
6376fd058f7STheodore Ts'o 	}
6380e855ac8SAneesh Kumar K.V 	return retval;
6390e855ac8SAneesh Kumar K.V }
6400e855ac8SAneesh Kumar K.V 
641f3bd1f3fSMingming Cao /* Maximum number of blocks we map for direct IO at once. */
642f3bd1f3fSMingming Cao #define DIO_MAX_BLOCKS 4096
643f3bd1f3fSMingming Cao 
6442ed88685STheodore Ts'o static int _ext4_get_block(struct inode *inode, sector_t iblock,
6452ed88685STheodore Ts'o 			   struct buffer_head *bh, int flags)
646ac27a0ecSDave Kleikamp {
6473e4fdaf8SDmitriy Monakhov 	handle_t *handle = ext4_journal_current_handle();
6482ed88685STheodore Ts'o 	struct ext4_map_blocks map;
6497fb5409dSJan Kara 	int ret = 0, started = 0;
650f3bd1f3fSMingming Cao 	int dio_credits;
651ac27a0ecSDave Kleikamp 
6522ed88685STheodore Ts'o 	map.m_lblk = iblock;
6532ed88685STheodore Ts'o 	map.m_len = bh->b_size >> inode->i_blkbits;
6542ed88685STheodore Ts'o 
6558b0f165fSAnatol Pomozov 	if (flags && !(flags & EXT4_GET_BLOCKS_NO_LOCK) && !handle) {
6567fb5409dSJan Kara 		/* Direct IO write... */
6572ed88685STheodore Ts'o 		if (map.m_len > DIO_MAX_BLOCKS)
6582ed88685STheodore Ts'o 			map.m_len = DIO_MAX_BLOCKS;
6592ed88685STheodore Ts'o 		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
660f3bd1f3fSMingming Cao 		handle = ext4_journal_start(inode, dio_credits);
6617fb5409dSJan Kara 		if (IS_ERR(handle)) {
662ac27a0ecSDave Kleikamp 			ret = PTR_ERR(handle);
6632ed88685STheodore Ts'o 			return ret;
6647fb5409dSJan Kara 		}
6657fb5409dSJan Kara 		started = 1;
666ac27a0ecSDave Kleikamp 	}
667ac27a0ecSDave Kleikamp 
6682ed88685STheodore Ts'o 	ret = ext4_map_blocks(handle, inode, &map, flags);
669ac27a0ecSDave Kleikamp 	if (ret > 0) {
6702ed88685STheodore Ts'o 		map_bh(bh, inode->i_sb, map.m_pblk);
6712ed88685STheodore Ts'o 		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
6722ed88685STheodore Ts'o 		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
673ac27a0ecSDave Kleikamp 		ret = 0;
674ac27a0ecSDave Kleikamp 	}
6757fb5409dSJan Kara 	if (started)
6767fb5409dSJan Kara 		ext4_journal_stop(handle);
677ac27a0ecSDave Kleikamp 	return ret;
678ac27a0ecSDave Kleikamp }
679ac27a0ecSDave Kleikamp 
6802ed88685STheodore Ts'o int ext4_get_block(struct inode *inode, sector_t iblock,
6812ed88685STheodore Ts'o 		   struct buffer_head *bh, int create)
6822ed88685STheodore Ts'o {
6832ed88685STheodore Ts'o 	return _ext4_get_block(inode, iblock, bh,
6842ed88685STheodore Ts'o 			       create ? EXT4_GET_BLOCKS_CREATE : 0);
6852ed88685STheodore Ts'o }
6862ed88685STheodore Ts'o 
687ac27a0ecSDave Kleikamp /*
688ac27a0ecSDave Kleikamp  * `handle' can be NULL if create is zero
689ac27a0ecSDave Kleikamp  */
690617ba13bSMingming Cao struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
691725d26d3SAneesh Kumar K.V 				ext4_lblk_t block, int create, int *errp)
692ac27a0ecSDave Kleikamp {
6932ed88685STheodore Ts'o 	struct ext4_map_blocks map;
6942ed88685STheodore Ts'o 	struct buffer_head *bh;
695ac27a0ecSDave Kleikamp 	int fatal = 0, err;
696ac27a0ecSDave Kleikamp 
697ac27a0ecSDave Kleikamp 	J_ASSERT(handle != NULL || create == 0);
698ac27a0ecSDave Kleikamp 
6992ed88685STheodore Ts'o 	map.m_lblk = block;
7002ed88685STheodore Ts'o 	map.m_len = 1;
7012ed88685STheodore Ts'o 	err = ext4_map_blocks(handle, inode, &map,
7022ed88685STheodore Ts'o 			      create ? EXT4_GET_BLOCKS_CREATE : 0);
7032ed88685STheodore Ts'o 
70490b0a973SCarlos Maiolino 	/* ensure we send some value back into *errp */
70590b0a973SCarlos Maiolino 	*errp = 0;
70690b0a973SCarlos Maiolino 
7072ed88685STheodore Ts'o 	if (err < 0)
708ac27a0ecSDave Kleikamp 		*errp = err;
7092ed88685STheodore Ts'o 	if (err <= 0)
7102ed88685STheodore Ts'o 		return NULL;
7112ed88685STheodore Ts'o 
7122ed88685STheodore Ts'o 	bh = sb_getblk(inode->i_sb, map.m_pblk);
713ac27a0ecSDave Kleikamp 	if (!bh) {
714ac27a0ecSDave Kleikamp 		*errp = -EIO;
7152ed88685STheodore Ts'o 		return NULL;
716ac27a0ecSDave Kleikamp 	}
7172ed88685STheodore Ts'o 	if (map.m_flags & EXT4_MAP_NEW) {
718ac27a0ecSDave Kleikamp 		J_ASSERT(create != 0);
719ac39849dSAneesh Kumar K.V 		J_ASSERT(handle != NULL);
720ac27a0ecSDave Kleikamp 
721ac27a0ecSDave Kleikamp 		/*
722ac27a0ecSDave Kleikamp 		 * Now that we do not always journal data, we should
723ac27a0ecSDave Kleikamp 		 * keep in mind whether this should always journal the
724ac27a0ecSDave Kleikamp 		 * new buffer as metadata.  For now, regular file
725617ba13bSMingming Cao 		 * writes use ext4_get_block instead, so it's not a
726ac27a0ecSDave Kleikamp 		 * problem.
727ac27a0ecSDave Kleikamp 		 */
728ac27a0ecSDave Kleikamp 		lock_buffer(bh);
729ac27a0ecSDave Kleikamp 		BUFFER_TRACE(bh, "call get_create_access");
730617ba13bSMingming Cao 		fatal = ext4_journal_get_create_access(handle, bh);
731ac27a0ecSDave Kleikamp 		if (!fatal && !buffer_uptodate(bh)) {
732ac27a0ecSDave Kleikamp 			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
733ac27a0ecSDave Kleikamp 			set_buffer_uptodate(bh);
734ac27a0ecSDave Kleikamp 		}
735ac27a0ecSDave Kleikamp 		unlock_buffer(bh);
7360390131bSFrank Mayhar 		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
7370390131bSFrank Mayhar 		err = ext4_handle_dirty_metadata(handle, inode, bh);
738ac27a0ecSDave Kleikamp 		if (!fatal)
739ac27a0ecSDave Kleikamp 			fatal = err;
740ac27a0ecSDave Kleikamp 	} else {
741ac27a0ecSDave Kleikamp 		BUFFER_TRACE(bh, "not a new buffer");
742ac27a0ecSDave Kleikamp 	}
743ac27a0ecSDave Kleikamp 	if (fatal) {
744ac27a0ecSDave Kleikamp 		*errp = fatal;
745ac27a0ecSDave Kleikamp 		brelse(bh);
746ac27a0ecSDave Kleikamp 		bh = NULL;
747ac27a0ecSDave Kleikamp 	}
748ac27a0ecSDave Kleikamp 	return bh;
749ac27a0ecSDave Kleikamp }
750ac27a0ecSDave Kleikamp 
751617ba13bSMingming Cao struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
752725d26d3SAneesh Kumar K.V 			       ext4_lblk_t block, int create, int *err)
753ac27a0ecSDave Kleikamp {
754ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
755ac27a0ecSDave Kleikamp 
756617ba13bSMingming Cao 	bh = ext4_getblk(handle, inode, block, create, err);
757ac27a0ecSDave Kleikamp 	if (!bh)
758ac27a0ecSDave Kleikamp 		return bh;
759ac27a0ecSDave Kleikamp 	if (buffer_uptodate(bh))
760ac27a0ecSDave Kleikamp 		return bh;
76165299a3bSChristoph Hellwig 	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
762ac27a0ecSDave Kleikamp 	wait_on_buffer(bh);
763ac27a0ecSDave Kleikamp 	if (buffer_uptodate(bh))
764ac27a0ecSDave Kleikamp 		return bh;
765ac27a0ecSDave Kleikamp 	put_bh(bh);
766ac27a0ecSDave Kleikamp 	*err = -EIO;
767ac27a0ecSDave Kleikamp 	return NULL;
768ac27a0ecSDave Kleikamp }
769ac27a0ecSDave Kleikamp 
770ac27a0ecSDave Kleikamp static int walk_page_buffers(handle_t *handle,
771ac27a0ecSDave Kleikamp 			     struct buffer_head *head,
772ac27a0ecSDave Kleikamp 			     unsigned from,
773ac27a0ecSDave Kleikamp 			     unsigned to,
774ac27a0ecSDave Kleikamp 			     int *partial,
775ac27a0ecSDave Kleikamp 			     int (*fn)(handle_t *handle,
776ac27a0ecSDave Kleikamp 				       struct buffer_head *bh))
777ac27a0ecSDave Kleikamp {
778ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
779ac27a0ecSDave Kleikamp 	unsigned block_start, block_end;
780ac27a0ecSDave Kleikamp 	unsigned blocksize = head->b_size;
781ac27a0ecSDave Kleikamp 	int err, ret = 0;
782ac27a0ecSDave Kleikamp 	struct buffer_head *next;
783ac27a0ecSDave Kleikamp 
784ac27a0ecSDave Kleikamp 	for (bh = head, block_start = 0;
785ac27a0ecSDave Kleikamp 	     ret == 0 && (bh != head || !block_start);
786de9a55b8STheodore Ts'o 	     block_start = block_end, bh = next) {
787ac27a0ecSDave Kleikamp 		next = bh->b_this_page;
788ac27a0ecSDave Kleikamp 		block_end = block_start + blocksize;
789ac27a0ecSDave Kleikamp 		if (block_end <= from || block_start >= to) {
790ac27a0ecSDave Kleikamp 			if (partial && !buffer_uptodate(bh))
791ac27a0ecSDave Kleikamp 				*partial = 1;
792ac27a0ecSDave Kleikamp 			continue;
793ac27a0ecSDave Kleikamp 		}
794ac27a0ecSDave Kleikamp 		err = (*fn)(handle, bh);
795ac27a0ecSDave Kleikamp 		if (!ret)
796ac27a0ecSDave Kleikamp 			ret = err;
797ac27a0ecSDave Kleikamp 	}
798ac27a0ecSDave Kleikamp 	return ret;
799ac27a0ecSDave Kleikamp }
800ac27a0ecSDave Kleikamp 
801ac27a0ecSDave Kleikamp /*
802ac27a0ecSDave Kleikamp  * To preserve ordering, it is essential that the hole instantiation and
803ac27a0ecSDave Kleikamp  * the data write be encapsulated in a single transaction.  We cannot
804617ba13bSMingming Cao  * close off a transaction and start a new one between the ext4_get_block()
805dab291afSMingming Cao  * and the commit_write().  So doing the jbd2_journal_start at the start of
806ac27a0ecSDave Kleikamp  * prepare_write() is the right place.
807ac27a0ecSDave Kleikamp  *
808617ba13bSMingming Cao  * Also, this function can nest inside ext4_writepage() ->
809617ba13bSMingming Cao  * block_write_full_page(). In that case, we *know* that ext4_writepage()
810ac27a0ecSDave Kleikamp  * has generated enough buffer credits to do the whole page.  So we won't
811ac27a0ecSDave Kleikamp  * block on the journal in that case, which is good, because the caller may
812ac27a0ecSDave Kleikamp  * be PF_MEMALLOC.
813ac27a0ecSDave Kleikamp  *
814617ba13bSMingming Cao  * By accident, ext4 can be reentered when a transaction is open via
815ac27a0ecSDave Kleikamp  * quota file writes.  If we were to commit the transaction while thus
816ac27a0ecSDave Kleikamp  * reentered, there can be a deadlock - we would be holding a quota
817ac27a0ecSDave Kleikamp  * lock, and the commit would never complete if another thread had a
818ac27a0ecSDave Kleikamp  * transaction open and was blocking on the quota lock - a ranking
819ac27a0ecSDave Kleikamp  * violation.
820ac27a0ecSDave Kleikamp  *
821dab291afSMingming Cao  * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
822ac27a0ecSDave Kleikamp  * will _not_ run commit under these circumstances because handle->h_ref
823ac27a0ecSDave Kleikamp  * is elevated.  We'll still have enough credits for the tiny quotafile
824ac27a0ecSDave Kleikamp  * write.
825ac27a0ecSDave Kleikamp  */
826ac27a0ecSDave Kleikamp static int do_journal_get_write_access(handle_t *handle,
827ac27a0ecSDave Kleikamp 				       struct buffer_head *bh)
828ac27a0ecSDave Kleikamp {
82956d35a4cSJan Kara 	int dirty = buffer_dirty(bh);
83056d35a4cSJan Kara 	int ret;
83156d35a4cSJan Kara 
832ac27a0ecSDave Kleikamp 	if (!buffer_mapped(bh) || buffer_freed(bh))
833ac27a0ecSDave Kleikamp 		return 0;
83456d35a4cSJan Kara 	/*
835ebdec241SChristoph Hellwig 	 * __block_write_begin() could have dirtied some buffers. Clean
83656d35a4cSJan Kara 	 * the dirty bit as jbd2_journal_get_write_access() could complain
83756d35a4cSJan Kara 	 * otherwise about fs integrity issues. Setting of the dirty bit
838ebdec241SChristoph Hellwig 	 * by __block_write_begin() isn't a real problem here as we clear
83956d35a4cSJan Kara 	 * the bit before releasing a page lock and thus writeback cannot
84056d35a4cSJan Kara 	 * ever write the buffer.
84156d35a4cSJan Kara 	 */
84256d35a4cSJan Kara 	if (dirty)
84356d35a4cSJan Kara 		clear_buffer_dirty(bh);
84456d35a4cSJan Kara 	ret = ext4_journal_get_write_access(handle, bh);
84556d35a4cSJan Kara 	if (!ret && dirty)
84656d35a4cSJan Kara 		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
84756d35a4cSJan Kara 	return ret;
848ac27a0ecSDave Kleikamp }
849ac27a0ecSDave Kleikamp 
850744692dcSJiaying Zhang static int ext4_get_block_write(struct inode *inode, sector_t iblock,
851744692dcSJiaying Zhang 		   struct buffer_head *bh_result, int create);
8528b0f165fSAnatol Pomozov static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
8538b0f165fSAnatol Pomozov 		   struct buffer_head *bh_result, int create);
854bfc1af65SNick Piggin static int ext4_write_begin(struct file *file, struct address_space *mapping,
855bfc1af65SNick Piggin 			    loff_t pos, unsigned len, unsigned flags,
856bfc1af65SNick Piggin 			    struct page **pagep, void **fsdata)
857ac27a0ecSDave Kleikamp {
858bfc1af65SNick Piggin 	struct inode *inode = mapping->host;
8591938a150SAneesh Kumar K.V 	int ret, needed_blocks;
860ac27a0ecSDave Kleikamp 	handle_t *handle;
861ac27a0ecSDave Kleikamp 	int retries = 0;
862bfc1af65SNick Piggin 	struct page *page;
863bfc1af65SNick Piggin 	pgoff_t index;
864bfc1af65SNick Piggin 	unsigned from, to;
865bfc1af65SNick Piggin 
8669bffad1eSTheodore Ts'o 	trace_ext4_write_begin(inode, pos, len, flags);
8671938a150SAneesh Kumar K.V 	/*
8681938a150SAneesh Kumar K.V 	 * Reserve one block more for addition to orphan list in case
8691938a150SAneesh Kumar K.V 	 * we allocate blocks but write fails for some reason
8701938a150SAneesh Kumar K.V 	 */
8711938a150SAneesh Kumar K.V 	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
872bfc1af65SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
873bfc1af65SNick Piggin 	from = pos & (PAGE_CACHE_SIZE - 1);
874bfc1af65SNick Piggin 	to = from + len;
875ac27a0ecSDave Kleikamp 
876ac27a0ecSDave Kleikamp retry:
877617ba13bSMingming Cao 	handle = ext4_journal_start(inode, needed_blocks);
8787479d2b9SAndrew Morton 	if (IS_ERR(handle)) {
8797479d2b9SAndrew Morton 		ret = PTR_ERR(handle);
8807479d2b9SAndrew Morton 		goto out;
8817479d2b9SAndrew Morton 	}
882ac27a0ecSDave Kleikamp 
883ebd3610bSJan Kara 	/* We cannot recurse into the filesystem as the transaction is already
884ebd3610bSJan Kara 	 * started */
885ebd3610bSJan Kara 	flags |= AOP_FLAG_NOFS;
886ebd3610bSJan Kara 
88754566b2cSNick Piggin 	page = grab_cache_page_write_begin(mapping, index, flags);
888cf108bcaSJan Kara 	if (!page) {
889cf108bcaSJan Kara 		ext4_journal_stop(handle);
890cf108bcaSJan Kara 		ret = -ENOMEM;
891cf108bcaSJan Kara 		goto out;
892cf108bcaSJan Kara 	}
893cf108bcaSJan Kara 	*pagep = page;
894cf108bcaSJan Kara 
895744692dcSJiaying Zhang 	if (ext4_should_dioread_nolock(inode))
8966e1db88dSChristoph Hellwig 		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
897744692dcSJiaying Zhang 	else
8986e1db88dSChristoph Hellwig 		ret = __block_write_begin(page, pos, len, ext4_get_block);
899bfc1af65SNick Piggin 
900bfc1af65SNick Piggin 	if (!ret && ext4_should_journal_data(inode)) {
901ac27a0ecSDave Kleikamp 		ret = walk_page_buffers(handle, page_buffers(page),
902ac27a0ecSDave Kleikamp 				from, to, NULL, do_journal_get_write_access);
903b46be050SAndrey Savochkin 	}
904bfc1af65SNick Piggin 
905bfc1af65SNick Piggin 	if (ret) {
906bfc1af65SNick Piggin 		unlock_page(page);
907bfc1af65SNick Piggin 		page_cache_release(page);
908ae4d5372SAneesh Kumar K.V 		/*
9096e1db88dSChristoph Hellwig 		 * __block_write_begin may have instantiated a few blocks
910ae4d5372SAneesh Kumar K.V 		 * outside i_size.  Trim these off again. Don't need
911ae4d5372SAneesh Kumar K.V 		 * i_size_read because we hold i_mutex.
9121938a150SAneesh Kumar K.V 		 *
9131938a150SAneesh Kumar K.V 		 * Add inode to orphan list in case we crash before
9141938a150SAneesh Kumar K.V 		 * truncate finishes
915ae4d5372SAneesh Kumar K.V 		 */
916ffacfa7aSJan Kara 		if (pos + len > inode->i_size && ext4_can_truncate(inode))
9171938a150SAneesh Kumar K.V 			ext4_orphan_add(handle, inode);
9181938a150SAneesh Kumar K.V 
9191938a150SAneesh Kumar K.V 		ext4_journal_stop(handle);
9201938a150SAneesh Kumar K.V 		if (pos + len > inode->i_size) {
921b9a4207dSJan Kara 			ext4_truncate_failed_write(inode);
9221938a150SAneesh Kumar K.V 			/*
923ffacfa7aSJan Kara 			 * If truncate failed early the inode might
9241938a150SAneesh Kumar K.V 			 * still be on the orphan list; we need to
9251938a150SAneesh Kumar K.V 			 * make sure the inode is removed from the
9261938a150SAneesh Kumar K.V 			 * orphan list in that case.
9271938a150SAneesh Kumar K.V 			 */
9281938a150SAneesh Kumar K.V 			if (inode->i_nlink)
9291938a150SAneesh Kumar K.V 				ext4_orphan_del(NULL, inode);
9301938a150SAneesh Kumar K.V 		}
931bfc1af65SNick Piggin 	}
932bfc1af65SNick Piggin 
933617ba13bSMingming Cao 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
934ac27a0ecSDave Kleikamp 		goto retry;
9357479d2b9SAndrew Morton out:
936ac27a0ecSDave Kleikamp 	return ret;
937ac27a0ecSDave Kleikamp }
938ac27a0ecSDave Kleikamp 
939bfc1af65SNick Piggin /* For write_end() in data=journal mode */
940bfc1af65SNick Piggin static int write_end_fn(handle_t *handle, struct buffer_head *bh)
941ac27a0ecSDave Kleikamp {
942ac27a0ecSDave Kleikamp 	if (!buffer_mapped(bh) || buffer_freed(bh))
943ac27a0ecSDave Kleikamp 		return 0;
944ac27a0ecSDave Kleikamp 	set_buffer_uptodate(bh);
9450390131bSFrank Mayhar 	return ext4_handle_dirty_metadata(handle, NULL, bh);
946ac27a0ecSDave Kleikamp }
947ac27a0ecSDave Kleikamp 
948f8514083SAneesh Kumar K.V static int ext4_generic_write_end(struct file *file,
949f8514083SAneesh Kumar K.V 				  struct address_space *mapping,
950f8514083SAneesh Kumar K.V 				  loff_t pos, unsigned len, unsigned copied,
951f8514083SAneesh Kumar K.V 				  struct page *page, void *fsdata)
952f8514083SAneesh Kumar K.V {
953f8514083SAneesh Kumar K.V 	int i_size_changed = 0;
954f8514083SAneesh Kumar K.V 	struct inode *inode = mapping->host;
955f8514083SAneesh Kumar K.V 	handle_t *handle = ext4_journal_current_handle();
956f8514083SAneesh Kumar K.V 
957f8514083SAneesh Kumar K.V 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
958f8514083SAneesh Kumar K.V 
959f8514083SAneesh Kumar K.V 	/*
960f8514083SAneesh Kumar K.V 	 * No need to use i_size_read() here, the i_size
961f8514083SAneesh Kumar K.V 	 * cannot change under us because we hold i_mutex.
962f8514083SAneesh Kumar K.V 	 *
963f8514083SAneesh Kumar K.V 	 * But it's important to update i_size while still holding page lock:
964f8514083SAneesh Kumar K.V 	 * page writeout could otherwise come in and zero beyond i_size.
965f8514083SAneesh Kumar K.V 	 */
966f8514083SAneesh Kumar K.V 	if (pos + copied > inode->i_size) {
967f8514083SAneesh Kumar K.V 		i_size_write(inode, pos + copied);
968f8514083SAneesh Kumar K.V 		i_size_changed = 1;
969f8514083SAneesh Kumar K.V 	}
970f8514083SAneesh Kumar K.V 
971f8514083SAneesh Kumar K.V 	if (pos + copied >  EXT4_I(inode)->i_disksize) {
972f8514083SAneesh Kumar K.V 		/* We need to mark inode dirty even if
973f8514083SAneesh Kumar K.V 		 * new_i_size is less that inode->i_size
974f8514083SAneesh Kumar K.V 		 * bu greater than i_disksize.(hint delalloc)
975f8514083SAneesh Kumar K.V 		 */
976f8514083SAneesh Kumar K.V 		ext4_update_i_disksize(inode, (pos + copied));
977f8514083SAneesh Kumar K.V 		i_size_changed = 1;
978f8514083SAneesh Kumar K.V 	}
979f8514083SAneesh Kumar K.V 	unlock_page(page);
980f8514083SAneesh Kumar K.V 	page_cache_release(page);
981f8514083SAneesh Kumar K.V 
982f8514083SAneesh Kumar K.V 	/*
983f8514083SAneesh Kumar K.V 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
984f8514083SAneesh Kumar K.V 	 * makes the holding time of page lock longer. Second, it forces lock
985f8514083SAneesh Kumar K.V 	 * ordering of page lock and transaction start for journaling
986f8514083SAneesh Kumar K.V 	 * filesystems.
987f8514083SAneesh Kumar K.V 	 */
988f8514083SAneesh Kumar K.V 	if (i_size_changed)
989f8514083SAneesh Kumar K.V 		ext4_mark_inode_dirty(handle, inode);
990f8514083SAneesh Kumar K.V 
991f8514083SAneesh Kumar K.V 	return copied;
992f8514083SAneesh Kumar K.V }
993f8514083SAneesh Kumar K.V 
994ac27a0ecSDave Kleikamp /*
995ac27a0ecSDave Kleikamp  * We need to pick up the new inode size which generic_commit_write gave us
996ac27a0ecSDave Kleikamp  * `file' can be NULL - eg, when called from page_symlink().
997ac27a0ecSDave Kleikamp  *
998617ba13bSMingming Cao  * ext4 never places buffers on inode->i_mapping->private_list.  metadata
999ac27a0ecSDave Kleikamp  * buffers are managed internally.
1000ac27a0ecSDave Kleikamp  */
1001bfc1af65SNick Piggin static int ext4_ordered_write_end(struct file *file,
1002bfc1af65SNick Piggin 				  struct address_space *mapping,
1003bfc1af65SNick Piggin 				  loff_t pos, unsigned len, unsigned copied,
1004bfc1af65SNick Piggin 				  struct page *page, void *fsdata)
1005ac27a0ecSDave Kleikamp {
1006617ba13bSMingming Cao 	handle_t *handle = ext4_journal_current_handle();
1007cf108bcaSJan Kara 	struct inode *inode = mapping->host;
1008ac27a0ecSDave Kleikamp 	int ret = 0, ret2;
1009ac27a0ecSDave Kleikamp 
10109bffad1eSTheodore Ts'o 	trace_ext4_ordered_write_end(inode, pos, len, copied);
1011678aaf48SJan Kara 	ret = ext4_jbd2_file_inode(handle, inode);
1012ac27a0ecSDave Kleikamp 
1013ac27a0ecSDave Kleikamp 	if (ret == 0) {
1014f8514083SAneesh Kumar K.V 		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1015bfc1af65SNick Piggin 							page, fsdata);
1016f8a87d89SRoel Kluin 		copied = ret2;
1017ffacfa7aSJan Kara 		if (pos + len > inode->i_size && ext4_can_truncate(inode))
1018f8514083SAneesh Kumar K.V 			/* if we have allocated more blocks and copied
1019f8514083SAneesh Kumar K.V 			 * less. We will have blocks allocated outside
1020f8514083SAneesh Kumar K.V 			 * inode->i_size. So truncate them
1021f8514083SAneesh Kumar K.V 			 */
1022f8514083SAneesh Kumar K.V 			ext4_orphan_add(handle, inode);
1023f8a87d89SRoel Kluin 		if (ret2 < 0)
1024f8a87d89SRoel Kluin 			ret = ret2;
102509e0834fSAkira Fujita 	} else {
102609e0834fSAkira Fujita 		unlock_page(page);
102709e0834fSAkira Fujita 		page_cache_release(page);
1028ac27a0ecSDave Kleikamp 	}
102909e0834fSAkira Fujita 
1030617ba13bSMingming Cao 	ret2 = ext4_journal_stop(handle);
1031ac27a0ecSDave Kleikamp 	if (!ret)
1032ac27a0ecSDave Kleikamp 		ret = ret2;
1033bfc1af65SNick Piggin 
1034f8514083SAneesh Kumar K.V 	if (pos + len > inode->i_size) {
1035b9a4207dSJan Kara 		ext4_truncate_failed_write(inode);
1036f8514083SAneesh Kumar K.V 		/*
1037ffacfa7aSJan Kara 		 * If truncate failed early the inode might still be
1038f8514083SAneesh Kumar K.V 		 * on the orphan list; we need to make sure the inode
1039f8514083SAneesh Kumar K.V 		 * is removed from the orphan list in that case.
1040f8514083SAneesh Kumar K.V 		 */
1041f8514083SAneesh Kumar K.V 		if (inode->i_nlink)
1042f8514083SAneesh Kumar K.V 			ext4_orphan_del(NULL, inode);
1043f8514083SAneesh Kumar K.V 	}
1044f8514083SAneesh Kumar K.V 
1045f8514083SAneesh Kumar K.V 
1046bfc1af65SNick Piggin 	return ret ? ret : copied;
1047ac27a0ecSDave Kleikamp }
1048ac27a0ecSDave Kleikamp 
1049bfc1af65SNick Piggin static int ext4_writeback_write_end(struct file *file,
1050bfc1af65SNick Piggin 				    struct address_space *mapping,
1051bfc1af65SNick Piggin 				    loff_t pos, unsigned len, unsigned copied,
1052bfc1af65SNick Piggin 				    struct page *page, void *fsdata)
1053ac27a0ecSDave Kleikamp {
1054617ba13bSMingming Cao 	handle_t *handle = ext4_journal_current_handle();
1055cf108bcaSJan Kara 	struct inode *inode = mapping->host;
1056ac27a0ecSDave Kleikamp 	int ret = 0, ret2;
1057ac27a0ecSDave Kleikamp 
10589bffad1eSTheodore Ts'o 	trace_ext4_writeback_write_end(inode, pos, len, copied);
1059f8514083SAneesh Kumar K.V 	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1060bfc1af65SNick Piggin 							page, fsdata);
1061f8a87d89SRoel Kluin 	copied = ret2;
1062ffacfa7aSJan Kara 	if (pos + len > inode->i_size && ext4_can_truncate(inode))
1063f8514083SAneesh Kumar K.V 		/* if we have allocated more blocks and copied
1064f8514083SAneesh Kumar K.V 		 * less. We will have blocks allocated outside
1065f8514083SAneesh Kumar K.V 		 * inode->i_size. So truncate them
1066f8514083SAneesh Kumar K.V 		 */
1067f8514083SAneesh Kumar K.V 		ext4_orphan_add(handle, inode);
1068f8514083SAneesh Kumar K.V 
1069f8a87d89SRoel Kluin 	if (ret2 < 0)
1070f8a87d89SRoel Kluin 		ret = ret2;
1071ac27a0ecSDave Kleikamp 
1072617ba13bSMingming Cao 	ret2 = ext4_journal_stop(handle);
1073ac27a0ecSDave Kleikamp 	if (!ret)
1074ac27a0ecSDave Kleikamp 		ret = ret2;
1075bfc1af65SNick Piggin 
1076f8514083SAneesh Kumar K.V 	if (pos + len > inode->i_size) {
1077b9a4207dSJan Kara 		ext4_truncate_failed_write(inode);
1078f8514083SAneesh Kumar K.V 		/*
1079ffacfa7aSJan Kara 		 * If truncate failed early the inode might still be
1080f8514083SAneesh Kumar K.V 		 * on the orphan list; we need to make sure the inode
1081f8514083SAneesh Kumar K.V 		 * is removed from the orphan list in that case.
1082f8514083SAneesh Kumar K.V 		 */
1083f8514083SAneesh Kumar K.V 		if (inode->i_nlink)
1084f8514083SAneesh Kumar K.V 			ext4_orphan_del(NULL, inode);
1085f8514083SAneesh Kumar K.V 	}
1086f8514083SAneesh Kumar K.V 
1087bfc1af65SNick Piggin 	return ret ? ret : copied;
1088ac27a0ecSDave Kleikamp }
1089ac27a0ecSDave Kleikamp 
1090bfc1af65SNick Piggin static int ext4_journalled_write_end(struct file *file,
1091bfc1af65SNick Piggin 				     struct address_space *mapping,
1092bfc1af65SNick Piggin 				     loff_t pos, unsigned len, unsigned copied,
1093bfc1af65SNick Piggin 				     struct page *page, void *fsdata)
1094ac27a0ecSDave Kleikamp {
1095617ba13bSMingming Cao 	handle_t *handle = ext4_journal_current_handle();
1096bfc1af65SNick Piggin 	struct inode *inode = mapping->host;
1097ac27a0ecSDave Kleikamp 	int ret = 0, ret2;
1098ac27a0ecSDave Kleikamp 	int partial = 0;
1099bfc1af65SNick Piggin 	unsigned from, to;
1100cf17fea6SAneesh Kumar K.V 	loff_t new_i_size;
1101ac27a0ecSDave Kleikamp 
11029bffad1eSTheodore Ts'o 	trace_ext4_journalled_write_end(inode, pos, len, copied);
1103bfc1af65SNick Piggin 	from = pos & (PAGE_CACHE_SIZE - 1);
1104bfc1af65SNick Piggin 	to = from + len;
1105bfc1af65SNick Piggin 
1106441c8508SCurt Wohlgemuth 	BUG_ON(!ext4_handle_valid(handle));
1107441c8508SCurt Wohlgemuth 
1108bfc1af65SNick Piggin 	if (copied < len) {
1109bfc1af65SNick Piggin 		if (!PageUptodate(page))
1110bfc1af65SNick Piggin 			copied = 0;
1111bfc1af65SNick Piggin 		page_zero_new_buffers(page, from+copied, to);
1112bfc1af65SNick Piggin 	}
1113ac27a0ecSDave Kleikamp 
1114ac27a0ecSDave Kleikamp 	ret = walk_page_buffers(handle, page_buffers(page), from,
1115bfc1af65SNick Piggin 				to, &partial, write_end_fn);
1116ac27a0ecSDave Kleikamp 	if (!partial)
1117ac27a0ecSDave Kleikamp 		SetPageUptodate(page);
1118cf17fea6SAneesh Kumar K.V 	new_i_size = pos + copied;
1119cf17fea6SAneesh Kumar K.V 	if (new_i_size > inode->i_size)
1120bfc1af65SNick Piggin 		i_size_write(inode, pos+copied);
112119f5fb7aSTheodore Ts'o 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
11222d859db3SJan Kara 	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1123cf17fea6SAneesh Kumar K.V 	if (new_i_size > EXT4_I(inode)->i_disksize) {
1124cf17fea6SAneesh Kumar K.V 		ext4_update_i_disksize(inode, new_i_size);
1125617ba13bSMingming Cao 		ret2 = ext4_mark_inode_dirty(handle, inode);
1126ac27a0ecSDave Kleikamp 		if (!ret)
1127ac27a0ecSDave Kleikamp 			ret = ret2;
1128ac27a0ecSDave Kleikamp 	}
1129bfc1af65SNick Piggin 
1130cf108bcaSJan Kara 	unlock_page(page);
1131f8514083SAneesh Kumar K.V 	page_cache_release(page);
1132ffacfa7aSJan Kara 	if (pos + len > inode->i_size && ext4_can_truncate(inode))
1133f8514083SAneesh Kumar K.V 		/* if we have allocated more blocks and copied
1134f8514083SAneesh Kumar K.V 		 * less. We will have blocks allocated outside
1135f8514083SAneesh Kumar K.V 		 * inode->i_size. So truncate them
1136f8514083SAneesh Kumar K.V 		 */
1137f8514083SAneesh Kumar K.V 		ext4_orphan_add(handle, inode);
1138f8514083SAneesh Kumar K.V 
1139617ba13bSMingming Cao 	ret2 = ext4_journal_stop(handle);
1140ac27a0ecSDave Kleikamp 	if (!ret)
1141ac27a0ecSDave Kleikamp 		ret = ret2;
1142f8514083SAneesh Kumar K.V 	if (pos + len > inode->i_size) {
1143b9a4207dSJan Kara 		ext4_truncate_failed_write(inode);
1144f8514083SAneesh Kumar K.V 		/*
1145ffacfa7aSJan Kara 		 * If truncate failed early the inode might still be
1146f8514083SAneesh Kumar K.V 		 * on the orphan list; we need to make sure the inode
1147f8514083SAneesh Kumar K.V 		 * is removed from the orphan list in that case.
1148f8514083SAneesh Kumar K.V 		 */
1149f8514083SAneesh Kumar K.V 		if (inode->i_nlink)
1150f8514083SAneesh Kumar K.V 			ext4_orphan_del(NULL, inode);
1151f8514083SAneesh Kumar K.V 	}
1152bfc1af65SNick Piggin 
1153bfc1af65SNick Piggin 	return ret ? ret : copied;
1154ac27a0ecSDave Kleikamp }
1155d2a17637SMingming Cao 
11569d0be502STheodore Ts'o /*
11577b415bf6SAditya Kali  * Reserve a single cluster located at lblock
11589d0be502STheodore Ts'o  */
115901f49d0bSTheodore Ts'o static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
1160d2a17637SMingming Cao {
1161030ba6bcSAneesh Kumar K.V 	int retries = 0;
1162d2a17637SMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
11630637c6f4STheodore Ts'o 	struct ext4_inode_info *ei = EXT4_I(inode);
11647b415bf6SAditya Kali 	unsigned int md_needed;
11655dd4056dSChristoph Hellwig 	int ret;
116603179fe9STheodore Ts'o 	ext4_lblk_t save_last_lblock;
116703179fe9STheodore Ts'o 	int save_len;
1168d2a17637SMingming Cao 
116960e58e0fSMingming Cao 	/*
117072b8ab9dSEric Sandeen 	 * We will charge metadata quota at writeout time; this saves
117172b8ab9dSEric Sandeen 	 * us from metadata over-estimation, though we may go over by
117272b8ab9dSEric Sandeen 	 * a small amount in the end.  Here we just reserve for data.
117360e58e0fSMingming Cao 	 */
11747b415bf6SAditya Kali 	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
11755dd4056dSChristoph Hellwig 	if (ret)
11765dd4056dSChristoph Hellwig 		return ret;
117703179fe9STheodore Ts'o 
117803179fe9STheodore Ts'o 	/*
117903179fe9STheodore Ts'o 	 * recalculate the amount of metadata blocks to reserve
118003179fe9STheodore Ts'o 	 * in order to allocate nrblocks
118103179fe9STheodore Ts'o 	 * worse case is one extent per block
118203179fe9STheodore Ts'o 	 */
118303179fe9STheodore Ts'o repeat:
118403179fe9STheodore Ts'o 	spin_lock(&ei->i_block_reservation_lock);
118503179fe9STheodore Ts'o 	/*
118603179fe9STheodore Ts'o 	 * ext4_calc_metadata_amount() has side effects, which we have
118703179fe9STheodore Ts'o 	 * to be prepared undo if we fail to claim space.
118803179fe9STheodore Ts'o 	 */
118903179fe9STheodore Ts'o 	save_len = ei->i_da_metadata_calc_len;
119003179fe9STheodore Ts'o 	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
119103179fe9STheodore Ts'o 	md_needed = EXT4_NUM_B2C(sbi,
119203179fe9STheodore Ts'o 				 ext4_calc_metadata_amount(inode, lblock));
119303179fe9STheodore Ts'o 	trace_ext4_da_reserve_space(inode, md_needed);
119403179fe9STheodore Ts'o 
119572b8ab9dSEric Sandeen 	/*
119672b8ab9dSEric Sandeen 	 * We do still charge estimated metadata to the sb though;
119772b8ab9dSEric Sandeen 	 * we cannot afford to run out of free blocks.
119872b8ab9dSEric Sandeen 	 */
1199e7d5f315STheodore Ts'o 	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
120003179fe9STheodore Ts'o 		ei->i_da_metadata_calc_len = save_len;
120103179fe9STheodore Ts'o 		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
120203179fe9STheodore Ts'o 		spin_unlock(&ei->i_block_reservation_lock);
1203030ba6bcSAneesh Kumar K.V 		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1204030ba6bcSAneesh Kumar K.V 			yield();
1205030ba6bcSAneesh Kumar K.V 			goto repeat;
1206030ba6bcSAneesh Kumar K.V 		}
120703179fe9STheodore Ts'o 		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
1208d2a17637SMingming Cao 		return -ENOSPC;
1209d2a17637SMingming Cao 	}
12109d0be502STheodore Ts'o 	ei->i_reserved_data_blocks++;
12110637c6f4STheodore Ts'o 	ei->i_reserved_meta_blocks += md_needed;
12120637c6f4STheodore Ts'o 	spin_unlock(&ei->i_block_reservation_lock);
121339bc680aSDmitry Monakhov 
1214d2a17637SMingming Cao 	return 0;       /* success */
1215d2a17637SMingming Cao }
1216d2a17637SMingming Cao 
121712219aeaSAneesh Kumar K.V static void ext4_da_release_space(struct inode *inode, int to_free)
1218d2a17637SMingming Cao {
1219d2a17637SMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
12200637c6f4STheodore Ts'o 	struct ext4_inode_info *ei = EXT4_I(inode);
1221d2a17637SMingming Cao 
1222cd213226SMingming Cao 	if (!to_free)
1223cd213226SMingming Cao 		return;		/* Nothing to release, exit */
1224cd213226SMingming Cao 
1225d2a17637SMingming Cao 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1226cd213226SMingming Cao 
12275a58ec87SLi Zefan 	trace_ext4_da_release_space(inode, to_free);
12280637c6f4STheodore Ts'o 	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1229cd213226SMingming Cao 		/*
12300637c6f4STheodore Ts'o 		 * if there aren't enough reserved blocks, then the
12310637c6f4STheodore Ts'o 		 * counter is messed up somewhere.  Since this
12320637c6f4STheodore Ts'o 		 * function is called from invalidate page, it's
12330637c6f4STheodore Ts'o 		 * harmless to return without any action.
1234cd213226SMingming Cao 		 */
12350637c6f4STheodore Ts'o 		ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
12360637c6f4STheodore Ts'o 			 "ino %lu, to_free %d with only %d reserved "
12371084f252STheodore Ts'o 			 "data blocks", inode->i_ino, to_free,
12380637c6f4STheodore Ts'o 			 ei->i_reserved_data_blocks);
12390637c6f4STheodore Ts'o 		WARN_ON(1);
12400637c6f4STheodore Ts'o 		to_free = ei->i_reserved_data_blocks;
12410637c6f4STheodore Ts'o 	}
12420637c6f4STheodore Ts'o 	ei->i_reserved_data_blocks -= to_free;
12430637c6f4STheodore Ts'o 
12440637c6f4STheodore Ts'o 	if (ei->i_reserved_data_blocks == 0) {
12450637c6f4STheodore Ts'o 		/*
12460637c6f4STheodore Ts'o 		 * We can release all of the reserved metadata blocks
12470637c6f4STheodore Ts'o 		 * only when we have written all of the delayed
12480637c6f4STheodore Ts'o 		 * allocation blocks.
12497b415bf6SAditya Kali 		 * Note that in case of bigalloc, i_reserved_meta_blocks,
12507b415bf6SAditya Kali 		 * i_reserved_data_blocks, etc. refer to number of clusters.
12510637c6f4STheodore Ts'o 		 */
125257042651STheodore Ts'o 		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
125372b8ab9dSEric Sandeen 				   ei->i_reserved_meta_blocks);
1254ee5f4d9cSTheodore Ts'o 		ei->i_reserved_meta_blocks = 0;
12559d0be502STheodore Ts'o 		ei->i_da_metadata_calc_len = 0;
1256cd213226SMingming Cao 	}
1257cd213226SMingming Cao 
125872b8ab9dSEric Sandeen 	/* update fs dirty data blocks counter */
125957042651STheodore Ts'o 	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
1260d2a17637SMingming Cao 
1261d2a17637SMingming Cao 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
126260e58e0fSMingming Cao 
12637b415bf6SAditya Kali 	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
1264d2a17637SMingming Cao }
1265d2a17637SMingming Cao 
1266d2a17637SMingming Cao static void ext4_da_page_release_reservation(struct page *page,
1267d2a17637SMingming Cao 					     unsigned long offset)
1268d2a17637SMingming Cao {
1269d2a17637SMingming Cao 	int to_release = 0;
1270d2a17637SMingming Cao 	struct buffer_head *head, *bh;
1271d2a17637SMingming Cao 	unsigned int curr_off = 0;
12727b415bf6SAditya Kali 	struct inode *inode = page->mapping->host;
12737b415bf6SAditya Kali 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
12747b415bf6SAditya Kali 	int num_clusters;
127551865fdaSZheng Liu 	ext4_fsblk_t lblk;
1276d2a17637SMingming Cao 
1277d2a17637SMingming Cao 	head = page_buffers(page);
1278d2a17637SMingming Cao 	bh = head;
1279d2a17637SMingming Cao 	do {
1280d2a17637SMingming Cao 		unsigned int next_off = curr_off + bh->b_size;
1281d2a17637SMingming Cao 
1282d2a17637SMingming Cao 		if ((offset <= curr_off) && (buffer_delay(bh))) {
1283d2a17637SMingming Cao 			to_release++;
1284d2a17637SMingming Cao 			clear_buffer_delay(bh);
1285d2a17637SMingming Cao 		}
1286d2a17637SMingming Cao 		curr_off = next_off;
1287d2a17637SMingming Cao 	} while ((bh = bh->b_this_page) != head);
12887b415bf6SAditya Kali 
128951865fdaSZheng Liu 	if (to_release) {
129051865fdaSZheng Liu 		lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
129151865fdaSZheng Liu 		ext4_es_remove_extent(inode, lblk, to_release);
129251865fdaSZheng Liu 	}
129351865fdaSZheng Liu 
12947b415bf6SAditya Kali 	/* If we have released all the blocks belonging to a cluster, then we
12957b415bf6SAditya Kali 	 * need to release the reserved space for that cluster. */
12967b415bf6SAditya Kali 	num_clusters = EXT4_NUM_B2C(sbi, to_release);
12977b415bf6SAditya Kali 	while (num_clusters > 0) {
12987b415bf6SAditya Kali 		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
12997b415bf6SAditya Kali 			((num_clusters - 1) << sbi->s_cluster_bits);
13007b415bf6SAditya Kali 		if (sbi->s_cluster_ratio == 1 ||
13017d1b1fbcSZheng Liu 		    !ext4_find_delalloc_cluster(inode, lblk))
13027b415bf6SAditya Kali 			ext4_da_release_space(inode, 1);
13037b415bf6SAditya Kali 
13047b415bf6SAditya Kali 		num_clusters--;
13057b415bf6SAditya Kali 	}
1306d2a17637SMingming Cao }
1307ac27a0ecSDave Kleikamp 
1308ac27a0ecSDave Kleikamp /*
130964769240SAlex Tomas  * Delayed allocation stuff
131064769240SAlex Tomas  */
131164769240SAlex Tomas 
131264769240SAlex Tomas /*
131364769240SAlex Tomas  * mpage_da_submit_io - walks through extent of pages and try to write
1314a1d6cc56SAneesh Kumar K.V  * them with writepage() call back
131564769240SAlex Tomas  *
131664769240SAlex Tomas  * @mpd->inode: inode
131764769240SAlex Tomas  * @mpd->first_page: first page of the extent
131864769240SAlex Tomas  * @mpd->next_page: page after the last page of the extent
131964769240SAlex Tomas  *
132064769240SAlex Tomas  * By the time mpage_da_submit_io() is called we expect all blocks
132164769240SAlex Tomas  * to be allocated. this may be wrong if allocation failed.
132264769240SAlex Tomas  *
132364769240SAlex Tomas  * As pages are already locked by write_cache_pages(), we can't use it
132464769240SAlex Tomas  */
13251de3e3dfSTheodore Ts'o static int mpage_da_submit_io(struct mpage_da_data *mpd,
13261de3e3dfSTheodore Ts'o 			      struct ext4_map_blocks *map)
132764769240SAlex Tomas {
1328791b7f08SAneesh Kumar K.V 	struct pagevec pvec;
1329791b7f08SAneesh Kumar K.V 	unsigned long index, end;
1330791b7f08SAneesh Kumar K.V 	int ret = 0, err, nr_pages, i;
1331791b7f08SAneesh Kumar K.V 	struct inode *inode = mpd->inode;
1332791b7f08SAneesh Kumar K.V 	struct address_space *mapping = inode->i_mapping;
1333cb20d518STheodore Ts'o 	loff_t size = i_size_read(inode);
13343ecdb3a1STheodore Ts'o 	unsigned int len, block_start;
13353ecdb3a1STheodore Ts'o 	struct buffer_head *bh, *page_bufs = NULL;
1336cb20d518STheodore Ts'o 	int journal_data = ext4_should_journal_data(inode);
13371de3e3dfSTheodore Ts'o 	sector_t pblock = 0, cur_logical = 0;
1338bd2d0210STheodore Ts'o 	struct ext4_io_submit io_submit;
133964769240SAlex Tomas 
134064769240SAlex Tomas 	BUG_ON(mpd->next_page <= mpd->first_page);
1341bd2d0210STheodore Ts'o 	memset(&io_submit, 0, sizeof(io_submit));
1342791b7f08SAneesh Kumar K.V 	/*
1343791b7f08SAneesh Kumar K.V 	 * We need to start from the first_page to the next_page - 1
1344791b7f08SAneesh Kumar K.V 	 * to make sure we also write the mapped dirty buffer_heads.
13458dc207c0STheodore Ts'o 	 * If we look at mpd->b_blocknr we would only be looking
1346791b7f08SAneesh Kumar K.V 	 * at the currently mapped buffer_heads.
1347791b7f08SAneesh Kumar K.V 	 */
134864769240SAlex Tomas 	index = mpd->first_page;
134964769240SAlex Tomas 	end = mpd->next_page - 1;
135064769240SAlex Tomas 
1351791b7f08SAneesh Kumar K.V 	pagevec_init(&pvec, 0);
135264769240SAlex Tomas 	while (index <= end) {
1353791b7f08SAneesh Kumar K.V 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
135464769240SAlex Tomas 		if (nr_pages == 0)
135564769240SAlex Tomas 			break;
135664769240SAlex Tomas 		for (i = 0; i < nr_pages; i++) {
135797498956STheodore Ts'o 			int commit_write = 0, skip_page = 0;
135864769240SAlex Tomas 			struct page *page = pvec.pages[i];
135964769240SAlex Tomas 
1360791b7f08SAneesh Kumar K.V 			index = page->index;
1361791b7f08SAneesh Kumar K.V 			if (index > end)
1362791b7f08SAneesh Kumar K.V 				break;
1363cb20d518STheodore Ts'o 
1364cb20d518STheodore Ts'o 			if (index == size >> PAGE_CACHE_SHIFT)
1365cb20d518STheodore Ts'o 				len = size & ~PAGE_CACHE_MASK;
1366cb20d518STheodore Ts'o 			else
1367cb20d518STheodore Ts'o 				len = PAGE_CACHE_SIZE;
13681de3e3dfSTheodore Ts'o 			if (map) {
13691de3e3dfSTheodore Ts'o 				cur_logical = index << (PAGE_CACHE_SHIFT -
13701de3e3dfSTheodore Ts'o 							inode->i_blkbits);
13711de3e3dfSTheodore Ts'o 				pblock = map->m_pblk + (cur_logical -
13721de3e3dfSTheodore Ts'o 							map->m_lblk);
13731de3e3dfSTheodore Ts'o 			}
1374791b7f08SAneesh Kumar K.V 			index++;
1375791b7f08SAneesh Kumar K.V 
1376791b7f08SAneesh Kumar K.V 			BUG_ON(!PageLocked(page));
1377791b7f08SAneesh Kumar K.V 			BUG_ON(PageWriteback(page));
1378791b7f08SAneesh Kumar K.V 
137922208dedSAneesh Kumar K.V 			/*
1380cb20d518STheodore Ts'o 			 * If the page does not have buffers (for
1381cb20d518STheodore Ts'o 			 * whatever reason), try to create them using
1382a107e5a3STheodore Ts'o 			 * __block_write_begin.  If this fails,
138397498956STheodore Ts'o 			 * skip the page and move on.
138422208dedSAneesh Kumar K.V 			 */
1385cb20d518STheodore Ts'o 			if (!page_has_buffers(page)) {
1386a107e5a3STheodore Ts'o 				if (__block_write_begin(page, 0, len,
1387cb20d518STheodore Ts'o 						noalloc_get_block_write)) {
138897498956STheodore Ts'o 				skip_page:
1389cb20d518STheodore Ts'o 					unlock_page(page);
1390cb20d518STheodore Ts'o 					continue;
1391cb20d518STheodore Ts'o 				}
1392cb20d518STheodore Ts'o 				commit_write = 1;
1393cb20d518STheodore Ts'o 			}
13943ecdb3a1STheodore Ts'o 
13953ecdb3a1STheodore Ts'o 			bh = page_bufs = page_buffers(page);
13963ecdb3a1STheodore Ts'o 			block_start = 0;
13973ecdb3a1STheodore Ts'o 			do {
13981de3e3dfSTheodore Ts'o 				if (!bh)
139997498956STheodore Ts'o 					goto skip_page;
14001de3e3dfSTheodore Ts'o 				if (map && (cur_logical >= map->m_lblk) &&
14011de3e3dfSTheodore Ts'o 				    (cur_logical <= (map->m_lblk +
14021de3e3dfSTheodore Ts'o 						     (map->m_len - 1)))) {
14031de3e3dfSTheodore Ts'o 					if (buffer_delay(bh)) {
14041de3e3dfSTheodore Ts'o 						clear_buffer_delay(bh);
14051de3e3dfSTheodore Ts'o 						bh->b_blocknr = pblock;
14061de3e3dfSTheodore Ts'o 					}
14071de3e3dfSTheodore Ts'o 					if (buffer_unwritten(bh) ||
14081de3e3dfSTheodore Ts'o 					    buffer_mapped(bh))
14091de3e3dfSTheodore Ts'o 						BUG_ON(bh->b_blocknr != pblock);
14101de3e3dfSTheodore Ts'o 					if (map->m_flags & EXT4_MAP_UNINIT)
14111de3e3dfSTheodore Ts'o 						set_buffer_uninit(bh);
14121de3e3dfSTheodore Ts'o 					clear_buffer_unwritten(bh);
14131de3e3dfSTheodore Ts'o 				}
14141de3e3dfSTheodore Ts'o 
141513a79a47SYongqiang Yang 				/*
141613a79a47SYongqiang Yang 				 * skip page if block allocation undone and
141713a79a47SYongqiang Yang 				 * block is dirty
141813a79a47SYongqiang Yang 				 */
141913a79a47SYongqiang Yang 				if (ext4_bh_delay_or_unwritten(NULL, bh))
142097498956STheodore Ts'o 					skip_page = 1;
14213ecdb3a1STheodore Ts'o 				bh = bh->b_this_page;
14223ecdb3a1STheodore Ts'o 				block_start += bh->b_size;
14231de3e3dfSTheodore Ts'o 				cur_logical++;
14241de3e3dfSTheodore Ts'o 				pblock++;
14251de3e3dfSTheodore Ts'o 			} while (bh != page_bufs);
14261de3e3dfSTheodore Ts'o 
142797498956STheodore Ts'o 			if (skip_page)
142897498956STheodore Ts'o 				goto skip_page;
1429cb20d518STheodore Ts'o 
1430cb20d518STheodore Ts'o 			if (commit_write)
1431cb20d518STheodore Ts'o 				/* mark the buffer_heads as dirty & uptodate */
1432cb20d518STheodore Ts'o 				block_commit_write(page, 0, len);
1433cb20d518STheodore Ts'o 
143497498956STheodore Ts'o 			clear_page_dirty_for_io(page);
1435bd2d0210STheodore Ts'o 			/*
1436bd2d0210STheodore Ts'o 			 * Delalloc doesn't support data journalling,
1437bd2d0210STheodore Ts'o 			 * but eventually maybe we'll lift this
1438bd2d0210STheodore Ts'o 			 * restriction.
1439bd2d0210STheodore Ts'o 			 */
1440bd2d0210STheodore Ts'o 			if (unlikely(journal_data && PageChecked(page)))
1441cb20d518STheodore Ts'o 				err = __ext4_journalled_writepage(page, len);
14421449032bSTheodore Ts'o 			else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT))
1443bd2d0210STheodore Ts'o 				err = ext4_bio_write_page(&io_submit, page,
1444bd2d0210STheodore Ts'o 							  len, mpd->wbc);
14459dd75f1fSTheodore Ts'o 			else if (buffer_uninit(page_bufs)) {
14469dd75f1fSTheodore Ts'o 				ext4_set_bh_endio(page_bufs, inode);
14479dd75f1fSTheodore Ts'o 				err = block_write_full_page_endio(page,
14489dd75f1fSTheodore Ts'o 					noalloc_get_block_write,
14499dd75f1fSTheodore Ts'o 					mpd->wbc, ext4_end_io_buffer_write);
14509dd75f1fSTheodore Ts'o 			} else
14511449032bSTheodore Ts'o 				err = block_write_full_page(page,
14521449032bSTheodore Ts'o 					noalloc_get_block_write, mpd->wbc);
1453cb20d518STheodore Ts'o 
1454cb20d518STheodore Ts'o 			if (!err)
1455a1d6cc56SAneesh Kumar K.V 				mpd->pages_written++;
145664769240SAlex Tomas 			/*
145764769240SAlex Tomas 			 * In error case, we have to continue because
145864769240SAlex Tomas 			 * remaining pages are still locked
145964769240SAlex Tomas 			 */
146064769240SAlex Tomas 			if (ret == 0)
146164769240SAlex Tomas 				ret = err;
146264769240SAlex Tomas 		}
146364769240SAlex Tomas 		pagevec_release(&pvec);
146464769240SAlex Tomas 	}
1465bd2d0210STheodore Ts'o 	ext4_io_submit(&io_submit);
146664769240SAlex Tomas 	return ret;
146764769240SAlex Tomas }
146864769240SAlex Tomas 
1469c7f5938aSCurt Wohlgemuth static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
1470c4a0c46eSAneesh Kumar K.V {
1471c4a0c46eSAneesh Kumar K.V 	int nr_pages, i;
1472c4a0c46eSAneesh Kumar K.V 	pgoff_t index, end;
1473c4a0c46eSAneesh Kumar K.V 	struct pagevec pvec;
1474c4a0c46eSAneesh Kumar K.V 	struct inode *inode = mpd->inode;
1475c4a0c46eSAneesh Kumar K.V 	struct address_space *mapping = inode->i_mapping;
147651865fdaSZheng Liu 	ext4_lblk_t start, last;
1477c4a0c46eSAneesh Kumar K.V 
1478c7f5938aSCurt Wohlgemuth 	index = mpd->first_page;
1479c7f5938aSCurt Wohlgemuth 	end   = mpd->next_page - 1;
148051865fdaSZheng Liu 
148151865fdaSZheng Liu 	start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
148251865fdaSZheng Liu 	last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
148351865fdaSZheng Liu 	ext4_es_remove_extent(inode, start, last - start + 1);
148451865fdaSZheng Liu 
148566bea92cSEric Sandeen 	pagevec_init(&pvec, 0);
1486c4a0c46eSAneesh Kumar K.V 	while (index <= end) {
1487c4a0c46eSAneesh Kumar K.V 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1488c4a0c46eSAneesh Kumar K.V 		if (nr_pages == 0)
1489c4a0c46eSAneesh Kumar K.V 			break;
1490c4a0c46eSAneesh Kumar K.V 		for (i = 0; i < nr_pages; i++) {
1491c4a0c46eSAneesh Kumar K.V 			struct page *page = pvec.pages[i];
14929b1d0998SJan Kara 			if (page->index > end)
1493c4a0c46eSAneesh Kumar K.V 				break;
1494c4a0c46eSAneesh Kumar K.V 			BUG_ON(!PageLocked(page));
1495c4a0c46eSAneesh Kumar K.V 			BUG_ON(PageWriteback(page));
1496c4a0c46eSAneesh Kumar K.V 			block_invalidatepage(page, 0);
1497c4a0c46eSAneesh Kumar K.V 			ClearPageUptodate(page);
1498c4a0c46eSAneesh Kumar K.V 			unlock_page(page);
1499c4a0c46eSAneesh Kumar K.V 		}
15009b1d0998SJan Kara 		index = pvec.pages[nr_pages - 1]->index + 1;
15019b1d0998SJan Kara 		pagevec_release(&pvec);
1502c4a0c46eSAneesh Kumar K.V 	}
1503c4a0c46eSAneesh Kumar K.V 	return;
1504c4a0c46eSAneesh Kumar K.V }
1505c4a0c46eSAneesh Kumar K.V 
1506df22291fSAneesh Kumar K.V static void ext4_print_free_blocks(struct inode *inode)
1507df22291fSAneesh Kumar K.V {
1508df22291fSAneesh Kumar K.V 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
150992b97816STheodore Ts'o 	struct super_block *sb = inode->i_sb;
151092b97816STheodore Ts'o 
151192b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
15125dee5437STheodore Ts'o 	       EXT4_C2B(EXT4_SB(inode->i_sb),
15135dee5437STheodore Ts'o 			ext4_count_free_clusters(inode->i_sb)));
151492b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
151592b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
151657042651STheodore Ts'o 	       (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
151757042651STheodore Ts'o 		percpu_counter_sum(&sbi->s_freeclusters_counter)));
151892b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
15197b415bf6SAditya Kali 	       (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
15207b415bf6SAditya Kali 		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
152192b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "Block reservation details");
152292b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
1523df22291fSAneesh Kumar K.V 		 EXT4_I(inode)->i_reserved_data_blocks);
152492b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
1525df22291fSAneesh Kumar K.V 	       EXT4_I(inode)->i_reserved_meta_blocks);
1526df22291fSAneesh Kumar K.V 	return;
1527df22291fSAneesh Kumar K.V }
1528df22291fSAneesh Kumar K.V 
1529b920c755STheodore Ts'o /*
15305a87b7a5STheodore Ts'o  * mpage_da_map_and_submit - go through given space, map them
15315a87b7a5STheodore Ts'o  *       if necessary, and then submit them for I/O
153264769240SAlex Tomas  *
15338dc207c0STheodore Ts'o  * @mpd - bh describing space
153464769240SAlex Tomas  *
153564769240SAlex Tomas  * The function skips space we know is already mapped to disk blocks.
153664769240SAlex Tomas  *
153764769240SAlex Tomas  */
15385a87b7a5STheodore Ts'o static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
153964769240SAlex Tomas {
15402ac3b6e0STheodore Ts'o 	int err, blks, get_blocks_flags;
15411de3e3dfSTheodore Ts'o 	struct ext4_map_blocks map, *mapp = NULL;
15422fa3cdfbSTheodore Ts'o 	sector_t next = mpd->b_blocknr;
15432fa3cdfbSTheodore Ts'o 	unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
15442fa3cdfbSTheodore Ts'o 	loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
15452fa3cdfbSTheodore Ts'o 	handle_t *handle = NULL;
154664769240SAlex Tomas 
154764769240SAlex Tomas 	/*
15485a87b7a5STheodore Ts'o 	 * If the blocks are mapped already, or we couldn't accumulate
15495a87b7a5STheodore Ts'o 	 * any blocks, then proceed immediately to the submission stage.
155064769240SAlex Tomas 	 */
15515a87b7a5STheodore Ts'o 	if ((mpd->b_size == 0) ||
15525a87b7a5STheodore Ts'o 	    ((mpd->b_state  & (1 << BH_Mapped)) &&
155329fa89d0SAneesh Kumar K.V 	     !(mpd->b_state & (1 << BH_Delay)) &&
15545a87b7a5STheodore Ts'o 	     !(mpd->b_state & (1 << BH_Unwritten))))
15555a87b7a5STheodore Ts'o 		goto submit_io;
15562fa3cdfbSTheodore Ts'o 
15572fa3cdfbSTheodore Ts'o 	handle = ext4_journal_current_handle();
15582fa3cdfbSTheodore Ts'o 	BUG_ON(!handle);
15592fa3cdfbSTheodore Ts'o 
156079ffab34SAneesh Kumar K.V 	/*
156179e83036SEric Sandeen 	 * Call ext4_map_blocks() to allocate any delayed allocation
15622ac3b6e0STheodore Ts'o 	 * blocks, or to convert an uninitialized extent to be
15632ac3b6e0STheodore Ts'o 	 * initialized (in the case where we have written into
15642ac3b6e0STheodore Ts'o 	 * one or more preallocated blocks).
15652ac3b6e0STheodore Ts'o 	 *
15662ac3b6e0STheodore Ts'o 	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
15672ac3b6e0STheodore Ts'o 	 * indicate that we are on the delayed allocation path.  This
15682ac3b6e0STheodore Ts'o 	 * affects functions in many different parts of the allocation
15692ac3b6e0STheodore Ts'o 	 * call path.  This flag exists primarily because we don't
157079e83036SEric Sandeen 	 * want to change *many* call functions, so ext4_map_blocks()
1571f2321097STheodore Ts'o 	 * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
15722ac3b6e0STheodore Ts'o 	 * inode's allocation semaphore is taken.
15732ac3b6e0STheodore Ts'o 	 *
15742ac3b6e0STheodore Ts'o 	 * If the blocks in questions were delalloc blocks, set
15752ac3b6e0STheodore Ts'o 	 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
15762ac3b6e0STheodore Ts'o 	 * variables are updated after the blocks have been allocated.
157779ffab34SAneesh Kumar K.V 	 */
15782ed88685STheodore Ts'o 	map.m_lblk = next;
15792ed88685STheodore Ts'o 	map.m_len = max_blocks;
15801296cc85SAneesh Kumar K.V 	get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
1581744692dcSJiaying Zhang 	if (ext4_should_dioread_nolock(mpd->inode))
1582744692dcSJiaying Zhang 		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
15832ac3b6e0STheodore Ts'o 	if (mpd->b_state & (1 << BH_Delay))
15841296cc85SAneesh Kumar K.V 		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
15851296cc85SAneesh Kumar K.V 
15862ed88685STheodore Ts'o 	blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
15872fa3cdfbSTheodore Ts'o 	if (blks < 0) {
1588e3570639SEric Sandeen 		struct super_block *sb = mpd->inode->i_sb;
1589e3570639SEric Sandeen 
15902fa3cdfbSTheodore Ts'o 		err = blks;
1591ed5bde0bSTheodore Ts'o 		/*
15925a87b7a5STheodore Ts'o 		 * If get block returns EAGAIN or ENOSPC and there
159397498956STheodore Ts'o 		 * appears to be free blocks we will just let
159497498956STheodore Ts'o 		 * mpage_da_submit_io() unlock all of the pages.
1595c4a0c46eSAneesh Kumar K.V 		 */
1596c4a0c46eSAneesh Kumar K.V 		if (err == -EAGAIN)
15975a87b7a5STheodore Ts'o 			goto submit_io;
1598df22291fSAneesh Kumar K.V 
15995dee5437STheodore Ts'o 		if (err == -ENOSPC && ext4_count_free_clusters(sb)) {
1600df22291fSAneesh Kumar K.V 			mpd->retval = err;
16015a87b7a5STheodore Ts'o 			goto submit_io;
1602df22291fSAneesh Kumar K.V 		}
1603df22291fSAneesh Kumar K.V 
1604c4a0c46eSAneesh Kumar K.V 		/*
1605ed5bde0bSTheodore Ts'o 		 * get block failure will cause us to loop in
1606ed5bde0bSTheodore Ts'o 		 * writepages, because a_ops->writepage won't be able
1607ed5bde0bSTheodore Ts'o 		 * to make progress. The page will be redirtied by
1608ed5bde0bSTheodore Ts'o 		 * writepage and writepages will again try to write
1609ed5bde0bSTheodore Ts'o 		 * the same.
1610c4a0c46eSAneesh Kumar K.V 		 */
1611e3570639SEric Sandeen 		if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
1612e3570639SEric Sandeen 			ext4_msg(sb, KERN_CRIT,
1613e3570639SEric Sandeen 				 "delayed block allocation failed for inode %lu "
1614e3570639SEric Sandeen 				 "at logical offset %llu with max blocks %zd "
1615e3570639SEric Sandeen 				 "with error %d", mpd->inode->i_ino,
1616c4a0c46eSAneesh Kumar K.V 				 (unsigned long long) next,
16178dc207c0STheodore Ts'o 				 mpd->b_size >> mpd->inode->i_blkbits, err);
1618e3570639SEric Sandeen 			ext4_msg(sb, KERN_CRIT,
1619e3570639SEric Sandeen 				"This should not happen!! Data will be lost\n");
1620e3570639SEric Sandeen 			if (err == -ENOSPC)
1621df22291fSAneesh Kumar K.V 				ext4_print_free_blocks(mpd->inode);
1622030ba6bcSAneesh Kumar K.V 		}
16232fa3cdfbSTheodore Ts'o 		/* invalidate all the pages */
1624c7f5938aSCurt Wohlgemuth 		ext4_da_block_invalidatepages(mpd);
1625e0fd9b90SCurt Wohlgemuth 
1626e0fd9b90SCurt Wohlgemuth 		/* Mark this page range as having been completed */
1627e0fd9b90SCurt Wohlgemuth 		mpd->io_done = 1;
16285a87b7a5STheodore Ts'o 		return;
1629c4a0c46eSAneesh Kumar K.V 	}
16302fa3cdfbSTheodore Ts'o 	BUG_ON(blks == 0);
16312fa3cdfbSTheodore Ts'o 
16321de3e3dfSTheodore Ts'o 	mapp = &map;
16332ed88685STheodore Ts'o 	if (map.m_flags & EXT4_MAP_NEW) {
16342ed88685STheodore Ts'o 		struct block_device *bdev = mpd->inode->i_sb->s_bdev;
16352ed88685STheodore Ts'o 		int i;
163664769240SAlex Tomas 
16372ed88685STheodore Ts'o 		for (i = 0; i < map.m_len; i++)
16382ed88685STheodore Ts'o 			unmap_underlying_metadata(bdev, map.m_pblk + i);
16392fa3cdfbSTheodore Ts'o 	}
16402fa3cdfbSTheodore Ts'o 
16412fa3cdfbSTheodore Ts'o 	/*
164203f5d8bcSJan Kara 	 * Update on-disk size along with block allocation.
16432fa3cdfbSTheodore Ts'o 	 */
16442fa3cdfbSTheodore Ts'o 	disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
16452fa3cdfbSTheodore Ts'o 	if (disksize > i_size_read(mpd->inode))
16462fa3cdfbSTheodore Ts'o 		disksize = i_size_read(mpd->inode);
16472fa3cdfbSTheodore Ts'o 	if (disksize > EXT4_I(mpd->inode)->i_disksize) {
16482fa3cdfbSTheodore Ts'o 		ext4_update_i_disksize(mpd->inode, disksize);
16495a87b7a5STheodore Ts'o 		err = ext4_mark_inode_dirty(handle, mpd->inode);
16505a87b7a5STheodore Ts'o 		if (err)
16515a87b7a5STheodore Ts'o 			ext4_error(mpd->inode->i_sb,
16525a87b7a5STheodore Ts'o 				   "Failed to mark inode %lu dirty",
16535a87b7a5STheodore Ts'o 				   mpd->inode->i_ino);
16542fa3cdfbSTheodore Ts'o 	}
16552fa3cdfbSTheodore Ts'o 
16565a87b7a5STheodore Ts'o submit_io:
16571de3e3dfSTheodore Ts'o 	mpage_da_submit_io(mpd, mapp);
16585a87b7a5STheodore Ts'o 	mpd->io_done = 1;
165964769240SAlex Tomas }
166064769240SAlex Tomas 
1661bf068ee2SAneesh Kumar K.V #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
1662bf068ee2SAneesh Kumar K.V 		(1 << BH_Delay) | (1 << BH_Unwritten))
166364769240SAlex Tomas 
166464769240SAlex Tomas /*
166564769240SAlex Tomas  * mpage_add_bh_to_extent - try to add one more block to extent of blocks
166664769240SAlex Tomas  *
166764769240SAlex Tomas  * @mpd->lbh - extent of blocks
166864769240SAlex Tomas  * @logical - logical number of the block in the file
166964769240SAlex Tomas  * @bh - bh of the block (used to access block's state)
167064769240SAlex Tomas  *
167164769240SAlex Tomas  * the function is used to collect contig. blocks in same state
167264769240SAlex Tomas  */
167364769240SAlex Tomas static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
16748dc207c0STheodore Ts'o 				   sector_t logical, size_t b_size,
16758dc207c0STheodore Ts'o 				   unsigned long b_state)
167664769240SAlex Tomas {
167764769240SAlex Tomas 	sector_t next;
16788dc207c0STheodore Ts'o 	int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
167964769240SAlex Tomas 
1680c445e3e0SEric Sandeen 	/*
1681c445e3e0SEric Sandeen 	 * XXX Don't go larger than mballoc is willing to allocate
1682c445e3e0SEric Sandeen 	 * This is a stopgap solution.  We eventually need to fold
1683c445e3e0SEric Sandeen 	 * mpage_da_submit_io() into this function and then call
168479e83036SEric Sandeen 	 * ext4_map_blocks() multiple times in a loop
1685c445e3e0SEric Sandeen 	 */
1686c445e3e0SEric Sandeen 	if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
1687c445e3e0SEric Sandeen 		goto flush_it;
1688c445e3e0SEric Sandeen 
1689525f4ed8SMingming Cao 	/* check if thereserved journal credits might overflow */
169012e9b892SDmitry Monakhov 	if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
1691525f4ed8SMingming Cao 		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
1692525f4ed8SMingming Cao 			/*
1693525f4ed8SMingming Cao 			 * With non-extent format we are limited by the journal
1694525f4ed8SMingming Cao 			 * credit available.  Total credit needed to insert
1695525f4ed8SMingming Cao 			 * nrblocks contiguous blocks is dependent on the
1696525f4ed8SMingming Cao 			 * nrblocks.  So limit nrblocks.
1697525f4ed8SMingming Cao 			 */
1698525f4ed8SMingming Cao 			goto flush_it;
1699525f4ed8SMingming Cao 		} else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
1700525f4ed8SMingming Cao 				EXT4_MAX_TRANS_DATA) {
1701525f4ed8SMingming Cao 			/*
1702525f4ed8SMingming Cao 			 * Adding the new buffer_head would make it cross the
1703525f4ed8SMingming Cao 			 * allowed limit for which we have journal credit
1704525f4ed8SMingming Cao 			 * reserved. So limit the new bh->b_size
1705525f4ed8SMingming Cao 			 */
1706525f4ed8SMingming Cao 			b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
1707525f4ed8SMingming Cao 						mpd->inode->i_blkbits;
1708525f4ed8SMingming Cao 			/* we will do mpage_da_submit_io in the next loop */
1709525f4ed8SMingming Cao 		}
1710525f4ed8SMingming Cao 	}
171164769240SAlex Tomas 	/*
171264769240SAlex Tomas 	 * First block in the extent
171364769240SAlex Tomas 	 */
17148dc207c0STheodore Ts'o 	if (mpd->b_size == 0) {
17158dc207c0STheodore Ts'o 		mpd->b_blocknr = logical;
17168dc207c0STheodore Ts'o 		mpd->b_size = b_size;
17178dc207c0STheodore Ts'o 		mpd->b_state = b_state & BH_FLAGS;
171864769240SAlex Tomas 		return;
171964769240SAlex Tomas 	}
172064769240SAlex Tomas 
17218dc207c0STheodore Ts'o 	next = mpd->b_blocknr + nrblocks;
172264769240SAlex Tomas 	/*
172364769240SAlex Tomas 	 * Can we merge the block to our big extent?
172464769240SAlex Tomas 	 */
17258dc207c0STheodore Ts'o 	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
17268dc207c0STheodore Ts'o 		mpd->b_size += b_size;
172764769240SAlex Tomas 		return;
172864769240SAlex Tomas 	}
172964769240SAlex Tomas 
1730525f4ed8SMingming Cao flush_it:
173164769240SAlex Tomas 	/*
173264769240SAlex Tomas 	 * We couldn't merge the block to our extent, so we
173364769240SAlex Tomas 	 * need to flush current  extent and start new one
173464769240SAlex Tomas 	 */
17355a87b7a5STheodore Ts'o 	mpage_da_map_and_submit(mpd);
1736a1d6cc56SAneesh Kumar K.V 	return;
173764769240SAlex Tomas }
173864769240SAlex Tomas 
1739c364b22cSAneesh Kumar K.V static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
174029fa89d0SAneesh Kumar K.V {
1741c364b22cSAneesh Kumar K.V 	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
174229fa89d0SAneesh Kumar K.V }
174329fa89d0SAneesh Kumar K.V 
174464769240SAlex Tomas /*
17455356f261SAditya Kali  * This function is grabs code from the very beginning of
17465356f261SAditya Kali  * ext4_map_blocks, but assumes that the caller is from delayed write
17475356f261SAditya Kali  * time. This function looks up the requested blocks and sets the
17485356f261SAditya Kali  * buffer delay bit under the protection of i_data_sem.
17495356f261SAditya Kali  */
17505356f261SAditya Kali static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
17515356f261SAditya Kali 			      struct ext4_map_blocks *map,
17525356f261SAditya Kali 			      struct buffer_head *bh)
17535356f261SAditya Kali {
17545356f261SAditya Kali 	int retval;
17555356f261SAditya Kali 	sector_t invalid_block = ~((sector_t) 0xffff);
17565356f261SAditya Kali 
17575356f261SAditya Kali 	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
17585356f261SAditya Kali 		invalid_block = ~0;
17595356f261SAditya Kali 
17605356f261SAditya Kali 	map->m_flags = 0;
17615356f261SAditya Kali 	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
17625356f261SAditya Kali 		  "logical block %lu\n", inode->i_ino, map->m_len,
17635356f261SAditya Kali 		  (unsigned long) map->m_lblk);
17645356f261SAditya Kali 	/*
17655356f261SAditya Kali 	 * Try to see if we can get the block without requesting a new
17665356f261SAditya Kali 	 * file system block.
17675356f261SAditya Kali 	 */
17685356f261SAditya Kali 	down_read((&EXT4_I(inode)->i_data_sem));
17695356f261SAditya Kali 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
17705356f261SAditya Kali 		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
17715356f261SAditya Kali 	else
17725356f261SAditya Kali 		retval = ext4_ind_map_blocks(NULL, inode, map, 0);
17735356f261SAditya Kali 
17745356f261SAditya Kali 	if (retval == 0) {
17755356f261SAditya Kali 		/*
17765356f261SAditya Kali 		 * XXX: __block_prepare_write() unmaps passed block,
17775356f261SAditya Kali 		 * is it OK?
17785356f261SAditya Kali 		 */
17795356f261SAditya Kali 		/* If the block was allocated from previously allocated cluster,
17805356f261SAditya Kali 		 * then we dont need to reserve it again. */
17815356f261SAditya Kali 		if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
17825356f261SAditya Kali 			retval = ext4_da_reserve_space(inode, iblock);
17835356f261SAditya Kali 			if (retval)
17845356f261SAditya Kali 				/* not enough space to reserve */
17855356f261SAditya Kali 				goto out_unlock;
17865356f261SAditya Kali 		}
17875356f261SAditya Kali 
178851865fdaSZheng Liu 		retval = ext4_es_insert_extent(inode, map->m_lblk, map->m_len);
178951865fdaSZheng Liu 		if (retval)
179051865fdaSZheng Liu 			goto out_unlock;
179151865fdaSZheng Liu 
17925356f261SAditya Kali 		/* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
17935356f261SAditya Kali 		 * and it should not appear on the bh->b_state.
17945356f261SAditya Kali 		 */
17955356f261SAditya Kali 		map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
17965356f261SAditya Kali 
17975356f261SAditya Kali 		map_bh(bh, inode->i_sb, invalid_block);
17985356f261SAditya Kali 		set_buffer_new(bh);
17995356f261SAditya Kali 		set_buffer_delay(bh);
18005356f261SAditya Kali 	}
18015356f261SAditya Kali 
18025356f261SAditya Kali out_unlock:
18035356f261SAditya Kali 	up_read((&EXT4_I(inode)->i_data_sem));
18045356f261SAditya Kali 
18055356f261SAditya Kali 	return retval;
18065356f261SAditya Kali }
18075356f261SAditya Kali 
18085356f261SAditya Kali /*
1809b920c755STheodore Ts'o  * This is a special get_blocks_t callback which is used by
1810b920c755STheodore Ts'o  * ext4_da_write_begin().  It will either return mapped block or
1811b920c755STheodore Ts'o  * reserve space for a single block.
181229fa89d0SAneesh Kumar K.V  *
181329fa89d0SAneesh Kumar K.V  * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
181429fa89d0SAneesh Kumar K.V  * We also have b_blocknr = -1 and b_bdev initialized properly
181529fa89d0SAneesh Kumar K.V  *
181629fa89d0SAneesh Kumar K.V  * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
181729fa89d0SAneesh Kumar K.V  * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
181829fa89d0SAneesh Kumar K.V  * initialized properly.
181964769240SAlex Tomas  */
182064769240SAlex Tomas static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
18212ed88685STheodore Ts'o 				  struct buffer_head *bh, int create)
182264769240SAlex Tomas {
18232ed88685STheodore Ts'o 	struct ext4_map_blocks map;
182464769240SAlex Tomas 	int ret = 0;
182564769240SAlex Tomas 
182664769240SAlex Tomas 	BUG_ON(create == 0);
18272ed88685STheodore Ts'o 	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
18282ed88685STheodore Ts'o 
18292ed88685STheodore Ts'o 	map.m_lblk = iblock;
18302ed88685STheodore Ts'o 	map.m_len = 1;
183164769240SAlex Tomas 
183264769240SAlex Tomas 	/*
183364769240SAlex Tomas 	 * first, we need to know whether the block is allocated already
183464769240SAlex Tomas 	 * preallocated blocks are unmapped but should treated
183564769240SAlex Tomas 	 * the same as allocated blocks.
183664769240SAlex Tomas 	 */
18375356f261SAditya Kali 	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
18385356f261SAditya Kali 	if (ret <= 0)
18392ed88685STheodore Ts'o 		return ret;
184064769240SAlex Tomas 
18412ed88685STheodore Ts'o 	map_bh(bh, inode->i_sb, map.m_pblk);
18422ed88685STheodore Ts'o 	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
18432ed88685STheodore Ts'o 
18442ed88685STheodore Ts'o 	if (buffer_unwritten(bh)) {
18452ed88685STheodore Ts'o 		/* A delayed write to unwritten bh should be marked
18462ed88685STheodore Ts'o 		 * new and mapped.  Mapped ensures that we don't do
18472ed88685STheodore Ts'o 		 * get_block multiple times when we write to the same
18482ed88685STheodore Ts'o 		 * offset and new ensures that we do proper zero out
18492ed88685STheodore Ts'o 		 * for partial write.
18502ed88685STheodore Ts'o 		 */
18512ed88685STheodore Ts'o 		set_buffer_new(bh);
1852c8205636STheodore Ts'o 		set_buffer_mapped(bh);
18532ed88685STheodore Ts'o 	}
18542ed88685STheodore Ts'o 	return 0;
185564769240SAlex Tomas }
185661628a3fSMingming Cao 
1857b920c755STheodore Ts'o /*
1858b920c755STheodore Ts'o  * This function is used as a standard get_block_t calback function
1859b920c755STheodore Ts'o  * when there is no desire to allocate any blocks.  It is used as a
1860ebdec241SChristoph Hellwig  * callback function for block_write_begin() and block_write_full_page().
1861206f7ab4SChristoph Hellwig  * These functions should only try to map a single block at a time.
1862b920c755STheodore Ts'o  *
1863b920c755STheodore Ts'o  * Since this function doesn't do block allocations even if the caller
1864b920c755STheodore Ts'o  * requests it by passing in create=1, it is critically important that
1865b920c755STheodore Ts'o  * any caller checks to make sure that any buffer heads are returned
1866b920c755STheodore Ts'o  * by this function are either all already mapped or marked for
1867206f7ab4SChristoph Hellwig  * delayed allocation before calling  block_write_full_page().  Otherwise,
1868206f7ab4SChristoph Hellwig  * b_blocknr could be left unitialized, and the page write functions will
1869206f7ab4SChristoph Hellwig  * be taken by surprise.
1870b920c755STheodore Ts'o  */
1871b920c755STheodore Ts'o static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
1872f0e6c985SAneesh Kumar K.V 				   struct buffer_head *bh_result, int create)
1873f0e6c985SAneesh Kumar K.V {
1874a2dc52b5STheodore Ts'o 	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
18752ed88685STheodore Ts'o 	return _ext4_get_block(inode, iblock, bh_result, 0);
187661628a3fSMingming Cao }
187761628a3fSMingming Cao 
187862e086beSAneesh Kumar K.V static int bget_one(handle_t *handle, struct buffer_head *bh)
187962e086beSAneesh Kumar K.V {
188062e086beSAneesh Kumar K.V 	get_bh(bh);
188162e086beSAneesh Kumar K.V 	return 0;
188262e086beSAneesh Kumar K.V }
188362e086beSAneesh Kumar K.V 
188462e086beSAneesh Kumar K.V static int bput_one(handle_t *handle, struct buffer_head *bh)
188562e086beSAneesh Kumar K.V {
188662e086beSAneesh Kumar K.V 	put_bh(bh);
188762e086beSAneesh Kumar K.V 	return 0;
188862e086beSAneesh Kumar K.V }
188962e086beSAneesh Kumar K.V 
189062e086beSAneesh Kumar K.V static int __ext4_journalled_writepage(struct page *page,
189162e086beSAneesh Kumar K.V 				       unsigned int len)
189262e086beSAneesh Kumar K.V {
189362e086beSAneesh Kumar K.V 	struct address_space *mapping = page->mapping;
189462e086beSAneesh Kumar K.V 	struct inode *inode = mapping->host;
189562e086beSAneesh Kumar K.V 	struct buffer_head *page_bufs;
189662e086beSAneesh Kumar K.V 	handle_t *handle = NULL;
189762e086beSAneesh Kumar K.V 	int ret = 0;
189862e086beSAneesh Kumar K.V 	int err;
189962e086beSAneesh Kumar K.V 
1900cb20d518STheodore Ts'o 	ClearPageChecked(page);
190162e086beSAneesh Kumar K.V 	page_bufs = page_buffers(page);
190262e086beSAneesh Kumar K.V 	BUG_ON(!page_bufs);
190362e086beSAneesh Kumar K.V 	walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
190462e086beSAneesh Kumar K.V 	/* As soon as we unlock the page, it can go away, but we have
190562e086beSAneesh Kumar K.V 	 * references to buffers so we are safe */
190662e086beSAneesh Kumar K.V 	unlock_page(page);
190762e086beSAneesh Kumar K.V 
190862e086beSAneesh Kumar K.V 	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
190962e086beSAneesh Kumar K.V 	if (IS_ERR(handle)) {
191062e086beSAneesh Kumar K.V 		ret = PTR_ERR(handle);
191162e086beSAneesh Kumar K.V 		goto out;
191262e086beSAneesh Kumar K.V 	}
191362e086beSAneesh Kumar K.V 
1914441c8508SCurt Wohlgemuth 	BUG_ON(!ext4_handle_valid(handle));
1915441c8508SCurt Wohlgemuth 
191662e086beSAneesh Kumar K.V 	ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
191762e086beSAneesh Kumar K.V 				do_journal_get_write_access);
191862e086beSAneesh Kumar K.V 
191962e086beSAneesh Kumar K.V 	err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
192062e086beSAneesh Kumar K.V 				write_end_fn);
192162e086beSAneesh Kumar K.V 	if (ret == 0)
192262e086beSAneesh Kumar K.V 		ret = err;
19232d859db3SJan Kara 	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
192462e086beSAneesh Kumar K.V 	err = ext4_journal_stop(handle);
192562e086beSAneesh Kumar K.V 	if (!ret)
192662e086beSAneesh Kumar K.V 		ret = err;
192762e086beSAneesh Kumar K.V 
192862e086beSAneesh Kumar K.V 	walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
192919f5fb7aSTheodore Ts'o 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
193062e086beSAneesh Kumar K.V out:
193162e086beSAneesh Kumar K.V 	return ret;
193262e086beSAneesh Kumar K.V }
193362e086beSAneesh Kumar K.V 
193461628a3fSMingming Cao /*
193543ce1d23SAneesh Kumar K.V  * Note that we don't need to start a transaction unless we're journaling data
193643ce1d23SAneesh Kumar K.V  * because we should have holes filled from ext4_page_mkwrite(). We even don't
193743ce1d23SAneesh Kumar K.V  * need to file the inode to the transaction's list in ordered mode because if
193843ce1d23SAneesh Kumar K.V  * we are writing back data added by write(), the inode is already there and if
193943ce1d23SAneesh Kumar K.V  * we are writing back data modified via mmap(), no one guarantees in which
194043ce1d23SAneesh Kumar K.V  * transaction the data will hit the disk. In case we are journaling data, we
194143ce1d23SAneesh Kumar K.V  * cannot start transaction directly because transaction start ranks above page
194243ce1d23SAneesh Kumar K.V  * lock so we have to do some magic.
194343ce1d23SAneesh Kumar K.V  *
1944b920c755STheodore Ts'o  * This function can get called via...
1945b920c755STheodore Ts'o  *   - ext4_da_writepages after taking page lock (have journal handle)
1946b920c755STheodore Ts'o  *   - journal_submit_inode_data_buffers (no journal handle)
1947f6463b0dSArtem Bityutskiy  *   - shrink_page_list via the kswapd/direct reclaim (no journal handle)
1948b920c755STheodore Ts'o  *   - grab_page_cache when doing write_begin (have journal handle)
194943ce1d23SAneesh Kumar K.V  *
195043ce1d23SAneesh Kumar K.V  * We don't do any block allocation in this function. If we have page with
195143ce1d23SAneesh Kumar K.V  * multiple blocks we need to write those buffer_heads that are mapped. This
195243ce1d23SAneesh Kumar K.V  * is important for mmaped based write. So if we do with blocksize 1K
195343ce1d23SAneesh Kumar K.V  * truncate(f, 1024);
195443ce1d23SAneesh Kumar K.V  * a = mmap(f, 0, 4096);
195543ce1d23SAneesh Kumar K.V  * a[0] = 'a';
195643ce1d23SAneesh Kumar K.V  * truncate(f, 4096);
195743ce1d23SAneesh Kumar K.V  * we have in the page first buffer_head mapped via page_mkwrite call back
195890802ed9SPaul Bolle  * but other buffer_heads would be unmapped but dirty (dirty done via the
195943ce1d23SAneesh Kumar K.V  * do_wp_page). So writepage should write the first block. If we modify
196043ce1d23SAneesh Kumar K.V  * the mmap area beyond 1024 we will again get a page_fault and the
196143ce1d23SAneesh Kumar K.V  * page_mkwrite callback will do the block allocation and mark the
196243ce1d23SAneesh Kumar K.V  * buffer_heads mapped.
196343ce1d23SAneesh Kumar K.V  *
196443ce1d23SAneesh Kumar K.V  * We redirty the page if we have any buffer_heads that is either delay or
196543ce1d23SAneesh Kumar K.V  * unwritten in the page.
196643ce1d23SAneesh Kumar K.V  *
196743ce1d23SAneesh Kumar K.V  * We can get recursively called as show below.
196843ce1d23SAneesh Kumar K.V  *
196943ce1d23SAneesh Kumar K.V  *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
197043ce1d23SAneesh Kumar K.V  *		ext4_writepage()
197143ce1d23SAneesh Kumar K.V  *
197243ce1d23SAneesh Kumar K.V  * But since we don't do any block allocation we should not deadlock.
197343ce1d23SAneesh Kumar K.V  * Page also have the dirty flag cleared so we don't get recurive page_lock.
197461628a3fSMingming Cao  */
197543ce1d23SAneesh Kumar K.V static int ext4_writepage(struct page *page,
197664769240SAlex Tomas 			  struct writeback_control *wbc)
197764769240SAlex Tomas {
1978a42afc5fSTheodore Ts'o 	int ret = 0, commit_write = 0;
197961628a3fSMingming Cao 	loff_t size;
1980498e5f24STheodore Ts'o 	unsigned int len;
1981744692dcSJiaying Zhang 	struct buffer_head *page_bufs = NULL;
198261628a3fSMingming Cao 	struct inode *inode = page->mapping->host;
198364769240SAlex Tomas 
1984a9c667f8SLukas Czerner 	trace_ext4_writepage(page);
198561628a3fSMingming Cao 	size = i_size_read(inode);
198661628a3fSMingming Cao 	if (page->index == size >> PAGE_CACHE_SHIFT)
198761628a3fSMingming Cao 		len = size & ~PAGE_CACHE_MASK;
198861628a3fSMingming Cao 	else
198961628a3fSMingming Cao 		len = PAGE_CACHE_SIZE;
199061628a3fSMingming Cao 
1991a42afc5fSTheodore Ts'o 	/*
1992a42afc5fSTheodore Ts'o 	 * If the page does not have buffers (for whatever reason),
1993a107e5a3STheodore Ts'o 	 * try to create them using __block_write_begin.  If this
1994a42afc5fSTheodore Ts'o 	 * fails, redirty the page and move on.
1995a42afc5fSTheodore Ts'o 	 */
1996b1142e8fSTheodore Ts'o 	if (!page_has_buffers(page)) {
1997a107e5a3STheodore Ts'o 		if (__block_write_begin(page, 0, len,
1998a42afc5fSTheodore Ts'o 					noalloc_get_block_write)) {
1999a42afc5fSTheodore Ts'o 		redirty_page:
2000a42afc5fSTheodore Ts'o 			redirty_page_for_writepage(wbc, page);
2001a42afc5fSTheodore Ts'o 			unlock_page(page);
2002a42afc5fSTheodore Ts'o 			return 0;
2003a42afc5fSTheodore Ts'o 		}
2004a42afc5fSTheodore Ts'o 		commit_write = 1;
2005a42afc5fSTheodore Ts'o 	}
2006f0e6c985SAneesh Kumar K.V 	page_bufs = page_buffers(page);
2007f0e6c985SAneesh Kumar K.V 	if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2008c364b22cSAneesh Kumar K.V 			      ext4_bh_delay_or_unwritten)) {
200961628a3fSMingming Cao 		/*
2010b1142e8fSTheodore Ts'o 		 * We don't want to do block allocation, so redirty
2011b1142e8fSTheodore Ts'o 		 * the page and return.  We may reach here when we do
2012b1142e8fSTheodore Ts'o 		 * a journal commit via journal_submit_inode_data_buffers.
2013966dbde2SMel Gorman 		 * We can also reach here via shrink_page_list but it
2014966dbde2SMel Gorman 		 * should never be for direct reclaim so warn if that
2015966dbde2SMel Gorman 		 * happens
2016f0e6c985SAneesh Kumar K.V 		 */
2017966dbde2SMel Gorman 		WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
2018966dbde2SMel Gorman 								PF_MEMALLOC);
2019a42afc5fSTheodore Ts'o 		goto redirty_page;
2020f0e6c985SAneesh Kumar K.V 	}
2021a42afc5fSTheodore Ts'o 	if (commit_write)
2022ed9b3e33SAneesh Kumar K.V 		/* now mark the buffer_heads as dirty and uptodate */
2023b767e78aSAneesh Kumar K.V 		block_commit_write(page, 0, len);
202464769240SAlex Tomas 
2025cb20d518STheodore Ts'o 	if (PageChecked(page) && ext4_should_journal_data(inode))
202643ce1d23SAneesh Kumar K.V 		/*
202743ce1d23SAneesh Kumar K.V 		 * It's mmapped pagecache.  Add buffers and journal it.  There
202843ce1d23SAneesh Kumar K.V 		 * doesn't seem much point in redirtying the page here.
202943ce1d23SAneesh Kumar K.V 		 */
20303f0ca309SWu Fengguang 		return __ext4_journalled_writepage(page, len);
203143ce1d23SAneesh Kumar K.V 
2032a42afc5fSTheodore Ts'o 	if (buffer_uninit(page_bufs)) {
2033744692dcSJiaying Zhang 		ext4_set_bh_endio(page_bufs, inode);
2034744692dcSJiaying Zhang 		ret = block_write_full_page_endio(page, noalloc_get_block_write,
2035744692dcSJiaying Zhang 					    wbc, ext4_end_io_buffer_write);
2036744692dcSJiaying Zhang 	} else
2037b920c755STheodore Ts'o 		ret = block_write_full_page(page, noalloc_get_block_write,
2038f0e6c985SAneesh Kumar K.V 					    wbc);
203964769240SAlex Tomas 
204064769240SAlex Tomas 	return ret;
204164769240SAlex Tomas }
204264769240SAlex Tomas 
204361628a3fSMingming Cao /*
2044525f4ed8SMingming Cao  * This is called via ext4_da_writepages() to
204525985edcSLucas De Marchi  * calculate the total number of credits to reserve to fit
2046525f4ed8SMingming Cao  * a single extent allocation into a single transaction,
2047525f4ed8SMingming Cao  * ext4_da_writpeages() will loop calling this before
2048525f4ed8SMingming Cao  * the block allocation.
204961628a3fSMingming Cao  */
2050525f4ed8SMingming Cao 
2051525f4ed8SMingming Cao static int ext4_da_writepages_trans_blocks(struct inode *inode)
2052525f4ed8SMingming Cao {
2053525f4ed8SMingming Cao 	int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2054525f4ed8SMingming Cao 
2055525f4ed8SMingming Cao 	/*
2056525f4ed8SMingming Cao 	 * With non-extent format the journal credit needed to
2057525f4ed8SMingming Cao 	 * insert nrblocks contiguous block is dependent on
2058525f4ed8SMingming Cao 	 * number of contiguous block. So we will limit
2059525f4ed8SMingming Cao 	 * number of contiguous block to a sane value
2060525f4ed8SMingming Cao 	 */
206112e9b892SDmitry Monakhov 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
2062525f4ed8SMingming Cao 	    (max_blocks > EXT4_MAX_TRANS_DATA))
2063525f4ed8SMingming Cao 		max_blocks = EXT4_MAX_TRANS_DATA;
2064525f4ed8SMingming Cao 
2065525f4ed8SMingming Cao 	return ext4_chunk_trans_blocks(inode, max_blocks);
2066525f4ed8SMingming Cao }
206761628a3fSMingming Cao 
20688e48dcfbSTheodore Ts'o /*
20698e48dcfbSTheodore Ts'o  * write_cache_pages_da - walk the list of dirty pages of the given
20708eb9e5ceSTheodore Ts'o  * address space and accumulate pages that need writing, and call
2071168fc022STheodore Ts'o  * mpage_da_map_and_submit to map a single contiguous memory region
2072168fc022STheodore Ts'o  * and then write them.
20738e48dcfbSTheodore Ts'o  */
20748e48dcfbSTheodore Ts'o static int write_cache_pages_da(struct address_space *mapping,
20758e48dcfbSTheodore Ts'o 				struct writeback_control *wbc,
207672f84e65SEric Sandeen 				struct mpage_da_data *mpd,
207772f84e65SEric Sandeen 				pgoff_t *done_index)
20788e48dcfbSTheodore Ts'o {
20798eb9e5ceSTheodore Ts'o 	struct buffer_head	*bh, *head;
2080168fc022STheodore Ts'o 	struct inode		*inode = mapping->host;
20818e48dcfbSTheodore Ts'o 	struct pagevec		pvec;
20824f01b02cSTheodore Ts'o 	unsigned int		nr_pages;
20834f01b02cSTheodore Ts'o 	sector_t		logical;
20844f01b02cSTheodore Ts'o 	pgoff_t			index, end;
20858e48dcfbSTheodore Ts'o 	long			nr_to_write = wbc->nr_to_write;
20864f01b02cSTheodore Ts'o 	int			i, tag, ret = 0;
20878e48dcfbSTheodore Ts'o 
2088168fc022STheodore Ts'o 	memset(mpd, 0, sizeof(struct mpage_da_data));
2089168fc022STheodore Ts'o 	mpd->wbc = wbc;
2090168fc022STheodore Ts'o 	mpd->inode = inode;
20918e48dcfbSTheodore Ts'o 	pagevec_init(&pvec, 0);
20928e48dcfbSTheodore Ts'o 	index = wbc->range_start >> PAGE_CACHE_SHIFT;
20938e48dcfbSTheodore Ts'o 	end = wbc->range_end >> PAGE_CACHE_SHIFT;
20948e48dcfbSTheodore Ts'o 
20956e6938b6SWu Fengguang 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
20965b41d924SEric Sandeen 		tag = PAGECACHE_TAG_TOWRITE;
20975b41d924SEric Sandeen 	else
20985b41d924SEric Sandeen 		tag = PAGECACHE_TAG_DIRTY;
20995b41d924SEric Sandeen 
210072f84e65SEric Sandeen 	*done_index = index;
21014f01b02cSTheodore Ts'o 	while (index <= end) {
21025b41d924SEric Sandeen 		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
21038e48dcfbSTheodore Ts'o 			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
21048e48dcfbSTheodore Ts'o 		if (nr_pages == 0)
21054f01b02cSTheodore Ts'o 			return 0;
21068e48dcfbSTheodore Ts'o 
21078e48dcfbSTheodore Ts'o 		for (i = 0; i < nr_pages; i++) {
21088e48dcfbSTheodore Ts'o 			struct page *page = pvec.pages[i];
21098e48dcfbSTheodore Ts'o 
21108e48dcfbSTheodore Ts'o 			/*
21118e48dcfbSTheodore Ts'o 			 * At this point, the page may be truncated or
21128e48dcfbSTheodore Ts'o 			 * invalidated (changing page->mapping to NULL), or
21138e48dcfbSTheodore Ts'o 			 * even swizzled back from swapper_space to tmpfs file
21148e48dcfbSTheodore Ts'o 			 * mapping. However, page->index will not change
21158e48dcfbSTheodore Ts'o 			 * because we have a reference on the page.
21168e48dcfbSTheodore Ts'o 			 */
21174f01b02cSTheodore Ts'o 			if (page->index > end)
21184f01b02cSTheodore Ts'o 				goto out;
21198e48dcfbSTheodore Ts'o 
212072f84e65SEric Sandeen 			*done_index = page->index + 1;
212172f84e65SEric Sandeen 
212278aaced3STheodore Ts'o 			/*
212378aaced3STheodore Ts'o 			 * If we can't merge this page, and we have
212478aaced3STheodore Ts'o 			 * accumulated an contiguous region, write it
212578aaced3STheodore Ts'o 			 */
212678aaced3STheodore Ts'o 			if ((mpd->next_page != page->index) &&
212778aaced3STheodore Ts'o 			    (mpd->next_page != mpd->first_page)) {
212878aaced3STheodore Ts'o 				mpage_da_map_and_submit(mpd);
212978aaced3STheodore Ts'o 				goto ret_extent_tail;
213078aaced3STheodore Ts'o 			}
213178aaced3STheodore Ts'o 
21328e48dcfbSTheodore Ts'o 			lock_page(page);
21338e48dcfbSTheodore Ts'o 
21348e48dcfbSTheodore Ts'o 			/*
21354f01b02cSTheodore Ts'o 			 * If the page is no longer dirty, or its
21364f01b02cSTheodore Ts'o 			 * mapping no longer corresponds to inode we
21374f01b02cSTheodore Ts'o 			 * are writing (which means it has been
21384f01b02cSTheodore Ts'o 			 * truncated or invalidated), or the page is
21394f01b02cSTheodore Ts'o 			 * already under writeback and we are not
21404f01b02cSTheodore Ts'o 			 * doing a data integrity writeback, skip the page
21418e48dcfbSTheodore Ts'o 			 */
21424f01b02cSTheodore Ts'o 			if (!PageDirty(page) ||
21434f01b02cSTheodore Ts'o 			    (PageWriteback(page) &&
21444f01b02cSTheodore Ts'o 			     (wbc->sync_mode == WB_SYNC_NONE)) ||
21454f01b02cSTheodore Ts'o 			    unlikely(page->mapping != mapping)) {
21468e48dcfbSTheodore Ts'o 				unlock_page(page);
21478e48dcfbSTheodore Ts'o 				continue;
21488e48dcfbSTheodore Ts'o 			}
21498e48dcfbSTheodore Ts'o 
21508e48dcfbSTheodore Ts'o 			wait_on_page_writeback(page);
21518e48dcfbSTheodore Ts'o 			BUG_ON(PageWriteback(page));
21528e48dcfbSTheodore Ts'o 
2153168fc022STheodore Ts'o 			if (mpd->next_page != page->index)
21548eb9e5ceSTheodore Ts'o 				mpd->first_page = page->index;
21558eb9e5ceSTheodore Ts'o 			mpd->next_page = page->index + 1;
21568eb9e5ceSTheodore Ts'o 			logical = (sector_t) page->index <<
21578eb9e5ceSTheodore Ts'o 				(PAGE_CACHE_SHIFT - inode->i_blkbits);
21588eb9e5ceSTheodore Ts'o 
21598eb9e5ceSTheodore Ts'o 			if (!page_has_buffers(page)) {
21604f01b02cSTheodore Ts'o 				mpage_add_bh_to_extent(mpd, logical,
21614f01b02cSTheodore Ts'o 						       PAGE_CACHE_SIZE,
21628eb9e5ceSTheodore Ts'o 						       (1 << BH_Dirty) | (1 << BH_Uptodate));
21634f01b02cSTheodore Ts'o 				if (mpd->io_done)
21644f01b02cSTheodore Ts'o 					goto ret_extent_tail;
21658e48dcfbSTheodore Ts'o 			} else {
21668eb9e5ceSTheodore Ts'o 				/*
21674f01b02cSTheodore Ts'o 				 * Page with regular buffer heads,
21684f01b02cSTheodore Ts'o 				 * just add all dirty ones
21698eb9e5ceSTheodore Ts'o 				 */
21708eb9e5ceSTheodore Ts'o 				head = page_buffers(page);
21718eb9e5ceSTheodore Ts'o 				bh = head;
21728eb9e5ceSTheodore Ts'o 				do {
21738eb9e5ceSTheodore Ts'o 					BUG_ON(buffer_locked(bh));
21748eb9e5ceSTheodore Ts'o 					/*
21758eb9e5ceSTheodore Ts'o 					 * We need to try to allocate
21768eb9e5ceSTheodore Ts'o 					 * unmapped blocks in the same page.
21778eb9e5ceSTheodore Ts'o 					 * Otherwise we won't make progress
21788eb9e5ceSTheodore Ts'o 					 * with the page in ext4_writepage
21798eb9e5ceSTheodore Ts'o 					 */
21808eb9e5ceSTheodore Ts'o 					if (ext4_bh_delay_or_unwritten(NULL, bh)) {
21818eb9e5ceSTheodore Ts'o 						mpage_add_bh_to_extent(mpd, logical,
21828eb9e5ceSTheodore Ts'o 								       bh->b_size,
21838eb9e5ceSTheodore Ts'o 								       bh->b_state);
21844f01b02cSTheodore Ts'o 						if (mpd->io_done)
21854f01b02cSTheodore Ts'o 							goto ret_extent_tail;
21868eb9e5ceSTheodore Ts'o 					} else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
21878eb9e5ceSTheodore Ts'o 						/*
21884f01b02cSTheodore Ts'o 						 * mapped dirty buffer. We need
21894f01b02cSTheodore Ts'o 						 * to update the b_state
21904f01b02cSTheodore Ts'o 						 * because we look at b_state
21914f01b02cSTheodore Ts'o 						 * in mpage_da_map_blocks.  We
21924f01b02cSTheodore Ts'o 						 * don't update b_size because
21934f01b02cSTheodore Ts'o 						 * if we find an unmapped
21944f01b02cSTheodore Ts'o 						 * buffer_head later we need to
21954f01b02cSTheodore Ts'o 						 * use the b_state flag of that
21964f01b02cSTheodore Ts'o 						 * buffer_head.
21978eb9e5ceSTheodore Ts'o 						 */
21988eb9e5ceSTheodore Ts'o 						if (mpd->b_size == 0)
21998eb9e5ceSTheodore Ts'o 							mpd->b_state = bh->b_state & BH_FLAGS;
22008e48dcfbSTheodore Ts'o 					}
22018eb9e5ceSTheodore Ts'o 					logical++;
22028eb9e5ceSTheodore Ts'o 				} while ((bh = bh->b_this_page) != head);
22038e48dcfbSTheodore Ts'o 			}
22048e48dcfbSTheodore Ts'o 
22058e48dcfbSTheodore Ts'o 			if (nr_to_write > 0) {
22068e48dcfbSTheodore Ts'o 				nr_to_write--;
22078e48dcfbSTheodore Ts'o 				if (nr_to_write == 0 &&
22084f01b02cSTheodore Ts'o 				    wbc->sync_mode == WB_SYNC_NONE)
22098e48dcfbSTheodore Ts'o 					/*
22108e48dcfbSTheodore Ts'o 					 * We stop writing back only if we are
22118e48dcfbSTheodore Ts'o 					 * not doing integrity sync. In case of
22128e48dcfbSTheodore Ts'o 					 * integrity sync we have to keep going
22138e48dcfbSTheodore Ts'o 					 * because someone may be concurrently
22148e48dcfbSTheodore Ts'o 					 * dirtying pages, and we might have
22158e48dcfbSTheodore Ts'o 					 * synced a lot of newly appeared dirty
22168e48dcfbSTheodore Ts'o 					 * pages, but have not synced all of the
22178e48dcfbSTheodore Ts'o 					 * old dirty pages.
22188e48dcfbSTheodore Ts'o 					 */
22194f01b02cSTheodore Ts'o 					goto out;
22208e48dcfbSTheodore Ts'o 			}
22218e48dcfbSTheodore Ts'o 		}
22228e48dcfbSTheodore Ts'o 		pagevec_release(&pvec);
22238e48dcfbSTheodore Ts'o 		cond_resched();
22248e48dcfbSTheodore Ts'o 	}
22254f01b02cSTheodore Ts'o 	return 0;
22264f01b02cSTheodore Ts'o ret_extent_tail:
22274f01b02cSTheodore Ts'o 	ret = MPAGE_DA_EXTENT_TAIL;
22288eb9e5ceSTheodore Ts'o out:
22298eb9e5ceSTheodore Ts'o 	pagevec_release(&pvec);
22308eb9e5ceSTheodore Ts'o 	cond_resched();
22318e48dcfbSTheodore Ts'o 	return ret;
22328e48dcfbSTheodore Ts'o }
22338e48dcfbSTheodore Ts'o 
22348e48dcfbSTheodore Ts'o 
223564769240SAlex Tomas static int ext4_da_writepages(struct address_space *mapping,
223664769240SAlex Tomas 			      struct writeback_control *wbc)
223764769240SAlex Tomas {
223822208dedSAneesh Kumar K.V 	pgoff_t	index;
223922208dedSAneesh Kumar K.V 	int range_whole = 0;
224061628a3fSMingming Cao 	handle_t *handle = NULL;
2241df22291fSAneesh Kumar K.V 	struct mpage_da_data mpd;
22425e745b04SAneesh Kumar K.V 	struct inode *inode = mapping->host;
2243498e5f24STheodore Ts'o 	int pages_written = 0;
224455138e0bSTheodore Ts'o 	unsigned int max_pages;
22452acf2c26SAneesh Kumar K.V 	int range_cyclic, cycled = 1, io_done = 0;
224655138e0bSTheodore Ts'o 	int needed_blocks, ret = 0;
224755138e0bSTheodore Ts'o 	long desired_nr_to_write, nr_to_writebump = 0;
2248de89de6eSTheodore Ts'o 	loff_t range_start = wbc->range_start;
22495e745b04SAneesh Kumar K.V 	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
225072f84e65SEric Sandeen 	pgoff_t done_index = 0;
22515b41d924SEric Sandeen 	pgoff_t end;
22521bce63d1SShaohua Li 	struct blk_plug plug;
225361628a3fSMingming Cao 
22549bffad1eSTheodore Ts'o 	trace_ext4_da_writepages(inode, wbc);
2255ba80b101STheodore Ts'o 
225661628a3fSMingming Cao 	/*
225761628a3fSMingming Cao 	 * No pages to write? This is mainly a kludge to avoid starting
225861628a3fSMingming Cao 	 * a transaction for special inodes like journal inode on last iput()
225961628a3fSMingming Cao 	 * because that could violate lock ordering on umount
226061628a3fSMingming Cao 	 */
2261a1d6cc56SAneesh Kumar K.V 	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
226261628a3fSMingming Cao 		return 0;
22632a21e37eSTheodore Ts'o 
22642a21e37eSTheodore Ts'o 	/*
22652a21e37eSTheodore Ts'o 	 * If the filesystem has aborted, it is read-only, so return
22662a21e37eSTheodore Ts'o 	 * right away instead of dumping stack traces later on that
22672a21e37eSTheodore Ts'o 	 * will obscure the real source of the problem.  We test
22684ab2f15bSTheodore Ts'o 	 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
22692a21e37eSTheodore Ts'o 	 * the latter could be true if the filesystem is mounted
22702a21e37eSTheodore Ts'o 	 * read-only, and in that case, ext4_da_writepages should
22712a21e37eSTheodore Ts'o 	 * *never* be called, so if that ever happens, we would want
22722a21e37eSTheodore Ts'o 	 * the stack trace.
22732a21e37eSTheodore Ts'o 	 */
22744ab2f15bSTheodore Ts'o 	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
22752a21e37eSTheodore Ts'o 		return -EROFS;
22762a21e37eSTheodore Ts'o 
227722208dedSAneesh Kumar K.V 	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
227822208dedSAneesh Kumar K.V 		range_whole = 1;
227961628a3fSMingming Cao 
22802acf2c26SAneesh Kumar K.V 	range_cyclic = wbc->range_cyclic;
22812acf2c26SAneesh Kumar K.V 	if (wbc->range_cyclic) {
228222208dedSAneesh Kumar K.V 		index = mapping->writeback_index;
22832acf2c26SAneesh Kumar K.V 		if (index)
22842acf2c26SAneesh Kumar K.V 			cycled = 0;
22852acf2c26SAneesh Kumar K.V 		wbc->range_start = index << PAGE_CACHE_SHIFT;
22862acf2c26SAneesh Kumar K.V 		wbc->range_end  = LLONG_MAX;
22872acf2c26SAneesh Kumar K.V 		wbc->range_cyclic = 0;
22885b41d924SEric Sandeen 		end = -1;
22895b41d924SEric Sandeen 	} else {
229022208dedSAneesh Kumar K.V 		index = wbc->range_start >> PAGE_CACHE_SHIFT;
22915b41d924SEric Sandeen 		end = wbc->range_end >> PAGE_CACHE_SHIFT;
22925b41d924SEric Sandeen 	}
2293a1d6cc56SAneesh Kumar K.V 
229455138e0bSTheodore Ts'o 	/*
229555138e0bSTheodore Ts'o 	 * This works around two forms of stupidity.  The first is in
229655138e0bSTheodore Ts'o 	 * the writeback code, which caps the maximum number of pages
229755138e0bSTheodore Ts'o 	 * written to be 1024 pages.  This is wrong on multiple
229855138e0bSTheodore Ts'o 	 * levels; different architectues have a different page size,
229955138e0bSTheodore Ts'o 	 * which changes the maximum amount of data which gets
230055138e0bSTheodore Ts'o 	 * written.  Secondly, 4 megabytes is way too small.  XFS
230155138e0bSTheodore Ts'o 	 * forces this value to be 16 megabytes by multiplying
230255138e0bSTheodore Ts'o 	 * nr_to_write parameter by four, and then relies on its
230355138e0bSTheodore Ts'o 	 * allocator to allocate larger extents to make them
230455138e0bSTheodore Ts'o 	 * contiguous.  Unfortunately this brings us to the second
230555138e0bSTheodore Ts'o 	 * stupidity, which is that ext4's mballoc code only allocates
230655138e0bSTheodore Ts'o 	 * at most 2048 blocks.  So we force contiguous writes up to
230755138e0bSTheodore Ts'o 	 * the number of dirty blocks in the inode, or
230855138e0bSTheodore Ts'o 	 * sbi->max_writeback_mb_bump whichever is smaller.
230955138e0bSTheodore Ts'o 	 */
231055138e0bSTheodore Ts'o 	max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
2311b443e733SEric Sandeen 	if (!range_cyclic && range_whole) {
2312b443e733SEric Sandeen 		if (wbc->nr_to_write == LONG_MAX)
2313b443e733SEric Sandeen 			desired_nr_to_write = wbc->nr_to_write;
231455138e0bSTheodore Ts'o 		else
2315b443e733SEric Sandeen 			desired_nr_to_write = wbc->nr_to_write * 8;
2316b443e733SEric Sandeen 	} else
231755138e0bSTheodore Ts'o 		desired_nr_to_write = ext4_num_dirty_pages(inode, index,
231855138e0bSTheodore Ts'o 							   max_pages);
231955138e0bSTheodore Ts'o 	if (desired_nr_to_write > max_pages)
232055138e0bSTheodore Ts'o 		desired_nr_to_write = max_pages;
232155138e0bSTheodore Ts'o 
232255138e0bSTheodore Ts'o 	if (wbc->nr_to_write < desired_nr_to_write) {
232355138e0bSTheodore Ts'o 		nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
232455138e0bSTheodore Ts'o 		wbc->nr_to_write = desired_nr_to_write;
232555138e0bSTheodore Ts'o 	}
232655138e0bSTheodore Ts'o 
23272acf2c26SAneesh Kumar K.V retry:
23286e6938b6SWu Fengguang 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
23295b41d924SEric Sandeen 		tag_pages_for_writeback(mapping, index, end);
23305b41d924SEric Sandeen 
23311bce63d1SShaohua Li 	blk_start_plug(&plug);
233222208dedSAneesh Kumar K.V 	while (!ret && wbc->nr_to_write > 0) {
2333a1d6cc56SAneesh Kumar K.V 
2334a1d6cc56SAneesh Kumar K.V 		/*
2335a1d6cc56SAneesh Kumar K.V 		 * we  insert one extent at a time. So we need
2336a1d6cc56SAneesh Kumar K.V 		 * credit needed for single extent allocation.
2337a1d6cc56SAneesh Kumar K.V 		 * journalled mode is currently not supported
2338a1d6cc56SAneesh Kumar K.V 		 * by delalloc
2339a1d6cc56SAneesh Kumar K.V 		 */
2340a1d6cc56SAneesh Kumar K.V 		BUG_ON(ext4_should_journal_data(inode));
2341525f4ed8SMingming Cao 		needed_blocks = ext4_da_writepages_trans_blocks(inode);
2342a1d6cc56SAneesh Kumar K.V 
234361628a3fSMingming Cao 		/* start a new transaction*/
234461628a3fSMingming Cao 		handle = ext4_journal_start(inode, needed_blocks);
234561628a3fSMingming Cao 		if (IS_ERR(handle)) {
234661628a3fSMingming Cao 			ret = PTR_ERR(handle);
23471693918eSTheodore Ts'o 			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2348fbe845ddSCurt Wohlgemuth 			       "%ld pages, ino %lu; err %d", __func__,
2349a1d6cc56SAneesh Kumar K.V 				wbc->nr_to_write, inode->i_ino, ret);
23503c1fcb2cSNamjae Jeon 			blk_finish_plug(&plug);
235161628a3fSMingming Cao 			goto out_writepages;
235261628a3fSMingming Cao 		}
2353f63e6005STheodore Ts'o 
2354f63e6005STheodore Ts'o 		/*
23558eb9e5ceSTheodore Ts'o 		 * Now call write_cache_pages_da() to find the next
2356f63e6005STheodore Ts'o 		 * contiguous region of logical blocks that need
23578eb9e5ceSTheodore Ts'o 		 * blocks to be allocated by ext4 and submit them.
2358f63e6005STheodore Ts'o 		 */
235972f84e65SEric Sandeen 		ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
2360f63e6005STheodore Ts'o 		/*
2361af901ca1SAndré Goddard Rosa 		 * If we have a contiguous extent of pages and we
2362f63e6005STheodore Ts'o 		 * haven't done the I/O yet, map the blocks and submit
2363f63e6005STheodore Ts'o 		 * them for I/O.
2364f63e6005STheodore Ts'o 		 */
2365f63e6005STheodore Ts'o 		if (!mpd.io_done && mpd.next_page != mpd.first_page) {
23665a87b7a5STheodore Ts'o 			mpage_da_map_and_submit(&mpd);
2367f63e6005STheodore Ts'o 			ret = MPAGE_DA_EXTENT_TAIL;
2368f63e6005STheodore Ts'o 		}
2369b3a3ca8cSTheodore Ts'o 		trace_ext4_da_write_pages(inode, &mpd);
2370f63e6005STheodore Ts'o 		wbc->nr_to_write -= mpd.pages_written;
2371df22291fSAneesh Kumar K.V 
237261628a3fSMingming Cao 		ext4_journal_stop(handle);
2373df22291fSAneesh Kumar K.V 
23748f64b32eSEric Sandeen 		if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
237522208dedSAneesh Kumar K.V 			/* commit the transaction which would
237622208dedSAneesh Kumar K.V 			 * free blocks released in the transaction
237722208dedSAneesh Kumar K.V 			 * and try again
237822208dedSAneesh Kumar K.V 			 */
2379df22291fSAneesh Kumar K.V 			jbd2_journal_force_commit_nested(sbi->s_journal);
238022208dedSAneesh Kumar K.V 			ret = 0;
238122208dedSAneesh Kumar K.V 		} else if (ret == MPAGE_DA_EXTENT_TAIL) {
2382a1d6cc56SAneesh Kumar K.V 			/*
23838de49e67SKazuya Mio 			 * Got one extent now try with rest of the pages.
23848de49e67SKazuya Mio 			 * If mpd.retval is set -EIO, journal is aborted.
23858de49e67SKazuya Mio 			 * So we don't need to write any more.
2386a1d6cc56SAneesh Kumar K.V 			 */
238722208dedSAneesh Kumar K.V 			pages_written += mpd.pages_written;
23888de49e67SKazuya Mio 			ret = mpd.retval;
23892acf2c26SAneesh Kumar K.V 			io_done = 1;
239022208dedSAneesh Kumar K.V 		} else if (wbc->nr_to_write)
239161628a3fSMingming Cao 			/*
239261628a3fSMingming Cao 			 * There is no more writeout needed
239361628a3fSMingming Cao 			 * or we requested for a noblocking writeout
239461628a3fSMingming Cao 			 * and we found the device congested
239561628a3fSMingming Cao 			 */
239661628a3fSMingming Cao 			break;
239761628a3fSMingming Cao 	}
23981bce63d1SShaohua Li 	blk_finish_plug(&plug);
23992acf2c26SAneesh Kumar K.V 	if (!io_done && !cycled) {
24002acf2c26SAneesh Kumar K.V 		cycled = 1;
24012acf2c26SAneesh Kumar K.V 		index = 0;
24022acf2c26SAneesh Kumar K.V 		wbc->range_start = index << PAGE_CACHE_SHIFT;
24032acf2c26SAneesh Kumar K.V 		wbc->range_end  = mapping->writeback_index - 1;
24042acf2c26SAneesh Kumar K.V 		goto retry;
24052acf2c26SAneesh Kumar K.V 	}
240661628a3fSMingming Cao 
240722208dedSAneesh Kumar K.V 	/* Update index */
24082acf2c26SAneesh Kumar K.V 	wbc->range_cyclic = range_cyclic;
240922208dedSAneesh Kumar K.V 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
241022208dedSAneesh Kumar K.V 		/*
241122208dedSAneesh Kumar K.V 		 * set the writeback_index so that range_cyclic
241222208dedSAneesh Kumar K.V 		 * mode will write it back later
241322208dedSAneesh Kumar K.V 		 */
241472f84e65SEric Sandeen 		mapping->writeback_index = done_index;
2415a1d6cc56SAneesh Kumar K.V 
241661628a3fSMingming Cao out_writepages:
241722208dedSAneesh Kumar K.V 	wbc->nr_to_write -= nr_to_writebump;
2418de89de6eSTheodore Ts'o 	wbc->range_start = range_start;
24199bffad1eSTheodore Ts'o 	trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
242061628a3fSMingming Cao 	return ret;
242164769240SAlex Tomas }
242264769240SAlex Tomas 
242379f0be8dSAneesh Kumar K.V #define FALL_BACK_TO_NONDELALLOC 1
242479f0be8dSAneesh Kumar K.V static int ext4_nonda_switch(struct super_block *sb)
242579f0be8dSAneesh Kumar K.V {
242679f0be8dSAneesh Kumar K.V 	s64 free_blocks, dirty_blocks;
242779f0be8dSAneesh Kumar K.V 	struct ext4_sb_info *sbi = EXT4_SB(sb);
242879f0be8dSAneesh Kumar K.V 
242979f0be8dSAneesh Kumar K.V 	/*
243079f0be8dSAneesh Kumar K.V 	 * switch to non delalloc mode if we are running low
243179f0be8dSAneesh Kumar K.V 	 * on free block. The free block accounting via percpu
2432179f7ebfSEric Dumazet 	 * counters can get slightly wrong with percpu_counter_batch getting
243379f0be8dSAneesh Kumar K.V 	 * accumulated on each CPU without updating global counters
243479f0be8dSAneesh Kumar K.V 	 * Delalloc need an accurate free block accounting. So switch
243579f0be8dSAneesh Kumar K.V 	 * to non delalloc when we are near to error range.
243679f0be8dSAneesh Kumar K.V 	 */
243757042651STheodore Ts'o 	free_blocks  = EXT4_C2B(sbi,
243857042651STheodore Ts'o 		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
243957042651STheodore Ts'o 	dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
244000d4e736STheodore Ts'o 	/*
244100d4e736STheodore Ts'o 	 * Start pushing delalloc when 1/2 of free blocks are dirty.
244200d4e736STheodore Ts'o 	 */
244300d4e736STheodore Ts'o 	if (dirty_blocks && (free_blocks < 2 * dirty_blocks) &&
244400d4e736STheodore Ts'o 	    !writeback_in_progress(sb->s_bdi) &&
244500d4e736STheodore Ts'o 	    down_read_trylock(&sb->s_umount)) {
244600d4e736STheodore Ts'o 		writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
244700d4e736STheodore Ts'o 		up_read(&sb->s_umount);
244800d4e736STheodore Ts'o 	}
244900d4e736STheodore Ts'o 
245079f0be8dSAneesh Kumar K.V 	if (2 * free_blocks < 3 * dirty_blocks ||
2451df55c99dSTheodore Ts'o 		free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
245279f0be8dSAneesh Kumar K.V 		/*
2453c8afb446SEric Sandeen 		 * free block count is less than 150% of dirty blocks
2454c8afb446SEric Sandeen 		 * or free blocks is less than watermark
245579f0be8dSAneesh Kumar K.V 		 */
245679f0be8dSAneesh Kumar K.V 		return 1;
245779f0be8dSAneesh Kumar K.V 	}
245879f0be8dSAneesh Kumar K.V 	return 0;
245979f0be8dSAneesh Kumar K.V }
246079f0be8dSAneesh Kumar K.V 
246164769240SAlex Tomas static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
246264769240SAlex Tomas 			       loff_t pos, unsigned len, unsigned flags,
246364769240SAlex Tomas 			       struct page **pagep, void **fsdata)
246464769240SAlex Tomas {
246572b8ab9dSEric Sandeen 	int ret, retries = 0;
246664769240SAlex Tomas 	struct page *page;
246764769240SAlex Tomas 	pgoff_t index;
246864769240SAlex Tomas 	struct inode *inode = mapping->host;
246964769240SAlex Tomas 	handle_t *handle;
247064769240SAlex Tomas 
247164769240SAlex Tomas 	index = pos >> PAGE_CACHE_SHIFT;
247279f0be8dSAneesh Kumar K.V 
247379f0be8dSAneesh Kumar K.V 	if (ext4_nonda_switch(inode->i_sb)) {
247479f0be8dSAneesh Kumar K.V 		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
247579f0be8dSAneesh Kumar K.V 		return ext4_write_begin(file, mapping, pos,
247679f0be8dSAneesh Kumar K.V 					len, flags, pagep, fsdata);
247779f0be8dSAneesh Kumar K.V 	}
247879f0be8dSAneesh Kumar K.V 	*fsdata = (void *)0;
24799bffad1eSTheodore Ts'o 	trace_ext4_da_write_begin(inode, pos, len, flags);
2480d2a17637SMingming Cao retry:
248164769240SAlex Tomas 	/*
248264769240SAlex Tomas 	 * With delayed allocation, we don't log the i_disksize update
248364769240SAlex Tomas 	 * if there is delayed block allocation. But we still need
248464769240SAlex Tomas 	 * to journalling the i_disksize update if writes to the end
248564769240SAlex Tomas 	 * of file which has an already mapped buffer.
248664769240SAlex Tomas 	 */
248764769240SAlex Tomas 	handle = ext4_journal_start(inode, 1);
248864769240SAlex Tomas 	if (IS_ERR(handle)) {
248964769240SAlex Tomas 		ret = PTR_ERR(handle);
249064769240SAlex Tomas 		goto out;
249164769240SAlex Tomas 	}
2492ebd3610bSJan Kara 	/* We cannot recurse into the filesystem as the transaction is already
2493ebd3610bSJan Kara 	 * started */
2494ebd3610bSJan Kara 	flags |= AOP_FLAG_NOFS;
249564769240SAlex Tomas 
249654566b2cSNick Piggin 	page = grab_cache_page_write_begin(mapping, index, flags);
2497d5a0d4f7SEric Sandeen 	if (!page) {
2498d5a0d4f7SEric Sandeen 		ext4_journal_stop(handle);
2499d5a0d4f7SEric Sandeen 		ret = -ENOMEM;
2500d5a0d4f7SEric Sandeen 		goto out;
2501d5a0d4f7SEric Sandeen 	}
250264769240SAlex Tomas 	*pagep = page;
250364769240SAlex Tomas 
25046e1db88dSChristoph Hellwig 	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
250564769240SAlex Tomas 	if (ret < 0) {
250664769240SAlex Tomas 		unlock_page(page);
250764769240SAlex Tomas 		ext4_journal_stop(handle);
250864769240SAlex Tomas 		page_cache_release(page);
2509ae4d5372SAneesh Kumar K.V 		/*
2510ae4d5372SAneesh Kumar K.V 		 * block_write_begin may have instantiated a few blocks
2511ae4d5372SAneesh Kumar K.V 		 * outside i_size.  Trim these off again. Don't need
2512ae4d5372SAneesh Kumar K.V 		 * i_size_read because we hold i_mutex.
2513ae4d5372SAneesh Kumar K.V 		 */
2514ae4d5372SAneesh Kumar K.V 		if (pos + len > inode->i_size)
2515b9a4207dSJan Kara 			ext4_truncate_failed_write(inode);
251664769240SAlex Tomas 	}
251764769240SAlex Tomas 
2518d2a17637SMingming Cao 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2519d2a17637SMingming Cao 		goto retry;
252064769240SAlex Tomas out:
252164769240SAlex Tomas 	return ret;
252264769240SAlex Tomas }
252364769240SAlex Tomas 
2524632eaeabSMingming Cao /*
2525632eaeabSMingming Cao  * Check if we should update i_disksize
2526632eaeabSMingming Cao  * when write to the end of file but not require block allocation
2527632eaeabSMingming Cao  */
2528632eaeabSMingming Cao static int ext4_da_should_update_i_disksize(struct page *page,
2529632eaeabSMingming Cao 					    unsigned long offset)
2530632eaeabSMingming Cao {
2531632eaeabSMingming Cao 	struct buffer_head *bh;
2532632eaeabSMingming Cao 	struct inode *inode = page->mapping->host;
2533632eaeabSMingming Cao 	unsigned int idx;
2534632eaeabSMingming Cao 	int i;
2535632eaeabSMingming Cao 
2536632eaeabSMingming Cao 	bh = page_buffers(page);
2537632eaeabSMingming Cao 	idx = offset >> inode->i_blkbits;
2538632eaeabSMingming Cao 
2539632eaeabSMingming Cao 	for (i = 0; i < idx; i++)
2540632eaeabSMingming Cao 		bh = bh->b_this_page;
2541632eaeabSMingming Cao 
254229fa89d0SAneesh Kumar K.V 	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
2543632eaeabSMingming Cao 		return 0;
2544632eaeabSMingming Cao 	return 1;
2545632eaeabSMingming Cao }
2546632eaeabSMingming Cao 
254764769240SAlex Tomas static int ext4_da_write_end(struct file *file,
254864769240SAlex Tomas 			     struct address_space *mapping,
254964769240SAlex Tomas 			     loff_t pos, unsigned len, unsigned copied,
255064769240SAlex Tomas 			     struct page *page, void *fsdata)
255164769240SAlex Tomas {
255264769240SAlex Tomas 	struct inode *inode = mapping->host;
255364769240SAlex Tomas 	int ret = 0, ret2;
255464769240SAlex Tomas 	handle_t *handle = ext4_journal_current_handle();
255564769240SAlex Tomas 	loff_t new_i_size;
2556632eaeabSMingming Cao 	unsigned long start, end;
255779f0be8dSAneesh Kumar K.V 	int write_mode = (int)(unsigned long)fsdata;
255879f0be8dSAneesh Kumar K.V 
255979f0be8dSAneesh Kumar K.V 	if (write_mode == FALL_BACK_TO_NONDELALLOC) {
25603d2b1582SLukas Czerner 		switch (ext4_inode_journal_mode(inode)) {
25613d2b1582SLukas Czerner 		case EXT4_INODE_ORDERED_DATA_MODE:
256279f0be8dSAneesh Kumar K.V 			return ext4_ordered_write_end(file, mapping, pos,
256379f0be8dSAneesh Kumar K.V 					len, copied, page, fsdata);
25643d2b1582SLukas Czerner 		case EXT4_INODE_WRITEBACK_DATA_MODE:
256579f0be8dSAneesh Kumar K.V 			return ext4_writeback_write_end(file, mapping, pos,
256679f0be8dSAneesh Kumar K.V 					len, copied, page, fsdata);
25673d2b1582SLukas Czerner 		default:
256879f0be8dSAneesh Kumar K.V 			BUG();
256979f0be8dSAneesh Kumar K.V 		}
257079f0be8dSAneesh Kumar K.V 	}
2571632eaeabSMingming Cao 
25729bffad1eSTheodore Ts'o 	trace_ext4_da_write_end(inode, pos, len, copied);
2573632eaeabSMingming Cao 	start = pos & (PAGE_CACHE_SIZE - 1);
2574632eaeabSMingming Cao 	end = start + copied - 1;
257564769240SAlex Tomas 
257664769240SAlex Tomas 	/*
257764769240SAlex Tomas 	 * generic_write_end() will run mark_inode_dirty() if i_size
257864769240SAlex Tomas 	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
257964769240SAlex Tomas 	 * into that.
258064769240SAlex Tomas 	 */
258164769240SAlex Tomas 
258264769240SAlex Tomas 	new_i_size = pos + copied;
2583ea51d132SAndrea Arcangeli 	if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
2584632eaeabSMingming Cao 		if (ext4_da_should_update_i_disksize(page, end)) {
2585632eaeabSMingming Cao 			down_write(&EXT4_I(inode)->i_data_sem);
2586*f3b59291STheodore Ts'o 			if (new_i_size > EXT4_I(inode)->i_disksize)
258764769240SAlex Tomas 				EXT4_I(inode)->i_disksize = new_i_size;
2588632eaeabSMingming Cao 			up_write(&EXT4_I(inode)->i_data_sem);
2589cf17fea6SAneesh Kumar K.V 			/* We need to mark inode dirty even if
2590cf17fea6SAneesh Kumar K.V 			 * new_i_size is less that inode->i_size
2591cf17fea6SAneesh Kumar K.V 			 * bu greater than i_disksize.(hint delalloc)
2592cf17fea6SAneesh Kumar K.V 			 */
2593cf17fea6SAneesh Kumar K.V 			ext4_mark_inode_dirty(handle, inode);
2594632eaeabSMingming Cao 		}
2595632eaeabSMingming Cao 	}
259664769240SAlex Tomas 	ret2 = generic_write_end(file, mapping, pos, len, copied,
259764769240SAlex Tomas 							page, fsdata);
259864769240SAlex Tomas 	copied = ret2;
259964769240SAlex Tomas 	if (ret2 < 0)
260064769240SAlex Tomas 		ret = ret2;
260164769240SAlex Tomas 	ret2 = ext4_journal_stop(handle);
260264769240SAlex Tomas 	if (!ret)
260364769240SAlex Tomas 		ret = ret2;
260464769240SAlex Tomas 
260564769240SAlex Tomas 	return ret ? ret : copied;
260664769240SAlex Tomas }
260764769240SAlex Tomas 
260864769240SAlex Tomas static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
260964769240SAlex Tomas {
261064769240SAlex Tomas 	/*
261164769240SAlex Tomas 	 * Drop reserved blocks
261264769240SAlex Tomas 	 */
261364769240SAlex Tomas 	BUG_ON(!PageLocked(page));
261464769240SAlex Tomas 	if (!page_has_buffers(page))
261564769240SAlex Tomas 		goto out;
261664769240SAlex Tomas 
2617d2a17637SMingming Cao 	ext4_da_page_release_reservation(page, offset);
261864769240SAlex Tomas 
261964769240SAlex Tomas out:
262064769240SAlex Tomas 	ext4_invalidatepage(page, offset);
262164769240SAlex Tomas 
262264769240SAlex Tomas 	return;
262364769240SAlex Tomas }
262464769240SAlex Tomas 
2625ccd2506bSTheodore Ts'o /*
2626ccd2506bSTheodore Ts'o  * Force all delayed allocation blocks to be allocated for a given inode.
2627ccd2506bSTheodore Ts'o  */
2628ccd2506bSTheodore Ts'o int ext4_alloc_da_blocks(struct inode *inode)
2629ccd2506bSTheodore Ts'o {
2630fb40ba0dSTheodore Ts'o 	trace_ext4_alloc_da_blocks(inode);
2631fb40ba0dSTheodore Ts'o 
2632ccd2506bSTheodore Ts'o 	if (!EXT4_I(inode)->i_reserved_data_blocks &&
2633ccd2506bSTheodore Ts'o 	    !EXT4_I(inode)->i_reserved_meta_blocks)
2634ccd2506bSTheodore Ts'o 		return 0;
2635ccd2506bSTheodore Ts'o 
2636ccd2506bSTheodore Ts'o 	/*
2637ccd2506bSTheodore Ts'o 	 * We do something simple for now.  The filemap_flush() will
2638ccd2506bSTheodore Ts'o 	 * also start triggering a write of the data blocks, which is
2639ccd2506bSTheodore Ts'o 	 * not strictly speaking necessary (and for users of
2640ccd2506bSTheodore Ts'o 	 * laptop_mode, not even desirable).  However, to do otherwise
2641ccd2506bSTheodore Ts'o 	 * would require replicating code paths in:
2642ccd2506bSTheodore Ts'o 	 *
2643ccd2506bSTheodore Ts'o 	 * ext4_da_writepages() ->
2644ccd2506bSTheodore Ts'o 	 *    write_cache_pages() ---> (via passed in callback function)
2645ccd2506bSTheodore Ts'o 	 *        __mpage_da_writepage() -->
2646ccd2506bSTheodore Ts'o 	 *           mpage_add_bh_to_extent()
2647ccd2506bSTheodore Ts'o 	 *           mpage_da_map_blocks()
2648ccd2506bSTheodore Ts'o 	 *
2649ccd2506bSTheodore Ts'o 	 * The problem is that write_cache_pages(), located in
2650ccd2506bSTheodore Ts'o 	 * mm/page-writeback.c, marks pages clean in preparation for
2651ccd2506bSTheodore Ts'o 	 * doing I/O, which is not desirable if we're not planning on
2652ccd2506bSTheodore Ts'o 	 * doing I/O at all.
2653ccd2506bSTheodore Ts'o 	 *
2654ccd2506bSTheodore Ts'o 	 * We could call write_cache_pages(), and then redirty all of
2655380cf090SWu Fengguang 	 * the pages by calling redirty_page_for_writepage() but that
2656ccd2506bSTheodore Ts'o 	 * would be ugly in the extreme.  So instead we would need to
2657ccd2506bSTheodore Ts'o 	 * replicate parts of the code in the above functions,
265825985edcSLucas De Marchi 	 * simplifying them because we wouldn't actually intend to
2659ccd2506bSTheodore Ts'o 	 * write out the pages, but rather only collect contiguous
2660ccd2506bSTheodore Ts'o 	 * logical block extents, call the multi-block allocator, and
2661ccd2506bSTheodore Ts'o 	 * then update the buffer heads with the block allocations.
2662ccd2506bSTheodore Ts'o 	 *
2663ccd2506bSTheodore Ts'o 	 * For now, though, we'll cheat by calling filemap_flush(),
2664ccd2506bSTheodore Ts'o 	 * which will map the blocks, and start the I/O, but not
2665ccd2506bSTheodore Ts'o 	 * actually wait for the I/O to complete.
2666ccd2506bSTheodore Ts'o 	 */
2667ccd2506bSTheodore Ts'o 	return filemap_flush(inode->i_mapping);
2668ccd2506bSTheodore Ts'o }
266964769240SAlex Tomas 
267064769240SAlex Tomas /*
2671ac27a0ecSDave Kleikamp  * bmap() is special.  It gets used by applications such as lilo and by
2672ac27a0ecSDave Kleikamp  * the swapper to find the on-disk block of a specific piece of data.
2673ac27a0ecSDave Kleikamp  *
2674ac27a0ecSDave Kleikamp  * Naturally, this is dangerous if the block concerned is still in the
2675617ba13bSMingming Cao  * journal.  If somebody makes a swapfile on an ext4 data-journaling
2676ac27a0ecSDave Kleikamp  * filesystem and enables swap, then they may get a nasty shock when the
2677ac27a0ecSDave Kleikamp  * data getting swapped to that swapfile suddenly gets overwritten by
2678ac27a0ecSDave Kleikamp  * the original zero's written out previously to the journal and
2679ac27a0ecSDave Kleikamp  * awaiting writeback in the kernel's buffer cache.
2680ac27a0ecSDave Kleikamp  *
2681ac27a0ecSDave Kleikamp  * So, if we see any bmap calls here on a modified, data-journaled file,
2682ac27a0ecSDave Kleikamp  * take extra steps to flush any blocks which might be in the cache.
2683ac27a0ecSDave Kleikamp  */
2684617ba13bSMingming Cao static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2685ac27a0ecSDave Kleikamp {
2686ac27a0ecSDave Kleikamp 	struct inode *inode = mapping->host;
2687ac27a0ecSDave Kleikamp 	journal_t *journal;
2688ac27a0ecSDave Kleikamp 	int err;
2689ac27a0ecSDave Kleikamp 
269064769240SAlex Tomas 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
269164769240SAlex Tomas 			test_opt(inode->i_sb, DELALLOC)) {
269264769240SAlex Tomas 		/*
269364769240SAlex Tomas 		 * With delalloc we want to sync the file
269464769240SAlex Tomas 		 * so that we can make sure we allocate
269564769240SAlex Tomas 		 * blocks for file
269664769240SAlex Tomas 		 */
269764769240SAlex Tomas 		filemap_write_and_wait(mapping);
269864769240SAlex Tomas 	}
269964769240SAlex Tomas 
270019f5fb7aSTheodore Ts'o 	if (EXT4_JOURNAL(inode) &&
270119f5fb7aSTheodore Ts'o 	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
2702ac27a0ecSDave Kleikamp 		/*
2703ac27a0ecSDave Kleikamp 		 * This is a REALLY heavyweight approach, but the use of
2704ac27a0ecSDave Kleikamp 		 * bmap on dirty files is expected to be extremely rare:
2705ac27a0ecSDave Kleikamp 		 * only if we run lilo or swapon on a freshly made file
2706ac27a0ecSDave Kleikamp 		 * do we expect this to happen.
2707ac27a0ecSDave Kleikamp 		 *
2708ac27a0ecSDave Kleikamp 		 * (bmap requires CAP_SYS_RAWIO so this does not
2709ac27a0ecSDave Kleikamp 		 * represent an unprivileged user DOS attack --- we'd be
2710ac27a0ecSDave Kleikamp 		 * in trouble if mortal users could trigger this path at
2711ac27a0ecSDave Kleikamp 		 * will.)
2712ac27a0ecSDave Kleikamp 		 *
2713617ba13bSMingming Cao 		 * NB. EXT4_STATE_JDATA is not set on files other than
2714ac27a0ecSDave Kleikamp 		 * regular files.  If somebody wants to bmap a directory
2715ac27a0ecSDave Kleikamp 		 * or symlink and gets confused because the buffer
2716ac27a0ecSDave Kleikamp 		 * hasn't yet been flushed to disk, they deserve
2717ac27a0ecSDave Kleikamp 		 * everything they get.
2718ac27a0ecSDave Kleikamp 		 */
2719ac27a0ecSDave Kleikamp 
272019f5fb7aSTheodore Ts'o 		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
2721617ba13bSMingming Cao 		journal = EXT4_JOURNAL(inode);
2722dab291afSMingming Cao 		jbd2_journal_lock_updates(journal);
2723dab291afSMingming Cao 		err = jbd2_journal_flush(journal);
2724dab291afSMingming Cao 		jbd2_journal_unlock_updates(journal);
2725ac27a0ecSDave Kleikamp 
2726ac27a0ecSDave Kleikamp 		if (err)
2727ac27a0ecSDave Kleikamp 			return 0;
2728ac27a0ecSDave Kleikamp 	}
2729ac27a0ecSDave Kleikamp 
2730617ba13bSMingming Cao 	return generic_block_bmap(mapping, block, ext4_get_block);
2731ac27a0ecSDave Kleikamp }
2732ac27a0ecSDave Kleikamp 
2733617ba13bSMingming Cao static int ext4_readpage(struct file *file, struct page *page)
2734ac27a0ecSDave Kleikamp {
27350562e0baSJiaying Zhang 	trace_ext4_readpage(page);
2736617ba13bSMingming Cao 	return mpage_readpage(page, ext4_get_block);
2737ac27a0ecSDave Kleikamp }
2738ac27a0ecSDave Kleikamp 
2739ac27a0ecSDave Kleikamp static int
2740617ba13bSMingming Cao ext4_readpages(struct file *file, struct address_space *mapping,
2741ac27a0ecSDave Kleikamp 		struct list_head *pages, unsigned nr_pages)
2742ac27a0ecSDave Kleikamp {
2743617ba13bSMingming Cao 	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
2744ac27a0ecSDave Kleikamp }
2745ac27a0ecSDave Kleikamp 
2746744692dcSJiaying Zhang static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
2747744692dcSJiaying Zhang {
2748744692dcSJiaying Zhang 	struct buffer_head *head, *bh;
2749744692dcSJiaying Zhang 	unsigned int curr_off = 0;
2750744692dcSJiaying Zhang 
2751744692dcSJiaying Zhang 	if (!page_has_buffers(page))
2752744692dcSJiaying Zhang 		return;
2753744692dcSJiaying Zhang 	head = bh = page_buffers(page);
2754744692dcSJiaying Zhang 	do {
2755744692dcSJiaying Zhang 		if (offset <= curr_off && test_clear_buffer_uninit(bh)
2756744692dcSJiaying Zhang 					&& bh->b_private) {
2757744692dcSJiaying Zhang 			ext4_free_io_end(bh->b_private);
2758744692dcSJiaying Zhang 			bh->b_private = NULL;
2759744692dcSJiaying Zhang 			bh->b_end_io = NULL;
2760744692dcSJiaying Zhang 		}
2761744692dcSJiaying Zhang 		curr_off = curr_off + bh->b_size;
2762744692dcSJiaying Zhang 		bh = bh->b_this_page;
2763744692dcSJiaying Zhang 	} while (bh != head);
2764744692dcSJiaying Zhang }
2765744692dcSJiaying Zhang 
2766617ba13bSMingming Cao static void ext4_invalidatepage(struct page *page, unsigned long offset)
2767ac27a0ecSDave Kleikamp {
2768617ba13bSMingming Cao 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2769ac27a0ecSDave Kleikamp 
27700562e0baSJiaying Zhang 	trace_ext4_invalidatepage(page, offset);
27710562e0baSJiaying Zhang 
2772ac27a0ecSDave Kleikamp 	/*
2773744692dcSJiaying Zhang 	 * free any io_end structure allocated for buffers to be discarded
2774744692dcSJiaying Zhang 	 */
2775744692dcSJiaying Zhang 	if (ext4_should_dioread_nolock(page->mapping->host))
2776744692dcSJiaying Zhang 		ext4_invalidatepage_free_endio(page, offset);
2777744692dcSJiaying Zhang 	/*
2778ac27a0ecSDave Kleikamp 	 * If it's a full truncate we just forget about the pending dirtying
2779ac27a0ecSDave Kleikamp 	 */
2780ac27a0ecSDave Kleikamp 	if (offset == 0)
2781ac27a0ecSDave Kleikamp 		ClearPageChecked(page);
2782ac27a0ecSDave Kleikamp 
27830390131bSFrank Mayhar 	if (journal)
2784dab291afSMingming Cao 		jbd2_journal_invalidatepage(journal, page, offset);
27850390131bSFrank Mayhar 	else
27860390131bSFrank Mayhar 		block_invalidatepage(page, offset);
2787ac27a0ecSDave Kleikamp }
2788ac27a0ecSDave Kleikamp 
2789617ba13bSMingming Cao static int ext4_releasepage(struct page *page, gfp_t wait)
2790ac27a0ecSDave Kleikamp {
2791617ba13bSMingming Cao 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2792ac27a0ecSDave Kleikamp 
27930562e0baSJiaying Zhang 	trace_ext4_releasepage(page);
27940562e0baSJiaying Zhang 
2795ac27a0ecSDave Kleikamp 	WARN_ON(PageChecked(page));
2796ac27a0ecSDave Kleikamp 	if (!page_has_buffers(page))
2797ac27a0ecSDave Kleikamp 		return 0;
27980390131bSFrank Mayhar 	if (journal)
2799dab291afSMingming Cao 		return jbd2_journal_try_to_free_buffers(journal, page, wait);
28000390131bSFrank Mayhar 	else
28010390131bSFrank Mayhar 		return try_to_free_buffers(page);
2802ac27a0ecSDave Kleikamp }
2803ac27a0ecSDave Kleikamp 
2804ac27a0ecSDave Kleikamp /*
28052ed88685STheodore Ts'o  * ext4_get_block used when preparing for a DIO write or buffer write.
28062ed88685STheodore Ts'o  * We allocate an uinitialized extent if blocks haven't been allocated.
28072ed88685STheodore Ts'o  * The extent will be converted to initialized after the IO is complete.
28082ed88685STheodore Ts'o  */
2809c7064ef1SJiaying Zhang static int ext4_get_block_write(struct inode *inode, sector_t iblock,
28104c0425ffSMingming Cao 		   struct buffer_head *bh_result, int create)
28114c0425ffSMingming Cao {
2812c7064ef1SJiaying Zhang 	ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
28138d5d02e6SMingming Cao 		   inode->i_ino, create);
28142ed88685STheodore Ts'o 	return _ext4_get_block(inode, iblock, bh_result,
28152ed88685STheodore Ts'o 			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
28164c0425ffSMingming Cao }
28174c0425ffSMingming Cao 
2818729f52c6SZheng Liu static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
28198b0f165fSAnatol Pomozov 		   struct buffer_head *bh_result, int create)
2820729f52c6SZheng Liu {
28218b0f165fSAnatol Pomozov 	ext4_debug("ext4_get_block_write_nolock: inode %lu, create flag %d\n",
28228b0f165fSAnatol Pomozov 		   inode->i_ino, create);
28238b0f165fSAnatol Pomozov 	return _ext4_get_block(inode, iblock, bh_result,
28248b0f165fSAnatol Pomozov 			       EXT4_GET_BLOCKS_NO_LOCK);
2825729f52c6SZheng Liu }
2826729f52c6SZheng Liu 
28274c0425ffSMingming Cao static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
2828552ef802SChristoph Hellwig 			    ssize_t size, void *private, int ret,
2829552ef802SChristoph Hellwig 			    bool is_async)
28304c0425ffSMingming Cao {
283172c5052dSChristoph Hellwig 	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
28324c0425ffSMingming Cao         ext4_io_end_t *io_end = iocb->private;
28334c0425ffSMingming Cao 
28344b70df18SMingming 	/* if not async direct IO or dio with 0 bytes write, just return */
28354b70df18SMingming 	if (!io_end || !size)
2836552ef802SChristoph Hellwig 		goto out;
28374b70df18SMingming 
28388d5d02e6SMingming Cao 	ext_debug("ext4_end_io_dio(): io_end 0x%p "
2839ace36ad4SJoe Perches 		  "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
28408d5d02e6SMingming Cao  		  iocb->private, io_end->inode->i_ino, iocb, offset,
28418d5d02e6SMingming Cao 		  size);
28428d5d02e6SMingming Cao 
2843b5a7e970STheodore Ts'o 	iocb->private = NULL;
2844b5a7e970STheodore Ts'o 
28458d5d02e6SMingming Cao 	/* if not aio dio with unwritten extents, just free io and return */
2846bd2d0210STheodore Ts'o 	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
28478d5d02e6SMingming Cao 		ext4_free_io_end(io_end);
28485b3ff237Sjiayingz@google.com (Jiaying Zhang) out:
28495b3ff237Sjiayingz@google.com (Jiaying Zhang) 		if (is_async)
28505b3ff237Sjiayingz@google.com (Jiaying Zhang) 			aio_complete(iocb, ret, 0);
285172c5052dSChristoph Hellwig 		inode_dio_done(inode);
28525b3ff237Sjiayingz@google.com (Jiaying Zhang) 		return;
28538d5d02e6SMingming Cao 	}
28548d5d02e6SMingming Cao 
28554c0425ffSMingming Cao 	io_end->offset = offset;
28564c0425ffSMingming Cao 	io_end->size = size;
28575b3ff237Sjiayingz@google.com (Jiaying Zhang) 	if (is_async) {
28585b3ff237Sjiayingz@google.com (Jiaying Zhang) 		io_end->iocb = iocb;
28595b3ff237Sjiayingz@google.com (Jiaying Zhang) 		io_end->result = ret;
28605b3ff237Sjiayingz@google.com (Jiaying Zhang) 	}
28614c0425ffSMingming Cao 
286228a535f9SDmitry Monakhov 	ext4_add_complete_io(io_end);
28634c0425ffSMingming Cao }
2864c7064ef1SJiaying Zhang 
2865744692dcSJiaying Zhang static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
2866744692dcSJiaying Zhang {
2867744692dcSJiaying Zhang 	ext4_io_end_t *io_end = bh->b_private;
2868744692dcSJiaying Zhang 	struct inode *inode;
2869744692dcSJiaying Zhang 
2870744692dcSJiaying Zhang 	if (!test_clear_buffer_uninit(bh) || !io_end)
2871744692dcSJiaying Zhang 		goto out;
2872744692dcSJiaying Zhang 
2873744692dcSJiaying Zhang 	if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
287492b97816STheodore Ts'o 		ext4_msg(io_end->inode->i_sb, KERN_INFO,
287592b97816STheodore Ts'o 			 "sb umounted, discard end_io request for inode %lu",
2876744692dcSJiaying Zhang 			 io_end->inode->i_ino);
2877744692dcSJiaying Zhang 		ext4_free_io_end(io_end);
2878744692dcSJiaying Zhang 		goto out;
2879744692dcSJiaying Zhang 	}
2880744692dcSJiaying Zhang 
288132c80b32STao Ma 	/*
288232c80b32STao Ma 	 * It may be over-defensive here to check EXT4_IO_END_UNWRITTEN now,
288332c80b32STao Ma 	 * but being more careful is always safe for the future change.
288432c80b32STao Ma 	 */
2885744692dcSJiaying Zhang 	inode = io_end->inode;
28860edeb71dSTao Ma 	ext4_set_io_unwritten_flag(inode, io_end);
288728a535f9SDmitry Monakhov 	ext4_add_complete_io(io_end);
2888744692dcSJiaying Zhang out:
2889744692dcSJiaying Zhang 	bh->b_private = NULL;
2890744692dcSJiaying Zhang 	bh->b_end_io = NULL;
2891744692dcSJiaying Zhang 	clear_buffer_uninit(bh);
2892744692dcSJiaying Zhang 	end_buffer_async_write(bh, uptodate);
2893744692dcSJiaying Zhang }
2894744692dcSJiaying Zhang 
2895744692dcSJiaying Zhang static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
2896744692dcSJiaying Zhang {
2897744692dcSJiaying Zhang 	ext4_io_end_t *io_end;
2898744692dcSJiaying Zhang 	struct page *page = bh->b_page;
2899744692dcSJiaying Zhang 	loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
2900744692dcSJiaying Zhang 	size_t size = bh->b_size;
2901744692dcSJiaying Zhang 
2902744692dcSJiaying Zhang retry:
2903744692dcSJiaying Zhang 	io_end = ext4_init_io_end(inode, GFP_ATOMIC);
2904744692dcSJiaying Zhang 	if (!io_end) {
29056db26ffcSAndrew Morton 		pr_warn_ratelimited("%s: allocation fail\n", __func__);
2906744692dcSJiaying Zhang 		schedule();
2907744692dcSJiaying Zhang 		goto retry;
2908744692dcSJiaying Zhang 	}
2909744692dcSJiaying Zhang 	io_end->offset = offset;
2910744692dcSJiaying Zhang 	io_end->size = size;
2911744692dcSJiaying Zhang 	/*
2912744692dcSJiaying Zhang 	 * We need to hold a reference to the page to make sure it
2913744692dcSJiaying Zhang 	 * doesn't get evicted before ext4_end_io_work() has a chance
2914744692dcSJiaying Zhang 	 * to convert the extent from written to unwritten.
2915744692dcSJiaying Zhang 	 */
2916744692dcSJiaying Zhang 	io_end->page = page;
2917744692dcSJiaying Zhang 	get_page(io_end->page);
2918744692dcSJiaying Zhang 
2919744692dcSJiaying Zhang 	bh->b_private = io_end;
2920744692dcSJiaying Zhang 	bh->b_end_io = ext4_end_io_buffer_write;
2921744692dcSJiaying Zhang 	return 0;
2922744692dcSJiaying Zhang }
2923744692dcSJiaying Zhang 
29244c0425ffSMingming Cao /*
29254c0425ffSMingming Cao  * For ext4 extent files, ext4 will do direct-io write to holes,
29264c0425ffSMingming Cao  * preallocated extents, and those write extend the file, no need to
29274c0425ffSMingming Cao  * fall back to buffered IO.
29284c0425ffSMingming Cao  *
2929b595076aSUwe Kleine-König  * For holes, we fallocate those blocks, mark them as uninitialized
29304c0425ffSMingming Cao  * If those blocks were preallocated, we mark sure they are splited, but
2931b595076aSUwe Kleine-König  * still keep the range to write as uninitialized.
29324c0425ffSMingming Cao  *
29338d5d02e6SMingming Cao  * The unwrritten extents will be converted to written when DIO is completed.
29348d5d02e6SMingming Cao  * For async direct IO, since the IO may still pending when return, we
293525985edcSLucas De Marchi  * set up an end_io call back function, which will do the conversion
29368d5d02e6SMingming Cao  * when async direct IO completed.
29374c0425ffSMingming Cao  *
29384c0425ffSMingming Cao  * If the O_DIRECT write will extend the file then add this inode to the
29394c0425ffSMingming Cao  * orphan list.  So recovery will truncate it back to the original size
29404c0425ffSMingming Cao  * if the machine crashes during the write.
29414c0425ffSMingming Cao  *
29424c0425ffSMingming Cao  */
29434c0425ffSMingming Cao static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
29444c0425ffSMingming Cao 			      const struct iovec *iov, loff_t offset,
29454c0425ffSMingming Cao 			      unsigned long nr_segs)
29464c0425ffSMingming Cao {
29474c0425ffSMingming Cao 	struct file *file = iocb->ki_filp;
29484c0425ffSMingming Cao 	struct inode *inode = file->f_mapping->host;
29494c0425ffSMingming Cao 	ssize_t ret;
29504c0425ffSMingming Cao 	size_t count = iov_length(iov, nr_segs);
29514c0425ffSMingming Cao 
29524c0425ffSMingming Cao 	loff_t final_size = offset + count;
29534c0425ffSMingming Cao 	if (rw == WRITE && final_size <= inode->i_size) {
2954729f52c6SZheng Liu 		int overwrite = 0;
29558b0f165fSAnatol Pomozov 		get_block_t *get_block_func = NULL;
29568b0f165fSAnatol Pomozov 		int dio_flags = 0;
2957729f52c6SZheng Liu 
29584bd809dbSZheng Liu 		BUG_ON(iocb->private == NULL);
29594bd809dbSZheng Liu 
29604bd809dbSZheng Liu 		/* If we do a overwrite dio, i_mutex locking can be released */
29614bd809dbSZheng Liu 		overwrite = *((int *)iocb->private);
29624bd809dbSZheng Liu 
29634bd809dbSZheng Liu 		if (overwrite) {
29641f555cfaSDmitry Monakhov 			atomic_inc(&inode->i_dio_count);
29654bd809dbSZheng Liu 			down_read(&EXT4_I(inode)->i_data_sem);
29664bd809dbSZheng Liu 			mutex_unlock(&inode->i_mutex);
29674bd809dbSZheng Liu 		}
29684bd809dbSZheng Liu 
29694c0425ffSMingming Cao 		/*
29708d5d02e6SMingming Cao  		 * We could direct write to holes and fallocate.
29718d5d02e6SMingming Cao 		 *
29728d5d02e6SMingming Cao  		 * Allocated blocks to fill the hole are marked as uninitialized
297325985edcSLucas De Marchi  		 * to prevent parallel buffered read to expose the stale data
29744c0425ffSMingming Cao  		 * before DIO complete the data IO.
29758d5d02e6SMingming Cao 		 *
29768d5d02e6SMingming Cao  		 * As to previously fallocated extents, ext4 get_block
29774c0425ffSMingming Cao  		 * will just simply mark the buffer mapped but still
29784c0425ffSMingming Cao  		 * keep the extents uninitialized.
29794c0425ffSMingming Cao  		 *
29808d5d02e6SMingming Cao 		 * for non AIO case, we will convert those unwritten extents
29818d5d02e6SMingming Cao 		 * to written after return back from blockdev_direct_IO.
29824c0425ffSMingming Cao 		 *
29838d5d02e6SMingming Cao 		 * for async DIO, the conversion needs to be defered when
29848d5d02e6SMingming Cao 		 * the IO is completed. The ext4 end_io callback function
29858d5d02e6SMingming Cao 		 * will be called to take care of the conversion work.
29868d5d02e6SMingming Cao 		 * Here for async case, we allocate an io_end structure to
29878d5d02e6SMingming Cao 		 * hook to the iocb.
29884c0425ffSMingming Cao  		 */
29898d5d02e6SMingming Cao 		iocb->private = NULL;
2990f45ee3a1SDmitry Monakhov 		ext4_inode_aio_set(inode, NULL);
29918d5d02e6SMingming Cao 		if (!is_sync_kiocb(iocb)) {
2992266991b1SJeff Moyer 			ext4_io_end_t *io_end =
2993266991b1SJeff Moyer 				ext4_init_io_end(inode, GFP_NOFS);
29944bd809dbSZheng Liu 			if (!io_end) {
29954bd809dbSZheng Liu 				ret = -ENOMEM;
29964bd809dbSZheng Liu 				goto retake_lock;
29974bd809dbSZheng Liu 			}
2998266991b1SJeff Moyer 			io_end->flag |= EXT4_IO_END_DIRECT;
2999266991b1SJeff Moyer 			iocb->private = io_end;
30008d5d02e6SMingming Cao 			/*
30018d5d02e6SMingming Cao 			 * we save the io structure for current async
300279e83036SEric Sandeen 			 * direct IO, so that later ext4_map_blocks()
30038d5d02e6SMingming Cao 			 * could flag the io structure whether there
30048d5d02e6SMingming Cao 			 * is a unwritten extents needs to be converted
30058d5d02e6SMingming Cao 			 * when IO is completed.
30068d5d02e6SMingming Cao 			 */
3007f45ee3a1SDmitry Monakhov 			ext4_inode_aio_set(inode, io_end);
30088d5d02e6SMingming Cao 		}
30098d5d02e6SMingming Cao 
30108b0f165fSAnatol Pomozov 		if (overwrite) {
30118b0f165fSAnatol Pomozov 			get_block_func = ext4_get_block_write_nolock;
30128b0f165fSAnatol Pomozov 		} else {
30138b0f165fSAnatol Pomozov 			get_block_func = ext4_get_block_write;
30148b0f165fSAnatol Pomozov 			dio_flags = DIO_LOCKING;
30158b0f165fSAnatol Pomozov 		}
3016729f52c6SZheng Liu 		ret = __blockdev_direct_IO(rw, iocb, inode,
3017729f52c6SZheng Liu 					 inode->i_sb->s_bdev, iov,
3018729f52c6SZheng Liu 					 offset, nr_segs,
30198b0f165fSAnatol Pomozov 					 get_block_func,
3020729f52c6SZheng Liu 					 ext4_end_io_dio,
3021729f52c6SZheng Liu 					 NULL,
30228b0f165fSAnatol Pomozov 					 dio_flags);
30238b0f165fSAnatol Pomozov 
30248d5d02e6SMingming Cao 		if (iocb->private)
3025f45ee3a1SDmitry Monakhov 			ext4_inode_aio_set(inode, NULL);
30268d5d02e6SMingming Cao 		/*
30278d5d02e6SMingming Cao 		 * The io_end structure takes a reference to the inode,
30288d5d02e6SMingming Cao 		 * that structure needs to be destroyed and the
30298d5d02e6SMingming Cao 		 * reference to the inode need to be dropped, when IO is
30308d5d02e6SMingming Cao 		 * complete, even with 0 byte write, or failed.
30318d5d02e6SMingming Cao 		 *
30328d5d02e6SMingming Cao 		 * In the successful AIO DIO case, the io_end structure will be
30338d5d02e6SMingming Cao 		 * desctroyed and the reference to the inode will be dropped
30348d5d02e6SMingming Cao 		 * after the end_io call back function is called.
30358d5d02e6SMingming Cao 		 *
30368d5d02e6SMingming Cao 		 * In the case there is 0 byte write, or error case, since
30378d5d02e6SMingming Cao 		 * VFS direct IO won't invoke the end_io call back function,
30388d5d02e6SMingming Cao 		 * we need to free the end_io structure here.
30398d5d02e6SMingming Cao 		 */
30408d5d02e6SMingming Cao 		if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
30418d5d02e6SMingming Cao 			ext4_free_io_end(iocb->private);
30428d5d02e6SMingming Cao 			iocb->private = NULL;
3043729f52c6SZheng Liu 		} else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
30445f524950SMingming 						EXT4_STATE_DIO_UNWRITTEN)) {
3045109f5565SMingming 			int err;
30468d5d02e6SMingming Cao 			/*
30478d5d02e6SMingming Cao 			 * for non AIO case, since the IO is already
304825985edcSLucas De Marchi 			 * completed, we could do the conversion right here
30498d5d02e6SMingming Cao 			 */
3050109f5565SMingming 			err = ext4_convert_unwritten_extents(inode,
30518d5d02e6SMingming Cao 							     offset, ret);
3052109f5565SMingming 			if (err < 0)
3053109f5565SMingming 				ret = err;
305419f5fb7aSTheodore Ts'o 			ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3055109f5565SMingming 		}
30564bd809dbSZheng Liu 
30574bd809dbSZheng Liu 	retake_lock:
30584bd809dbSZheng Liu 		/* take i_mutex locking again if we do a ovewrite dio */
30594bd809dbSZheng Liu 		if (overwrite) {
30601f555cfaSDmitry Monakhov 			inode_dio_done(inode);
30614bd809dbSZheng Liu 			up_read(&EXT4_I(inode)->i_data_sem);
30624bd809dbSZheng Liu 			mutex_lock(&inode->i_mutex);
30634bd809dbSZheng Liu 		}
30644bd809dbSZheng Liu 
30654c0425ffSMingming Cao 		return ret;
30664c0425ffSMingming Cao 	}
30678d5d02e6SMingming Cao 
30688d5d02e6SMingming Cao 	/* for write the the end of file case, we fall back to old way */
30694c0425ffSMingming Cao 	return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
30704c0425ffSMingming Cao }
30714c0425ffSMingming Cao 
30724c0425ffSMingming Cao static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
30734c0425ffSMingming Cao 			      const struct iovec *iov, loff_t offset,
30744c0425ffSMingming Cao 			      unsigned long nr_segs)
30754c0425ffSMingming Cao {
30764c0425ffSMingming Cao 	struct file *file = iocb->ki_filp;
30774c0425ffSMingming Cao 	struct inode *inode = file->f_mapping->host;
30780562e0baSJiaying Zhang 	ssize_t ret;
30794c0425ffSMingming Cao 
308084ebd795STheodore Ts'o 	/*
308184ebd795STheodore Ts'o 	 * If we are doing data journalling we don't support O_DIRECT
308284ebd795STheodore Ts'o 	 */
308384ebd795STheodore Ts'o 	if (ext4_should_journal_data(inode))
308484ebd795STheodore Ts'o 		return 0;
308584ebd795STheodore Ts'o 
30860562e0baSJiaying Zhang 	trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
308712e9b892SDmitry Monakhov 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
30880562e0baSJiaying Zhang 		ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
30890562e0baSJiaying Zhang 	else
30900562e0baSJiaying Zhang 		ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
30910562e0baSJiaying Zhang 	trace_ext4_direct_IO_exit(inode, offset,
30920562e0baSJiaying Zhang 				iov_length(iov, nr_segs), rw, ret);
30930562e0baSJiaying Zhang 	return ret;
30944c0425ffSMingming Cao }
30954c0425ffSMingming Cao 
3096ac27a0ecSDave Kleikamp /*
3097617ba13bSMingming Cao  * Pages can be marked dirty completely asynchronously from ext4's journalling
3098ac27a0ecSDave Kleikamp  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
3099ac27a0ecSDave Kleikamp  * much here because ->set_page_dirty is called under VFS locks.  The page is
3100ac27a0ecSDave Kleikamp  * not necessarily locked.
3101ac27a0ecSDave Kleikamp  *
3102ac27a0ecSDave Kleikamp  * We cannot just dirty the page and leave attached buffers clean, because the
3103ac27a0ecSDave Kleikamp  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
3104ac27a0ecSDave Kleikamp  * or jbddirty because all the journalling code will explode.
3105ac27a0ecSDave Kleikamp  *
3106ac27a0ecSDave Kleikamp  * So what we do is to mark the page "pending dirty" and next time writepage
3107ac27a0ecSDave Kleikamp  * is called, propagate that into the buffers appropriately.
3108ac27a0ecSDave Kleikamp  */
3109617ba13bSMingming Cao static int ext4_journalled_set_page_dirty(struct page *page)
3110ac27a0ecSDave Kleikamp {
3111ac27a0ecSDave Kleikamp 	SetPageChecked(page);
3112ac27a0ecSDave Kleikamp 	return __set_page_dirty_nobuffers(page);
3113ac27a0ecSDave Kleikamp }
3114ac27a0ecSDave Kleikamp 
3115617ba13bSMingming Cao static const struct address_space_operations ext4_ordered_aops = {
3116617ba13bSMingming Cao 	.readpage		= ext4_readpage,
3117617ba13bSMingming Cao 	.readpages		= ext4_readpages,
311843ce1d23SAneesh Kumar K.V 	.writepage		= ext4_writepage,
3119bfc1af65SNick Piggin 	.write_begin		= ext4_write_begin,
3120bfc1af65SNick Piggin 	.write_end		= ext4_ordered_write_end,
3121617ba13bSMingming Cao 	.bmap			= ext4_bmap,
3122617ba13bSMingming Cao 	.invalidatepage		= ext4_invalidatepage,
3123617ba13bSMingming Cao 	.releasepage		= ext4_releasepage,
3124617ba13bSMingming Cao 	.direct_IO		= ext4_direct_IO,
3125ac27a0ecSDave Kleikamp 	.migratepage		= buffer_migrate_page,
31268ab22b9aSHisashi Hifumi 	.is_partially_uptodate  = block_is_partially_uptodate,
3127aa261f54SAndi Kleen 	.error_remove_page	= generic_error_remove_page,
3128ac27a0ecSDave Kleikamp };
3129ac27a0ecSDave Kleikamp 
3130617ba13bSMingming Cao static const struct address_space_operations ext4_writeback_aops = {
3131617ba13bSMingming Cao 	.readpage		= ext4_readpage,
3132617ba13bSMingming Cao 	.readpages		= ext4_readpages,
313343ce1d23SAneesh Kumar K.V 	.writepage		= ext4_writepage,
3134bfc1af65SNick Piggin 	.write_begin		= ext4_write_begin,
3135bfc1af65SNick Piggin 	.write_end		= ext4_writeback_write_end,
3136617ba13bSMingming Cao 	.bmap			= ext4_bmap,
3137617ba13bSMingming Cao 	.invalidatepage		= ext4_invalidatepage,
3138617ba13bSMingming Cao 	.releasepage		= ext4_releasepage,
3139617ba13bSMingming Cao 	.direct_IO		= ext4_direct_IO,
3140ac27a0ecSDave Kleikamp 	.migratepage		= buffer_migrate_page,
31418ab22b9aSHisashi Hifumi 	.is_partially_uptodate  = block_is_partially_uptodate,
3142aa261f54SAndi Kleen 	.error_remove_page	= generic_error_remove_page,
3143ac27a0ecSDave Kleikamp };
3144ac27a0ecSDave Kleikamp 
3145617ba13bSMingming Cao static const struct address_space_operations ext4_journalled_aops = {
3146617ba13bSMingming Cao 	.readpage		= ext4_readpage,
3147617ba13bSMingming Cao 	.readpages		= ext4_readpages,
314843ce1d23SAneesh Kumar K.V 	.writepage		= ext4_writepage,
3149bfc1af65SNick Piggin 	.write_begin		= ext4_write_begin,
3150bfc1af65SNick Piggin 	.write_end		= ext4_journalled_write_end,
3151617ba13bSMingming Cao 	.set_page_dirty		= ext4_journalled_set_page_dirty,
3152617ba13bSMingming Cao 	.bmap			= ext4_bmap,
3153617ba13bSMingming Cao 	.invalidatepage		= ext4_invalidatepage,
3154617ba13bSMingming Cao 	.releasepage		= ext4_releasepage,
315584ebd795STheodore Ts'o 	.direct_IO		= ext4_direct_IO,
31568ab22b9aSHisashi Hifumi 	.is_partially_uptodate  = block_is_partially_uptodate,
3157aa261f54SAndi Kleen 	.error_remove_page	= generic_error_remove_page,
3158ac27a0ecSDave Kleikamp };
3159ac27a0ecSDave Kleikamp 
316064769240SAlex Tomas static const struct address_space_operations ext4_da_aops = {
316164769240SAlex Tomas 	.readpage		= ext4_readpage,
316264769240SAlex Tomas 	.readpages		= ext4_readpages,
316343ce1d23SAneesh Kumar K.V 	.writepage		= ext4_writepage,
316464769240SAlex Tomas 	.writepages		= ext4_da_writepages,
316564769240SAlex Tomas 	.write_begin		= ext4_da_write_begin,
316664769240SAlex Tomas 	.write_end		= ext4_da_write_end,
316764769240SAlex Tomas 	.bmap			= ext4_bmap,
316864769240SAlex Tomas 	.invalidatepage		= ext4_da_invalidatepage,
316964769240SAlex Tomas 	.releasepage		= ext4_releasepage,
317064769240SAlex Tomas 	.direct_IO		= ext4_direct_IO,
317164769240SAlex Tomas 	.migratepage		= buffer_migrate_page,
31728ab22b9aSHisashi Hifumi 	.is_partially_uptodate  = block_is_partially_uptodate,
3173aa261f54SAndi Kleen 	.error_remove_page	= generic_error_remove_page,
317464769240SAlex Tomas };
317564769240SAlex Tomas 
3176617ba13bSMingming Cao void ext4_set_aops(struct inode *inode)
3177ac27a0ecSDave Kleikamp {
31783d2b1582SLukas Czerner 	switch (ext4_inode_journal_mode(inode)) {
31793d2b1582SLukas Czerner 	case EXT4_INODE_ORDERED_DATA_MODE:
31803d2b1582SLukas Czerner 		if (test_opt(inode->i_sb, DELALLOC))
3181cd1aac32SAneesh Kumar K.V 			inode->i_mapping->a_ops = &ext4_da_aops;
3182ac27a0ecSDave Kleikamp 		else
31833d2b1582SLukas Czerner 			inode->i_mapping->a_ops = &ext4_ordered_aops;
31843d2b1582SLukas Czerner 		break;
31853d2b1582SLukas Czerner 	case EXT4_INODE_WRITEBACK_DATA_MODE:
31863d2b1582SLukas Czerner 		if (test_opt(inode->i_sb, DELALLOC))
31873d2b1582SLukas Czerner 			inode->i_mapping->a_ops = &ext4_da_aops;
31883d2b1582SLukas Czerner 		else
31893d2b1582SLukas Czerner 			inode->i_mapping->a_ops = &ext4_writeback_aops;
31903d2b1582SLukas Czerner 		break;
31913d2b1582SLukas Czerner 	case EXT4_INODE_JOURNAL_DATA_MODE:
3192617ba13bSMingming Cao 		inode->i_mapping->a_ops = &ext4_journalled_aops;
31933d2b1582SLukas Czerner 		break;
31943d2b1582SLukas Czerner 	default:
31953d2b1582SLukas Czerner 		BUG();
31963d2b1582SLukas Czerner 	}
3197ac27a0ecSDave Kleikamp }
3198ac27a0ecSDave Kleikamp 
31994e96b2dbSAllison Henderson 
32004e96b2dbSAllison Henderson /*
32014e96b2dbSAllison Henderson  * ext4_discard_partial_page_buffers()
32024e96b2dbSAllison Henderson  * Wrapper function for ext4_discard_partial_page_buffers_no_lock.
32034e96b2dbSAllison Henderson  * This function finds and locks the page containing the offset
32044e96b2dbSAllison Henderson  * "from" and passes it to ext4_discard_partial_page_buffers_no_lock.
32054e96b2dbSAllison Henderson  * Calling functions that already have the page locked should call
32064e96b2dbSAllison Henderson  * ext4_discard_partial_page_buffers_no_lock directly.
32074e96b2dbSAllison Henderson  */
32084e96b2dbSAllison Henderson int ext4_discard_partial_page_buffers(handle_t *handle,
32094e96b2dbSAllison Henderson 		struct address_space *mapping, loff_t from,
32104e96b2dbSAllison Henderson 		loff_t length, int flags)
32114e96b2dbSAllison Henderson {
32124e96b2dbSAllison Henderson 	struct inode *inode = mapping->host;
32134e96b2dbSAllison Henderson 	struct page *page;
32144e96b2dbSAllison Henderson 	int err = 0;
32154e96b2dbSAllison Henderson 
32164e96b2dbSAllison Henderson 	page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
32174e96b2dbSAllison Henderson 				   mapping_gfp_mask(mapping) & ~__GFP_FS);
32184e96b2dbSAllison Henderson 	if (!page)
32195129d05fSYongqiang Yang 		return -ENOMEM;
32204e96b2dbSAllison Henderson 
32214e96b2dbSAllison Henderson 	err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page,
32224e96b2dbSAllison Henderson 		from, length, flags);
32234e96b2dbSAllison Henderson 
32244e96b2dbSAllison Henderson 	unlock_page(page);
32254e96b2dbSAllison Henderson 	page_cache_release(page);
32264e96b2dbSAllison Henderson 	return err;
32274e96b2dbSAllison Henderson }
32284e96b2dbSAllison Henderson 
32294e96b2dbSAllison Henderson /*
32304e96b2dbSAllison Henderson  * ext4_discard_partial_page_buffers_no_lock()
32314e96b2dbSAllison Henderson  * Zeros a page range of length 'length' starting from offset 'from'.
32324e96b2dbSAllison Henderson  * Buffer heads that correspond to the block aligned regions of the
32334e96b2dbSAllison Henderson  * zeroed range will be unmapped.  Unblock aligned regions
32344e96b2dbSAllison Henderson  * will have the corresponding buffer head mapped if needed so that
32354e96b2dbSAllison Henderson  * that region of the page can be updated with the partial zero out.
32364e96b2dbSAllison Henderson  *
32374e96b2dbSAllison Henderson  * This function assumes that the page has already been  locked.  The
32384e96b2dbSAllison Henderson  * The range to be discarded must be contained with in the given page.
32394e96b2dbSAllison Henderson  * If the specified range exceeds the end of the page it will be shortened
32404e96b2dbSAllison Henderson  * to the end of the page that corresponds to 'from'.  This function is
32414e96b2dbSAllison Henderson  * appropriate for updating a page and it buffer heads to be unmapped and
32424e96b2dbSAllison Henderson  * zeroed for blocks that have been either released, or are going to be
32434e96b2dbSAllison Henderson  * released.
32444e96b2dbSAllison Henderson  *
32454e96b2dbSAllison Henderson  * handle: The journal handle
32464e96b2dbSAllison Henderson  * inode:  The files inode
32474e96b2dbSAllison Henderson  * page:   A locked page that contains the offset "from"
32484907cb7bSAnatol Pomozov  * from:   The starting byte offset (from the beginning of the file)
32494e96b2dbSAllison Henderson  *         to begin discarding
32504e96b2dbSAllison Henderson  * len:    The length of bytes to discard
32514e96b2dbSAllison Henderson  * flags:  Optional flags that may be used:
32524e96b2dbSAllison Henderson  *
32534e96b2dbSAllison Henderson  *         EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED
32544e96b2dbSAllison Henderson  *         Only zero the regions of the page whose buffer heads
32554e96b2dbSAllison Henderson  *         have already been unmapped.  This flag is appropriate
32564907cb7bSAnatol Pomozov  *         for updating the contents of a page whose blocks may
32574e96b2dbSAllison Henderson  *         have already been released, and we only want to zero
32584e96b2dbSAllison Henderson  *         out the regions that correspond to those released blocks.
32594e96b2dbSAllison Henderson  *
32604907cb7bSAnatol Pomozov  * Returns zero on success or negative on failure.
32614e96b2dbSAllison Henderson  */
32625f163cc7SEric Sandeen static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
32634e96b2dbSAllison Henderson 		struct inode *inode, struct page *page, loff_t from,
32644e96b2dbSAllison Henderson 		loff_t length, int flags)
32654e96b2dbSAllison Henderson {
32664e96b2dbSAllison Henderson 	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
32674e96b2dbSAllison Henderson 	unsigned int offset = from & (PAGE_CACHE_SIZE-1);
32684e96b2dbSAllison Henderson 	unsigned int blocksize, max, pos;
32694e96b2dbSAllison Henderson 	ext4_lblk_t iblock;
32704e96b2dbSAllison Henderson 	struct buffer_head *bh;
32714e96b2dbSAllison Henderson 	int err = 0;
32724e96b2dbSAllison Henderson 
32734e96b2dbSAllison Henderson 	blocksize = inode->i_sb->s_blocksize;
32744e96b2dbSAllison Henderson 	max = PAGE_CACHE_SIZE - offset;
32754e96b2dbSAllison Henderson 
32764e96b2dbSAllison Henderson 	if (index != page->index)
32774e96b2dbSAllison Henderson 		return -EINVAL;
32784e96b2dbSAllison Henderson 
32794e96b2dbSAllison Henderson 	/*
32804e96b2dbSAllison Henderson 	 * correct length if it does not fall between
32814e96b2dbSAllison Henderson 	 * 'from' and the end of the page
32824e96b2dbSAllison Henderson 	 */
32834e96b2dbSAllison Henderson 	if (length > max || length < 0)
32844e96b2dbSAllison Henderson 		length = max;
32854e96b2dbSAllison Henderson 
32864e96b2dbSAllison Henderson 	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
32874e96b2dbSAllison Henderson 
3288093e6e36SYongqiang Yang 	if (!page_has_buffers(page))
32894e96b2dbSAllison Henderson 		create_empty_buffers(page, blocksize, 0);
32904e96b2dbSAllison Henderson 
32914e96b2dbSAllison Henderson 	/* Find the buffer that contains "offset" */
32924e96b2dbSAllison Henderson 	bh = page_buffers(page);
32934e96b2dbSAllison Henderson 	pos = blocksize;
32944e96b2dbSAllison Henderson 	while (offset >= pos) {
32954e96b2dbSAllison Henderson 		bh = bh->b_this_page;
32964e96b2dbSAllison Henderson 		iblock++;
32974e96b2dbSAllison Henderson 		pos += blocksize;
32984e96b2dbSAllison Henderson 	}
32994e96b2dbSAllison Henderson 
33004e96b2dbSAllison Henderson 	pos = offset;
33014e96b2dbSAllison Henderson 	while (pos < offset + length) {
3302e260daf2SYongqiang Yang 		unsigned int end_of_block, range_to_discard;
3303e260daf2SYongqiang Yang 
33044e96b2dbSAllison Henderson 		err = 0;
33054e96b2dbSAllison Henderson 
33064e96b2dbSAllison Henderson 		/* The length of space left to zero and unmap */
33074e96b2dbSAllison Henderson 		range_to_discard = offset + length - pos;
33084e96b2dbSAllison Henderson 
33094e96b2dbSAllison Henderson 		/* The length of space until the end of the block */
33104e96b2dbSAllison Henderson 		end_of_block = blocksize - (pos & (blocksize-1));
33114e96b2dbSAllison Henderson 
33124e96b2dbSAllison Henderson 		/*
33134e96b2dbSAllison Henderson 		 * Do not unmap or zero past end of block
33144e96b2dbSAllison Henderson 		 * for this buffer head
33154e96b2dbSAllison Henderson 		 */
33164e96b2dbSAllison Henderson 		if (range_to_discard > end_of_block)
33174e96b2dbSAllison Henderson 			range_to_discard = end_of_block;
33184e96b2dbSAllison Henderson 
33194e96b2dbSAllison Henderson 
33204e96b2dbSAllison Henderson 		/*
33214e96b2dbSAllison Henderson 		 * Skip this buffer head if we are only zeroing unampped
33224e96b2dbSAllison Henderson 		 * regions of the page
33234e96b2dbSAllison Henderson 		 */
33244e96b2dbSAllison Henderson 		if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
33254e96b2dbSAllison Henderson 			buffer_mapped(bh))
33264e96b2dbSAllison Henderson 				goto next;
33274e96b2dbSAllison Henderson 
33284e96b2dbSAllison Henderson 		/* If the range is block aligned, unmap */
33294e96b2dbSAllison Henderson 		if (range_to_discard == blocksize) {
33304e96b2dbSAllison Henderson 			clear_buffer_dirty(bh);
33314e96b2dbSAllison Henderson 			bh->b_bdev = NULL;
33324e96b2dbSAllison Henderson 			clear_buffer_mapped(bh);
33334e96b2dbSAllison Henderson 			clear_buffer_req(bh);
33344e96b2dbSAllison Henderson 			clear_buffer_new(bh);
33354e96b2dbSAllison Henderson 			clear_buffer_delay(bh);
33364e96b2dbSAllison Henderson 			clear_buffer_unwritten(bh);
33374e96b2dbSAllison Henderson 			clear_buffer_uptodate(bh);
33384e96b2dbSAllison Henderson 			zero_user(page, pos, range_to_discard);
33394e96b2dbSAllison Henderson 			BUFFER_TRACE(bh, "Buffer discarded");
33404e96b2dbSAllison Henderson 			goto next;
33414e96b2dbSAllison Henderson 		}
33424e96b2dbSAllison Henderson 
33434e96b2dbSAllison Henderson 		/*
33444e96b2dbSAllison Henderson 		 * If this block is not completely contained in the range
33454e96b2dbSAllison Henderson 		 * to be discarded, then it is not going to be released. Because
33464e96b2dbSAllison Henderson 		 * we need to keep this block, we need to make sure this part
33474e96b2dbSAllison Henderson 		 * of the page is uptodate before we modify it by writeing
33484e96b2dbSAllison Henderson 		 * partial zeros on it.
33494e96b2dbSAllison Henderson 		 */
33504e96b2dbSAllison Henderson 		if (!buffer_mapped(bh)) {
33514e96b2dbSAllison Henderson 			/*
33524e96b2dbSAllison Henderson 			 * Buffer head must be mapped before we can read
33534e96b2dbSAllison Henderson 			 * from the block
33544e96b2dbSAllison Henderson 			 */
33554e96b2dbSAllison Henderson 			BUFFER_TRACE(bh, "unmapped");
33564e96b2dbSAllison Henderson 			ext4_get_block(inode, iblock, bh, 0);
33574e96b2dbSAllison Henderson 			/* unmapped? It's a hole - nothing to do */
33584e96b2dbSAllison Henderson 			if (!buffer_mapped(bh)) {
33594e96b2dbSAllison Henderson 				BUFFER_TRACE(bh, "still unmapped");
33604e96b2dbSAllison Henderson 				goto next;
33614e96b2dbSAllison Henderson 			}
33624e96b2dbSAllison Henderson 		}
33634e96b2dbSAllison Henderson 
33644e96b2dbSAllison Henderson 		/* Ok, it's mapped. Make sure it's up-to-date */
33654e96b2dbSAllison Henderson 		if (PageUptodate(page))
33664e96b2dbSAllison Henderson 			set_buffer_uptodate(bh);
33674e96b2dbSAllison Henderson 
33684e96b2dbSAllison Henderson 		if (!buffer_uptodate(bh)) {
33694e96b2dbSAllison Henderson 			err = -EIO;
33704e96b2dbSAllison Henderson 			ll_rw_block(READ, 1, &bh);
33714e96b2dbSAllison Henderson 			wait_on_buffer(bh);
33724e96b2dbSAllison Henderson 			/* Uhhuh. Read error. Complain and punt.*/
33734e96b2dbSAllison Henderson 			if (!buffer_uptodate(bh))
33744e96b2dbSAllison Henderson 				goto next;
33754e96b2dbSAllison Henderson 		}
33764e96b2dbSAllison Henderson 
33774e96b2dbSAllison Henderson 		if (ext4_should_journal_data(inode)) {
33784e96b2dbSAllison Henderson 			BUFFER_TRACE(bh, "get write access");
33794e96b2dbSAllison Henderson 			err = ext4_journal_get_write_access(handle, bh);
33804e96b2dbSAllison Henderson 			if (err)
33814e96b2dbSAllison Henderson 				goto next;
33824e96b2dbSAllison Henderson 		}
33834e96b2dbSAllison Henderson 
33844e96b2dbSAllison Henderson 		zero_user(page, pos, range_to_discard);
33854e96b2dbSAllison Henderson 
33864e96b2dbSAllison Henderson 		err = 0;
33874e96b2dbSAllison Henderson 		if (ext4_should_journal_data(inode)) {
33884e96b2dbSAllison Henderson 			err = ext4_handle_dirty_metadata(handle, inode, bh);
3389decbd919STheodore Ts'o 		} else
33904e96b2dbSAllison Henderson 			mark_buffer_dirty(bh);
33914e96b2dbSAllison Henderson 
33924e96b2dbSAllison Henderson 		BUFFER_TRACE(bh, "Partial buffer zeroed");
33934e96b2dbSAllison Henderson next:
33944e96b2dbSAllison Henderson 		bh = bh->b_this_page;
33954e96b2dbSAllison Henderson 		iblock++;
33964e96b2dbSAllison Henderson 		pos += range_to_discard;
33974e96b2dbSAllison Henderson 	}
33984e96b2dbSAllison Henderson 
33994e96b2dbSAllison Henderson 	return err;
34004e96b2dbSAllison Henderson }
34014e96b2dbSAllison Henderson 
340291ef4cafSDuane Griffin int ext4_can_truncate(struct inode *inode)
340391ef4cafSDuane Griffin {
340491ef4cafSDuane Griffin 	if (S_ISREG(inode->i_mode))
340591ef4cafSDuane Griffin 		return 1;
340691ef4cafSDuane Griffin 	if (S_ISDIR(inode->i_mode))
340791ef4cafSDuane Griffin 		return 1;
340891ef4cafSDuane Griffin 	if (S_ISLNK(inode->i_mode))
340991ef4cafSDuane Griffin 		return !ext4_inode_is_fast_symlink(inode);
341091ef4cafSDuane Griffin 	return 0;
341191ef4cafSDuane Griffin }
341291ef4cafSDuane Griffin 
3413ac27a0ecSDave Kleikamp /*
3414a4bb6b64SAllison Henderson  * ext4_punch_hole: punches a hole in a file by releaseing the blocks
3415a4bb6b64SAllison Henderson  * associated with the given offset and length
3416a4bb6b64SAllison Henderson  *
3417a4bb6b64SAllison Henderson  * @inode:  File inode
3418a4bb6b64SAllison Henderson  * @offset: The offset where the hole will begin
3419a4bb6b64SAllison Henderson  * @len:    The length of the hole
3420a4bb6b64SAllison Henderson  *
34214907cb7bSAnatol Pomozov  * Returns: 0 on success or negative on failure
3422a4bb6b64SAllison Henderson  */
3423a4bb6b64SAllison Henderson 
3424a4bb6b64SAllison Henderson int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
3425a4bb6b64SAllison Henderson {
3426a4bb6b64SAllison Henderson 	struct inode *inode = file->f_path.dentry->d_inode;
3427a4bb6b64SAllison Henderson 	if (!S_ISREG(inode->i_mode))
342873355192SAllison Henderson 		return -EOPNOTSUPP;
3429a4bb6b64SAllison Henderson 
3430a4bb6b64SAllison Henderson 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
3431a4bb6b64SAllison Henderson 		/* TODO: Add support for non extent hole punching */
343273355192SAllison Henderson 		return -EOPNOTSUPP;
3433a4bb6b64SAllison Henderson 	}
3434a4bb6b64SAllison Henderson 
3435bab08ab9STheodore Ts'o 	if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) {
3436bab08ab9STheodore Ts'o 		/* TODO: Add support for bigalloc file systems */
343773355192SAllison Henderson 		return -EOPNOTSUPP;
3438bab08ab9STheodore Ts'o 	}
3439bab08ab9STheodore Ts'o 
3440a4bb6b64SAllison Henderson 	return ext4_ext_punch_hole(file, offset, length);
3441a4bb6b64SAllison Henderson }
3442a4bb6b64SAllison Henderson 
3443a4bb6b64SAllison Henderson /*
3444617ba13bSMingming Cao  * ext4_truncate()
3445ac27a0ecSDave Kleikamp  *
3446617ba13bSMingming Cao  * We block out ext4_get_block() block instantiations across the entire
3447617ba13bSMingming Cao  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
3448ac27a0ecSDave Kleikamp  * simultaneously on behalf of the same inode.
3449ac27a0ecSDave Kleikamp  *
345042b2aa86SJustin P. Mattock  * As we work through the truncate and commit bits of it to the journal there
3451ac27a0ecSDave Kleikamp  * is one core, guiding principle: the file's tree must always be consistent on
3452ac27a0ecSDave Kleikamp  * disk.  We must be able to restart the truncate after a crash.
3453ac27a0ecSDave Kleikamp  *
3454ac27a0ecSDave Kleikamp  * The file's tree may be transiently inconsistent in memory (although it
3455ac27a0ecSDave Kleikamp  * probably isn't), but whenever we close off and commit a journal transaction,
3456ac27a0ecSDave Kleikamp  * the contents of (the filesystem + the journal) must be consistent and
3457ac27a0ecSDave Kleikamp  * restartable.  It's pretty simple, really: bottom up, right to left (although
3458ac27a0ecSDave Kleikamp  * left-to-right works OK too).
3459ac27a0ecSDave Kleikamp  *
3460ac27a0ecSDave Kleikamp  * Note that at recovery time, journal replay occurs *before* the restart of
3461ac27a0ecSDave Kleikamp  * truncate against the orphan inode list.
3462ac27a0ecSDave Kleikamp  *
3463ac27a0ecSDave Kleikamp  * The committed inode has the new, desired i_size (which is the same as
3464617ba13bSMingming Cao  * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
3465ac27a0ecSDave Kleikamp  * that this inode's truncate did not complete and it will again call
3466617ba13bSMingming Cao  * ext4_truncate() to have another go.  So there will be instantiated blocks
3467617ba13bSMingming Cao  * to the right of the truncation point in a crashed ext4 filesystem.  But
3468ac27a0ecSDave Kleikamp  * that's fine - as long as they are linked from the inode, the post-crash
3469617ba13bSMingming Cao  * ext4_truncate() run will find them and release them.
3470ac27a0ecSDave Kleikamp  */
3471617ba13bSMingming Cao void ext4_truncate(struct inode *inode)
3472ac27a0ecSDave Kleikamp {
34730562e0baSJiaying Zhang 	trace_ext4_truncate_enter(inode);
34740562e0baSJiaying Zhang 
347591ef4cafSDuane Griffin 	if (!ext4_can_truncate(inode))
3476ac27a0ecSDave Kleikamp 		return;
3477ac27a0ecSDave Kleikamp 
347812e9b892SDmitry Monakhov 	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3479c8d46e41SJiaying Zhang 
34805534fb5bSTheodore Ts'o 	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
348119f5fb7aSTheodore Ts'o 		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
34827d8f9f7dSTheodore Ts'o 
3483ff9893dcSAmir Goldstein 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3484cf108bcaSJan Kara 		ext4_ext_truncate(inode);
3485ff9893dcSAmir Goldstein 	else
3486ff9893dcSAmir Goldstein 		ext4_ind_truncate(inode);
3487a86c6181SAlex Tomas 
34880562e0baSJiaying Zhang 	trace_ext4_truncate_exit(inode);
3489ac27a0ecSDave Kleikamp }
3490ac27a0ecSDave Kleikamp 
3491ac27a0ecSDave Kleikamp /*
3492617ba13bSMingming Cao  * ext4_get_inode_loc returns with an extra refcount against the inode's
3493ac27a0ecSDave Kleikamp  * underlying buffer_head on success. If 'in_mem' is true, we have all
3494ac27a0ecSDave Kleikamp  * data in memory that is needed to recreate the on-disk version of this
3495ac27a0ecSDave Kleikamp  * inode.
3496ac27a0ecSDave Kleikamp  */
3497617ba13bSMingming Cao static int __ext4_get_inode_loc(struct inode *inode,
3498617ba13bSMingming Cao 				struct ext4_iloc *iloc, int in_mem)
3499ac27a0ecSDave Kleikamp {
3500240799cdSTheodore Ts'o 	struct ext4_group_desc	*gdp;
3501ac27a0ecSDave Kleikamp 	struct buffer_head	*bh;
3502240799cdSTheodore Ts'o 	struct super_block	*sb = inode->i_sb;
3503240799cdSTheodore Ts'o 	ext4_fsblk_t		block;
3504240799cdSTheodore Ts'o 	int			inodes_per_block, inode_offset;
3505ac27a0ecSDave Kleikamp 
35063a06d778SAneesh Kumar K.V 	iloc->bh = NULL;
3507240799cdSTheodore Ts'o 	if (!ext4_valid_inum(sb, inode->i_ino))
3508ac27a0ecSDave Kleikamp 		return -EIO;
3509ac27a0ecSDave Kleikamp 
3510240799cdSTheodore Ts'o 	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
3511240799cdSTheodore Ts'o 	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
3512240799cdSTheodore Ts'o 	if (!gdp)
3513240799cdSTheodore Ts'o 		return -EIO;
3514240799cdSTheodore Ts'o 
3515240799cdSTheodore Ts'o 	/*
3516240799cdSTheodore Ts'o 	 * Figure out the offset within the block group inode table
3517240799cdSTheodore Ts'o 	 */
351800d09882STao Ma 	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
3519240799cdSTheodore Ts'o 	inode_offset = ((inode->i_ino - 1) %
3520240799cdSTheodore Ts'o 			EXT4_INODES_PER_GROUP(sb));
3521240799cdSTheodore Ts'o 	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
3522240799cdSTheodore Ts'o 	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
3523240799cdSTheodore Ts'o 
3524240799cdSTheodore Ts'o 	bh = sb_getblk(sb, block);
3525ac27a0ecSDave Kleikamp 	if (!bh) {
3526c398eda0STheodore Ts'o 		EXT4_ERROR_INODE_BLOCK(inode, block,
3527c398eda0STheodore Ts'o 				       "unable to read itable block");
3528ac27a0ecSDave Kleikamp 		return -EIO;
3529ac27a0ecSDave Kleikamp 	}
3530ac27a0ecSDave Kleikamp 	if (!buffer_uptodate(bh)) {
3531ac27a0ecSDave Kleikamp 		lock_buffer(bh);
35329c83a923SHidehiro Kawai 
35339c83a923SHidehiro Kawai 		/*
35349c83a923SHidehiro Kawai 		 * If the buffer has the write error flag, we have failed
35359c83a923SHidehiro Kawai 		 * to write out another inode in the same block.  In this
35369c83a923SHidehiro Kawai 		 * case, we don't have to read the block because we may
35379c83a923SHidehiro Kawai 		 * read the old inode data successfully.
35389c83a923SHidehiro Kawai 		 */
35399c83a923SHidehiro Kawai 		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
35409c83a923SHidehiro Kawai 			set_buffer_uptodate(bh);
35419c83a923SHidehiro Kawai 
3542ac27a0ecSDave Kleikamp 		if (buffer_uptodate(bh)) {
3543ac27a0ecSDave Kleikamp 			/* someone brought it uptodate while we waited */
3544ac27a0ecSDave Kleikamp 			unlock_buffer(bh);
3545ac27a0ecSDave Kleikamp 			goto has_buffer;
3546ac27a0ecSDave Kleikamp 		}
3547ac27a0ecSDave Kleikamp 
3548ac27a0ecSDave Kleikamp 		/*
3549ac27a0ecSDave Kleikamp 		 * If we have all information of the inode in memory and this
3550ac27a0ecSDave Kleikamp 		 * is the only valid inode in the block, we need not read the
3551ac27a0ecSDave Kleikamp 		 * block.
3552ac27a0ecSDave Kleikamp 		 */
3553ac27a0ecSDave Kleikamp 		if (in_mem) {
3554ac27a0ecSDave Kleikamp 			struct buffer_head *bitmap_bh;
3555240799cdSTheodore Ts'o 			int i, start;
3556ac27a0ecSDave Kleikamp 
3557240799cdSTheodore Ts'o 			start = inode_offset & ~(inodes_per_block - 1);
3558ac27a0ecSDave Kleikamp 
3559ac27a0ecSDave Kleikamp 			/* Is the inode bitmap in cache? */
3560240799cdSTheodore Ts'o 			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
3561ac27a0ecSDave Kleikamp 			if (!bitmap_bh)
3562ac27a0ecSDave Kleikamp 				goto make_io;
3563ac27a0ecSDave Kleikamp 
3564ac27a0ecSDave Kleikamp 			/*
3565ac27a0ecSDave Kleikamp 			 * If the inode bitmap isn't in cache then the
3566ac27a0ecSDave Kleikamp 			 * optimisation may end up performing two reads instead
3567ac27a0ecSDave Kleikamp 			 * of one, so skip it.
3568ac27a0ecSDave Kleikamp 			 */
3569ac27a0ecSDave Kleikamp 			if (!buffer_uptodate(bitmap_bh)) {
3570ac27a0ecSDave Kleikamp 				brelse(bitmap_bh);
3571ac27a0ecSDave Kleikamp 				goto make_io;
3572ac27a0ecSDave Kleikamp 			}
3573240799cdSTheodore Ts'o 			for (i = start; i < start + inodes_per_block; i++) {
3574ac27a0ecSDave Kleikamp 				if (i == inode_offset)
3575ac27a0ecSDave Kleikamp 					continue;
3576617ba13bSMingming Cao 				if (ext4_test_bit(i, bitmap_bh->b_data))
3577ac27a0ecSDave Kleikamp 					break;
3578ac27a0ecSDave Kleikamp 			}
3579ac27a0ecSDave Kleikamp 			brelse(bitmap_bh);
3580240799cdSTheodore Ts'o 			if (i == start + inodes_per_block) {
3581ac27a0ecSDave Kleikamp 				/* all other inodes are free, so skip I/O */
3582ac27a0ecSDave Kleikamp 				memset(bh->b_data, 0, bh->b_size);
3583ac27a0ecSDave Kleikamp 				set_buffer_uptodate(bh);
3584ac27a0ecSDave Kleikamp 				unlock_buffer(bh);
3585ac27a0ecSDave Kleikamp 				goto has_buffer;
3586ac27a0ecSDave Kleikamp 			}
3587ac27a0ecSDave Kleikamp 		}
3588ac27a0ecSDave Kleikamp 
3589ac27a0ecSDave Kleikamp make_io:
3590ac27a0ecSDave Kleikamp 		/*
3591240799cdSTheodore Ts'o 		 * If we need to do any I/O, try to pre-readahead extra
3592240799cdSTheodore Ts'o 		 * blocks from the inode table.
3593240799cdSTheodore Ts'o 		 */
3594240799cdSTheodore Ts'o 		if (EXT4_SB(sb)->s_inode_readahead_blks) {
3595240799cdSTheodore Ts'o 			ext4_fsblk_t b, end, table;
3596240799cdSTheodore Ts'o 			unsigned num;
3597240799cdSTheodore Ts'o 
3598240799cdSTheodore Ts'o 			table = ext4_inode_table(sb, gdp);
3599b713a5ecSTheodore Ts'o 			/* s_inode_readahead_blks is always a power of 2 */
3600240799cdSTheodore Ts'o 			b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
3601240799cdSTheodore Ts'o 			if (table > b)
3602240799cdSTheodore Ts'o 				b = table;
3603240799cdSTheodore Ts'o 			end = b + EXT4_SB(sb)->s_inode_readahead_blks;
3604240799cdSTheodore Ts'o 			num = EXT4_INODES_PER_GROUP(sb);
3605feb0ab32SDarrick J. Wong 			if (ext4_has_group_desc_csum(sb))
3606560671a0SAneesh Kumar K.V 				num -= ext4_itable_unused_count(sb, gdp);
3607240799cdSTheodore Ts'o 			table += num / inodes_per_block;
3608240799cdSTheodore Ts'o 			if (end > table)
3609240799cdSTheodore Ts'o 				end = table;
3610240799cdSTheodore Ts'o 			while (b <= end)
3611240799cdSTheodore Ts'o 				sb_breadahead(sb, b++);
3612240799cdSTheodore Ts'o 		}
3613240799cdSTheodore Ts'o 
3614240799cdSTheodore Ts'o 		/*
3615ac27a0ecSDave Kleikamp 		 * There are other valid inodes in the buffer, this inode
3616ac27a0ecSDave Kleikamp 		 * has in-inode xattrs, or we don't have this inode in memory.
3617ac27a0ecSDave Kleikamp 		 * Read the block from disk.
3618ac27a0ecSDave Kleikamp 		 */
36190562e0baSJiaying Zhang 		trace_ext4_load_inode(inode);
3620ac27a0ecSDave Kleikamp 		get_bh(bh);
3621ac27a0ecSDave Kleikamp 		bh->b_end_io = end_buffer_read_sync;
362265299a3bSChristoph Hellwig 		submit_bh(READ | REQ_META | REQ_PRIO, bh);
3623ac27a0ecSDave Kleikamp 		wait_on_buffer(bh);
3624ac27a0ecSDave Kleikamp 		if (!buffer_uptodate(bh)) {
3625c398eda0STheodore Ts'o 			EXT4_ERROR_INODE_BLOCK(inode, block,
3626c398eda0STheodore Ts'o 					       "unable to read itable block");
3627ac27a0ecSDave Kleikamp 			brelse(bh);
3628ac27a0ecSDave Kleikamp 			return -EIO;
3629ac27a0ecSDave Kleikamp 		}
3630ac27a0ecSDave Kleikamp 	}
3631ac27a0ecSDave Kleikamp has_buffer:
3632ac27a0ecSDave Kleikamp 	iloc->bh = bh;
3633ac27a0ecSDave Kleikamp 	return 0;
3634ac27a0ecSDave Kleikamp }
3635ac27a0ecSDave Kleikamp 
3636617ba13bSMingming Cao int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
3637ac27a0ecSDave Kleikamp {
3638ac27a0ecSDave Kleikamp 	/* We have all inode data except xattrs in memory here. */
3639617ba13bSMingming Cao 	return __ext4_get_inode_loc(inode, iloc,
364019f5fb7aSTheodore Ts'o 		!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
3641ac27a0ecSDave Kleikamp }
3642ac27a0ecSDave Kleikamp 
3643617ba13bSMingming Cao void ext4_set_inode_flags(struct inode *inode)
3644ac27a0ecSDave Kleikamp {
3645617ba13bSMingming Cao 	unsigned int flags = EXT4_I(inode)->i_flags;
3646ac27a0ecSDave Kleikamp 
3647ac27a0ecSDave Kleikamp 	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
3648617ba13bSMingming Cao 	if (flags & EXT4_SYNC_FL)
3649ac27a0ecSDave Kleikamp 		inode->i_flags |= S_SYNC;
3650617ba13bSMingming Cao 	if (flags & EXT4_APPEND_FL)
3651ac27a0ecSDave Kleikamp 		inode->i_flags |= S_APPEND;
3652617ba13bSMingming Cao 	if (flags & EXT4_IMMUTABLE_FL)
3653ac27a0ecSDave Kleikamp 		inode->i_flags |= S_IMMUTABLE;
3654617ba13bSMingming Cao 	if (flags & EXT4_NOATIME_FL)
3655ac27a0ecSDave Kleikamp 		inode->i_flags |= S_NOATIME;
3656617ba13bSMingming Cao 	if (flags & EXT4_DIRSYNC_FL)
3657ac27a0ecSDave Kleikamp 		inode->i_flags |= S_DIRSYNC;
3658ac27a0ecSDave Kleikamp }
3659ac27a0ecSDave Kleikamp 
3660ff9ddf7eSJan Kara /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
3661ff9ddf7eSJan Kara void ext4_get_inode_flags(struct ext4_inode_info *ei)
3662ff9ddf7eSJan Kara {
366384a8dce2SDmitry Monakhov 	unsigned int vfs_fl;
366484a8dce2SDmitry Monakhov 	unsigned long old_fl, new_fl;
3665ff9ddf7eSJan Kara 
366684a8dce2SDmitry Monakhov 	do {
366784a8dce2SDmitry Monakhov 		vfs_fl = ei->vfs_inode.i_flags;
366884a8dce2SDmitry Monakhov 		old_fl = ei->i_flags;
366984a8dce2SDmitry Monakhov 		new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
367084a8dce2SDmitry Monakhov 				EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
367184a8dce2SDmitry Monakhov 				EXT4_DIRSYNC_FL);
367284a8dce2SDmitry Monakhov 		if (vfs_fl & S_SYNC)
367384a8dce2SDmitry Monakhov 			new_fl |= EXT4_SYNC_FL;
367484a8dce2SDmitry Monakhov 		if (vfs_fl & S_APPEND)
367584a8dce2SDmitry Monakhov 			new_fl |= EXT4_APPEND_FL;
367684a8dce2SDmitry Monakhov 		if (vfs_fl & S_IMMUTABLE)
367784a8dce2SDmitry Monakhov 			new_fl |= EXT4_IMMUTABLE_FL;
367884a8dce2SDmitry Monakhov 		if (vfs_fl & S_NOATIME)
367984a8dce2SDmitry Monakhov 			new_fl |= EXT4_NOATIME_FL;
368084a8dce2SDmitry Monakhov 		if (vfs_fl & S_DIRSYNC)
368184a8dce2SDmitry Monakhov 			new_fl |= EXT4_DIRSYNC_FL;
368284a8dce2SDmitry Monakhov 	} while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
3683ff9ddf7eSJan Kara }
3684de9a55b8STheodore Ts'o 
36850fc1b451SAneesh Kumar K.V static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
36860fc1b451SAneesh Kumar K.V 				  struct ext4_inode_info *ei)
36870fc1b451SAneesh Kumar K.V {
36880fc1b451SAneesh Kumar K.V 	blkcnt_t i_blocks ;
36898180a562SAneesh Kumar K.V 	struct inode *inode = &(ei->vfs_inode);
36908180a562SAneesh Kumar K.V 	struct super_block *sb = inode->i_sb;
36910fc1b451SAneesh Kumar K.V 
36920fc1b451SAneesh Kumar K.V 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
36930fc1b451SAneesh Kumar K.V 				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
36940fc1b451SAneesh Kumar K.V 		/* we are using combined 48 bit field */
36950fc1b451SAneesh Kumar K.V 		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
36960fc1b451SAneesh Kumar K.V 					le32_to_cpu(raw_inode->i_blocks_lo);
369707a03824STheodore Ts'o 		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
36988180a562SAneesh Kumar K.V 			/* i_blocks represent file system block size */
36998180a562SAneesh Kumar K.V 			return i_blocks  << (inode->i_blkbits - 9);
37008180a562SAneesh Kumar K.V 		} else {
37010fc1b451SAneesh Kumar K.V 			return i_blocks;
37028180a562SAneesh Kumar K.V 		}
37030fc1b451SAneesh Kumar K.V 	} else {
37040fc1b451SAneesh Kumar K.V 		return le32_to_cpu(raw_inode->i_blocks_lo);
37050fc1b451SAneesh Kumar K.V 	}
37060fc1b451SAneesh Kumar K.V }
3707ff9ddf7eSJan Kara 
37081d1fe1eeSDavid Howells struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
3709ac27a0ecSDave Kleikamp {
3710617ba13bSMingming Cao 	struct ext4_iloc iloc;
3711617ba13bSMingming Cao 	struct ext4_inode *raw_inode;
37121d1fe1eeSDavid Howells 	struct ext4_inode_info *ei;
37131d1fe1eeSDavid Howells 	struct inode *inode;
3714b436b9beSJan Kara 	journal_t *journal = EXT4_SB(sb)->s_journal;
37151d1fe1eeSDavid Howells 	long ret;
3716ac27a0ecSDave Kleikamp 	int block;
371708cefc7aSEric W. Biederman 	uid_t i_uid;
371808cefc7aSEric W. Biederman 	gid_t i_gid;
3719ac27a0ecSDave Kleikamp 
37201d1fe1eeSDavid Howells 	inode = iget_locked(sb, ino);
37211d1fe1eeSDavid Howells 	if (!inode)
37221d1fe1eeSDavid Howells 		return ERR_PTR(-ENOMEM);
37231d1fe1eeSDavid Howells 	if (!(inode->i_state & I_NEW))
37241d1fe1eeSDavid Howells 		return inode;
37251d1fe1eeSDavid Howells 
37261d1fe1eeSDavid Howells 	ei = EXT4_I(inode);
37277dc57615SPeter Huewe 	iloc.bh = NULL;
3728ac27a0ecSDave Kleikamp 
37291d1fe1eeSDavid Howells 	ret = __ext4_get_inode_loc(inode, &iloc, 0);
37301d1fe1eeSDavid Howells 	if (ret < 0)
3731ac27a0ecSDave Kleikamp 		goto bad_inode;
3732617ba13bSMingming Cao 	raw_inode = ext4_raw_inode(&iloc);
3733814525f4SDarrick J. Wong 
3734814525f4SDarrick J. Wong 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3735814525f4SDarrick J. Wong 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
3736814525f4SDarrick J. Wong 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
3737814525f4SDarrick J. Wong 		    EXT4_INODE_SIZE(inode->i_sb)) {
3738814525f4SDarrick J. Wong 			EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
3739814525f4SDarrick J. Wong 				EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
3740814525f4SDarrick J. Wong 				EXT4_INODE_SIZE(inode->i_sb));
3741814525f4SDarrick J. Wong 			ret = -EIO;
3742814525f4SDarrick J. Wong 			goto bad_inode;
3743814525f4SDarrick J. Wong 		}
3744814525f4SDarrick J. Wong 	} else
3745814525f4SDarrick J. Wong 		ei->i_extra_isize = 0;
3746814525f4SDarrick J. Wong 
3747814525f4SDarrick J. Wong 	/* Precompute checksum seed for inode metadata */
3748814525f4SDarrick J. Wong 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3749814525f4SDarrick J. Wong 			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
3750814525f4SDarrick J. Wong 		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3751814525f4SDarrick J. Wong 		__u32 csum;
3752814525f4SDarrick J. Wong 		__le32 inum = cpu_to_le32(inode->i_ino);
3753814525f4SDarrick J. Wong 		__le32 gen = raw_inode->i_generation;
3754814525f4SDarrick J. Wong 		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
3755814525f4SDarrick J. Wong 				   sizeof(inum));
3756814525f4SDarrick J. Wong 		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
3757814525f4SDarrick J. Wong 					      sizeof(gen));
3758814525f4SDarrick J. Wong 	}
3759814525f4SDarrick J. Wong 
3760814525f4SDarrick J. Wong 	if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
3761814525f4SDarrick J. Wong 		EXT4_ERROR_INODE(inode, "checksum invalid");
3762814525f4SDarrick J. Wong 		ret = -EIO;
3763814525f4SDarrick J. Wong 		goto bad_inode;
3764814525f4SDarrick J. Wong 	}
3765814525f4SDarrick J. Wong 
3766ac27a0ecSDave Kleikamp 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
376708cefc7aSEric W. Biederman 	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
376808cefc7aSEric W. Biederman 	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
3769ac27a0ecSDave Kleikamp 	if (!(test_opt(inode->i_sb, NO_UID32))) {
377008cefc7aSEric W. Biederman 		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
377108cefc7aSEric W. Biederman 		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
3772ac27a0ecSDave Kleikamp 	}
377308cefc7aSEric W. Biederman 	i_uid_write(inode, i_uid);
377408cefc7aSEric W. Biederman 	i_gid_write(inode, i_gid);
3775bfe86848SMiklos Szeredi 	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
3776ac27a0ecSDave Kleikamp 
3777353eb83cSTheodore Ts'o 	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
3778ac27a0ecSDave Kleikamp 	ei->i_dir_start_lookup = 0;
3779ac27a0ecSDave Kleikamp 	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
3780ac27a0ecSDave Kleikamp 	/* We now have enough fields to check if the inode was active or not.
3781ac27a0ecSDave Kleikamp 	 * This is needed because nfsd might try to access dead inodes
3782ac27a0ecSDave Kleikamp 	 * the test is that same one that e2fsck uses
3783ac27a0ecSDave Kleikamp 	 * NeilBrown 1999oct15
3784ac27a0ecSDave Kleikamp 	 */
3785ac27a0ecSDave Kleikamp 	if (inode->i_nlink == 0) {
3786ac27a0ecSDave Kleikamp 		if (inode->i_mode == 0 ||
3787617ba13bSMingming Cao 		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
3788ac27a0ecSDave Kleikamp 			/* this inode is deleted */
37891d1fe1eeSDavid Howells 			ret = -ESTALE;
3790ac27a0ecSDave Kleikamp 			goto bad_inode;
3791ac27a0ecSDave Kleikamp 		}
3792ac27a0ecSDave Kleikamp 		/* The only unlinked inodes we let through here have
3793ac27a0ecSDave Kleikamp 		 * valid i_mode and are being read by the orphan
3794ac27a0ecSDave Kleikamp 		 * recovery code: that's fine, we're about to complete
3795ac27a0ecSDave Kleikamp 		 * the process of deleting those. */
3796ac27a0ecSDave Kleikamp 	}
3797ac27a0ecSDave Kleikamp 	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
37980fc1b451SAneesh Kumar K.V 	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
37997973c0c1SAneesh Kumar K.V 	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
3800a9e81742STheodore Ts'o 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
3801a1ddeb7eSBadari Pulavarty 		ei->i_file_acl |=
3802a1ddeb7eSBadari Pulavarty 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
3803a48380f7SAneesh Kumar K.V 	inode->i_size = ext4_isize(raw_inode);
3804ac27a0ecSDave Kleikamp 	ei->i_disksize = inode->i_size;
3805a9e7f447SDmitry Monakhov #ifdef CONFIG_QUOTA
3806a9e7f447SDmitry Monakhov 	ei->i_reserved_quota = 0;
3807a9e7f447SDmitry Monakhov #endif
3808ac27a0ecSDave Kleikamp 	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
3809ac27a0ecSDave Kleikamp 	ei->i_block_group = iloc.block_group;
3810a4912123STheodore Ts'o 	ei->i_last_alloc_group = ~0;
3811ac27a0ecSDave Kleikamp 	/*
3812ac27a0ecSDave Kleikamp 	 * NOTE! The in-memory inode i_data array is in little-endian order
3813ac27a0ecSDave Kleikamp 	 * even on big-endian machines: we do NOT byteswap the block numbers!
3814ac27a0ecSDave Kleikamp 	 */
3815617ba13bSMingming Cao 	for (block = 0; block < EXT4_N_BLOCKS; block++)
3816ac27a0ecSDave Kleikamp 		ei->i_data[block] = raw_inode->i_block[block];
3817ac27a0ecSDave Kleikamp 	INIT_LIST_HEAD(&ei->i_orphan);
3818ac27a0ecSDave Kleikamp 
3819b436b9beSJan Kara 	/*
3820b436b9beSJan Kara 	 * Set transaction id's of transactions that have to be committed
3821b436b9beSJan Kara 	 * to finish f[data]sync. We set them to currently running transaction
3822b436b9beSJan Kara 	 * as we cannot be sure that the inode or some of its metadata isn't
3823b436b9beSJan Kara 	 * part of the transaction - the inode could have been reclaimed and
3824b436b9beSJan Kara 	 * now it is reread from disk.
3825b436b9beSJan Kara 	 */
3826b436b9beSJan Kara 	if (journal) {
3827b436b9beSJan Kara 		transaction_t *transaction;
3828b436b9beSJan Kara 		tid_t tid;
3829b436b9beSJan Kara 
3830a931da6aSTheodore Ts'o 		read_lock(&journal->j_state_lock);
3831b436b9beSJan Kara 		if (journal->j_running_transaction)
3832b436b9beSJan Kara 			transaction = journal->j_running_transaction;
3833b436b9beSJan Kara 		else
3834b436b9beSJan Kara 			transaction = journal->j_committing_transaction;
3835b436b9beSJan Kara 		if (transaction)
3836b436b9beSJan Kara 			tid = transaction->t_tid;
3837b436b9beSJan Kara 		else
3838b436b9beSJan Kara 			tid = journal->j_commit_sequence;
3839a931da6aSTheodore Ts'o 		read_unlock(&journal->j_state_lock);
3840b436b9beSJan Kara 		ei->i_sync_tid = tid;
3841b436b9beSJan Kara 		ei->i_datasync_tid = tid;
3842b436b9beSJan Kara 	}
3843b436b9beSJan Kara 
38440040d987SEric Sandeen 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3845ac27a0ecSDave Kleikamp 		if (ei->i_extra_isize == 0) {
3846ac27a0ecSDave Kleikamp 			/* The extra space is currently unused. Use it. */
3847617ba13bSMingming Cao 			ei->i_extra_isize = sizeof(struct ext4_inode) -
3848617ba13bSMingming Cao 					    EXT4_GOOD_OLD_INODE_SIZE;
3849ac27a0ecSDave Kleikamp 		} else {
3850ac27a0ecSDave Kleikamp 			__le32 *magic = (void *)raw_inode +
3851617ba13bSMingming Cao 					EXT4_GOOD_OLD_INODE_SIZE +
3852ac27a0ecSDave Kleikamp 					ei->i_extra_isize;
3853617ba13bSMingming Cao 			if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
385419f5fb7aSTheodore Ts'o 				ext4_set_inode_state(inode, EXT4_STATE_XATTR);
3855ac27a0ecSDave Kleikamp 		}
3856814525f4SDarrick J. Wong 	}
3857ac27a0ecSDave Kleikamp 
3858ef7f3835SKalpak Shah 	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
3859ef7f3835SKalpak Shah 	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
3860ef7f3835SKalpak Shah 	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
3861ef7f3835SKalpak Shah 	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
3862ef7f3835SKalpak Shah 
386325ec56b5SJean Noel Cordenner 	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
386425ec56b5SJean Noel Cordenner 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
386525ec56b5SJean Noel Cordenner 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
386625ec56b5SJean Noel Cordenner 			inode->i_version |=
386725ec56b5SJean Noel Cordenner 			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
386825ec56b5SJean Noel Cordenner 	}
386925ec56b5SJean Noel Cordenner 
3870c4b5a614STheodore Ts'o 	ret = 0;
3871485c26ecSTheodore Ts'o 	if (ei->i_file_acl &&
38721032988cSTheodore Ts'o 	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
387324676da4STheodore Ts'o 		EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
387424676da4STheodore Ts'o 				 ei->i_file_acl);
3875485c26ecSTheodore Ts'o 		ret = -EIO;
3876485c26ecSTheodore Ts'o 		goto bad_inode;
387707a03824STheodore Ts'o 	} else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
3878c4b5a614STheodore Ts'o 		if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3879c4b5a614STheodore Ts'o 		    (S_ISLNK(inode->i_mode) &&
3880c4b5a614STheodore Ts'o 		     !ext4_inode_is_fast_symlink(inode)))
38817a262f7cSAneesh Kumar K.V 			/* Validate extent which is part of inode */
38827a262f7cSAneesh Kumar K.V 			ret = ext4_ext_check_inode(inode);
3883fe2c8191SThiemo Nagel 	} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3884fe2c8191SThiemo Nagel 		   (S_ISLNK(inode->i_mode) &&
3885fe2c8191SThiemo Nagel 		    !ext4_inode_is_fast_symlink(inode))) {
3886fe2c8191SThiemo Nagel 		/* Validate block references which are part of inode */
38871f7d1e77STheodore Ts'o 		ret = ext4_ind_check_inode(inode);
3888fe2c8191SThiemo Nagel 	}
3889567f3e9aSTheodore Ts'o 	if (ret)
38907a262f7cSAneesh Kumar K.V 		goto bad_inode;
38917a262f7cSAneesh Kumar K.V 
3892ac27a0ecSDave Kleikamp 	if (S_ISREG(inode->i_mode)) {
3893617ba13bSMingming Cao 		inode->i_op = &ext4_file_inode_operations;
3894617ba13bSMingming Cao 		inode->i_fop = &ext4_file_operations;
3895617ba13bSMingming Cao 		ext4_set_aops(inode);
3896ac27a0ecSDave Kleikamp 	} else if (S_ISDIR(inode->i_mode)) {
3897617ba13bSMingming Cao 		inode->i_op = &ext4_dir_inode_operations;
3898617ba13bSMingming Cao 		inode->i_fop = &ext4_dir_operations;
3899ac27a0ecSDave Kleikamp 	} else if (S_ISLNK(inode->i_mode)) {
3900e83c1397SDuane Griffin 		if (ext4_inode_is_fast_symlink(inode)) {
3901617ba13bSMingming Cao 			inode->i_op = &ext4_fast_symlink_inode_operations;
3902e83c1397SDuane Griffin 			nd_terminate_link(ei->i_data, inode->i_size,
3903e83c1397SDuane Griffin 				sizeof(ei->i_data) - 1);
3904e83c1397SDuane Griffin 		} else {
3905617ba13bSMingming Cao 			inode->i_op = &ext4_symlink_inode_operations;
3906617ba13bSMingming Cao 			ext4_set_aops(inode);
3907ac27a0ecSDave Kleikamp 		}
3908563bdd61STheodore Ts'o 	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
3909563bdd61STheodore Ts'o 	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
3910617ba13bSMingming Cao 		inode->i_op = &ext4_special_inode_operations;
3911ac27a0ecSDave Kleikamp 		if (raw_inode->i_block[0])
3912ac27a0ecSDave Kleikamp 			init_special_inode(inode, inode->i_mode,
3913ac27a0ecSDave Kleikamp 			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
3914ac27a0ecSDave Kleikamp 		else
3915ac27a0ecSDave Kleikamp 			init_special_inode(inode, inode->i_mode,
3916ac27a0ecSDave Kleikamp 			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
3917563bdd61STheodore Ts'o 	} else {
3918563bdd61STheodore Ts'o 		ret = -EIO;
391924676da4STheodore Ts'o 		EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
3920563bdd61STheodore Ts'o 		goto bad_inode;
3921ac27a0ecSDave Kleikamp 	}
3922ac27a0ecSDave Kleikamp 	brelse(iloc.bh);
3923617ba13bSMingming Cao 	ext4_set_inode_flags(inode);
39241d1fe1eeSDavid Howells 	unlock_new_inode(inode);
39251d1fe1eeSDavid Howells 	return inode;
3926ac27a0ecSDave Kleikamp 
3927ac27a0ecSDave Kleikamp bad_inode:
3928567f3e9aSTheodore Ts'o 	brelse(iloc.bh);
39291d1fe1eeSDavid Howells 	iget_failed(inode);
39301d1fe1eeSDavid Howells 	return ERR_PTR(ret);
3931ac27a0ecSDave Kleikamp }
3932ac27a0ecSDave Kleikamp 
39330fc1b451SAneesh Kumar K.V static int ext4_inode_blocks_set(handle_t *handle,
39340fc1b451SAneesh Kumar K.V 				struct ext4_inode *raw_inode,
39350fc1b451SAneesh Kumar K.V 				struct ext4_inode_info *ei)
39360fc1b451SAneesh Kumar K.V {
39370fc1b451SAneesh Kumar K.V 	struct inode *inode = &(ei->vfs_inode);
39380fc1b451SAneesh Kumar K.V 	u64 i_blocks = inode->i_blocks;
39390fc1b451SAneesh Kumar K.V 	struct super_block *sb = inode->i_sb;
39400fc1b451SAneesh Kumar K.V 
39410fc1b451SAneesh Kumar K.V 	if (i_blocks <= ~0U) {
39420fc1b451SAneesh Kumar K.V 		/*
39434907cb7bSAnatol Pomozov 		 * i_blocks can be represented in a 32 bit variable
39440fc1b451SAneesh Kumar K.V 		 * as multiple of 512 bytes
39450fc1b451SAneesh Kumar K.V 		 */
39468180a562SAneesh Kumar K.V 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
39470fc1b451SAneesh Kumar K.V 		raw_inode->i_blocks_high = 0;
394884a8dce2SDmitry Monakhov 		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
3949f287a1a5STheodore Ts'o 		return 0;
3950f287a1a5STheodore Ts'o 	}
3951f287a1a5STheodore Ts'o 	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
3952f287a1a5STheodore Ts'o 		return -EFBIG;
3953f287a1a5STheodore Ts'o 
3954f287a1a5STheodore Ts'o 	if (i_blocks <= 0xffffffffffffULL) {
39550fc1b451SAneesh Kumar K.V 		/*
39560fc1b451SAneesh Kumar K.V 		 * i_blocks can be represented in a 48 bit variable
39570fc1b451SAneesh Kumar K.V 		 * as multiple of 512 bytes
39580fc1b451SAneesh Kumar K.V 		 */
39598180a562SAneesh Kumar K.V 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
39600fc1b451SAneesh Kumar K.V 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
396184a8dce2SDmitry Monakhov 		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
39620fc1b451SAneesh Kumar K.V 	} else {
396384a8dce2SDmitry Monakhov 		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
39648180a562SAneesh Kumar K.V 		/* i_block is stored in file system block size */
39658180a562SAneesh Kumar K.V 		i_blocks = i_blocks >> (inode->i_blkbits - 9);
39668180a562SAneesh Kumar K.V 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
39678180a562SAneesh Kumar K.V 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
39680fc1b451SAneesh Kumar K.V 	}
3969f287a1a5STheodore Ts'o 	return 0;
39700fc1b451SAneesh Kumar K.V }
39710fc1b451SAneesh Kumar K.V 
3972ac27a0ecSDave Kleikamp /*
3973ac27a0ecSDave Kleikamp  * Post the struct inode info into an on-disk inode location in the
3974ac27a0ecSDave Kleikamp  * buffer-cache.  This gobbles the caller's reference to the
3975ac27a0ecSDave Kleikamp  * buffer_head in the inode location struct.
3976ac27a0ecSDave Kleikamp  *
3977ac27a0ecSDave Kleikamp  * The caller must have write access to iloc->bh.
3978ac27a0ecSDave Kleikamp  */
3979617ba13bSMingming Cao static int ext4_do_update_inode(handle_t *handle,
3980ac27a0ecSDave Kleikamp 				struct inode *inode,
3981830156c7SFrank Mayhar 				struct ext4_iloc *iloc)
3982ac27a0ecSDave Kleikamp {
3983617ba13bSMingming Cao 	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
3984617ba13bSMingming Cao 	struct ext4_inode_info *ei = EXT4_I(inode);
3985ac27a0ecSDave Kleikamp 	struct buffer_head *bh = iloc->bh;
3986ac27a0ecSDave Kleikamp 	int err = 0, rc, block;
3987b71fc079SJan Kara 	int need_datasync = 0;
398808cefc7aSEric W. Biederman 	uid_t i_uid;
398908cefc7aSEric W. Biederman 	gid_t i_gid;
3990ac27a0ecSDave Kleikamp 
3991ac27a0ecSDave Kleikamp 	/* For fields not not tracking in the in-memory inode,
3992ac27a0ecSDave Kleikamp 	 * initialise them to zero for new inodes. */
399319f5fb7aSTheodore Ts'o 	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
3994617ba13bSMingming Cao 		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
3995ac27a0ecSDave Kleikamp 
3996ff9ddf7eSJan Kara 	ext4_get_inode_flags(ei);
3997ac27a0ecSDave Kleikamp 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
399808cefc7aSEric W. Biederman 	i_uid = i_uid_read(inode);
399908cefc7aSEric W. Biederman 	i_gid = i_gid_read(inode);
4000ac27a0ecSDave Kleikamp 	if (!(test_opt(inode->i_sb, NO_UID32))) {
400108cefc7aSEric W. Biederman 		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
400208cefc7aSEric W. Biederman 		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
4003ac27a0ecSDave Kleikamp /*
4004ac27a0ecSDave Kleikamp  * Fix up interoperability with old kernels. Otherwise, old inodes get
4005ac27a0ecSDave Kleikamp  * re-used with the upper 16 bits of the uid/gid intact
4006ac27a0ecSDave Kleikamp  */
4007ac27a0ecSDave Kleikamp 		if (!ei->i_dtime) {
4008ac27a0ecSDave Kleikamp 			raw_inode->i_uid_high =
400908cefc7aSEric W. Biederman 				cpu_to_le16(high_16_bits(i_uid));
4010ac27a0ecSDave Kleikamp 			raw_inode->i_gid_high =
401108cefc7aSEric W. Biederman 				cpu_to_le16(high_16_bits(i_gid));
4012ac27a0ecSDave Kleikamp 		} else {
4013ac27a0ecSDave Kleikamp 			raw_inode->i_uid_high = 0;
4014ac27a0ecSDave Kleikamp 			raw_inode->i_gid_high = 0;
4015ac27a0ecSDave Kleikamp 		}
4016ac27a0ecSDave Kleikamp 	} else {
401708cefc7aSEric W. Biederman 		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
401808cefc7aSEric W. Biederman 		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
4019ac27a0ecSDave Kleikamp 		raw_inode->i_uid_high = 0;
4020ac27a0ecSDave Kleikamp 		raw_inode->i_gid_high = 0;
4021ac27a0ecSDave Kleikamp 	}
4022ac27a0ecSDave Kleikamp 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4023ef7f3835SKalpak Shah 
4024ef7f3835SKalpak Shah 	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4025ef7f3835SKalpak Shah 	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4026ef7f3835SKalpak Shah 	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4027ef7f3835SKalpak Shah 	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4028ef7f3835SKalpak Shah 
40290fc1b451SAneesh Kumar K.V 	if (ext4_inode_blocks_set(handle, raw_inode, ei))
40300fc1b451SAneesh Kumar K.V 		goto out_brelse;
4031ac27a0ecSDave Kleikamp 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4032353eb83cSTheodore Ts'o 	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
40339b8f1f01SMingming Cao 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
40349b8f1f01SMingming Cao 	    cpu_to_le32(EXT4_OS_HURD))
4035a1ddeb7eSBadari Pulavarty 		raw_inode->i_file_acl_high =
4036a1ddeb7eSBadari Pulavarty 			cpu_to_le16(ei->i_file_acl >> 32);
40377973c0c1SAneesh Kumar K.V 	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4038b71fc079SJan Kara 	if (ei->i_disksize != ext4_isize(raw_inode)) {
4039a48380f7SAneesh Kumar K.V 		ext4_isize_set(raw_inode, ei->i_disksize);
4040b71fc079SJan Kara 		need_datasync = 1;
4041b71fc079SJan Kara 	}
4042ac27a0ecSDave Kleikamp 	if (ei->i_disksize > 0x7fffffffULL) {
4043ac27a0ecSDave Kleikamp 		struct super_block *sb = inode->i_sb;
4044617ba13bSMingming Cao 		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
4045617ba13bSMingming Cao 				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
4046617ba13bSMingming Cao 				EXT4_SB(sb)->s_es->s_rev_level ==
4047617ba13bSMingming Cao 				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
4048ac27a0ecSDave Kleikamp 			/* If this is the first large file
4049ac27a0ecSDave Kleikamp 			 * created, add a flag to the superblock.
4050ac27a0ecSDave Kleikamp 			 */
4051617ba13bSMingming Cao 			err = ext4_journal_get_write_access(handle,
4052617ba13bSMingming Cao 					EXT4_SB(sb)->s_sbh);
4053ac27a0ecSDave Kleikamp 			if (err)
4054ac27a0ecSDave Kleikamp 				goto out_brelse;
4055617ba13bSMingming Cao 			ext4_update_dynamic_rev(sb);
4056617ba13bSMingming Cao 			EXT4_SET_RO_COMPAT_FEATURE(sb,
4057617ba13bSMingming Cao 					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
40580390131bSFrank Mayhar 			ext4_handle_sync(handle);
4059b50924c2SArtem Bityutskiy 			err = ext4_handle_dirty_super(handle, sb);
4060ac27a0ecSDave Kleikamp 		}
4061ac27a0ecSDave Kleikamp 	}
4062ac27a0ecSDave Kleikamp 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4063ac27a0ecSDave Kleikamp 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4064ac27a0ecSDave Kleikamp 		if (old_valid_dev(inode->i_rdev)) {
4065ac27a0ecSDave Kleikamp 			raw_inode->i_block[0] =
4066ac27a0ecSDave Kleikamp 				cpu_to_le32(old_encode_dev(inode->i_rdev));
4067ac27a0ecSDave Kleikamp 			raw_inode->i_block[1] = 0;
4068ac27a0ecSDave Kleikamp 		} else {
4069ac27a0ecSDave Kleikamp 			raw_inode->i_block[0] = 0;
4070ac27a0ecSDave Kleikamp 			raw_inode->i_block[1] =
4071ac27a0ecSDave Kleikamp 				cpu_to_le32(new_encode_dev(inode->i_rdev));
4072ac27a0ecSDave Kleikamp 			raw_inode->i_block[2] = 0;
4073ac27a0ecSDave Kleikamp 		}
4074de9a55b8STheodore Ts'o 	} else
4075de9a55b8STheodore Ts'o 		for (block = 0; block < EXT4_N_BLOCKS; block++)
4076ac27a0ecSDave Kleikamp 			raw_inode->i_block[block] = ei->i_data[block];
4077ac27a0ecSDave Kleikamp 
407825ec56b5SJean Noel Cordenner 	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
407925ec56b5SJean Noel Cordenner 	if (ei->i_extra_isize) {
408025ec56b5SJean Noel Cordenner 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
408125ec56b5SJean Noel Cordenner 			raw_inode->i_version_hi =
408225ec56b5SJean Noel Cordenner 			cpu_to_le32(inode->i_version >> 32);
4083ac27a0ecSDave Kleikamp 		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
408425ec56b5SJean Noel Cordenner 	}
408525ec56b5SJean Noel Cordenner 
4086814525f4SDarrick J. Wong 	ext4_inode_csum_set(inode, raw_inode, ei);
4087814525f4SDarrick J. Wong 
40880390131bSFrank Mayhar 	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
408973b50c1cSCurt Wohlgemuth 	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
4090ac27a0ecSDave Kleikamp 	if (!err)
4091ac27a0ecSDave Kleikamp 		err = rc;
409219f5fb7aSTheodore Ts'o 	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
4093ac27a0ecSDave Kleikamp 
4094b71fc079SJan Kara 	ext4_update_inode_fsync_trans(handle, inode, need_datasync);
4095ac27a0ecSDave Kleikamp out_brelse:
4096ac27a0ecSDave Kleikamp 	brelse(bh);
4097617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4098ac27a0ecSDave Kleikamp 	return err;
4099ac27a0ecSDave Kleikamp }
4100ac27a0ecSDave Kleikamp 
4101ac27a0ecSDave Kleikamp /*
4102617ba13bSMingming Cao  * ext4_write_inode()
4103ac27a0ecSDave Kleikamp  *
4104ac27a0ecSDave Kleikamp  * We are called from a few places:
4105ac27a0ecSDave Kleikamp  *
4106ac27a0ecSDave Kleikamp  * - Within generic_file_write() for O_SYNC files.
4107ac27a0ecSDave Kleikamp  *   Here, there will be no transaction running. We wait for any running
41084907cb7bSAnatol Pomozov  *   transaction to commit.
4109ac27a0ecSDave Kleikamp  *
4110ac27a0ecSDave Kleikamp  * - Within sys_sync(), kupdate and such.
4111ac27a0ecSDave Kleikamp  *   We wait on commit, if tol to.
4112ac27a0ecSDave Kleikamp  *
4113ac27a0ecSDave Kleikamp  * - Within prune_icache() (PF_MEMALLOC == true)
4114ac27a0ecSDave Kleikamp  *   Here we simply return.  We can't afford to block kswapd on the
4115ac27a0ecSDave Kleikamp  *   journal commit.
4116ac27a0ecSDave Kleikamp  *
4117ac27a0ecSDave Kleikamp  * In all cases it is actually safe for us to return without doing anything,
4118ac27a0ecSDave Kleikamp  * because the inode has been copied into a raw inode buffer in
4119617ba13bSMingming Cao  * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
4120ac27a0ecSDave Kleikamp  * knfsd.
4121ac27a0ecSDave Kleikamp  *
4122ac27a0ecSDave Kleikamp  * Note that we are absolutely dependent upon all inode dirtiers doing the
4123ac27a0ecSDave Kleikamp  * right thing: they *must* call mark_inode_dirty() after dirtying info in
4124ac27a0ecSDave Kleikamp  * which we are interested.
4125ac27a0ecSDave Kleikamp  *
4126ac27a0ecSDave Kleikamp  * It would be a bug for them to not do this.  The code:
4127ac27a0ecSDave Kleikamp  *
4128ac27a0ecSDave Kleikamp  *	mark_inode_dirty(inode)
4129ac27a0ecSDave Kleikamp  *	stuff();
4130ac27a0ecSDave Kleikamp  *	inode->i_size = expr;
4131ac27a0ecSDave Kleikamp  *
4132ac27a0ecSDave Kleikamp  * is in error because a kswapd-driven write_inode() could occur while
4133ac27a0ecSDave Kleikamp  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
4134ac27a0ecSDave Kleikamp  * will no longer be on the superblock's dirty inode list.
4135ac27a0ecSDave Kleikamp  */
4136a9185b41SChristoph Hellwig int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
4137ac27a0ecSDave Kleikamp {
413891ac6f43SFrank Mayhar 	int err;
413991ac6f43SFrank Mayhar 
4140ac27a0ecSDave Kleikamp 	if (current->flags & PF_MEMALLOC)
4141ac27a0ecSDave Kleikamp 		return 0;
4142ac27a0ecSDave Kleikamp 
414391ac6f43SFrank Mayhar 	if (EXT4_SB(inode->i_sb)->s_journal) {
4144617ba13bSMingming Cao 		if (ext4_journal_current_handle()) {
4145b38bd33aSMingming Cao 			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4146ac27a0ecSDave Kleikamp 			dump_stack();
4147ac27a0ecSDave Kleikamp 			return -EIO;
4148ac27a0ecSDave Kleikamp 		}
4149ac27a0ecSDave Kleikamp 
4150a9185b41SChristoph Hellwig 		if (wbc->sync_mode != WB_SYNC_ALL)
4151ac27a0ecSDave Kleikamp 			return 0;
4152ac27a0ecSDave Kleikamp 
415391ac6f43SFrank Mayhar 		err = ext4_force_commit(inode->i_sb);
415491ac6f43SFrank Mayhar 	} else {
415591ac6f43SFrank Mayhar 		struct ext4_iloc iloc;
415691ac6f43SFrank Mayhar 
41578b472d73SCurt Wohlgemuth 		err = __ext4_get_inode_loc(inode, &iloc, 0);
415891ac6f43SFrank Mayhar 		if (err)
415991ac6f43SFrank Mayhar 			return err;
4160a9185b41SChristoph Hellwig 		if (wbc->sync_mode == WB_SYNC_ALL)
4161830156c7SFrank Mayhar 			sync_dirty_buffer(iloc.bh);
4162830156c7SFrank Mayhar 		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
4163c398eda0STheodore Ts'o 			EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
4164c398eda0STheodore Ts'o 					 "IO error syncing inode");
4165830156c7SFrank Mayhar 			err = -EIO;
4166830156c7SFrank Mayhar 		}
4167fd2dd9fbSCurt Wohlgemuth 		brelse(iloc.bh);
416891ac6f43SFrank Mayhar 	}
416991ac6f43SFrank Mayhar 	return err;
4170ac27a0ecSDave Kleikamp }
4171ac27a0ecSDave Kleikamp 
4172ac27a0ecSDave Kleikamp /*
4173617ba13bSMingming Cao  * ext4_setattr()
4174ac27a0ecSDave Kleikamp  *
4175ac27a0ecSDave Kleikamp  * Called from notify_change.
4176ac27a0ecSDave Kleikamp  *
4177ac27a0ecSDave Kleikamp  * We want to trap VFS attempts to truncate the file as soon as
4178ac27a0ecSDave Kleikamp  * possible.  In particular, we want to make sure that when the VFS
4179ac27a0ecSDave Kleikamp  * shrinks i_size, we put the inode on the orphan list and modify
4180ac27a0ecSDave Kleikamp  * i_disksize immediately, so that during the subsequent flushing of
4181ac27a0ecSDave Kleikamp  * dirty pages and freeing of disk blocks, we can guarantee that any
4182ac27a0ecSDave Kleikamp  * commit will leave the blocks being flushed in an unused state on
4183ac27a0ecSDave Kleikamp  * disk.  (On recovery, the inode will get truncated and the blocks will
4184ac27a0ecSDave Kleikamp  * be freed, so we have a strong guarantee that no future commit will
4185ac27a0ecSDave Kleikamp  * leave these blocks visible to the user.)
4186ac27a0ecSDave Kleikamp  *
4187678aaf48SJan Kara  * Another thing we have to assure is that if we are in ordered mode
4188678aaf48SJan Kara  * and inode is still attached to the committing transaction, we must
4189678aaf48SJan Kara  * we start writeout of all the dirty pages which are being truncated.
4190678aaf48SJan Kara  * This way we are sure that all the data written in the previous
4191678aaf48SJan Kara  * transaction are already on disk (truncate waits for pages under
4192678aaf48SJan Kara  * writeback).
4193678aaf48SJan Kara  *
4194678aaf48SJan Kara  * Called with inode->i_mutex down.
4195ac27a0ecSDave Kleikamp  */
4196617ba13bSMingming Cao int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4197ac27a0ecSDave Kleikamp {
4198ac27a0ecSDave Kleikamp 	struct inode *inode = dentry->d_inode;
4199ac27a0ecSDave Kleikamp 	int error, rc = 0;
42003d287de3SDmitry Monakhov 	int orphan = 0;
4201ac27a0ecSDave Kleikamp 	const unsigned int ia_valid = attr->ia_valid;
4202ac27a0ecSDave Kleikamp 
4203ac27a0ecSDave Kleikamp 	error = inode_change_ok(inode, attr);
4204ac27a0ecSDave Kleikamp 	if (error)
4205ac27a0ecSDave Kleikamp 		return error;
4206ac27a0ecSDave Kleikamp 
420712755627SDmitry Monakhov 	if (is_quota_modification(inode, attr))
4208871a2931SChristoph Hellwig 		dquot_initialize(inode);
420908cefc7aSEric W. Biederman 	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
421008cefc7aSEric W. Biederman 	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
4211ac27a0ecSDave Kleikamp 		handle_t *handle;
4212ac27a0ecSDave Kleikamp 
4213ac27a0ecSDave Kleikamp 		/* (user+group)*(old+new) structure, inode write (sb,
4214ac27a0ecSDave Kleikamp 		 * inode block, ? - but truncate inode update has it) */
42155aca07ebSDmitry Monakhov 		handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
4216194074acSDmitry Monakhov 					EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
4217ac27a0ecSDave Kleikamp 		if (IS_ERR(handle)) {
4218ac27a0ecSDave Kleikamp 			error = PTR_ERR(handle);
4219ac27a0ecSDave Kleikamp 			goto err_out;
4220ac27a0ecSDave Kleikamp 		}
4221b43fa828SChristoph Hellwig 		error = dquot_transfer(inode, attr);
4222ac27a0ecSDave Kleikamp 		if (error) {
4223617ba13bSMingming Cao 			ext4_journal_stop(handle);
4224ac27a0ecSDave Kleikamp 			return error;
4225ac27a0ecSDave Kleikamp 		}
4226ac27a0ecSDave Kleikamp 		/* Update corresponding info in inode so that everything is in
4227ac27a0ecSDave Kleikamp 		 * one transaction */
4228ac27a0ecSDave Kleikamp 		if (attr->ia_valid & ATTR_UID)
4229ac27a0ecSDave Kleikamp 			inode->i_uid = attr->ia_uid;
4230ac27a0ecSDave Kleikamp 		if (attr->ia_valid & ATTR_GID)
4231ac27a0ecSDave Kleikamp 			inode->i_gid = attr->ia_gid;
4232617ba13bSMingming Cao 		error = ext4_mark_inode_dirty(handle, inode);
4233617ba13bSMingming Cao 		ext4_journal_stop(handle);
4234ac27a0ecSDave Kleikamp 	}
4235ac27a0ecSDave Kleikamp 
4236e2b46574SEric Sandeen 	if (attr->ia_valid & ATTR_SIZE) {
4237562c72aaSChristoph Hellwig 
423812e9b892SDmitry Monakhov 		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4239e2b46574SEric Sandeen 			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4240e2b46574SEric Sandeen 
42410c095c7fSTheodore Ts'o 			if (attr->ia_size > sbi->s_bitmap_maxbytes)
42420c095c7fSTheodore Ts'o 				return -EFBIG;
4243e2b46574SEric Sandeen 		}
4244e2b46574SEric Sandeen 	}
4245e2b46574SEric Sandeen 
4246ac27a0ecSDave Kleikamp 	if (S_ISREG(inode->i_mode) &&
4247c8d46e41SJiaying Zhang 	    attr->ia_valid & ATTR_SIZE &&
4248072bd7eaSTheodore Ts'o 	    (attr->ia_size < inode->i_size)) {
4249ac27a0ecSDave Kleikamp 		handle_t *handle;
4250ac27a0ecSDave Kleikamp 
4251617ba13bSMingming Cao 		handle = ext4_journal_start(inode, 3);
4252ac27a0ecSDave Kleikamp 		if (IS_ERR(handle)) {
4253ac27a0ecSDave Kleikamp 			error = PTR_ERR(handle);
4254ac27a0ecSDave Kleikamp 			goto err_out;
4255ac27a0ecSDave Kleikamp 		}
42563d287de3SDmitry Monakhov 		if (ext4_handle_valid(handle)) {
4257617ba13bSMingming Cao 			error = ext4_orphan_add(handle, inode);
42583d287de3SDmitry Monakhov 			orphan = 1;
42593d287de3SDmitry Monakhov 		}
4260617ba13bSMingming Cao 		EXT4_I(inode)->i_disksize = attr->ia_size;
4261617ba13bSMingming Cao 		rc = ext4_mark_inode_dirty(handle, inode);
4262ac27a0ecSDave Kleikamp 		if (!error)
4263ac27a0ecSDave Kleikamp 			error = rc;
4264617ba13bSMingming Cao 		ext4_journal_stop(handle);
4265678aaf48SJan Kara 
4266678aaf48SJan Kara 		if (ext4_should_order_data(inode)) {
4267678aaf48SJan Kara 			error = ext4_begin_ordered_truncate(inode,
4268678aaf48SJan Kara 							    attr->ia_size);
4269678aaf48SJan Kara 			if (error) {
4270678aaf48SJan Kara 				/* Do as much error cleanup as possible */
4271678aaf48SJan Kara 				handle = ext4_journal_start(inode, 3);
4272678aaf48SJan Kara 				if (IS_ERR(handle)) {
4273678aaf48SJan Kara 					ext4_orphan_del(NULL, inode);
4274678aaf48SJan Kara 					goto err_out;
4275678aaf48SJan Kara 				}
4276678aaf48SJan Kara 				ext4_orphan_del(handle, inode);
42773d287de3SDmitry Monakhov 				orphan = 0;
4278678aaf48SJan Kara 				ext4_journal_stop(handle);
4279678aaf48SJan Kara 				goto err_out;
4280678aaf48SJan Kara 			}
4281678aaf48SJan Kara 		}
4282ac27a0ecSDave Kleikamp 	}
4283ac27a0ecSDave Kleikamp 
4284072bd7eaSTheodore Ts'o 	if (attr->ia_valid & ATTR_SIZE) {
42851c9114f9SDmitry Monakhov 		if (attr->ia_size != i_size_read(inode)) {
4286072bd7eaSTheodore Ts'o 			truncate_setsize(inode, attr->ia_size);
42871b65007eSDmitry Monakhov 			/* Inode size will be reduced, wait for dio in flight.
42881b65007eSDmitry Monakhov 			 * Temporarily disable dioread_nolock to prevent
42891b65007eSDmitry Monakhov 			 * livelock. */
42901b65007eSDmitry Monakhov 			if (orphan) {
42911b65007eSDmitry Monakhov 				ext4_inode_block_unlocked_dio(inode);
42921c9114f9SDmitry Monakhov 				inode_dio_wait(inode);
42931b65007eSDmitry Monakhov 				ext4_inode_resume_unlocked_dio(inode);
42941b65007eSDmitry Monakhov 			}
42951c9114f9SDmitry Monakhov 		}
4296072bd7eaSTheodore Ts'o 		ext4_truncate(inode);
4297072bd7eaSTheodore Ts'o 	}
4298ac27a0ecSDave Kleikamp 
42991025774cSChristoph Hellwig 	if (!rc) {
43001025774cSChristoph Hellwig 		setattr_copy(inode, attr);
43011025774cSChristoph Hellwig 		mark_inode_dirty(inode);
43021025774cSChristoph Hellwig 	}
43031025774cSChristoph Hellwig 
43041025774cSChristoph Hellwig 	/*
43051025774cSChristoph Hellwig 	 * If the call to ext4_truncate failed to get a transaction handle at
43061025774cSChristoph Hellwig 	 * all, we need to clean up the in-core orphan list manually.
43071025774cSChristoph Hellwig 	 */
43083d287de3SDmitry Monakhov 	if (orphan && inode->i_nlink)
4309617ba13bSMingming Cao 		ext4_orphan_del(NULL, inode);
4310ac27a0ecSDave Kleikamp 
4311ac27a0ecSDave Kleikamp 	if (!rc && (ia_valid & ATTR_MODE))
4312617ba13bSMingming Cao 		rc = ext4_acl_chmod(inode);
4313ac27a0ecSDave Kleikamp 
4314ac27a0ecSDave Kleikamp err_out:
4315617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, error);
4316ac27a0ecSDave Kleikamp 	if (!error)
4317ac27a0ecSDave Kleikamp 		error = rc;
4318ac27a0ecSDave Kleikamp 	return error;
4319ac27a0ecSDave Kleikamp }
4320ac27a0ecSDave Kleikamp 
43213e3398a0SMingming Cao int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
43223e3398a0SMingming Cao 		 struct kstat *stat)
43233e3398a0SMingming Cao {
43243e3398a0SMingming Cao 	struct inode *inode;
43253e3398a0SMingming Cao 	unsigned long delalloc_blocks;
43263e3398a0SMingming Cao 
43273e3398a0SMingming Cao 	inode = dentry->d_inode;
43283e3398a0SMingming Cao 	generic_fillattr(inode, stat);
43293e3398a0SMingming Cao 
43303e3398a0SMingming Cao 	/*
43313e3398a0SMingming Cao 	 * We can't update i_blocks if the block allocation is delayed
43323e3398a0SMingming Cao 	 * otherwise in the case of system crash before the real block
43333e3398a0SMingming Cao 	 * allocation is done, we will have i_blocks inconsistent with
43343e3398a0SMingming Cao 	 * on-disk file blocks.
43353e3398a0SMingming Cao 	 * We always keep i_blocks updated together with real
43363e3398a0SMingming Cao 	 * allocation. But to not confuse with user, stat
43373e3398a0SMingming Cao 	 * will return the blocks that include the delayed allocation
43383e3398a0SMingming Cao 	 * blocks for this file.
43393e3398a0SMingming Cao 	 */
434096607551STao Ma 	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
434196607551STao Ma 				EXT4_I(inode)->i_reserved_data_blocks);
43423e3398a0SMingming Cao 
43433e3398a0SMingming Cao 	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
43443e3398a0SMingming Cao 	return 0;
43453e3398a0SMingming Cao }
4346ac27a0ecSDave Kleikamp 
4347a02908f1SMingming Cao static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4348a02908f1SMingming Cao {
434912e9b892SDmitry Monakhov 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
43508bb2b247SAmir Goldstein 		return ext4_ind_trans_blocks(inode, nrblocks, chunk);
4351ac51d837STheodore Ts'o 	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
4352a02908f1SMingming Cao }
4353ac51d837STheodore Ts'o 
4354a02908f1SMingming Cao /*
4355a02908f1SMingming Cao  * Account for index blocks, block groups bitmaps and block group
4356a02908f1SMingming Cao  * descriptor blocks if modify datablocks and index blocks
4357a02908f1SMingming Cao  * worse case, the indexs blocks spread over different block groups
4358a02908f1SMingming Cao  *
4359a02908f1SMingming Cao  * If datablocks are discontiguous, they are possible to spread over
43604907cb7bSAnatol Pomozov  * different block groups too. If they are contiguous, with flexbg,
4361a02908f1SMingming Cao  * they could still across block group boundary.
4362a02908f1SMingming Cao  *
4363a02908f1SMingming Cao  * Also account for superblock, inode, quota and xattr blocks
4364a02908f1SMingming Cao  */
43651f109d5aSTheodore Ts'o static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4366a02908f1SMingming Cao {
43678df9675fSTheodore Ts'o 	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
43688df9675fSTheodore Ts'o 	int gdpblocks;
4369a02908f1SMingming Cao 	int idxblocks;
4370a02908f1SMingming Cao 	int ret = 0;
4371a02908f1SMingming Cao 
4372a02908f1SMingming Cao 	/*
4373a02908f1SMingming Cao 	 * How many index blocks need to touch to modify nrblocks?
4374a02908f1SMingming Cao 	 * The "Chunk" flag indicating whether the nrblocks is
4375a02908f1SMingming Cao 	 * physically contiguous on disk
4376a02908f1SMingming Cao 	 *
4377a02908f1SMingming Cao 	 * For Direct IO and fallocate, they calls get_block to allocate
4378a02908f1SMingming Cao 	 * one single extent at a time, so they could set the "Chunk" flag
4379a02908f1SMingming Cao 	 */
4380a02908f1SMingming Cao 	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
4381a02908f1SMingming Cao 
4382a02908f1SMingming Cao 	ret = idxblocks;
4383a02908f1SMingming Cao 
4384a02908f1SMingming Cao 	/*
4385a02908f1SMingming Cao 	 * Now let's see how many group bitmaps and group descriptors need
4386a02908f1SMingming Cao 	 * to account
4387a02908f1SMingming Cao 	 */
4388a02908f1SMingming Cao 	groups = idxblocks;
4389a02908f1SMingming Cao 	if (chunk)
4390a02908f1SMingming Cao 		groups += 1;
4391ac27a0ecSDave Kleikamp 	else
4392a02908f1SMingming Cao 		groups += nrblocks;
4393ac27a0ecSDave Kleikamp 
4394a02908f1SMingming Cao 	gdpblocks = groups;
43958df9675fSTheodore Ts'o 	if (groups > ngroups)
43968df9675fSTheodore Ts'o 		groups = ngroups;
4397a02908f1SMingming Cao 	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4398a02908f1SMingming Cao 		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4399a02908f1SMingming Cao 
4400a02908f1SMingming Cao 	/* bitmaps and block group descriptor blocks */
4401a02908f1SMingming Cao 	ret += groups + gdpblocks;
4402a02908f1SMingming Cao 
4403a02908f1SMingming Cao 	/* Blocks for super block, inode, quota and xattr blocks */
4404a02908f1SMingming Cao 	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
4405ac27a0ecSDave Kleikamp 
4406ac27a0ecSDave Kleikamp 	return ret;
4407ac27a0ecSDave Kleikamp }
4408ac27a0ecSDave Kleikamp 
4409ac27a0ecSDave Kleikamp /*
441025985edcSLucas De Marchi  * Calculate the total number of credits to reserve to fit
4411f3bd1f3fSMingming Cao  * the modification of a single pages into a single transaction,
4412f3bd1f3fSMingming Cao  * which may include multiple chunks of block allocations.
4413a02908f1SMingming Cao  *
4414525f4ed8SMingming Cao  * This could be called via ext4_write_begin()
4415a02908f1SMingming Cao  *
4416525f4ed8SMingming Cao  * We need to consider the worse case, when
4417a02908f1SMingming Cao  * one new block per extent.
4418a02908f1SMingming Cao  */
4419a02908f1SMingming Cao int ext4_writepage_trans_blocks(struct inode *inode)
4420a02908f1SMingming Cao {
4421a02908f1SMingming Cao 	int bpp = ext4_journal_blocks_per_page(inode);
4422a02908f1SMingming Cao 	int ret;
4423a02908f1SMingming Cao 
4424a02908f1SMingming Cao 	ret = ext4_meta_trans_blocks(inode, bpp, 0);
4425a02908f1SMingming Cao 
4426a02908f1SMingming Cao 	/* Account for data blocks for journalled mode */
4427a02908f1SMingming Cao 	if (ext4_should_journal_data(inode))
4428a02908f1SMingming Cao 		ret += bpp;
4429a02908f1SMingming Cao 	return ret;
4430a02908f1SMingming Cao }
4431f3bd1f3fSMingming Cao 
4432f3bd1f3fSMingming Cao /*
4433f3bd1f3fSMingming Cao  * Calculate the journal credits for a chunk of data modification.
4434f3bd1f3fSMingming Cao  *
4435f3bd1f3fSMingming Cao  * This is called from DIO, fallocate or whoever calling
443679e83036SEric Sandeen  * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
4437f3bd1f3fSMingming Cao  *
4438f3bd1f3fSMingming Cao  * journal buffers for data blocks are not included here, as DIO
4439f3bd1f3fSMingming Cao  * and fallocate do no need to journal data buffers.
4440f3bd1f3fSMingming Cao  */
4441f3bd1f3fSMingming Cao int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
4442f3bd1f3fSMingming Cao {
4443f3bd1f3fSMingming Cao 	return ext4_meta_trans_blocks(inode, nrblocks, 1);
4444f3bd1f3fSMingming Cao }
4445f3bd1f3fSMingming Cao 
4446a02908f1SMingming Cao /*
4447617ba13bSMingming Cao  * The caller must have previously called ext4_reserve_inode_write().
4448ac27a0ecSDave Kleikamp  * Give this, we know that the caller already has write access to iloc->bh.
4449ac27a0ecSDave Kleikamp  */
4450617ba13bSMingming Cao int ext4_mark_iloc_dirty(handle_t *handle,
4451617ba13bSMingming Cao 			 struct inode *inode, struct ext4_iloc *iloc)
4452ac27a0ecSDave Kleikamp {
4453ac27a0ecSDave Kleikamp 	int err = 0;
4454ac27a0ecSDave Kleikamp 
4455c64db50eSTheodore Ts'o 	if (IS_I_VERSION(inode))
445625ec56b5SJean Noel Cordenner 		inode_inc_iversion(inode);
445725ec56b5SJean Noel Cordenner 
4458ac27a0ecSDave Kleikamp 	/* the do_update_inode consumes one bh->b_count */
4459ac27a0ecSDave Kleikamp 	get_bh(iloc->bh);
4460ac27a0ecSDave Kleikamp 
4461dab291afSMingming Cao 	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
4462830156c7SFrank Mayhar 	err = ext4_do_update_inode(handle, inode, iloc);
4463ac27a0ecSDave Kleikamp 	put_bh(iloc->bh);
4464ac27a0ecSDave Kleikamp 	return err;
4465ac27a0ecSDave Kleikamp }
4466ac27a0ecSDave Kleikamp 
4467ac27a0ecSDave Kleikamp /*
4468ac27a0ecSDave Kleikamp  * On success, We end up with an outstanding reference count against
4469ac27a0ecSDave Kleikamp  * iloc->bh.  This _must_ be cleaned up later.
4470ac27a0ecSDave Kleikamp  */
4471ac27a0ecSDave Kleikamp 
4472ac27a0ecSDave Kleikamp int
4473617ba13bSMingming Cao ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
4474617ba13bSMingming Cao 			 struct ext4_iloc *iloc)
4475ac27a0ecSDave Kleikamp {
44760390131bSFrank Mayhar 	int err;
44770390131bSFrank Mayhar 
4478617ba13bSMingming Cao 	err = ext4_get_inode_loc(inode, iloc);
4479ac27a0ecSDave Kleikamp 	if (!err) {
4480ac27a0ecSDave Kleikamp 		BUFFER_TRACE(iloc->bh, "get_write_access");
4481617ba13bSMingming Cao 		err = ext4_journal_get_write_access(handle, iloc->bh);
4482ac27a0ecSDave Kleikamp 		if (err) {
4483ac27a0ecSDave Kleikamp 			brelse(iloc->bh);
4484ac27a0ecSDave Kleikamp 			iloc->bh = NULL;
4485ac27a0ecSDave Kleikamp 		}
4486ac27a0ecSDave Kleikamp 	}
4487617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4488ac27a0ecSDave Kleikamp 	return err;
4489ac27a0ecSDave Kleikamp }
4490ac27a0ecSDave Kleikamp 
4491ac27a0ecSDave Kleikamp /*
44926dd4ee7cSKalpak Shah  * Expand an inode by new_extra_isize bytes.
44936dd4ee7cSKalpak Shah  * Returns 0 on success or negative error number on failure.
44946dd4ee7cSKalpak Shah  */
44951d03ec98SAneesh Kumar K.V static int ext4_expand_extra_isize(struct inode *inode,
44961d03ec98SAneesh Kumar K.V 				   unsigned int new_extra_isize,
44971d03ec98SAneesh Kumar K.V 				   struct ext4_iloc iloc,
44981d03ec98SAneesh Kumar K.V 				   handle_t *handle)
44996dd4ee7cSKalpak Shah {
45006dd4ee7cSKalpak Shah 	struct ext4_inode *raw_inode;
45016dd4ee7cSKalpak Shah 	struct ext4_xattr_ibody_header *header;
45026dd4ee7cSKalpak Shah 
45036dd4ee7cSKalpak Shah 	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
45046dd4ee7cSKalpak Shah 		return 0;
45056dd4ee7cSKalpak Shah 
45066dd4ee7cSKalpak Shah 	raw_inode = ext4_raw_inode(&iloc);
45076dd4ee7cSKalpak Shah 
45086dd4ee7cSKalpak Shah 	header = IHDR(inode, raw_inode);
45096dd4ee7cSKalpak Shah 
45106dd4ee7cSKalpak Shah 	/* No extended attributes present */
451119f5fb7aSTheodore Ts'o 	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
45126dd4ee7cSKalpak Shah 	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
45136dd4ee7cSKalpak Shah 		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
45146dd4ee7cSKalpak Shah 			new_extra_isize);
45156dd4ee7cSKalpak Shah 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
45166dd4ee7cSKalpak Shah 		return 0;
45176dd4ee7cSKalpak Shah 	}
45186dd4ee7cSKalpak Shah 
45196dd4ee7cSKalpak Shah 	/* try to expand with EAs present */
45206dd4ee7cSKalpak Shah 	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
45216dd4ee7cSKalpak Shah 					  raw_inode, handle);
45226dd4ee7cSKalpak Shah }
45236dd4ee7cSKalpak Shah 
45246dd4ee7cSKalpak Shah /*
4525ac27a0ecSDave Kleikamp  * What we do here is to mark the in-core inode as clean with respect to inode
4526ac27a0ecSDave Kleikamp  * dirtiness (it may still be data-dirty).
4527ac27a0ecSDave Kleikamp  * This means that the in-core inode may be reaped by prune_icache
4528ac27a0ecSDave Kleikamp  * without having to perform any I/O.  This is a very good thing,
4529ac27a0ecSDave Kleikamp  * because *any* task may call prune_icache - even ones which
4530ac27a0ecSDave Kleikamp  * have a transaction open against a different journal.
4531ac27a0ecSDave Kleikamp  *
4532ac27a0ecSDave Kleikamp  * Is this cheating?  Not really.  Sure, we haven't written the
4533ac27a0ecSDave Kleikamp  * inode out, but prune_icache isn't a user-visible syncing function.
4534ac27a0ecSDave Kleikamp  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
4535ac27a0ecSDave Kleikamp  * we start and wait on commits.
4536ac27a0ecSDave Kleikamp  */
4537617ba13bSMingming Cao int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4538ac27a0ecSDave Kleikamp {
4539617ba13bSMingming Cao 	struct ext4_iloc iloc;
45406dd4ee7cSKalpak Shah 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
45416dd4ee7cSKalpak Shah 	static unsigned int mnt_count;
45426dd4ee7cSKalpak Shah 	int err, ret;
4543ac27a0ecSDave Kleikamp 
4544ac27a0ecSDave Kleikamp 	might_sleep();
45457ff9c073STheodore Ts'o 	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
4546617ba13bSMingming Cao 	err = ext4_reserve_inode_write(handle, inode, &iloc);
45470390131bSFrank Mayhar 	if (ext4_handle_valid(handle) &&
45480390131bSFrank Mayhar 	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
454919f5fb7aSTheodore Ts'o 	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
45506dd4ee7cSKalpak Shah 		/*
45516dd4ee7cSKalpak Shah 		 * We need extra buffer credits since we may write into EA block
45526dd4ee7cSKalpak Shah 		 * with this same handle. If journal_extend fails, then it will
45536dd4ee7cSKalpak Shah 		 * only result in a minor loss of functionality for that inode.
45546dd4ee7cSKalpak Shah 		 * If this is felt to be critical, then e2fsck should be run to
45556dd4ee7cSKalpak Shah 		 * force a large enough s_min_extra_isize.
45566dd4ee7cSKalpak Shah 		 */
45576dd4ee7cSKalpak Shah 		if ((jbd2_journal_extend(handle,
45586dd4ee7cSKalpak Shah 			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
45596dd4ee7cSKalpak Shah 			ret = ext4_expand_extra_isize(inode,
45606dd4ee7cSKalpak Shah 						      sbi->s_want_extra_isize,
45616dd4ee7cSKalpak Shah 						      iloc, handle);
45626dd4ee7cSKalpak Shah 			if (ret) {
456319f5fb7aSTheodore Ts'o 				ext4_set_inode_state(inode,
456419f5fb7aSTheodore Ts'o 						     EXT4_STATE_NO_EXPAND);
4565c1bddad9SAneesh Kumar K.V 				if (mnt_count !=
4566c1bddad9SAneesh Kumar K.V 					le16_to_cpu(sbi->s_es->s_mnt_count)) {
456712062dddSEric Sandeen 					ext4_warning(inode->i_sb,
45686dd4ee7cSKalpak Shah 					"Unable to expand inode %lu. Delete"
45696dd4ee7cSKalpak Shah 					" some EAs or run e2fsck.",
45706dd4ee7cSKalpak Shah 					inode->i_ino);
4571c1bddad9SAneesh Kumar K.V 					mnt_count =
4572c1bddad9SAneesh Kumar K.V 					  le16_to_cpu(sbi->s_es->s_mnt_count);
45736dd4ee7cSKalpak Shah 				}
45746dd4ee7cSKalpak Shah 			}
45756dd4ee7cSKalpak Shah 		}
45766dd4ee7cSKalpak Shah 	}
4577ac27a0ecSDave Kleikamp 	if (!err)
4578617ba13bSMingming Cao 		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
4579ac27a0ecSDave Kleikamp 	return err;
4580ac27a0ecSDave Kleikamp }
4581ac27a0ecSDave Kleikamp 
4582ac27a0ecSDave Kleikamp /*
4583617ba13bSMingming Cao  * ext4_dirty_inode() is called from __mark_inode_dirty()
4584ac27a0ecSDave Kleikamp  *
4585ac27a0ecSDave Kleikamp  * We're really interested in the case where a file is being extended.
4586ac27a0ecSDave Kleikamp  * i_size has been changed by generic_commit_write() and we thus need
4587ac27a0ecSDave Kleikamp  * to include the updated inode in the current transaction.
4588ac27a0ecSDave Kleikamp  *
45895dd4056dSChristoph Hellwig  * Also, dquot_alloc_block() will always dirty the inode when blocks
4590ac27a0ecSDave Kleikamp  * are allocated to the file.
4591ac27a0ecSDave Kleikamp  *
4592ac27a0ecSDave Kleikamp  * If the inode is marked synchronous, we don't honour that here - doing
4593ac27a0ecSDave Kleikamp  * so would cause a commit on atime updates, which we don't bother doing.
4594ac27a0ecSDave Kleikamp  * We handle synchronous inodes at the highest possible level.
4595ac27a0ecSDave Kleikamp  */
4596aa385729SChristoph Hellwig void ext4_dirty_inode(struct inode *inode, int flags)
4597ac27a0ecSDave Kleikamp {
4598ac27a0ecSDave Kleikamp 	handle_t *handle;
4599ac27a0ecSDave Kleikamp 
4600617ba13bSMingming Cao 	handle = ext4_journal_start(inode, 2);
4601ac27a0ecSDave Kleikamp 	if (IS_ERR(handle))
4602ac27a0ecSDave Kleikamp 		goto out;
4603f3dc272fSCurt Wohlgemuth 
4604617ba13bSMingming Cao 	ext4_mark_inode_dirty(handle, inode);
4605f3dc272fSCurt Wohlgemuth 
4606617ba13bSMingming Cao 	ext4_journal_stop(handle);
4607ac27a0ecSDave Kleikamp out:
4608ac27a0ecSDave Kleikamp 	return;
4609ac27a0ecSDave Kleikamp }
4610ac27a0ecSDave Kleikamp 
4611ac27a0ecSDave Kleikamp #if 0
4612ac27a0ecSDave Kleikamp /*
4613ac27a0ecSDave Kleikamp  * Bind an inode's backing buffer_head into this transaction, to prevent
4614ac27a0ecSDave Kleikamp  * it from being flushed to disk early.  Unlike
4615617ba13bSMingming Cao  * ext4_reserve_inode_write, this leaves behind no bh reference and
4616ac27a0ecSDave Kleikamp  * returns no iloc structure, so the caller needs to repeat the iloc
4617ac27a0ecSDave Kleikamp  * lookup to mark the inode dirty later.
4618ac27a0ecSDave Kleikamp  */
4619617ba13bSMingming Cao static int ext4_pin_inode(handle_t *handle, struct inode *inode)
4620ac27a0ecSDave Kleikamp {
4621617ba13bSMingming Cao 	struct ext4_iloc iloc;
4622ac27a0ecSDave Kleikamp 
4623ac27a0ecSDave Kleikamp 	int err = 0;
4624ac27a0ecSDave Kleikamp 	if (handle) {
4625617ba13bSMingming Cao 		err = ext4_get_inode_loc(inode, &iloc);
4626ac27a0ecSDave Kleikamp 		if (!err) {
4627ac27a0ecSDave Kleikamp 			BUFFER_TRACE(iloc.bh, "get_write_access");
4628dab291afSMingming Cao 			err = jbd2_journal_get_write_access(handle, iloc.bh);
4629ac27a0ecSDave Kleikamp 			if (!err)
46300390131bSFrank Mayhar 				err = ext4_handle_dirty_metadata(handle,
463173b50c1cSCurt Wohlgemuth 								 NULL,
4632ac27a0ecSDave Kleikamp 								 iloc.bh);
4633ac27a0ecSDave Kleikamp 			brelse(iloc.bh);
4634ac27a0ecSDave Kleikamp 		}
4635ac27a0ecSDave Kleikamp 	}
4636617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4637ac27a0ecSDave Kleikamp 	return err;
4638ac27a0ecSDave Kleikamp }
4639ac27a0ecSDave Kleikamp #endif
4640ac27a0ecSDave Kleikamp 
4641617ba13bSMingming Cao int ext4_change_inode_journal_flag(struct inode *inode, int val)
4642ac27a0ecSDave Kleikamp {
4643ac27a0ecSDave Kleikamp 	journal_t *journal;
4644ac27a0ecSDave Kleikamp 	handle_t *handle;
4645ac27a0ecSDave Kleikamp 	int err;
4646ac27a0ecSDave Kleikamp 
4647ac27a0ecSDave Kleikamp 	/*
4648ac27a0ecSDave Kleikamp 	 * We have to be very careful here: changing a data block's
4649ac27a0ecSDave Kleikamp 	 * journaling status dynamically is dangerous.  If we write a
4650ac27a0ecSDave Kleikamp 	 * data block to the journal, change the status and then delete
4651ac27a0ecSDave Kleikamp 	 * that block, we risk forgetting to revoke the old log record
4652ac27a0ecSDave Kleikamp 	 * from the journal and so a subsequent replay can corrupt data.
4653ac27a0ecSDave Kleikamp 	 * So, first we make sure that the journal is empty and that
4654ac27a0ecSDave Kleikamp 	 * nobody is changing anything.
4655ac27a0ecSDave Kleikamp 	 */
4656ac27a0ecSDave Kleikamp 
4657617ba13bSMingming Cao 	journal = EXT4_JOURNAL(inode);
46580390131bSFrank Mayhar 	if (!journal)
46590390131bSFrank Mayhar 		return 0;
4660d699594dSDave Hansen 	if (is_journal_aborted(journal))
4661ac27a0ecSDave Kleikamp 		return -EROFS;
46622aff57b0SYongqiang Yang 	/* We have to allocate physical blocks for delalloc blocks
46632aff57b0SYongqiang Yang 	 * before flushing journal. otherwise delalloc blocks can not
46642aff57b0SYongqiang Yang 	 * be allocated any more. even more truncate on delalloc blocks
46652aff57b0SYongqiang Yang 	 * could trigger BUG by flushing delalloc blocks in journal.
46662aff57b0SYongqiang Yang 	 * There is no delalloc block in non-journal data mode.
46672aff57b0SYongqiang Yang 	 */
46682aff57b0SYongqiang Yang 	if (val && test_opt(inode->i_sb, DELALLOC)) {
46692aff57b0SYongqiang Yang 		err = ext4_alloc_da_blocks(inode);
46702aff57b0SYongqiang Yang 		if (err < 0)
46712aff57b0SYongqiang Yang 			return err;
46722aff57b0SYongqiang Yang 	}
4673ac27a0ecSDave Kleikamp 
467417335dccSDmitry Monakhov 	/* Wait for all existing dio workers */
467517335dccSDmitry Monakhov 	ext4_inode_block_unlocked_dio(inode);
467617335dccSDmitry Monakhov 	inode_dio_wait(inode);
467717335dccSDmitry Monakhov 
4678dab291afSMingming Cao 	jbd2_journal_lock_updates(journal);
4679ac27a0ecSDave Kleikamp 
4680ac27a0ecSDave Kleikamp 	/*
4681ac27a0ecSDave Kleikamp 	 * OK, there are no updates running now, and all cached data is
4682ac27a0ecSDave Kleikamp 	 * synced to disk.  We are now in a completely consistent state
4683ac27a0ecSDave Kleikamp 	 * which doesn't have anything in the journal, and we know that
4684ac27a0ecSDave Kleikamp 	 * no filesystem updates are running, so it is safe to modify
4685ac27a0ecSDave Kleikamp 	 * the inode's in-core data-journaling state flag now.
4686ac27a0ecSDave Kleikamp 	 */
4687ac27a0ecSDave Kleikamp 
4688ac27a0ecSDave Kleikamp 	if (val)
468912e9b892SDmitry Monakhov 		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
46905872ddaaSYongqiang Yang 	else {
46915872ddaaSYongqiang Yang 		jbd2_journal_flush(journal);
469212e9b892SDmitry Monakhov 		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
46935872ddaaSYongqiang Yang 	}
4694617ba13bSMingming Cao 	ext4_set_aops(inode);
4695ac27a0ecSDave Kleikamp 
4696dab291afSMingming Cao 	jbd2_journal_unlock_updates(journal);
469717335dccSDmitry Monakhov 	ext4_inode_resume_unlocked_dio(inode);
4698ac27a0ecSDave Kleikamp 
4699ac27a0ecSDave Kleikamp 	/* Finally we can mark the inode as dirty. */
4700ac27a0ecSDave Kleikamp 
4701617ba13bSMingming Cao 	handle = ext4_journal_start(inode, 1);
4702ac27a0ecSDave Kleikamp 	if (IS_ERR(handle))
4703ac27a0ecSDave Kleikamp 		return PTR_ERR(handle);
4704ac27a0ecSDave Kleikamp 
4705617ba13bSMingming Cao 	err = ext4_mark_inode_dirty(handle, inode);
47060390131bSFrank Mayhar 	ext4_handle_sync(handle);
4707617ba13bSMingming Cao 	ext4_journal_stop(handle);
4708617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4709ac27a0ecSDave Kleikamp 
4710ac27a0ecSDave Kleikamp 	return err;
4711ac27a0ecSDave Kleikamp }
47122e9ee850SAneesh Kumar K.V 
47132e9ee850SAneesh Kumar K.V static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
47142e9ee850SAneesh Kumar K.V {
47152e9ee850SAneesh Kumar K.V 	return !buffer_mapped(bh);
47162e9ee850SAneesh Kumar K.V }
47172e9ee850SAneesh Kumar K.V 
4718c2ec175cSNick Piggin int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
47192e9ee850SAneesh Kumar K.V {
4720c2ec175cSNick Piggin 	struct page *page = vmf->page;
47212e9ee850SAneesh Kumar K.V 	loff_t size;
47222e9ee850SAneesh Kumar K.V 	unsigned long len;
47239ea7df53SJan Kara 	int ret;
47242e9ee850SAneesh Kumar K.V 	struct file *file = vma->vm_file;
47252e9ee850SAneesh Kumar K.V 	struct inode *inode = file->f_path.dentry->d_inode;
47262e9ee850SAneesh Kumar K.V 	struct address_space *mapping = inode->i_mapping;
47279ea7df53SJan Kara 	handle_t *handle;
47289ea7df53SJan Kara 	get_block_t *get_block;
47299ea7df53SJan Kara 	int retries = 0;
47302e9ee850SAneesh Kumar K.V 
47318e8ad8a5SJan Kara 	sb_start_pagefault(inode->i_sb);
4732041bbb6dSTheodore Ts'o 	file_update_time(vma->vm_file);
47339ea7df53SJan Kara 	/* Delalloc case is easy... */
47349ea7df53SJan Kara 	if (test_opt(inode->i_sb, DELALLOC) &&
47359ea7df53SJan Kara 	    !ext4_should_journal_data(inode) &&
47369ea7df53SJan Kara 	    !ext4_nonda_switch(inode->i_sb)) {
47379ea7df53SJan Kara 		do {
47389ea7df53SJan Kara 			ret = __block_page_mkwrite(vma, vmf,
47399ea7df53SJan Kara 						   ext4_da_get_block_prep);
47409ea7df53SJan Kara 		} while (ret == -ENOSPC &&
47419ea7df53SJan Kara 		       ext4_should_retry_alloc(inode->i_sb, &retries));
47429ea7df53SJan Kara 		goto out_ret;
47432e9ee850SAneesh Kumar K.V 	}
47440e499890SDarrick J. Wong 
47450e499890SDarrick J. Wong 	lock_page(page);
47469ea7df53SJan Kara 	size = i_size_read(inode);
47479ea7df53SJan Kara 	/* Page got truncated from under us? */
47489ea7df53SJan Kara 	if (page->mapping != mapping || page_offset(page) > size) {
47499ea7df53SJan Kara 		unlock_page(page);
47509ea7df53SJan Kara 		ret = VM_FAULT_NOPAGE;
47519ea7df53SJan Kara 		goto out;
47520e499890SDarrick J. Wong 	}
47532e9ee850SAneesh Kumar K.V 
47542e9ee850SAneesh Kumar K.V 	if (page->index == size >> PAGE_CACHE_SHIFT)
47552e9ee850SAneesh Kumar K.V 		len = size & ~PAGE_CACHE_MASK;
47562e9ee850SAneesh Kumar K.V 	else
47572e9ee850SAneesh Kumar K.V 		len = PAGE_CACHE_SIZE;
4758a827eaffSAneesh Kumar K.V 	/*
47599ea7df53SJan Kara 	 * Return if we have all the buffers mapped. This avoids the need to do
47609ea7df53SJan Kara 	 * journal_start/journal_stop which can block and take a long time
4761a827eaffSAneesh Kumar K.V 	 */
47622e9ee850SAneesh Kumar K.V 	if (page_has_buffers(page)) {
47632e9ee850SAneesh Kumar K.V 		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
4764a827eaffSAneesh Kumar K.V 					ext4_bh_unmapped)) {
47659ea7df53SJan Kara 			/* Wait so that we don't change page under IO */
47669ea7df53SJan Kara 			wait_on_page_writeback(page);
47679ea7df53SJan Kara 			ret = VM_FAULT_LOCKED;
47689ea7df53SJan Kara 			goto out;
47692e9ee850SAneesh Kumar K.V 		}
4770a827eaffSAneesh Kumar K.V 	}
4771a827eaffSAneesh Kumar K.V 	unlock_page(page);
47729ea7df53SJan Kara 	/* OK, we need to fill the hole... */
47739ea7df53SJan Kara 	if (ext4_should_dioread_nolock(inode))
47749ea7df53SJan Kara 		get_block = ext4_get_block_write;
47759ea7df53SJan Kara 	else
47769ea7df53SJan Kara 		get_block = ext4_get_block;
47779ea7df53SJan Kara retry_alloc:
47789ea7df53SJan Kara 	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
47799ea7df53SJan Kara 	if (IS_ERR(handle)) {
4780c2ec175cSNick Piggin 		ret = VM_FAULT_SIGBUS;
47819ea7df53SJan Kara 		goto out;
47829ea7df53SJan Kara 	}
47839ea7df53SJan Kara 	ret = __block_page_mkwrite(vma, vmf, get_block);
47849ea7df53SJan Kara 	if (!ret && ext4_should_journal_data(inode)) {
47859ea7df53SJan Kara 		if (walk_page_buffers(handle, page_buffers(page), 0,
47869ea7df53SJan Kara 			  PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
47879ea7df53SJan Kara 			unlock_page(page);
47889ea7df53SJan Kara 			ret = VM_FAULT_SIGBUS;
4789fcbb5515SYongqiang Yang 			ext4_journal_stop(handle);
47909ea7df53SJan Kara 			goto out;
47919ea7df53SJan Kara 		}
47929ea7df53SJan Kara 		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
47939ea7df53SJan Kara 	}
47949ea7df53SJan Kara 	ext4_journal_stop(handle);
47959ea7df53SJan Kara 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
47969ea7df53SJan Kara 		goto retry_alloc;
47979ea7df53SJan Kara out_ret:
47989ea7df53SJan Kara 	ret = block_page_mkwrite_return(ret);
47999ea7df53SJan Kara out:
48008e8ad8a5SJan Kara 	sb_end_pagefault(inode->i_sb);
48012e9ee850SAneesh Kumar K.V 	return ret;
48022e9ee850SAneesh Kumar K.V }
4803