xref: /openbmc/linux/fs/ext4/inode.c (revision b34e08d5)
1 /*
2  *  linux/fs/ext4/inode.c
3  *
4  * Copyright (C) 1992, 1993, 1994, 1995
5  * Remy Card (card@masi.ibp.fr)
6  * Laboratoire MASI - Institut Blaise Pascal
7  * Universite Pierre et Marie Curie (Paris VI)
8  *
9  *  from
10  *
11  *  linux/fs/minix/inode.c
12  *
13  *  Copyright (C) 1991, 1992  Linus Torvalds
14  *
15  *  64-bit file support on 64-bit platforms by Jakub Jelinek
16  *	(jj@sunsite.ms.mff.cuni.cz)
17  *
18  *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
19  */
20 
21 #include <linux/fs.h>
22 #include <linux/time.h>
23 #include <linux/jbd2.h>
24 #include <linux/highuid.h>
25 #include <linux/pagemap.h>
26 #include <linux/quotaops.h>
27 #include <linux/string.h>
28 #include <linux/buffer_head.h>
29 #include <linux/writeback.h>
30 #include <linux/pagevec.h>
31 #include <linux/mpage.h>
32 #include <linux/namei.h>
33 #include <linux/uio.h>
34 #include <linux/bio.h>
35 #include <linux/workqueue.h>
36 #include <linux/kernel.h>
37 #include <linux/printk.h>
38 #include <linux/slab.h>
39 #include <linux/ratelimit.h>
40 #include <linux/aio.h>
41 #include <linux/bitops.h>
42 
43 #include "ext4_jbd2.h"
44 #include "xattr.h"
45 #include "acl.h"
46 #include "truncate.h"
47 
48 #include <trace/events/ext4.h>
49 
50 #define MPAGE_DA_EXTENT_TAIL 0x01
51 
52 static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
53 			      struct ext4_inode_info *ei)
54 {
55 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
56 	__u16 csum_lo;
57 	__u16 csum_hi = 0;
58 	__u32 csum;
59 
60 	csum_lo = le16_to_cpu(raw->i_checksum_lo);
61 	raw->i_checksum_lo = 0;
62 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
63 	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
64 		csum_hi = le16_to_cpu(raw->i_checksum_hi);
65 		raw->i_checksum_hi = 0;
66 	}
67 
68 	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
69 			   EXT4_INODE_SIZE(inode->i_sb));
70 
71 	raw->i_checksum_lo = cpu_to_le16(csum_lo);
72 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
73 	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
74 		raw->i_checksum_hi = cpu_to_le16(csum_hi);
75 
76 	return csum;
77 }
78 
79 static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
80 				  struct ext4_inode_info *ei)
81 {
82 	__u32 provided, calculated;
83 
84 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
85 	    cpu_to_le32(EXT4_OS_LINUX) ||
86 	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
87 		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
88 		return 1;
89 
90 	provided = le16_to_cpu(raw->i_checksum_lo);
91 	calculated = ext4_inode_csum(inode, raw, ei);
92 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
93 	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
94 		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
95 	else
96 		calculated &= 0xFFFF;
97 
98 	return provided == calculated;
99 }
100 
101 static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
102 				struct ext4_inode_info *ei)
103 {
104 	__u32 csum;
105 
106 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
107 	    cpu_to_le32(EXT4_OS_LINUX) ||
108 	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
109 		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
110 		return;
111 
112 	csum = ext4_inode_csum(inode, raw, ei);
113 	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
114 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
115 	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
116 		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
117 }
118 
119 static inline int ext4_begin_ordered_truncate(struct inode *inode,
120 					      loff_t new_size)
121 {
122 	trace_ext4_begin_ordered_truncate(inode, new_size);
123 	/*
124 	 * If jinode is zero, then we never opened the file for
125 	 * writing, so there's no need to call
126 	 * jbd2_journal_begin_ordered_truncate() since there's no
127 	 * outstanding writes we need to flush.
128 	 */
129 	if (!EXT4_I(inode)->jinode)
130 		return 0;
131 	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
132 						   EXT4_I(inode)->jinode,
133 						   new_size);
134 }
135 
136 static void ext4_invalidatepage(struct page *page, unsigned int offset,
137 				unsigned int length);
138 static int __ext4_journalled_writepage(struct page *page, unsigned int len);
139 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
140 static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
141 				  int pextents);
142 
143 /*
144  * Test whether an inode is a fast symlink.
145  */
146 static int ext4_inode_is_fast_symlink(struct inode *inode)
147 {
148         int ea_blocks = EXT4_I(inode)->i_file_acl ?
149 		EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
150 
151 	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
152 }
153 
154 /*
155  * Restart the transaction associated with *handle.  This does a commit,
156  * so before we call here everything must be consistently dirtied against
157  * this transaction.
158  */
159 int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
160 				 int nblocks)
161 {
162 	int ret;
163 
164 	/*
165 	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
166 	 * moment, get_block can be called only for blocks inside i_size since
167 	 * page cache has been already dropped and writes are blocked by
168 	 * i_mutex. So we can safely drop the i_data_sem here.
169 	 */
170 	BUG_ON(EXT4_JOURNAL(inode) == NULL);
171 	jbd_debug(2, "restarting handle %p\n", handle);
172 	up_write(&EXT4_I(inode)->i_data_sem);
173 	ret = ext4_journal_restart(handle, nblocks);
174 	down_write(&EXT4_I(inode)->i_data_sem);
175 	ext4_discard_preallocations(inode);
176 
177 	return ret;
178 }
179 
180 /*
181  * Called at the last iput() if i_nlink is zero.
182  */
183 void ext4_evict_inode(struct inode *inode)
184 {
185 	handle_t *handle;
186 	int err;
187 
188 	trace_ext4_evict_inode(inode);
189 
190 	if (inode->i_nlink) {
191 		/*
192 		 * When journalling data dirty buffers are tracked only in the
193 		 * journal. So although mm thinks everything is clean and
194 		 * ready for reaping the inode might still have some pages to
195 		 * write in the running transaction or waiting to be
196 		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
197 		 * (via truncate_inode_pages()) to discard these buffers can
198 		 * cause data loss. Also even if we did not discard these
199 		 * buffers, we would have no way to find them after the inode
200 		 * is reaped and thus user could see stale data if he tries to
201 		 * read them before the transaction is checkpointed. So be
202 		 * careful and force everything to disk here... We use
203 		 * ei->i_datasync_tid to store the newest transaction
204 		 * containing inode's data.
205 		 *
206 		 * Note that directories do not have this problem because they
207 		 * don't use page cache.
208 		 */
209 		if (ext4_should_journal_data(inode) &&
210 		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
211 		    inode->i_ino != EXT4_JOURNAL_INO) {
212 			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
213 			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
214 
215 			jbd2_complete_transaction(journal, commit_tid);
216 			filemap_write_and_wait(&inode->i_data);
217 		}
218 		truncate_inode_pages_final(&inode->i_data);
219 
220 		WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));
221 		goto no_delete;
222 	}
223 
224 	if (!is_bad_inode(inode))
225 		dquot_initialize(inode);
226 
227 	if (ext4_should_order_data(inode))
228 		ext4_begin_ordered_truncate(inode, 0);
229 	truncate_inode_pages_final(&inode->i_data);
230 
231 	WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));
232 	if (is_bad_inode(inode))
233 		goto no_delete;
234 
235 	/*
236 	 * Protect us against freezing - iput() caller didn't have to have any
237 	 * protection against it
238 	 */
239 	sb_start_intwrite(inode->i_sb);
240 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
241 				    ext4_blocks_for_truncate(inode)+3);
242 	if (IS_ERR(handle)) {
243 		ext4_std_error(inode->i_sb, PTR_ERR(handle));
244 		/*
245 		 * If we're going to skip the normal cleanup, we still need to
246 		 * make sure that the in-core orphan linked list is properly
247 		 * cleaned up.
248 		 */
249 		ext4_orphan_del(NULL, inode);
250 		sb_end_intwrite(inode->i_sb);
251 		goto no_delete;
252 	}
253 
254 	if (IS_SYNC(inode))
255 		ext4_handle_sync(handle);
256 	inode->i_size = 0;
257 	err = ext4_mark_inode_dirty(handle, inode);
258 	if (err) {
259 		ext4_warning(inode->i_sb,
260 			     "couldn't mark inode dirty (err %d)", err);
261 		goto stop_handle;
262 	}
263 	if (inode->i_blocks)
264 		ext4_truncate(inode);
265 
266 	/*
267 	 * ext4_ext_truncate() doesn't reserve any slop when it
268 	 * restarts journal transactions; therefore there may not be
269 	 * enough credits left in the handle to remove the inode from
270 	 * the orphan list and set the dtime field.
271 	 */
272 	if (!ext4_handle_has_enough_credits(handle, 3)) {
273 		err = ext4_journal_extend(handle, 3);
274 		if (err > 0)
275 			err = ext4_journal_restart(handle, 3);
276 		if (err != 0) {
277 			ext4_warning(inode->i_sb,
278 				     "couldn't extend journal (err %d)", err);
279 		stop_handle:
280 			ext4_journal_stop(handle);
281 			ext4_orphan_del(NULL, inode);
282 			sb_end_intwrite(inode->i_sb);
283 			goto no_delete;
284 		}
285 	}
286 
287 	/*
288 	 * Kill off the orphan record which ext4_truncate created.
289 	 * AKPM: I think this can be inside the above `if'.
290 	 * Note that ext4_orphan_del() has to be able to cope with the
291 	 * deletion of a non-existent orphan - this is because we don't
292 	 * know if ext4_truncate() actually created an orphan record.
293 	 * (Well, we could do this if we need to, but heck - it works)
294 	 */
295 	ext4_orphan_del(handle, inode);
296 	EXT4_I(inode)->i_dtime	= get_seconds();
297 
298 	/*
299 	 * One subtle ordering requirement: if anything has gone wrong
300 	 * (transaction abort, IO errors, whatever), then we can still
301 	 * do these next steps (the fs will already have been marked as
302 	 * having errors), but we can't free the inode if the mark_dirty
303 	 * fails.
304 	 */
305 	if (ext4_mark_inode_dirty(handle, inode))
306 		/* If that failed, just do the required in-core inode clear. */
307 		ext4_clear_inode(inode);
308 	else
309 		ext4_free_inode(handle, inode);
310 	ext4_journal_stop(handle);
311 	sb_end_intwrite(inode->i_sb);
312 	return;
313 no_delete:
314 	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
315 }
316 
317 #ifdef CONFIG_QUOTA
318 qsize_t *ext4_get_reserved_space(struct inode *inode)
319 {
320 	return &EXT4_I(inode)->i_reserved_quota;
321 }
322 #endif
323 
324 /*
325  * Calculate the number of metadata blocks need to reserve
326  * to allocate a block located at @lblock
327  */
328 static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
329 {
330 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
331 		return ext4_ext_calc_metadata_amount(inode, lblock);
332 
333 	return ext4_ind_calc_metadata_amount(inode, lblock);
334 }
335 
336 /*
337  * Called with i_data_sem down, which is important since we can call
338  * ext4_discard_preallocations() from here.
339  */
340 void ext4_da_update_reserve_space(struct inode *inode,
341 					int used, int quota_claim)
342 {
343 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
344 	struct ext4_inode_info *ei = EXT4_I(inode);
345 
346 	spin_lock(&ei->i_block_reservation_lock);
347 	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
348 	if (unlikely(used > ei->i_reserved_data_blocks)) {
349 		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
350 			 "with only %d reserved data blocks",
351 			 __func__, inode->i_ino, used,
352 			 ei->i_reserved_data_blocks);
353 		WARN_ON(1);
354 		used = ei->i_reserved_data_blocks;
355 	}
356 
357 	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
358 		ext4_warning(inode->i_sb, "ino %lu, allocated %d "
359 			"with only %d reserved metadata blocks "
360 			"(releasing %d blocks with reserved %d data blocks)",
361 			inode->i_ino, ei->i_allocated_meta_blocks,
362 			     ei->i_reserved_meta_blocks, used,
363 			     ei->i_reserved_data_blocks);
364 		WARN_ON(1);
365 		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
366 	}
367 
368 	/* Update per-inode reservations */
369 	ei->i_reserved_data_blocks -= used;
370 	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
371 	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
372 			   used + ei->i_allocated_meta_blocks);
373 	ei->i_allocated_meta_blocks = 0;
374 
375 	if (ei->i_reserved_data_blocks == 0) {
376 		/*
377 		 * We can release all of the reserved metadata blocks
378 		 * only when we have written all of the delayed
379 		 * allocation blocks.
380 		 */
381 		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
382 				   ei->i_reserved_meta_blocks);
383 		ei->i_reserved_meta_blocks = 0;
384 		ei->i_da_metadata_calc_len = 0;
385 	}
386 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
387 
388 	/* Update quota subsystem for data blocks */
389 	if (quota_claim)
390 		dquot_claim_block(inode, EXT4_C2B(sbi, used));
391 	else {
392 		/*
393 		 * We did fallocate with an offset that is already delayed
394 		 * allocated. So on delayed allocated writeback we should
395 		 * not re-claim the quota for fallocated blocks.
396 		 */
397 		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
398 	}
399 
400 	/*
401 	 * If we have done all the pending block allocations and if
402 	 * there aren't any writers on the inode, we can discard the
403 	 * inode's preallocations.
404 	 */
405 	if ((ei->i_reserved_data_blocks == 0) &&
406 	    (atomic_read(&inode->i_writecount) == 0))
407 		ext4_discard_preallocations(inode);
408 }
409 
410 static int __check_block_validity(struct inode *inode, const char *func,
411 				unsigned int line,
412 				struct ext4_map_blocks *map)
413 {
414 	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
415 				   map->m_len)) {
416 		ext4_error_inode(inode, func, line, map->m_pblk,
417 				 "lblock %lu mapped to illegal pblock "
418 				 "(length %d)", (unsigned long) map->m_lblk,
419 				 map->m_len);
420 		return -EIO;
421 	}
422 	return 0;
423 }
424 
425 #define check_block_validity(inode, map)	\
426 	__check_block_validity((inode), __func__, __LINE__, (map))
427 
428 #ifdef ES_AGGRESSIVE_TEST
429 static void ext4_map_blocks_es_recheck(handle_t *handle,
430 				       struct inode *inode,
431 				       struct ext4_map_blocks *es_map,
432 				       struct ext4_map_blocks *map,
433 				       int flags)
434 {
435 	int retval;
436 
437 	map->m_flags = 0;
438 	/*
439 	 * There is a race window that the result is not the same.
440 	 * e.g. xfstests #223 when dioread_nolock enables.  The reason
441 	 * is that we lookup a block mapping in extent status tree with
442 	 * out taking i_data_sem.  So at the time the unwritten extent
443 	 * could be converted.
444 	 */
445 	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
446 		down_read((&EXT4_I(inode)->i_data_sem));
447 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
448 		retval = ext4_ext_map_blocks(handle, inode, map, flags &
449 					     EXT4_GET_BLOCKS_KEEP_SIZE);
450 	} else {
451 		retval = ext4_ind_map_blocks(handle, inode, map, flags &
452 					     EXT4_GET_BLOCKS_KEEP_SIZE);
453 	}
454 	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
455 		up_read((&EXT4_I(inode)->i_data_sem));
456 	/*
457 	 * Clear EXT4_MAP_FROM_CLUSTER and EXT4_MAP_BOUNDARY flag
458 	 * because it shouldn't be marked in es_map->m_flags.
459 	 */
460 	map->m_flags &= ~(EXT4_MAP_FROM_CLUSTER | EXT4_MAP_BOUNDARY);
461 
462 	/*
463 	 * We don't check m_len because extent will be collpased in status
464 	 * tree.  So the m_len might not equal.
465 	 */
466 	if (es_map->m_lblk != map->m_lblk ||
467 	    es_map->m_flags != map->m_flags ||
468 	    es_map->m_pblk != map->m_pblk) {
469 		printk("ES cache assertion failed for inode: %lu "
470 		       "es_cached ex [%d/%d/%llu/%x] != "
471 		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
472 		       inode->i_ino, es_map->m_lblk, es_map->m_len,
473 		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
474 		       map->m_len, map->m_pblk, map->m_flags,
475 		       retval, flags);
476 	}
477 }
478 #endif /* ES_AGGRESSIVE_TEST */
479 
480 /*
481  * The ext4_map_blocks() function tries to look up the requested blocks,
482  * and returns if the blocks are already mapped.
483  *
484  * Otherwise it takes the write lock of the i_data_sem and allocate blocks
485  * and store the allocated blocks in the result buffer head and mark it
486  * mapped.
487  *
488  * If file type is extents based, it will call ext4_ext_map_blocks(),
489  * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
490  * based files
491  *
492  * On success, it returns the number of blocks being mapped or allocate.
493  * if create==0 and the blocks are pre-allocated and uninitialized block,
494  * the result buffer head is unmapped. If the create ==1, it will make sure
495  * the buffer head is mapped.
496  *
497  * It returns 0 if plain look up failed (blocks have not been allocated), in
498  * that case, buffer head is unmapped
499  *
500  * It returns the error in case of allocation failure.
501  */
502 int ext4_map_blocks(handle_t *handle, struct inode *inode,
503 		    struct ext4_map_blocks *map, int flags)
504 {
505 	struct extent_status es;
506 	int retval;
507 	int ret = 0;
508 #ifdef ES_AGGRESSIVE_TEST
509 	struct ext4_map_blocks orig_map;
510 
511 	memcpy(&orig_map, map, sizeof(*map));
512 #endif
513 
514 	map->m_flags = 0;
515 	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
516 		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
517 		  (unsigned long) map->m_lblk);
518 
519 	/*
520 	 * ext4_map_blocks returns an int, and m_len is an unsigned int
521 	 */
522 	if (unlikely(map->m_len > INT_MAX))
523 		map->m_len = INT_MAX;
524 
525 	/* Lookup extent status tree firstly */
526 	if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
527 		ext4_es_lru_add(inode);
528 		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
529 			map->m_pblk = ext4_es_pblock(&es) +
530 					map->m_lblk - es.es_lblk;
531 			map->m_flags |= ext4_es_is_written(&es) ?
532 					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
533 			retval = es.es_len - (map->m_lblk - es.es_lblk);
534 			if (retval > map->m_len)
535 				retval = map->m_len;
536 			map->m_len = retval;
537 		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
538 			retval = 0;
539 		} else {
540 			BUG_ON(1);
541 		}
542 #ifdef ES_AGGRESSIVE_TEST
543 		ext4_map_blocks_es_recheck(handle, inode, map,
544 					   &orig_map, flags);
545 #endif
546 		goto found;
547 	}
548 
549 	/*
550 	 * Try to see if we can get the block without requesting a new
551 	 * file system block.
552 	 */
553 	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
554 		down_read((&EXT4_I(inode)->i_data_sem));
555 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
556 		retval = ext4_ext_map_blocks(handle, inode, map, flags &
557 					     EXT4_GET_BLOCKS_KEEP_SIZE);
558 	} else {
559 		retval = ext4_ind_map_blocks(handle, inode, map, flags &
560 					     EXT4_GET_BLOCKS_KEEP_SIZE);
561 	}
562 	if (retval > 0) {
563 		unsigned int status;
564 
565 		if (unlikely(retval != map->m_len)) {
566 			ext4_warning(inode->i_sb,
567 				     "ES len assertion failed for inode "
568 				     "%lu: retval %d != map->m_len %d",
569 				     inode->i_ino, retval, map->m_len);
570 			WARN_ON(1);
571 		}
572 
573 		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
574 				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
575 		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
576 		    ext4_find_delalloc_range(inode, map->m_lblk,
577 					     map->m_lblk + map->m_len - 1))
578 			status |= EXTENT_STATUS_DELAYED;
579 		ret = ext4_es_insert_extent(inode, map->m_lblk,
580 					    map->m_len, map->m_pblk, status);
581 		if (ret < 0)
582 			retval = ret;
583 	}
584 	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
585 		up_read((&EXT4_I(inode)->i_data_sem));
586 
587 found:
588 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
589 		ret = check_block_validity(inode, map);
590 		if (ret != 0)
591 			return ret;
592 	}
593 
594 	/* If it is only a block(s) look up */
595 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
596 		return retval;
597 
598 	/*
599 	 * Returns if the blocks have already allocated
600 	 *
601 	 * Note that if blocks have been preallocated
602 	 * ext4_ext_get_block() returns the create = 0
603 	 * with buffer head unmapped.
604 	 */
605 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
606 		/*
607 		 * If we need to convert extent to unwritten
608 		 * we continue and do the actual work in
609 		 * ext4_ext_map_blocks()
610 		 */
611 		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
612 			return retval;
613 
614 	/*
615 	 * Here we clear m_flags because after allocating an new extent,
616 	 * it will be set again.
617 	 */
618 	map->m_flags &= ~EXT4_MAP_FLAGS;
619 
620 	/*
621 	 * New blocks allocate and/or writing to uninitialized extent
622 	 * will possibly result in updating i_data, so we take
623 	 * the write lock of i_data_sem, and call get_blocks()
624 	 * with create == 1 flag.
625 	 */
626 	down_write((&EXT4_I(inode)->i_data_sem));
627 
628 	/*
629 	 * if the caller is from delayed allocation writeout path
630 	 * we have already reserved fs blocks for allocation
631 	 * let the underlying get_block() function know to
632 	 * avoid double accounting
633 	 */
634 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
635 		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
636 	/*
637 	 * We need to check for EXT4 here because migrate
638 	 * could have changed the inode type in between
639 	 */
640 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
641 		retval = ext4_ext_map_blocks(handle, inode, map, flags);
642 	} else {
643 		retval = ext4_ind_map_blocks(handle, inode, map, flags);
644 
645 		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
646 			/*
647 			 * We allocated new blocks which will result in
648 			 * i_data's format changing.  Force the migrate
649 			 * to fail by clearing migrate flags
650 			 */
651 			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
652 		}
653 
654 		/*
655 		 * Update reserved blocks/metadata blocks after successful
656 		 * block allocation which had been deferred till now. We don't
657 		 * support fallocate for non extent files. So we can update
658 		 * reserve space here.
659 		 */
660 		if ((retval > 0) &&
661 			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
662 			ext4_da_update_reserve_space(inode, retval, 1);
663 	}
664 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
665 		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
666 
667 	if (retval > 0) {
668 		unsigned int status;
669 
670 		if (unlikely(retval != map->m_len)) {
671 			ext4_warning(inode->i_sb,
672 				     "ES len assertion failed for inode "
673 				     "%lu: retval %d != map->m_len %d",
674 				     inode->i_ino, retval, map->m_len);
675 			WARN_ON(1);
676 		}
677 
678 		/*
679 		 * If the extent has been zeroed out, we don't need to update
680 		 * extent status tree.
681 		 */
682 		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
683 		    ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
684 			if (ext4_es_is_written(&es))
685 				goto has_zeroout;
686 		}
687 		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
688 				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
689 		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
690 		    ext4_find_delalloc_range(inode, map->m_lblk,
691 					     map->m_lblk + map->m_len - 1))
692 			status |= EXTENT_STATUS_DELAYED;
693 		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
694 					    map->m_pblk, status);
695 		if (ret < 0)
696 			retval = ret;
697 	}
698 
699 has_zeroout:
700 	up_write((&EXT4_I(inode)->i_data_sem));
701 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
702 		ret = check_block_validity(inode, map);
703 		if (ret != 0)
704 			return ret;
705 	}
706 	return retval;
707 }
708 
709 /* Maximum number of blocks we map for direct IO at once. */
710 #define DIO_MAX_BLOCKS 4096
711 
712 static int _ext4_get_block(struct inode *inode, sector_t iblock,
713 			   struct buffer_head *bh, int flags)
714 {
715 	handle_t *handle = ext4_journal_current_handle();
716 	struct ext4_map_blocks map;
717 	int ret = 0, started = 0;
718 	int dio_credits;
719 
720 	if (ext4_has_inline_data(inode))
721 		return -ERANGE;
722 
723 	map.m_lblk = iblock;
724 	map.m_len = bh->b_size >> inode->i_blkbits;
725 
726 	if (flags && !(flags & EXT4_GET_BLOCKS_NO_LOCK) && !handle) {
727 		/* Direct IO write... */
728 		if (map.m_len > DIO_MAX_BLOCKS)
729 			map.m_len = DIO_MAX_BLOCKS;
730 		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
731 		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
732 					    dio_credits);
733 		if (IS_ERR(handle)) {
734 			ret = PTR_ERR(handle);
735 			return ret;
736 		}
737 		started = 1;
738 	}
739 
740 	ret = ext4_map_blocks(handle, inode, &map, flags);
741 	if (ret > 0) {
742 		ext4_io_end_t *io_end = ext4_inode_aio(inode);
743 
744 		map_bh(bh, inode->i_sb, map.m_pblk);
745 		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
746 		if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN)
747 			set_buffer_defer_completion(bh);
748 		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
749 		ret = 0;
750 	}
751 	if (started)
752 		ext4_journal_stop(handle);
753 	return ret;
754 }
755 
756 int ext4_get_block(struct inode *inode, sector_t iblock,
757 		   struct buffer_head *bh, int create)
758 {
759 	return _ext4_get_block(inode, iblock, bh,
760 			       create ? EXT4_GET_BLOCKS_CREATE : 0);
761 }
762 
763 /*
764  * `handle' can be NULL if create is zero
765  */
766 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
767 				ext4_lblk_t block, int create, int *errp)
768 {
769 	struct ext4_map_blocks map;
770 	struct buffer_head *bh;
771 	int fatal = 0, err;
772 
773 	J_ASSERT(handle != NULL || create == 0);
774 
775 	map.m_lblk = block;
776 	map.m_len = 1;
777 	err = ext4_map_blocks(handle, inode, &map,
778 			      create ? EXT4_GET_BLOCKS_CREATE : 0);
779 
780 	/* ensure we send some value back into *errp */
781 	*errp = 0;
782 
783 	if (create && err == 0)
784 		err = -ENOSPC;	/* should never happen */
785 	if (err < 0)
786 		*errp = err;
787 	if (err <= 0)
788 		return NULL;
789 
790 	bh = sb_getblk(inode->i_sb, map.m_pblk);
791 	if (unlikely(!bh)) {
792 		*errp = -ENOMEM;
793 		return NULL;
794 	}
795 	if (map.m_flags & EXT4_MAP_NEW) {
796 		J_ASSERT(create != 0);
797 		J_ASSERT(handle != NULL);
798 
799 		/*
800 		 * Now that we do not always journal data, we should
801 		 * keep in mind whether this should always journal the
802 		 * new buffer as metadata.  For now, regular file
803 		 * writes use ext4_get_block instead, so it's not a
804 		 * problem.
805 		 */
806 		lock_buffer(bh);
807 		BUFFER_TRACE(bh, "call get_create_access");
808 		fatal = ext4_journal_get_create_access(handle, bh);
809 		if (!fatal && !buffer_uptodate(bh)) {
810 			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
811 			set_buffer_uptodate(bh);
812 		}
813 		unlock_buffer(bh);
814 		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
815 		err = ext4_handle_dirty_metadata(handle, inode, bh);
816 		if (!fatal)
817 			fatal = err;
818 	} else {
819 		BUFFER_TRACE(bh, "not a new buffer");
820 	}
821 	if (fatal) {
822 		*errp = fatal;
823 		brelse(bh);
824 		bh = NULL;
825 	}
826 	return bh;
827 }
828 
829 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
830 			       ext4_lblk_t block, int create, int *err)
831 {
832 	struct buffer_head *bh;
833 
834 	bh = ext4_getblk(handle, inode, block, create, err);
835 	if (!bh)
836 		return bh;
837 	if (buffer_uptodate(bh))
838 		return bh;
839 	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
840 	wait_on_buffer(bh);
841 	if (buffer_uptodate(bh))
842 		return bh;
843 	put_bh(bh);
844 	*err = -EIO;
845 	return NULL;
846 }
847 
848 int ext4_walk_page_buffers(handle_t *handle,
849 			   struct buffer_head *head,
850 			   unsigned from,
851 			   unsigned to,
852 			   int *partial,
853 			   int (*fn)(handle_t *handle,
854 				     struct buffer_head *bh))
855 {
856 	struct buffer_head *bh;
857 	unsigned block_start, block_end;
858 	unsigned blocksize = head->b_size;
859 	int err, ret = 0;
860 	struct buffer_head *next;
861 
862 	for (bh = head, block_start = 0;
863 	     ret == 0 && (bh != head || !block_start);
864 	     block_start = block_end, bh = next) {
865 		next = bh->b_this_page;
866 		block_end = block_start + blocksize;
867 		if (block_end <= from || block_start >= to) {
868 			if (partial && !buffer_uptodate(bh))
869 				*partial = 1;
870 			continue;
871 		}
872 		err = (*fn)(handle, bh);
873 		if (!ret)
874 			ret = err;
875 	}
876 	return ret;
877 }
878 
879 /*
880  * To preserve ordering, it is essential that the hole instantiation and
881  * the data write be encapsulated in a single transaction.  We cannot
882  * close off a transaction and start a new one between the ext4_get_block()
883  * and the commit_write().  So doing the jbd2_journal_start at the start of
884  * prepare_write() is the right place.
885  *
886  * Also, this function can nest inside ext4_writepage().  In that case, we
887  * *know* that ext4_writepage() has generated enough buffer credits to do the
888  * whole page.  So we won't block on the journal in that case, which is good,
889  * because the caller may be PF_MEMALLOC.
890  *
891  * By accident, ext4 can be reentered when a transaction is open via
892  * quota file writes.  If we were to commit the transaction while thus
893  * reentered, there can be a deadlock - we would be holding a quota
894  * lock, and the commit would never complete if another thread had a
895  * transaction open and was blocking on the quota lock - a ranking
896  * violation.
897  *
898  * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
899  * will _not_ run commit under these circumstances because handle->h_ref
900  * is elevated.  We'll still have enough credits for the tiny quotafile
901  * write.
902  */
903 int do_journal_get_write_access(handle_t *handle,
904 				struct buffer_head *bh)
905 {
906 	int dirty = buffer_dirty(bh);
907 	int ret;
908 
909 	if (!buffer_mapped(bh) || buffer_freed(bh))
910 		return 0;
911 	/*
912 	 * __block_write_begin() could have dirtied some buffers. Clean
913 	 * the dirty bit as jbd2_journal_get_write_access() could complain
914 	 * otherwise about fs integrity issues. Setting of the dirty bit
915 	 * by __block_write_begin() isn't a real problem here as we clear
916 	 * the bit before releasing a page lock and thus writeback cannot
917 	 * ever write the buffer.
918 	 */
919 	if (dirty)
920 		clear_buffer_dirty(bh);
921 	ret = ext4_journal_get_write_access(handle, bh);
922 	if (!ret && dirty)
923 		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
924 	return ret;
925 }
926 
927 static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
928 		   struct buffer_head *bh_result, int create);
929 static int ext4_write_begin(struct file *file, struct address_space *mapping,
930 			    loff_t pos, unsigned len, unsigned flags,
931 			    struct page **pagep, void **fsdata)
932 {
933 	struct inode *inode = mapping->host;
934 	int ret, needed_blocks;
935 	handle_t *handle;
936 	int retries = 0;
937 	struct page *page;
938 	pgoff_t index;
939 	unsigned from, to;
940 
941 	trace_ext4_write_begin(inode, pos, len, flags);
942 	/*
943 	 * Reserve one block more for addition to orphan list in case
944 	 * we allocate blocks but write fails for some reason
945 	 */
946 	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
947 	index = pos >> PAGE_CACHE_SHIFT;
948 	from = pos & (PAGE_CACHE_SIZE - 1);
949 	to = from + len;
950 
951 	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
952 		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
953 						    flags, pagep);
954 		if (ret < 0)
955 			return ret;
956 		if (ret == 1)
957 			return 0;
958 	}
959 
960 	/*
961 	 * grab_cache_page_write_begin() can take a long time if the
962 	 * system is thrashing due to memory pressure, or if the page
963 	 * is being written back.  So grab it first before we start
964 	 * the transaction handle.  This also allows us to allocate
965 	 * the page (if needed) without using GFP_NOFS.
966 	 */
967 retry_grab:
968 	page = grab_cache_page_write_begin(mapping, index, flags);
969 	if (!page)
970 		return -ENOMEM;
971 	unlock_page(page);
972 
973 retry_journal:
974 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
975 	if (IS_ERR(handle)) {
976 		page_cache_release(page);
977 		return PTR_ERR(handle);
978 	}
979 
980 	lock_page(page);
981 	if (page->mapping != mapping) {
982 		/* The page got truncated from under us */
983 		unlock_page(page);
984 		page_cache_release(page);
985 		ext4_journal_stop(handle);
986 		goto retry_grab;
987 	}
988 	/* In case writeback began while the page was unlocked */
989 	wait_for_stable_page(page);
990 
991 	if (ext4_should_dioread_nolock(inode))
992 		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
993 	else
994 		ret = __block_write_begin(page, pos, len, ext4_get_block);
995 
996 	if (!ret && ext4_should_journal_data(inode)) {
997 		ret = ext4_walk_page_buffers(handle, page_buffers(page),
998 					     from, to, NULL,
999 					     do_journal_get_write_access);
1000 	}
1001 
1002 	if (ret) {
1003 		unlock_page(page);
1004 		/*
1005 		 * __block_write_begin may have instantiated a few blocks
1006 		 * outside i_size.  Trim these off again. Don't need
1007 		 * i_size_read because we hold i_mutex.
1008 		 *
1009 		 * Add inode to orphan list in case we crash before
1010 		 * truncate finishes
1011 		 */
1012 		if (pos + len > inode->i_size && ext4_can_truncate(inode))
1013 			ext4_orphan_add(handle, inode);
1014 
1015 		ext4_journal_stop(handle);
1016 		if (pos + len > inode->i_size) {
1017 			ext4_truncate_failed_write(inode);
1018 			/*
1019 			 * If truncate failed early the inode might
1020 			 * still be on the orphan list; we need to
1021 			 * make sure the inode is removed from the
1022 			 * orphan list in that case.
1023 			 */
1024 			if (inode->i_nlink)
1025 				ext4_orphan_del(NULL, inode);
1026 		}
1027 
1028 		if (ret == -ENOSPC &&
1029 		    ext4_should_retry_alloc(inode->i_sb, &retries))
1030 			goto retry_journal;
1031 		page_cache_release(page);
1032 		return ret;
1033 	}
1034 	*pagep = page;
1035 	return ret;
1036 }
1037 
1038 /* For write_end() in data=journal mode */
1039 static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1040 {
1041 	int ret;
1042 	if (!buffer_mapped(bh) || buffer_freed(bh))
1043 		return 0;
1044 	set_buffer_uptodate(bh);
1045 	ret = ext4_handle_dirty_metadata(handle, NULL, bh);
1046 	clear_buffer_meta(bh);
1047 	clear_buffer_prio(bh);
1048 	return ret;
1049 }
1050 
1051 /*
1052  * We need to pick up the new inode size which generic_commit_write gave us
1053  * `file' can be NULL - eg, when called from page_symlink().
1054  *
1055  * ext4 never places buffers on inode->i_mapping->private_list.  metadata
1056  * buffers are managed internally.
1057  */
1058 static int ext4_write_end(struct file *file,
1059 			  struct address_space *mapping,
1060 			  loff_t pos, unsigned len, unsigned copied,
1061 			  struct page *page, void *fsdata)
1062 {
1063 	handle_t *handle = ext4_journal_current_handle();
1064 	struct inode *inode = mapping->host;
1065 	int ret = 0, ret2;
1066 	int i_size_changed = 0;
1067 
1068 	trace_ext4_write_end(inode, pos, len, copied);
1069 	if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) {
1070 		ret = ext4_jbd2_file_inode(handle, inode);
1071 		if (ret) {
1072 			unlock_page(page);
1073 			page_cache_release(page);
1074 			goto errout;
1075 		}
1076 	}
1077 
1078 	if (ext4_has_inline_data(inode)) {
1079 		ret = ext4_write_inline_data_end(inode, pos, len,
1080 						 copied, page);
1081 		if (ret < 0)
1082 			goto errout;
1083 		copied = ret;
1084 	} else
1085 		copied = block_write_end(file, mapping, pos,
1086 					 len, copied, page, fsdata);
1087 
1088 	/*
1089 	 * No need to use i_size_read() here, the i_size
1090 	 * cannot change under us because we hole i_mutex.
1091 	 *
1092 	 * But it's important to update i_size while still holding page lock:
1093 	 * page writeout could otherwise come in and zero beyond i_size.
1094 	 */
1095 	if (pos + copied > inode->i_size) {
1096 		i_size_write(inode, pos + copied);
1097 		i_size_changed = 1;
1098 	}
1099 
1100 	if (pos + copied > EXT4_I(inode)->i_disksize) {
1101 		/* We need to mark inode dirty even if
1102 		 * new_i_size is less that inode->i_size
1103 		 * but greater than i_disksize. (hint delalloc)
1104 		 */
1105 		ext4_update_i_disksize(inode, (pos + copied));
1106 		i_size_changed = 1;
1107 	}
1108 	unlock_page(page);
1109 	page_cache_release(page);
1110 
1111 	/*
1112 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
1113 	 * makes the holding time of page lock longer. Second, it forces lock
1114 	 * ordering of page lock and transaction start for journaling
1115 	 * filesystems.
1116 	 */
1117 	if (i_size_changed)
1118 		ext4_mark_inode_dirty(handle, inode);
1119 
1120 	if (pos + len > inode->i_size && ext4_can_truncate(inode))
1121 		/* if we have allocated more blocks and copied
1122 		 * less. We will have blocks allocated outside
1123 		 * inode->i_size. So truncate them
1124 		 */
1125 		ext4_orphan_add(handle, inode);
1126 errout:
1127 	ret2 = ext4_journal_stop(handle);
1128 	if (!ret)
1129 		ret = ret2;
1130 
1131 	if (pos + len > inode->i_size) {
1132 		ext4_truncate_failed_write(inode);
1133 		/*
1134 		 * If truncate failed early the inode might still be
1135 		 * on the orphan list; we need to make sure the inode
1136 		 * is removed from the orphan list in that case.
1137 		 */
1138 		if (inode->i_nlink)
1139 			ext4_orphan_del(NULL, inode);
1140 	}
1141 
1142 	return ret ? ret : copied;
1143 }
1144 
1145 static int ext4_journalled_write_end(struct file *file,
1146 				     struct address_space *mapping,
1147 				     loff_t pos, unsigned len, unsigned copied,
1148 				     struct page *page, void *fsdata)
1149 {
1150 	handle_t *handle = ext4_journal_current_handle();
1151 	struct inode *inode = mapping->host;
1152 	int ret = 0, ret2;
1153 	int partial = 0;
1154 	unsigned from, to;
1155 	loff_t new_i_size;
1156 
1157 	trace_ext4_journalled_write_end(inode, pos, len, copied);
1158 	from = pos & (PAGE_CACHE_SIZE - 1);
1159 	to = from + len;
1160 
1161 	BUG_ON(!ext4_handle_valid(handle));
1162 
1163 	if (ext4_has_inline_data(inode))
1164 		copied = ext4_write_inline_data_end(inode, pos, len,
1165 						    copied, page);
1166 	else {
1167 		if (copied < len) {
1168 			if (!PageUptodate(page))
1169 				copied = 0;
1170 			page_zero_new_buffers(page, from+copied, to);
1171 		}
1172 
1173 		ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
1174 					     to, &partial, write_end_fn);
1175 		if (!partial)
1176 			SetPageUptodate(page);
1177 	}
1178 	new_i_size = pos + copied;
1179 	if (new_i_size > inode->i_size)
1180 		i_size_write(inode, pos+copied);
1181 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1182 	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1183 	if (new_i_size > EXT4_I(inode)->i_disksize) {
1184 		ext4_update_i_disksize(inode, new_i_size);
1185 		ret2 = ext4_mark_inode_dirty(handle, inode);
1186 		if (!ret)
1187 			ret = ret2;
1188 	}
1189 
1190 	unlock_page(page);
1191 	page_cache_release(page);
1192 	if (pos + len > inode->i_size && ext4_can_truncate(inode))
1193 		/* if we have allocated more blocks and copied
1194 		 * less. We will have blocks allocated outside
1195 		 * inode->i_size. So truncate them
1196 		 */
1197 		ext4_orphan_add(handle, inode);
1198 
1199 	ret2 = ext4_journal_stop(handle);
1200 	if (!ret)
1201 		ret = ret2;
1202 	if (pos + len > inode->i_size) {
1203 		ext4_truncate_failed_write(inode);
1204 		/*
1205 		 * If truncate failed early the inode might still be
1206 		 * on the orphan list; we need to make sure the inode
1207 		 * is removed from the orphan list in that case.
1208 		 */
1209 		if (inode->i_nlink)
1210 			ext4_orphan_del(NULL, inode);
1211 	}
1212 
1213 	return ret ? ret : copied;
1214 }
1215 
1216 /*
1217  * Reserve a metadata for a single block located at lblock
1218  */
1219 static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
1220 {
1221 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1222 	struct ext4_inode_info *ei = EXT4_I(inode);
1223 	unsigned int md_needed;
1224 	ext4_lblk_t save_last_lblock;
1225 	int save_len;
1226 
1227 	/*
1228 	 * recalculate the amount of metadata blocks to reserve
1229 	 * in order to allocate nrblocks
1230 	 * worse case is one extent per block
1231 	 */
1232 	spin_lock(&ei->i_block_reservation_lock);
1233 	/*
1234 	 * ext4_calc_metadata_amount() has side effects, which we have
1235 	 * to be prepared undo if we fail to claim space.
1236 	 */
1237 	save_len = ei->i_da_metadata_calc_len;
1238 	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
1239 	md_needed = EXT4_NUM_B2C(sbi,
1240 				 ext4_calc_metadata_amount(inode, lblock));
1241 	trace_ext4_da_reserve_space(inode, md_needed);
1242 
1243 	/*
1244 	 * We do still charge estimated metadata to the sb though;
1245 	 * we cannot afford to run out of free blocks.
1246 	 */
1247 	if (ext4_claim_free_clusters(sbi, md_needed, 0)) {
1248 		ei->i_da_metadata_calc_len = save_len;
1249 		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
1250 		spin_unlock(&ei->i_block_reservation_lock);
1251 		return -ENOSPC;
1252 	}
1253 	ei->i_reserved_meta_blocks += md_needed;
1254 	spin_unlock(&ei->i_block_reservation_lock);
1255 
1256 	return 0;       /* success */
1257 }
1258 
1259 /*
1260  * Reserve a single cluster located at lblock
1261  */
1262 static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
1263 {
1264 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1265 	struct ext4_inode_info *ei = EXT4_I(inode);
1266 	unsigned int md_needed;
1267 	int ret;
1268 	ext4_lblk_t save_last_lblock;
1269 	int save_len;
1270 
1271 	/*
1272 	 * We will charge metadata quota at writeout time; this saves
1273 	 * us from metadata over-estimation, though we may go over by
1274 	 * a small amount in the end.  Here we just reserve for data.
1275 	 */
1276 	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
1277 	if (ret)
1278 		return ret;
1279 
1280 	/*
1281 	 * recalculate the amount of metadata blocks to reserve
1282 	 * in order to allocate nrblocks
1283 	 * worse case is one extent per block
1284 	 */
1285 	spin_lock(&ei->i_block_reservation_lock);
1286 	/*
1287 	 * ext4_calc_metadata_amount() has side effects, which we have
1288 	 * to be prepared undo if we fail to claim space.
1289 	 */
1290 	save_len = ei->i_da_metadata_calc_len;
1291 	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
1292 	md_needed = EXT4_NUM_B2C(sbi,
1293 				 ext4_calc_metadata_amount(inode, lblock));
1294 	trace_ext4_da_reserve_space(inode, md_needed);
1295 
1296 	/*
1297 	 * We do still charge estimated metadata to the sb though;
1298 	 * we cannot afford to run out of free blocks.
1299 	 */
1300 	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
1301 		ei->i_da_metadata_calc_len = save_len;
1302 		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
1303 		spin_unlock(&ei->i_block_reservation_lock);
1304 		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
1305 		return -ENOSPC;
1306 	}
1307 	ei->i_reserved_data_blocks++;
1308 	ei->i_reserved_meta_blocks += md_needed;
1309 	spin_unlock(&ei->i_block_reservation_lock);
1310 
1311 	return 0;       /* success */
1312 }
1313 
1314 static void ext4_da_release_space(struct inode *inode, int to_free)
1315 {
1316 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1317 	struct ext4_inode_info *ei = EXT4_I(inode);
1318 
1319 	if (!to_free)
1320 		return;		/* Nothing to release, exit */
1321 
1322 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1323 
1324 	trace_ext4_da_release_space(inode, to_free);
1325 	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1326 		/*
1327 		 * if there aren't enough reserved blocks, then the
1328 		 * counter is messed up somewhere.  Since this
1329 		 * function is called from invalidate page, it's
1330 		 * harmless to return without any action.
1331 		 */
1332 		ext4_warning(inode->i_sb, "ext4_da_release_space: "
1333 			 "ino %lu, to_free %d with only %d reserved "
1334 			 "data blocks", inode->i_ino, to_free,
1335 			 ei->i_reserved_data_blocks);
1336 		WARN_ON(1);
1337 		to_free = ei->i_reserved_data_blocks;
1338 	}
1339 	ei->i_reserved_data_blocks -= to_free;
1340 
1341 	if (ei->i_reserved_data_blocks == 0) {
1342 		/*
1343 		 * We can release all of the reserved metadata blocks
1344 		 * only when we have written all of the delayed
1345 		 * allocation blocks.
1346 		 * Note that in case of bigalloc, i_reserved_meta_blocks,
1347 		 * i_reserved_data_blocks, etc. refer to number of clusters.
1348 		 */
1349 		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
1350 				   ei->i_reserved_meta_blocks);
1351 		ei->i_reserved_meta_blocks = 0;
1352 		ei->i_da_metadata_calc_len = 0;
1353 	}
1354 
1355 	/* update fs dirty data blocks counter */
1356 	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
1357 
1358 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1359 
1360 	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
1361 }
1362 
1363 static void ext4_da_page_release_reservation(struct page *page,
1364 					     unsigned int offset,
1365 					     unsigned int length)
1366 {
1367 	int to_release = 0;
1368 	struct buffer_head *head, *bh;
1369 	unsigned int curr_off = 0;
1370 	struct inode *inode = page->mapping->host;
1371 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1372 	unsigned int stop = offset + length;
1373 	int num_clusters;
1374 	ext4_fsblk_t lblk;
1375 
1376 	BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
1377 
1378 	head = page_buffers(page);
1379 	bh = head;
1380 	do {
1381 		unsigned int next_off = curr_off + bh->b_size;
1382 
1383 		if (next_off > stop)
1384 			break;
1385 
1386 		if ((offset <= curr_off) && (buffer_delay(bh))) {
1387 			to_release++;
1388 			clear_buffer_delay(bh);
1389 		}
1390 		curr_off = next_off;
1391 	} while ((bh = bh->b_this_page) != head);
1392 
1393 	if (to_release) {
1394 		lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1395 		ext4_es_remove_extent(inode, lblk, to_release);
1396 	}
1397 
1398 	/* If we have released all the blocks belonging to a cluster, then we
1399 	 * need to release the reserved space for that cluster. */
1400 	num_clusters = EXT4_NUM_B2C(sbi, to_release);
1401 	while (num_clusters > 0) {
1402 		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
1403 			((num_clusters - 1) << sbi->s_cluster_bits);
1404 		if (sbi->s_cluster_ratio == 1 ||
1405 		    !ext4_find_delalloc_cluster(inode, lblk))
1406 			ext4_da_release_space(inode, 1);
1407 
1408 		num_clusters--;
1409 	}
1410 }
1411 
1412 /*
1413  * Delayed allocation stuff
1414  */
1415 
1416 struct mpage_da_data {
1417 	struct inode *inode;
1418 	struct writeback_control *wbc;
1419 
1420 	pgoff_t first_page;	/* The first page to write */
1421 	pgoff_t next_page;	/* Current page to examine */
1422 	pgoff_t last_page;	/* Last page to examine */
1423 	/*
1424 	 * Extent to map - this can be after first_page because that can be
1425 	 * fully mapped. We somewhat abuse m_flags to store whether the extent
1426 	 * is delalloc or unwritten.
1427 	 */
1428 	struct ext4_map_blocks map;
1429 	struct ext4_io_submit io_submit;	/* IO submission data */
1430 };
1431 
1432 static void mpage_release_unused_pages(struct mpage_da_data *mpd,
1433 				       bool invalidate)
1434 {
1435 	int nr_pages, i;
1436 	pgoff_t index, end;
1437 	struct pagevec pvec;
1438 	struct inode *inode = mpd->inode;
1439 	struct address_space *mapping = inode->i_mapping;
1440 
1441 	/* This is necessary when next_page == 0. */
1442 	if (mpd->first_page >= mpd->next_page)
1443 		return;
1444 
1445 	index = mpd->first_page;
1446 	end   = mpd->next_page - 1;
1447 	if (invalidate) {
1448 		ext4_lblk_t start, last;
1449 		start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1450 		last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1451 		ext4_es_remove_extent(inode, start, last - start + 1);
1452 	}
1453 
1454 	pagevec_init(&pvec, 0);
1455 	while (index <= end) {
1456 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1457 		if (nr_pages == 0)
1458 			break;
1459 		for (i = 0; i < nr_pages; i++) {
1460 			struct page *page = pvec.pages[i];
1461 			if (page->index > end)
1462 				break;
1463 			BUG_ON(!PageLocked(page));
1464 			BUG_ON(PageWriteback(page));
1465 			if (invalidate) {
1466 				block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
1467 				ClearPageUptodate(page);
1468 			}
1469 			unlock_page(page);
1470 		}
1471 		index = pvec.pages[nr_pages - 1]->index + 1;
1472 		pagevec_release(&pvec);
1473 	}
1474 }
1475 
1476 static void ext4_print_free_blocks(struct inode *inode)
1477 {
1478 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1479 	struct super_block *sb = inode->i_sb;
1480 	struct ext4_inode_info *ei = EXT4_I(inode);
1481 
1482 	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
1483 	       EXT4_C2B(EXT4_SB(inode->i_sb),
1484 			ext4_count_free_clusters(sb)));
1485 	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
1486 	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
1487 	       (long long) EXT4_C2B(EXT4_SB(sb),
1488 		percpu_counter_sum(&sbi->s_freeclusters_counter)));
1489 	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
1490 	       (long long) EXT4_C2B(EXT4_SB(sb),
1491 		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
1492 	ext4_msg(sb, KERN_CRIT, "Block reservation details");
1493 	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
1494 		 ei->i_reserved_data_blocks);
1495 	ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
1496 	       ei->i_reserved_meta_blocks);
1497 	ext4_msg(sb, KERN_CRIT, "i_allocated_meta_blocks=%u",
1498 	       ei->i_allocated_meta_blocks);
1499 	return;
1500 }
1501 
1502 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
1503 {
1504 	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
1505 }
1506 
1507 /*
1508  * This function is grabs code from the very beginning of
1509  * ext4_map_blocks, but assumes that the caller is from delayed write
1510  * time. This function looks up the requested blocks and sets the
1511  * buffer delay bit under the protection of i_data_sem.
1512  */
1513 static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1514 			      struct ext4_map_blocks *map,
1515 			      struct buffer_head *bh)
1516 {
1517 	struct extent_status es;
1518 	int retval;
1519 	sector_t invalid_block = ~((sector_t) 0xffff);
1520 #ifdef ES_AGGRESSIVE_TEST
1521 	struct ext4_map_blocks orig_map;
1522 
1523 	memcpy(&orig_map, map, sizeof(*map));
1524 #endif
1525 
1526 	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1527 		invalid_block = ~0;
1528 
1529 	map->m_flags = 0;
1530 	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
1531 		  "logical block %lu\n", inode->i_ino, map->m_len,
1532 		  (unsigned long) map->m_lblk);
1533 
1534 	/* Lookup extent status tree firstly */
1535 	if (ext4_es_lookup_extent(inode, iblock, &es)) {
1536 		ext4_es_lru_add(inode);
1537 		if (ext4_es_is_hole(&es)) {
1538 			retval = 0;
1539 			down_read((&EXT4_I(inode)->i_data_sem));
1540 			goto add_delayed;
1541 		}
1542 
1543 		/*
1544 		 * Delayed extent could be allocated by fallocate.
1545 		 * So we need to check it.
1546 		 */
1547 		if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
1548 			map_bh(bh, inode->i_sb, invalid_block);
1549 			set_buffer_new(bh);
1550 			set_buffer_delay(bh);
1551 			return 0;
1552 		}
1553 
1554 		map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
1555 		retval = es.es_len - (iblock - es.es_lblk);
1556 		if (retval > map->m_len)
1557 			retval = map->m_len;
1558 		map->m_len = retval;
1559 		if (ext4_es_is_written(&es))
1560 			map->m_flags |= EXT4_MAP_MAPPED;
1561 		else if (ext4_es_is_unwritten(&es))
1562 			map->m_flags |= EXT4_MAP_UNWRITTEN;
1563 		else
1564 			BUG_ON(1);
1565 
1566 #ifdef ES_AGGRESSIVE_TEST
1567 		ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1568 #endif
1569 		return retval;
1570 	}
1571 
1572 	/*
1573 	 * Try to see if we can get the block without requesting a new
1574 	 * file system block.
1575 	 */
1576 	down_read((&EXT4_I(inode)->i_data_sem));
1577 	if (ext4_has_inline_data(inode)) {
1578 		/*
1579 		 * We will soon create blocks for this page, and let
1580 		 * us pretend as if the blocks aren't allocated yet.
1581 		 * In case of clusters, we have to handle the work
1582 		 * of mapping from cluster so that the reserved space
1583 		 * is calculated properly.
1584 		 */
1585 		if ((EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) &&
1586 		    ext4_find_delalloc_cluster(inode, map->m_lblk))
1587 			map->m_flags |= EXT4_MAP_FROM_CLUSTER;
1588 		retval = 0;
1589 	} else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1590 		retval = ext4_ext_map_blocks(NULL, inode, map,
1591 					     EXT4_GET_BLOCKS_NO_PUT_HOLE);
1592 	else
1593 		retval = ext4_ind_map_blocks(NULL, inode, map,
1594 					     EXT4_GET_BLOCKS_NO_PUT_HOLE);
1595 
1596 add_delayed:
1597 	if (retval == 0) {
1598 		int ret;
1599 		/*
1600 		 * XXX: __block_prepare_write() unmaps passed block,
1601 		 * is it OK?
1602 		 */
1603 		/*
1604 		 * If the block was allocated from previously allocated cluster,
1605 		 * then we don't need to reserve it again. However we still need
1606 		 * to reserve metadata for every block we're going to write.
1607 		 */
1608 		if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
1609 			ret = ext4_da_reserve_space(inode, iblock);
1610 			if (ret) {
1611 				/* not enough space to reserve */
1612 				retval = ret;
1613 				goto out_unlock;
1614 			}
1615 		} else {
1616 			ret = ext4_da_reserve_metadata(inode, iblock);
1617 			if (ret) {
1618 				/* not enough space to reserve */
1619 				retval = ret;
1620 				goto out_unlock;
1621 			}
1622 		}
1623 
1624 		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1625 					    ~0, EXTENT_STATUS_DELAYED);
1626 		if (ret) {
1627 			retval = ret;
1628 			goto out_unlock;
1629 		}
1630 
1631 		/* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
1632 		 * and it should not appear on the bh->b_state.
1633 		 */
1634 		map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
1635 
1636 		map_bh(bh, inode->i_sb, invalid_block);
1637 		set_buffer_new(bh);
1638 		set_buffer_delay(bh);
1639 	} else if (retval > 0) {
1640 		int ret;
1641 		unsigned int status;
1642 
1643 		if (unlikely(retval != map->m_len)) {
1644 			ext4_warning(inode->i_sb,
1645 				     "ES len assertion failed for inode "
1646 				     "%lu: retval %d != map->m_len %d",
1647 				     inode->i_ino, retval, map->m_len);
1648 			WARN_ON(1);
1649 		}
1650 
1651 		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
1652 				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
1653 		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1654 					    map->m_pblk, status);
1655 		if (ret != 0)
1656 			retval = ret;
1657 	}
1658 
1659 out_unlock:
1660 	up_read((&EXT4_I(inode)->i_data_sem));
1661 
1662 	return retval;
1663 }
1664 
1665 /*
1666  * This is a special get_blocks_t callback which is used by
1667  * ext4_da_write_begin().  It will either return mapped block or
1668  * reserve space for a single block.
1669  *
1670  * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
1671  * We also have b_blocknr = -1 and b_bdev initialized properly
1672  *
1673  * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
1674  * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
1675  * initialized properly.
1676  */
1677 int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1678 			   struct buffer_head *bh, int create)
1679 {
1680 	struct ext4_map_blocks map;
1681 	int ret = 0;
1682 
1683 	BUG_ON(create == 0);
1684 	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1685 
1686 	map.m_lblk = iblock;
1687 	map.m_len = 1;
1688 
1689 	/*
1690 	 * first, we need to know whether the block is allocated already
1691 	 * preallocated blocks are unmapped but should treated
1692 	 * the same as allocated blocks.
1693 	 */
1694 	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1695 	if (ret <= 0)
1696 		return ret;
1697 
1698 	map_bh(bh, inode->i_sb, map.m_pblk);
1699 	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
1700 
1701 	if (buffer_unwritten(bh)) {
1702 		/* A delayed write to unwritten bh should be marked
1703 		 * new and mapped.  Mapped ensures that we don't do
1704 		 * get_block multiple times when we write to the same
1705 		 * offset and new ensures that we do proper zero out
1706 		 * for partial write.
1707 		 */
1708 		set_buffer_new(bh);
1709 		set_buffer_mapped(bh);
1710 	}
1711 	return 0;
1712 }
1713 
1714 static int bget_one(handle_t *handle, struct buffer_head *bh)
1715 {
1716 	get_bh(bh);
1717 	return 0;
1718 }
1719 
1720 static int bput_one(handle_t *handle, struct buffer_head *bh)
1721 {
1722 	put_bh(bh);
1723 	return 0;
1724 }
1725 
1726 static int __ext4_journalled_writepage(struct page *page,
1727 				       unsigned int len)
1728 {
1729 	struct address_space *mapping = page->mapping;
1730 	struct inode *inode = mapping->host;
1731 	struct buffer_head *page_bufs = NULL;
1732 	handle_t *handle = NULL;
1733 	int ret = 0, err = 0;
1734 	int inline_data = ext4_has_inline_data(inode);
1735 	struct buffer_head *inode_bh = NULL;
1736 
1737 	ClearPageChecked(page);
1738 
1739 	if (inline_data) {
1740 		BUG_ON(page->index != 0);
1741 		BUG_ON(len > ext4_get_max_inline_size(inode));
1742 		inode_bh = ext4_journalled_write_inline_data(inode, len, page);
1743 		if (inode_bh == NULL)
1744 			goto out;
1745 	} else {
1746 		page_bufs = page_buffers(page);
1747 		if (!page_bufs) {
1748 			BUG();
1749 			goto out;
1750 		}
1751 		ext4_walk_page_buffers(handle, page_bufs, 0, len,
1752 				       NULL, bget_one);
1753 	}
1754 	/* As soon as we unlock the page, it can go away, but we have
1755 	 * references to buffers so we are safe */
1756 	unlock_page(page);
1757 
1758 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
1759 				    ext4_writepage_trans_blocks(inode));
1760 	if (IS_ERR(handle)) {
1761 		ret = PTR_ERR(handle);
1762 		goto out;
1763 	}
1764 
1765 	BUG_ON(!ext4_handle_valid(handle));
1766 
1767 	if (inline_data) {
1768 		ret = ext4_journal_get_write_access(handle, inode_bh);
1769 
1770 		err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
1771 
1772 	} else {
1773 		ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
1774 					     do_journal_get_write_access);
1775 
1776 		err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
1777 					     write_end_fn);
1778 	}
1779 	if (ret == 0)
1780 		ret = err;
1781 	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1782 	err = ext4_journal_stop(handle);
1783 	if (!ret)
1784 		ret = err;
1785 
1786 	if (!ext4_has_inline_data(inode))
1787 		ext4_walk_page_buffers(NULL, page_bufs, 0, len,
1788 				       NULL, bput_one);
1789 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1790 out:
1791 	brelse(inode_bh);
1792 	return ret;
1793 }
1794 
1795 /*
1796  * Note that we don't need to start a transaction unless we're journaling data
1797  * because we should have holes filled from ext4_page_mkwrite(). We even don't
1798  * need to file the inode to the transaction's list in ordered mode because if
1799  * we are writing back data added by write(), the inode is already there and if
1800  * we are writing back data modified via mmap(), no one guarantees in which
1801  * transaction the data will hit the disk. In case we are journaling data, we
1802  * cannot start transaction directly because transaction start ranks above page
1803  * lock so we have to do some magic.
1804  *
1805  * This function can get called via...
1806  *   - ext4_writepages after taking page lock (have journal handle)
1807  *   - journal_submit_inode_data_buffers (no journal handle)
1808  *   - shrink_page_list via the kswapd/direct reclaim (no journal handle)
1809  *   - grab_page_cache when doing write_begin (have journal handle)
1810  *
1811  * We don't do any block allocation in this function. If we have page with
1812  * multiple blocks we need to write those buffer_heads that are mapped. This
1813  * is important for mmaped based write. So if we do with blocksize 1K
1814  * truncate(f, 1024);
1815  * a = mmap(f, 0, 4096);
1816  * a[0] = 'a';
1817  * truncate(f, 4096);
1818  * we have in the page first buffer_head mapped via page_mkwrite call back
1819  * but other buffer_heads would be unmapped but dirty (dirty done via the
1820  * do_wp_page). So writepage should write the first block. If we modify
1821  * the mmap area beyond 1024 we will again get a page_fault and the
1822  * page_mkwrite callback will do the block allocation and mark the
1823  * buffer_heads mapped.
1824  *
1825  * We redirty the page if we have any buffer_heads that is either delay or
1826  * unwritten in the page.
1827  *
1828  * We can get recursively called as show below.
1829  *
1830  *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1831  *		ext4_writepage()
1832  *
1833  * But since we don't do any block allocation we should not deadlock.
1834  * Page also have the dirty flag cleared so we don't get recurive page_lock.
1835  */
1836 static int ext4_writepage(struct page *page,
1837 			  struct writeback_control *wbc)
1838 {
1839 	int ret = 0;
1840 	loff_t size;
1841 	unsigned int len;
1842 	struct buffer_head *page_bufs = NULL;
1843 	struct inode *inode = page->mapping->host;
1844 	struct ext4_io_submit io_submit;
1845 
1846 	trace_ext4_writepage(page);
1847 	size = i_size_read(inode);
1848 	if (page->index == size >> PAGE_CACHE_SHIFT)
1849 		len = size & ~PAGE_CACHE_MASK;
1850 	else
1851 		len = PAGE_CACHE_SIZE;
1852 
1853 	page_bufs = page_buffers(page);
1854 	/*
1855 	 * We cannot do block allocation or other extent handling in this
1856 	 * function. If there are buffers needing that, we have to redirty
1857 	 * the page. But we may reach here when we do a journal commit via
1858 	 * journal_submit_inode_data_buffers() and in that case we must write
1859 	 * allocated buffers to achieve data=ordered mode guarantees.
1860 	 */
1861 	if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
1862 				   ext4_bh_delay_or_unwritten)) {
1863 		redirty_page_for_writepage(wbc, page);
1864 		if (current->flags & PF_MEMALLOC) {
1865 			/*
1866 			 * For memory cleaning there's no point in writing only
1867 			 * some buffers. So just bail out. Warn if we came here
1868 			 * from direct reclaim.
1869 			 */
1870 			WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
1871 							== PF_MEMALLOC);
1872 			unlock_page(page);
1873 			return 0;
1874 		}
1875 	}
1876 
1877 	if (PageChecked(page) && ext4_should_journal_data(inode))
1878 		/*
1879 		 * It's mmapped pagecache.  Add buffers and journal it.  There
1880 		 * doesn't seem much point in redirtying the page here.
1881 		 */
1882 		return __ext4_journalled_writepage(page, len);
1883 
1884 	ext4_io_submit_init(&io_submit, wbc);
1885 	io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
1886 	if (!io_submit.io_end) {
1887 		redirty_page_for_writepage(wbc, page);
1888 		unlock_page(page);
1889 		return -ENOMEM;
1890 	}
1891 	ret = ext4_bio_write_page(&io_submit, page, len, wbc);
1892 	ext4_io_submit(&io_submit);
1893 	/* Drop io_end reference we got from init */
1894 	ext4_put_io_end_defer(io_submit.io_end);
1895 	return ret;
1896 }
1897 
1898 static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
1899 {
1900 	int len;
1901 	loff_t size = i_size_read(mpd->inode);
1902 	int err;
1903 
1904 	BUG_ON(page->index != mpd->first_page);
1905 	if (page->index == size >> PAGE_CACHE_SHIFT)
1906 		len = size & ~PAGE_CACHE_MASK;
1907 	else
1908 		len = PAGE_CACHE_SIZE;
1909 	clear_page_dirty_for_io(page);
1910 	err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
1911 	if (!err)
1912 		mpd->wbc->nr_to_write--;
1913 	mpd->first_page++;
1914 
1915 	return err;
1916 }
1917 
1918 #define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
1919 
1920 /*
1921  * mballoc gives us at most this number of blocks...
1922  * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
1923  * The rest of mballoc seems to handle chunks up to full group size.
1924  */
1925 #define MAX_WRITEPAGES_EXTENT_LEN 2048
1926 
1927 /*
1928  * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
1929  *
1930  * @mpd - extent of blocks
1931  * @lblk - logical number of the block in the file
1932  * @bh - buffer head we want to add to the extent
1933  *
1934  * The function is used to collect contig. blocks in the same state. If the
1935  * buffer doesn't require mapping for writeback and we haven't started the
1936  * extent of buffers to map yet, the function returns 'true' immediately - the
1937  * caller can write the buffer right away. Otherwise the function returns true
1938  * if the block has been added to the extent, false if the block couldn't be
1939  * added.
1940  */
1941 static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
1942 				   struct buffer_head *bh)
1943 {
1944 	struct ext4_map_blocks *map = &mpd->map;
1945 
1946 	/* Buffer that doesn't need mapping for writeback? */
1947 	if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
1948 	    (!buffer_delay(bh) && !buffer_unwritten(bh))) {
1949 		/* So far no extent to map => we write the buffer right away */
1950 		if (map->m_len == 0)
1951 			return true;
1952 		return false;
1953 	}
1954 
1955 	/* First block in the extent? */
1956 	if (map->m_len == 0) {
1957 		map->m_lblk = lblk;
1958 		map->m_len = 1;
1959 		map->m_flags = bh->b_state & BH_FLAGS;
1960 		return true;
1961 	}
1962 
1963 	/* Don't go larger than mballoc is willing to allocate */
1964 	if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
1965 		return false;
1966 
1967 	/* Can we merge the block to our big extent? */
1968 	if (lblk == map->m_lblk + map->m_len &&
1969 	    (bh->b_state & BH_FLAGS) == map->m_flags) {
1970 		map->m_len++;
1971 		return true;
1972 	}
1973 	return false;
1974 }
1975 
1976 /*
1977  * mpage_process_page_bufs - submit page buffers for IO or add them to extent
1978  *
1979  * @mpd - extent of blocks for mapping
1980  * @head - the first buffer in the page
1981  * @bh - buffer we should start processing from
1982  * @lblk - logical number of the block in the file corresponding to @bh
1983  *
1984  * Walk through page buffers from @bh upto @head (exclusive) and either submit
1985  * the page for IO if all buffers in this page were mapped and there's no
1986  * accumulated extent of buffers to map or add buffers in the page to the
1987  * extent of buffers to map. The function returns 1 if the caller can continue
1988  * by processing the next page, 0 if it should stop adding buffers to the
1989  * extent to map because we cannot extend it anymore. It can also return value
1990  * < 0 in case of error during IO submission.
1991  */
1992 static int mpage_process_page_bufs(struct mpage_da_data *mpd,
1993 				   struct buffer_head *head,
1994 				   struct buffer_head *bh,
1995 				   ext4_lblk_t lblk)
1996 {
1997 	struct inode *inode = mpd->inode;
1998 	int err;
1999 	ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
2000 							>> inode->i_blkbits;
2001 
2002 	do {
2003 		BUG_ON(buffer_locked(bh));
2004 
2005 		if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
2006 			/* Found extent to map? */
2007 			if (mpd->map.m_len)
2008 				return 0;
2009 			/* Everything mapped so far and we hit EOF */
2010 			break;
2011 		}
2012 	} while (lblk++, (bh = bh->b_this_page) != head);
2013 	/* So far everything mapped? Submit the page for IO. */
2014 	if (mpd->map.m_len == 0) {
2015 		err = mpage_submit_page(mpd, head->b_page);
2016 		if (err < 0)
2017 			return err;
2018 	}
2019 	return lblk < blocks;
2020 }
2021 
2022 /*
2023  * mpage_map_buffers - update buffers corresponding to changed extent and
2024  *		       submit fully mapped pages for IO
2025  *
2026  * @mpd - description of extent to map, on return next extent to map
2027  *
2028  * Scan buffers corresponding to changed extent (we expect corresponding pages
2029  * to be already locked) and update buffer state according to new extent state.
2030  * We map delalloc buffers to their physical location, clear unwritten bits,
2031  * and mark buffers as uninit when we perform writes to uninitialized extents
2032  * and do extent conversion after IO is finished. If the last page is not fully
2033  * mapped, we update @map to the next extent in the last page that needs
2034  * mapping. Otherwise we submit the page for IO.
2035  */
2036 static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2037 {
2038 	struct pagevec pvec;
2039 	int nr_pages, i;
2040 	struct inode *inode = mpd->inode;
2041 	struct buffer_head *head, *bh;
2042 	int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits;
2043 	pgoff_t start, end;
2044 	ext4_lblk_t lblk;
2045 	sector_t pblock;
2046 	int err;
2047 
2048 	start = mpd->map.m_lblk >> bpp_bits;
2049 	end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2050 	lblk = start << bpp_bits;
2051 	pblock = mpd->map.m_pblk;
2052 
2053 	pagevec_init(&pvec, 0);
2054 	while (start <= end) {
2055 		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start,
2056 					  PAGEVEC_SIZE);
2057 		if (nr_pages == 0)
2058 			break;
2059 		for (i = 0; i < nr_pages; i++) {
2060 			struct page *page = pvec.pages[i];
2061 
2062 			if (page->index > end)
2063 				break;
2064 			/* Up to 'end' pages must be contiguous */
2065 			BUG_ON(page->index != start);
2066 			bh = head = page_buffers(page);
2067 			do {
2068 				if (lblk < mpd->map.m_lblk)
2069 					continue;
2070 				if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2071 					/*
2072 					 * Buffer after end of mapped extent.
2073 					 * Find next buffer in the page to map.
2074 					 */
2075 					mpd->map.m_len = 0;
2076 					mpd->map.m_flags = 0;
2077 					/*
2078 					 * FIXME: If dioread_nolock supports
2079 					 * blocksize < pagesize, we need to make
2080 					 * sure we add size mapped so far to
2081 					 * io_end->size as the following call
2082 					 * can submit the page for IO.
2083 					 */
2084 					err = mpage_process_page_bufs(mpd, head,
2085 								      bh, lblk);
2086 					pagevec_release(&pvec);
2087 					if (err > 0)
2088 						err = 0;
2089 					return err;
2090 				}
2091 				if (buffer_delay(bh)) {
2092 					clear_buffer_delay(bh);
2093 					bh->b_blocknr = pblock++;
2094 				}
2095 				clear_buffer_unwritten(bh);
2096 			} while (lblk++, (bh = bh->b_this_page) != head);
2097 
2098 			/*
2099 			 * FIXME: This is going to break if dioread_nolock
2100 			 * supports blocksize < pagesize as we will try to
2101 			 * convert potentially unmapped parts of inode.
2102 			 */
2103 			mpd->io_submit.io_end->size += PAGE_CACHE_SIZE;
2104 			/* Page fully mapped - let IO run! */
2105 			err = mpage_submit_page(mpd, page);
2106 			if (err < 0) {
2107 				pagevec_release(&pvec);
2108 				return err;
2109 			}
2110 			start++;
2111 		}
2112 		pagevec_release(&pvec);
2113 	}
2114 	/* Extent fully mapped and matches with page boundary. We are done. */
2115 	mpd->map.m_len = 0;
2116 	mpd->map.m_flags = 0;
2117 	return 0;
2118 }
2119 
2120 static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2121 {
2122 	struct inode *inode = mpd->inode;
2123 	struct ext4_map_blocks *map = &mpd->map;
2124 	int get_blocks_flags;
2125 	int err;
2126 
2127 	trace_ext4_da_write_pages_extent(inode, map);
2128 	/*
2129 	 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
2130 	 * to convert an uninitialized extent to be initialized (in the case
2131 	 * where we have written into one or more preallocated blocks).  It is
2132 	 * possible that we're going to need more metadata blocks than
2133 	 * previously reserved. However we must not fail because we're in
2134 	 * writeback and there is nothing we can do about it so it might result
2135 	 * in data loss.  So use reserved blocks to allocate metadata if
2136 	 * possible.
2137 	 *
2138 	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if the blocks
2139 	 * in question are delalloc blocks.  This affects functions in many
2140 	 * different parts of the allocation call path.  This flag exists
2141 	 * primarily because we don't want to change *many* call functions, so
2142 	 * ext4_map_blocks() will set the EXT4_STATE_DELALLOC_RESERVED flag
2143 	 * once the inode's allocation semaphore is taken.
2144 	 */
2145 	get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
2146 			   EXT4_GET_BLOCKS_METADATA_NOFAIL;
2147 	if (ext4_should_dioread_nolock(inode))
2148 		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2149 	if (map->m_flags & (1 << BH_Delay))
2150 		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2151 
2152 	err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2153 	if (err < 0)
2154 		return err;
2155 	if (map->m_flags & EXT4_MAP_UNINIT) {
2156 		if (!mpd->io_submit.io_end->handle &&
2157 		    ext4_handle_valid(handle)) {
2158 			mpd->io_submit.io_end->handle = handle->h_rsv_handle;
2159 			handle->h_rsv_handle = NULL;
2160 		}
2161 		ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
2162 	}
2163 
2164 	BUG_ON(map->m_len == 0);
2165 	if (map->m_flags & EXT4_MAP_NEW) {
2166 		struct block_device *bdev = inode->i_sb->s_bdev;
2167 		int i;
2168 
2169 		for (i = 0; i < map->m_len; i++)
2170 			unmap_underlying_metadata(bdev, map->m_pblk + i);
2171 	}
2172 	return 0;
2173 }
2174 
2175 /*
2176  * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2177  *				 mpd->len and submit pages underlying it for IO
2178  *
2179  * @handle - handle for journal operations
2180  * @mpd - extent to map
2181  * @give_up_on_write - we set this to true iff there is a fatal error and there
2182  *                     is no hope of writing the data. The caller should discard
2183  *                     dirty pages to avoid infinite loops.
2184  *
2185  * The function maps extent starting at mpd->lblk of length mpd->len. If it is
2186  * delayed, blocks are allocated, if it is unwritten, we may need to convert
2187  * them to initialized or split the described range from larger unwritten
2188  * extent. Note that we need not map all the described range since allocation
2189  * can return less blocks or the range is covered by more unwritten extents. We
2190  * cannot map more because we are limited by reserved transaction credits. On
2191  * the other hand we always make sure that the last touched page is fully
2192  * mapped so that it can be written out (and thus forward progress is
2193  * guaranteed). After mapping we submit all mapped pages for IO.
2194  */
2195 static int mpage_map_and_submit_extent(handle_t *handle,
2196 				       struct mpage_da_data *mpd,
2197 				       bool *give_up_on_write)
2198 {
2199 	struct inode *inode = mpd->inode;
2200 	struct ext4_map_blocks *map = &mpd->map;
2201 	int err;
2202 	loff_t disksize;
2203 
2204 	mpd->io_submit.io_end->offset =
2205 				((loff_t)map->m_lblk) << inode->i_blkbits;
2206 	do {
2207 		err = mpage_map_one_extent(handle, mpd);
2208 		if (err < 0) {
2209 			struct super_block *sb = inode->i_sb;
2210 
2211 			if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
2212 				goto invalidate_dirty_pages;
2213 			/*
2214 			 * Let the uper layers retry transient errors.
2215 			 * In the case of ENOSPC, if ext4_count_free_blocks()
2216 			 * is non-zero, a commit should free up blocks.
2217 			 */
2218 			if ((err == -ENOMEM) ||
2219 			    (err == -ENOSPC && ext4_count_free_clusters(sb)))
2220 				return err;
2221 			ext4_msg(sb, KERN_CRIT,
2222 				 "Delayed block allocation failed for "
2223 				 "inode %lu at logical offset %llu with"
2224 				 " max blocks %u with error %d",
2225 				 inode->i_ino,
2226 				 (unsigned long long)map->m_lblk,
2227 				 (unsigned)map->m_len, -err);
2228 			ext4_msg(sb, KERN_CRIT,
2229 				 "This should not happen!! Data will "
2230 				 "be lost\n");
2231 			if (err == -ENOSPC)
2232 				ext4_print_free_blocks(inode);
2233 		invalidate_dirty_pages:
2234 			*give_up_on_write = true;
2235 			return err;
2236 		}
2237 		/*
2238 		 * Update buffer state, submit mapped pages, and get us new
2239 		 * extent to map
2240 		 */
2241 		err = mpage_map_and_submit_buffers(mpd);
2242 		if (err < 0)
2243 			return err;
2244 	} while (map->m_len);
2245 
2246 	/* Update on-disk size after IO is submitted */
2247 	disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT;
2248 	if (disksize > EXT4_I(inode)->i_disksize) {
2249 		int err2;
2250 
2251 		ext4_wb_update_i_disksize(inode, disksize);
2252 		err2 = ext4_mark_inode_dirty(handle, inode);
2253 		if (err2)
2254 			ext4_error(inode->i_sb,
2255 				   "Failed to mark inode %lu dirty",
2256 				   inode->i_ino);
2257 		if (!err)
2258 			err = err2;
2259 	}
2260 	return err;
2261 }
2262 
2263 /*
2264  * Calculate the total number of credits to reserve for one writepages
2265  * iteration. This is called from ext4_writepages(). We map an extent of
2266  * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
2267  * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2268  * bpp - 1 blocks in bpp different extents.
2269  */
2270 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2271 {
2272 	int bpp = ext4_journal_blocks_per_page(inode);
2273 
2274 	return ext4_meta_trans_blocks(inode,
2275 				MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
2276 }
2277 
2278 /*
2279  * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
2280  * 				 and underlying extent to map
2281  *
2282  * @mpd - where to look for pages
2283  *
2284  * Walk dirty pages in the mapping. If they are fully mapped, submit them for
2285  * IO immediately. When we find a page which isn't mapped we start accumulating
2286  * extent of buffers underlying these pages that needs mapping (formed by
2287  * either delayed or unwritten buffers). We also lock the pages containing
2288  * these buffers. The extent found is returned in @mpd structure (starting at
2289  * mpd->lblk with length mpd->len blocks).
2290  *
2291  * Note that this function can attach bios to one io_end structure which are
2292  * neither logically nor physically contiguous. Although it may seem as an
2293  * unnecessary complication, it is actually inevitable in blocksize < pagesize
2294  * case as we need to track IO to all buffers underlying a page in one io_end.
2295  */
2296 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2297 {
2298 	struct address_space *mapping = mpd->inode->i_mapping;
2299 	struct pagevec pvec;
2300 	unsigned int nr_pages;
2301 	long left = mpd->wbc->nr_to_write;
2302 	pgoff_t index = mpd->first_page;
2303 	pgoff_t end = mpd->last_page;
2304 	int tag;
2305 	int i, err = 0;
2306 	int blkbits = mpd->inode->i_blkbits;
2307 	ext4_lblk_t lblk;
2308 	struct buffer_head *head;
2309 
2310 	if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
2311 		tag = PAGECACHE_TAG_TOWRITE;
2312 	else
2313 		tag = PAGECACHE_TAG_DIRTY;
2314 
2315 	pagevec_init(&pvec, 0);
2316 	mpd->map.m_len = 0;
2317 	mpd->next_page = index;
2318 	while (index <= end) {
2319 		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2320 			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2321 		if (nr_pages == 0)
2322 			goto out;
2323 
2324 		for (i = 0; i < nr_pages; i++) {
2325 			struct page *page = pvec.pages[i];
2326 
2327 			/*
2328 			 * At this point, the page may be truncated or
2329 			 * invalidated (changing page->mapping to NULL), or
2330 			 * even swizzled back from swapper_space to tmpfs file
2331 			 * mapping. However, page->index will not change
2332 			 * because we have a reference on the page.
2333 			 */
2334 			if (page->index > end)
2335 				goto out;
2336 
2337 			/*
2338 			 * Accumulated enough dirty pages? This doesn't apply
2339 			 * to WB_SYNC_ALL mode. For integrity sync we have to
2340 			 * keep going because someone may be concurrently
2341 			 * dirtying pages, and we might have synced a lot of
2342 			 * newly appeared dirty pages, but have not synced all
2343 			 * of the old dirty pages.
2344 			 */
2345 			if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
2346 				goto out;
2347 
2348 			/* If we can't merge this page, we are done. */
2349 			if (mpd->map.m_len > 0 && mpd->next_page != page->index)
2350 				goto out;
2351 
2352 			lock_page(page);
2353 			/*
2354 			 * If the page is no longer dirty, or its mapping no
2355 			 * longer corresponds to inode we are writing (which
2356 			 * means it has been truncated or invalidated), or the
2357 			 * page is already under writeback and we are not doing
2358 			 * a data integrity writeback, skip the page
2359 			 */
2360 			if (!PageDirty(page) ||
2361 			    (PageWriteback(page) &&
2362 			     (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
2363 			    unlikely(page->mapping != mapping)) {
2364 				unlock_page(page);
2365 				continue;
2366 			}
2367 
2368 			wait_on_page_writeback(page);
2369 			BUG_ON(PageWriteback(page));
2370 
2371 			if (mpd->map.m_len == 0)
2372 				mpd->first_page = page->index;
2373 			mpd->next_page = page->index + 1;
2374 			/* Add all dirty buffers to mpd */
2375 			lblk = ((ext4_lblk_t)page->index) <<
2376 				(PAGE_CACHE_SHIFT - blkbits);
2377 			head = page_buffers(page);
2378 			err = mpage_process_page_bufs(mpd, head, head, lblk);
2379 			if (err <= 0)
2380 				goto out;
2381 			err = 0;
2382 			left--;
2383 		}
2384 		pagevec_release(&pvec);
2385 		cond_resched();
2386 	}
2387 	return 0;
2388 out:
2389 	pagevec_release(&pvec);
2390 	return err;
2391 }
2392 
2393 static int __writepage(struct page *page, struct writeback_control *wbc,
2394 		       void *data)
2395 {
2396 	struct address_space *mapping = data;
2397 	int ret = ext4_writepage(page, wbc);
2398 	mapping_set_error(mapping, ret);
2399 	return ret;
2400 }
2401 
2402 static int ext4_writepages(struct address_space *mapping,
2403 			   struct writeback_control *wbc)
2404 {
2405 	pgoff_t	writeback_index = 0;
2406 	long nr_to_write = wbc->nr_to_write;
2407 	int range_whole = 0;
2408 	int cycled = 1;
2409 	handle_t *handle = NULL;
2410 	struct mpage_da_data mpd;
2411 	struct inode *inode = mapping->host;
2412 	int needed_blocks, rsv_blocks = 0, ret = 0;
2413 	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2414 	bool done;
2415 	struct blk_plug plug;
2416 	bool give_up_on_write = false;
2417 
2418 	trace_ext4_writepages(inode, wbc);
2419 
2420 	/*
2421 	 * No pages to write? This is mainly a kludge to avoid starting
2422 	 * a transaction for special inodes like journal inode on last iput()
2423 	 * because that could violate lock ordering on umount
2424 	 */
2425 	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2426 		goto out_writepages;
2427 
2428 	if (ext4_should_journal_data(inode)) {
2429 		struct blk_plug plug;
2430 
2431 		blk_start_plug(&plug);
2432 		ret = write_cache_pages(mapping, wbc, __writepage, mapping);
2433 		blk_finish_plug(&plug);
2434 		goto out_writepages;
2435 	}
2436 
2437 	/*
2438 	 * If the filesystem has aborted, it is read-only, so return
2439 	 * right away instead of dumping stack traces later on that
2440 	 * will obscure the real source of the problem.  We test
2441 	 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
2442 	 * the latter could be true if the filesystem is mounted
2443 	 * read-only, and in that case, ext4_writepages should
2444 	 * *never* be called, so if that ever happens, we would want
2445 	 * the stack trace.
2446 	 */
2447 	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) {
2448 		ret = -EROFS;
2449 		goto out_writepages;
2450 	}
2451 
2452 	if (ext4_should_dioread_nolock(inode)) {
2453 		/*
2454 		 * We may need to convert up to one extent per block in
2455 		 * the page and we may dirty the inode.
2456 		 */
2457 		rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits);
2458 	}
2459 
2460 	/*
2461 	 * If we have inline data and arrive here, it means that
2462 	 * we will soon create the block for the 1st page, so
2463 	 * we'd better clear the inline data here.
2464 	 */
2465 	if (ext4_has_inline_data(inode)) {
2466 		/* Just inode will be modified... */
2467 		handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2468 		if (IS_ERR(handle)) {
2469 			ret = PTR_ERR(handle);
2470 			goto out_writepages;
2471 		}
2472 		BUG_ON(ext4_test_inode_state(inode,
2473 				EXT4_STATE_MAY_INLINE_DATA));
2474 		ext4_destroy_inline_data(handle, inode);
2475 		ext4_journal_stop(handle);
2476 	}
2477 
2478 	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2479 		range_whole = 1;
2480 
2481 	if (wbc->range_cyclic) {
2482 		writeback_index = mapping->writeback_index;
2483 		if (writeback_index)
2484 			cycled = 0;
2485 		mpd.first_page = writeback_index;
2486 		mpd.last_page = -1;
2487 	} else {
2488 		mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT;
2489 		mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT;
2490 	}
2491 
2492 	mpd.inode = inode;
2493 	mpd.wbc = wbc;
2494 	ext4_io_submit_init(&mpd.io_submit, wbc);
2495 retry:
2496 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2497 		tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
2498 	done = false;
2499 	blk_start_plug(&plug);
2500 	while (!done && mpd.first_page <= mpd.last_page) {
2501 		/* For each extent of pages we use new io_end */
2502 		mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2503 		if (!mpd.io_submit.io_end) {
2504 			ret = -ENOMEM;
2505 			break;
2506 		}
2507 
2508 		/*
2509 		 * We have two constraints: We find one extent to map and we
2510 		 * must always write out whole page (makes a difference when
2511 		 * blocksize < pagesize) so that we don't block on IO when we
2512 		 * try to write out the rest of the page. Journalled mode is
2513 		 * not supported by delalloc.
2514 		 */
2515 		BUG_ON(ext4_should_journal_data(inode));
2516 		needed_blocks = ext4_da_writepages_trans_blocks(inode);
2517 
2518 		/* start a new transaction */
2519 		handle = ext4_journal_start_with_reserve(inode,
2520 				EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
2521 		if (IS_ERR(handle)) {
2522 			ret = PTR_ERR(handle);
2523 			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2524 			       "%ld pages, ino %lu; err %d", __func__,
2525 				wbc->nr_to_write, inode->i_ino, ret);
2526 			/* Release allocated io_end */
2527 			ext4_put_io_end(mpd.io_submit.io_end);
2528 			break;
2529 		}
2530 
2531 		trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
2532 		ret = mpage_prepare_extent_to_map(&mpd);
2533 		if (!ret) {
2534 			if (mpd.map.m_len)
2535 				ret = mpage_map_and_submit_extent(handle, &mpd,
2536 					&give_up_on_write);
2537 			else {
2538 				/*
2539 				 * We scanned the whole range (or exhausted
2540 				 * nr_to_write), submitted what was mapped and
2541 				 * didn't find anything needing mapping. We are
2542 				 * done.
2543 				 */
2544 				done = true;
2545 			}
2546 		}
2547 		ext4_journal_stop(handle);
2548 		/* Submit prepared bio */
2549 		ext4_io_submit(&mpd.io_submit);
2550 		/* Unlock pages we didn't use */
2551 		mpage_release_unused_pages(&mpd, give_up_on_write);
2552 		/* Drop our io_end reference we got from init */
2553 		ext4_put_io_end(mpd.io_submit.io_end);
2554 
2555 		if (ret == -ENOSPC && sbi->s_journal) {
2556 			/*
2557 			 * Commit the transaction which would
2558 			 * free blocks released in the transaction
2559 			 * and try again
2560 			 */
2561 			jbd2_journal_force_commit_nested(sbi->s_journal);
2562 			ret = 0;
2563 			continue;
2564 		}
2565 		/* Fatal error - ENOMEM, EIO... */
2566 		if (ret)
2567 			break;
2568 	}
2569 	blk_finish_plug(&plug);
2570 	if (!ret && !cycled && wbc->nr_to_write > 0) {
2571 		cycled = 1;
2572 		mpd.last_page = writeback_index - 1;
2573 		mpd.first_page = 0;
2574 		goto retry;
2575 	}
2576 
2577 	/* Update index */
2578 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2579 		/*
2580 		 * Set the writeback_index so that range_cyclic
2581 		 * mode will write it back later
2582 		 */
2583 		mapping->writeback_index = mpd.first_page;
2584 
2585 out_writepages:
2586 	trace_ext4_writepages_result(inode, wbc, ret,
2587 				     nr_to_write - wbc->nr_to_write);
2588 	return ret;
2589 }
2590 
2591 static int ext4_nonda_switch(struct super_block *sb)
2592 {
2593 	s64 free_clusters, dirty_clusters;
2594 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2595 
2596 	/*
2597 	 * switch to non delalloc mode if we are running low
2598 	 * on free block. The free block accounting via percpu
2599 	 * counters can get slightly wrong with percpu_counter_batch getting
2600 	 * accumulated on each CPU without updating global counters
2601 	 * Delalloc need an accurate free block accounting. So switch
2602 	 * to non delalloc when we are near to error range.
2603 	 */
2604 	free_clusters =
2605 		percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2606 	dirty_clusters =
2607 		percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2608 	/*
2609 	 * Start pushing delalloc when 1/2 of free blocks are dirty.
2610 	 */
2611 	if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
2612 		try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
2613 
2614 	if (2 * free_clusters < 3 * dirty_clusters ||
2615 	    free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
2616 		/*
2617 		 * free block count is less than 150% of dirty blocks
2618 		 * or free blocks is less than watermark
2619 		 */
2620 		return 1;
2621 	}
2622 	return 0;
2623 }
2624 
2625 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2626 			       loff_t pos, unsigned len, unsigned flags,
2627 			       struct page **pagep, void **fsdata)
2628 {
2629 	int ret, retries = 0;
2630 	struct page *page;
2631 	pgoff_t index;
2632 	struct inode *inode = mapping->host;
2633 	handle_t *handle;
2634 
2635 	index = pos >> PAGE_CACHE_SHIFT;
2636 
2637 	if (ext4_nonda_switch(inode->i_sb)) {
2638 		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2639 		return ext4_write_begin(file, mapping, pos,
2640 					len, flags, pagep, fsdata);
2641 	}
2642 	*fsdata = (void *)0;
2643 	trace_ext4_da_write_begin(inode, pos, len, flags);
2644 
2645 	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
2646 		ret = ext4_da_write_inline_data_begin(mapping, inode,
2647 						      pos, len, flags,
2648 						      pagep, fsdata);
2649 		if (ret < 0)
2650 			return ret;
2651 		if (ret == 1)
2652 			return 0;
2653 	}
2654 
2655 	/*
2656 	 * grab_cache_page_write_begin() can take a long time if the
2657 	 * system is thrashing due to memory pressure, or if the page
2658 	 * is being written back.  So grab it first before we start
2659 	 * the transaction handle.  This also allows us to allocate
2660 	 * the page (if needed) without using GFP_NOFS.
2661 	 */
2662 retry_grab:
2663 	page = grab_cache_page_write_begin(mapping, index, flags);
2664 	if (!page)
2665 		return -ENOMEM;
2666 	unlock_page(page);
2667 
2668 	/*
2669 	 * With delayed allocation, we don't log the i_disksize update
2670 	 * if there is delayed block allocation. But we still need
2671 	 * to journalling the i_disksize update if writes to the end
2672 	 * of file which has an already mapped buffer.
2673 	 */
2674 retry_journal:
2675 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1);
2676 	if (IS_ERR(handle)) {
2677 		page_cache_release(page);
2678 		return PTR_ERR(handle);
2679 	}
2680 
2681 	lock_page(page);
2682 	if (page->mapping != mapping) {
2683 		/* The page got truncated from under us */
2684 		unlock_page(page);
2685 		page_cache_release(page);
2686 		ext4_journal_stop(handle);
2687 		goto retry_grab;
2688 	}
2689 	/* In case writeback began while the page was unlocked */
2690 	wait_for_stable_page(page);
2691 
2692 	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
2693 	if (ret < 0) {
2694 		unlock_page(page);
2695 		ext4_journal_stop(handle);
2696 		/*
2697 		 * block_write_begin may have instantiated a few blocks
2698 		 * outside i_size.  Trim these off again. Don't need
2699 		 * i_size_read because we hold i_mutex.
2700 		 */
2701 		if (pos + len > inode->i_size)
2702 			ext4_truncate_failed_write(inode);
2703 
2704 		if (ret == -ENOSPC &&
2705 		    ext4_should_retry_alloc(inode->i_sb, &retries))
2706 			goto retry_journal;
2707 
2708 		page_cache_release(page);
2709 		return ret;
2710 	}
2711 
2712 	*pagep = page;
2713 	return ret;
2714 }
2715 
2716 /*
2717  * Check if we should update i_disksize
2718  * when write to the end of file but not require block allocation
2719  */
2720 static int ext4_da_should_update_i_disksize(struct page *page,
2721 					    unsigned long offset)
2722 {
2723 	struct buffer_head *bh;
2724 	struct inode *inode = page->mapping->host;
2725 	unsigned int idx;
2726 	int i;
2727 
2728 	bh = page_buffers(page);
2729 	idx = offset >> inode->i_blkbits;
2730 
2731 	for (i = 0; i < idx; i++)
2732 		bh = bh->b_this_page;
2733 
2734 	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
2735 		return 0;
2736 	return 1;
2737 }
2738 
2739 static int ext4_da_write_end(struct file *file,
2740 			     struct address_space *mapping,
2741 			     loff_t pos, unsigned len, unsigned copied,
2742 			     struct page *page, void *fsdata)
2743 {
2744 	struct inode *inode = mapping->host;
2745 	int ret = 0, ret2;
2746 	handle_t *handle = ext4_journal_current_handle();
2747 	loff_t new_i_size;
2748 	unsigned long start, end;
2749 	int write_mode = (int)(unsigned long)fsdata;
2750 
2751 	if (write_mode == FALL_BACK_TO_NONDELALLOC)
2752 		return ext4_write_end(file, mapping, pos,
2753 				      len, copied, page, fsdata);
2754 
2755 	trace_ext4_da_write_end(inode, pos, len, copied);
2756 	start = pos & (PAGE_CACHE_SIZE - 1);
2757 	end = start + copied - 1;
2758 
2759 	/*
2760 	 * generic_write_end() will run mark_inode_dirty() if i_size
2761 	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
2762 	 * into that.
2763 	 */
2764 	new_i_size = pos + copied;
2765 	if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
2766 		if (ext4_has_inline_data(inode) ||
2767 		    ext4_da_should_update_i_disksize(page, end)) {
2768 			down_write(&EXT4_I(inode)->i_data_sem);
2769 			if (new_i_size > EXT4_I(inode)->i_disksize)
2770 				EXT4_I(inode)->i_disksize = new_i_size;
2771 			up_write(&EXT4_I(inode)->i_data_sem);
2772 			/* We need to mark inode dirty even if
2773 			 * new_i_size is less that inode->i_size
2774 			 * bu greater than i_disksize.(hint delalloc)
2775 			 */
2776 			ext4_mark_inode_dirty(handle, inode);
2777 		}
2778 	}
2779 
2780 	if (write_mode != CONVERT_INLINE_DATA &&
2781 	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
2782 	    ext4_has_inline_data(inode))
2783 		ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
2784 						     page);
2785 	else
2786 		ret2 = generic_write_end(file, mapping, pos, len, copied,
2787 							page, fsdata);
2788 
2789 	copied = ret2;
2790 	if (ret2 < 0)
2791 		ret = ret2;
2792 	ret2 = ext4_journal_stop(handle);
2793 	if (!ret)
2794 		ret = ret2;
2795 
2796 	return ret ? ret : copied;
2797 }
2798 
2799 static void ext4_da_invalidatepage(struct page *page, unsigned int offset,
2800 				   unsigned int length)
2801 {
2802 	/*
2803 	 * Drop reserved blocks
2804 	 */
2805 	BUG_ON(!PageLocked(page));
2806 	if (!page_has_buffers(page))
2807 		goto out;
2808 
2809 	ext4_da_page_release_reservation(page, offset, length);
2810 
2811 out:
2812 	ext4_invalidatepage(page, offset, length);
2813 
2814 	return;
2815 }
2816 
2817 /*
2818  * Force all delayed allocation blocks to be allocated for a given inode.
2819  */
2820 int ext4_alloc_da_blocks(struct inode *inode)
2821 {
2822 	trace_ext4_alloc_da_blocks(inode);
2823 
2824 	if (!EXT4_I(inode)->i_reserved_data_blocks &&
2825 	    !EXT4_I(inode)->i_reserved_meta_blocks)
2826 		return 0;
2827 
2828 	/*
2829 	 * We do something simple for now.  The filemap_flush() will
2830 	 * also start triggering a write of the data blocks, which is
2831 	 * not strictly speaking necessary (and for users of
2832 	 * laptop_mode, not even desirable).  However, to do otherwise
2833 	 * would require replicating code paths in:
2834 	 *
2835 	 * ext4_writepages() ->
2836 	 *    write_cache_pages() ---> (via passed in callback function)
2837 	 *        __mpage_da_writepage() -->
2838 	 *           mpage_add_bh_to_extent()
2839 	 *           mpage_da_map_blocks()
2840 	 *
2841 	 * The problem is that write_cache_pages(), located in
2842 	 * mm/page-writeback.c, marks pages clean in preparation for
2843 	 * doing I/O, which is not desirable if we're not planning on
2844 	 * doing I/O at all.
2845 	 *
2846 	 * We could call write_cache_pages(), and then redirty all of
2847 	 * the pages by calling redirty_page_for_writepage() but that
2848 	 * would be ugly in the extreme.  So instead we would need to
2849 	 * replicate parts of the code in the above functions,
2850 	 * simplifying them because we wouldn't actually intend to
2851 	 * write out the pages, but rather only collect contiguous
2852 	 * logical block extents, call the multi-block allocator, and
2853 	 * then update the buffer heads with the block allocations.
2854 	 *
2855 	 * For now, though, we'll cheat by calling filemap_flush(),
2856 	 * which will map the blocks, and start the I/O, but not
2857 	 * actually wait for the I/O to complete.
2858 	 */
2859 	return filemap_flush(inode->i_mapping);
2860 }
2861 
2862 /*
2863  * bmap() is special.  It gets used by applications such as lilo and by
2864  * the swapper to find the on-disk block of a specific piece of data.
2865  *
2866  * Naturally, this is dangerous if the block concerned is still in the
2867  * journal.  If somebody makes a swapfile on an ext4 data-journaling
2868  * filesystem and enables swap, then they may get a nasty shock when the
2869  * data getting swapped to that swapfile suddenly gets overwritten by
2870  * the original zero's written out previously to the journal and
2871  * awaiting writeback in the kernel's buffer cache.
2872  *
2873  * So, if we see any bmap calls here on a modified, data-journaled file,
2874  * take extra steps to flush any blocks which might be in the cache.
2875  */
2876 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2877 {
2878 	struct inode *inode = mapping->host;
2879 	journal_t *journal;
2880 	int err;
2881 
2882 	/*
2883 	 * We can get here for an inline file via the FIBMAP ioctl
2884 	 */
2885 	if (ext4_has_inline_data(inode))
2886 		return 0;
2887 
2888 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
2889 			test_opt(inode->i_sb, DELALLOC)) {
2890 		/*
2891 		 * With delalloc we want to sync the file
2892 		 * so that we can make sure we allocate
2893 		 * blocks for file
2894 		 */
2895 		filemap_write_and_wait(mapping);
2896 	}
2897 
2898 	if (EXT4_JOURNAL(inode) &&
2899 	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
2900 		/*
2901 		 * This is a REALLY heavyweight approach, but the use of
2902 		 * bmap on dirty files is expected to be extremely rare:
2903 		 * only if we run lilo or swapon on a freshly made file
2904 		 * do we expect this to happen.
2905 		 *
2906 		 * (bmap requires CAP_SYS_RAWIO so this does not
2907 		 * represent an unprivileged user DOS attack --- we'd be
2908 		 * in trouble if mortal users could trigger this path at
2909 		 * will.)
2910 		 *
2911 		 * NB. EXT4_STATE_JDATA is not set on files other than
2912 		 * regular files.  If somebody wants to bmap a directory
2913 		 * or symlink and gets confused because the buffer
2914 		 * hasn't yet been flushed to disk, they deserve
2915 		 * everything they get.
2916 		 */
2917 
2918 		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
2919 		journal = EXT4_JOURNAL(inode);
2920 		jbd2_journal_lock_updates(journal);
2921 		err = jbd2_journal_flush(journal);
2922 		jbd2_journal_unlock_updates(journal);
2923 
2924 		if (err)
2925 			return 0;
2926 	}
2927 
2928 	return generic_block_bmap(mapping, block, ext4_get_block);
2929 }
2930 
2931 static int ext4_readpage(struct file *file, struct page *page)
2932 {
2933 	int ret = -EAGAIN;
2934 	struct inode *inode = page->mapping->host;
2935 
2936 	trace_ext4_readpage(page);
2937 
2938 	if (ext4_has_inline_data(inode))
2939 		ret = ext4_readpage_inline(inode, page);
2940 
2941 	if (ret == -EAGAIN)
2942 		return mpage_readpage(page, ext4_get_block);
2943 
2944 	return ret;
2945 }
2946 
2947 static int
2948 ext4_readpages(struct file *file, struct address_space *mapping,
2949 		struct list_head *pages, unsigned nr_pages)
2950 {
2951 	struct inode *inode = mapping->host;
2952 
2953 	/* If the file has inline data, no need to do readpages. */
2954 	if (ext4_has_inline_data(inode))
2955 		return 0;
2956 
2957 	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
2958 }
2959 
2960 static void ext4_invalidatepage(struct page *page, unsigned int offset,
2961 				unsigned int length)
2962 {
2963 	trace_ext4_invalidatepage(page, offset, length);
2964 
2965 	/* No journalling happens on data buffers when this function is used */
2966 	WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
2967 
2968 	block_invalidatepage(page, offset, length);
2969 }
2970 
2971 static int __ext4_journalled_invalidatepage(struct page *page,
2972 					    unsigned int offset,
2973 					    unsigned int length)
2974 {
2975 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2976 
2977 	trace_ext4_journalled_invalidatepage(page, offset, length);
2978 
2979 	/*
2980 	 * If it's a full truncate we just forget about the pending dirtying
2981 	 */
2982 	if (offset == 0 && length == PAGE_CACHE_SIZE)
2983 		ClearPageChecked(page);
2984 
2985 	return jbd2_journal_invalidatepage(journal, page, offset, length);
2986 }
2987 
2988 /* Wrapper for aops... */
2989 static void ext4_journalled_invalidatepage(struct page *page,
2990 					   unsigned int offset,
2991 					   unsigned int length)
2992 {
2993 	WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
2994 }
2995 
2996 static int ext4_releasepage(struct page *page, gfp_t wait)
2997 {
2998 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2999 
3000 	trace_ext4_releasepage(page);
3001 
3002 	/* Page has dirty journalled data -> cannot release */
3003 	if (PageChecked(page))
3004 		return 0;
3005 	if (journal)
3006 		return jbd2_journal_try_to_free_buffers(journal, page, wait);
3007 	else
3008 		return try_to_free_buffers(page);
3009 }
3010 
3011 /*
3012  * ext4_get_block used when preparing for a DIO write or buffer write.
3013  * We allocate an uinitialized extent if blocks haven't been allocated.
3014  * The extent will be converted to initialized after the IO is complete.
3015  */
3016 int ext4_get_block_write(struct inode *inode, sector_t iblock,
3017 		   struct buffer_head *bh_result, int create)
3018 {
3019 	ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
3020 		   inode->i_ino, create);
3021 	return _ext4_get_block(inode, iblock, bh_result,
3022 			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
3023 }
3024 
3025 static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
3026 		   struct buffer_head *bh_result, int create)
3027 {
3028 	ext4_debug("ext4_get_block_write_nolock: inode %lu, create flag %d\n",
3029 		   inode->i_ino, create);
3030 	return _ext4_get_block(inode, iblock, bh_result,
3031 			       EXT4_GET_BLOCKS_NO_LOCK);
3032 }
3033 
3034 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
3035 			    ssize_t size, void *private)
3036 {
3037         ext4_io_end_t *io_end = iocb->private;
3038 
3039 	/* if not async direct IO just return */
3040 	if (!io_end)
3041 		return;
3042 
3043 	ext_debug("ext4_end_io_dio(): io_end 0x%p "
3044 		  "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
3045  		  iocb->private, io_end->inode->i_ino, iocb, offset,
3046 		  size);
3047 
3048 	iocb->private = NULL;
3049 	io_end->offset = offset;
3050 	io_end->size = size;
3051 	ext4_put_io_end(io_end);
3052 }
3053 
3054 /*
3055  * For ext4 extent files, ext4 will do direct-io write to holes,
3056  * preallocated extents, and those write extend the file, no need to
3057  * fall back to buffered IO.
3058  *
3059  * For holes, we fallocate those blocks, mark them as uninitialized
3060  * If those blocks were preallocated, we mark sure they are split, but
3061  * still keep the range to write as uninitialized.
3062  *
3063  * The unwritten extents will be converted to written when DIO is completed.
3064  * For async direct IO, since the IO may still pending when return, we
3065  * set up an end_io call back function, which will do the conversion
3066  * when async direct IO completed.
3067  *
3068  * If the O_DIRECT write will extend the file then add this inode to the
3069  * orphan list.  So recovery will truncate it back to the original size
3070  * if the machine crashes during the write.
3071  *
3072  */
3073 static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
3074 			      const struct iovec *iov, loff_t offset,
3075 			      unsigned long nr_segs)
3076 {
3077 	struct file *file = iocb->ki_filp;
3078 	struct inode *inode = file->f_mapping->host;
3079 	ssize_t ret;
3080 	size_t count = iov_length(iov, nr_segs);
3081 	int overwrite = 0;
3082 	get_block_t *get_block_func = NULL;
3083 	int dio_flags = 0;
3084 	loff_t final_size = offset + count;
3085 	ext4_io_end_t *io_end = NULL;
3086 
3087 	/* Use the old path for reads and writes beyond i_size. */
3088 	if (rw != WRITE || final_size > inode->i_size)
3089 		return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3090 
3091 	BUG_ON(iocb->private == NULL);
3092 
3093 	/*
3094 	 * Make all waiters for direct IO properly wait also for extent
3095 	 * conversion. This also disallows race between truncate() and
3096 	 * overwrite DIO as i_dio_count needs to be incremented under i_mutex.
3097 	 */
3098 	if (rw == WRITE)
3099 		atomic_inc(&inode->i_dio_count);
3100 
3101 	/* If we do a overwrite dio, i_mutex locking can be released */
3102 	overwrite = *((int *)iocb->private);
3103 
3104 	if (overwrite) {
3105 		down_read(&EXT4_I(inode)->i_data_sem);
3106 		mutex_unlock(&inode->i_mutex);
3107 	}
3108 
3109 	/*
3110 	 * We could direct write to holes and fallocate.
3111 	 *
3112 	 * Allocated blocks to fill the hole are marked as
3113 	 * uninitialized to prevent parallel buffered read to expose
3114 	 * the stale data before DIO complete the data IO.
3115 	 *
3116 	 * As to previously fallocated extents, ext4 get_block will
3117 	 * just simply mark the buffer mapped but still keep the
3118 	 * extents uninitialized.
3119 	 *
3120 	 * For non AIO case, we will convert those unwritten extents
3121 	 * to written after return back from blockdev_direct_IO.
3122 	 *
3123 	 * For async DIO, the conversion needs to be deferred when the
3124 	 * IO is completed. The ext4 end_io callback function will be
3125 	 * called to take care of the conversion work.  Here for async
3126 	 * case, we allocate an io_end structure to hook to the iocb.
3127 	 */
3128 	iocb->private = NULL;
3129 	ext4_inode_aio_set(inode, NULL);
3130 	if (!is_sync_kiocb(iocb)) {
3131 		io_end = ext4_init_io_end(inode, GFP_NOFS);
3132 		if (!io_end) {
3133 			ret = -ENOMEM;
3134 			goto retake_lock;
3135 		}
3136 		/*
3137 		 * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
3138 		 */
3139 		iocb->private = ext4_get_io_end(io_end);
3140 		/*
3141 		 * we save the io structure for current async direct
3142 		 * IO, so that later ext4_map_blocks() could flag the
3143 		 * io structure whether there is a unwritten extents
3144 		 * needs to be converted when IO is completed.
3145 		 */
3146 		ext4_inode_aio_set(inode, io_end);
3147 	}
3148 
3149 	if (overwrite) {
3150 		get_block_func = ext4_get_block_write_nolock;
3151 	} else {
3152 		get_block_func = ext4_get_block_write;
3153 		dio_flags = DIO_LOCKING;
3154 	}
3155 	ret = __blockdev_direct_IO(rw, iocb, inode,
3156 				   inode->i_sb->s_bdev, iov,
3157 				   offset, nr_segs,
3158 				   get_block_func,
3159 				   ext4_end_io_dio,
3160 				   NULL,
3161 				   dio_flags);
3162 
3163 	/*
3164 	 * Put our reference to io_end. This can free the io_end structure e.g.
3165 	 * in sync IO case or in case of error. It can even perform extent
3166 	 * conversion if all bios we submitted finished before we got here.
3167 	 * Note that in that case iocb->private can be already set to NULL
3168 	 * here.
3169 	 */
3170 	if (io_end) {
3171 		ext4_inode_aio_set(inode, NULL);
3172 		ext4_put_io_end(io_end);
3173 		/*
3174 		 * When no IO was submitted ext4_end_io_dio() was not
3175 		 * called so we have to put iocb's reference.
3176 		 */
3177 		if (ret <= 0 && ret != -EIOCBQUEUED && iocb->private) {
3178 			WARN_ON(iocb->private != io_end);
3179 			WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
3180 			ext4_put_io_end(io_end);
3181 			iocb->private = NULL;
3182 		}
3183 	}
3184 	if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
3185 						EXT4_STATE_DIO_UNWRITTEN)) {
3186 		int err;
3187 		/*
3188 		 * for non AIO case, since the IO is already
3189 		 * completed, we could do the conversion right here
3190 		 */
3191 		err = ext4_convert_unwritten_extents(NULL, inode,
3192 						     offset, ret);
3193 		if (err < 0)
3194 			ret = err;
3195 		ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3196 	}
3197 
3198 retake_lock:
3199 	if (rw == WRITE)
3200 		inode_dio_done(inode);
3201 	/* take i_mutex locking again if we do a ovewrite dio */
3202 	if (overwrite) {
3203 		up_read(&EXT4_I(inode)->i_data_sem);
3204 		mutex_lock(&inode->i_mutex);
3205 	}
3206 
3207 	return ret;
3208 }
3209 
3210 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
3211 			      const struct iovec *iov, loff_t offset,
3212 			      unsigned long nr_segs)
3213 {
3214 	struct file *file = iocb->ki_filp;
3215 	struct inode *inode = file->f_mapping->host;
3216 	ssize_t ret;
3217 
3218 	/*
3219 	 * If we are doing data journalling we don't support O_DIRECT
3220 	 */
3221 	if (ext4_should_journal_data(inode))
3222 		return 0;
3223 
3224 	/* Let buffer I/O handle the inline data case. */
3225 	if (ext4_has_inline_data(inode))
3226 		return 0;
3227 
3228 	trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
3229 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3230 		ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
3231 	else
3232 		ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3233 	trace_ext4_direct_IO_exit(inode, offset,
3234 				iov_length(iov, nr_segs), rw, ret);
3235 	return ret;
3236 }
3237 
3238 /*
3239  * Pages can be marked dirty completely asynchronously from ext4's journalling
3240  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
3241  * much here because ->set_page_dirty is called under VFS locks.  The page is
3242  * not necessarily locked.
3243  *
3244  * We cannot just dirty the page and leave attached buffers clean, because the
3245  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
3246  * or jbddirty because all the journalling code will explode.
3247  *
3248  * So what we do is to mark the page "pending dirty" and next time writepage
3249  * is called, propagate that into the buffers appropriately.
3250  */
3251 static int ext4_journalled_set_page_dirty(struct page *page)
3252 {
3253 	SetPageChecked(page);
3254 	return __set_page_dirty_nobuffers(page);
3255 }
3256 
3257 static const struct address_space_operations ext4_aops = {
3258 	.readpage		= ext4_readpage,
3259 	.readpages		= ext4_readpages,
3260 	.writepage		= ext4_writepage,
3261 	.writepages		= ext4_writepages,
3262 	.write_begin		= ext4_write_begin,
3263 	.write_end		= ext4_write_end,
3264 	.bmap			= ext4_bmap,
3265 	.invalidatepage		= ext4_invalidatepage,
3266 	.releasepage		= ext4_releasepage,
3267 	.direct_IO		= ext4_direct_IO,
3268 	.migratepage		= buffer_migrate_page,
3269 	.is_partially_uptodate  = block_is_partially_uptodate,
3270 	.error_remove_page	= generic_error_remove_page,
3271 };
3272 
3273 static const struct address_space_operations ext4_journalled_aops = {
3274 	.readpage		= ext4_readpage,
3275 	.readpages		= ext4_readpages,
3276 	.writepage		= ext4_writepage,
3277 	.writepages		= ext4_writepages,
3278 	.write_begin		= ext4_write_begin,
3279 	.write_end		= ext4_journalled_write_end,
3280 	.set_page_dirty		= ext4_journalled_set_page_dirty,
3281 	.bmap			= ext4_bmap,
3282 	.invalidatepage		= ext4_journalled_invalidatepage,
3283 	.releasepage		= ext4_releasepage,
3284 	.direct_IO		= ext4_direct_IO,
3285 	.is_partially_uptodate  = block_is_partially_uptodate,
3286 	.error_remove_page	= generic_error_remove_page,
3287 };
3288 
3289 static const struct address_space_operations ext4_da_aops = {
3290 	.readpage		= ext4_readpage,
3291 	.readpages		= ext4_readpages,
3292 	.writepage		= ext4_writepage,
3293 	.writepages		= ext4_writepages,
3294 	.write_begin		= ext4_da_write_begin,
3295 	.write_end		= ext4_da_write_end,
3296 	.bmap			= ext4_bmap,
3297 	.invalidatepage		= ext4_da_invalidatepage,
3298 	.releasepage		= ext4_releasepage,
3299 	.direct_IO		= ext4_direct_IO,
3300 	.migratepage		= buffer_migrate_page,
3301 	.is_partially_uptodate  = block_is_partially_uptodate,
3302 	.error_remove_page	= generic_error_remove_page,
3303 };
3304 
3305 void ext4_set_aops(struct inode *inode)
3306 {
3307 	switch (ext4_inode_journal_mode(inode)) {
3308 	case EXT4_INODE_ORDERED_DATA_MODE:
3309 		ext4_set_inode_state(inode, EXT4_STATE_ORDERED_MODE);
3310 		break;
3311 	case EXT4_INODE_WRITEBACK_DATA_MODE:
3312 		ext4_clear_inode_state(inode, EXT4_STATE_ORDERED_MODE);
3313 		break;
3314 	case EXT4_INODE_JOURNAL_DATA_MODE:
3315 		inode->i_mapping->a_ops = &ext4_journalled_aops;
3316 		return;
3317 	default:
3318 		BUG();
3319 	}
3320 	if (test_opt(inode->i_sb, DELALLOC))
3321 		inode->i_mapping->a_ops = &ext4_da_aops;
3322 	else
3323 		inode->i_mapping->a_ops = &ext4_aops;
3324 }
3325 
3326 /*
3327  * ext4_block_zero_page_range() zeros out a mapping of length 'length'
3328  * starting from file offset 'from'.  The range to be zero'd must
3329  * be contained with in one block.  If the specified range exceeds
3330  * the end of the block it will be shortened to end of the block
3331  * that cooresponds to 'from'
3332  */
3333 static int ext4_block_zero_page_range(handle_t *handle,
3334 		struct address_space *mapping, loff_t from, loff_t length)
3335 {
3336 	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3337 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
3338 	unsigned blocksize, max, pos;
3339 	ext4_lblk_t iblock;
3340 	struct inode *inode = mapping->host;
3341 	struct buffer_head *bh;
3342 	struct page *page;
3343 	int err = 0;
3344 
3345 	page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
3346 				   mapping_gfp_mask(mapping) & ~__GFP_FS);
3347 	if (!page)
3348 		return -ENOMEM;
3349 
3350 	blocksize = inode->i_sb->s_blocksize;
3351 	max = blocksize - (offset & (blocksize - 1));
3352 
3353 	/*
3354 	 * correct length if it does not fall between
3355 	 * 'from' and the end of the block
3356 	 */
3357 	if (length > max || length < 0)
3358 		length = max;
3359 
3360 	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3361 
3362 	if (!page_has_buffers(page))
3363 		create_empty_buffers(page, blocksize, 0);
3364 
3365 	/* Find the buffer that contains "offset" */
3366 	bh = page_buffers(page);
3367 	pos = blocksize;
3368 	while (offset >= pos) {
3369 		bh = bh->b_this_page;
3370 		iblock++;
3371 		pos += blocksize;
3372 	}
3373 	if (buffer_freed(bh)) {
3374 		BUFFER_TRACE(bh, "freed: skip");
3375 		goto unlock;
3376 	}
3377 	if (!buffer_mapped(bh)) {
3378 		BUFFER_TRACE(bh, "unmapped");
3379 		ext4_get_block(inode, iblock, bh, 0);
3380 		/* unmapped? It's a hole - nothing to do */
3381 		if (!buffer_mapped(bh)) {
3382 			BUFFER_TRACE(bh, "still unmapped");
3383 			goto unlock;
3384 		}
3385 	}
3386 
3387 	/* Ok, it's mapped. Make sure it's up-to-date */
3388 	if (PageUptodate(page))
3389 		set_buffer_uptodate(bh);
3390 
3391 	if (!buffer_uptodate(bh)) {
3392 		err = -EIO;
3393 		ll_rw_block(READ, 1, &bh);
3394 		wait_on_buffer(bh);
3395 		/* Uhhuh. Read error. Complain and punt. */
3396 		if (!buffer_uptodate(bh))
3397 			goto unlock;
3398 	}
3399 	if (ext4_should_journal_data(inode)) {
3400 		BUFFER_TRACE(bh, "get write access");
3401 		err = ext4_journal_get_write_access(handle, bh);
3402 		if (err)
3403 			goto unlock;
3404 	}
3405 	zero_user(page, offset, length);
3406 	BUFFER_TRACE(bh, "zeroed end of block");
3407 
3408 	if (ext4_should_journal_data(inode)) {
3409 		err = ext4_handle_dirty_metadata(handle, inode, bh);
3410 	} else {
3411 		err = 0;
3412 		mark_buffer_dirty(bh);
3413 		if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE))
3414 			err = ext4_jbd2_file_inode(handle, inode);
3415 	}
3416 
3417 unlock:
3418 	unlock_page(page);
3419 	page_cache_release(page);
3420 	return err;
3421 }
3422 
3423 /*
3424  * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3425  * up to the end of the block which corresponds to `from'.
3426  * This required during truncate. We need to physically zero the tail end
3427  * of that block so it doesn't yield old data if the file is later grown.
3428  */
3429 int ext4_block_truncate_page(handle_t *handle,
3430 		struct address_space *mapping, loff_t from)
3431 {
3432 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
3433 	unsigned length;
3434 	unsigned blocksize;
3435 	struct inode *inode = mapping->host;
3436 
3437 	blocksize = inode->i_sb->s_blocksize;
3438 	length = blocksize - (offset & (blocksize - 1));
3439 
3440 	return ext4_block_zero_page_range(handle, mapping, from, length);
3441 }
3442 
3443 int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
3444 			     loff_t lstart, loff_t length)
3445 {
3446 	struct super_block *sb = inode->i_sb;
3447 	struct address_space *mapping = inode->i_mapping;
3448 	unsigned partial_start, partial_end;
3449 	ext4_fsblk_t start, end;
3450 	loff_t byte_end = (lstart + length - 1);
3451 	int err = 0;
3452 
3453 	partial_start = lstart & (sb->s_blocksize - 1);
3454 	partial_end = byte_end & (sb->s_blocksize - 1);
3455 
3456 	start = lstart >> sb->s_blocksize_bits;
3457 	end = byte_end >> sb->s_blocksize_bits;
3458 
3459 	/* Handle partial zero within the single block */
3460 	if (start == end &&
3461 	    (partial_start || (partial_end != sb->s_blocksize - 1))) {
3462 		err = ext4_block_zero_page_range(handle, mapping,
3463 						 lstart, length);
3464 		return err;
3465 	}
3466 	/* Handle partial zero out on the start of the range */
3467 	if (partial_start) {
3468 		err = ext4_block_zero_page_range(handle, mapping,
3469 						 lstart, sb->s_blocksize);
3470 		if (err)
3471 			return err;
3472 	}
3473 	/* Handle partial zero out on the end of the range */
3474 	if (partial_end != sb->s_blocksize - 1)
3475 		err = ext4_block_zero_page_range(handle, mapping,
3476 						 byte_end - partial_end,
3477 						 partial_end + 1);
3478 	return err;
3479 }
3480 
3481 int ext4_can_truncate(struct inode *inode)
3482 {
3483 	if (S_ISREG(inode->i_mode))
3484 		return 1;
3485 	if (S_ISDIR(inode->i_mode))
3486 		return 1;
3487 	if (S_ISLNK(inode->i_mode))
3488 		return !ext4_inode_is_fast_symlink(inode);
3489 	return 0;
3490 }
3491 
3492 /*
3493  * ext4_punch_hole: punches a hole in a file by releaseing the blocks
3494  * associated with the given offset and length
3495  *
3496  * @inode:  File inode
3497  * @offset: The offset where the hole will begin
3498  * @len:    The length of the hole
3499  *
3500  * Returns: 0 on success or negative on failure
3501  */
3502 
3503 int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
3504 {
3505 	struct super_block *sb = inode->i_sb;
3506 	ext4_lblk_t first_block, stop_block;
3507 	struct address_space *mapping = inode->i_mapping;
3508 	loff_t first_block_offset, last_block_offset;
3509 	handle_t *handle;
3510 	unsigned int credits;
3511 	int ret = 0;
3512 
3513 	if (!S_ISREG(inode->i_mode))
3514 		return -EOPNOTSUPP;
3515 
3516 	trace_ext4_punch_hole(inode, offset, length, 0);
3517 
3518 	/*
3519 	 * Write out all dirty pages to avoid race conditions
3520 	 * Then release them.
3521 	 */
3522 	if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
3523 		ret = filemap_write_and_wait_range(mapping, offset,
3524 						   offset + length - 1);
3525 		if (ret)
3526 			return ret;
3527 	}
3528 
3529 	mutex_lock(&inode->i_mutex);
3530 	/* It's not possible punch hole on append only file */
3531 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
3532 		ret = -EPERM;
3533 		goto out_mutex;
3534 	}
3535 	if (IS_SWAPFILE(inode)) {
3536 		ret = -ETXTBSY;
3537 		goto out_mutex;
3538 	}
3539 
3540 	/* No need to punch hole beyond i_size */
3541 	if (offset >= inode->i_size)
3542 		goto out_mutex;
3543 
3544 	/*
3545 	 * If the hole extends beyond i_size, set the hole
3546 	 * to end after the page that contains i_size
3547 	 */
3548 	if (offset + length > inode->i_size) {
3549 		length = inode->i_size +
3550 		   PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
3551 		   offset;
3552 	}
3553 
3554 	if (offset & (sb->s_blocksize - 1) ||
3555 	    (offset + length) & (sb->s_blocksize - 1)) {
3556 		/*
3557 		 * Attach jinode to inode for jbd2 if we do any zeroing of
3558 		 * partial block
3559 		 */
3560 		ret = ext4_inode_attach_jinode(inode);
3561 		if (ret < 0)
3562 			goto out_mutex;
3563 
3564 	}
3565 
3566 	first_block_offset = round_up(offset, sb->s_blocksize);
3567 	last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
3568 
3569 	/* Now release the pages and zero block aligned part of pages*/
3570 	if (last_block_offset > first_block_offset)
3571 		truncate_pagecache_range(inode, first_block_offset,
3572 					 last_block_offset);
3573 
3574 	/* Wait all existing dio workers, newcomers will block on i_mutex */
3575 	ext4_inode_block_unlocked_dio(inode);
3576 	inode_dio_wait(inode);
3577 
3578 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3579 		credits = ext4_writepage_trans_blocks(inode);
3580 	else
3581 		credits = ext4_blocks_for_truncate(inode);
3582 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
3583 	if (IS_ERR(handle)) {
3584 		ret = PTR_ERR(handle);
3585 		ext4_std_error(sb, ret);
3586 		goto out_dio;
3587 	}
3588 
3589 	ret = ext4_zero_partial_blocks(handle, inode, offset,
3590 				       length);
3591 	if (ret)
3592 		goto out_stop;
3593 
3594 	first_block = (offset + sb->s_blocksize - 1) >>
3595 		EXT4_BLOCK_SIZE_BITS(sb);
3596 	stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
3597 
3598 	/* If there are no blocks to remove, return now */
3599 	if (first_block >= stop_block)
3600 		goto out_stop;
3601 
3602 	down_write(&EXT4_I(inode)->i_data_sem);
3603 	ext4_discard_preallocations(inode);
3604 
3605 	ret = ext4_es_remove_extent(inode, first_block,
3606 				    stop_block - first_block);
3607 	if (ret) {
3608 		up_write(&EXT4_I(inode)->i_data_sem);
3609 		goto out_stop;
3610 	}
3611 
3612 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3613 		ret = ext4_ext_remove_space(inode, first_block,
3614 					    stop_block - 1);
3615 	else
3616 		ret = ext4_free_hole_blocks(handle, inode, first_block,
3617 					    stop_block);
3618 
3619 	ext4_discard_preallocations(inode);
3620 	up_write(&EXT4_I(inode)->i_data_sem);
3621 	if (IS_SYNC(inode))
3622 		ext4_handle_sync(handle);
3623 
3624 	/* Now release the pages again to reduce race window */
3625 	if (last_block_offset > first_block_offset)
3626 		truncate_pagecache_range(inode, first_block_offset,
3627 					 last_block_offset);
3628 
3629 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
3630 	ext4_mark_inode_dirty(handle, inode);
3631 out_stop:
3632 	ext4_journal_stop(handle);
3633 out_dio:
3634 	ext4_inode_resume_unlocked_dio(inode);
3635 out_mutex:
3636 	mutex_unlock(&inode->i_mutex);
3637 	return ret;
3638 }
3639 
3640 int ext4_inode_attach_jinode(struct inode *inode)
3641 {
3642 	struct ext4_inode_info *ei = EXT4_I(inode);
3643 	struct jbd2_inode *jinode;
3644 
3645 	if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
3646 		return 0;
3647 
3648 	jinode = jbd2_alloc_inode(GFP_KERNEL);
3649 	spin_lock(&inode->i_lock);
3650 	if (!ei->jinode) {
3651 		if (!jinode) {
3652 			spin_unlock(&inode->i_lock);
3653 			return -ENOMEM;
3654 		}
3655 		ei->jinode = jinode;
3656 		jbd2_journal_init_jbd_inode(ei->jinode, inode);
3657 		jinode = NULL;
3658 	}
3659 	spin_unlock(&inode->i_lock);
3660 	if (unlikely(jinode != NULL))
3661 		jbd2_free_inode(jinode);
3662 	return 0;
3663 }
3664 
3665 /*
3666  * ext4_truncate()
3667  *
3668  * We block out ext4_get_block() block instantiations across the entire
3669  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
3670  * simultaneously on behalf of the same inode.
3671  *
3672  * As we work through the truncate and commit bits of it to the journal there
3673  * is one core, guiding principle: the file's tree must always be consistent on
3674  * disk.  We must be able to restart the truncate after a crash.
3675  *
3676  * The file's tree may be transiently inconsistent in memory (although it
3677  * probably isn't), but whenever we close off and commit a journal transaction,
3678  * the contents of (the filesystem + the journal) must be consistent and
3679  * restartable.  It's pretty simple, really: bottom up, right to left (although
3680  * left-to-right works OK too).
3681  *
3682  * Note that at recovery time, journal replay occurs *before* the restart of
3683  * truncate against the orphan inode list.
3684  *
3685  * The committed inode has the new, desired i_size (which is the same as
3686  * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
3687  * that this inode's truncate did not complete and it will again call
3688  * ext4_truncate() to have another go.  So there will be instantiated blocks
3689  * to the right of the truncation point in a crashed ext4 filesystem.  But
3690  * that's fine - as long as they are linked from the inode, the post-crash
3691  * ext4_truncate() run will find them and release them.
3692  */
3693 void ext4_truncate(struct inode *inode)
3694 {
3695 	struct ext4_inode_info *ei = EXT4_I(inode);
3696 	unsigned int credits;
3697 	handle_t *handle;
3698 	struct address_space *mapping = inode->i_mapping;
3699 
3700 	/*
3701 	 * There is a possibility that we're either freeing the inode
3702 	 * or it's a completely new inode. In those cases we might not
3703 	 * have i_mutex locked because it's not necessary.
3704 	 */
3705 	if (!(inode->i_state & (I_NEW|I_FREEING)))
3706 		WARN_ON(!mutex_is_locked(&inode->i_mutex));
3707 	trace_ext4_truncate_enter(inode);
3708 
3709 	if (!ext4_can_truncate(inode))
3710 		return;
3711 
3712 	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3713 
3714 	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
3715 		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
3716 
3717 	if (ext4_has_inline_data(inode)) {
3718 		int has_inline = 1;
3719 
3720 		ext4_inline_data_truncate(inode, &has_inline);
3721 		if (has_inline)
3722 			return;
3723 	}
3724 
3725 	/* If we zero-out tail of the page, we have to create jinode for jbd2 */
3726 	if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
3727 		if (ext4_inode_attach_jinode(inode) < 0)
3728 			return;
3729 	}
3730 
3731 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3732 		credits = ext4_writepage_trans_blocks(inode);
3733 	else
3734 		credits = ext4_blocks_for_truncate(inode);
3735 
3736 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
3737 	if (IS_ERR(handle)) {
3738 		ext4_std_error(inode->i_sb, PTR_ERR(handle));
3739 		return;
3740 	}
3741 
3742 	if (inode->i_size & (inode->i_sb->s_blocksize - 1))
3743 		ext4_block_truncate_page(handle, mapping, inode->i_size);
3744 
3745 	/*
3746 	 * We add the inode to the orphan list, so that if this
3747 	 * truncate spans multiple transactions, and we crash, we will
3748 	 * resume the truncate when the filesystem recovers.  It also
3749 	 * marks the inode dirty, to catch the new size.
3750 	 *
3751 	 * Implication: the file must always be in a sane, consistent
3752 	 * truncatable state while each transaction commits.
3753 	 */
3754 	if (ext4_orphan_add(handle, inode))
3755 		goto out_stop;
3756 
3757 	down_write(&EXT4_I(inode)->i_data_sem);
3758 
3759 	ext4_discard_preallocations(inode);
3760 
3761 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3762 		ext4_ext_truncate(handle, inode);
3763 	else
3764 		ext4_ind_truncate(handle, inode);
3765 
3766 	up_write(&ei->i_data_sem);
3767 
3768 	if (IS_SYNC(inode))
3769 		ext4_handle_sync(handle);
3770 
3771 out_stop:
3772 	/*
3773 	 * If this was a simple ftruncate() and the file will remain alive,
3774 	 * then we need to clear up the orphan record which we created above.
3775 	 * However, if this was a real unlink then we were called by
3776 	 * ext4_delete_inode(), and we allow that function to clean up the
3777 	 * orphan info for us.
3778 	 */
3779 	if (inode->i_nlink)
3780 		ext4_orphan_del(handle, inode);
3781 
3782 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
3783 	ext4_mark_inode_dirty(handle, inode);
3784 	ext4_journal_stop(handle);
3785 
3786 	trace_ext4_truncate_exit(inode);
3787 }
3788 
3789 /*
3790  * ext4_get_inode_loc returns with an extra refcount against the inode's
3791  * underlying buffer_head on success. If 'in_mem' is true, we have all
3792  * data in memory that is needed to recreate the on-disk version of this
3793  * inode.
3794  */
3795 static int __ext4_get_inode_loc(struct inode *inode,
3796 				struct ext4_iloc *iloc, int in_mem)
3797 {
3798 	struct ext4_group_desc	*gdp;
3799 	struct buffer_head	*bh;
3800 	struct super_block	*sb = inode->i_sb;
3801 	ext4_fsblk_t		block;
3802 	int			inodes_per_block, inode_offset;
3803 
3804 	iloc->bh = NULL;
3805 	if (!ext4_valid_inum(sb, inode->i_ino))
3806 		return -EIO;
3807 
3808 	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
3809 	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
3810 	if (!gdp)
3811 		return -EIO;
3812 
3813 	/*
3814 	 * Figure out the offset within the block group inode table
3815 	 */
3816 	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
3817 	inode_offset = ((inode->i_ino - 1) %
3818 			EXT4_INODES_PER_GROUP(sb));
3819 	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
3820 	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
3821 
3822 	bh = sb_getblk(sb, block);
3823 	if (unlikely(!bh))
3824 		return -ENOMEM;
3825 	if (!buffer_uptodate(bh)) {
3826 		lock_buffer(bh);
3827 
3828 		/*
3829 		 * If the buffer has the write error flag, we have failed
3830 		 * to write out another inode in the same block.  In this
3831 		 * case, we don't have to read the block because we may
3832 		 * read the old inode data successfully.
3833 		 */
3834 		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
3835 			set_buffer_uptodate(bh);
3836 
3837 		if (buffer_uptodate(bh)) {
3838 			/* someone brought it uptodate while we waited */
3839 			unlock_buffer(bh);
3840 			goto has_buffer;
3841 		}
3842 
3843 		/*
3844 		 * If we have all information of the inode in memory and this
3845 		 * is the only valid inode in the block, we need not read the
3846 		 * block.
3847 		 */
3848 		if (in_mem) {
3849 			struct buffer_head *bitmap_bh;
3850 			int i, start;
3851 
3852 			start = inode_offset & ~(inodes_per_block - 1);
3853 
3854 			/* Is the inode bitmap in cache? */
3855 			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
3856 			if (unlikely(!bitmap_bh))
3857 				goto make_io;
3858 
3859 			/*
3860 			 * If the inode bitmap isn't in cache then the
3861 			 * optimisation may end up performing two reads instead
3862 			 * of one, so skip it.
3863 			 */
3864 			if (!buffer_uptodate(bitmap_bh)) {
3865 				brelse(bitmap_bh);
3866 				goto make_io;
3867 			}
3868 			for (i = start; i < start + inodes_per_block; i++) {
3869 				if (i == inode_offset)
3870 					continue;
3871 				if (ext4_test_bit(i, bitmap_bh->b_data))
3872 					break;
3873 			}
3874 			brelse(bitmap_bh);
3875 			if (i == start + inodes_per_block) {
3876 				/* all other inodes are free, so skip I/O */
3877 				memset(bh->b_data, 0, bh->b_size);
3878 				set_buffer_uptodate(bh);
3879 				unlock_buffer(bh);
3880 				goto has_buffer;
3881 			}
3882 		}
3883 
3884 make_io:
3885 		/*
3886 		 * If we need to do any I/O, try to pre-readahead extra
3887 		 * blocks from the inode table.
3888 		 */
3889 		if (EXT4_SB(sb)->s_inode_readahead_blks) {
3890 			ext4_fsblk_t b, end, table;
3891 			unsigned num;
3892 			__u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;
3893 
3894 			table = ext4_inode_table(sb, gdp);
3895 			/* s_inode_readahead_blks is always a power of 2 */
3896 			b = block & ~((ext4_fsblk_t) ra_blks - 1);
3897 			if (table > b)
3898 				b = table;
3899 			end = b + ra_blks;
3900 			num = EXT4_INODES_PER_GROUP(sb);
3901 			if (ext4_has_group_desc_csum(sb))
3902 				num -= ext4_itable_unused_count(sb, gdp);
3903 			table += num / inodes_per_block;
3904 			if (end > table)
3905 				end = table;
3906 			while (b <= end)
3907 				sb_breadahead(sb, b++);
3908 		}
3909 
3910 		/*
3911 		 * There are other valid inodes in the buffer, this inode
3912 		 * has in-inode xattrs, or we don't have this inode in memory.
3913 		 * Read the block from disk.
3914 		 */
3915 		trace_ext4_load_inode(inode);
3916 		get_bh(bh);
3917 		bh->b_end_io = end_buffer_read_sync;
3918 		submit_bh(READ | REQ_META | REQ_PRIO, bh);
3919 		wait_on_buffer(bh);
3920 		if (!buffer_uptodate(bh)) {
3921 			EXT4_ERROR_INODE_BLOCK(inode, block,
3922 					       "unable to read itable block");
3923 			brelse(bh);
3924 			return -EIO;
3925 		}
3926 	}
3927 has_buffer:
3928 	iloc->bh = bh;
3929 	return 0;
3930 }
3931 
3932 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
3933 {
3934 	/* We have all inode data except xattrs in memory here. */
3935 	return __ext4_get_inode_loc(inode, iloc,
3936 		!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
3937 }
3938 
3939 void ext4_set_inode_flags(struct inode *inode)
3940 {
3941 	unsigned int flags = EXT4_I(inode)->i_flags;
3942 	unsigned int new_fl = 0;
3943 
3944 	if (flags & EXT4_SYNC_FL)
3945 		new_fl |= S_SYNC;
3946 	if (flags & EXT4_APPEND_FL)
3947 		new_fl |= S_APPEND;
3948 	if (flags & EXT4_IMMUTABLE_FL)
3949 		new_fl |= S_IMMUTABLE;
3950 	if (flags & EXT4_NOATIME_FL)
3951 		new_fl |= S_NOATIME;
3952 	if (flags & EXT4_DIRSYNC_FL)
3953 		new_fl |= S_DIRSYNC;
3954 	inode_set_flags(inode, new_fl,
3955 			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
3956 }
3957 
3958 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
3959 void ext4_get_inode_flags(struct ext4_inode_info *ei)
3960 {
3961 	unsigned int vfs_fl;
3962 	unsigned long old_fl, new_fl;
3963 
3964 	do {
3965 		vfs_fl = ei->vfs_inode.i_flags;
3966 		old_fl = ei->i_flags;
3967 		new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
3968 				EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
3969 				EXT4_DIRSYNC_FL);
3970 		if (vfs_fl & S_SYNC)
3971 			new_fl |= EXT4_SYNC_FL;
3972 		if (vfs_fl & S_APPEND)
3973 			new_fl |= EXT4_APPEND_FL;
3974 		if (vfs_fl & S_IMMUTABLE)
3975 			new_fl |= EXT4_IMMUTABLE_FL;
3976 		if (vfs_fl & S_NOATIME)
3977 			new_fl |= EXT4_NOATIME_FL;
3978 		if (vfs_fl & S_DIRSYNC)
3979 			new_fl |= EXT4_DIRSYNC_FL;
3980 	} while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
3981 }
3982 
3983 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
3984 				  struct ext4_inode_info *ei)
3985 {
3986 	blkcnt_t i_blocks ;
3987 	struct inode *inode = &(ei->vfs_inode);
3988 	struct super_block *sb = inode->i_sb;
3989 
3990 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3991 				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
3992 		/* we are using combined 48 bit field */
3993 		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
3994 					le32_to_cpu(raw_inode->i_blocks_lo);
3995 		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
3996 			/* i_blocks represent file system block size */
3997 			return i_blocks  << (inode->i_blkbits - 9);
3998 		} else {
3999 			return i_blocks;
4000 		}
4001 	} else {
4002 		return le32_to_cpu(raw_inode->i_blocks_lo);
4003 	}
4004 }
4005 
4006 static inline void ext4_iget_extra_inode(struct inode *inode,
4007 					 struct ext4_inode *raw_inode,
4008 					 struct ext4_inode_info *ei)
4009 {
4010 	__le32 *magic = (void *)raw_inode +
4011 			EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
4012 	if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
4013 		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
4014 		ext4_find_inline_data_nolock(inode);
4015 	} else
4016 		EXT4_I(inode)->i_inline_off = 0;
4017 }
4018 
4019 struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4020 {
4021 	struct ext4_iloc iloc;
4022 	struct ext4_inode *raw_inode;
4023 	struct ext4_inode_info *ei;
4024 	struct inode *inode;
4025 	journal_t *journal = EXT4_SB(sb)->s_journal;
4026 	long ret;
4027 	int block;
4028 	uid_t i_uid;
4029 	gid_t i_gid;
4030 
4031 	inode = iget_locked(sb, ino);
4032 	if (!inode)
4033 		return ERR_PTR(-ENOMEM);
4034 	if (!(inode->i_state & I_NEW))
4035 		return inode;
4036 
4037 	ei = EXT4_I(inode);
4038 	iloc.bh = NULL;
4039 
4040 	ret = __ext4_get_inode_loc(inode, &iloc, 0);
4041 	if (ret < 0)
4042 		goto bad_inode;
4043 	raw_inode = ext4_raw_inode(&iloc);
4044 
4045 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4046 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4047 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4048 		    EXT4_INODE_SIZE(inode->i_sb)) {
4049 			EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
4050 				EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
4051 				EXT4_INODE_SIZE(inode->i_sb));
4052 			ret = -EIO;
4053 			goto bad_inode;
4054 		}
4055 	} else
4056 		ei->i_extra_isize = 0;
4057 
4058 	/* Precompute checksum seed for inode metadata */
4059 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4060 			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
4061 		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4062 		__u32 csum;
4063 		__le32 inum = cpu_to_le32(inode->i_ino);
4064 		__le32 gen = raw_inode->i_generation;
4065 		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
4066 				   sizeof(inum));
4067 		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
4068 					      sizeof(gen));
4069 	}
4070 
4071 	if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
4072 		EXT4_ERROR_INODE(inode, "checksum invalid");
4073 		ret = -EIO;
4074 		goto bad_inode;
4075 	}
4076 
4077 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4078 	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4079 	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4080 	if (!(test_opt(inode->i_sb, NO_UID32))) {
4081 		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4082 		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4083 	}
4084 	i_uid_write(inode, i_uid);
4085 	i_gid_write(inode, i_gid);
4086 	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
4087 
4088 	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
4089 	ei->i_inline_off = 0;
4090 	ei->i_dir_start_lookup = 0;
4091 	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4092 	/* We now have enough fields to check if the inode was active or not.
4093 	 * This is needed because nfsd might try to access dead inodes
4094 	 * the test is that same one that e2fsck uses
4095 	 * NeilBrown 1999oct15
4096 	 */
4097 	if (inode->i_nlink == 0) {
4098 		if ((inode->i_mode == 0 ||
4099 		     !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
4100 		    ino != EXT4_BOOT_LOADER_INO) {
4101 			/* this inode is deleted */
4102 			ret = -ESTALE;
4103 			goto bad_inode;
4104 		}
4105 		/* The only unlinked inodes we let through here have
4106 		 * valid i_mode and are being read by the orphan
4107 		 * recovery code: that's fine, we're about to complete
4108 		 * the process of deleting those.
4109 		 * OR it is the EXT4_BOOT_LOADER_INO which is
4110 		 * not initialized on a new filesystem. */
4111 	}
4112 	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4113 	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4114 	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4115 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
4116 		ei->i_file_acl |=
4117 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4118 	inode->i_size = ext4_isize(raw_inode);
4119 	ei->i_disksize = inode->i_size;
4120 #ifdef CONFIG_QUOTA
4121 	ei->i_reserved_quota = 0;
4122 #endif
4123 	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4124 	ei->i_block_group = iloc.block_group;
4125 	ei->i_last_alloc_group = ~0;
4126 	/*
4127 	 * NOTE! The in-memory inode i_data array is in little-endian order
4128 	 * even on big-endian machines: we do NOT byteswap the block numbers!
4129 	 */
4130 	for (block = 0; block < EXT4_N_BLOCKS; block++)
4131 		ei->i_data[block] = raw_inode->i_block[block];
4132 	INIT_LIST_HEAD(&ei->i_orphan);
4133 
4134 	/*
4135 	 * Set transaction id's of transactions that have to be committed
4136 	 * to finish f[data]sync. We set them to currently running transaction
4137 	 * as we cannot be sure that the inode or some of its metadata isn't
4138 	 * part of the transaction - the inode could have been reclaimed and
4139 	 * now it is reread from disk.
4140 	 */
4141 	if (journal) {
4142 		transaction_t *transaction;
4143 		tid_t tid;
4144 
4145 		read_lock(&journal->j_state_lock);
4146 		if (journal->j_running_transaction)
4147 			transaction = journal->j_running_transaction;
4148 		else
4149 			transaction = journal->j_committing_transaction;
4150 		if (transaction)
4151 			tid = transaction->t_tid;
4152 		else
4153 			tid = journal->j_commit_sequence;
4154 		read_unlock(&journal->j_state_lock);
4155 		ei->i_sync_tid = tid;
4156 		ei->i_datasync_tid = tid;
4157 	}
4158 
4159 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4160 		if (ei->i_extra_isize == 0) {
4161 			/* The extra space is currently unused. Use it. */
4162 			ei->i_extra_isize = sizeof(struct ext4_inode) -
4163 					    EXT4_GOOD_OLD_INODE_SIZE;
4164 		} else {
4165 			ext4_iget_extra_inode(inode, raw_inode, ei);
4166 		}
4167 	}
4168 
4169 	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
4170 	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
4171 	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
4172 	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
4173 
4174 	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4175 		inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
4176 		if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4177 			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4178 				inode->i_version |=
4179 		    (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
4180 		}
4181 	}
4182 
4183 	ret = 0;
4184 	if (ei->i_file_acl &&
4185 	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
4186 		EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
4187 				 ei->i_file_acl);
4188 		ret = -EIO;
4189 		goto bad_inode;
4190 	} else if (!ext4_has_inline_data(inode)) {
4191 		if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
4192 			if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4193 			    (S_ISLNK(inode->i_mode) &&
4194 			     !ext4_inode_is_fast_symlink(inode))))
4195 				/* Validate extent which is part of inode */
4196 				ret = ext4_ext_check_inode(inode);
4197 		} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4198 			   (S_ISLNK(inode->i_mode) &&
4199 			    !ext4_inode_is_fast_symlink(inode))) {
4200 			/* Validate block references which are part of inode */
4201 			ret = ext4_ind_check_inode(inode);
4202 		}
4203 	}
4204 	if (ret)
4205 		goto bad_inode;
4206 
4207 	if (S_ISREG(inode->i_mode)) {
4208 		inode->i_op = &ext4_file_inode_operations;
4209 		inode->i_fop = &ext4_file_operations;
4210 		ext4_set_aops(inode);
4211 	} else if (S_ISDIR(inode->i_mode)) {
4212 		inode->i_op = &ext4_dir_inode_operations;
4213 		inode->i_fop = &ext4_dir_operations;
4214 	} else if (S_ISLNK(inode->i_mode)) {
4215 		if (ext4_inode_is_fast_symlink(inode)) {
4216 			inode->i_op = &ext4_fast_symlink_inode_operations;
4217 			nd_terminate_link(ei->i_data, inode->i_size,
4218 				sizeof(ei->i_data) - 1);
4219 		} else {
4220 			inode->i_op = &ext4_symlink_inode_operations;
4221 			ext4_set_aops(inode);
4222 		}
4223 	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
4224 	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
4225 		inode->i_op = &ext4_special_inode_operations;
4226 		if (raw_inode->i_block[0])
4227 			init_special_inode(inode, inode->i_mode,
4228 			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
4229 		else
4230 			init_special_inode(inode, inode->i_mode,
4231 			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
4232 	} else if (ino == EXT4_BOOT_LOADER_INO) {
4233 		make_bad_inode(inode);
4234 	} else {
4235 		ret = -EIO;
4236 		EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
4237 		goto bad_inode;
4238 	}
4239 	brelse(iloc.bh);
4240 	ext4_set_inode_flags(inode);
4241 	unlock_new_inode(inode);
4242 	return inode;
4243 
4244 bad_inode:
4245 	brelse(iloc.bh);
4246 	iget_failed(inode);
4247 	return ERR_PTR(ret);
4248 }
4249 
4250 static int ext4_inode_blocks_set(handle_t *handle,
4251 				struct ext4_inode *raw_inode,
4252 				struct ext4_inode_info *ei)
4253 {
4254 	struct inode *inode = &(ei->vfs_inode);
4255 	u64 i_blocks = inode->i_blocks;
4256 	struct super_block *sb = inode->i_sb;
4257 
4258 	if (i_blocks <= ~0U) {
4259 		/*
4260 		 * i_blocks can be represented in a 32 bit variable
4261 		 * as multiple of 512 bytes
4262 		 */
4263 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4264 		raw_inode->i_blocks_high = 0;
4265 		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4266 		return 0;
4267 	}
4268 	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
4269 		return -EFBIG;
4270 
4271 	if (i_blocks <= 0xffffffffffffULL) {
4272 		/*
4273 		 * i_blocks can be represented in a 48 bit variable
4274 		 * as multiple of 512 bytes
4275 		 */
4276 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4277 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4278 		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4279 	} else {
4280 		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4281 		/* i_block is stored in file system block size */
4282 		i_blocks = i_blocks >> (inode->i_blkbits - 9);
4283 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4284 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4285 	}
4286 	return 0;
4287 }
4288 
4289 /*
4290  * Post the struct inode info into an on-disk inode location in the
4291  * buffer-cache.  This gobbles the caller's reference to the
4292  * buffer_head in the inode location struct.
4293  *
4294  * The caller must have write access to iloc->bh.
4295  */
4296 static int ext4_do_update_inode(handle_t *handle,
4297 				struct inode *inode,
4298 				struct ext4_iloc *iloc)
4299 {
4300 	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
4301 	struct ext4_inode_info *ei = EXT4_I(inode);
4302 	struct buffer_head *bh = iloc->bh;
4303 	int err = 0, rc, block;
4304 	int need_datasync = 0;
4305 	uid_t i_uid;
4306 	gid_t i_gid;
4307 
4308 	/* For fields not not tracking in the in-memory inode,
4309 	 * initialise them to zero for new inodes. */
4310 	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
4311 		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
4312 
4313 	ext4_get_inode_flags(ei);
4314 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4315 	i_uid = i_uid_read(inode);
4316 	i_gid = i_gid_read(inode);
4317 	if (!(test_opt(inode->i_sb, NO_UID32))) {
4318 		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
4319 		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
4320 /*
4321  * Fix up interoperability with old kernels. Otherwise, old inodes get
4322  * re-used with the upper 16 bits of the uid/gid intact
4323  */
4324 		if (!ei->i_dtime) {
4325 			raw_inode->i_uid_high =
4326 				cpu_to_le16(high_16_bits(i_uid));
4327 			raw_inode->i_gid_high =
4328 				cpu_to_le16(high_16_bits(i_gid));
4329 		} else {
4330 			raw_inode->i_uid_high = 0;
4331 			raw_inode->i_gid_high = 0;
4332 		}
4333 	} else {
4334 		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
4335 		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
4336 		raw_inode->i_uid_high = 0;
4337 		raw_inode->i_gid_high = 0;
4338 	}
4339 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4340 
4341 	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4342 	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4343 	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4344 	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4345 
4346 	if (ext4_inode_blocks_set(handle, raw_inode, ei))
4347 		goto out_brelse;
4348 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4349 	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
4350 	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
4351 		raw_inode->i_file_acl_high =
4352 			cpu_to_le16(ei->i_file_acl >> 32);
4353 	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4354 	if (ei->i_disksize != ext4_isize(raw_inode)) {
4355 		ext4_isize_set(raw_inode, ei->i_disksize);
4356 		need_datasync = 1;
4357 	}
4358 	if (ei->i_disksize > 0x7fffffffULL) {
4359 		struct super_block *sb = inode->i_sb;
4360 		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
4361 				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
4362 				EXT4_SB(sb)->s_es->s_rev_level ==
4363 				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
4364 			/* If this is the first large file
4365 			 * created, add a flag to the superblock.
4366 			 */
4367 			err = ext4_journal_get_write_access(handle,
4368 					EXT4_SB(sb)->s_sbh);
4369 			if (err)
4370 				goto out_brelse;
4371 			ext4_update_dynamic_rev(sb);
4372 			EXT4_SET_RO_COMPAT_FEATURE(sb,
4373 					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
4374 			ext4_handle_sync(handle);
4375 			err = ext4_handle_dirty_super(handle, sb);
4376 		}
4377 	}
4378 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4379 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4380 		if (old_valid_dev(inode->i_rdev)) {
4381 			raw_inode->i_block[0] =
4382 				cpu_to_le32(old_encode_dev(inode->i_rdev));
4383 			raw_inode->i_block[1] = 0;
4384 		} else {
4385 			raw_inode->i_block[0] = 0;
4386 			raw_inode->i_block[1] =
4387 				cpu_to_le32(new_encode_dev(inode->i_rdev));
4388 			raw_inode->i_block[2] = 0;
4389 		}
4390 	} else if (!ext4_has_inline_data(inode)) {
4391 		for (block = 0; block < EXT4_N_BLOCKS; block++)
4392 			raw_inode->i_block[block] = ei->i_data[block];
4393 	}
4394 
4395 	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4396 		raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
4397 		if (ei->i_extra_isize) {
4398 			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4399 				raw_inode->i_version_hi =
4400 					cpu_to_le32(inode->i_version >> 32);
4401 			raw_inode->i_extra_isize =
4402 				cpu_to_le16(ei->i_extra_isize);
4403 		}
4404 	}
4405 
4406 	ext4_inode_csum_set(inode, raw_inode, ei);
4407 
4408 	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4409 	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
4410 	if (!err)
4411 		err = rc;
4412 	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
4413 
4414 	ext4_update_inode_fsync_trans(handle, inode, need_datasync);
4415 out_brelse:
4416 	brelse(bh);
4417 	ext4_std_error(inode->i_sb, err);
4418 	return err;
4419 }
4420 
4421 /*
4422  * ext4_write_inode()
4423  *
4424  * We are called from a few places:
4425  *
4426  * - Within generic_file_write() for O_SYNC files.
4427  *   Here, there will be no transaction running. We wait for any running
4428  *   transaction to commit.
4429  *
4430  * - Within sys_sync(), kupdate and such.
4431  *   We wait on commit, if tol to.
4432  *
4433  * - Within prune_icache() (PF_MEMALLOC == true)
4434  *   Here we simply return.  We can't afford to block kswapd on the
4435  *   journal commit.
4436  *
4437  * In all cases it is actually safe for us to return without doing anything,
4438  * because the inode has been copied into a raw inode buffer in
4439  * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
4440  * knfsd.
4441  *
4442  * Note that we are absolutely dependent upon all inode dirtiers doing the
4443  * right thing: they *must* call mark_inode_dirty() after dirtying info in
4444  * which we are interested.
4445  *
4446  * It would be a bug for them to not do this.  The code:
4447  *
4448  *	mark_inode_dirty(inode)
4449  *	stuff();
4450  *	inode->i_size = expr;
4451  *
4452  * is in error because a kswapd-driven write_inode() could occur while
4453  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
4454  * will no longer be on the superblock's dirty inode list.
4455  */
4456 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
4457 {
4458 	int err;
4459 
4460 	if (current->flags & PF_MEMALLOC)
4461 		return 0;
4462 
4463 	if (EXT4_SB(inode->i_sb)->s_journal) {
4464 		if (ext4_journal_current_handle()) {
4465 			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4466 			dump_stack();
4467 			return -EIO;
4468 		}
4469 
4470 		/*
4471 		 * No need to force transaction in WB_SYNC_NONE mode. Also
4472 		 * ext4_sync_fs() will force the commit after everything is
4473 		 * written.
4474 		 */
4475 		if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
4476 			return 0;
4477 
4478 		err = ext4_force_commit(inode->i_sb);
4479 	} else {
4480 		struct ext4_iloc iloc;
4481 
4482 		err = __ext4_get_inode_loc(inode, &iloc, 0);
4483 		if (err)
4484 			return err;
4485 		/*
4486 		 * sync(2) will flush the whole buffer cache. No need to do
4487 		 * it here separately for each inode.
4488 		 */
4489 		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
4490 			sync_dirty_buffer(iloc.bh);
4491 		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
4492 			EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
4493 					 "IO error syncing inode");
4494 			err = -EIO;
4495 		}
4496 		brelse(iloc.bh);
4497 	}
4498 	return err;
4499 }
4500 
4501 /*
4502  * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
4503  * buffers that are attached to a page stradding i_size and are undergoing
4504  * commit. In that case we have to wait for commit to finish and try again.
4505  */
4506 static void ext4_wait_for_tail_page_commit(struct inode *inode)
4507 {
4508 	struct page *page;
4509 	unsigned offset;
4510 	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
4511 	tid_t commit_tid = 0;
4512 	int ret;
4513 
4514 	offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
4515 	/*
4516 	 * All buffers in the last page remain valid? Then there's nothing to
4517 	 * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE ==
4518 	 * blocksize case
4519 	 */
4520 	if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits))
4521 		return;
4522 	while (1) {
4523 		page = find_lock_page(inode->i_mapping,
4524 				      inode->i_size >> PAGE_CACHE_SHIFT);
4525 		if (!page)
4526 			return;
4527 		ret = __ext4_journalled_invalidatepage(page, offset,
4528 						PAGE_CACHE_SIZE - offset);
4529 		unlock_page(page);
4530 		page_cache_release(page);
4531 		if (ret != -EBUSY)
4532 			return;
4533 		commit_tid = 0;
4534 		read_lock(&journal->j_state_lock);
4535 		if (journal->j_committing_transaction)
4536 			commit_tid = journal->j_committing_transaction->t_tid;
4537 		read_unlock(&journal->j_state_lock);
4538 		if (commit_tid)
4539 			jbd2_log_wait_commit(journal, commit_tid);
4540 	}
4541 }
4542 
4543 /*
4544  * ext4_setattr()
4545  *
4546  * Called from notify_change.
4547  *
4548  * We want to trap VFS attempts to truncate the file as soon as
4549  * possible.  In particular, we want to make sure that when the VFS
4550  * shrinks i_size, we put the inode on the orphan list and modify
4551  * i_disksize immediately, so that during the subsequent flushing of
4552  * dirty pages and freeing of disk blocks, we can guarantee that any
4553  * commit will leave the blocks being flushed in an unused state on
4554  * disk.  (On recovery, the inode will get truncated and the blocks will
4555  * be freed, so we have a strong guarantee that no future commit will
4556  * leave these blocks visible to the user.)
4557  *
4558  * Another thing we have to assure is that if we are in ordered mode
4559  * and inode is still attached to the committing transaction, we must
4560  * we start writeout of all the dirty pages which are being truncated.
4561  * This way we are sure that all the data written in the previous
4562  * transaction are already on disk (truncate waits for pages under
4563  * writeback).
4564  *
4565  * Called with inode->i_mutex down.
4566  */
4567 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4568 {
4569 	struct inode *inode = dentry->d_inode;
4570 	int error, rc = 0;
4571 	int orphan = 0;
4572 	const unsigned int ia_valid = attr->ia_valid;
4573 
4574 	error = inode_change_ok(inode, attr);
4575 	if (error)
4576 		return error;
4577 
4578 	if (is_quota_modification(inode, attr))
4579 		dquot_initialize(inode);
4580 	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
4581 	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
4582 		handle_t *handle;
4583 
4584 		/* (user+group)*(old+new) structure, inode write (sb,
4585 		 * inode block, ? - but truncate inode update has it) */
4586 		handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
4587 			(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
4588 			 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
4589 		if (IS_ERR(handle)) {
4590 			error = PTR_ERR(handle);
4591 			goto err_out;
4592 		}
4593 		error = dquot_transfer(inode, attr);
4594 		if (error) {
4595 			ext4_journal_stop(handle);
4596 			return error;
4597 		}
4598 		/* Update corresponding info in inode so that everything is in
4599 		 * one transaction */
4600 		if (attr->ia_valid & ATTR_UID)
4601 			inode->i_uid = attr->ia_uid;
4602 		if (attr->ia_valid & ATTR_GID)
4603 			inode->i_gid = attr->ia_gid;
4604 		error = ext4_mark_inode_dirty(handle, inode);
4605 		ext4_journal_stop(handle);
4606 	}
4607 
4608 	if (attr->ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
4609 		handle_t *handle;
4610 
4611 		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4612 			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4613 
4614 			if (attr->ia_size > sbi->s_bitmap_maxbytes)
4615 				return -EFBIG;
4616 		}
4617 
4618 		if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
4619 			inode_inc_iversion(inode);
4620 
4621 		if (S_ISREG(inode->i_mode) &&
4622 		    (attr->ia_size < inode->i_size)) {
4623 			if (ext4_should_order_data(inode)) {
4624 				error = ext4_begin_ordered_truncate(inode,
4625 							    attr->ia_size);
4626 				if (error)
4627 					goto err_out;
4628 			}
4629 			handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
4630 			if (IS_ERR(handle)) {
4631 				error = PTR_ERR(handle);
4632 				goto err_out;
4633 			}
4634 			if (ext4_handle_valid(handle)) {
4635 				error = ext4_orphan_add(handle, inode);
4636 				orphan = 1;
4637 			}
4638 			down_write(&EXT4_I(inode)->i_data_sem);
4639 			EXT4_I(inode)->i_disksize = attr->ia_size;
4640 			rc = ext4_mark_inode_dirty(handle, inode);
4641 			if (!error)
4642 				error = rc;
4643 			/*
4644 			 * We have to update i_size under i_data_sem together
4645 			 * with i_disksize to avoid races with writeback code
4646 			 * running ext4_wb_update_i_disksize().
4647 			 */
4648 			if (!error)
4649 				i_size_write(inode, attr->ia_size);
4650 			up_write(&EXT4_I(inode)->i_data_sem);
4651 			ext4_journal_stop(handle);
4652 			if (error) {
4653 				ext4_orphan_del(NULL, inode);
4654 				goto err_out;
4655 			}
4656 		} else
4657 			i_size_write(inode, attr->ia_size);
4658 
4659 		/*
4660 		 * Blocks are going to be removed from the inode. Wait
4661 		 * for dio in flight.  Temporarily disable
4662 		 * dioread_nolock to prevent livelock.
4663 		 */
4664 		if (orphan) {
4665 			if (!ext4_should_journal_data(inode)) {
4666 				ext4_inode_block_unlocked_dio(inode);
4667 				inode_dio_wait(inode);
4668 				ext4_inode_resume_unlocked_dio(inode);
4669 			} else
4670 				ext4_wait_for_tail_page_commit(inode);
4671 		}
4672 		/*
4673 		 * Truncate pagecache after we've waited for commit
4674 		 * in data=journal mode to make pages freeable.
4675 		 */
4676 			truncate_pagecache(inode, inode->i_size);
4677 	}
4678 	/*
4679 	 * We want to call ext4_truncate() even if attr->ia_size ==
4680 	 * inode->i_size for cases like truncation of fallocated space
4681 	 */
4682 	if (attr->ia_valid & ATTR_SIZE)
4683 		ext4_truncate(inode);
4684 
4685 	if (!rc) {
4686 		setattr_copy(inode, attr);
4687 		mark_inode_dirty(inode);
4688 	}
4689 
4690 	/*
4691 	 * If the call to ext4_truncate failed to get a transaction handle at
4692 	 * all, we need to clean up the in-core orphan list manually.
4693 	 */
4694 	if (orphan && inode->i_nlink)
4695 		ext4_orphan_del(NULL, inode);
4696 
4697 	if (!rc && (ia_valid & ATTR_MODE))
4698 		rc = posix_acl_chmod(inode, inode->i_mode);
4699 
4700 err_out:
4701 	ext4_std_error(inode->i_sb, error);
4702 	if (!error)
4703 		error = rc;
4704 	return error;
4705 }
4706 
4707 int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
4708 		 struct kstat *stat)
4709 {
4710 	struct inode *inode;
4711 	unsigned long long delalloc_blocks;
4712 
4713 	inode = dentry->d_inode;
4714 	generic_fillattr(inode, stat);
4715 
4716 	/*
4717 	 * If there is inline data in the inode, the inode will normally not
4718 	 * have data blocks allocated (it may have an external xattr block).
4719 	 * Report at least one sector for such files, so tools like tar, rsync,
4720 	 * others doen't incorrectly think the file is completely sparse.
4721 	 */
4722 	if (unlikely(ext4_has_inline_data(inode)))
4723 		stat->blocks += (stat->size + 511) >> 9;
4724 
4725 	/*
4726 	 * We can't update i_blocks if the block allocation is delayed
4727 	 * otherwise in the case of system crash before the real block
4728 	 * allocation is done, we will have i_blocks inconsistent with
4729 	 * on-disk file blocks.
4730 	 * We always keep i_blocks updated together with real
4731 	 * allocation. But to not confuse with user, stat
4732 	 * will return the blocks that include the delayed allocation
4733 	 * blocks for this file.
4734 	 */
4735 	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
4736 				   EXT4_I(inode)->i_reserved_data_blocks);
4737 	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
4738 	return 0;
4739 }
4740 
4741 static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
4742 				   int pextents)
4743 {
4744 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4745 		return ext4_ind_trans_blocks(inode, lblocks);
4746 	return ext4_ext_index_trans_blocks(inode, pextents);
4747 }
4748 
4749 /*
4750  * Account for index blocks, block groups bitmaps and block group
4751  * descriptor blocks if modify datablocks and index blocks
4752  * worse case, the indexs blocks spread over different block groups
4753  *
4754  * If datablocks are discontiguous, they are possible to spread over
4755  * different block groups too. If they are contiguous, with flexbg,
4756  * they could still across block group boundary.
4757  *
4758  * Also account for superblock, inode, quota and xattr blocks
4759  */
4760 static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
4761 				  int pextents)
4762 {
4763 	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
4764 	int gdpblocks;
4765 	int idxblocks;
4766 	int ret = 0;
4767 
4768 	/*
4769 	 * How many index blocks need to touch to map @lblocks logical blocks
4770 	 * to @pextents physical extents?
4771 	 */
4772 	idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
4773 
4774 	ret = idxblocks;
4775 
4776 	/*
4777 	 * Now let's see how many group bitmaps and group descriptors need
4778 	 * to account
4779 	 */
4780 	groups = idxblocks + pextents;
4781 	gdpblocks = groups;
4782 	if (groups > ngroups)
4783 		groups = ngroups;
4784 	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4785 		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4786 
4787 	/* bitmaps and block group descriptor blocks */
4788 	ret += groups + gdpblocks;
4789 
4790 	/* Blocks for super block, inode, quota and xattr blocks */
4791 	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
4792 
4793 	return ret;
4794 }
4795 
4796 /*
4797  * Calculate the total number of credits to reserve to fit
4798  * the modification of a single pages into a single transaction,
4799  * which may include multiple chunks of block allocations.
4800  *
4801  * This could be called via ext4_write_begin()
4802  *
4803  * We need to consider the worse case, when
4804  * one new block per extent.
4805  */
4806 int ext4_writepage_trans_blocks(struct inode *inode)
4807 {
4808 	int bpp = ext4_journal_blocks_per_page(inode);
4809 	int ret;
4810 
4811 	ret = ext4_meta_trans_blocks(inode, bpp, bpp);
4812 
4813 	/* Account for data blocks for journalled mode */
4814 	if (ext4_should_journal_data(inode))
4815 		ret += bpp;
4816 	return ret;
4817 }
4818 
4819 /*
4820  * Calculate the journal credits for a chunk of data modification.
4821  *
4822  * This is called from DIO, fallocate or whoever calling
4823  * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
4824  *
4825  * journal buffers for data blocks are not included here, as DIO
4826  * and fallocate do no need to journal data buffers.
4827  */
4828 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
4829 {
4830 	return ext4_meta_trans_blocks(inode, nrblocks, 1);
4831 }
4832 
4833 /*
4834  * The caller must have previously called ext4_reserve_inode_write().
4835  * Give this, we know that the caller already has write access to iloc->bh.
4836  */
4837 int ext4_mark_iloc_dirty(handle_t *handle,
4838 			 struct inode *inode, struct ext4_iloc *iloc)
4839 {
4840 	int err = 0;
4841 
4842 	if (IS_I_VERSION(inode))
4843 		inode_inc_iversion(inode);
4844 
4845 	/* the do_update_inode consumes one bh->b_count */
4846 	get_bh(iloc->bh);
4847 
4848 	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
4849 	err = ext4_do_update_inode(handle, inode, iloc);
4850 	put_bh(iloc->bh);
4851 	return err;
4852 }
4853 
4854 /*
4855  * On success, We end up with an outstanding reference count against
4856  * iloc->bh.  This _must_ be cleaned up later.
4857  */
4858 
4859 int
4860 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
4861 			 struct ext4_iloc *iloc)
4862 {
4863 	int err;
4864 
4865 	err = ext4_get_inode_loc(inode, iloc);
4866 	if (!err) {
4867 		BUFFER_TRACE(iloc->bh, "get_write_access");
4868 		err = ext4_journal_get_write_access(handle, iloc->bh);
4869 		if (err) {
4870 			brelse(iloc->bh);
4871 			iloc->bh = NULL;
4872 		}
4873 	}
4874 	ext4_std_error(inode->i_sb, err);
4875 	return err;
4876 }
4877 
4878 /*
4879  * Expand an inode by new_extra_isize bytes.
4880  * Returns 0 on success or negative error number on failure.
4881  */
4882 static int ext4_expand_extra_isize(struct inode *inode,
4883 				   unsigned int new_extra_isize,
4884 				   struct ext4_iloc iloc,
4885 				   handle_t *handle)
4886 {
4887 	struct ext4_inode *raw_inode;
4888 	struct ext4_xattr_ibody_header *header;
4889 
4890 	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
4891 		return 0;
4892 
4893 	raw_inode = ext4_raw_inode(&iloc);
4894 
4895 	header = IHDR(inode, raw_inode);
4896 
4897 	/* No extended attributes present */
4898 	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
4899 	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
4900 		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
4901 			new_extra_isize);
4902 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
4903 		return 0;
4904 	}
4905 
4906 	/* try to expand with EAs present */
4907 	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
4908 					  raw_inode, handle);
4909 }
4910 
4911 /*
4912  * What we do here is to mark the in-core inode as clean with respect to inode
4913  * dirtiness (it may still be data-dirty).
4914  * This means that the in-core inode may be reaped by prune_icache
4915  * without having to perform any I/O.  This is a very good thing,
4916  * because *any* task may call prune_icache - even ones which
4917  * have a transaction open against a different journal.
4918  *
4919  * Is this cheating?  Not really.  Sure, we haven't written the
4920  * inode out, but prune_icache isn't a user-visible syncing function.
4921  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
4922  * we start and wait on commits.
4923  */
4924 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4925 {
4926 	struct ext4_iloc iloc;
4927 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4928 	static unsigned int mnt_count;
4929 	int err, ret;
4930 
4931 	might_sleep();
4932 	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
4933 	err = ext4_reserve_inode_write(handle, inode, &iloc);
4934 	if (ext4_handle_valid(handle) &&
4935 	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
4936 	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
4937 		/*
4938 		 * We need extra buffer credits since we may write into EA block
4939 		 * with this same handle. If journal_extend fails, then it will
4940 		 * only result in a minor loss of functionality for that inode.
4941 		 * If this is felt to be critical, then e2fsck should be run to
4942 		 * force a large enough s_min_extra_isize.
4943 		 */
4944 		if ((jbd2_journal_extend(handle,
4945 			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
4946 			ret = ext4_expand_extra_isize(inode,
4947 						      sbi->s_want_extra_isize,
4948 						      iloc, handle);
4949 			if (ret) {
4950 				ext4_set_inode_state(inode,
4951 						     EXT4_STATE_NO_EXPAND);
4952 				if (mnt_count !=
4953 					le16_to_cpu(sbi->s_es->s_mnt_count)) {
4954 					ext4_warning(inode->i_sb,
4955 					"Unable to expand inode %lu. Delete"
4956 					" some EAs or run e2fsck.",
4957 					inode->i_ino);
4958 					mnt_count =
4959 					  le16_to_cpu(sbi->s_es->s_mnt_count);
4960 				}
4961 			}
4962 		}
4963 	}
4964 	if (!err)
4965 		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
4966 	return err;
4967 }
4968 
4969 /*
4970  * ext4_dirty_inode() is called from __mark_inode_dirty()
4971  *
4972  * We're really interested in the case where a file is being extended.
4973  * i_size has been changed by generic_commit_write() and we thus need
4974  * to include the updated inode in the current transaction.
4975  *
4976  * Also, dquot_alloc_block() will always dirty the inode when blocks
4977  * are allocated to the file.
4978  *
4979  * If the inode is marked synchronous, we don't honour that here - doing
4980  * so would cause a commit on atime updates, which we don't bother doing.
4981  * We handle synchronous inodes at the highest possible level.
4982  */
4983 void ext4_dirty_inode(struct inode *inode, int flags)
4984 {
4985 	handle_t *handle;
4986 
4987 	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
4988 	if (IS_ERR(handle))
4989 		goto out;
4990 
4991 	ext4_mark_inode_dirty(handle, inode);
4992 
4993 	ext4_journal_stop(handle);
4994 out:
4995 	return;
4996 }
4997 
4998 #if 0
4999 /*
5000  * Bind an inode's backing buffer_head into this transaction, to prevent
5001  * it from being flushed to disk early.  Unlike
5002  * ext4_reserve_inode_write, this leaves behind no bh reference and
5003  * returns no iloc structure, so the caller needs to repeat the iloc
5004  * lookup to mark the inode dirty later.
5005  */
5006 static int ext4_pin_inode(handle_t *handle, struct inode *inode)
5007 {
5008 	struct ext4_iloc iloc;
5009 
5010 	int err = 0;
5011 	if (handle) {
5012 		err = ext4_get_inode_loc(inode, &iloc);
5013 		if (!err) {
5014 			BUFFER_TRACE(iloc.bh, "get_write_access");
5015 			err = jbd2_journal_get_write_access(handle, iloc.bh);
5016 			if (!err)
5017 				err = ext4_handle_dirty_metadata(handle,
5018 								 NULL,
5019 								 iloc.bh);
5020 			brelse(iloc.bh);
5021 		}
5022 	}
5023 	ext4_std_error(inode->i_sb, err);
5024 	return err;
5025 }
5026 #endif
5027 
5028 int ext4_change_inode_journal_flag(struct inode *inode, int val)
5029 {
5030 	journal_t *journal;
5031 	handle_t *handle;
5032 	int err;
5033 
5034 	/*
5035 	 * We have to be very careful here: changing a data block's
5036 	 * journaling status dynamically is dangerous.  If we write a
5037 	 * data block to the journal, change the status and then delete
5038 	 * that block, we risk forgetting to revoke the old log record
5039 	 * from the journal and so a subsequent replay can corrupt data.
5040 	 * So, first we make sure that the journal is empty and that
5041 	 * nobody is changing anything.
5042 	 */
5043 
5044 	journal = EXT4_JOURNAL(inode);
5045 	if (!journal)
5046 		return 0;
5047 	if (is_journal_aborted(journal))
5048 		return -EROFS;
5049 	/* We have to allocate physical blocks for delalloc blocks
5050 	 * before flushing journal. otherwise delalloc blocks can not
5051 	 * be allocated any more. even more truncate on delalloc blocks
5052 	 * could trigger BUG by flushing delalloc blocks in journal.
5053 	 * There is no delalloc block in non-journal data mode.
5054 	 */
5055 	if (val && test_opt(inode->i_sb, DELALLOC)) {
5056 		err = ext4_alloc_da_blocks(inode);
5057 		if (err < 0)
5058 			return err;
5059 	}
5060 
5061 	/* Wait for all existing dio workers */
5062 	ext4_inode_block_unlocked_dio(inode);
5063 	inode_dio_wait(inode);
5064 
5065 	jbd2_journal_lock_updates(journal);
5066 
5067 	/*
5068 	 * OK, there are no updates running now, and all cached data is
5069 	 * synced to disk.  We are now in a completely consistent state
5070 	 * which doesn't have anything in the journal, and we know that
5071 	 * no filesystem updates are running, so it is safe to modify
5072 	 * the inode's in-core data-journaling state flag now.
5073 	 */
5074 
5075 	if (val)
5076 		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
5077 	else {
5078 		jbd2_journal_flush(journal);
5079 		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
5080 	}
5081 	ext4_set_aops(inode);
5082 
5083 	jbd2_journal_unlock_updates(journal);
5084 	ext4_inode_resume_unlocked_dio(inode);
5085 
5086 	/* Finally we can mark the inode as dirty. */
5087 
5088 	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
5089 	if (IS_ERR(handle))
5090 		return PTR_ERR(handle);
5091 
5092 	err = ext4_mark_inode_dirty(handle, inode);
5093 	ext4_handle_sync(handle);
5094 	ext4_journal_stop(handle);
5095 	ext4_std_error(inode->i_sb, err);
5096 
5097 	return err;
5098 }
5099 
5100 static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
5101 {
5102 	return !buffer_mapped(bh);
5103 }
5104 
5105 int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5106 {
5107 	struct page *page = vmf->page;
5108 	loff_t size;
5109 	unsigned long len;
5110 	int ret;
5111 	struct file *file = vma->vm_file;
5112 	struct inode *inode = file_inode(file);
5113 	struct address_space *mapping = inode->i_mapping;
5114 	handle_t *handle;
5115 	get_block_t *get_block;
5116 	int retries = 0;
5117 
5118 	sb_start_pagefault(inode->i_sb);
5119 	file_update_time(vma->vm_file);
5120 	/* Delalloc case is easy... */
5121 	if (test_opt(inode->i_sb, DELALLOC) &&
5122 	    !ext4_should_journal_data(inode) &&
5123 	    !ext4_nonda_switch(inode->i_sb)) {
5124 		do {
5125 			ret = __block_page_mkwrite(vma, vmf,
5126 						   ext4_da_get_block_prep);
5127 		} while (ret == -ENOSPC &&
5128 		       ext4_should_retry_alloc(inode->i_sb, &retries));
5129 		goto out_ret;
5130 	}
5131 
5132 	lock_page(page);
5133 	size = i_size_read(inode);
5134 	/* Page got truncated from under us? */
5135 	if (page->mapping != mapping || page_offset(page) > size) {
5136 		unlock_page(page);
5137 		ret = VM_FAULT_NOPAGE;
5138 		goto out;
5139 	}
5140 
5141 	if (page->index == size >> PAGE_CACHE_SHIFT)
5142 		len = size & ~PAGE_CACHE_MASK;
5143 	else
5144 		len = PAGE_CACHE_SIZE;
5145 	/*
5146 	 * Return if we have all the buffers mapped. This avoids the need to do
5147 	 * journal_start/journal_stop which can block and take a long time
5148 	 */
5149 	if (page_has_buffers(page)) {
5150 		if (!ext4_walk_page_buffers(NULL, page_buffers(page),
5151 					    0, len, NULL,
5152 					    ext4_bh_unmapped)) {
5153 			/* Wait so that we don't change page under IO */
5154 			wait_for_stable_page(page);
5155 			ret = VM_FAULT_LOCKED;
5156 			goto out;
5157 		}
5158 	}
5159 	unlock_page(page);
5160 	/* OK, we need to fill the hole... */
5161 	if (ext4_should_dioread_nolock(inode))
5162 		get_block = ext4_get_block_write;
5163 	else
5164 		get_block = ext4_get_block;
5165 retry_alloc:
5166 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
5167 				    ext4_writepage_trans_blocks(inode));
5168 	if (IS_ERR(handle)) {
5169 		ret = VM_FAULT_SIGBUS;
5170 		goto out;
5171 	}
5172 	ret = __block_page_mkwrite(vma, vmf, get_block);
5173 	if (!ret && ext4_should_journal_data(inode)) {
5174 		if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
5175 			  PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
5176 			unlock_page(page);
5177 			ret = VM_FAULT_SIGBUS;
5178 			ext4_journal_stop(handle);
5179 			goto out;
5180 		}
5181 		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
5182 	}
5183 	ext4_journal_stop(handle);
5184 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
5185 		goto retry_alloc;
5186 out_ret:
5187 	ret = block_page_mkwrite_return(ret);
5188 out:
5189 	sb_end_pagefault(inode->i_sb);
5190 	return ret;
5191 }
5192