xref: /openbmc/linux/fs/f2fs/data.c (revision 3afae09ffea5e08f523823be99a784675995d6bb)
17c1a000dSChao Yu // SPDX-License-Identifier: GPL-2.0
20a8165d7SJaegeuk Kim /*
3eb47b800SJaegeuk Kim  * fs/f2fs/data.c
4eb47b800SJaegeuk Kim  *
5eb47b800SJaegeuk Kim  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6eb47b800SJaegeuk Kim  *             http://www.samsung.com/
7eb47b800SJaegeuk Kim  */
8eb47b800SJaegeuk Kim #include <linux/fs.h>
9eb47b800SJaegeuk Kim #include <linux/f2fs_fs.h>
10eb47b800SJaegeuk Kim #include <linux/buffer_head.h>
11eb47b800SJaegeuk Kim #include <linux/mpage.h>
12eb47b800SJaegeuk Kim #include <linux/writeback.h>
13eb47b800SJaegeuk Kim #include <linux/backing-dev.h>
148f46dcaeSChao Yu #include <linux/pagevec.h>
15eb47b800SJaegeuk Kim #include <linux/blkdev.h>
16eb47b800SJaegeuk Kim #include <linux/bio.h>
1727aacd28SSatya Tangirala #include <linux/blk-crypto.h>
184969c06aSJaegeuk Kim #include <linux/swap.h>
19690e4a3eSGeert Uytterhoeven #include <linux/prefetch.h>
20e2e40f2cSChristoph Hellwig #include <linux/uio.h>
21f1e88660SJaegeuk Kim #include <linux/cleancache.h>
22174cd4b1SIngo Molnar #include <linux/sched/signal.h>
2310c5db28SChristoph Hellwig #include <linux/fiemap.h>
24eb47b800SJaegeuk Kim 
25eb47b800SJaegeuk Kim #include "f2fs.h"
26eb47b800SJaegeuk Kim #include "node.h"
27eb47b800SJaegeuk Kim #include "segment.h"
28db9f7c1aSJaegeuk Kim #include "trace.h"
29848753aaSNamjae Jeon #include <trace/events/f2fs.h>
30eb47b800SJaegeuk Kim 
#define NUM_PREALLOC_POST_READ_CTXS	128

/* slab cache backing struct bio_post_read_ctx allocations */
static struct kmem_cache *bio_post_read_ctx_cache;
/* slab cache for bio entries kept in the per-sb bio cache/merge lists */
static struct kmem_cache *bio_entry_slab;
/* mempool over bio_post_read_ctx_cache; guarantees forward progress */
static mempool_t *bio_post_read_ctx_pool;
/* private bioset so f2fs bio allocation is not starved by other users */
static struct bio_set f2fs_bioset;

#define	F2FS_BIO_POOL_SIZE	NR_CURSEG_TYPE
39f543805fSChao Yu 
40f543805fSChao Yu int __init f2fs_init_bioset(void)
41f543805fSChao Yu {
42f543805fSChao Yu 	if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
43f543805fSChao Yu 					0, BIOSET_NEED_BVECS))
44f543805fSChao Yu 		return -ENOMEM;
45f543805fSChao Yu 	return 0;
46f543805fSChao Yu }
47f543805fSChao Yu 
/* Tear down the private bioset created by f2fs_init_bioset(). */
void f2fs_destroy_bioset(void)
{
	bioset_exit(&f2fs_bioset);
}
52f543805fSChao Yu 
/* Allocate a bio with room for @nr_iovecs vecs from the f2fs bioset. */
static inline struct bio *__f2fs_bio_alloc(gfp_t gfp_mask,
						unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, &f2fs_bioset);
}
58f543805fSChao Yu 
59ca9e968aSChao Yu struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio)
60f543805fSChao Yu {
61ca9e968aSChao Yu 	if (noio) {
62f543805fSChao Yu 		/* No failure on bio allocation */
63ca9e968aSChao Yu 		return __f2fs_bio_alloc(GFP_NOIO, npages);
64f543805fSChao Yu 	}
65ca9e968aSChao Yu 
66f543805fSChao Yu 	if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
67f543805fSChao Yu 		f2fs_show_injection_info(sbi, FAULT_ALLOC_BIO);
68f543805fSChao Yu 		return NULL;
69f543805fSChao Yu 	}
70f543805fSChao Yu 
71f543805fSChao Yu 	return __f2fs_bio_alloc(GFP_KERNEL, npages);
72f543805fSChao Yu }
736dbb1796SEric Biggers 
7436951b38SChao Yu static bool __is_cp_guaranteed(struct page *page)
7536951b38SChao Yu {
7636951b38SChao Yu 	struct address_space *mapping = page->mapping;
7736951b38SChao Yu 	struct inode *inode;
7836951b38SChao Yu 	struct f2fs_sb_info *sbi;
7936951b38SChao Yu 
8036951b38SChao Yu 	if (!mapping)
8136951b38SChao Yu 		return false;
8236951b38SChao Yu 
834c8ff709SChao Yu 	if (f2fs_is_compressed_page(page))
844c8ff709SChao Yu 		return false;
854c8ff709SChao Yu 
8636951b38SChao Yu 	inode = mapping->host;
8736951b38SChao Yu 	sbi = F2FS_I_SB(inode);
8836951b38SChao Yu 
8936951b38SChao Yu 	if (inode->i_ino == F2FS_META_INO(sbi) ||
9036951b38SChao Yu 			inode->i_ino == F2FS_NODE_INO(sbi) ||
9136951b38SChao Yu 			S_ISDIR(inode->i_mode) ||
92e7a4feb0SChao Yu 			(S_ISREG(inode->i_mode) &&
93af033b2aSChao Yu 			(f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
9436951b38SChao Yu 			is_cold_data(page))
9536951b38SChao Yu 		return true;
9636951b38SChao Yu 	return false;
9736951b38SChao Yu }
9836951b38SChao Yu 
995f9abab4SJaegeuk Kim static enum count_type __read_io_type(struct page *page)
1005f9abab4SJaegeuk Kim {
1014969c06aSJaegeuk Kim 	struct address_space *mapping = page_file_mapping(page);
1025f9abab4SJaegeuk Kim 
1035f9abab4SJaegeuk Kim 	if (mapping) {
1045f9abab4SJaegeuk Kim 		struct inode *inode = mapping->host;
1055f9abab4SJaegeuk Kim 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1065f9abab4SJaegeuk Kim 
1075f9abab4SJaegeuk Kim 		if (inode->i_ino == F2FS_META_INO(sbi))
1085f9abab4SJaegeuk Kim 			return F2FS_RD_META;
1095f9abab4SJaegeuk Kim 
1105f9abab4SJaegeuk Kim 		if (inode->i_ino == F2FS_NODE_INO(sbi))
1115f9abab4SJaegeuk Kim 			return F2FS_RD_NODE;
1125f9abab4SJaegeuk Kim 	}
1135f9abab4SJaegeuk Kim 	return F2FS_RD_DATA;
1145f9abab4SJaegeuk Kim }
1155f9abab4SJaegeuk Kim 
/*
 * postprocessing steps for read bios
 *
 * Steps disabled at build time are defined as 0 so that bitmask tests
 * against them fold to constant false and the related code compiles out.
 */
enum bio_post_read_step {
#ifdef CONFIG_FS_ENCRYPTION
	STEP_DECRYPT	= 1 << 0,
#else
	STEP_DECRYPT	= 0,	/* compile out the decryption-related code */
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
	STEP_DECOMPRESS	= 1 << 1,
#else
	STEP_DECOMPRESS	= 0,	/* compile out the decompression-related code */
#endif
#ifdef CONFIG_FS_VERITY
	STEP_VERITY	= 1 << 2,
#else
	STEP_VERITY	= 0,	/* compile out the verity-related code */
#endif
};
1346dbb1796SEric Biggers 
/* per-bio context (bio->bi_private) for reads needing postprocessing */
struct bio_post_read_ctx {
	struct bio *bio;		/* the read bio being postprocessed */
	struct f2fs_sb_info *sbi;	/* provides sbi->post_read_wq */
	struct work_struct work;	/* deferred decrypt/decompress/verity */
	unsigned int enabled_steps;	/* bitmask of STEP_* to apply */
};
1416dbb1796SEric Biggers 
/*
 * Final completion path shared by all read bios: update page flags,
 * unlock pagecache pages, drop decompression contexts for compressed
 * pages, free any attached post-read ctx, and release the bio.
 */
static void f2fs_finish_read_bio(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	/*
	 * Update and unlock the bio's pagecache pages, and put the
	 * decompression context for any compressed pages.
	 */
	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;

		if (f2fs_is_compressed_page(page)) {
			if (bio->bi_status)
				f2fs_end_read_compressed_page(page, true);
			f2fs_put_page_dic(page);
			continue;
		}

		/* PG_error was set if decryption or verity failed. */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will re-read again later */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);
	}

	/* bi_private, if set, is a bio_post_read_ctx from the mempool */
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}
1774c8ff709SChao Yu 
/*
 * Work function verifying a read bio's pages with fs-verity, then
 * finishing the bio.  Runs on the fs-verity workqueue (see the deadlock
 * note in f2fs_verify_and_finish_bio()).
 */
static void f2fs_verify_bio(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;
	bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);

	/*
	 * fsverity_verify_bio() may call readpages() again, and while verity
	 * will be disabled for this, decryption and/or decompression may still
	 * be needed, resulting in another bio_post_read_ctx being allocated.
	 * So to prevent deadlocks we need to release the current ctx to the
	 * mempool first.  This assumes that verity is the last post-read step.
	 */
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	/*
	 * Verify the bio's pages with fs-verity.  Exclude compressed pages,
	 * as those were handled separately by f2fs_end_read_compressed_page().
	 */
	if (may_have_compressed_pages) {
		struct bio_vec *bv;
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bv, bio, iter_all) {
			struct page *page = bv->bv_page;

			if (!f2fs_is_compressed_page(page) &&
			    !PageError(page) && !fsverity_verify_page(page))
				SetPageError(page);
		}
	} else {
		fsverity_verify_bio(bio);
	}

	f2fs_finish_read_bio(bio);
}
216644c8c92SEric Biggers 
217644c8c92SEric Biggers /*
2187f59b277SEric Biggers  * If the bio's data needs to be verified with fs-verity, then enqueue the
2197f59b277SEric Biggers  * verity work for the bio.  Otherwise finish the bio now.
2207f59b277SEric Biggers  *
2217f59b277SEric Biggers  * Note that to avoid deadlocks, the verity work can't be done on the
2227f59b277SEric Biggers  * decryption/decompression workqueue.  This is because verifying the data pages
2237f59b277SEric Biggers  * can involve reading verity metadata pages from the file, and these verity
2247f59b277SEric Biggers  * metadata pages may be encrypted and/or compressed.
225644c8c92SEric Biggers  */
2267f59b277SEric Biggers static void f2fs_verify_and_finish_bio(struct bio *bio)
2277f59b277SEric Biggers {
2287f59b277SEric Biggers 	struct bio_post_read_ctx *ctx = bio->bi_private;
22995ae251fSEric Biggers 
2307f59b277SEric Biggers 	if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
2317f59b277SEric Biggers 		INIT_WORK(&ctx->work, f2fs_verify_bio);
2327f59b277SEric Biggers 		fsverity_enqueue_verify_work(&ctx->work);
2337f59b277SEric Biggers 	} else {
2347f59b277SEric Biggers 		f2fs_finish_read_bio(bio);
2354c8ff709SChao Yu 	}
2367f59b277SEric Biggers }
2374c8ff709SChao Yu 
/*
 * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
 * remaining page was read by @ctx->bio.
 *
 * Note that a bio may span clusters (even a mix of compressed and uncompressed
 * clusters) or be for just part of a cluster.  STEP_DECOMPRESS just indicates
 * that the bio includes at least one compressed page.  The actual decompression
 * is done on a per-cluster basis, not a per-bio basis.
 */
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;
	bool all_compressed = true;

	bio_for_each_segment_all(bv, ctx->bio, iter_all) {
		struct page *page = bv->bv_page;

		/* PG_error was set if decryption failed. */
		if (f2fs_is_compressed_page(page))
			f2fs_end_read_compressed_page(page, PageError(page));
		else
			all_compressed = false;
	}

	/*
	 * Optimization: if all the bio's pages are compressed, then scheduling
	 * the per-bio verity work is unnecessary, as verity will be fully
	 * handled at the compression cluster level.
	 */
	if (all_compressed)
		ctx->enabled_steps &= ~STEP_VERITY;
}
2714c8ff709SChao Yu 
/*
 * Workqueue function running the process-context post-read steps
 * (decryption, then decompression) before handing off to verity/finish.
 */
static void f2fs_post_read_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	if (ctx->enabled_steps & STEP_DECRYPT)
		fscrypt_decrypt_bio(ctx->bio);

	if (ctx->enabled_steps & STEP_DECOMPRESS)
		f2fs_handle_step_decompress(ctx);

	f2fs_verify_and_finish_bio(ctx->bio);
}
2856dbb1796SEric Biggers 
/*
 * bi_end_io for read bios: on error finish immediately; otherwise queue
 * decrypt/decompress work on the post-read workqueue, or go straight to
 * verity/finish when no such steps are enabled.
 */
static void f2fs_read_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
	struct bio_post_read_ctx *ctx = bio->bi_private;

	if (time_to_inject(sbi, FAULT_READ_IO)) {
		f2fs_show_injection_info(sbi, FAULT_READ_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	if (bio->bi_status) {
		f2fs_finish_read_bio(bio);
		return;
	}

	/* decryption/decompression need process context: defer to the wq */
	if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) {
		INIT_WORK(&ctx->work, f2fs_post_read_work);
		queue_work(ctx->sbi->post_read_wq, &ctx->work);
	} else {
		f2fs_verify_and_finish_bio(bio);
	}
}
30893dfe2acSJaegeuk Kim 
/*
 * bi_end_io for write bios: per-page cleanup and writeback accounting,
 * checkpoint-fatal error handling, and waking checkpoint waiters once all
 * F2FS_WB_CP_DATA pages are written.
 */
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
		f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		/* dummy pages exist only to pad the bio; return them */
		if (IS_DUMMY_WRITTEN_PAGE(page)) {
			set_page_private(page, (unsigned long)NULL);
			ClearPagePrivate(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_finalize_bounce_page(&page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (f2fs_is_compressed_page(page)) {
			f2fs_compress_write_end_io(bio, page);
			continue;
		}
#endif

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			/* failing checkpointed data is unrecoverable */
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		if (f2fs_in_warm_node_list(sbi, page))
			f2fs_del_fsync_node_entry(sbi, page);
		clear_cold_data(page);
		end_page_writeback(page);
	}
	/* checkpoint waits here for all of its dirty pages to hit disk */
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}
36593dfe2acSJaegeuk Kim 
3663c62be17SJaegeuk Kim struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
3673c62be17SJaegeuk Kim 				block_t blk_addr, struct bio *bio)
3683c62be17SJaegeuk Kim {
3693c62be17SJaegeuk Kim 	struct block_device *bdev = sbi->sb->s_bdev;
3703c62be17SJaegeuk Kim 	int i;
3713c62be17SJaegeuk Kim 
3720916878dSDamien Le Moal 	if (f2fs_is_multi_device(sbi)) {
3733c62be17SJaegeuk Kim 		for (i = 0; i < sbi->s_ndevs; i++) {
3743c62be17SJaegeuk Kim 			if (FDEV(i).start_blk <= blk_addr &&
3753c62be17SJaegeuk Kim 			    FDEV(i).end_blk >= blk_addr) {
3763c62be17SJaegeuk Kim 				blk_addr -= FDEV(i).start_blk;
3773c62be17SJaegeuk Kim 				bdev = FDEV(i).bdev;
3783c62be17SJaegeuk Kim 				break;
3793c62be17SJaegeuk Kim 			}
3803c62be17SJaegeuk Kim 		}
3810916878dSDamien Le Moal 	}
3823c62be17SJaegeuk Kim 	if (bio) {
38374d46992SChristoph Hellwig 		bio_set_dev(bio, bdev);
3843c62be17SJaegeuk Kim 		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
3853c62be17SJaegeuk Kim 	}
3863c62be17SJaegeuk Kim 	return bdev;
3873c62be17SJaegeuk Kim }
3883c62be17SJaegeuk Kim 
3893c62be17SJaegeuk Kim int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
3903c62be17SJaegeuk Kim {
3913c62be17SJaegeuk Kim 	int i;
3923c62be17SJaegeuk Kim 
3930916878dSDamien Le Moal 	if (!f2fs_is_multi_device(sbi))
3940916878dSDamien Le Moal 		return 0;
3950916878dSDamien Le Moal 
3963c62be17SJaegeuk Kim 	for (i = 0; i < sbi->s_ndevs; i++)
3973c62be17SJaegeuk Kim 		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
3983c62be17SJaegeuk Kim 			return i;
3993c62be17SJaegeuk Kim 	return 0;
4003c62be17SJaegeuk Kim }
4013c62be17SJaegeuk Kim 
4027a88ddb5SChao Yu /*
4037a88ddb5SChao Yu  * Return true, if pre_bio's bdev is same as its target device.
4047a88ddb5SChao Yu  */
4053c62be17SJaegeuk Kim static bool __same_bdev(struct f2fs_sb_info *sbi,
4063c62be17SJaegeuk Kim 				block_t blk_addr, struct bio *bio)
4073c62be17SJaegeuk Kim {
40874d46992SChristoph Hellwig 	struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
40974d46992SChristoph Hellwig 	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
4103c62be17SJaegeuk Kim }
4113c62be17SJaegeuk Kim 
/*
 * Allocate a bio for @fio spanning up to @npages pages, target it at
 * fio->new_blkaddr's device, and set the end_io handler and write hint
 * according to the I/O direction.
 */
static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct bio *bio;

	/* noio == true: this allocation may not fail */
	bio = f2fs_bio_alloc(sbi, npages, true);

	f2fs_target_device(sbi, fio->new_blkaddr, bio);
	if (is_read_io(fio->op)) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
						fio->type, fio->temp);
	}
	if (fio->io_wbc)
		wbc_init_bio(fio->io_wbc, bio);

	return bio;
}
434940a6d34SGu Zheng 
/* Attach an inline-encryption context to @bio unless this is raw GC I/O. */
static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
				  pgoff_t first_idx,
				  const struct f2fs_io_info *fio,
				  gfp_t gfp_mask)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (!fio || !fio->encrypted_page)
		fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
}
44727aacd28SSatya Tangirala 
/*
 * Return true if a page at @next_idx of @inode may be merged into @bio
 * as far as inline encryption is concerned.
 */
static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
				     pgoff_t next_idx,
				     const struct f2fs_io_info *fio)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (fio && fio->encrypted_page)
		return !bio_has_crypt_ctx(bio);

	return fscrypt_mergeable_bio(bio, inode, next_idx);
}
46127aacd28SSatya Tangirala 
4624fc29c1aSLinus Torvalds static inline void __submit_bio(struct f2fs_sb_info *sbi,
46319a5f5e2SJaegeuk Kim 				struct bio *bio, enum page_type type)
464f5730184SJaegeuk Kim {
4654fc29c1aSLinus Torvalds 	if (!is_read_io(bio_op(bio))) {
4660a595ebaSJaegeuk Kim 		unsigned int start;
4670a595ebaSJaegeuk Kim 
4680a595ebaSJaegeuk Kim 		if (type != DATA && type != NODE)
4690a595ebaSJaegeuk Kim 			goto submit_io;
4700a595ebaSJaegeuk Kim 
471b0332a0fSChao Yu 		if (f2fs_lfs_mode(sbi) && current->plug)
4723bb09a0eSTiezhu Yang 			blk_finish_plug(current->plug);
4733bb09a0eSTiezhu Yang 
4748223ecc4SChao Yu 		if (F2FS_IO_ALIGNED(sbi))
4758223ecc4SChao Yu 			goto submit_io;
4768223ecc4SChao Yu 
4770a595ebaSJaegeuk Kim 		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
4780a595ebaSJaegeuk Kim 		start %= F2FS_IO_SIZE(sbi);
4790a595ebaSJaegeuk Kim 
4800a595ebaSJaegeuk Kim 		if (start == 0)
4810a595ebaSJaegeuk Kim 			goto submit_io;
4820a595ebaSJaegeuk Kim 
4830a595ebaSJaegeuk Kim 		/* fill dummy pages */
4840a595ebaSJaegeuk Kim 		for (; start < F2FS_IO_SIZE(sbi); start++) {
4850a595ebaSJaegeuk Kim 			struct page *page =
4860a595ebaSJaegeuk Kim 				mempool_alloc(sbi->write_io_dummy,
487bc73a4b2SGao Xiang 					      GFP_NOIO | __GFP_NOFAIL);
4880a595ebaSJaegeuk Kim 			f2fs_bug_on(sbi, !page);
4890a595ebaSJaegeuk Kim 
490bc73a4b2SGao Xiang 			zero_user_segment(page, 0, PAGE_SIZE);
4910a595ebaSJaegeuk Kim 			SetPagePrivate(page);
492e90027d2SXiaojun Wang 			set_page_private(page, DUMMY_WRITTEN_PAGE);
4930a595ebaSJaegeuk Kim 			lock_page(page);
4940a595ebaSJaegeuk Kim 			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
4950a595ebaSJaegeuk Kim 				f2fs_bug_on(sbi, 1);
49619a5f5e2SJaegeuk Kim 		}
4970a595ebaSJaegeuk Kim 		/*
4980a595ebaSJaegeuk Kim 		 * In the NODE case, we lose next block address chain. So, we
4990a595ebaSJaegeuk Kim 		 * need to do checkpoint in f2fs_sync_file.
5000a595ebaSJaegeuk Kim 		 */
5010a595ebaSJaegeuk Kim 		if (type == NODE)
5020a595ebaSJaegeuk Kim 			set_sbi_flag(sbi, SBI_NEED_CP);
5030a595ebaSJaegeuk Kim 	}
5040a595ebaSJaegeuk Kim submit_io:
505554b5125SJaegeuk Kim 	if (is_read_io(bio_op(bio)))
506554b5125SJaegeuk Kim 		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
507554b5125SJaegeuk Kim 	else
508554b5125SJaegeuk Kim 		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
5094e49ea4aSMike Christie 	submit_bio(bio);
510f5730184SJaegeuk Kim }
511f5730184SJaegeuk Kim 
/* Exported wrapper around __submit_bio() for other f2fs compilation units. */
void f2fs_submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	__submit_bio(sbi, bio, type);
}
5174c8ff709SChao Yu 
51832b6aba8SJaegeuk Kim static void __attach_io_flag(struct f2fs_io_info *fio)
519da9953b7SJaegeuk Kim {
520da9953b7SJaegeuk Kim 	struct f2fs_sb_info *sbi = fio->sbi;
521da9953b7SJaegeuk Kim 	unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
52232b6aba8SJaegeuk Kim 	unsigned int io_flag, fua_flag, meta_flag;
52332b6aba8SJaegeuk Kim 
52432b6aba8SJaegeuk Kim 	if (fio->type == DATA)
52532b6aba8SJaegeuk Kim 		io_flag = sbi->data_io_flag;
52632b6aba8SJaegeuk Kim 	else if (fio->type == NODE)
52732b6aba8SJaegeuk Kim 		io_flag = sbi->node_io_flag;
52832b6aba8SJaegeuk Kim 	else
52932b6aba8SJaegeuk Kim 		return;
53032b6aba8SJaegeuk Kim 
53132b6aba8SJaegeuk Kim 	fua_flag = io_flag & temp_mask;
53232b6aba8SJaegeuk Kim 	meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;
53332b6aba8SJaegeuk Kim 
534da9953b7SJaegeuk Kim 	/*
53532b6aba8SJaegeuk Kim 	 * data/node io flag bits per temp:
536da9953b7SJaegeuk Kim 	 *      REQ_META     |      REQ_FUA      |
537da9953b7SJaegeuk Kim 	 *    5 |    4 |   3 |    2 |    1 |   0 |
538da9953b7SJaegeuk Kim 	 * Cold | Warm | Hot | Cold | Warm | Hot |
539da9953b7SJaegeuk Kim 	 */
540da9953b7SJaegeuk Kim 	if ((1 << fio->temp) & meta_flag)
541da9953b7SJaegeuk Kim 		fio->op_flags |= REQ_META;
542da9953b7SJaegeuk Kim 	if ((1 << fio->temp) & fua_flag)
543da9953b7SJaegeuk Kim 		fio->op_flags |= REQ_FUA;
544da9953b7SJaegeuk Kim }
545da9953b7SJaegeuk Kim 
/*
 * Submit the pending merged bio in @io (if any) with the op/flags from
 * io->fio, then clear the slot.  Caller holds io->io_rwsem for write.
 */
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	__attach_io_flag(fio);
	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}
56493dfe2acSJaegeuk Kim 
/*
 * Return true if @bio contains a page belonging to @inode, equal to
 * @page, or owned by node ino @ino.  When all three filters are unset,
 * any non-NULL bio matches (used to force a flush).
 */
static bool __has_merged_page(struct bio *bio, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (!bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *target = bvec->bv_page;

		/* unwrap encryption bounce pages to the pagecache page */
		if (fscrypt_is_bounce_page(target)) {
			target = fscrypt_pagecache_page(target);
			if (IS_ERR(target))
				continue;
		}
		/* unwrap compression control pages likewise */
		if (f2fs_is_compressed_page(target)) {
			target = f2fs_compress_control_page(target);
			if (IS_ERR(target))
				continue;
		}

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}
6010fd785ebSChao Yu 
/*
 * Flush the pending merged write bio for (@type, @temp), upgrading META
 * to META_FLUSH semantics (sync, and preflush/FUA unless "nobarrier")
 * during checkpoint.
 */
static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}
62193dfe2acSJaegeuk Kim 
/*
 * Flush merged write bios of @type across all temperatures.  With @force
 * every slot is flushed; otherwise only slots whose bio matches @inode,
 * @page or @ino (see __has_merged_page()).
 */
static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, bool force)
{
	enum temp_type temp;
	bool ret = true;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		if (!force)	{
			enum page_type btype = PAGE_TYPE_OF_BIO(type);
			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

			/* peek at the pending bio under the read lock */
			down_read(&io->io_rwsem);
			ret = __has_merged_page(io->bio, inode, page, ino);
			up_read(&io->io_rwsem);
		}
		if (ret)
			__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}
6460c3a5797SChao Yu 
/* Unconditionally flush all pending merged write bios of @type. */
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
}
65193dfe2acSJaegeuk Kim 
/*
 * Flush pending merged write bios of @type that contain a page matching
 * @inode, @page or @ino.
 */
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
}
65893dfe2acSJaegeuk Kim 
659b9109b0eSJaegeuk Kim void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
660406657ddSChao Yu {
661b9109b0eSJaegeuk Kim 	f2fs_submit_merged_write(sbi, DATA);
662b9109b0eSJaegeuk Kim 	f2fs_submit_merged_write(sbi, NODE);
663b9109b0eSJaegeuk Kim 	f2fs_submit_merged_write(sbi, META);
664406657ddSChao Yu }
665406657ddSChao Yu 
66693dfe2acSJaegeuk Kim /*
66793dfe2acSJaegeuk Kim  * Fill the locked page with data located in the block address.
668771a9a71STomohiro Kusumi  * A caller needs to unlock the page on failure.
66993dfe2acSJaegeuk Kim  */
/*
 * Build and submit a single-page bio for @fio (read or write).
 * Returns 0, -EFSCORRUPTED on an invalid target block address, or
 * -EFAULT if the page could not be added to the freshly allocated bio.
 */
67005ca3632SJaegeuk Kim int f2fs_submit_page_bio(struct f2fs_io_info *fio)
67193dfe2acSJaegeuk Kim {
67293dfe2acSJaegeuk Kim 	struct bio *bio;
	/* for fs-layer encryption, the ciphertext bounce page is submitted */
6730b81d077SJaegeuk Kim 	struct page *page = fio->encrypted_page ?
6740b81d077SJaegeuk Kim 			fio->encrypted_page : fio->page;
67593dfe2acSJaegeuk Kim 
	/* sanity-check the block address before touching the device */
676c9b60788SChao Yu 	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
67793770ab7SChao Yu 			fio->is_por ? META_POR : (__is_meta_io(fio) ?
67893770ab7SChao Yu 			META_GENERIC : DATA_GENERIC_ENHANCE)))
67910f966bbSChao Yu 		return -EFSCORRUPTED;
680c9b60788SChao Yu 
6812ace38e0SChao Yu 	trace_f2fs_submit_page_bio(page, fio);
68205ca3632SJaegeuk Kim 	f2fs_trace_ios(fio, 0);
68393dfe2acSJaegeuk Kim 
68493dfe2acSJaegeuk Kim 	/* Allocate a new bio */
685b757f6edSChao Yu 	bio = __bio_alloc(fio, 1);
68693dfe2acSJaegeuk Kim 
	/* attach inline-crypto context (no-op when not using hw encryption) */
68727aacd28SSatya Tangirala 	f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
68827aacd28SSatya Tangirala 			       fio->page->index, fio, GFP_NOIO);
68927aacd28SSatya Tangirala 
69009cbfeafSKirill A. Shutemov 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
69193dfe2acSJaegeuk Kim 		bio_put(bio);
69293dfe2acSJaegeuk Kim 		return -EFAULT;
69393dfe2acSJaegeuk Kim 	}
69478efac53SChao Yu 
	/* charge the write to the owning cgroup for writeback accounting */
69578efac53SChao Yu 	if (fio->io_wbc && !is_read_io(fio->op))
69634e51a5eSTejun Heo 		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
69778efac53SChao Yu 
698b7b911d5SJaegeuk Kim 	__attach_io_flag(fio);
69904d328deSMike Christie 	bio_set_op_attrs(bio, fio->op, fio->op_flags);
70093dfe2acSJaegeuk Kim 
7015f9abab4SJaegeuk Kim 	inc_page_count(fio->sbi, is_read_io(fio->op) ?
7025f9abab4SJaegeuk Kim 			__read_io_type(page): WB_DATA_TYPE(fio->page));
7034c58ed07SChao Yu 
7044c58ed07SChao Yu 	__submit_bio(fio->sbi, bio, fio->type);
70593dfe2acSJaegeuk Kim 	return 0;
70693dfe2acSJaegeuk Kim }
70793dfe2acSJaegeuk Kim 
7088896cbdfSChao Yu static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
7098896cbdfSChao Yu 				block_t last_blkaddr, block_t cur_blkaddr)
7108896cbdfSChao Yu {
71110208567SJaegeuk Kim 	if (unlikely(sbi->max_io_bytes &&
71210208567SJaegeuk Kim 			bio->bi_iter.bi_size >= sbi->max_io_bytes))
71310208567SJaegeuk Kim 		return false;
7148896cbdfSChao Yu 	if (last_blkaddr + 1 != cur_blkaddr)
7158896cbdfSChao Yu 		return false;
7168896cbdfSChao Yu 	return __same_bdev(sbi, cur_blkaddr, bio);
7178896cbdfSChao Yu }
7188896cbdfSChao Yu 
7198896cbdfSChao Yu static bool io_type_is_mergeable(struct f2fs_bio_info *io,
7208896cbdfSChao Yu 						struct f2fs_io_info *fio)
7218896cbdfSChao Yu {
7228896cbdfSChao Yu 	if (io->fio.op != fio->op)
7238896cbdfSChao Yu 		return false;
7248896cbdfSChao Yu 	return io->fio.op_flags == fio->op_flags;
7258896cbdfSChao Yu }
7268896cbdfSChao Yu 
/*
 * Full mergeability check for the per-(type,temp) write path: honours the
 * IO-alignment mode's vector-space requirement, then physical contiguity
 * (page_is_mergeable) and matching op/op_flags (io_type_is_mergeable).
 */
7278896cbdfSChao Yu static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
7288896cbdfSChao Yu 					struct f2fs_bio_info *io,
7298896cbdfSChao Yu 					struct f2fs_io_info *fio,
7308896cbdfSChao Yu 					block_t last_blkaddr,
7318896cbdfSChao Yu 					block_t cur_blkaddr)
7328896cbdfSChao Yu {
733c72db71eSChao Yu 	if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
734c72db71eSChao Yu 		unsigned int filled_blocks =
735c72db71eSChao Yu 				F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
736c72db71eSChao Yu 		unsigned int io_size = F2FS_IO_SIZE(sbi);
737c72db71eSChao Yu 		unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;
738c72db71eSChao Yu 
739c72db71eSChao Yu 		/* IOs in bio is aligned and left space of vectors is not enough */
740c72db71eSChao Yu 		if (!(filled_blocks % io_size) && left_vecs < io_size)
741c72db71eSChao Yu 			return false;
742c72db71eSChao Yu 	}
7438896cbdfSChao Yu 	if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
7448896cbdfSChao Yu 		return false;
7458896cbdfSChao Yu 	return io_type_is_mergeable(io, fio);
7468896cbdfSChao Yu }
7478896cbdfSChao Yu 
/*
 * Track a freshly allocated IPU bio: take a reference on @bio, add @page
 * as its first page, and queue a bio_entry on the DATA write list for
 * @temp so later pages (or a flush) can find it.
 */
7480b20fcecSChao Yu static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
7490b20fcecSChao Yu 				struct page *page, enum temp_type temp)
7500b20fcecSChao Yu {
7510b20fcecSChao Yu 	struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
7520b20fcecSChao Yu 	struct bio_entry *be;
7530b20fcecSChao Yu 
7540b20fcecSChao Yu 	be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
7550b20fcecSChao Yu 	be->bio = bio;
	/* extra reference; dropped by the caller when *bio is released */
7560b20fcecSChao Yu 	bio_get(bio);
7570b20fcecSChao Yu 
	/* adding one page to an empty bio cannot fail */
7580b20fcecSChao Yu 	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
7590b20fcecSChao Yu 		f2fs_bug_on(sbi, 1);
7600b20fcecSChao Yu 
7610b20fcecSChao Yu 	down_write(&io->bio_list_lock);
7620b20fcecSChao Yu 	list_add_tail(&be->list, &io->bio_list);
7630b20fcecSChao Yu 	up_write(&io->bio_list_lock);
7640b20fcecSChao Yu }
7650b20fcecSChao Yu 
/* Unlink a bio_entry from its bio_list and free it (caller holds the lock). */
7660b20fcecSChao Yu static void del_bio_entry(struct bio_entry *be)
7670b20fcecSChao Yu {
7680b20fcecSChao Yu 	list_del(&be->list);
7690b20fcecSChao Yu 	kmem_cache_free(bio_entry_slab, be);
7700b20fcecSChao Yu }
7710b20fcecSChao Yu 
/*
 * Try to append @page to the tracked IPU bio *@bio.  If *@bio is found on
 * a bio_list but cannot absorb the page (crypto contexts don't merge or
 * the bio is full), it is untracked and submitted.  On any failure *@bio
 * is put and cleared, and -EAGAIN is returned so the caller re-allocates.
 */
77227aacd28SSatya Tangirala static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
7730b20fcecSChao Yu 							struct page *page)
7740b20fcecSChao Yu {
77527aacd28SSatya Tangirala 	struct f2fs_sb_info *sbi = fio->sbi;
7760b20fcecSChao Yu 	enum temp_type temp;
7770b20fcecSChao Yu 	bool found = false;
7780b20fcecSChao Yu 	int ret = -EAGAIN;
7790b20fcecSChao Yu 
7800b20fcecSChao Yu 	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
7810b20fcecSChao Yu 		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
7820b20fcecSChao Yu 		struct list_head *head = &io->bio_list;
7830b20fcecSChao Yu 		struct bio_entry *be;
7840b20fcecSChao Yu 
7850b20fcecSChao Yu 		down_write(&io->bio_list_lock);
7860b20fcecSChao Yu 		list_for_each_entry(be, head, list) {
7870b20fcecSChao Yu 			if (be->bio != *bio)
7880b20fcecSChao Yu 				continue;
7890b20fcecSChao Yu 
7900b20fcecSChao Yu 			found = true;
7910b20fcecSChao Yu 
	/* caller already verified contiguity before calling us */
79227aacd28SSatya Tangirala 			f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
79327aacd28SSatya Tangirala 							    *fio->last_block,
79427aacd28SSatya Tangirala 							    fio->new_blkaddr));
79527aacd28SSatya Tangirala 			if (f2fs_crypt_mergeable_bio(*bio,
79627aacd28SSatya Tangirala 					fio->page->mapping->host,
79727aacd28SSatya Tangirala 					fio->page->index, fio) &&
79827aacd28SSatya Tangirala 			    bio_add_page(*bio, page, PAGE_SIZE, 0) ==
7994c8ff709SChao Yu 					PAGE_SIZE) {
8000b20fcecSChao Yu 				ret = 0;
8010b20fcecSChao Yu 				break;
8020b20fcecSChao Yu 			}
8030b20fcecSChao Yu 
80427aacd28SSatya Tangirala 			/* page can't be merged into bio; submit the bio */
8050b20fcecSChao Yu 			del_bio_entry(be);
8060b20fcecSChao Yu 			__submit_bio(sbi, *bio, DATA);
8070b20fcecSChao Yu 			break;
8080b20fcecSChao Yu 		}
8090b20fcecSChao Yu 		up_write(&io->bio_list_lock);
8100b20fcecSChao Yu 	}
8110b20fcecSChao Yu 
	/* page not added: drop our reference and force a new bio */
8120b20fcecSChao Yu 	if (ret) {
8130b20fcecSChao Yu 		bio_put(*bio);
8140b20fcecSChao Yu 		*bio = NULL;
8150b20fcecSChao Yu 	}
8160b20fcecSChao Yu 
8170b20fcecSChao Yu 	return ret;
8180b20fcecSChao Yu }
8190b20fcecSChao Yu 
/*
 * Find and submit the tracked IPU bio that either is *@bio or contains
 * @page.  A cheap read-locked scan locates the entry; the write-locked
 * pass re-finds and unlinks it (the list may have changed in between).
 * Any leftover *@bio reference is dropped and the pointer cleared.
 */
8200b20fcecSChao Yu void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
8210b20fcecSChao Yu 					struct bio **bio, struct page *page)
8220b20fcecSChao Yu {
8230b20fcecSChao Yu 	enum temp_type temp;
8240b20fcecSChao Yu 	bool found = false;
8250b20fcecSChao Yu 	struct bio *target = bio ? *bio : NULL;
8260b20fcecSChao Yu 
8270b20fcecSChao Yu 	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
8280b20fcecSChao Yu 		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
8290b20fcecSChao Yu 		struct list_head *head = &io->bio_list;
8300b20fcecSChao Yu 		struct bio_entry *be;
8310b20fcecSChao Yu 
8320b20fcecSChao Yu 		if (list_empty(head))
8330b20fcecSChao Yu 			continue;
8340b20fcecSChao Yu 
	/* first pass: read lock only, just probe for a match */
8350b20fcecSChao Yu 		down_read(&io->bio_list_lock);
8360b20fcecSChao Yu 		list_for_each_entry(be, head, list) {
8370b20fcecSChao Yu 			if (target)
8380b20fcecSChao Yu 				found = (target == be->bio);
8390b20fcecSChao Yu 			else
8400b20fcecSChao Yu 				found = __has_merged_page(be->bio, NULL,
8410b20fcecSChao Yu 								page, 0);
8420b20fcecSChao Yu 			if (found)
8430b20fcecSChao Yu 				break;
8440b20fcecSChao Yu 		}
8450b20fcecSChao Yu 		up_read(&io->bio_list_lock);
8460b20fcecSChao Yu 
8470b20fcecSChao Yu 		if (!found)
8480b20fcecSChao Yu 			continue;
8490b20fcecSChao Yu 
8500b20fcecSChao Yu 		found = false;
8510b20fcecSChao Yu 
	/* second pass: write lock, re-check and unlink the entry */
8520b20fcecSChao Yu 		down_write(&io->bio_list_lock);
8530b20fcecSChao Yu 		list_for_each_entry(be, head, list) {
8540b20fcecSChao Yu 			if (target)
8550b20fcecSChao Yu 				found = (target == be->bio);
8560b20fcecSChao Yu 			else
8570b20fcecSChao Yu 				found = __has_merged_page(be->bio, NULL,
8580b20fcecSChao Yu 								page, 0);
8590b20fcecSChao Yu 			if (found) {
8600b20fcecSChao Yu 				target = be->bio;
8610b20fcecSChao Yu 				del_bio_entry(be);
8620b20fcecSChao Yu 				break;
8630b20fcecSChao Yu 			}
8640b20fcecSChao Yu 		}
8650b20fcecSChao Yu 		up_write(&io->bio_list_lock);
8660b20fcecSChao Yu 	}
8670b20fcecSChao Yu 
8680b20fcecSChao Yu 	if (found)
8690b20fcecSChao Yu 		__submit_bio(sbi, target, DATA);
8700b20fcecSChao Yu 	if (bio && *bio) {
8710b20fcecSChao Yu 		bio_put(*bio);
8720b20fcecSChao Yu 		*bio = NULL;
8730b20fcecSChao Yu 	}
8740b20fcecSChao Yu }
8750b20fcecSChao Yu 
/*
 * Add one page to the caller-held IPU write bio (*fio->bio), merging into
 * the existing bio when possible, otherwise submitting it and allocating
 * a new tracked bio.  Updates *fio->last_block and *fio->bio on success.
 * Returns 0 or -EFSCORRUPTED on an invalid target block address.
 */
8768648de2cSChao Yu int f2fs_merge_page_bio(struct f2fs_io_info *fio)
8778648de2cSChao Yu {
8788648de2cSChao Yu 	struct bio *bio = *fio->bio;
8798648de2cSChao Yu 	struct page *page = fio->encrypted_page ?
8808648de2cSChao Yu 			fio->encrypted_page : fio->page;
8818648de2cSChao Yu 
8828648de2cSChao Yu 	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
8838648de2cSChao Yu 			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
88410f966bbSChao Yu 		return -EFSCORRUPTED;
8858648de2cSChao Yu 
8868648de2cSChao Yu 	trace_f2fs_submit_page_bio(page, fio);
8878648de2cSChao Yu 	f2fs_trace_ios(fio, 0);
8888648de2cSChao Yu 
	/* not physically contiguous: flush the current bio first */
8898896cbdfSChao Yu 	if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
8900b20fcecSChao Yu 						fio->new_blkaddr))
8910b20fcecSChao Yu 		f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
8928648de2cSChao Yu alloc_new:
8938648de2cSChao Yu 	if (!bio) {
894b757f6edSChao Yu 		bio = __bio_alloc(fio, BIO_MAX_PAGES);
895b7b911d5SJaegeuk Kim 		__attach_io_flag(fio);
89627aacd28SSatya Tangirala 		f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
89727aacd28SSatya Tangirala 				       fio->page->index, fio, GFP_NOIO);
8988648de2cSChao Yu 		bio_set_op_attrs(bio, fio->op, fio->op_flags);
8998648de2cSChao Yu 
9000b20fcecSChao Yu 		add_bio_entry(fio->sbi, bio, page, fio->temp);
9010b20fcecSChao Yu 	} else {
	/* -EAGAIN means bio was submitted/cleared; retry with a new one */
90227aacd28SSatya Tangirala 		if (add_ipu_page(fio, &bio, page))
9038648de2cSChao Yu 			goto alloc_new;
9048648de2cSChao Yu 	}
9058648de2cSChao Yu 
9068648de2cSChao Yu 	if (fio->io_wbc)
9079637d517SLinus Torvalds 		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
9088648de2cSChao Yu 
9098648de2cSChao Yu 	inc_page_count(fio->sbi, WB_DATA_TYPE(page));
9108648de2cSChao Yu 
9118648de2cSChao Yu 	*fio->last_block = fio->new_blkaddr;
9128648de2cSChao Yu 	*fio->bio = bio;
9138648de2cSChao Yu 
9148648de2cSChao Yu 	return 0;
9158648de2cSChao Yu }
9168648de2cSChao Yu 
/*
 * Main merged-write path: append @fio's page to the per-(type,temp)
 * in-flight bio, submitting and re-allocating when it cannot merge.
 * With fio->in_list, drains the whole queued io_list under io_rwsem.
 * Sets fio->submitted, and fio->retry when IO-alignment defers the page.
 */
917fe16efe6SChao Yu void f2fs_submit_page_write(struct f2fs_io_info *fio)
91893dfe2acSJaegeuk Kim {
91905ca3632SJaegeuk Kim 	struct f2fs_sb_info *sbi = fio->sbi;
920458e6197SJaegeuk Kim 	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
921a912b54dSJaegeuk Kim 	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
9224375a336SJaegeuk Kim 	struct page *bio_page;
92393dfe2acSJaegeuk Kim 
924b9109b0eSJaegeuk Kim 	f2fs_bug_on(sbi, is_read_io(fio->op));
92593dfe2acSJaegeuk Kim 
926fb830fc5SChao Yu 	down_write(&io->io_rwsem);
927fb830fc5SChao Yu next:
	/* in_list mode: pull the next queued fio off this io's list */
928fb830fc5SChao Yu 	if (fio->in_list) {
929fb830fc5SChao Yu 		spin_lock(&io->io_lock);
930fb830fc5SChao Yu 		if (list_empty(&io->io_list)) {
931fb830fc5SChao Yu 			spin_unlock(&io->io_lock);
932fe16efe6SChao Yu 			goto out;
933fb830fc5SChao Yu 		}
934fb830fc5SChao Yu 		fio = list_first_entry(&io->io_list,
935fb830fc5SChao Yu 						struct f2fs_io_info, list);
936fb830fc5SChao Yu 		list_del(&fio->list);
937fb830fc5SChao Yu 		spin_unlock(&io->io_lock);
938fb830fc5SChao Yu 	}
93993dfe2acSJaegeuk Kim 
94093770ab7SChao Yu 	verify_fio_blkaddr(fio);
94193dfe2acSJaegeuk Kim 
	/* pick the page actually going to disk (bounce page if any) */
9424c8ff709SChao Yu 	if (fio->encrypted_page)
9434c8ff709SChao Yu 		bio_page = fio->encrypted_page;
9444c8ff709SChao Yu 	else if (fio->compressed_page)
9454c8ff709SChao Yu 		bio_page = fio->compressed_page;
9464c8ff709SChao Yu 	else
9474c8ff709SChao Yu 		bio_page = fio->page;
94836951b38SChao Yu 
949ebf7c522SThomas Meyer 	/* set submitted = true as a return value */
950ebf7c522SThomas Meyer 	fio->submitted = true;
951d68f735bSJaegeuk Kim 
95236951b38SChao Yu 	inc_page_count(sbi, WB_DATA_TYPE(bio_page));
95336951b38SChao Yu 
	/* flush the current bio if this page cannot merge into it */
95427aacd28SSatya Tangirala 	if (io->bio &&
95527aacd28SSatya Tangirala 	    (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
95627aacd28SSatya Tangirala 			      fio->new_blkaddr) ||
95727aacd28SSatya Tangirala 	     !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
95827aacd28SSatya Tangirala 				       bio_page->index, fio)))
959458e6197SJaegeuk Kim 		__submit_merged_bio(io);
96093dfe2acSJaegeuk Kim alloc_new:
96193dfe2acSJaegeuk Kim 	if (io->bio == NULL) {
	/* IO-aligned mode: misaligned start must be retried by the caller */
9628223ecc4SChao Yu 		if (F2FS_IO_ALIGNED(sbi) &&
9638223ecc4SChao Yu 				(fio->type == DATA || fio->type == NODE) &&
9640a595ebaSJaegeuk Kim 				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
9650a595ebaSJaegeuk Kim 			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
966fe16efe6SChao Yu 			fio->retry = true;
967fe16efe6SChao Yu 			goto skip;
9680a595ebaSJaegeuk Kim 		}
969b757f6edSChao Yu 		io->bio = __bio_alloc(fio, BIO_MAX_PAGES);
97027aacd28SSatya Tangirala 		f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
97127aacd28SSatya Tangirala 				       bio_page->index, fio, GFP_NOIO);
972458e6197SJaegeuk Kim 		io->fio = *fio;
97393dfe2acSJaegeuk Kim 	}
97493dfe2acSJaegeuk Kim 
975a912b54dSJaegeuk Kim 	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
976458e6197SJaegeuk Kim 		__submit_merged_bio(io);
97793dfe2acSJaegeuk Kim 		goto alloc_new;
97893dfe2acSJaegeuk Kim 	}
97993dfe2acSJaegeuk Kim 
980578c6478SYufen Yu 	if (fio->io_wbc)
98134e51a5eSTejun Heo 		wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);
982578c6478SYufen Yu 
9837a9d7548SChao Yu 	io->last_block_in_bio = fio->new_blkaddr;
98405ca3632SJaegeuk Kim 	f2fs_trace_ios(fio, 0);
985fb830fc5SChao Yu 
986fb830fc5SChao Yu 	trace_f2fs_submit_page_write(fio->page, fio);
987fe16efe6SChao Yu skip:
988fb830fc5SChao Yu 	if (fio->in_list)
989fb830fc5SChao Yu 		goto next;
990fe16efe6SChao Yu out:
	/* don't keep bios pending across shutdown/checkpoint-error states */
9914354994fSDaniel Rosenberg 	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
99200e09c0bSChao Yu 				!f2fs_is_checkpoint_ready(sbi))
9935ce80586SJaegeuk Kim 		__submit_merged_bio(io);
994df0f8dc0SChao Yu 	up_write(&io->io_rwsem);
99593dfe2acSJaegeuk Kim }
99693dfe2acSJaegeuk Kim 
/*
 * Allocate a read bio targeting @blkaddr for up to @nr_pages pages and,
 * when fs-layer decryption, fs-verity, or decompression may be needed,
 * attach a bio_post_read_ctx describing the post-read steps.
 * Returns the bio or ERR_PTR(-ENOMEM).
 */
99713ba41e3SJaegeuk Kim static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
99895ae251fSEric Biggers 				      unsigned nr_pages, unsigned op_flag,
9997f59b277SEric Biggers 				      pgoff_t first_idx, bool for_write)
100013ba41e3SJaegeuk Kim {
100113ba41e3SJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
100213ba41e3SJaegeuk Kim 	struct bio *bio;
10036dbb1796SEric Biggers 	struct bio_post_read_ctx *ctx;
10046dbb1796SEric Biggers 	unsigned int post_read_steps = 0;
100513ba41e3SJaegeuk Kim 
10060683728aSChao Yu 	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES),
10070683728aSChao Yu 								for_write);
10086dbb1796SEric Biggers 	if (!bio)
10096dbb1796SEric Biggers 		return ERR_PTR(-ENOMEM);
101027aacd28SSatya Tangirala 
	/* inline-crypto context for the whole (contiguous-index) bio */
101127aacd28SSatya Tangirala 	f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
101227aacd28SSatya Tangirala 
10136dbb1796SEric Biggers 	f2fs_target_device(sbi, blkaddr, bio);
10146dbb1796SEric Biggers 	bio->bi_end_io = f2fs_read_end_io;
1015e2e59414SJaegeuk Kim 	bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
10166dbb1796SEric Biggers 
	/* software decryption only; inline hw crypto needs no post step */
101727aacd28SSatya Tangirala 	if (fscrypt_inode_uses_fs_layer_crypto(inode))
10187f59b277SEric Biggers 		post_read_steps |= STEP_DECRYPT;
101995ae251fSEric Biggers 
10207f59b277SEric Biggers 	if (f2fs_need_verity(inode, first_idx))
10217f59b277SEric Biggers 		post_read_steps |= STEP_VERITY;
10227f59b277SEric Biggers 
10237f59b277SEric Biggers 	/*
10247f59b277SEric Biggers 	 * STEP_DECOMPRESS is handled specially, since a compressed file might
10257f59b277SEric Biggers 	 * contain both compressed and uncompressed clusters.  We'll allocate a
10267f59b277SEric Biggers 	 * bio_post_read_ctx if the file is compressed, but the caller is
10277f59b277SEric Biggers 	 * responsible for enabling STEP_DECOMPRESS if it's actually needed.
10287f59b277SEric Biggers 	 */
10297f59b277SEric Biggers 
10307f59b277SEric Biggers 	if (post_read_steps || f2fs_compressed_file(inode)) {
1031e8ce5749SEric Biggers 		/* Due to the mempool, this never fails. */
10326dbb1796SEric Biggers 		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
10336dbb1796SEric Biggers 		ctx->bio = bio;
10344c8ff709SChao Yu 		ctx->sbi = sbi;
10356dbb1796SEric Biggers 		ctx->enabled_steps = post_read_steps;
10366dbb1796SEric Biggers 		bio->bi_private = ctx;
103713ba41e3SJaegeuk Kim 	}
103813ba41e3SJaegeuk Kim 
103913ba41e3SJaegeuk Kim 	return bio;
104013ba41e3SJaegeuk Kim }
104113ba41e3SJaegeuk Kim 
104213ba41e3SJaegeuk Kim /* This can handle encryption stuffs */
/*
 * Submit a single-page read of @blkaddr into @page.
 * Returns 0, or -ENOMEM/-EFAULT on bio allocation / page-add failure.
 */
104313ba41e3SJaegeuk Kim static int f2fs_submit_page_read(struct inode *inode, struct page *page,
1044b7973091SJia Yang 				 block_t blkaddr, int op_flags, bool for_write)
104513ba41e3SJaegeuk Kim {
104693770ab7SChao Yu 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
104793770ab7SChao Yu 	struct bio *bio;
104813ba41e3SJaegeuk Kim 
1049b7973091SJia Yang 	bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
10507f59b277SEric Biggers 					page->index, for_write);
105113ba41e3SJaegeuk Kim 	if (IS_ERR(bio))
105213ba41e3SJaegeuk Kim 		return PTR_ERR(bio);
105313ba41e3SJaegeuk Kim 
10540ded69f6SJaegeuk Kim 	/* wait for GCed page writeback via META_MAPPING */
10550ded69f6SJaegeuk Kim 	f2fs_wait_on_block_writeback(inode, blkaddr);
10560ded69f6SJaegeuk Kim 
105713ba41e3SJaegeuk Kim 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
105813ba41e3SJaegeuk Kim 		bio_put(bio);
105913ba41e3SJaegeuk Kim 		return -EFAULT;
106013ba41e3SJaegeuk Kim 	}
1061fb7d70dbSJaegeuk Kim 	ClearPageError(page);
106293770ab7SChao Yu 	inc_page_count(sbi, F2FS_RD_DATA);
10638b83ac81SChao Yu 	f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
106493770ab7SChao Yu 	__submit_bio(sbi, bio, DATA);
106513ba41e3SJaegeuk Kim 	return 0;
106613ba41e3SJaegeuk Kim }
106713ba41e3SJaegeuk Kim 
/*
 * Record dn->data_blkaddr in the on-disk node page at slot
 * base + dn->ofs_in_node (base skips the inode's extra attr area).
 */
106846008c6dSChao Yu static void __set_data_blkaddr(struct dnode_of_data *dn)
106946008c6dSChao Yu {
107046008c6dSChao Yu 	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
107146008c6dSChao Yu 	__le32 *addr_array;
10727a2af766SChao Yu 	int base = 0;
10737a2af766SChao Yu 
10747a2af766SChao Yu 	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
10757a2af766SChao Yu 		base = get_extra_isize(dn->inode);
107646008c6dSChao Yu 
107746008c6dSChao Yu 	/* Get physical address of data block */
107846008c6dSChao Yu 	addr_array = blkaddr_in_node(rn);
10797a2af766SChao Yu 	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
108046008c6dSChao Yu }
108146008c6dSChao Yu 
108293dfe2acSJaegeuk Kim /*
1083eb47b800SJaegeuk Kim  * Lock ordering for the change of data block address:
1084eb47b800SJaegeuk Kim  * ->data_page
1085eb47b800SJaegeuk Kim  *  ->node_page
1086eb47b800SJaegeuk Kim  *    update block addresses in the node page
1087eb47b800SJaegeuk Kim  */
/*
 * Publish dn->data_blkaddr into the node page after its writeback has
 * finished, and flag the dnode when this newly dirtied the node page.
 */
10884d57b86dSChao Yu void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
1089eb47b800SJaegeuk Kim {
1090bae0ee7aSChao Yu 	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
109146008c6dSChao Yu 	__set_data_blkaddr(dn);
109246008c6dSChao Yu 	if (set_page_dirty(dn->node_page))
109393bae099SJaegeuk Kim 		dn->node_changed = true;
1094eb47b800SJaegeuk Kim }
1095eb47b800SJaegeuk Kim 
/* Set a new block address for the dnode and refresh the extent cache. */
1096f28b3434SChao Yu void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
1097f28b3434SChao Yu {
1098f28b3434SChao Yu 	dn->data_blkaddr = blkaddr;
10994d57b86dSChao Yu 	f2fs_set_data_blkaddr(dn);
1100f28b3434SChao Yu 	f2fs_update_extent_cache(dn);
1101f28b3434SChao Yu }
1102f28b3434SChao Yu 
110346008c6dSChao Yu /* dn->ofs_in_node will be returned with up-to-date last block pointer */
/*
 * Reserve up to @count new blocks starting at dn->ofs_in_node: each
 * NULL_ADDR slot becomes NEW_ADDR after charging quota/free space via
 * inc_valid_block_count() (which may shrink @count).  Returns 0, -EPERM
 * when allocation is disallowed on this inode, or the charge error.
 */
11044d57b86dSChao Yu int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
1105eb47b800SJaegeuk Kim {
11064081363fSJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
11070abd675eSChao Yu 	int err;
1108eb47b800SJaegeuk Kim 
110946008c6dSChao Yu 	if (!count)
111046008c6dSChao Yu 		return 0;
111146008c6dSChao Yu 
111291942321SJaegeuk Kim 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1113eb47b800SJaegeuk Kim 		return -EPERM;
11140abd675eSChao Yu 	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
11150abd675eSChao Yu 		return err;
1116eb47b800SJaegeuk Kim 
111746008c6dSChao Yu 	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
111846008c6dSChao Yu 						dn->ofs_in_node, count);
1119c01e2853SNamjae Jeon 
1120bae0ee7aSChao Yu 	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
112146008c6dSChao Yu 
	/* only unallocated (NULL_ADDR) slots consume the reservation */
112246008c6dSChao Yu 	for (; count > 0; dn->ofs_in_node++) {
1123a2ced1ceSChao Yu 		block_t blkaddr = f2fs_data_blkaddr(dn);
112446008c6dSChao Yu 		if (blkaddr == NULL_ADDR) {
1125eb47b800SJaegeuk Kim 			dn->data_blkaddr = NEW_ADDR;
112646008c6dSChao Yu 			__set_data_blkaddr(dn);
112746008c6dSChao Yu 			count--;
112846008c6dSChao Yu 		}
112946008c6dSChao Yu 	}
113046008c6dSChao Yu 
113146008c6dSChao Yu 	if (set_page_dirty(dn->node_page))
113246008c6dSChao Yu 		dn->node_changed = true;
1133eb47b800SJaegeuk Kim 	return 0;
1134eb47b800SJaegeuk Kim }
1135eb47b800SJaegeuk Kim 
113646008c6dSChao Yu /* Should keep dn->ofs_in_node unchanged */
11374d57b86dSChao Yu int f2fs_reserve_new_block(struct dnode_of_data *dn)
113846008c6dSChao Yu {
113946008c6dSChao Yu 	unsigned int ofs_in_node = dn->ofs_in_node;
114046008c6dSChao Yu 	int ret;
114146008c6dSChao Yu 
11424d57b86dSChao Yu 	ret = f2fs_reserve_new_blocks(dn, 1);
114346008c6dSChao Yu 	dn->ofs_in_node = ofs_in_node;
114446008c6dSChao Yu 	return ret;
114546008c6dSChao Yu }
114646008c6dSChao Yu 
/*
 * Look up (allocating node blocks as needed) the dnode for @index and
 * reserve a new data block there if the slot is NULL_ADDR.  The dnode is
 * released on error, or on success when the caller did not pass in an
 * inode page (i.e. did not already own dn->inode_page).
 */
1147b600965cSHuajun Li int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
1148b600965cSHuajun Li {
1149b600965cSHuajun Li 	bool need_put = dn->inode_page ? false : true;
1150b600965cSHuajun Li 	int err;
1151b600965cSHuajun Li 
11524d57b86dSChao Yu 	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
1153b600965cSHuajun Li 	if (err)
1154b600965cSHuajun Li 		return err;
1155a8865372SJaegeuk Kim 
1156b600965cSHuajun Li 	if (dn->data_blkaddr == NULL_ADDR)
11574d57b86dSChao Yu 		err = f2fs_reserve_new_block(dn);
1158a8865372SJaegeuk Kim 	if (err || need_put)
1159b600965cSHuajun Li 		f2fs_put_dnode(dn);
1160b600965cSHuajun Li 	return err;
1161b600965cSHuajun Li }
1162b600965cSHuajun Li 
1163759af1c9SFan Li int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
1164eb47b800SJaegeuk Kim {
1165e15882b6SHou Pengyang 	struct extent_info ei = {0, 0, 0};
1166759af1c9SFan Li 	struct inode *inode = dn->inode;
1167028a41e8SChao Yu 
1168759af1c9SFan Li 	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
1169759af1c9SFan Li 		dn->data_blkaddr = ei.blk + index - ei.fofs;
1170759af1c9SFan Li 		return 0;
1171028a41e8SChao Yu 	}
1172028a41e8SChao Yu 
1173759af1c9SFan Li 	return f2fs_reserve_block(dn, index);
1174eb47b800SJaegeuk Kim }
1175eb47b800SJaegeuk Kim 
/*
 * Return the page-cache page for @index with a read submitted if needed.
 * On success the page is returned unlocked-on-uptodate or locked with a
 * read in flight; NEW_ADDR pages are zero-filled.  Errors: -ENOMEM,
 * -ENOENT (hole), or -EFSCORRUPTED (invalid block address).
 */
11764d57b86dSChao Yu struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
117704d328deSMike Christie 						int op_flags, bool for_write)
1178eb47b800SJaegeuk Kim {
1179eb47b800SJaegeuk Kim 	struct address_space *mapping = inode->i_mapping;
1180eb47b800SJaegeuk Kim 	struct dnode_of_data dn;
1181eb47b800SJaegeuk Kim 	struct page *page;
1182e15882b6SHou Pengyang 	struct extent_info ei = {0,0,0};
1183eb47b800SJaegeuk Kim 	int err;
11844375a336SJaegeuk Kim 
1185a56c7c6fSJaegeuk Kim 	page = f2fs_grab_cache_page(mapping, index, for_write);
1186eb47b800SJaegeuk Kim 	if (!page)
1187eb47b800SJaegeuk Kim 		return ERR_PTR(-ENOMEM);
1188eb47b800SJaegeuk Kim 
	/* fast path: block address resolved from the extent cache */
1189cb3bc9eeSChao Yu 	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
1190cb3bc9eeSChao Yu 		dn.data_blkaddr = ei.blk + index - ei.fofs;
119193770ab7SChao Yu 		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
119293770ab7SChao Yu 						DATA_GENERIC_ENHANCE_READ)) {
119310f966bbSChao Yu 			err = -EFSCORRUPTED;
119493770ab7SChao Yu 			goto put_err;
119593770ab7SChao Yu 		}
1196cb3bc9eeSChao Yu 		goto got_it;
1197cb3bc9eeSChao Yu 	}
1198cb3bc9eeSChao Yu 
1199650495deSJaegeuk Kim 	set_new_dnode(&dn, inode, NULL, NULL, 0);
12004d57b86dSChao Yu 	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
120186531d6bSJaegeuk Kim 	if (err)
120286531d6bSJaegeuk Kim 		goto put_err;
1203650495deSJaegeuk Kim 	f2fs_put_dnode(&dn);
1204650495deSJaegeuk Kim 
12056bacf52fSJaegeuk Kim 	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
120686531d6bSJaegeuk Kim 		err = -ENOENT;
120786531d6bSJaegeuk Kim 		goto put_err;
1208650495deSJaegeuk Kim 	}
120993770ab7SChao Yu 	if (dn.data_blkaddr != NEW_ADDR &&
121093770ab7SChao Yu 			!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
121193770ab7SChao Yu 						dn.data_blkaddr,
121293770ab7SChao Yu 						DATA_GENERIC_ENHANCE)) {
121310f966bbSChao Yu 		err = -EFSCORRUPTED;
121493770ab7SChao Yu 		goto put_err;
121593770ab7SChao Yu 	}
1216cb3bc9eeSChao Yu got_it:
1217​43f3eae1SJaegeuk Kim 	if (PageUptodate(page)) {
121843f3eae1SJaegeuk Kim 		unlock_page(page);
1219eb47b800SJaegeuk Kim 		return page;
122043f3eae1SJaegeuk Kim 	}
1221eb47b800SJaegeuk Kim 
1222d59ff4dfSJaegeuk Kim 	/*
1223d59ff4dfSJaegeuk Kim 	 * A new dentry page is allocated but not able to be written, since its
1224d59ff4dfSJaegeuk Kim 	 * new inode page couldn't be allocated due to -ENOSPC.
1225d59ff4dfSJaegeuk Kim 	 * In such the case, its blkaddr can be remained as NEW_ADDR.
12264d57b86dSChao Yu 	 * see, f2fs_add_link -> f2fs_get_new_data_page ->
12274d57b86dSChao Yu 	 * f2fs_init_inode_metadata.
1228d59ff4dfSJaegeuk Kim 	 */
1229d59ff4dfSJaegeuk Kim 	if (dn.data_blkaddr == NEW_ADDR) {
123009cbfeafSKirill A. Shutemov 		zero_user_segment(page, 0, PAGE_SIZE);
1231237c0790SJaegeuk Kim 		if (!PageUptodate(page))
1232d59ff4dfSJaegeuk Kim 			SetPageUptodate(page);
123343f3eae1SJaegeuk Kim 		unlock_page(page);
1234d59ff4dfSJaegeuk Kim 		return page;
1235d59ff4dfSJaegeuk Kim 	}
1236eb47b800SJaegeuk Kim 
1237b7973091SJia Yang 	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
1238b7973091SJia Yang 						op_flags, for_write);
1239393ff91fSJaegeuk Kim 	if (err)
124086531d6bSJaegeuk Kim 		goto put_err;
124143f3eae1SJaegeuk Kim 	return page;
124286531d6bSJaegeuk Kim 
124386531d6bSJaegeuk Kim put_err:
124486531d6bSJaegeuk Kim 	f2fs_put_page(page, 1);
124586531d6bSJaegeuk Kim 	return ERR_PTR(err);
124643f3eae1SJaegeuk Kim }
1247393ff91fSJaegeuk Kim 
12484d57b86dSChao Yu struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
124943f3eae1SJaegeuk Kim {
125043f3eae1SJaegeuk Kim 	struct address_space *mapping = inode->i_mapping;
125143f3eae1SJaegeuk Kim 	struct page *page;
125243f3eae1SJaegeuk Kim 
125343f3eae1SJaegeuk Kim 	page = find_get_page(mapping, index);
125443f3eae1SJaegeuk Kim 	if (page && PageUptodate(page))
125543f3eae1SJaegeuk Kim 		return page;
125643f3eae1SJaegeuk Kim 	f2fs_put_page(page, 0);
125743f3eae1SJaegeuk Kim 
12584d57b86dSChao Yu 	page = f2fs_get_read_data_page(inode, index, 0, false);
125943f3eae1SJaegeuk Kim 	if (IS_ERR(page))
126043f3eae1SJaegeuk Kim 		return page;
126143f3eae1SJaegeuk Kim 
126243f3eae1SJaegeuk Kim 	if (PageUptodate(page))
126343f3eae1SJaegeuk Kim 		return page;
126443f3eae1SJaegeuk Kim 
126543f3eae1SJaegeuk Kim 	wait_on_page_locked(page);
126643f3eae1SJaegeuk Kim 	if (unlikely(!PageUptodate(page))) {
126743f3eae1SJaegeuk Kim 		f2fs_put_page(page, 0);
126843f3eae1SJaegeuk Kim 		return ERR_PTR(-EIO);
126943f3eae1SJaegeuk Kim 	}
127043f3eae1SJaegeuk Kim 	return page;
127143f3eae1SJaegeuk Kim }
127243f3eae1SJaegeuk Kim 
127343f3eae1SJaegeuk Kim /*
127443f3eae1SJaegeuk Kim  * If it tries to access a hole, return an error.
127543f3eae1SJaegeuk Kim  * Because, the callers, functions in dir.c and GC, should be able to know
127643f3eae1SJaegeuk Kim  * whether this page exists or not.
127743f3eae1SJaegeuk Kim  */
/*
 * Like f2fs_get_read_data_page() but returns the page locked and
 * up-to-date, retrying if the page was truncated/remapped while we
 * waited for the read.  Returns ERR_PTR(-EIO) on read failure.
 */
12784d57b86dSChao Yu struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
1279a56c7c6fSJaegeuk Kim 							bool for_write)
128043f3eae1SJaegeuk Kim {
128143f3eae1SJaegeuk Kim 	struct address_space *mapping = inode->i_mapping;
128243f3eae1SJaegeuk Kim 	struct page *page;
128343f3eae1SJaegeuk Kim repeat:
12844d57b86dSChao Yu 	page = f2fs_get_read_data_page(inode, index, 0, for_write);
128543f3eae1SJaegeuk Kim 	if (IS_ERR(page))
128643f3eae1SJaegeuk Kim 		return page;
128743f3eae1SJaegeuk Kim 
128843f3eae1SJaegeuk Kim 	/* wait for read completion */
1289393ff91fSJaegeuk Kim 	lock_page(page);
	/* page left this mapping while unlocked: start over */
12906bacf52fSJaegeuk Kim 	if (unlikely(page->mapping != mapping)) {
1291afcb7ca0SJaegeuk Kim 		f2fs_put_page(page, 1);
1292afcb7ca0SJaegeuk Kim 		goto repeat;
1293eb47b800SJaegeuk Kim 	}
12941563ac75SChao Yu 	if (unlikely(!PageUptodate(page))) {
12951563ac75SChao Yu 		f2fs_put_page(page, 1);
12961563ac75SChao Yu 		return ERR_PTR(-EIO);
12971563ac75SChao Yu 	}
1298eb47b800SJaegeuk Kim 	return page;
1299eb47b800SJaegeuk Kim }
1300eb47b800SJaegeuk Kim 
13010a8165d7SJaegeuk Kim /*
1302eb47b800SJaegeuk Kim  * Caller ensures that this data page is never allocated.
1303eb47b800SJaegeuk Kim  * A new zero-filled data page is allocated in the page cache.
130439936837SJaegeuk Kim  *
13054f4124d0SChao Yu  * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
13064f4124d0SChao Yu  * f2fs_unlock_op().
1307470f00e9SChao Yu  * Note that, ipage is set only by make_empty_dir, and if any error occur,
1308470f00e9SChao Yu  * ipage should be released by this function.
1309eb47b800SJaegeuk Kim  */
/*
 * Allocate a zero-filled data page at @index (block reserved as
 * NEW_ADDR).  @ipage ownership: consumed via the dnode on success when
 * provided, explicitly released on every error path (see comment above
 * the function).  Optionally extends i_size past the new page.
 */
13104d57b86dSChao Yu struct page *f2fs_get_new_data_page(struct inode *inode,
1311a8865372SJaegeuk Kim 		struct page *ipage, pgoff_t index, bool new_i_size)
1312eb47b800SJaegeuk Kim {
1313eb47b800SJaegeuk Kim 	struct address_space *mapping = inode->i_mapping;
1314eb47b800SJaegeuk Kim 	struct page *page;
1315eb47b800SJaegeuk Kim 	struct dnode_of_data dn;
1316eb47b800SJaegeuk Kim 	int err;
13177612118aSJaegeuk Kim 
1318a56c7c6fSJaegeuk Kim 	page = f2fs_grab_cache_page(mapping, index, true);
1319470f00e9SChao Yu 	if (!page) {
1320470f00e9SChao Yu 		/*
1321470f00e9SChao Yu 		 * before exiting, we should make sure ipage will be released
1322470f00e9SChao Yu 		 * if any error occur.
1323470f00e9SChao Yu 		 */
1324470f00e9SChao Yu 		f2fs_put_page(ipage, 1);
132501f28610SJaegeuk Kim 		return ERR_PTR(-ENOMEM);
1326470f00e9SChao Yu 	}
1327eb47b800SJaegeuk Kim 
1328a8865372SJaegeuk Kim 	set_new_dnode(&dn, inode, ipage, NULL, 0);
1329b600965cSHuajun Li 	err = f2fs_reserve_block(&dn, index);
133001f28610SJaegeuk Kim 	if (err) {
133101f28610SJaegeuk Kim 		f2fs_put_page(page, 1);
1332eb47b800SJaegeuk Kim 		return ERR_PTR(err);
1333a8865372SJaegeuk Kim 	}
	/* with an ipage, f2fs_reserve_block() kept the dnode for the caller */
133401f28610SJaegeuk Kim 	if (!ipage)
133501f28610SJaegeuk Kim 		f2fs_put_dnode(&dn);
1336eb47b800SJaegeuk Kim 
1337eb47b800SJaegeuk Kim 	if (PageUptodate(page))
133801f28610SJaegeuk Kim 		goto got_it;
1339eb47b800SJaegeuk Kim 
1340eb47b800SJaegeuk Kim 	if (dn.data_blkaddr == NEW_ADDR) {
134109cbfeafSKirill A. Shutemov 		zero_user_segment(page, 0, PAGE_SIZE);
1342237c0790SJaegeuk Kim 		if (!PageUptodate(page))
1343393ff91fSJaegeuk Kim 			SetPageUptodate(page);
1344eb47b800SJaegeuk Kim 	} else {
	/* block was already allocated: reread the existing data */
13454375a336SJaegeuk Kim 		f2fs_put_page(page, 1);
1346a8865372SJaegeuk Kim 
13477612118aSJaegeuk Kim 		/* if ipage exists, blkaddr should be NEW_ADDR */
13487612118aSJaegeuk Kim 		f2fs_bug_on(F2FS_I_SB(inode), ipage);
13494d57b86dSChao Yu 		page = f2fs_get_lock_data_page(inode, index, true);
13504375a336SJaegeuk Kim 		if (IS_ERR(page))
13517612118aSJaegeuk Kim 			return page;
1352eb47b800SJaegeuk Kim 	}
135301f28610SJaegeuk Kim got_it:
13549edcdabfSChao Yu 	if (new_i_size && i_size_read(inode) <
1355ee6d182fSJaegeuk Kim 				((loff_t)(index + 1) << PAGE_SHIFT))
1356fc9581c8SJaegeuk Kim 		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
1357eb47b800SJaegeuk Kim 	return page;
1358eb47b800SJaegeuk Kim }
1359eb47b800SJaegeuk Kim 
/*
 * Allocate a new data block in the @seg_type log for the slot at
 * dn->ofs_in_node and record the new address back into the dnode.
 * Returns 0 on success or a negative errno.
 */
1360d5097be5SHyunchul Lee static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
1361bfad7c2dSJaegeuk Kim {
13624081363fSJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1363bfad7c2dSJaegeuk Kim 	struct f2fs_summary sum;
1364bfad7c2dSJaegeuk Kim 	struct node_info ni;
13656aa58d8aSChao Yu 	block_t old_blkaddr;
136646008c6dSChao Yu 	blkcnt_t count = 1;
13670abd675eSChao Yu 	int err;
1368bfad7c2dSJaegeuk Kim 
136991942321SJaegeuk Kim 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1370bfad7c2dSJaegeuk Kim 		return -EPERM;
1371df6136efSChao Yu 
13727735730dSChao Yu 	err = f2fs_get_node_info(sbi, dn->nid, &ni);
13737735730dSChao Yu 	if (err)
13747735730dSChao Yu 		return err;
13757735730dSChao Yu 
1376a2ced1ceSChao Yu 	dn->data_blkaddr = f2fs_data_blkaddr(dn);
1377f847c699SChao Yu 	if (dn->data_blkaddr != NULL_ADDR)
1378df6136efSChao Yu 		goto alloc;
1379df6136efSChao Yu 
	/* the slot was a hole (NULL_ADDR): account one new valid block first */
13800abd675eSChao Yu 	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
13810abd675eSChao Yu 		return err;
1382bfad7c2dSJaegeuk Kim 
1383df6136efSChao Yu alloc:
1384bfad7c2dSJaegeuk Kim 	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
13856aa58d8aSChao Yu 	old_blkaddr = dn->data_blkaddr;
13866aa58d8aSChao Yu 	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
1387093749e2SChao Yu 				&sum, seg_type, NULL);
	/* drop any stale page cached in the meta mapping for the old address */
13886aa58d8aSChao Yu 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
13896aa58d8aSChao Yu 		invalidate_mapping_pages(META_MAPPING(sbi),
13906aa58d8aSChao Yu 					old_blkaddr, old_blkaddr);
139186f35dc3SChao Yu 	f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
1392bfad7c2dSJaegeuk Kim 
13930a4daae5SJaegeuk Kim 	/*
13940a4daae5SJaegeuk Kim 	 * i_size will be updated by direct_IO. Otherwise, we'll get stale
13950a4daae5SJaegeuk Kim 	 * data from unwritten block via dio_read.
13960a4daae5SJaegeuk Kim 	 */
1397bfad7c2dSJaegeuk Kim 	return 0;
1398bfad7c2dSJaegeuk Kim }
1399bfad7c2dSJaegeuk Kim 
/*
 * Preallocate blocks for the write described by @iocb/@from.  Direct I/O
 * picks PRE_DIO or PRE_AIO depending on whether it must fall back to
 * buffered I/O; buffered writes convert inline data first and use PRE_AIO.
 * Returns 0 (possibly with FI_NO_PREALLOC set on ENOSPC) or a negative errno.
 */
1400a7de6086SJaegeuk Kim int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
140159b802e5SJaegeuk Kim {
1402b439b103SJaegeuk Kim 	struct inode *inode = file_inode(iocb->ki_filp);
14035b8db7faSChao Yu 	struct f2fs_map_blocks map;
1404d6d478a1SChao Yu 	int flag;
1405a7de6086SJaegeuk Kim 	int err = 0;
1406d6d478a1SChao Yu 	bool direct_io = iocb->ki_flags & IOCB_DIRECT;
140759b802e5SJaegeuk Kim 
	/* block range covering [ki_pos, ki_pos + count), empty if it rounds away */
14080080c507SJaegeuk Kim 	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
1409dfd02e4dSChao Yu 	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
1410dfd02e4dSChao Yu 	if (map.m_len > map.m_lblk)
1411dfd02e4dSChao Yu 		map.m_len -= map.m_lblk;
1412dfd02e4dSChao Yu 	else
1413dfd02e4dSChao Yu 		map.m_len = 0;
1414dfd02e4dSChao Yu 
1415da85985cSChao Yu 	map.m_next_pgofs = NULL;
1416c4020b2dSChao Yu 	map.m_next_extent = NULL;
1417d5097be5SHyunchul Lee 	map.m_seg_type = NO_CHECK_TYPE;
1418f9d6d059SChao Yu 	map.m_may_create = true;
141959b802e5SJaegeuk Kim 
1420d6d478a1SChao Yu 	if (direct_io) {
14214d57b86dSChao Yu 		map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
1422f847c699SChao Yu 		flag = f2fs_force_buffered_io(inode, iocb, from) ?
1423c040ff9dSJaegeuk Kim 					F2FS_GET_BLOCK_PRE_AIO :
1424d6d478a1SChao Yu 					F2FS_GET_BLOCK_PRE_DIO;
1425d6d478a1SChao Yu 		goto map_blocks;
1426d5097be5SHyunchul Lee 	}
	/* writes that no longer fit inline must convert the inode first */
1427f2470371SChao Yu 	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
1428a7de6086SJaegeuk Kim 		err = f2fs_convert_inline_inode(inode);
1429a7de6086SJaegeuk Kim 		if (err)
1430a7de6086SJaegeuk Kim 			return err;
143124b84912SJaegeuk Kim 	}
1432d6d478a1SChao Yu 	if (f2fs_has_inline_data(inode))
1433d6d478a1SChao Yu 		return err;
1434d6d478a1SChao Yu 
1435d6d478a1SChao Yu 	flag = F2FS_GET_BLOCK_PRE_AIO;
1436d6d478a1SChao Yu 
1437d6d478a1SChao Yu map_blocks:
1438d6d478a1SChao Yu 	err = f2fs_map_blocks(inode, &map, 1, flag);
	/* ENOSPC during preallocation is not fatal; buffered writers skip prealloc */
143925006645SSheng Yong 	if (map.m_len > 0 && err == -ENOSPC) {
1440d6d478a1SChao Yu 		if (!direct_io)
144125006645SSheng Yong 			set_inode_flag(inode, FI_NO_PREALLOC);
144225006645SSheng Yong 		err = 0;
144325006645SSheng Yong 	}
1444a7de6086SJaegeuk Kim 	return err;
144559b802e5SJaegeuk Kim }
144659b802e5SJaegeuk Kim 
14470ef81833SChao Yu void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
144859c9081bSYunlei He {
144959c9081bSYunlei He 	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
145059c9081bSYunlei He 		if (lock)
145159c9081bSYunlei He 			down_read(&sbi->node_change);
145259c9081bSYunlei He 		else
145359c9081bSYunlei He 			up_read(&sbi->node_change);
145459c9081bSYunlei He 	} else {
145559c9081bSYunlei He 		if (lock)
145659c9081bSYunlei He 			f2fs_lock_op(sbi);
145759c9081bSYunlei He 		else
145859c9081bSYunlei He 			f2fs_unlock_op(sbi);
145959c9081bSYunlei He 	}
146059c9081bSYunlei He }
146159c9081bSYunlei He 
14620a8165d7SJaegeuk Kim /*
14637a88ddb5SChao Yu  * f2fs_map_blocks() tries to find or build mapping relationship which
14647a88ddb5SChao Yu  * maps continuous logical blocks to physical blocks, and return such
14657a88ddb5SChao Yu  * info via f2fs_map_blocks structure.
1466eb47b800SJaegeuk Kim  */
1467d323d005SChao Yu int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
1468e2b4e2bcSChao Yu 						int create, int flag)
1469eb47b800SJaegeuk Kim {
1470003a3e1dSJaegeuk Kim 	unsigned int maxblocks = map->m_len;
1471eb47b800SJaegeuk Kim 	struct dnode_of_data dn;
1472f9811703SChao Yu 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1473f9d6d059SChao Yu 	int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
147446008c6dSChao Yu 	pgoff_t pgofs, end_offset, end;
1475bfad7c2dSJaegeuk Kim 	int err = 0, ofs = 1;
147646008c6dSChao Yu 	unsigned int ofs_in_node, last_ofs_in_node;
147746008c6dSChao Yu 	blkcnt_t prealloc;
1478e15882b6SHou Pengyang 	struct extent_info ei = {0,0,0};
14797df3a431SFan Li 	block_t blkaddr;
1480c4020b2dSChao Yu 	unsigned int start_pgofs;
1481eb47b800SJaegeuk Kim 
1482dfd02e4dSChao Yu 	if (!maxblocks)
1483dfd02e4dSChao Yu 		return 0;
1484dfd02e4dSChao Yu 
1485003a3e1dSJaegeuk Kim 	map->m_len = 0;
1486003a3e1dSJaegeuk Kim 	map->m_flags = 0;
1487003a3e1dSJaegeuk Kim 
1488003a3e1dSJaegeuk Kim 	/* it only supports block size == page size */
1489003a3e1dSJaegeuk Kim 	pgofs =	(pgoff_t)map->m_lblk;
149046008c6dSChao Yu 	end = pgofs + maxblocks;
1491eb47b800SJaegeuk Kim 
	/* a read hit in the extent cache answers without touching node pages */
149224b84912SJaegeuk Kim 	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
1493b0332a0fSChao Yu 		if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
1494f4f0b677SJia Zhu 							map->m_may_create)
1495f4f0b677SJia Zhu 			goto next_dnode;
1496f4f0b677SJia Zhu 
1497003a3e1dSJaegeuk Kim 		map->m_pblk = ei.blk + pgofs - ei.fofs;
1498003a3e1dSJaegeuk Kim 		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
1499003a3e1dSJaegeuk Kim 		map->m_flags = F2FS_MAP_MAPPED;
1500c4020b2dSChao Yu 		if (map->m_next_extent)
1501c4020b2dSChao Yu 			*map->m_next_extent = pgofs + map->m_len;
15021e78e8bdSSahitya Tummala 
15031e78e8bdSSahitya Tummala 		/* for hardware encryption, but to avoid potential issue in future */
15041e78e8bdSSahitya Tummala 		if (flag == F2FS_GET_BLOCK_DIO)
15051e78e8bdSSahitya Tummala 			f2fs_wait_on_block_writeback_range(inode,
15061e78e8bdSSahitya Tummala 						map->m_pblk, map->m_len);
1507bfad7c2dSJaegeuk Kim 		goto out;
1508a2e7d1bfSChao Yu 	}
1509bfad7c2dSJaegeuk Kim 
/*
 * Process one dnode page per pass: take the mapping lock, map/allocate every
 * slot the dnode covers, then drop the lock before looping to the next one.
 */
15104fe71e88SChao Yu next_dnode:
1511f9d6d059SChao Yu 	if (map->m_may_create)
15120ef81833SChao Yu 		f2fs_do_map_lock(sbi, flag, true);
1513eb47b800SJaegeuk Kim 
1514eb47b800SJaegeuk Kim 	/* When reading holes, we need its node page */
1515eb47b800SJaegeuk Kim 	set_new_dnode(&dn, inode, NULL, NULL, 0);
15164d57b86dSChao Yu 	err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
15171ec79083SJaegeuk Kim 	if (err) {
151843473f96SChao Yu 		if (flag == F2FS_GET_BLOCK_BMAP)
151943473f96SChao Yu 			map->m_pblk = 0;
	/* ENOENT means a hole: report where the next page might start instead */
1520da85985cSChao Yu 		if (err == -ENOENT) {
1521bfad7c2dSJaegeuk Kim 			err = 0;
1522da85985cSChao Yu 			if (map->m_next_pgofs)
1523da85985cSChao Yu 				*map->m_next_pgofs =
15244d57b86dSChao Yu 					f2fs_get_next_page_offset(&dn, pgofs);
1525c4020b2dSChao Yu 			if (map->m_next_extent)
1526c4020b2dSChao Yu 				*map->m_next_extent =
15274d57b86dSChao Yu 					f2fs_get_next_page_offset(&dn, pgofs);
1528da85985cSChao Yu 		}
1529bfad7c2dSJaegeuk Kim 		goto unlock_out;
1530848753aaSNamjae Jeon 	}
1531eb47b800SJaegeuk Kim 
1532c4020b2dSChao Yu 	start_pgofs = pgofs;
153346008c6dSChao Yu 	prealloc = 0;
1534230436b3SArnd Bergmann 	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
153581ca7350SChao Yu 	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1536eb47b800SJaegeuk Kim 
15374fe71e88SChao Yu next_block:
1538a2ced1ceSChao Yu 	blkaddr = f2fs_data_blkaddr(&dn);
1539973163fcSChao Yu 
1540c9b60788SChao Yu 	if (__is_valid_data_blkaddr(blkaddr) &&
154193770ab7SChao Yu 		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
154210f966bbSChao Yu 		err = -EFSCORRUPTED;
1543c9b60788SChao Yu 		goto sync_out;
1544c9b60788SChao Yu 	}
1545c9b60788SChao Yu 
154693770ab7SChao Yu 	if (__is_valid_data_blkaddr(blkaddr)) {
1547f847c699SChao Yu 		/* use out-place-update for direct IO under LFS mode */
1548b0332a0fSChao Yu 		if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
1549f9d6d059SChao Yu 							map->m_may_create) {
1550f847c699SChao Yu 			err = __allocate_data_block(&dn, map->m_seg_type);
155105e36006SChao Yu 			if (err)
155205e36006SChao Yu 				goto sync_out;
155373c0a927SJia Zhu 			blkaddr = dn.data_blkaddr;
1554f847c699SChao Yu 			set_inode_flag(inode, FI_APPEND_WRITE);
1555f847c699SChao Yu 		}
1556f847c699SChao Yu 	} else {
1557973163fcSChao Yu 		if (create) {
1558f9811703SChao Yu 			if (unlikely(f2fs_cp_error(sbi))) {
1559f9811703SChao Yu 				err = -EIO;
1560f9811703SChao Yu 				goto sync_out;
1561f9811703SChao Yu 			}
	/* PRE_AIO only counts holes here; they are reserved in batch below */
156224b84912SJaegeuk Kim 			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
156346008c6dSChao Yu 				if (blkaddr == NULL_ADDR) {
156446008c6dSChao Yu 					prealloc++;
156546008c6dSChao Yu 					last_ofs_in_node = dn.ofs_in_node;
156646008c6dSChao Yu 				}
156724b84912SJaegeuk Kim 			} else {
15680a4daae5SJaegeuk Kim 				WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
15690a4daae5SJaegeuk Kim 					flag != F2FS_GET_BLOCK_DIO);
1570d5097be5SHyunchul Lee 				err = __allocate_data_block(&dn,
1571d5097be5SHyunchul Lee 							map->m_seg_type);
15726f2d8ed6SChao Yu 				if (!err)
157391942321SJaegeuk Kim 					set_inode_flag(inode, FI_APPEND_WRITE);
157424b84912SJaegeuk Kim 			}
1575bfad7c2dSJaegeuk Kim 			if (err)
1576bfad7c2dSJaegeuk Kim 				goto sync_out;
15773f2be043SKinglong Mee 			map->m_flags |= F2FS_MAP_NEW;
1578bfad7c2dSJaegeuk Kim 			blkaddr = dn.data_blkaddr;
1579973163fcSChao Yu 		} else {
158043473f96SChao Yu 			if (flag == F2FS_GET_BLOCK_BMAP) {
158143473f96SChao Yu 				map->m_pblk = 0;
158243473f96SChao Yu 				goto sync_out;
158343473f96SChao Yu 			}
1584c4020b2dSChao Yu 			if (flag == F2FS_GET_BLOCK_PRECACHE)
1585c4020b2dSChao Yu 				goto sync_out;
1586da85985cSChao Yu 			if (flag == F2FS_GET_BLOCK_FIEMAP &&
1587da85985cSChao Yu 						blkaddr == NULL_ADDR) {
1588da85985cSChao Yu 				if (map->m_next_pgofs)
1589da85985cSChao Yu 					*map->m_next_pgofs = pgofs + 1;
1590973163fcSChao Yu 				goto sync_out;
1591bfad7c2dSJaegeuk Kim 			}
1592f3d98e74SChao Yu 			if (flag != F2FS_GET_BLOCK_FIEMAP) {
1593f3d98e74SChao Yu 				/* for defragment case */
1594f3d98e74SChao Yu 				if (map->m_next_pgofs)
1595f3d98e74SChao Yu 					*map->m_next_pgofs = pgofs + 1;
1596bfad7c2dSJaegeuk Kim 				goto sync_out;
1597bfad7c2dSJaegeuk Kim 			}
1598973163fcSChao Yu 		}
1599973163fcSChao Yu 	}
1600973163fcSChao Yu 
160146008c6dSChao Yu 	if (flag == F2FS_GET_BLOCK_PRE_AIO)
160246008c6dSChao Yu 		goto skip;
16037f63eb77SJaegeuk Kim 
	/* start a fresh extent, or extend the current one when contiguous */
16044fe71e88SChao Yu 	if (map->m_len == 0) {
16054fe71e88SChao Yu 		/* preallocated unwritten block should be mapped for fiemap. */
16064fe71e88SChao Yu 		if (blkaddr == NEW_ADDR)
16074fe71e88SChao Yu 			map->m_flags |= F2FS_MAP_UNWRITTEN;
16084fe71e88SChao Yu 		map->m_flags |= F2FS_MAP_MAPPED;
16094fe71e88SChao Yu 
16104fe71e88SChao Yu 		map->m_pblk = blkaddr;
16114fe71e88SChao Yu 		map->m_len = 1;
16124fe71e88SChao Yu 	} else if ((map->m_pblk != NEW_ADDR &&
16137f63eb77SJaegeuk Kim 			blkaddr == (map->m_pblk + ofs)) ||
1614b439b103SJaegeuk Kim 			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
161546008c6dSChao Yu 			flag == F2FS_GET_BLOCK_PRE_DIO) {
1616bfad7c2dSJaegeuk Kim 		ofs++;
16174fe71e88SChao Yu 		map->m_len++;
16184fe71e88SChao Yu 	} else {
16194fe71e88SChao Yu 		goto sync_out;
16204fe71e88SChao Yu 	}
16214fe71e88SChao Yu 
162246008c6dSChao Yu skip:
1623bfad7c2dSJaegeuk Kim 	dn.ofs_in_node++;
1624bfad7c2dSJaegeuk Kim 	pgofs++;
16254fe71e88SChao Yu 
162646008c6dSChao Yu 	/* preallocate blocks in batch for one dnode page */
162746008c6dSChao Yu 	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
162846008c6dSChao Yu 			(pgofs == end || dn.ofs_in_node == end_offset)) {
162946008c6dSChao Yu 
163046008c6dSChao Yu 		dn.ofs_in_node = ofs_in_node;
16314d57b86dSChao Yu 		err = f2fs_reserve_new_blocks(&dn, prealloc);
163246008c6dSChao Yu 		if (err)
163346008c6dSChao Yu 			goto sync_out;
163446008c6dSChao Yu 
163546008c6dSChao Yu 		map->m_len += dn.ofs_in_node - ofs_in_node;
163646008c6dSChao Yu 		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
163746008c6dSChao Yu 			err = -ENOSPC;
163846008c6dSChao Yu 			goto sync_out;
163946008c6dSChao Yu 		}
164046008c6dSChao Yu 		dn.ofs_in_node = end_offset;
164146008c6dSChao Yu 	}
164246008c6dSChao Yu 
164346008c6dSChao Yu 	if (pgofs >= end)
164446008c6dSChao Yu 		goto sync_out;
164546008c6dSChao Yu 	else if (dn.ofs_in_node < end_offset)
16464fe71e88SChao Yu 		goto next_block;
16474fe71e88SChao Yu 
1648c4020b2dSChao Yu 	if (flag == F2FS_GET_BLOCK_PRECACHE) {
1649c4020b2dSChao Yu 		if (map->m_flags & F2FS_MAP_MAPPED) {
1650c4020b2dSChao Yu 			unsigned int ofs = start_pgofs - map->m_lblk;
1651c4020b2dSChao Yu 
1652c4020b2dSChao Yu 			f2fs_update_extent_cache_range(&dn,
1653c4020b2dSChao Yu 				start_pgofs, map->m_pblk + ofs,
1654c4020b2dSChao Yu 				map->m_len - ofs);
1655c4020b2dSChao Yu 		}
1656c4020b2dSChao Yu 	}
1657c4020b2dSChao Yu 
16584fe71e88SChao Yu 	f2fs_put_dnode(&dn);
16594fe71e88SChao Yu 
1660f9d6d059SChao Yu 	if (map->m_may_create) {
16610ef81833SChao Yu 		f2fs_do_map_lock(sbi, flag, false);
16626f2d8ed6SChao Yu 		f2fs_balance_fs(sbi, dn.node_changed);
16634fe71e88SChao Yu 	}
16644fe71e88SChao Yu 	goto next_dnode;
16657df3a431SFan Li 
1666bfad7c2dSJaegeuk Kim sync_out:
16671e78e8bdSSahitya Tummala 
16681e78e8bdSSahitya Tummala 	/* for hardware encryption, but to avoid potential issue in future */
16691e78e8bdSSahitya Tummala 	if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
16701e78e8bdSSahitya Tummala 		f2fs_wait_on_block_writeback_range(inode,
16711e78e8bdSSahitya Tummala 						map->m_pblk, map->m_len);
16721e78e8bdSSahitya Tummala 
1673c4020b2dSChao Yu 	if (flag == F2FS_GET_BLOCK_PRECACHE) {
1674c4020b2dSChao Yu 		if (map->m_flags & F2FS_MAP_MAPPED) {
1675c4020b2dSChao Yu 			unsigned int ofs = start_pgofs - map->m_lblk;
1676c4020b2dSChao Yu 
1677c4020b2dSChao Yu 			f2fs_update_extent_cache_range(&dn,
1678c4020b2dSChao Yu 				start_pgofs, map->m_pblk + ofs,
1679c4020b2dSChao Yu 				map->m_len - ofs);
1680c4020b2dSChao Yu 		}
1681c4020b2dSChao Yu 		if (map->m_next_extent)
1682c4020b2dSChao Yu 			*map->m_next_extent = pgofs + 1;
1683c4020b2dSChao Yu 	}
1684bfad7c2dSJaegeuk Kim 	f2fs_put_dnode(&dn);
1685bfad7c2dSJaegeuk Kim unlock_out:
1686f9d6d059SChao Yu 	if (map->m_may_create) {
16870ef81833SChao Yu 		f2fs_do_map_lock(sbi, flag, false);
16886f2d8ed6SChao Yu 		f2fs_balance_fs(sbi, dn.node_changed);
16892a340760SJaegeuk Kim 	}
1690bfad7c2dSJaegeuk Kim out:
1691003a3e1dSJaegeuk Kim 	trace_f2fs_map_blocks(inode, map, err);
1692bfad7c2dSJaegeuk Kim 	return err;
1693eb47b800SJaegeuk Kim }
1694eb47b800SJaegeuk Kim 
1695b91050a8SHyunchul Lee bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1696b91050a8SHyunchul Lee {
1697b91050a8SHyunchul Lee 	struct f2fs_map_blocks map;
1698b91050a8SHyunchul Lee 	block_t last_lblk;
1699b91050a8SHyunchul Lee 	int err;
1700b91050a8SHyunchul Lee 
1701b91050a8SHyunchul Lee 	if (pos + len > i_size_read(inode))
1702b91050a8SHyunchul Lee 		return false;
1703b91050a8SHyunchul Lee 
1704b91050a8SHyunchul Lee 	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1705b91050a8SHyunchul Lee 	map.m_next_pgofs = NULL;
1706b91050a8SHyunchul Lee 	map.m_next_extent = NULL;
1707b91050a8SHyunchul Lee 	map.m_seg_type = NO_CHECK_TYPE;
1708f4f0b677SJia Zhu 	map.m_may_create = false;
1709b91050a8SHyunchul Lee 	last_lblk = F2FS_BLK_ALIGN(pos + len);
1710b91050a8SHyunchul Lee 
1711b91050a8SHyunchul Lee 	while (map.m_lblk < last_lblk) {
1712b91050a8SHyunchul Lee 		map.m_len = last_lblk - map.m_lblk;
1713b91050a8SHyunchul Lee 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
1714b91050a8SHyunchul Lee 		if (err || map.m_len == 0)
1715b91050a8SHyunchul Lee 			return false;
1716b91050a8SHyunchul Lee 		map.m_lblk += map.m_len;
1717b91050a8SHyunchul Lee 	}
1718b91050a8SHyunchul Lee 	return true;
1719b91050a8SHyunchul Lee }
1720b91050a8SHyunchul Lee 
172143b9d4b4SJaegeuk Kim static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
172243b9d4b4SJaegeuk Kim {
172343b9d4b4SJaegeuk Kim 	return (bytes >> inode->i_blkbits);
172443b9d4b4SJaegeuk Kim }
172543b9d4b4SJaegeuk Kim 
172643b9d4b4SJaegeuk Kim static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
172743b9d4b4SJaegeuk Kim {
172843b9d4b4SJaegeuk Kim 	return (blks << inode->i_blkbits);
172943b9d4b4SJaegeuk Kim }
173043b9d4b4SJaegeuk Kim 
1731003a3e1dSJaegeuk Kim static int __get_data_block(struct inode *inode, sector_t iblock,
1732da85985cSChao Yu 			struct buffer_head *bh, int create, int flag,
1733f9d6d059SChao Yu 			pgoff_t *next_pgofs, int seg_type, bool may_write)
1734003a3e1dSJaegeuk Kim {
1735003a3e1dSJaegeuk Kim 	struct f2fs_map_blocks map;
1736a7de6086SJaegeuk Kim 	int err;
1737003a3e1dSJaegeuk Kim 
1738003a3e1dSJaegeuk Kim 	map.m_lblk = iblock;
173943b9d4b4SJaegeuk Kim 	map.m_len = bytes_to_blks(inode, bh->b_size);
1740da85985cSChao Yu 	map.m_next_pgofs = next_pgofs;
1741c4020b2dSChao Yu 	map.m_next_extent = NULL;
1742d5097be5SHyunchul Lee 	map.m_seg_type = seg_type;
1743f9d6d059SChao Yu 	map.m_may_create = may_write;
1744003a3e1dSJaegeuk Kim 
1745a7de6086SJaegeuk Kim 	err = f2fs_map_blocks(inode, &map, create, flag);
1746a7de6086SJaegeuk Kim 	if (!err) {
1747003a3e1dSJaegeuk Kim 		map_bh(bh, inode->i_sb, map.m_pblk);
1748003a3e1dSJaegeuk Kim 		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
174943b9d4b4SJaegeuk Kim 		bh->b_size = blks_to_bytes(inode, map.m_len);
1750003a3e1dSJaegeuk Kim 	}
1751a7de6086SJaegeuk Kim 	return err;
1752003a3e1dSJaegeuk Kim }
1753003a3e1dSJaegeuk Kim 
1754f9d6d059SChao Yu static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
1755f9d6d059SChao Yu 			struct buffer_head *bh_result, int create)
1756f9d6d059SChao Yu {
1757f9d6d059SChao Yu 	return __get_data_block(inode, iblock, bh_result, create,
1758f9d6d059SChao Yu 				F2FS_GET_BLOCK_DIO, NULL,
1759f9d6d059SChao Yu 				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
176075a037f3SJaegeuk Kim 				IS_SWAPFILE(inode) ? false : true);
1761ccfb3000SJaegeuk Kim }
1762ccfb3000SJaegeuk Kim 
1763e2b4e2bcSChao Yu static int get_data_block_dio(struct inode *inode, sector_t iblock,
1764ccfb3000SJaegeuk Kim 			struct buffer_head *bh_result, int create)
1765ccfb3000SJaegeuk Kim {
1766e2b4e2bcSChao Yu 	return __get_data_block(inode, iblock, bh_result, create,
17670a4daae5SJaegeuk Kim 				F2FS_GET_BLOCK_DIO, NULL,
1768f9d6d059SChao Yu 				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1769f9d6d059SChao Yu 				false);
1770e2b4e2bcSChao Yu }
1771e2b4e2bcSChao Yu 
/*
 * Report the xattr area of @inode through fiemap: first the inline xattr
 * space inside the inode block (if present), then the separate xattr node
 * block (if i_xattr_nid is set).  Returns 0, or a negative errno.
 */
1772442a9dbdSChao Yu static int f2fs_xattr_fiemap(struct inode *inode,
1773442a9dbdSChao Yu 				struct fiemap_extent_info *fieinfo)
1774442a9dbdSChao Yu {
1775442a9dbdSChao Yu 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1776442a9dbdSChao Yu 	struct page *page;
1777442a9dbdSChao Yu 	struct node_info ni;
1778442a9dbdSChao Yu 	__u64 phys = 0, len;
1779442a9dbdSChao Yu 	__u32 flags;
1780442a9dbdSChao Yu 	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1781442a9dbdSChao Yu 	int err = 0;
1782442a9dbdSChao Yu 
1783442a9dbdSChao Yu 	if (f2fs_has_inline_xattr(inode)) {
1784442a9dbdSChao Yu 		int offset;
1785442a9dbdSChao Yu 
1786442a9dbdSChao Yu 		page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1787442a9dbdSChao Yu 						inode->i_ino, false);
1788442a9dbdSChao Yu 		if (!page)
1789442a9dbdSChao Yu 			return -ENOMEM;
1790442a9dbdSChao Yu 
17917735730dSChao Yu 		err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
17927735730dSChao Yu 		if (err) {
17937735730dSChao Yu 			f2fs_put_page(page, 1);
17947735730dSChao Yu 			return err;
17957735730dSChao Yu 		}
1796442a9dbdSChao Yu 
	/* physical position = inode block address + offset of the inline area */
17976cbfcab5SJaegeuk Kim 		phys = blks_to_bytes(inode, ni.blk_addr);
1798442a9dbdSChao Yu 		offset = offsetof(struct f2fs_inode, i_addr) +
1799442a9dbdSChao Yu 					sizeof(__le32) * (DEF_ADDRS_PER_INODE -
1800b323fd28SChao Yu 					get_inline_xattr_addrs(inode));
1801442a9dbdSChao Yu 
1802442a9dbdSChao Yu 		phys += offset;
1803442a9dbdSChao Yu 		len = inline_xattr_size(inode);
1804442a9dbdSChao Yu 
1805442a9dbdSChao Yu 		f2fs_put_page(page, 1);
1806442a9dbdSChao Yu 
1807442a9dbdSChao Yu 		flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1808442a9dbdSChao Yu 
1809442a9dbdSChao Yu 		if (!xnid)
1810442a9dbdSChao Yu 			flags |= FIEMAP_EXTENT_LAST;
1811442a9dbdSChao Yu 
1812442a9dbdSChao Yu 		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1813dd5a09bdSChao Yu 		trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
	/* fiemap_fill_next_extent() returns 1 once the extent array is full */
1814442a9dbdSChao Yu 		if (err || err == 1)
1815442a9dbdSChao Yu 			return err;
1816442a9dbdSChao Yu 	}
1817442a9dbdSChao Yu 
1818442a9dbdSChao Yu 	if (xnid) {
1819442a9dbdSChao Yu 		page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1820442a9dbdSChao Yu 		if (!page)
1821442a9dbdSChao Yu 			return -ENOMEM;
1822442a9dbdSChao Yu 
18237735730dSChao Yu 		err = f2fs_get_node_info(sbi, xnid, &ni);
18247735730dSChao Yu 		if (err) {
18257735730dSChao Yu 			f2fs_put_page(page, 1);
18267735730dSChao Yu 			return err;
18277735730dSChao Yu 		}
1828442a9dbdSChao Yu 
18296cbfcab5SJaegeuk Kim 		phys = blks_to_bytes(inode, ni.blk_addr);
1830442a9dbdSChao Yu 		len = inode->i_sb->s_blocksize;
1831442a9dbdSChao Yu 
1832442a9dbdSChao Yu 		f2fs_put_page(page, 1);
1833442a9dbdSChao Yu 
1834442a9dbdSChao Yu 		flags = FIEMAP_EXTENT_LAST;
1835442a9dbdSChao Yu 	}
1836442a9dbdSChao Yu 
1837dd5a09bdSChao Yu 	if (phys) {
1838442a9dbdSChao Yu 		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1839dd5a09bdSChao Yu 		trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1840dd5a09bdSChao Yu 	}
1841442a9dbdSChao Yu 
1842442a9dbdSChao Yu 	return (err < 0 ? err : 0);
1843442a9dbdSChao Yu }
1844442a9dbdSChao Yu 
1845bf38fbadSChao Yu static loff_t max_inode_blocks(struct inode *inode)
1846bf38fbadSChao Yu {
1847bf38fbadSChao Yu 	loff_t result = ADDRS_PER_INODE(inode);
1848bf38fbadSChao Yu 	loff_t leaf_count = ADDRS_PER_BLOCK(inode);
1849bf38fbadSChao Yu 
1850bf38fbadSChao Yu 	/* two direct node blocks */
1851bf38fbadSChao Yu 	result += (leaf_count * 2);
1852bf38fbadSChao Yu 
1853bf38fbadSChao Yu 	/* two indirect node blocks */
1854bf38fbadSChao Yu 	leaf_count *= NIDS_PER_BLOCK;
1855bf38fbadSChao Yu 	result += (leaf_count * 2);
1856bf38fbadSChao Yu 
1857bf38fbadSChao Yu 	/* one double indirect node block */
1858bf38fbadSChao Yu 	leaf_count *= NIDS_PER_BLOCK;
1859bf38fbadSChao Yu 	result += leaf_count;
1860bf38fbadSChao Yu 
1861bf38fbadSChao Yu 	return result;
1862bf38fbadSChao Yu }
1863bf38fbadSChao Yu 
/*
 * ->fiemap implementation: report the extents backing [@start, @start + @len)
 * of @inode, handling the xattr area, inline data and compressed clusters.
 */
18649ab70134SJaegeuk Kim int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
18659ab70134SJaegeuk Kim 		u64 start, u64 len)
18669ab70134SJaegeuk Kim {
1867b876f4c9SJaegeuk Kim 	struct f2fs_map_blocks map;
18687f63eb77SJaegeuk Kim 	sector_t start_blk, last_blk;
1869da85985cSChao Yu 	pgoff_t next_pgofs;
18707f63eb77SJaegeuk Kim 	u64 logical = 0, phys = 0, size = 0;
18717f63eb77SJaegeuk Kim 	u32 flags = 0;
18727f63eb77SJaegeuk Kim 	int ret = 0;
1873bf38fbadSChao Yu 	bool compr_cluster = false;
1874bf38fbadSChao Yu 	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
18757f63eb77SJaegeuk Kim 
1876c4020b2dSChao Yu 	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1877c4020b2dSChao Yu 		ret = f2fs_precache_extents(inode);
18787f63eb77SJaegeuk Kim 		if (ret)
18797f63eb77SJaegeuk Kim 			return ret;
1880c4020b2dSChao Yu 	}
1881c4020b2dSChao Yu 
188245dd052eSChristoph Hellwig 	ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
18837f63eb77SJaegeuk Kim 	if (ret)
18847f63eb77SJaegeuk Kim 		return ret;
18857f63eb77SJaegeuk Kim 
1886f1b43d4cSChao Yu 	inode_lock(inode);
1887f1b43d4cSChao Yu 
1888442a9dbdSChao Yu 	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1889442a9dbdSChao Yu 		ret = f2fs_xattr_fiemap(inode, fieinfo);
1890442a9dbdSChao Yu 		goto out;
1891442a9dbdSChao Yu 	}
18927f63eb77SJaegeuk Kim 
18937975f349SChao Yu 	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
189467f8cf3cSJaegeuk Kim 		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
189567f8cf3cSJaegeuk Kim 		if (ret != -EAGAIN)
1896f1b43d4cSChao Yu 			goto out;
189767f8cf3cSJaegeuk Kim 	}
189867f8cf3cSJaegeuk Kim 
	/* make sure we scan at least one block */
18996cbfcab5SJaegeuk Kim 	if (bytes_to_blks(inode, len) == 0)
19006cbfcab5SJaegeuk Kim 		len = blks_to_bytes(inode, 1);
19017f63eb77SJaegeuk Kim 
19026cbfcab5SJaegeuk Kim 	start_blk = bytes_to_blks(inode, start);
19036cbfcab5SJaegeuk Kim 	last_blk = bytes_to_blks(inode, start + len - 1);
19049a950d52SFan Li 
/* map one run per iteration; the previous extent is emitted once it ends */
19057f63eb77SJaegeuk Kim next:
1906b876f4c9SJaegeuk Kim 	memset(&map, 0, sizeof(map));
1907b876f4c9SJaegeuk Kim 	map.m_lblk = start_blk;
1908b876f4c9SJaegeuk Kim 	map.m_len = bytes_to_blks(inode, len);
1909b876f4c9SJaegeuk Kim 	map.m_next_pgofs = &next_pgofs;
1910b876f4c9SJaegeuk Kim 	map.m_seg_type = NO_CHECK_TYPE;
19117f63eb77SJaegeuk Kim 
1912bf38fbadSChao Yu 	if (compr_cluster)
1913b876f4c9SJaegeuk Kim 		map.m_len = cluster_size - 1;
1914bf38fbadSChao Yu 
1915b876f4c9SJaegeuk Kim 	ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
19167f63eb77SJaegeuk Kim 	if (ret)
19177f63eb77SJaegeuk Kim 		goto out;
19187f63eb77SJaegeuk Kim 
19197f63eb77SJaegeuk Kim 	/* HOLE */
1920b876f4c9SJaegeuk Kim 	if (!(map.m_flags & F2FS_MAP_FLAGS)) {
1921da85985cSChao Yu 		start_blk = next_pgofs;
192258736fa6SChao Yu 
19236cbfcab5SJaegeuk Kim 		if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
1924bf38fbadSChao Yu 						max_inode_blocks(inode)))
19259a950d52SFan Li 			goto prep_next;
192658736fa6SChao Yu 
19279a950d52SFan Li 		flags |= FIEMAP_EXTENT_LAST;
19289a950d52SFan Li 	}
19299a950d52SFan Li 
	/* flush the extent accumulated so far before starting a new one */
1930da5af127SChao Yu 	if (size) {
19310953fe86SChao Yu 		flags |= FIEMAP_EXTENT_MERGED;
193262230e0dSChandan Rajendra 		if (IS_ENCRYPTED(inode))
1933da5af127SChao Yu 			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
1934da5af127SChao Yu 
19357f63eb77SJaegeuk Kim 		ret = fiemap_fill_next_extent(fieinfo, logical,
19367f63eb77SJaegeuk Kim 				phys, size, flags);
1937dd5a09bdSChao Yu 		trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
1938bf38fbadSChao Yu 		if (ret)
1939bf38fbadSChao Yu 			goto out;
1940bf38fbadSChao Yu 		size = 0;
1941da5af127SChao Yu 	}
19429a950d52SFan Li 
1943bf38fbadSChao Yu 	if (start_blk > last_blk)
19447f63eb77SJaegeuk Kim 		goto out;
19457f63eb77SJaegeuk Kim 
	/* second pass of a compressed cluster: report the whole cluster */
1946bf38fbadSChao Yu 	if (compr_cluster) {
1947bf38fbadSChao Yu 		compr_cluster = false;
1948bf38fbadSChao Yu 
1949bf38fbadSChao Yu 
19506cbfcab5SJaegeuk Kim 		logical = blks_to_bytes(inode, start_blk - 1);
1951b876f4c9SJaegeuk Kim 		phys = blks_to_bytes(inode, map.m_pblk);
19526cbfcab5SJaegeuk Kim 		size = blks_to_bytes(inode, cluster_size);
1953bf38fbadSChao Yu 
1954bf38fbadSChao Yu 		flags |= FIEMAP_EXTENT_ENCODED;
1955bf38fbadSChao Yu 
1956bf38fbadSChao Yu 		start_blk += cluster_size - 1;
1957bf38fbadSChao Yu 
1958bf38fbadSChao Yu 		if (start_blk > last_blk)
1959bf38fbadSChao Yu 			goto out;
1960bf38fbadSChao Yu 
1961bf38fbadSChao Yu 		goto prep_next;
1962bf38fbadSChao Yu 	}
1963bf38fbadSChao Yu 
1964b876f4c9SJaegeuk Kim 	if (map.m_pblk == COMPRESS_ADDR) {
1965bf38fbadSChao Yu 		compr_cluster = true;
1966bf38fbadSChao Yu 		start_blk++;
1967bf38fbadSChao Yu 		goto prep_next;
1968bf38fbadSChao Yu 	}
1969bf38fbadSChao Yu 
19706cbfcab5SJaegeuk Kim 	logical = blks_to_bytes(inode, start_blk);
1971b876f4c9SJaegeuk Kim 	phys = blks_to_bytes(inode, map.m_pblk);
1972b876f4c9SJaegeuk Kim 	size = blks_to_bytes(inode, map.m_len);
19737f63eb77SJaegeuk Kim 	flags = 0;
1974b876f4c9SJaegeuk Kim 	if (map.m_flags & F2FS_MAP_UNWRITTEN)
19757f63eb77SJaegeuk Kim 		flags = FIEMAP_EXTENT_UNWRITTEN;
19767f63eb77SJaegeuk Kim 
19776cbfcab5SJaegeuk Kim 	start_blk += bytes_to_blks(inode, size);
19787f63eb77SJaegeuk Kim 
19799a950d52SFan Li prep_next:
19807f63eb77SJaegeuk Kim 	cond_resched();
19817f63eb77SJaegeuk Kim 	if (fatal_signal_pending(current))
19827f63eb77SJaegeuk Kim 		ret = -EINTR;
19837f63eb77SJaegeuk Kim 	else
19847f63eb77SJaegeuk Kim 		goto next;
19857f63eb77SJaegeuk Kim out:
	/* fiemap_fill_next_extent() returned 1: extent array full, not an error */
19867f63eb77SJaegeuk Kim 	if (ret == 1)
19877f63eb77SJaegeuk Kim 		ret = 0;
19887f63eb77SJaegeuk Kim 
19895955102cSAl Viro 	inode_unlock(inode);
19907f63eb77SJaegeuk Kim 	return ret;
19919ab70134SJaegeuk Kim }
19929ab70134SJaegeuk Kim 
199395ae251fSEric Biggers static inline loff_t f2fs_readpage_limit(struct inode *inode)
199495ae251fSEric Biggers {
199595ae251fSEric Biggers 	if (IS_ENABLED(CONFIG_FS_VERITY) &&
199695ae251fSEric Biggers 	    (IS_VERITY(inode) || f2fs_verity_in_progress(inode)))
199795ae251fSEric Biggers 		return inode->i_sb->s_maxbytes;
199895ae251fSEric Biggers 
199995ae251fSEric Biggers 	return i_size_read(inode);
200095ae251fSEric Biggers }
200195ae251fSEric Biggers 
/*
 * Read one uncompressed data page of @inode, merging it into the caller's
 * in-flight read bio when possible.
 *
 * @nr_pages:		remaining pages in this readahead batch; bounds the
 *			f2fs_map_blocks() request and sizes a new bio.
 * @map:		block-mapping state shared across calls so a previous
 *			mapping result can be reused for adjacent pages.
 * @bio_ret:		in/out: the bio being built; may be submitted and
 *			replaced (or set to NULL) by this function.
 * @last_block_in_bio:	out: block address of the last page added to *bio_ret,
 *			used by the next call's mergeability check.
 * @is_readahead:	true when invoked from the readahead path (adds
 *			REQ_RAHEAD to a freshly allocated bio).
 *
 * On success the page is either added to *bio_ret, or zeroed/unlocked when
 * it lies beyond the read limit or is unmapped (a hole).  Returns 0 or a
 * negative errno; on error the caller is expected to clean up the page.
 */
static int f2fs_read_single_page(struct inode *inode, struct page *page,
					unsigned nr_pages,
					struct f2fs_map_blocks *map,
					struct bio **bio_ret,
					sector_t *last_block_in_bio,
					bool is_readahead)
{
	struct bio *bio = *bio_ret;
	const unsigned blocksize = blks_to_bytes(inode, 1);
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	int ret = 0;

	block_in_file = (sector_t)page_index(page);
	last_block = block_in_file + nr_pages;
	/* round the read limit (i_size or verity limit) up to a full block */
	last_block_in_file = bytes_to_blks(inode,
			f2fs_readpage_limit(inode) + blocksize - 1);
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;

	/* just zeroing out page which is beyond EOF */
	if (block_in_file >= last_block)
		goto zero_out;
	/*
	 * Map blocks using the previous result first.
	 */
	if ((map->m_flags & F2FS_MAP_MAPPED) &&
			block_in_file > map->m_lblk &&
			block_in_file < (map->m_lblk + map->m_len))
		goto got_it;

	/*
	 * Then do more f2fs_map_blocks() calls until we are
	 * done with this page.
	 */
	map->m_lblk = block_in_file;
	map->m_len = last_block - block_in_file;

	ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
	if (ret)
		goto out;
got_it:
	if ((map->m_flags & F2FS_MAP_MAPPED)) {
		block_nr = map->m_pblk + block_in_file - map->m_lblk;
		SetPageMappedToDisk(page);

		/*
		 * cleancache may satisfy the read without touching the
		 * device; in that case the page is already up to date and
		 * only a pending bio (if any) must be flushed.
		 */
		if (!PageUptodate(page) && (!PageSwapCache(page) &&
					!cleancache_get_page(page))) {
			SetPageUptodate(page);
			goto confused;
		}

		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
						DATA_GENERIC_ENHANCE_READ)) {
			ret = -EFSCORRUPTED;
			goto out;
		}
	} else {
zero_out:
		/* hole or beyond EOF: hand back a zero-filled page */
		zero_user_segment(page, 0, PAGE_SIZE);
		/* a verity file's zero page must still pass verification */
		if (f2fs_need_verity(inode, page->index) &&
		    !fsverity_verify_page(page)) {
			ret = -EIO;
			goto out;
		}
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	/*
	 * This page will go to BIO.  Do we need to send this
	 * BIO off first?
	 */
	if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
				       *last_block_in_bio, block_nr) ||
		    !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
submit_and_realloc:
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
		bio = NULL;
	}
	if (bio == NULL) {
		bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
				is_readahead ? REQ_RAHEAD : 0, page->index,
				false);
		if (IS_ERR(bio)) {
			ret = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}
	}

	/*
	 * If the page is under writeback, we need to wait for
	 * its completion to see the correct decrypted data.
	 */
	f2fs_wait_on_block_writeback(inode, block_nr);

	if (bio_add_page(bio, page, blocksize, 0) < blocksize)
		goto submit_and_realloc;

	inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
	f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE);
	ClearPageError(page);
	*last_block_in_bio = block_nr;
	goto out;
confused:
	/* page satisfied without I/O: flush any pending bio and unlock */
	if (bio) {
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
		bio = NULL;
	}
	unlock_page(page);
out:
	*bio_ret = bio;
	return ret;
}
21212df0ab04SChao Yu 
#ifdef CONFIG_F2FS_FS_COMPRESSION
/*
 * Read all on-disk (compressed) blocks of the cluster described by @cc,
 * attaching them to the caller's read bio; decompression is performed
 * later by the bio post-read machinery (STEP_DECOMPRESS).
 *
 * @cc:			compress context holding the cluster's raw pages
 *			(rpages); pages beyond EOF are dropped here.
 * @bio_ret:		in/out bio being built, as in f2fs_read_single_page().
 * @nr_pages:		remaining pages in the batch, used to size new bios.
 * @last_block_in_bio:	in/out last block added, for mergeability checks.
 * @is_readahead:	true for the readahead path (REQ_RAHEAD).
 * @for_write:		passed through to f2fs_grab_read_bio().
 *
 * Returns 0 on success; on error all still-held raw pages are marked
 * !Uptodate and unlocked before returning a negative errno.
 */
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
				unsigned nr_pages, sector_t *last_block_in_bio,
				bool is_readahead, bool for_write)
{
	struct dnode_of_data dn;
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio = *bio_ret;
	unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
	sector_t last_block_in_file;
	const unsigned blocksize = blks_to_bytes(inode, 1);
	struct decompress_io_ctx *dic = NULL;
	int i;
	int ret = 0;

	f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));

	last_block_in_file = bytes_to_blks(inode,
			f2fs_readpage_limit(inode) + blocksize - 1);

	/* get rid of pages beyond EOF */
	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		if (!page)
			continue;
		if ((sector_t)page->index >= last_block_in_file) {
			zero_user_segment(page, 0, PAGE_SIZE);
			if (!PageUptodate(page))
				SetPageUptodate(page);
		} else if (!PageUptodate(page)) {
			continue;
		}
		unlock_page(page);
		cc->rpages[i] = NULL;
		cc->nr_rpages--;
	}

	/* we are done since all pages are beyond EOF */
	if (f2fs_cluster_is_empty(cc))
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (ret)
		goto out;

	/* slot 0 of a compressed cluster always holds COMPRESS_ADDR */
	f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);

	/* count the compressed blocks following the COMPRESS_ADDR marker */
	for (i = 1; i < cc->cluster_size; i++) {
		block_t blkaddr;

		blkaddr = data_blkaddr(dn.inode, dn.node_page,
						dn.ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			break;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
			ret = -EFAULT;
			goto out_put_dnode;
		}
		cc->nr_cpages++;
	}

	/* nothing to decompress */
	if (cc->nr_cpages == 0) {
		ret = 0;
		goto out_put_dnode;
	}

	dic = f2fs_alloc_dic(cc);
	if (IS_ERR(dic)) {
		ret = PTR_ERR(dic);
		goto out_put_dnode;
	}

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page = dic->cpages[i];
		block_t blkaddr;
		struct bio_post_read_ctx *ctx;

		blkaddr = data_blkaddr(dn.inode, dn.node_page,
						dn.ofs_in_node + i + 1);

		if (bio && (!page_is_mergeable(sbi, bio,
					*last_block_in_bio, blkaddr) ||
		    !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
submit_and_realloc:
			__submit_bio(sbi, bio, DATA);
			bio = NULL;
		}

		if (!bio) {
			bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
					is_readahead ? REQ_RAHEAD : 0,
					page->index, for_write);
			if (IS_ERR(bio)) {
				ret = PTR_ERR(bio);
				/* releases dic references taken so far */
				f2fs_decompress_end_io(dic, ret);
				f2fs_put_dnode(&dn);
				*bio_ret = NULL;
				return ret;
			}
		}

		f2fs_wait_on_block_writeback(inode, blkaddr);

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		/* each in-flight cpage pins the decompress context */
		ctx = bio->bi_private;
		ctx->enabled_steps |= STEP_DECOMPRESS;
		refcount_inc(&dic->refcnt);

		inc_page_count(sbi, F2FS_RD_DATA);
		f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
		f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE);
		ClearPageError(page);
		*last_block_in_bio = blkaddr;
	}

	f2fs_put_dnode(&dn);

	*bio_ret = bio;
	return 0;

out_put_dnode:
	f2fs_put_dnode(&dn);
out:
	/* error path: release every raw page still attached to the cluster */
	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i]) {
			ClearPageUptodate(cc->rpages[i]);
			ClearPageError(cc->rpages[i]);
			unlock_page(cc->rpages[i]);
		}
	}
	*bio_ret = bio;
	return ret;
}
#endif
22644c8ff709SChao Yu 
2265f1e88660SJaegeuk Kim /*
2266f1e88660SJaegeuk Kim  * This function was originally taken from fs/mpage.c, and customized for f2fs.
2267f1e88660SJaegeuk Kim  * Major change was from block_size == page_size in f2fs by default.
2268e2e59414SJaegeuk Kim  *
2269e2e59414SJaegeuk Kim  * Note that the aops->readpages() function is ONLY used for read-ahead. If
2270e2e59414SJaegeuk Kim  * this function ever deviates from doing just read-ahead, it should either
2271e2e59414SJaegeuk Kim  * use ->readpage() or do the necessary surgery to decouple ->readpages()
2272e2e59414SJaegeuk Kim  * from read-ahead.
2273f1e88660SJaegeuk Kim  */
/*
 * Core of both ->readpage (@rac == NULL, single @page) and ->readahead
 * (@rac != NULL, @page unused).  Dispatches each page either to the
 * compressed-cluster reader or to f2fs_read_single_page(), accumulating
 * work into one bio that is submitted at the end.
 */
static int f2fs_mpage_readpages(struct inode *inode,
		struct readahead_control *rac, struct page *page)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct f2fs_map_blocks map;
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = NULL_CLUSTER,
		.rpages = NULL,
		.cpages = NULL,
		.nr_rpages = 0,
		.nr_cpages = 0,
	};
#endif
	unsigned nr_pages = rac ? readahead_count(rac) : 1;
	unsigned max_nr_pages = nr_pages;
	int ret = 0;
	bool drop_ra = false;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;

	/*
	 * Two readahead threads for same address range can cause race condition
	 * which fragments sequential read IOs. So let's avoid each other.
	 */
	if (rac && readahead_count(rac)) {
		if (READ_ONCE(F2FS_I(inode)->ra_offset) == readahead_index(rac))
			drop_ra = true;
		else
			WRITE_ONCE(F2FS_I(inode)->ra_offset,
						readahead_index(rac));
	}

	for (; nr_pages; nr_pages--) {
		if (rac) {
			page = readahead_page(rac);
			prefetchw(&page->flags);
			/* another thread is reading this range: skip it */
			if (drop_ra) {
				f2fs_put_page(page, 1);
				continue;
			}
		}

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (f2fs_compressed_file(inode)) {
			/* there are remaining compressed pages, submit them */
			if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
				ret = f2fs_read_multi_pages(&cc, &bio,
							max_nr_pages,
							&last_block_in_bio,
							rac != NULL, false);
				f2fs_destroy_compress_ctx(&cc);
				if (ret)
					goto set_error_page;
			}
			ret = f2fs_is_compressed_cluster(inode, page->index);
			if (ret < 0)
				goto set_error_page;
			else if (!ret)
				goto read_single_page;

			ret = f2fs_init_compress_ctx(&cc);
			if (ret)
				goto set_error_page;

			f2fs_compress_ctx_add_page(&cc, page);

			goto next_page;
		}
read_single_page:
#endif

		ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
					&bio, &last_block_in_bio, rac);
		if (ret) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
set_error_page:
#endif
			SetPageError(page);
			zero_user_segment(page, 0, PAGE_SIZE);
			unlock_page(page);
		}
#ifdef CONFIG_F2FS_FS_COMPRESSION
next_page:
#endif
		/* drop the extra reference taken by readahead_page() */
		if (rac)
			put_page(page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (f2fs_compressed_file(inode)) {
			/* last page */
			if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
				ret = f2fs_read_multi_pages(&cc, &bio,
							max_nr_pages,
							&last_block_in_bio,
							rac != NULL, false);
				f2fs_destroy_compress_ctx(&cc);
			}
		}
#endif
	}
	if (bio)
		__submit_bio(F2FS_I_SB(inode), bio, DATA);

	/* release our claim on this readahead range */
	if (rac && readahead_count(rac) && !drop_ra)
		WRITE_ONCE(F2FS_I(inode)->ra_offset, -1);
	return ret;
}
2393f1e88660SJaegeuk Kim 
2394eb47b800SJaegeuk Kim static int f2fs_read_data_page(struct file *file, struct page *page)
2395eb47b800SJaegeuk Kim {
23964969c06aSJaegeuk Kim 	struct inode *inode = page_file_mapping(page)->host;
2397b3d208f9SJaegeuk Kim 	int ret = -EAGAIN;
23989ffe0fb5SHuajun Li 
2399c20e89cdSChao Yu 	trace_f2fs_readpage(page, DATA);
2400c20e89cdSChao Yu 
24014c8ff709SChao Yu 	if (!f2fs_is_compress_backend_ready(inode)) {
24024c8ff709SChao Yu 		unlock_page(page);
24034c8ff709SChao Yu 		return -EOPNOTSUPP;
24044c8ff709SChao Yu 	}
24054c8ff709SChao Yu 
2406e1c42045Sarter97 	/* If the file has inline data, try to read it directly */
24079ffe0fb5SHuajun Li 	if (f2fs_has_inline_data(inode))
24089ffe0fb5SHuajun Li 		ret = f2fs_read_inline_data(inode, page);
2409b3d208f9SJaegeuk Kim 	if (ret == -EAGAIN)
2410e20a7693SMatthew Wilcox (Oracle) 		ret = f2fs_mpage_readpages(inode, NULL, page);
24119ffe0fb5SHuajun Li 	return ret;
2412eb47b800SJaegeuk Kim }
2413eb47b800SJaegeuk Kim 
241423323196SMatthew Wilcox (Oracle) static void f2fs_readahead(struct readahead_control *rac)
2415eb47b800SJaegeuk Kim {
241623323196SMatthew Wilcox (Oracle) 	struct inode *inode = rac->mapping->host;
2417b8c29400SChao Yu 
241823323196SMatthew Wilcox (Oracle) 	trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
24199ffe0fb5SHuajun Li 
24204c8ff709SChao Yu 	if (!f2fs_is_compress_backend_ready(inode))
242123323196SMatthew Wilcox (Oracle) 		return;
24224c8ff709SChao Yu 
24239ffe0fb5SHuajun Li 	/* If the file has inline data, skip readpages */
24249ffe0fb5SHuajun Li 	if (f2fs_has_inline_data(inode))
242523323196SMatthew Wilcox (Oracle) 		return;
24269ffe0fb5SHuajun Li 
2427e20a7693SMatthew Wilcox (Oracle) 	f2fs_mpage_readpages(inode, rac, NULL);
2428eb47b800SJaegeuk Kim }
2429eb47b800SJaegeuk Kim 
/*
 * Encrypt the data page of @fio (or its compressed page, if present) into
 * fio->encrypted_page before writeback.
 *
 * No-ops for unencrypted files and for inodes using inline (hardware)
 * crypto.  On -ENOMEM the pending writes are flushed and the allocation is
 * retried with __GFP_NOFAIL.  If an up-to-date copy of the old block exists
 * in META_MAPPING (GC migration cache), it is refreshed with the new
 * ciphertext so readers of that cache see consistent data.
 *
 * Returns 0 on success or a negative errno from the encryption layer.
 */
int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;
	struct page *mpage, *page;
	gfp_t gfp_flags = GFP_NOFS;

	if (!f2fs_encrypted_file(inode))
		return 0;

	page = fio->compressed_page ? fio->compressed_page : fio->page;

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);

	/* inline crypto encrypts in the block layer; nothing to do here */
	if (fscrypt_inode_uses_inline_crypto(inode))
		return 0;

retry_encrypt:
	fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
					PAGE_SIZE, 0, gfp_flags);
	if (IS_ERR(fio->encrypted_page)) {
		/* flush pending IOs and wait for a while in the ENOMEM case */
		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
			f2fs_flush_merged_writes(fio->sbi);
			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
			gfp_flags |= __GFP_NOFAIL;
			goto retry_encrypt;
		}
		return PTR_ERR(fio->encrypted_page);
	}

	/* keep the GC meta-mapping copy of this block in sync */
	mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
	if (mpage) {
		if (PageUptodate(mpage))
			memcpy(page_address(mpage),
				page_address(fio->encrypted_page), PAGE_SIZE);
		f2fs_put_page(mpage, 1);
	}
	return 0;
}
24706aa58d8aSChao Yu 
2471bb9e3bb8SChao Yu static inline bool check_inplace_update_policy(struct inode *inode,
2472bb9e3bb8SChao Yu 				struct f2fs_io_info *fio)
2473bb9e3bb8SChao Yu {
2474bb9e3bb8SChao Yu 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2475bb9e3bb8SChao Yu 	unsigned int policy = SM_I(sbi)->ipu_policy;
2476bb9e3bb8SChao Yu 
2477bb9e3bb8SChao Yu 	if (policy & (0x1 << F2FS_IPU_FORCE))
2478bb9e3bb8SChao Yu 		return true;
24794d57b86dSChao Yu 	if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
2480bb9e3bb8SChao Yu 		return true;
2481bb9e3bb8SChao Yu 	if (policy & (0x1 << F2FS_IPU_UTIL) &&
2482bb9e3bb8SChao Yu 			utilization(sbi) > SM_I(sbi)->min_ipu_util)
2483bb9e3bb8SChao Yu 		return true;
24844d57b86dSChao Yu 	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
2485bb9e3bb8SChao Yu 			utilization(sbi) > SM_I(sbi)->min_ipu_util)
2486bb9e3bb8SChao Yu 		return true;
2487bb9e3bb8SChao Yu 
2488bb9e3bb8SChao Yu 	/*
2489bb9e3bb8SChao Yu 	 * IPU for rewrite async pages
2490bb9e3bb8SChao Yu 	 */
2491bb9e3bb8SChao Yu 	if (policy & (0x1 << F2FS_IPU_ASYNC) &&
2492bb9e3bb8SChao Yu 			fio && fio->op == REQ_OP_WRITE &&
2493bb9e3bb8SChao Yu 			!(fio->op_flags & REQ_SYNC) &&
249462230e0dSChandan Rajendra 			!IS_ENCRYPTED(inode))
2495bb9e3bb8SChao Yu 		return true;
2496bb9e3bb8SChao Yu 
2497bb9e3bb8SChao Yu 	/* this is only set during fdatasync */
2498bb9e3bb8SChao Yu 	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
2499bb9e3bb8SChao Yu 			is_inode_flag_set(inode, FI_NEED_IPU))
2500bb9e3bb8SChao Yu 		return true;
2501bb9e3bb8SChao Yu 
25024354994fSDaniel Rosenberg 	if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
25034354994fSDaniel Rosenberg 			!f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
25044354994fSDaniel Rosenberg 		return true;
25054354994fSDaniel Rosenberg 
2506bb9e3bb8SChao Yu 	return false;
2507bb9e3bb8SChao Yu }
2508bb9e3bb8SChao Yu 
25094d57b86dSChao Yu bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
2510bb9e3bb8SChao Yu {
2511bb9e3bb8SChao Yu 	if (f2fs_is_pinned_file(inode))
2512bb9e3bb8SChao Yu 		return true;
2513bb9e3bb8SChao Yu 
2514bb9e3bb8SChao Yu 	/* if this is cold file, we should overwrite to avoid fragmentation */
2515bb9e3bb8SChao Yu 	if (file_is_cold(inode))
2516bb9e3bb8SChao Yu 		return true;
2517bb9e3bb8SChao Yu 
2518bb9e3bb8SChao Yu 	return check_inplace_update_policy(inode, fio);
2519bb9e3bb8SChao Yu }
2520bb9e3bb8SChao Yu 
25214d57b86dSChao Yu bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
2522bb9e3bb8SChao Yu {
2523bb9e3bb8SChao Yu 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2524bb9e3bb8SChao Yu 
2525b0332a0fSChao Yu 	if (f2fs_lfs_mode(sbi))
2526bb9e3bb8SChao Yu 		return true;
2527bb9e3bb8SChao Yu 	if (S_ISDIR(inode->i_mode))
2528bb9e3bb8SChao Yu 		return true;
2529af033b2aSChao Yu 	if (IS_NOQUOTA(inode))
2530af033b2aSChao Yu 		return true;
2531bb9e3bb8SChao Yu 	if (f2fs_is_atomic_file(inode))
2532bb9e3bb8SChao Yu 		return true;
2533bb9e3bb8SChao Yu 	if (fio) {
2534bb9e3bb8SChao Yu 		if (is_cold_data(fio->page))
2535bb9e3bb8SChao Yu 			return true;
2536bb9e3bb8SChao Yu 		if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
2537bb9e3bb8SChao Yu 			return true;
25384354994fSDaniel Rosenberg 		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
25394354994fSDaniel Rosenberg 			f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
25404354994fSDaniel Rosenberg 			return true;
2541bb9e3bb8SChao Yu 	}
2542bb9e3bb8SChao Yu 	return false;
2543bb9e3bb8SChao Yu }
2544bb9e3bb8SChao Yu 
25457eab0c0dSHou Pengyang static inline bool need_inplace_update(struct f2fs_io_info *fio)
25467eab0c0dSHou Pengyang {
25477eab0c0dSHou Pengyang 	struct inode *inode = fio->page->mapping->host;
25487eab0c0dSHou Pengyang 
25494d57b86dSChao Yu 	if (f2fs_should_update_outplace(inode, fio))
25507eab0c0dSHou Pengyang 		return false;
25517eab0c0dSHou Pengyang 
25524d57b86dSChao Yu 	return f2fs_should_update_inplace(inode, fio);
25537eab0c0dSHou Pengyang }
25547eab0c0dSHou Pengyang 
/*
 * Write one data page, choosing between in-place update (IPU) and
 * out-of-place (OPU/LFS) allocation.
 *
 * Locking follows fio->need_lock: LOCK_REQ takes f2fs_trylock_op() up
 * front, LOCK_RETRY takes it only for the OPU path, and LOCK_DONE (set
 * here on the extent-cache IPU fast path) takes nothing.  The trylock is
 * used to avoid an ABBA deadlock between page lock and f2fs_lock_op();
 * -EAGAIN tells the caller to redirty and retry.
 *
 * Returns 0 on success or a negative errno (-EAGAIN, -EFSCORRUPTED, or an
 * error from the node/encryption/write helpers).
 */
int f2fs_do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	struct extent_info ei = {0,0,0};
	struct node_info ni;
	bool ipu_force = false;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	/* IPU fast path: resolve the old block from the extent cache */
	if (need_inplace_update(fio) &&
			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
		fio->old_blkaddr = ei.blk + page->index - ei.fofs;

		if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
						DATA_GENERIC_ENHANCE))
			return -EFSCORRUPTED;

		ipu_force = true;
		fio->need_lock = LOCK_DONE;
		goto got_it;
	}

	/* Deadlock due to between page->lock and f2fs_lock_op */
	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
		return -EAGAIN;

	err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		goto out;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		clear_cold_data(page);
		goto out_writepage;
	}
got_it:
	if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
		!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
						DATA_GENERIC_ENHANCE)) {
		err = -EFSCORRUPTED;
		goto out_writepage;
	}
	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
	if (ipu_force ||
		(__is_valid_data_blkaddr(fio->old_blkaddr) &&
					need_inplace_update(fio))) {
		err = f2fs_encrypt_one_page(fio);
		if (err)
			goto out_writepage;

		set_page_writeback(page);
		ClearPageError(page);
		/* dnode and op lock are released before issuing the IPU IO */
		f2fs_put_dnode(&dn);
		if (fio->need_lock == LOCK_REQ)
			f2fs_unlock_op(fio->sbi);
		err = f2fs_inplace_write_data(fio);
		if (err) {
			/* undo writeback state so the page can be retried */
			if (fscrypt_inode_uses_fs_layer_crypto(inode))
				fscrypt_finalize_bounce_page(&fio->encrypted_page);
			if (PageWriteback(page))
				end_page_writeback(page);
		} else {
			set_inode_flag(inode, FI_UPDATE_WRITE);
		}
		trace_f2fs_do_write_data_page(fio->page, IPU);
		return err;
	}

	/* OPU path: LOCK_RETRY callers take the op lock only now */
	if (fio->need_lock == LOCK_RETRY) {
		if (!f2fs_trylock_op(fio->sbi)) {
			err = -EAGAIN;
			goto out_writepage;
		}
		fio->need_lock = LOCK_REQ;
	}

	err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
	if (err)
		goto out_writepage;

	fio->version = ni.version;

	err = f2fs_encrypt_one_page(fio);
	if (err)
		goto out_writepage;

	set_page_writeback(page);
	ClearPageError(page);

	/* releasing a slot of a formerly compressed cluster */
	if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
		f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);

	/* LFS mode write path */
	f2fs_outplace_write_data(&dn, fio);
	trace_f2fs_do_write_data_page(page, OPU);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
out_writepage:
	f2fs_put_dnode(&dn);
out:
	if (fio->need_lock == LOCK_REQ)
		f2fs_unlock_op(fio->sbi);
	return err;
}
2668eb47b800SJaegeuk Kim 
/*
 * Write one data page out (or decide not to).  Returns 0 on success/skip,
 * AOP_WRITEPAGE_ACTIVATE when the page was redirtied and should be kept on
 * the active list, or a negative errno.
 *
 * @page:		locked, dirty data page to write
 * @submitted:		if non-NULL, set to 1 when a bio was actually submitted
 * @bio:		cached bio for merging in-place-update writes
 * @last_block:		last block written to @bio, for merge decisions
 * @wbc:		writeback control from the caller
 * @io_type:		iostat accounting class
 * @compr_blocks:	number of compressed blocks (0 for normal pages)
 * @allow_balance:	whether f2fs_balance_fs() may be invoked on exit
 */
int f2fs_write_single_data_page(struct page *page, int *submitted,
				struct bio **bio,
				sector_t *last_block,
				struct writeback_control *wbc,
				enum iostat_type io_type,
				int compr_blocks,
				bool allow_balance)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	/* index of the page containing EOF */
	const pgoff_t end_index = ((unsigned long long)i_size)
							>> PAGE_SHIFT;
	/* file offset just past this page; candidate for last_disk_size */
	loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NULL_ADDR,
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.compr_blocks = compr_blocks,
		.need_lock = LOCK_RETRY,
		.io_type = io_type,
		.io_wbc = wbc,
		.bio = bio,
		.last_block = last_block,
	};

	trace_f2fs_writepage(page, DATA);

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		/*
		 * don't drop any dirty dentry pages for keeping latest
		 * directory structure.
		 */
		if (S_ISDIR(inode->i_mode))
			goto redirty_out;
		goto out;
	}

	/* recovery (power-on-recovery) in progress: keep the page dirty */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	/*
	 * Pages fully inside i_size, verity-in-progress pages (which may sit
	 * past EOF by design), and compressed pages are written as-is.
	 */
	if (page->index < end_index ||
			f2fs_verity_in_progress(inode) ||
			compr_blocks)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	/* EOF page: zero the tail beyond i_size before writing */
	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write 0'th page having journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			f2fs_available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* Dentry/quota blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
		/*
		 * We need to wait for node_write to avoid block allocation during
		 * checkpoint. This can only happen to quota writes which can cause
		 * the below discard race condition.
		 */
		if (IS_NOQUOTA(inode))
			down_read(&sbi->node_write);

		fio.need_lock = LOCK_DONE;
		err = f2fs_do_write_data_page(&fio);

		if (IS_NOQUOTA(inode))
			up_read(&sbi->node_write);

		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;
	else
		set_inode_flag(inode, FI_HOT_DATA);

	/* -EAGAIN here means "fall through to the regular write path" */
	err = -EAGAIN;
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_write_inline_data(inode, page);
		if (!err)
			goto out;
	}

	if (err == -EAGAIN) {
		err = f2fs_do_write_data_page(&fio);
		if (err == -EAGAIN) {
			/* retry once more, this time taking f2fs_lock_op */
			fio.need_lock = LOCK_REQ;
			err = f2fs_do_write_data_page(&fio);
		}
	}

	if (err) {
		file_set_keep_isize(inode);
	} else {
		/* track the largest on-disk size for fsync/i_size recovery */
		spin_lock(&F2FS_I(inode)->i_size_lock);
		if (F2FS_I(inode)->last_disk_size < psize)
			F2FS_I(inode)->last_disk_size = psize;
		spin_unlock(&F2FS_I(inode)->i_size_lock);
	}

done:
	if (err && err != -ENOENT)
		goto redirty_out;

out:
	inode_dec_dirty_pages(inode);
	if (err) {
		ClearPageUptodate(page);
		clear_cold_data(page);
	}

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
		clear_inode_flag(inode, FI_HOT_DATA);
		f2fs_remove_dirty_inode(inode);
		/* fio.submitted is not meaningful to the reclaim caller */
		submitted = NULL;
	}
	unlock_page(page);
	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
			!F2FS_I(inode)->cp_task && allow_balance)
		f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi))) {
		/* flush everything cached so far; further merging is pointless */
		f2fs_submit_merged_write(sbi, DATA);
		f2fs_submit_merged_ipu_write(sbi, bio, NULL);
		submitted = NULL;
	}

	if (submitted)
		*submitted = fio.submitted ? 1 : 0;

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	/*
	 * pageout() in MM translates EAGAIN, so calls handle_write_error()
	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
	 * file_write_and_wait_range() will see EIO error, which is critical
	 * to return value of fsync() followed by atomic_write failure to user.
	 */
	if (!err || wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return err;
}
2840fa9150a8SNamjae Jeon 
2841f566bae8SJaegeuk Kim static int f2fs_write_data_page(struct page *page,
2842f566bae8SJaegeuk Kim 					struct writeback_control *wbc)
2843f566bae8SJaegeuk Kim {
28444c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
28454c8ff709SChao Yu 	struct inode *inode = page->mapping->host;
28464c8ff709SChao Yu 
28474c8ff709SChao Yu 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
28484c8ff709SChao Yu 		goto out;
28494c8ff709SChao Yu 
28504c8ff709SChao Yu 	if (f2fs_compressed_file(inode)) {
28514c8ff709SChao Yu 		if (f2fs_is_compressed_cluster(inode, page->index)) {
28524c8ff709SChao Yu 			redirty_page_for_writepage(wbc, page);
28534c8ff709SChao Yu 			return AOP_WRITEPAGE_ACTIVATE;
28544c8ff709SChao Yu 		}
28554c8ff709SChao Yu 	}
28564c8ff709SChao Yu out:
28574c8ff709SChao Yu #endif
28584c8ff709SChao Yu 
28594c8ff709SChao Yu 	return f2fs_write_single_data_page(page, NULL, NULL, NULL,
2860*3afae09fSChao Yu 						wbc, FS_DATA_IO, 0, true);
2861f566bae8SJaegeuk Kim }
2862f566bae8SJaegeuk Kim 
/*
 * This function was copied from write_cache_pages from mm/page-writeback.c.
 * The major change is making write step of cold data page separately from
 * warm/hot data page.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int ret = 0;
	int done = 0, retry = 0;
	struct pagevec pvec;
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct bio *bio = NULL;		/* cached bio for IPU write merging */
	sector_t last_block;
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct inode *inode = mapping->host;
	/* per-call compression context; pages are accumulated per cluster */
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = NULL_CLUSTER,
		.rpages = NULL,
		.nr_rpages = 0,
		.cpages = NULL,
		.rbuf = NULL,
		.cbuf = NULL,
		.rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
		.private = NULL,
	};
#endif
	int nr_pages;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int range_whole = 0;
	xa_mark_t tag;
	int nwritten = 0;
	int submitted = 0;
	int i;

	pagevec_init(&pvec);

	/* few dirty pages hints hot data; steer allocation accordingly */
	if (get_dirty_pages(mapping->host) <=
				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
		set_inode_flag(mapping->host, FI_HOT_DATA);
	else
		clear_inode_flag(mapping->host, FI_HOT_DATA);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	retry = 0;
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && !retry && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool need_readd;
readd:
			need_readd = false;
#ifdef CONFIG_F2FS_FS_COMPRESSION
			if (f2fs_compressed_file(inode)) {
				ret = f2fs_init_compress_ctx(&cc);
				if (ret) {
					done = 1;
					break;
				}

				/*
				 * page starts a new cluster: flush the one
				 * accumulated so far, then revisit this page
				 * (need_readd) so it joins the next cluster.
				 */
				if (!f2fs_cluster_can_merge_page(&cc,
								page->index)) {
					ret = f2fs_write_multi_pages(&cc,
						&submitted, wbc, io_type);
					if (!ret)
						need_readd = true;
					goto result;
				}

				if (unlikely(f2fs_cp_error(sbi)))
					goto lock_page;

				if (f2fs_cluster_is_empty(&cc)) {
					void *fsdata = NULL;
					struct page *pagep;
					int ret2;

					/*
					 * decompress an existing cluster in
					 * place before overwriting part of it
					 */
					ret2 = f2fs_prepare_compress_overwrite(
							inode, &pagep,
							page->index, &fsdata);
					if (ret2 < 0) {
						ret = ret2;
						done = 1;
						break;
					} else if (ret2 &&
						!f2fs_compress_write_end(inode,
								fsdata, page->index,
								1)) {
						/* raced; rescan from scratch */
						retry = 1;
						break;
					}
				} else {
					goto lock_page;
				}
			}
#endif
			/* give a priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[DATA]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
#ifdef CONFIG_F2FS_FS_COMPRESSION
lock_page:
#endif
			done_index = page->index;
retry_write:
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
							DATA, true, true);
				else
					goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

#ifdef CONFIG_F2FS_FS_COMPRESSION
			if (f2fs_compressed_file(inode)) {
				/* defer: page is written with its cluster */
				get_page(page);
				f2fs_compress_ctx_add_page(&cc, page);
				continue;
			}
#endif
			ret = f2fs_write_single_data_page(page, &submitted,
					&bio, &last_block, wbc, io_type,
					0, true);
			if (ret == AOP_WRITEPAGE_ACTIVATE)
				unlock_page(page);
#ifdef CONFIG_F2FS_FS_COMPRESSION
result:
#endif
			nwritten += submitted;
			wbc->nr_to_write -= submitted;

			if (unlikely(ret)) {
				/*
				 * keep nr_to_write, since vfs uses this to
				 * get # of written pages.
				 */
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					ret = 0;
					goto next;
				} else if (ret == -EAGAIN) {
					ret = 0;
					if (wbc->sync_mode == WB_SYNC_ALL) {
						cond_resched();
						congestion_wait(BLK_RW_ASYNC,
							DEFAULT_IO_TIMEOUT);
						goto retry_write;
					}
					goto next;
				}
				done_index = page->index + 1;
				done = 1;
				break;
			}

			if (wbc->nr_to_write <= 0 &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
next:
			if (need_readd)
				goto readd;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
#ifdef CONFIG_F2FS_FS_COMPRESSION
	/* flush remained pages in compress cluster */
	if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
		ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
		nwritten += submitted;
		wbc->nr_to_write -= submitted;
		if (ret) {
			done = 1;
			retry = 0;
		}
	}
	if (f2fs_compressed_file(inode))
		f2fs_destroy_compress_ctx(&cc);
#endif
	if (retry) {
		/* rescan the whole range after a compression race */
		index = 0;
		end = -1;
		goto retry;
	}
	if (wbc->range_cyclic && !done)
		done_index = 0;
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (nwritten)
		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
								NULL, 0, DATA);
	/* submit cached bio of IPU write */
	if (bio)
		f2fs_submit_merged_ipu_write(sbi, &bio, NULL);

	return ret;
}
31068f46dcaeSChao Yu 
3107853137ceSJaegeuk Kim static inline bool __should_serialize_io(struct inode *inode,
3108853137ceSJaegeuk Kim 					struct writeback_control *wbc)
3109853137ceSJaegeuk Kim {
3110040d2bb3SChao Yu 	/* to avoid deadlock in path of data flush */
3111040d2bb3SChao Yu 	if (F2FS_I(inode)->cp_task)
3112040d2bb3SChao Yu 		return false;
3113b13f67ffSChao Yu 
3114b13f67ffSChao Yu 	if (!S_ISREG(inode->i_mode))
3115b13f67ffSChao Yu 		return false;
3116b13f67ffSChao Yu 	if (IS_NOQUOTA(inode))
3117b13f67ffSChao Yu 		return false;
3118b13f67ffSChao Yu 
3119602a16d5SDaeho Jeong 	if (f2fs_need_compress_data(inode))
3120b13f67ffSChao Yu 		return true;
3121853137ceSJaegeuk Kim 	if (wbc->sync_mode != WB_SYNC_ALL)
3122853137ceSJaegeuk Kim 		return true;
3123853137ceSJaegeuk Kim 	if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
3124853137ceSJaegeuk Kim 		return true;
3125853137ceSJaegeuk Kim 	return false;
3126853137ceSJaegeuk Kim }
3127853137ceSJaegeuk Kim 
/*
 * Common ->writepages implementation: apply the various skip heuristics,
 * optionally serialize with other writers, and hand the real work to
 * f2fs_write_cache_pages() under a block plug.
 *
 * @io_type distinguishes checkpoint-driven writeback (FS_CP_DATA_IO) from
 * regular writeback (FS_DATA_IO) for iostat accounting.
 */
static int __f2fs_write_data_pages(struct address_space *mapping,
						struct writeback_control *wbc,
						enum iostat_type io_type)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;
	bool locked = false;

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* small dirty sets of dentry/quota pages are left to checkpoint */
	if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			f2fs_available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* to avoid spliting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[DATA]);
	else if (atomic_read(&sbi->wb_sync_req[DATA]))
		/* a WB_SYNC_ALL writer is active; yield to it */
		goto skip_write;

	if (__should_serialize_io(inode, wbc)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
	blk_finish_plug(&plug);

	if (locked)
		mutex_unlock(&sbi->writepages);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[DATA]);
	/*
	 * if some pages were truncated, we cannot guarantee its mapping->host
	 * to detect pending bios.
	 */

	f2fs_remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}
3195eb47b800SJaegeuk Kim 
3196b0af6d49SChao Yu static int f2fs_write_data_pages(struct address_space *mapping,
3197b0af6d49SChao Yu 			    struct writeback_control *wbc)
3198b0af6d49SChao Yu {
3199b0af6d49SChao Yu 	struct inode *inode = mapping->host;
3200b0af6d49SChao Yu 
3201b0af6d49SChao Yu 	return __f2fs_write_data_pages(mapping, wbc,
3202b0af6d49SChao Yu 			F2FS_I(inode)->cp_task == current ?
3203b0af6d49SChao Yu 			FS_CP_DATA_IO : FS_DATA_IO);
3204b0af6d49SChao Yu }
3205b0af6d49SChao Yu 
32063aab8f82SChao Yu static void f2fs_write_failed(struct address_space *mapping, loff_t to)
32073aab8f82SChao Yu {
32083aab8f82SChao Yu 	struct inode *inode = mapping->host;
3209819d9153SJaegeuk Kim 	loff_t i_size = i_size_read(inode);
32103aab8f82SChao Yu 
32113f188c23SJaegeuk Kim 	if (IS_NOQUOTA(inode))
32123f188c23SJaegeuk Kim 		return;
32133f188c23SJaegeuk Kim 
321495ae251fSEric Biggers 	/* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
321595ae251fSEric Biggers 	if (to > i_size && !f2fs_verity_in_progress(inode)) {
3216a33c1502SChao Yu 		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
32176f8d4455SJaegeuk Kim 		down_write(&F2FS_I(inode)->i_mmap_sem);
3218a33c1502SChao Yu 
3219819d9153SJaegeuk Kim 		truncate_pagecache(inode, i_size);
3220c42d28ceSChao Yu 		f2fs_truncate_blocks(inode, i_size, true);
3221a33c1502SChao Yu 
32225a3a2d83SQiuyang Sun 		up_write(&F2FS_I(inode)->i_mmap_sem);
32236f8d4455SJaegeuk Kim 		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
32243aab8f82SChao Yu 	}
32253aab8f82SChao Yu }
32263aab8f82SChao Yu 
/*
 * Resolve the on-disk block address for the page being prepared by
 * ->write_begin, converting inline data and reserving a block as needed.
 *
 * On success, *blk_addr holds the page's data block address (NULL_ADDR or
 * NEW_ADDR for holes/new blocks) and *node_changed reports whether the node
 * page was dirtied (e.g. by inline-data conversion).  Returns 0 or -errno.
 */
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;		/* holding the map lock taken below */
	struct extent_info ei = {0,0,0};
	int err = 0;
	int flag;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
	    !is_inode_flag_set(inode, FI_NO_PREALLOC) &&
	    !f2fs_verity_in_progress(inode))
		return 0;

	/* f2fs_lock_op avoids race between write CP and convert_inline_page */
	if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
		flag = F2FS_GET_BLOCK_DEFAULT;
	else
		flag = F2FS_GET_BLOCK_PRE_AIO;

	/* writes past EOF or on inline files may allocate: take the lock */
	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		f2fs_do_map_lock(sbi, flag, true);
		locked = true;
	}

restart:
	/* check inline_data */
	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA(inode)) {
			/* write still fits inline: just populate the page */
			f2fs_do_read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			/* write outgrows inline area: convert to a block */
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				/* need allocation: retry under the map lock */
				f2fs_put_dnode(&dn);
				f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
								true);
				WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_do_map_lock(sbi, flag, false);
	return err;
}
33132aadac08SJaegeuk Kim 
3314eb47b800SJaegeuk Kim static int f2fs_write_begin(struct file *file, struct address_space *mapping,
3315eb47b800SJaegeuk Kim 		loff_t pos, unsigned len, unsigned flags,
3316eb47b800SJaegeuk Kim 		struct page **pagep, void **fsdata)
3317eb47b800SJaegeuk Kim {
3318eb47b800SJaegeuk Kim 	struct inode *inode = mapping->host;
33194081363fSJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
332086531d6bSJaegeuk Kim 	struct page *page = NULL;
332109cbfeafSKirill A. Shutemov 	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
3322a2e2e76bSChao Yu 	bool need_balance = false, drop_atomic = false;
33232aadac08SJaegeuk Kim 	block_t blkaddr = NULL_ADDR;
3324eb47b800SJaegeuk Kim 	int err = 0;
3325eb47b800SJaegeuk Kim 
332662aed044SChao Yu 	trace_f2fs_write_begin(inode, pos, len, flags);
332762aed044SChao Yu 
332800e09c0bSChao Yu 	if (!f2fs_is_checkpoint_ready(sbi)) {
332900e09c0bSChao Yu 		err = -ENOSPC;
33304354994fSDaniel Rosenberg 		goto fail;
333100e09c0bSChao Yu 	}
33324354994fSDaniel Rosenberg 
3333455e3a58SJaegeuk Kim 	if ((f2fs_is_atomic_file(inode) &&
3334455e3a58SJaegeuk Kim 			!f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
3335455e3a58SJaegeuk Kim 			is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
333657864ae5SJaegeuk Kim 		err = -ENOMEM;
3337a2e2e76bSChao Yu 		drop_atomic = true;
333857864ae5SJaegeuk Kim 		goto fail;
333957864ae5SJaegeuk Kim 	}
334057864ae5SJaegeuk Kim 
33415f727395SJaegeuk Kim 	/*
33425f727395SJaegeuk Kim 	 * We should check this at this moment to avoid deadlock on inode page
33435f727395SJaegeuk Kim 	 * and #0 page. The locking rule for inline_data conversion should be:
33445f727395SJaegeuk Kim 	 * lock_page(page #0) -> lock_page(inode_page)
33455f727395SJaegeuk Kim 	 */
33465f727395SJaegeuk Kim 	if (index != 0) {
33475f727395SJaegeuk Kim 		err = f2fs_convert_inline_inode(inode);
33485f727395SJaegeuk Kim 		if (err)
33495f727395SJaegeuk Kim 			goto fail;
33505f727395SJaegeuk Kim 	}
33514c8ff709SChao Yu 
33524c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
33534c8ff709SChao Yu 	if (f2fs_compressed_file(inode)) {
33544c8ff709SChao Yu 		int ret;
33554c8ff709SChao Yu 
33564c8ff709SChao Yu 		*fsdata = NULL;
33574c8ff709SChao Yu 
33584c8ff709SChao Yu 		ret = f2fs_prepare_compress_overwrite(inode, pagep,
33594c8ff709SChao Yu 							index, fsdata);
33604c8ff709SChao Yu 		if (ret < 0) {
33614c8ff709SChao Yu 			err = ret;
33624c8ff709SChao Yu 			goto fail;
33634c8ff709SChao Yu 		} else if (ret) {
33644c8ff709SChao Yu 			return 0;
33654c8ff709SChao Yu 		}
33664c8ff709SChao Yu 	}
33674c8ff709SChao Yu #endif
33684c8ff709SChao Yu 
3369afcb7ca0SJaegeuk Kim repeat:
337086d54795SJaegeuk Kim 	/*
337186d54795SJaegeuk Kim 	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
337286d54795SJaegeuk Kim 	 * wait_for_stable_page. Will wait that below with our IO control.
337386d54795SJaegeuk Kim 	 */
337401eccef7SChao Yu 	page = f2fs_pagecache_get_page(mapping, index,
337586d54795SJaegeuk Kim 				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
33763aab8f82SChao Yu 	if (!page) {
33773aab8f82SChao Yu 		err = -ENOMEM;
33783aab8f82SChao Yu 		goto fail;
33793aab8f82SChao Yu 	}
3380d5f66990SJaegeuk Kim 
33814c8ff709SChao Yu 	/* TODO: cluster can be compressed due to race with .writepage */
33824c8ff709SChao Yu 
3383eb47b800SJaegeuk Kim 	*pagep = page;
3384eb47b800SJaegeuk Kim 
33852aadac08SJaegeuk Kim 	err = prepare_write_begin(sbi, page, pos, len,
33862aadac08SJaegeuk Kim 					&blkaddr, &need_balance);
3387b3d208f9SJaegeuk Kim 	if (err)
33882aadac08SJaegeuk Kim 		goto fail;
3389759af1c9SFan Li 
3390af033b2aSChao Yu 	if (need_balance && !IS_NOQUOTA(inode) &&
3391af033b2aSChao Yu 			has_not_enough_free_secs(sbi, 0, 0)) {
33922a340760SJaegeuk Kim 		unlock_page(page);
33932c4db1a6SJaegeuk Kim 		f2fs_balance_fs(sbi, true);
33942a340760SJaegeuk Kim 		lock_page(page);
33952a340760SJaegeuk Kim 		if (page->mapping != mapping) {
33962a340760SJaegeuk Kim 			/* The page got truncated from under us */
33972a340760SJaegeuk Kim 			f2fs_put_page(page, 1);
33982a340760SJaegeuk Kim 			goto repeat;
33992a340760SJaegeuk Kim 		}
34002a340760SJaegeuk Kim 	}
34012a340760SJaegeuk Kim 
3402bae0ee7aSChao Yu 	f2fs_wait_on_page_writeback(page, DATA, false, true);
3403b3d208f9SJaegeuk Kim 
3404649d7df2SJaegeuk Kim 	if (len == PAGE_SIZE || PageUptodate(page))
3405649d7df2SJaegeuk Kim 		return 0;
3406eb47b800SJaegeuk Kim 
340795ae251fSEric Biggers 	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
340895ae251fSEric Biggers 	    !f2fs_verity_in_progress(inode)) {
3409746e2403SYunlei He 		zero_user_segment(page, len, PAGE_SIZE);
3410746e2403SYunlei He 		return 0;
3411746e2403SYunlei He 	}
3412746e2403SYunlei He 
34132aadac08SJaegeuk Kim 	if (blkaddr == NEW_ADDR) {
341409cbfeafSKirill A. Shutemov 		zero_user_segment(page, 0, PAGE_SIZE);
3415649d7df2SJaegeuk Kim 		SetPageUptodate(page);
3416d54c795bSChao Yu 	} else {
341793770ab7SChao Yu 		if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
341893770ab7SChao Yu 				DATA_GENERIC_ENHANCE_READ)) {
341910f966bbSChao Yu 			err = -EFSCORRUPTED;
342093770ab7SChao Yu 			goto fail;
342193770ab7SChao Yu 		}
3422b7973091SJia Yang 		err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
342313ba41e3SJaegeuk Kim 		if (err)
34243aab8f82SChao Yu 			goto fail;
3425d54c795bSChao Yu 
3426393ff91fSJaegeuk Kim 		lock_page(page);
34276bacf52fSJaegeuk Kim 		if (unlikely(page->mapping != mapping)) {
3428afcb7ca0SJaegeuk Kim 			f2fs_put_page(page, 1);
3429afcb7ca0SJaegeuk Kim 			goto repeat;
3430eb47b800SJaegeuk Kim 		}
34311563ac75SChao Yu 		if (unlikely(!PageUptodate(page))) {
34321563ac75SChao Yu 			err = -EIO;
34334375a336SJaegeuk Kim 			goto fail;
34344375a336SJaegeuk Kim 		}
34354375a336SJaegeuk Kim 	}
3436eb47b800SJaegeuk Kim 	return 0;
34379ba69cf9SJaegeuk Kim 
34383aab8f82SChao Yu fail:
343986531d6bSJaegeuk Kim 	f2fs_put_page(page, 1);
34403aab8f82SChao Yu 	f2fs_write_failed(mapping, pos + len);
3441a2e2e76bSChao Yu 	if (drop_atomic)
34424d57b86dSChao Yu 		f2fs_drop_inmem_pages_all(sbi, false);
34433aab8f82SChao Yu 	return err;
3444eb47b800SJaegeuk Kim }
3445eb47b800SJaegeuk Kim 
3446a1dd3c13SJaegeuk Kim static int f2fs_write_end(struct file *file,
3447a1dd3c13SJaegeuk Kim 			struct address_space *mapping,
3448a1dd3c13SJaegeuk Kim 			loff_t pos, unsigned len, unsigned copied,
3449a1dd3c13SJaegeuk Kim 			struct page *page, void *fsdata)
3450a1dd3c13SJaegeuk Kim {
3451a1dd3c13SJaegeuk Kim 	struct inode *inode = page->mapping->host;
3452a1dd3c13SJaegeuk Kim 
3453dfb2bf38SChao Yu 	trace_f2fs_write_end(inode, pos, len, copied);
3454dfb2bf38SChao Yu 
3455649d7df2SJaegeuk Kim 	/*
3456649d7df2SJaegeuk Kim 	 * This should be come from len == PAGE_SIZE, and we expect copied
3457649d7df2SJaegeuk Kim 	 * should be PAGE_SIZE. Otherwise, we treat it with zero copied and
3458649d7df2SJaegeuk Kim 	 * let generic_perform_write() try to copy data again through copied=0.
3459649d7df2SJaegeuk Kim 	 */
3460649d7df2SJaegeuk Kim 	if (!PageUptodate(page)) {
3461746e2403SYunlei He 		if (unlikely(copied != len))
3462649d7df2SJaegeuk Kim 			copied = 0;
3463649d7df2SJaegeuk Kim 		else
3464649d7df2SJaegeuk Kim 			SetPageUptodate(page);
3465649d7df2SJaegeuk Kim 	}
34664c8ff709SChao Yu 
34674c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
34684c8ff709SChao Yu 	/* overwrite compressed file */
34694c8ff709SChao Yu 	if (f2fs_compressed_file(inode) && fsdata) {
34704c8ff709SChao Yu 		f2fs_compress_write_end(inode, fsdata, page->index, copied);
34714c8ff709SChao Yu 		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3472944dd22eSChao Yu 
3473944dd22eSChao Yu 		if (pos + copied > i_size_read(inode) &&
3474944dd22eSChao Yu 				!f2fs_verity_in_progress(inode))
3475944dd22eSChao Yu 			f2fs_i_size_write(inode, pos + copied);
34764c8ff709SChao Yu 		return copied;
34774c8ff709SChao Yu 	}
34784c8ff709SChao Yu #endif
34794c8ff709SChao Yu 
3480649d7df2SJaegeuk Kim 	if (!copied)
3481649d7df2SJaegeuk Kim 		goto unlock_out;
3482649d7df2SJaegeuk Kim 
3483a1dd3c13SJaegeuk Kim 	set_page_dirty(page);
3484a1dd3c13SJaegeuk Kim 
348595ae251fSEric Biggers 	if (pos + copied > i_size_read(inode) &&
348695ae251fSEric Biggers 	    !f2fs_verity_in_progress(inode))
3487fc9581c8SJaegeuk Kim 		f2fs_i_size_write(inode, pos + copied);
3488649d7df2SJaegeuk Kim unlock_out:
34893024c9a1SChao Yu 	f2fs_put_page(page, 1);
3490d0239e1bSJaegeuk Kim 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3491a1dd3c13SJaegeuk Kim 	return copied;
3492a1dd3c13SJaegeuk Kim }
3493a1dd3c13SJaegeuk Kim 
34946f673763SOmar Sandoval static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
34956f673763SOmar Sandoval 			   loff_t offset)
3496944fcfc1SJaegeuk Kim {
34978a56dd96SJaegeuk Kim 	unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
34988a56dd96SJaegeuk Kim 	unsigned blkbits = i_blkbits;
34998a56dd96SJaegeuk Kim 	unsigned blocksize_mask = (1 << blkbits) - 1;
35008a56dd96SJaegeuk Kim 	unsigned long align = offset | iov_iter_alignment(iter);
35018a56dd96SJaegeuk Kim 	struct block_device *bdev = inode->i_sb->s_bdev;
3502944fcfc1SJaegeuk Kim 
350320d0a107SGabriel Krisman Bertazi 	if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
350420d0a107SGabriel Krisman Bertazi 		return 1;
350520d0a107SGabriel Krisman Bertazi 
35068a56dd96SJaegeuk Kim 	if (align & blocksize_mask) {
35078a56dd96SJaegeuk Kim 		if (bdev)
35088a56dd96SJaegeuk Kim 			blkbits = blksize_bits(bdev_logical_block_size(bdev));
35098a56dd96SJaegeuk Kim 		blocksize_mask = (1 << blkbits) - 1;
35108a56dd96SJaegeuk Kim 		if (align & blocksize_mask)
3511944fcfc1SJaegeuk Kim 			return -EINVAL;
35128a56dd96SJaegeuk Kim 		return 1;
35138a56dd96SJaegeuk Kim 	}
3514944fcfc1SJaegeuk Kim 	return 0;
3515944fcfc1SJaegeuk Kim }
3516944fcfc1SJaegeuk Kim 
351702b16d0aSChao Yu static void f2fs_dio_end_io(struct bio *bio)
351802b16d0aSChao Yu {
351902b16d0aSChao Yu 	struct f2fs_private_dio *dio = bio->bi_private;
352002b16d0aSChao Yu 
352102b16d0aSChao Yu 	dec_page_count(F2FS_I_SB(dio->inode),
352202b16d0aSChao Yu 			dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
352302b16d0aSChao Yu 
352402b16d0aSChao Yu 	bio->bi_private = dio->orig_private;
352502b16d0aSChao Yu 	bio->bi_end_io = dio->orig_end_io;
352602b16d0aSChao Yu 
3527c8eb7024SChao Yu 	kfree(dio);
352802b16d0aSChao Yu 
352902b16d0aSChao Yu 	bio_endio(bio);
353002b16d0aSChao Yu }
353102b16d0aSChao Yu 
353202b16d0aSChao Yu static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
353302b16d0aSChao Yu 							loff_t file_offset)
353402b16d0aSChao Yu {
353502b16d0aSChao Yu 	struct f2fs_private_dio *dio;
353602b16d0aSChao Yu 	bool write = (bio_op(bio) == REQ_OP_WRITE);
353702b16d0aSChao Yu 
353802b16d0aSChao Yu 	dio = f2fs_kzalloc(F2FS_I_SB(inode),
353902b16d0aSChao Yu 			sizeof(struct f2fs_private_dio), GFP_NOFS);
35408e114038SYueHaibing 	if (!dio)
354102b16d0aSChao Yu 		goto out;
354202b16d0aSChao Yu 
354302b16d0aSChao Yu 	dio->inode = inode;
354402b16d0aSChao Yu 	dio->orig_end_io = bio->bi_end_io;
354502b16d0aSChao Yu 	dio->orig_private = bio->bi_private;
354602b16d0aSChao Yu 	dio->write = write;
354702b16d0aSChao Yu 
354802b16d0aSChao Yu 	bio->bi_end_io = f2fs_dio_end_io;
354902b16d0aSChao Yu 	bio->bi_private = dio;
355002b16d0aSChao Yu 
355102b16d0aSChao Yu 	inc_page_count(F2FS_I_SB(inode),
355202b16d0aSChao Yu 			write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
355302b16d0aSChao Yu 
355402b16d0aSChao Yu 	submit_bio(bio);
355502b16d0aSChao Yu 	return;
355602b16d0aSChao Yu out:
355702b16d0aSChao Yu 	bio->bi_status = BLK_STS_IOERR;
355802b16d0aSChao Yu 	bio_endio(bio);
355902b16d0aSChao Yu }
356002b16d0aSChao Yu 
3561c8b8e32dSChristoph Hellwig static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3562eb47b800SJaegeuk Kim {
3563b439b103SJaegeuk Kim 	struct address_space *mapping = iocb->ki_filp->f_mapping;
35643aab8f82SChao Yu 	struct inode *inode = mapping->host;
35650cdd3195SHyunchul Lee 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3566f847c699SChao Yu 	struct f2fs_inode_info *fi = F2FS_I(inode);
35673aab8f82SChao Yu 	size_t count = iov_iter_count(iter);
3568c8b8e32dSChristoph Hellwig 	loff_t offset = iocb->ki_pos;
356982e0a5aaSChao Yu 	int rw = iov_iter_rw(iter);
35703aab8f82SChao Yu 	int err;
35710cdd3195SHyunchul Lee 	enum rw_hint hint = iocb->ki_hint;
357263189b78SChao Yu 	int whint_mode = F2FS_OPTION(sbi).whint_mode;
3573f847c699SChao Yu 	bool do_opu;
3574944fcfc1SJaegeuk Kim 
3575b439b103SJaegeuk Kim 	err = check_direct_IO(inode, iter, offset);
3576b3d208f9SJaegeuk Kim 	if (err)
35778a56dd96SJaegeuk Kim 		return err < 0 ? err : 0;
35789ffe0fb5SHuajun Li 
3579f847c699SChao Yu 	if (f2fs_force_buffered_io(inode, iocb, iter))
358036abef4eSJaegeuk Kim 		return 0;
3581fcc85a4dSJaegeuk Kim 
3582f847c699SChao Yu 	do_opu = allow_outplace_dio(inode, iocb, iter);
3583f847c699SChao Yu 
35845302fb00SJaegeuk Kim 	trace_f2fs_direct_IO_enter(inode, offset, count, rw);
358570407fadSChao Yu 
35860cdd3195SHyunchul Lee 	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
35870cdd3195SHyunchul Lee 		iocb->ki_hint = WRITE_LIFE_NOT_SET;
35880cdd3195SHyunchul Lee 
3589b91050a8SHyunchul Lee 	if (iocb->ki_flags & IOCB_NOWAIT) {
3590f847c699SChao Yu 		if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
3591b91050a8SHyunchul Lee 			iocb->ki_hint = hint;
3592b91050a8SHyunchul Lee 			err = -EAGAIN;
3593b91050a8SHyunchul Lee 			goto out;
3594b91050a8SHyunchul Lee 		}
3595f847c699SChao Yu 		if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
3596f847c699SChao Yu 			up_read(&fi->i_gc_rwsem[rw]);
3597f847c699SChao Yu 			iocb->ki_hint = hint;
3598f847c699SChao Yu 			err = -EAGAIN;
3599f847c699SChao Yu 			goto out;
3600f847c699SChao Yu 		}
3601f847c699SChao Yu 	} else {
3602f847c699SChao Yu 		down_read(&fi->i_gc_rwsem[rw]);
3603f847c699SChao Yu 		if (do_opu)
3604f847c699SChao Yu 			down_read(&fi->i_gc_rwsem[READ]);
3605b91050a8SHyunchul Lee 	}
3606b91050a8SHyunchul Lee 
360702b16d0aSChao Yu 	err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
3608f9d6d059SChao Yu 			iter, rw == WRITE ? get_data_block_dio_write :
3609f9d6d059SChao Yu 			get_data_block_dio, NULL, f2fs_dio_submit_bio,
3610ad8d6a02SDongDongJu 			rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES :
3611ad8d6a02SDongDongJu 			DIO_SKIP_HOLES);
3612f847c699SChao Yu 
3613f847c699SChao Yu 	if (do_opu)
3614f847c699SChao Yu 		up_read(&fi->i_gc_rwsem[READ]);
3615f847c699SChao Yu 
3616f847c699SChao Yu 	up_read(&fi->i_gc_rwsem[rw]);
361782e0a5aaSChao Yu 
361882e0a5aaSChao Yu 	if (rw == WRITE) {
36190cdd3195SHyunchul Lee 		if (whint_mode == WHINT_MODE_OFF)
36200cdd3195SHyunchul Lee 			iocb->ki_hint = hint;
3621b0af6d49SChao Yu 		if (err > 0) {
3622b0af6d49SChao Yu 			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3623b0af6d49SChao Yu 									err);
3624f847c699SChao Yu 			if (!do_opu)
362591942321SJaegeuk Kim 				set_inode_flag(inode, FI_UPDATE_WRITE);
3626335cac8bSJack Qiu 		} else if (err == -EIOCBQUEUED) {
3627335cac8bSJack Qiu 			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3628335cac8bSJack Qiu 						count - iov_iter_count(iter));
3629b0af6d49SChao Yu 		} else if (err < 0) {
36303aab8f82SChao Yu 			f2fs_write_failed(mapping, offset + count);
36316bfc4919SJaegeuk Kim 		}
36328b83ac81SChao Yu 	} else {
36338b83ac81SChao Yu 		if (err > 0)
36348b83ac81SChao Yu 			f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
3635335cac8bSJack Qiu 		else if (err == -EIOCBQUEUED)
3636335cac8bSJack Qiu 			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_READ_IO,
3637335cac8bSJack Qiu 						count - iov_iter_count(iter));
3638b0af6d49SChao Yu 	}
363970407fadSChao Yu 
3640b91050a8SHyunchul Lee out:
36415302fb00SJaegeuk Kim 	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
364270407fadSChao Yu 
36433aab8f82SChao Yu 	return err;
3644eb47b800SJaegeuk Kim }
3645eb47b800SJaegeuk Kim 
3646487261f3SChao Yu void f2fs_invalidate_page(struct page *page, unsigned int offset,
3647d47992f8SLukas Czerner 							unsigned int length)
3648eb47b800SJaegeuk Kim {
3649eb47b800SJaegeuk Kim 	struct inode *inode = page->mapping->host;
3650487261f3SChao Yu 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3651a7ffdbe2SJaegeuk Kim 
3652487261f3SChao Yu 	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
365309cbfeafSKirill A. Shutemov 		(offset % PAGE_SIZE || length != PAGE_SIZE))
3654a7ffdbe2SJaegeuk Kim 		return;
3655a7ffdbe2SJaegeuk Kim 
3656487261f3SChao Yu 	if (PageDirty(page)) {
3657933439c8SChao Yu 		if (inode->i_ino == F2FS_META_INO(sbi)) {
3658487261f3SChao Yu 			dec_page_count(sbi, F2FS_DIRTY_META);
3659933439c8SChao Yu 		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
3660487261f3SChao Yu 			dec_page_count(sbi, F2FS_DIRTY_NODES);
3661933439c8SChao Yu 		} else {
3662a7ffdbe2SJaegeuk Kim 			inode_dec_dirty_pages(inode);
36634d57b86dSChao Yu 			f2fs_remove_dirty_inode(inode);
3664933439c8SChao Yu 		}
3665487261f3SChao Yu 	}
3666decd36b6SChao Yu 
36672baf0781SChao Yu 	clear_cold_data(page);
36682baf0781SChao Yu 
3669decd36b6SChao Yu 	if (IS_ATOMIC_WRITTEN_PAGE(page))
36704d57b86dSChao Yu 		return f2fs_drop_inmem_page(inode, page);
3671decd36b6SChao Yu 
3672240a5915SChao Yu 	f2fs_clear_page_private(page);
3673eb47b800SJaegeuk Kim }
3674eb47b800SJaegeuk Kim 
3675487261f3SChao Yu int f2fs_release_page(struct page *page, gfp_t wait)
3676eb47b800SJaegeuk Kim {
3677f68daeebSJaegeuk Kim 	/* If this is dirty page, keep PagePrivate */
3678f68daeebSJaegeuk Kim 	if (PageDirty(page))
3679f68daeebSJaegeuk Kim 		return 0;
3680f68daeebSJaegeuk Kim 
3681decd36b6SChao Yu 	/* This is atomic written page, keep Private */
3682decd36b6SChao Yu 	if (IS_ATOMIC_WRITTEN_PAGE(page))
3683decd36b6SChao Yu 		return 0;
3684decd36b6SChao Yu 
36852baf0781SChao Yu 	clear_cold_data(page);
3686240a5915SChao Yu 	f2fs_clear_page_private(page);
3687c3850aa1SJaegeuk Kim 	return 1;
3688eb47b800SJaegeuk Kim }
3689eb47b800SJaegeuk Kim 
3690eb47b800SJaegeuk Kim static int f2fs_set_data_page_dirty(struct page *page)
3691eb47b800SJaegeuk Kim {
36924969c06aSJaegeuk Kim 	struct inode *inode = page_file_mapping(page)->host;
3693eb47b800SJaegeuk Kim 
369426c6b887SJaegeuk Kim 	trace_f2fs_set_page_dirty(page, DATA);
369526c6b887SJaegeuk Kim 
3696237c0790SJaegeuk Kim 	if (!PageUptodate(page))
3697eb47b800SJaegeuk Kim 		SetPageUptodate(page);
36984969c06aSJaegeuk Kim 	if (PageSwapCache(page))
36994969c06aSJaegeuk Kim 		return __set_page_dirty_nobuffers(page);
370034ba94baSJaegeuk Kim 
37015fe45743SChao Yu 	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
3702decd36b6SChao Yu 		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
37034d57b86dSChao Yu 			f2fs_register_inmem_page(inode, page);
370434ba94baSJaegeuk Kim 			return 1;
370534ba94baSJaegeuk Kim 		}
3706decd36b6SChao Yu 		/*
3707decd36b6SChao Yu 		 * Previously, this page has been registered, we just
3708decd36b6SChao Yu 		 * return here.
3709decd36b6SChao Yu 		 */
3710decd36b6SChao Yu 		return 0;
3711decd36b6SChao Yu 	}
371234ba94baSJaegeuk Kim 
3713eb47b800SJaegeuk Kim 	if (!PageDirty(page)) {
3714b87078adSJaegeuk Kim 		__set_page_dirty_nobuffers(page);
37154d57b86dSChao Yu 		f2fs_update_dirty_page(inode, page);
3716eb47b800SJaegeuk Kim 		return 1;
3717eb47b800SJaegeuk Kim 	}
3718eb47b800SJaegeuk Kim 	return 0;
3719eb47b800SJaegeuk Kim }
3720eb47b800SJaegeuk Kim 
3721c1c63387SChao Yu 
3722c1c63387SChao Yu static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
3723c1c63387SChao Yu {
3724c1c63387SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
3725c1c63387SChao Yu 	struct dnode_of_data dn;
3726c1c63387SChao Yu 	sector_t start_idx, blknr = 0;
3727c1c63387SChao Yu 	int ret;
3728c1c63387SChao Yu 
3729c1c63387SChao Yu 	start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
3730c1c63387SChao Yu 
3731c1c63387SChao Yu 	set_new_dnode(&dn, inode, NULL, NULL, 0);
3732c1c63387SChao Yu 	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
3733c1c63387SChao Yu 	if (ret)
3734c1c63387SChao Yu 		return 0;
3735c1c63387SChao Yu 
3736c1c63387SChao Yu 	if (dn.data_blkaddr != COMPRESS_ADDR) {
3737c1c63387SChao Yu 		dn.ofs_in_node += block - start_idx;
3738c1c63387SChao Yu 		blknr = f2fs_data_blkaddr(&dn);
3739c1c63387SChao Yu 		if (!__is_valid_data_blkaddr(blknr))
3740c1c63387SChao Yu 			blknr = 0;
3741c1c63387SChao Yu 	}
3742c1c63387SChao Yu 
3743c1c63387SChao Yu 	f2fs_put_dnode(&dn);
3744c1c63387SChao Yu 	return blknr;
3745c1c63387SChao Yu #else
3746250e84d7SChao Yu 	return 0;
3747c1c63387SChao Yu #endif
3748c1c63387SChao Yu }
3749c1c63387SChao Yu 
3750c1c63387SChao Yu 
3751c01e54b7SJaegeuk Kim static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
3752c01e54b7SJaegeuk Kim {
3753454ae7e5SChao Yu 	struct inode *inode = mapping->host;
3754b79b0a31SChao Yu 	sector_t blknr = 0;
3755454ae7e5SChao Yu 
37561d373a0eSJaegeuk Kim 	if (f2fs_has_inline_data(inode))
3757b79b0a31SChao Yu 		goto out;
37581d373a0eSJaegeuk Kim 
37591d373a0eSJaegeuk Kim 	/* make sure allocating whole blocks */
37601d373a0eSJaegeuk Kim 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
37611d373a0eSJaegeuk Kim 		filemap_write_and_wait(mapping);
37621d373a0eSJaegeuk Kim 
37634eda1682SDaeho Jeong 	/* Block number less than F2FS MAX BLOCKS */
37644eda1682SDaeho Jeong 	if (unlikely(block >= F2FS_I_SB(inode)->max_file_blocks))
37654eda1682SDaeho Jeong 		goto out;
3766c1c63387SChao Yu 
37674eda1682SDaeho Jeong 	if (f2fs_compressed_file(inode)) {
37684eda1682SDaeho Jeong 		blknr = f2fs_bmap_compress(inode, block);
37694eda1682SDaeho Jeong 	} else {
3770b876f4c9SJaegeuk Kim 		struct f2fs_map_blocks map;
3771b876f4c9SJaegeuk Kim 
3772b876f4c9SJaegeuk Kim 		memset(&map, 0, sizeof(map));
3773b876f4c9SJaegeuk Kim 		map.m_lblk = block;
3774b876f4c9SJaegeuk Kim 		map.m_len = 1;
3775b876f4c9SJaegeuk Kim 		map.m_next_pgofs = NULL;
3776b876f4c9SJaegeuk Kim 		map.m_seg_type = NO_CHECK_TYPE;
3777b876f4c9SJaegeuk Kim 
3778b876f4c9SJaegeuk Kim 		if (!f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP))
3779b876f4c9SJaegeuk Kim 			blknr = map.m_pblk;
37804eda1682SDaeho Jeong 	}
3781b79b0a31SChao Yu out:
3782b79b0a31SChao Yu 	trace_f2fs_bmap(inode, block, blknr);
3783b79b0a31SChao Yu 	return blknr;
3784429511cdSChao Yu }
3785429511cdSChao Yu 
37865b7a487cSWeichao Guo #ifdef CONFIG_MIGRATION
37875b7a487cSWeichao Guo #include <linux/migrate.h>
37885b7a487cSWeichao Guo 
37895b7a487cSWeichao Guo int f2fs_migrate_page(struct address_space *mapping,
37905b7a487cSWeichao Guo 		struct page *newpage, struct page *page, enum migrate_mode mode)
37915b7a487cSWeichao Guo {
37925b7a487cSWeichao Guo 	int rc, extra_count;
37935b7a487cSWeichao Guo 	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
37945b7a487cSWeichao Guo 	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
37955b7a487cSWeichao Guo 
37965b7a487cSWeichao Guo 	BUG_ON(PageWriteback(page));
37975b7a487cSWeichao Guo 
37985b7a487cSWeichao Guo 	/* migrating an atomic written page is safe with the inmem_lock hold */
3799ff1048e7SJaegeuk Kim 	if (atomic_written) {
3800ff1048e7SJaegeuk Kim 		if (mode != MIGRATE_SYNC)
3801ff1048e7SJaegeuk Kim 			return -EBUSY;
3802ff1048e7SJaegeuk Kim 		if (!mutex_trylock(&fi->inmem_lock))
38035b7a487cSWeichao Guo 			return -EAGAIN;
3804ff1048e7SJaegeuk Kim 	}
38055b7a487cSWeichao Guo 
3806240a5915SChao Yu 	/* one extra reference was held for atomic_write page */
3807240a5915SChao Yu 	extra_count = atomic_written ? 1 : 0;
38085b7a487cSWeichao Guo 	rc = migrate_page_move_mapping(mapping, newpage,
380937109694SKeith Busch 				page, extra_count);
38105b7a487cSWeichao Guo 	if (rc != MIGRATEPAGE_SUCCESS) {
38115b7a487cSWeichao Guo 		if (atomic_written)
38125b7a487cSWeichao Guo 			mutex_unlock(&fi->inmem_lock);
38135b7a487cSWeichao Guo 		return rc;
38145b7a487cSWeichao Guo 	}
38155b7a487cSWeichao Guo 
38165b7a487cSWeichao Guo 	if (atomic_written) {
38175b7a487cSWeichao Guo 		struct inmem_pages *cur;
38185b7a487cSWeichao Guo 		list_for_each_entry(cur, &fi->inmem_pages, list)
38195b7a487cSWeichao Guo 			if (cur->page == page) {
38205b7a487cSWeichao Guo 				cur->page = newpage;
38215b7a487cSWeichao Guo 				break;
38225b7a487cSWeichao Guo 			}
38235b7a487cSWeichao Guo 		mutex_unlock(&fi->inmem_lock);
38245b7a487cSWeichao Guo 		put_page(page);
38255b7a487cSWeichao Guo 		get_page(newpage);
38265b7a487cSWeichao Guo 	}
38275b7a487cSWeichao Guo 
3828240a5915SChao Yu 	if (PagePrivate(page)) {
3829240a5915SChao Yu 		f2fs_set_page_private(newpage, page_private(page));
3830240a5915SChao Yu 		f2fs_clear_page_private(page);
3831240a5915SChao Yu 	}
38325b7a487cSWeichao Guo 
38332916ecc0SJérôme Glisse 	if (mode != MIGRATE_SYNC_NO_COPY)
38345b7a487cSWeichao Guo 		migrate_page_copy(newpage, page);
38352916ecc0SJérôme Glisse 	else
38362916ecc0SJérôme Glisse 		migrate_page_states(newpage, page);
38375b7a487cSWeichao Guo 
38385b7a487cSWeichao Guo 	return MIGRATEPAGE_SUCCESS;
38395b7a487cSWeichao Guo }
38405b7a487cSWeichao Guo #endif
38415b7a487cSWeichao Guo 
38424969c06aSJaegeuk Kim #ifdef CONFIG_SWAP
3843af4b6b8eSChao Yu static int check_swap_activate_fast(struct swap_info_struct *sis,
3844af4b6b8eSChao Yu 				struct file *swap_file, sector_t *span)
3845af4b6b8eSChao Yu {
3846af4b6b8eSChao Yu 	struct address_space *mapping = swap_file->f_mapping;
3847af4b6b8eSChao Yu 	struct inode *inode = mapping->host;
3848af4b6b8eSChao Yu 	sector_t cur_lblock;
3849af4b6b8eSChao Yu 	sector_t last_lblock;
3850af4b6b8eSChao Yu 	sector_t pblock;
3851af4b6b8eSChao Yu 	sector_t lowest_pblock = -1;
3852af4b6b8eSChao Yu 	sector_t highest_pblock = 0;
3853af4b6b8eSChao Yu 	int nr_extents = 0;
3854af4b6b8eSChao Yu 	unsigned long nr_pblocks;
3855963ba7f9SJaegeuk Kim 	u64 len;
3856af4b6b8eSChao Yu 	int ret;
3857af4b6b8eSChao Yu 
3858af4b6b8eSChao Yu 	/*
3859af4b6b8eSChao Yu 	 * Map all the blocks into the extent list.  This code doesn't try
3860af4b6b8eSChao Yu 	 * to be very smart.
3861af4b6b8eSChao Yu 	 */
3862af4b6b8eSChao Yu 	cur_lblock = 0;
38636cbfcab5SJaegeuk Kim 	last_lblock = bytes_to_blks(inode, i_size_read(inode));
3864af4b6b8eSChao Yu 	len = i_size_read(inode);
3865af4b6b8eSChao Yu 
3866af4b6b8eSChao Yu 	while (cur_lblock <= last_lblock && cur_lblock < sis->max) {
3867b876f4c9SJaegeuk Kim 		struct f2fs_map_blocks map;
3868af4b6b8eSChao Yu 		pgoff_t next_pgofs;
3869af4b6b8eSChao Yu 
3870af4b6b8eSChao Yu 		cond_resched();
3871af4b6b8eSChao Yu 
3872b876f4c9SJaegeuk Kim 		memset(&map, 0, sizeof(map));
3873b876f4c9SJaegeuk Kim 		map.m_lblk = cur_lblock;
3874b876f4c9SJaegeuk Kim 		map.m_len = bytes_to_blks(inode, len) - cur_lblock;
3875b876f4c9SJaegeuk Kim 		map.m_next_pgofs = &next_pgofs;
3876b876f4c9SJaegeuk Kim 		map.m_seg_type = NO_CHECK_TYPE;
3877af4b6b8eSChao Yu 
3878b876f4c9SJaegeuk Kim 		ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
3879af4b6b8eSChao Yu 		if (ret)
3880af4b6b8eSChao Yu 			goto err_out;
3881af4b6b8eSChao Yu 
3882af4b6b8eSChao Yu 		/* hole */
3883b876f4c9SJaegeuk Kim 		if (!(map.m_flags & F2FS_MAP_FLAGS))
3884af4b6b8eSChao Yu 			goto err_out;
3885af4b6b8eSChao Yu 
3886b876f4c9SJaegeuk Kim 		pblock = map.m_pblk;
3887b876f4c9SJaegeuk Kim 		nr_pblocks = map.m_len;
3888af4b6b8eSChao Yu 
3889af4b6b8eSChao Yu 		if (cur_lblock + nr_pblocks >= sis->max)
3890af4b6b8eSChao Yu 			nr_pblocks = sis->max - cur_lblock;
3891af4b6b8eSChao Yu 
3892af4b6b8eSChao Yu 		if (cur_lblock) {	/* exclude the header page */
3893af4b6b8eSChao Yu 			if (pblock < lowest_pblock)
3894af4b6b8eSChao Yu 				lowest_pblock = pblock;
3895af4b6b8eSChao Yu 			if (pblock + nr_pblocks - 1 > highest_pblock)
3896af4b6b8eSChao Yu 				highest_pblock = pblock + nr_pblocks - 1;
3897af4b6b8eSChao Yu 		}
3898af4b6b8eSChao Yu 
3899af4b6b8eSChao Yu 		/*
3900af4b6b8eSChao Yu 		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
3901af4b6b8eSChao Yu 		 */
3902af4b6b8eSChao Yu 		ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
3903af4b6b8eSChao Yu 		if (ret < 0)
3904af4b6b8eSChao Yu 			goto out;
3905af4b6b8eSChao Yu 		nr_extents += ret;
3906af4b6b8eSChao Yu 		cur_lblock += nr_pblocks;
3907af4b6b8eSChao Yu 	}
3908af4b6b8eSChao Yu 	ret = nr_extents;
3909af4b6b8eSChao Yu 	*span = 1 + highest_pblock - lowest_pblock;
3910af4b6b8eSChao Yu 	if (cur_lblock == 0)
3911af4b6b8eSChao Yu 		cur_lblock = 1;	/* force Empty message */
3912af4b6b8eSChao Yu 	sis->max = cur_lblock;
3913af4b6b8eSChao Yu 	sis->pages = cur_lblock - 1;
3914af4b6b8eSChao Yu 	sis->highest_bit = cur_lblock - 1;
3915af4b6b8eSChao Yu out:
3916af4b6b8eSChao Yu 	return ret;
3917af4b6b8eSChao Yu err_out:
3918af4b6b8eSChao Yu 	pr_err("swapon: swapfile has holes\n");
3919af4b6b8eSChao Yu 	return -EINVAL;
3920af4b6b8eSChao Yu }
3921af4b6b8eSChao Yu 
39224969c06aSJaegeuk Kim /* Copied from generic_swapfile_activate() to check any holes */
39233e5e479aSChao Yu static int check_swap_activate(struct swap_info_struct *sis,
39243e5e479aSChao Yu 				struct file *swap_file, sector_t *span)
39254969c06aSJaegeuk Kim {
39264969c06aSJaegeuk Kim 	struct address_space *mapping = swap_file->f_mapping;
39274969c06aSJaegeuk Kim 	struct inode *inode = mapping->host;
39284969c06aSJaegeuk Kim 	unsigned blocks_per_page;
39294969c06aSJaegeuk Kim 	unsigned long page_no;
39304969c06aSJaegeuk Kim 	sector_t probe_block;
39314969c06aSJaegeuk Kim 	sector_t last_block;
39324969c06aSJaegeuk Kim 	sector_t lowest_block = -1;
39334969c06aSJaegeuk Kim 	sector_t highest_block = 0;
39343e5e479aSChao Yu 	int nr_extents = 0;
39353e5e479aSChao Yu 	int ret;
39364969c06aSJaegeuk Kim 
3937af4b6b8eSChao Yu 	if (PAGE_SIZE == F2FS_BLKSIZE)
3938af4b6b8eSChao Yu 		return check_swap_activate_fast(sis, swap_file, span);
3939af4b6b8eSChao Yu 
394043b9d4b4SJaegeuk Kim 	blocks_per_page = bytes_to_blks(inode, PAGE_SIZE);
39414969c06aSJaegeuk Kim 
39424969c06aSJaegeuk Kim 	/*
39434969c06aSJaegeuk Kim 	 * Map all the blocks into the extent list.  This code doesn't try
39444969c06aSJaegeuk Kim 	 * to be very smart.
39454969c06aSJaegeuk Kim 	 */
39464969c06aSJaegeuk Kim 	probe_block = 0;
39474969c06aSJaegeuk Kim 	page_no = 0;
394843b9d4b4SJaegeuk Kim 	last_block = bytes_to_blks(inode, i_size_read(inode));
39493e5e479aSChao Yu 	while ((probe_block + blocks_per_page) <= last_block &&
39503e5e479aSChao Yu 			page_no < sis->max) {
39514969c06aSJaegeuk Kim 		unsigned block_in_page;
39524969c06aSJaegeuk Kim 		sector_t first_block;
395330460e1eSCarlos Maiolino 		sector_t block = 0;
395430460e1eSCarlos Maiolino 		int	 err = 0;
39554969c06aSJaegeuk Kim 
39564969c06aSJaegeuk Kim 		cond_resched();
39574969c06aSJaegeuk Kim 
395830460e1eSCarlos Maiolino 		block = probe_block;
395930460e1eSCarlos Maiolino 		err = bmap(inode, &block);
396030460e1eSCarlos Maiolino 		if (err || !block)
39614969c06aSJaegeuk Kim 			goto bad_bmap;
396230460e1eSCarlos Maiolino 		first_block = block;
39634969c06aSJaegeuk Kim 
39644969c06aSJaegeuk Kim 		/*
39654969c06aSJaegeuk Kim 		 * It must be PAGE_SIZE aligned on-disk
39664969c06aSJaegeuk Kim 		 */
39674969c06aSJaegeuk Kim 		if (first_block & (blocks_per_page - 1)) {
39684969c06aSJaegeuk Kim 			probe_block++;
39694969c06aSJaegeuk Kim 			goto reprobe;
39704969c06aSJaegeuk Kim 		}
39714969c06aSJaegeuk Kim 
39724969c06aSJaegeuk Kim 		for (block_in_page = 1; block_in_page < blocks_per_page;
39734969c06aSJaegeuk Kim 					block_in_page++) {
39744969c06aSJaegeuk Kim 
397530460e1eSCarlos Maiolino 			block = probe_block + block_in_page;
397630460e1eSCarlos Maiolino 			err = bmap(inode, &block);
397730460e1eSCarlos Maiolino 
397830460e1eSCarlos Maiolino 			if (err || !block)
39794969c06aSJaegeuk Kim 				goto bad_bmap;
398030460e1eSCarlos Maiolino 
39814969c06aSJaegeuk Kim 			if (block != first_block + block_in_page) {
39824969c06aSJaegeuk Kim 				/* Discontiguity */
39834969c06aSJaegeuk Kim 				probe_block++;
39844969c06aSJaegeuk Kim 				goto reprobe;
39854969c06aSJaegeuk Kim 			}
39864969c06aSJaegeuk Kim 		}
39874969c06aSJaegeuk Kim 
398843b9d4b4SJaegeuk Kim 		first_block >>= (PAGE_SHIFT - inode->i_blkbits);
39894969c06aSJaegeuk Kim 		if (page_no) {	/* exclude the header page */
39904969c06aSJaegeuk Kim 			if (first_block < lowest_block)
39914969c06aSJaegeuk Kim 				lowest_block = first_block;
39924969c06aSJaegeuk Kim 			if (first_block > highest_block)
39934969c06aSJaegeuk Kim 				highest_block = first_block;
39944969c06aSJaegeuk Kim 		}
39954969c06aSJaegeuk Kim 
39963e5e479aSChao Yu 		/*
39973e5e479aSChao Yu 		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
39983e5e479aSChao Yu 		 */
39993e5e479aSChao Yu 		ret = add_swap_extent(sis, page_no, 1, first_block);
40003e5e479aSChao Yu 		if (ret < 0)
40013e5e479aSChao Yu 			goto out;
40023e5e479aSChao Yu 		nr_extents += ret;
40034969c06aSJaegeuk Kim 		page_no++;
40044969c06aSJaegeuk Kim 		probe_block += blocks_per_page;
40054969c06aSJaegeuk Kim reprobe:
40064969c06aSJaegeuk Kim 		continue;
40074969c06aSJaegeuk Kim 	}
40083e5e479aSChao Yu 	ret = nr_extents;
40093e5e479aSChao Yu 	*span = 1 + highest_block - lowest_block;
40103e5e479aSChao Yu 	if (page_no == 0)
40113e5e479aSChao Yu 		page_no = 1;	/* force Empty message */
40123e5e479aSChao Yu 	sis->max = page_no;
40133e5e479aSChao Yu 	sis->pages = page_no - 1;
40143e5e479aSChao Yu 	sis->highest_bit = page_no - 1;
40153e5e479aSChao Yu out:
40163e5e479aSChao Yu 	return ret;
40174969c06aSJaegeuk Kim bad_bmap:
40184969c06aSJaegeuk Kim 	pr_err("swapon: swapfile has holes\n");
40194969c06aSJaegeuk Kim 	return -EINVAL;
40204969c06aSJaegeuk Kim }
40214969c06aSJaegeuk Kim 
40224969c06aSJaegeuk Kim static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
40234969c06aSJaegeuk Kim 				sector_t *span)
40244969c06aSJaegeuk Kim {
40254969c06aSJaegeuk Kim 	struct inode *inode = file_inode(file);
40264969c06aSJaegeuk Kim 	int ret;
40274969c06aSJaegeuk Kim 
40284969c06aSJaegeuk Kim 	if (!S_ISREG(inode->i_mode))
40294969c06aSJaegeuk Kim 		return -EINVAL;
40304969c06aSJaegeuk Kim 
40314969c06aSJaegeuk Kim 	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
40324969c06aSJaegeuk Kim 		return -EROFS;
40334969c06aSJaegeuk Kim 
40344969c06aSJaegeuk Kim 	ret = f2fs_convert_inline_inode(inode);
40354969c06aSJaegeuk Kim 	if (ret)
40364969c06aSJaegeuk Kim 		return ret;
40374969c06aSJaegeuk Kim 
403878134d03SDaeho Jeong 	if (!f2fs_disable_compressed_file(inode))
40394c8ff709SChao Yu 		return -EINVAL;
40404c8ff709SChao Yu 
40410b979f1bSChao Yu 	f2fs_precache_extents(inode);
40420b979f1bSChao Yu 
40433e5e479aSChao Yu 	ret = check_swap_activate(sis, file, span);
40443e5e479aSChao Yu 	if (ret < 0)
40454969c06aSJaegeuk Kim 		return ret;
40464969c06aSJaegeuk Kim 
40474969c06aSJaegeuk Kim 	set_inode_flag(inode, FI_PIN_FILE);
40484969c06aSJaegeuk Kim 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
40493e5e479aSChao Yu 	return ret;
40504969c06aSJaegeuk Kim }
40514969c06aSJaegeuk Kim 
40524969c06aSJaegeuk Kim static void f2fs_swap_deactivate(struct file *file)
40534969c06aSJaegeuk Kim {
40544969c06aSJaegeuk Kim 	struct inode *inode = file_inode(file);
40554969c06aSJaegeuk Kim 
40564969c06aSJaegeuk Kim 	clear_inode_flag(inode, FI_PIN_FILE);
40574969c06aSJaegeuk Kim }
40584969c06aSJaegeuk Kim #else
/* Stub used when swap-file support is compiled out (see #ifdef above). */
static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
				sector_t *span)
{
	return -EOPNOTSUPP;
}
40644969c06aSJaegeuk Kim 
/* Stub for the compiled-out swap configuration: nothing was pinned. */
static void f2fs_swap_deactivate(struct file *file)
{
}
40684969c06aSJaegeuk Kim #endif
40694969c06aSJaegeuk Kim 
/* address_space operations for regular-file data pages */
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readahead	= f2fs_readahead,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
	.swap_activate  = f2fs_swap_activate,
	.swap_deactivate = f2fs_swap_deactivate,
#ifdef CONFIG_MIGRATION
	.migratepage    = f2fs_migrate_page,
#endif
};
40886dbb1796SEric Biggers 
/*
 * Clear the PAGECACHE_TAG_DIRTY xarray mark for @page in its mapping,
 * under the IRQ-safe i_pages lock.  This only drops the mapping-level
 * dirty tag; it does not touch the page's own dirty flag.
 */
void f2fs_clear_page_cache_dirty_tag(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	__xa_clear_mark(&mapping->i_pages, page_index(page),
						PAGECACHE_TAG_DIRTY);
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}
4099aec2f729SChao Yu 
41006dbb1796SEric Biggers int __init f2fs_init_post_read_processing(void)
41016dbb1796SEric Biggers {
410295ae251fSEric Biggers 	bio_post_read_ctx_cache =
410395ae251fSEric Biggers 		kmem_cache_create("f2fs_bio_post_read_ctx",
410495ae251fSEric Biggers 				  sizeof(struct bio_post_read_ctx), 0, 0, NULL);
41056dbb1796SEric Biggers 	if (!bio_post_read_ctx_cache)
41066dbb1796SEric Biggers 		goto fail;
41076dbb1796SEric Biggers 	bio_post_read_ctx_pool =
41086dbb1796SEric Biggers 		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
41096dbb1796SEric Biggers 					 bio_post_read_ctx_cache);
41106dbb1796SEric Biggers 	if (!bio_post_read_ctx_pool)
41116dbb1796SEric Biggers 		goto fail_free_cache;
41126dbb1796SEric Biggers 	return 0;
41136dbb1796SEric Biggers 
41146dbb1796SEric Biggers fail_free_cache:
41156dbb1796SEric Biggers 	kmem_cache_destroy(bio_post_read_ctx_cache);
41166dbb1796SEric Biggers fail:
41176dbb1796SEric Biggers 	return -ENOMEM;
41186dbb1796SEric Biggers }
41196dbb1796SEric Biggers 
/* Tear down what f2fs_init_post_read_processing() set up: pool, then cache. */
void f2fs_destroy_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}
41250b20fcecSChao Yu 
41264c8ff709SChao Yu int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
41274c8ff709SChao Yu {
41284c8ff709SChao Yu 	if (!f2fs_sb_has_encrypt(sbi) &&
41294c8ff709SChao Yu 		!f2fs_sb_has_verity(sbi) &&
41304c8ff709SChao Yu 		!f2fs_sb_has_compression(sbi))
41314c8ff709SChao Yu 		return 0;
41324c8ff709SChao Yu 
41334c8ff709SChao Yu 	sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
41344c8ff709SChao Yu 						 WQ_UNBOUND | WQ_HIGHPRI,
41354c8ff709SChao Yu 						 num_online_cpus());
41364c8ff709SChao Yu 	if (!sbi->post_read_wq)
41374c8ff709SChao Yu 		return -ENOMEM;
41384c8ff709SChao Yu 	return 0;
41394c8ff709SChao Yu }
41404c8ff709SChao Yu 
/*
 * Destroy the post-read workqueue.  It may be NULL when none of the
 * features needing it were enabled (see f2fs_init_post_read_wq()).
 */
void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
{
	if (sbi->post_read_wq)
		destroy_workqueue(sbi->post_read_wq);
}
41464c8ff709SChao Yu 
41470b20fcecSChao Yu int __init f2fs_init_bio_entry_cache(void)
41480b20fcecSChao Yu {
414998510003SChao Yu 	bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
41500b20fcecSChao Yu 			sizeof(struct bio_entry));
41510b20fcecSChao Yu 	if (!bio_entry_slab)
41520b20fcecSChao Yu 		return -ENOMEM;
41530b20fcecSChao Yu 	return 0;
41540b20fcecSChao Yu }
41550b20fcecSChao Yu 
/* Release the bio_entry slab created by f2fs_init_bio_entry_cache(). */
void f2fs_destroy_bio_entry_cache(void)
{
	kmem_cache_destroy(bio_entry_slab);
}
4160