xref: /openbmc/linux/fs/f2fs/data.c (revision 2866fb16d67992195b0526d19e65acb6640fb87f)
17c1a000dSChao Yu // SPDX-License-Identifier: GPL-2.0
20a8165d7SJaegeuk Kim /*
3eb47b800SJaegeuk Kim  * fs/f2fs/data.c
4eb47b800SJaegeuk Kim  *
5eb47b800SJaegeuk Kim  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6eb47b800SJaegeuk Kim  *             http://www.samsung.com/
7eb47b800SJaegeuk Kim  */
8eb47b800SJaegeuk Kim #include <linux/fs.h>
9eb47b800SJaegeuk Kim #include <linux/f2fs_fs.h>
10eb47b800SJaegeuk Kim #include <linux/buffer_head.h>
11eb47b800SJaegeuk Kim #include <linux/mpage.h>
12eb47b800SJaegeuk Kim #include <linux/writeback.h>
13eb47b800SJaegeuk Kim #include <linux/backing-dev.h>
148f46dcaeSChao Yu #include <linux/pagevec.h>
15eb47b800SJaegeuk Kim #include <linux/blkdev.h>
16eb47b800SJaegeuk Kim #include <linux/bio.h>
17690e4a3eSGeert Uytterhoeven #include <linux/prefetch.h>
18e2e40f2cSChristoph Hellwig #include <linux/uio.h>
19f1e88660SJaegeuk Kim #include <linux/cleancache.h>
20174cd4b1SIngo Molnar #include <linux/sched/signal.h>
21eb47b800SJaegeuk Kim 
22eb47b800SJaegeuk Kim #include "f2fs.h"
23eb47b800SJaegeuk Kim #include "node.h"
24eb47b800SJaegeuk Kim #include "segment.h"
25db9f7c1aSJaegeuk Kim #include "trace.h"
26848753aaSNamjae Jeon #include <trace/events/f2fs.h>
27eb47b800SJaegeuk Kim 
/* number of bio_post_read_ctx objects preallocated for the mempool below */
286dbb1796SEric Biggers #define NUM_PREALLOC_POST_READ_CTXS	128
296dbb1796SEric Biggers 
/* slab cache and mempool backing per-bio read post-processing contexts */
306dbb1796SEric Biggers static struct kmem_cache *bio_post_read_ctx_cache;
316dbb1796SEric Biggers static mempool_t *bio_post_read_ctx_pool;
326dbb1796SEric Biggers 
/*
 * Return true if writeback of @page is guaranteed by checkpoint:
 * meta/node-inode pages, directory pages, atomic or quota regular-file
 * pages, and cold data.  Pages with no mapping are never CP-guaranteed.
 */
3336951b38SChao Yu static bool __is_cp_guaranteed(struct page *page)
3436951b38SChao Yu {
3536951b38SChao Yu 	struct address_space *mapping = page->mapping;
3636951b38SChao Yu 	struct inode *inode;
3736951b38SChao Yu 	struct f2fs_sb_info *sbi;
3836951b38SChao Yu 
3936951b38SChao Yu 	if (!mapping)
4036951b38SChao Yu 		return false;
4136951b38SChao Yu 
4236951b38SChao Yu 	inode = mapping->host;
4336951b38SChao Yu 	sbi = F2FS_I_SB(inode);
4436951b38SChao Yu 
4536951b38SChao Yu 	if (inode->i_ino == F2FS_META_INO(sbi) ||
4636951b38SChao Yu 			inode->i_ino ==  F2FS_NODE_INO(sbi) ||
4736951b38SChao Yu 			S_ISDIR(inode->i_mode) ||
48e7a4feb0SChao Yu 			(S_ISREG(inode->i_mode) &&
49af033b2aSChao Yu 			(f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
5036951b38SChao Yu 			is_cold_data(page))
5136951b38SChao Yu 		return true;
5236951b38SChao Yu 	return false;
5336951b38SChao Yu }
5436951b38SChao Yu 
/*
 * Classify @page for per-type read-IO accounting: F2FS_RD_META for the
 * meta inode, F2FS_RD_NODE for the node inode, F2FS_RD_DATA otherwise
 * (including pages with no mapping).
 */
555f9abab4SJaegeuk Kim static enum count_type __read_io_type(struct page *page)
565f9abab4SJaegeuk Kim {
575f9abab4SJaegeuk Kim 	struct address_space *mapping = page->mapping;
585f9abab4SJaegeuk Kim 
595f9abab4SJaegeuk Kim 	if (mapping) {
605f9abab4SJaegeuk Kim 		struct inode *inode = mapping->host;
615f9abab4SJaegeuk Kim 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
625f9abab4SJaegeuk Kim 
635f9abab4SJaegeuk Kim 		if (inode->i_ino == F2FS_META_INO(sbi))
645f9abab4SJaegeuk Kim 			return F2FS_RD_META;
655f9abab4SJaegeuk Kim 
665f9abab4SJaegeuk Kim 		if (inode->i_ino == F2FS_NODE_INO(sbi))
675f9abab4SJaegeuk Kim 			return F2FS_RD_NODE;
685f9abab4SJaegeuk Kim 	}
695f9abab4SJaegeuk Kim 	return F2FS_RD_DATA;
705f9abab4SJaegeuk Kim }
715f9abab4SJaegeuk Kim 
726dbb1796SEric Biggers /* postprocessing steps for read bios */
736dbb1796SEric Biggers enum bio_post_read_step {
746dbb1796SEric Biggers 	STEP_INITIAL = 0,
756dbb1796SEric Biggers 	STEP_DECRYPT,
766dbb1796SEric Biggers };
776dbb1796SEric Biggers 
/* per-bio state tracking which post-read steps remain to run */
786dbb1796SEric Biggers struct bio_post_read_ctx {
796dbb1796SEric Biggers 	struct bio *bio;		/* the read bio being post-processed */
806dbb1796SEric Biggers 	struct work_struct work;	/* for deferring a step to a workqueue */
816dbb1796SEric Biggers 	unsigned int cur_step;		/* current position in the step sequence */
826dbb1796SEric Biggers 	unsigned int enabled_steps;	/* bitmask of (1 << STEP_*) to perform */
836dbb1796SEric Biggers };
846dbb1796SEric Biggers 
/*
 * Finish every page of a completed read bio: mark pages uptodate on
 * success (clearing PG_error set by a failed post-read step otherwise),
 * drop the per-type read count, unlock the pages, release the post-read
 * ctx (if any) back to its mempool, and put the bio.
 */
856dbb1796SEric Biggers static void __read_end_io(struct bio *bio)
8693dfe2acSJaegeuk Kim {
876dbb1796SEric Biggers 	struct page *page;
886dbb1796SEric Biggers 	struct bio_vec *bv;
89f568849eSLinus Torvalds 	int i;
9093dfe2acSJaegeuk Kim 
916dbb1796SEric Biggers 	bio_for_each_segment_all(bv, bio, i) {
926dbb1796SEric Biggers 		page = bv->bv_page;
936dbb1796SEric Biggers 
946dbb1796SEric Biggers 		/* PG_error was set if any post_read step failed */
956dbb1796SEric Biggers 		if (bio->bi_status || PageError(page)) {
966dbb1796SEric Biggers 			ClearPageUptodate(page);
97fb7d70dbSJaegeuk Kim 			/* will re-read again later */
98fb7d70dbSJaegeuk Kim 			ClearPageError(page);
996dbb1796SEric Biggers 		} else {
1006dbb1796SEric Biggers 			SetPageUptodate(page);
1016dbb1796SEric Biggers 		}
1025f9abab4SJaegeuk Kim 		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
1036dbb1796SEric Biggers 		unlock_page(page);
1046dbb1796SEric Biggers 	}
1056dbb1796SEric Biggers 	if (bio->bi_private)
1066dbb1796SEric Biggers 		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
1076dbb1796SEric Biggers 	bio_put(bio);
1086dbb1796SEric Biggers }
1096dbb1796SEric Biggers 
1106dbb1796SEric Biggers static void bio_post_read_processing(struct bio_post_read_ctx *ctx);
1116dbb1796SEric Biggers 
/*
 * Workqueue callback for STEP_DECRYPT: decrypt all pages in ctx->bio,
 * then advance the context to its next post-read step.
 */
1126dbb1796SEric Biggers static void decrypt_work(struct work_struct *work)
1136dbb1796SEric Biggers {
1146dbb1796SEric Biggers 	struct bio_post_read_ctx *ctx =
1156dbb1796SEric Biggers 		container_of(work, struct bio_post_read_ctx, work);
1166dbb1796SEric Biggers 
1176dbb1796SEric Biggers 	fscrypt_decrypt_bio(ctx->bio);
1186dbb1796SEric Biggers 
1196dbb1796SEric Biggers 	bio_post_read_processing(ctx);
1206dbb1796SEric Biggers }
1216dbb1796SEric Biggers 
/*
 * Advance @ctx to its next enabled post-read step.  An enabled step is
 * deferred to the fscrypt workqueue (which re-enters here when done);
 * once no step remains, the bio is completed via __read_end_io().
 */
1226dbb1796SEric Biggers static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
1236dbb1796SEric Biggers {
1246dbb1796SEric Biggers 	switch (++ctx->cur_step) {
1256dbb1796SEric Biggers 	case STEP_DECRYPT:
1266dbb1796SEric Biggers 		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
1276dbb1796SEric Biggers 			INIT_WORK(&ctx->work, decrypt_work);
1286dbb1796SEric Biggers 			fscrypt_enqueue_decrypt_work(&ctx->work);
1296dbb1796SEric Biggers 			return;
1306dbb1796SEric Biggers 		}
1316dbb1796SEric Biggers 		ctx->cur_step++;
1326dbb1796SEric Biggers 		/* fall-through */
1336dbb1796SEric Biggers 	default:
1346dbb1796SEric Biggers 		__read_end_io(ctx->bio);
1356dbb1796SEric Biggers 	}
1366dbb1796SEric Biggers }
1376dbb1796SEric Biggers 
/* A bio needs post-processing iff it carries a ctx and completed cleanly. */
1386dbb1796SEric Biggers static bool f2fs_bio_post_read_required(struct bio *bio)
1396dbb1796SEric Biggers {
1406dbb1796SEric Biggers 	return bio->bi_private && !bio->bi_status;
1416dbb1796SEric Biggers }
1426dbb1796SEric Biggers 
/*
 * Read-bio completion handler.  Optionally injects FAULT_READ_IO errors,
 * then either kicks off post-read processing (e.g. decryption) from
 * STEP_INITIAL, or finishes the bio's pages directly.
 */
1436dbb1796SEric Biggers static void f2fs_read_end_io(struct bio *bio)
1446dbb1796SEric Biggers {
1456f5c2ed0SChao Yu 	if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)),
1466f5c2ed0SChao Yu 						FAULT_READ_IO)) {
1476f5c2ed0SChao Yu 		f2fs_show_injection_info(FAULT_READ_IO);
1484e4cbee9SChristoph Hellwig 		bio->bi_status = BLK_STS_IOERR;
14955523519SChao Yu 	}
1508b038c70SChao Yu 
1516dbb1796SEric Biggers 	if (f2fs_bio_post_read_required(bio)) {
1526dbb1796SEric Biggers 		struct bio_post_read_ctx *ctx = bio->bi_private;
1536dbb1796SEric Biggers 
1546dbb1796SEric Biggers 		ctx->cur_step = STEP_INITIAL;
1556dbb1796SEric Biggers 		bio_post_read_processing(ctx);
15612377024SChao Yu 		return;
15712377024SChao Yu 	}
15812377024SChao Yu 
1596dbb1796SEric Biggers 	__read_end_io(bio);
16093dfe2acSJaegeuk Kim }
16193dfe2acSJaegeuk Kim 
/*
 * Write-bio completion handler.  Optionally injects FAULT_WRITE_IO
 * errors; frees the dummy pages used to pad IO-size-aligned bios; on a
 * failed write marks the mapping with -EIO and, for checkpointed data,
 * stops checkpointing; drops per-type writeback counts, ends page
 * writeback, and wakes checkpoint waiters once no CP data is in flight.
 */
1624246a0b6SChristoph Hellwig static void f2fs_write_end_io(struct bio *bio)
16393dfe2acSJaegeuk Kim {
1641b1f559fSJaegeuk Kim 	struct f2fs_sb_info *sbi = bio->bi_private;
165f568849eSLinus Torvalds 	struct bio_vec *bvec;
166f568849eSLinus Torvalds 	int i;
16793dfe2acSJaegeuk Kim 
1686f5c2ed0SChao Yu 	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
1696f5c2ed0SChao Yu 		f2fs_show_injection_info(FAULT_WRITE_IO);
1706f5c2ed0SChao Yu 		bio->bi_status = BLK_STS_IOERR;
1716f5c2ed0SChao Yu 	}
1726f5c2ed0SChao Yu 
173f568849eSLinus Torvalds 	bio_for_each_segment_all(bvec, bio, i) {
17493dfe2acSJaegeuk Kim 		struct page *page = bvec->bv_page;
17536951b38SChao Yu 		enum count_type type = WB_DATA_TYPE(page);
17693dfe2acSJaegeuk Kim 
/* padding page added by __submit_bio(); return it to its mempool */
1770a595ebaSJaegeuk Kim 		if (IS_DUMMY_WRITTEN_PAGE(page)) {
1780a595ebaSJaegeuk Kim 			set_page_private(page, (unsigned long)NULL);
1790a595ebaSJaegeuk Kim 			ClearPagePrivate(page);
1800a595ebaSJaegeuk Kim 			unlock_page(page);
1810a595ebaSJaegeuk Kim 			mempool_free(page, sbi->write_io_dummy);
1820a595ebaSJaegeuk Kim 
1834e4cbee9SChristoph Hellwig 			if (unlikely(bio->bi_status))
1840a595ebaSJaegeuk Kim 				f2fs_stop_checkpoint(sbi, true);
1850a595ebaSJaegeuk Kim 			continue;
1860a595ebaSJaegeuk Kim 		}
1870a595ebaSJaegeuk Kim 
1880b81d077SJaegeuk Kim 		fscrypt_pullback_bio_page(&page, true);
1894375a336SJaegeuk Kim 
1904e4cbee9SChristoph Hellwig 		if (unlikely(bio->bi_status)) {
1915114a97aSMichal Hocko 			mapping_set_error(page->mapping, -EIO);
192b1ca321dSJaegeuk Kim 			if (type == F2FS_WB_CP_DATA)
19338f91ca8SJaegeuk Kim 				f2fs_stop_checkpoint(sbi, true);
19493dfe2acSJaegeuk Kim 		}
1957dff55d2SYunlei He 
1967dff55d2SYunlei He 		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
1977dff55d2SYunlei He 					page->index != nid_of_node(page));
1987dff55d2SYunlei He 
19936951b38SChao Yu 		dec_page_count(sbi, type);
20050fa53ecSChao Yu 		if (f2fs_in_warm_node_list(sbi, page))
20150fa53ecSChao Yu 			f2fs_del_fsync_node_entry(sbi, page);
20236951b38SChao Yu 		clear_cold_data(page);
20393dfe2acSJaegeuk Kim 		end_page_writeback(page);
204f568849eSLinus Torvalds 	}
20536951b38SChao Yu 	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
206f5730184SJaegeuk Kim 				wq_has_sleeper(&sbi->cp_wait))
20793dfe2acSJaegeuk Kim 		wake_up(&sbi->cp_wait);
20893dfe2acSJaegeuk Kim 
20993dfe2acSJaegeuk Kim 	bio_put(bio);
21093dfe2acSJaegeuk Kim }
21193dfe2acSJaegeuk Kim 
212940a6d34SGu Zheng /*
2133c62be17SJaegeuk Kim  * Return the block device that holds @blk_addr.  With multiple devices,
2143c62be17SJaegeuk Kim  * the matching FDEV entry is picked and the address is rebased to it;
 * otherwise the superblock's device is used.  When @bio is non-NULL it
 * is pointed at that device and its start sector is set accordingly.
 */
2153c62be17SJaegeuk Kim struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
2163c62be17SJaegeuk Kim 				block_t blk_addr, struct bio *bio)
2173c62be17SJaegeuk Kim {
2183c62be17SJaegeuk Kim 	struct block_device *bdev = sbi->sb->s_bdev;
2193c62be17SJaegeuk Kim 	int i;
2203c62be17SJaegeuk Kim 
2213c62be17SJaegeuk Kim 	for (i = 0; i < sbi->s_ndevs; i++) {
2223c62be17SJaegeuk Kim 		if (FDEV(i).start_blk <= blk_addr &&
2233c62be17SJaegeuk Kim 					FDEV(i).end_blk >= blk_addr) {
2243c62be17SJaegeuk Kim 			blk_addr -= FDEV(i).start_blk;
2253c62be17SJaegeuk Kim 			bdev = FDEV(i).bdev;
2263c62be17SJaegeuk Kim 			break;
2273c62be17SJaegeuk Kim 		}
2283c62be17SJaegeuk Kim 	}
2293c62be17SJaegeuk Kim 	if (bio) {
23074d46992SChristoph Hellwig 		bio_set_dev(bio, bdev);
2313c62be17SJaegeuk Kim 		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
2323c62be17SJaegeuk Kim 	}
2333c62be17SJaegeuk Kim 	return bdev;
2343c62be17SJaegeuk Kim }
2353c62be17SJaegeuk Kim 
/* Return the index of the device containing @blkaddr (0 if no match). */
2363c62be17SJaegeuk Kim int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
2373c62be17SJaegeuk Kim {
2383c62be17SJaegeuk Kim 	int i;
2393c62be17SJaegeuk Kim 
2403c62be17SJaegeuk Kim 	for (i = 0; i < sbi->s_ndevs; i++)
2413c62be17SJaegeuk Kim 		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
2423c62be17SJaegeuk Kim 			return i;
2433c62be17SJaegeuk Kim 	return 0;
2443c62be17SJaegeuk Kim }
2453c62be17SJaegeuk Kim 
/* Return true if @bio already targets the device that holds @blk_addr. */
2463c62be17SJaegeuk Kim static bool __same_bdev(struct f2fs_sb_info *sbi,
2473c62be17SJaegeuk Kim 				block_t blk_addr, struct bio *bio)
2483c62be17SJaegeuk Kim {
24974d46992SChristoph Hellwig 	struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
25074d46992SChristoph Hellwig 	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
2513c62be17SJaegeuk Kim }
2523c62be17SJaegeuk Kim 
2533c62be17SJaegeuk Kim /*
254940a6d34SGu Zheng  * Low-level block read/write IO operations.
255940a6d34SGu Zheng  */
/*
 * Allocate a bio of up to @npages targeting @blk_addr.  Reads get
 * f2fs_read_end_io; writes get f2fs_write_end_io, sbi as bi_private,
 * and a write hint derived from (type, temp).  When @wbc is given the
 * bio is associated with it for cgroup writeback accounting.
 */
256940a6d34SGu Zheng static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
257578c6478SYufen Yu 				struct writeback_control *wbc,
2580cdd3195SHyunchul Lee 				int npages, bool is_read,
2590cdd3195SHyunchul Lee 				enum page_type type, enum temp_type temp)
260940a6d34SGu Zheng {
261940a6d34SGu Zheng 	struct bio *bio;
262940a6d34SGu Zheng 
263d62fe971SChao Yu 	bio = f2fs_bio_alloc(sbi, npages, true);
264940a6d34SGu Zheng 
2653c62be17SJaegeuk Kim 	f2fs_target_device(sbi, blk_addr, bio);
2660cdd3195SHyunchul Lee 	if (is_read) {
2670cdd3195SHyunchul Lee 		bio->bi_end_io = f2fs_read_end_io;
2680cdd3195SHyunchul Lee 		bio->bi_private = NULL;
2690cdd3195SHyunchul Lee 	} else {
2700cdd3195SHyunchul Lee 		bio->bi_end_io = f2fs_write_end_io;
2710cdd3195SHyunchul Lee 		bio->bi_private = sbi;
2724d57b86dSChao Yu 		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, type, temp);
2730cdd3195SHyunchul Lee 	}
274578c6478SYufen Yu 	if (wbc)
275578c6478SYufen Yu 		wbc_init_bio(wbc, bio);
276940a6d34SGu Zheng 
277940a6d34SGu Zheng 	return bio;
278940a6d34SGu Zheng }
279940a6d34SGu Zheng 
/*
 * Trace and submit @bio.  For DATA/NODE writes with an IO-size that is
 * not yet block-aligned, pad the bio with locked dummy zero pages up to
 * the F2FS IO size boundary first; padding a NODE bio breaks the next
 * block address chain, so a checkpoint is forced via SBI_NEED_CP.
 */
2804fc29c1aSLinus Torvalds static inline void __submit_bio(struct f2fs_sb_info *sbi,
28119a5f5e2SJaegeuk Kim 				struct bio *bio, enum page_type type)
282f5730184SJaegeuk Kim {
2834fc29c1aSLinus Torvalds 	if (!is_read_io(bio_op(bio))) {
2840a595ebaSJaegeuk Kim 		unsigned int start;
2850a595ebaSJaegeuk Kim 
2860a595ebaSJaegeuk Kim 		if (type != DATA && type != NODE)
2870a595ebaSJaegeuk Kim 			goto submit_io;
2880a595ebaSJaegeuk Kim 
28966415ceeSYunlong Song 		if (test_opt(sbi, LFS) && current->plug)
2903bb09a0eSTiezhu Yang 			blk_finish_plug(current->plug);
2913bb09a0eSTiezhu Yang 
2920a595ebaSJaegeuk Kim 		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
2930a595ebaSJaegeuk Kim 		start %= F2FS_IO_SIZE(sbi);
2940a595ebaSJaegeuk Kim 
2950a595ebaSJaegeuk Kim 		if (start == 0)
2960a595ebaSJaegeuk Kim 			goto submit_io;
2970a595ebaSJaegeuk Kim 
2980a595ebaSJaegeuk Kim 		/* fill dummy pages */
2990a595ebaSJaegeuk Kim 		for (; start < F2FS_IO_SIZE(sbi); start++) {
3000a595ebaSJaegeuk Kim 			struct page *page =
3010a595ebaSJaegeuk Kim 				mempool_alloc(sbi->write_io_dummy,
3020a595ebaSJaegeuk Kim 					GFP_NOIO | __GFP_ZERO | __GFP_NOFAIL);
3030a595ebaSJaegeuk Kim 			f2fs_bug_on(sbi, !page);
3040a595ebaSJaegeuk Kim 
3050a595ebaSJaegeuk Kim 			SetPagePrivate(page);
3060a595ebaSJaegeuk Kim 			set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
3070a595ebaSJaegeuk Kim 			lock_page(page);
3080a595ebaSJaegeuk Kim 			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
3090a595ebaSJaegeuk Kim 				f2fs_bug_on(sbi, 1);
31019a5f5e2SJaegeuk Kim 		}
3110a595ebaSJaegeuk Kim 		/*
3120a595ebaSJaegeuk Kim 		 * In the NODE case, we lose next block address chain. So, we
3130a595ebaSJaegeuk Kim 		 * need to do checkpoint in f2fs_sync_file.
3140a595ebaSJaegeuk Kim 		 */
3150a595ebaSJaegeuk Kim 		if (type == NODE)
3160a595ebaSJaegeuk Kim 			set_sbi_flag(sbi, SBI_NEED_CP);
3170a595ebaSJaegeuk Kim 	}
3180a595ebaSJaegeuk Kim submit_io:
319554b5125SJaegeuk Kim 	if (is_read_io(bio_op(bio)))
320554b5125SJaegeuk Kim 		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
321554b5125SJaegeuk Kim 	else
322554b5125SJaegeuk Kim 		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
3234e49ea4aSMike Christie 	submit_bio(bio);
324f5730184SJaegeuk Kim }
325f5730184SJaegeuk Kim 
/*
 * Submit the bio accumulated in @io (if any) with the op/flags recorded
 * in io->fio, then clear it.  Callers in this file hold io->io_rwsem
 * for writing around this call.
 */
326458e6197SJaegeuk Kim static void __submit_merged_bio(struct f2fs_bio_info *io)
32793dfe2acSJaegeuk Kim {
328458e6197SJaegeuk Kim 	struct f2fs_io_info *fio = &io->fio;
32993dfe2acSJaegeuk Kim 
33093dfe2acSJaegeuk Kim 	if (!io->bio)
33193dfe2acSJaegeuk Kim 		return;
33293dfe2acSJaegeuk Kim 
33304d328deSMike Christie 	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
33404d328deSMike Christie 
335554b5125SJaegeuk Kim 	if (is_read_io(fio->op))
336554b5125SJaegeuk Kim 		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
337554b5125SJaegeuk Kim 	else
338554b5125SJaegeuk Kim 		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
339554b5125SJaegeuk Kim 
3404fc29c1aSLinus Torvalds 	__submit_bio(io->sbi, io->bio, fio->type);
34193dfe2acSJaegeuk Kim 	io->bio = NULL;
34293dfe2acSJaegeuk Kim }
34393dfe2acSJaegeuk Kim 
/*
 * Return true if the pending bio in @io contains a page matching any of
 * the given filters (@inode owner, exact @page, or node @ino).  With no
 * filters at all, any pending bio counts as a match.  For encrypted
 * pages the original (control) page is compared, not the bounce page.
 */
344bab475c5SChao Yu static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
345bab475c5SChao Yu 						struct page *page, nid_t ino)
3460fd785ebSChao Yu {
3470fd785ebSChao Yu 	struct bio_vec *bvec;
3480fd785ebSChao Yu 	struct page *target;
3490fd785ebSChao Yu 	int i;
3500fd785ebSChao Yu 
3510c3a5797SChao Yu 	if (!io->bio)
3520fd785ebSChao Yu 		return false;
3530c3a5797SChao Yu 
354bab475c5SChao Yu 	if (!inode && !page && !ino)
3550c3a5797SChao Yu 		return true;
3560fd785ebSChao Yu 
3570fd785ebSChao Yu 	bio_for_each_segment_all(bvec, io->bio, i) {
3580fd785ebSChao Yu 
3590b81d077SJaegeuk Kim 		if (bvec->bv_page->mapping)
3600fd785ebSChao Yu 			target = bvec->bv_page;
3610b81d077SJaegeuk Kim 		else
3620b81d077SJaegeuk Kim 			target = fscrypt_control_page(bvec->bv_page);
3630fd785ebSChao Yu 
3640c3a5797SChao Yu 		if (inode && inode == target->mapping->host)
3650c3a5797SChao Yu 			return true;
366bab475c5SChao Yu 		if (page && page == target)
367bab475c5SChao Yu 			return true;
3680c3a5797SChao Yu 		if (ino && ino == ino_of_node(target))
3690fd785ebSChao Yu 			return true;
3700fd785ebSChao Yu 	}
3710fd785ebSChao Yu 
3720fd785ebSChao Yu 	return false;
3730fd785ebSChao Yu }
3740fd785ebSChao Yu 
/*
 * Flush the merged write bio for (@type, @temp) under io_rwsem.  During
 * checkpoint, META_FLUSH requests are issued with REQ_META|REQ_PRIO|
 * REQ_SYNC plus PREFLUSH/FUA unless the NOBARRIER mount option is set.
 */
375b9109b0eSJaegeuk Kim static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
376a912b54dSJaegeuk Kim 				enum page_type type, enum temp_type temp)
37793dfe2acSJaegeuk Kim {
37893dfe2acSJaegeuk Kim 	enum page_type btype = PAGE_TYPE_OF_BIO(type);
379a912b54dSJaegeuk Kim 	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
38093dfe2acSJaegeuk Kim 
381df0f8dc0SChao Yu 	down_write(&io->io_rwsem);
382458e6197SJaegeuk Kim 
383458e6197SJaegeuk Kim 	/* change META to META_FLUSH in the checkpoint procedure */
384458e6197SJaegeuk Kim 	if (type >= META_FLUSH) {
385458e6197SJaegeuk Kim 		io->fio.type = META_FLUSH;
38604d328deSMike Christie 		io->fio.op = REQ_OP_WRITE;
3873adc5fcbSJan Kara 		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
38870fd7614SChristoph Hellwig 		if (!test_opt(sbi, NOBARRIER))
3897f54f51fSJaegeuk Kim 			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
390458e6197SJaegeuk Kim 	}
391458e6197SJaegeuk Kim 	__submit_merged_bio(io);
392df0f8dc0SChao Yu 	up_write(&io->io_rwsem);
39393dfe2acSJaegeuk Kim }
39493dfe2acSJaegeuk Kim 
/*
 * Flush merged write bios for @type across temperatures, either
 * unconditionally (@force) or only when the pending bio holds a page
 * matching @inode/@page/@ino.  Meta pages use the HOT temp only.
 */
395a912b54dSJaegeuk Kim static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
396bab475c5SChao Yu 				struct inode *inode, struct page *page,
397bab475c5SChao Yu 				nid_t ino, enum page_type type, bool force)
3980c3a5797SChao Yu {
399a912b54dSJaegeuk Kim 	enum temp_type temp;
4001e771e83SYunlong Song 	bool ret = true;
401a912b54dSJaegeuk Kim 
402a912b54dSJaegeuk Kim 	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
4031e771e83SYunlong Song 		if (!force)	{
4041e771e83SYunlong Song 			enum page_type btype = PAGE_TYPE_OF_BIO(type);
4051e771e83SYunlong Song 			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
406a912b54dSJaegeuk Kim 
4071e771e83SYunlong Song 			down_read(&io->io_rwsem);
4081e771e83SYunlong Song 			ret = __has_merged_page(io, inode, page, ino);
4091e771e83SYunlong Song 			up_read(&io->io_rwsem);
4101e771e83SYunlong Song 		}
4111e771e83SYunlong Song 		if (ret)
412a912b54dSJaegeuk Kim 			__f2fs_submit_merged_write(sbi, type, temp);
413a912b54dSJaegeuk Kim 
414a912b54dSJaegeuk Kim 		/* TODO: use HOT temp only for meta pages now. */
415a912b54dSJaegeuk Kim 		if (type >= META)
416a912b54dSJaegeuk Kim 			break;
417a912b54dSJaegeuk Kim 	}
4180c3a5797SChao Yu }
4190c3a5797SChao Yu 
/* Unconditionally flush all pending merged write bios of @type. */
420b9109b0eSJaegeuk Kim void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
421406657ddSChao Yu {
422a912b54dSJaegeuk Kim 	__submit_merged_write_cond(sbi, NULL, 0, 0, type, true);
42393dfe2acSJaegeuk Kim }
42493dfe2acSJaegeuk Kim 
/* Flush merged write bios of @type only if they hold a matching page. */
425b9109b0eSJaegeuk Kim void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
426bab475c5SChao Yu 				struct inode *inode, struct page *page,
427bab475c5SChao Yu 				nid_t ino, enum page_type type)
42893dfe2acSJaegeuk Kim {
429bab475c5SChao Yu 	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
43093dfe2acSJaegeuk Kim }
43193dfe2acSJaegeuk Kim 
/* Flush all pending merged write bios: data, node, then meta. */
432b9109b0eSJaegeuk Kim void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
433406657ddSChao Yu {
434b9109b0eSJaegeuk Kim 	f2fs_submit_merged_write(sbi, DATA);
435b9109b0eSJaegeuk Kim 	f2fs_submit_merged_write(sbi, NODE);
436b9109b0eSJaegeuk Kim 	f2fs_submit_merged_write(sbi, META);
437406657ddSChao Yu }
438406657ddSChao Yu 
43993dfe2acSJaegeuk Kim /*
44093dfe2acSJaegeuk Kim  * Fill the locked page with data located in the block address.
441771a9a71STomohiro Kusumi  * A caller needs to unlock the page on failure.
 *
 * Submits a standalone one-page bio (no merging) after validating the
 * target block address.  Returns 0 on success, -EFAULT on an invalid
 * block address or when the page cannot be added to the bio.
44293dfe2acSJaegeuk Kim  */
44305ca3632SJaegeuk Kim int f2fs_submit_page_bio(struct f2fs_io_info *fio)
44493dfe2acSJaegeuk Kim {
44593dfe2acSJaegeuk Kim 	struct bio *bio;
4460b81d077SJaegeuk Kim 	struct page *page = fio->encrypted_page ?
4470b81d077SJaegeuk Kim 			fio->encrypted_page : fio->page;
44893dfe2acSJaegeuk Kim 
449c9b60788SChao Yu 	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
450c9b60788SChao Yu 			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
451c9b60788SChao Yu 		return -EFAULT;
452c9b60788SChao Yu 
4532ace38e0SChao Yu 	trace_f2fs_submit_page_bio(page, fio);
45405ca3632SJaegeuk Kim 	f2fs_trace_ios(fio, 0);
45593dfe2acSJaegeuk Kim 
45693dfe2acSJaegeuk Kim 	/* Allocate a new bio */
457578c6478SYufen Yu 	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
4580cdd3195SHyunchul Lee 				1, is_read_io(fio->op), fio->type, fio->temp);
45993dfe2acSJaegeuk Kim 
46009cbfeafSKirill A. Shutemov 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
46193dfe2acSJaegeuk Kim 		bio_put(bio);
46293dfe2acSJaegeuk Kim 		return -EFAULT;
46393dfe2acSJaegeuk Kim 	}
46478efac53SChao Yu 
46578efac53SChao Yu 	if (fio->io_wbc && !is_read_io(fio->op))
46678efac53SChao Yu 		wbc_account_io(fio->io_wbc, page, PAGE_SIZE);
46778efac53SChao Yu 
46804d328deSMike Christie 	bio_set_op_attrs(bio, fio->op, fio->op_flags);
46993dfe2acSJaegeuk Kim 
4705f9abab4SJaegeuk Kim 	inc_page_count(fio->sbi, is_read_io(fio->op) ?
4715f9abab4SJaegeuk Kim 			__read_io_type(page): WB_DATA_TYPE(fio->page));
4724c58ed07SChao Yu 
4734c58ed07SChao Yu 	__submit_bio(fio->sbi, bio, fio->type);
47493dfe2acSJaegeuk Kim 	return 0;
47593dfe2acSJaegeuk Kim }
47693dfe2acSJaegeuk Kim 
/*
 * Add @fio's page to the per-(type, temp) merged write bio, submitting
 * the pending bio first when the new block is not contiguous, the
 * op/flags differ, or the target device changes.  With fio->in_list,
 * fios queued on io->io_list are drained in order.  Allocation at a
 * non-IO-size-aligned block sets fio->retry instead of allocating; the
 * whole walk runs under io->io_rwsem held for writing.
 */
477fe16efe6SChao Yu void f2fs_submit_page_write(struct f2fs_io_info *fio)
47893dfe2acSJaegeuk Kim {
47905ca3632SJaegeuk Kim 	struct f2fs_sb_info *sbi = fio->sbi;
480458e6197SJaegeuk Kim 	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
481a912b54dSJaegeuk Kim 	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
4824375a336SJaegeuk Kim 	struct page *bio_page;
48393dfe2acSJaegeuk Kim 
484b9109b0eSJaegeuk Kim 	f2fs_bug_on(sbi, is_read_io(fio->op));
48593dfe2acSJaegeuk Kim 
486fb830fc5SChao Yu 	down_write(&io->io_rwsem);
487fb830fc5SChao Yu next:
488fb830fc5SChao Yu 	if (fio->in_list) {
489fb830fc5SChao Yu 		spin_lock(&io->io_lock);
490fb830fc5SChao Yu 		if (list_empty(&io->io_list)) {
491fb830fc5SChao Yu 			spin_unlock(&io->io_lock);
492fe16efe6SChao Yu 			goto out;
493fb830fc5SChao Yu 		}
494fb830fc5SChao Yu 		fio = list_first_entry(&io->io_list,
495fb830fc5SChao Yu 						struct f2fs_io_info, list);
496fb830fc5SChao Yu 		list_del(&fio->list);
497fb830fc5SChao Yu 		spin_unlock(&io->io_lock);
498fb830fc5SChao Yu 	}
49993dfe2acSJaegeuk Kim 
500e1da7872SChao Yu 	if (__is_valid_data_blkaddr(fio->old_blkaddr))
5010833721eSYunlei He 		verify_block_addr(fio, fio->old_blkaddr);
5020833721eSYunlei He 	verify_block_addr(fio, fio->new_blkaddr);
50393dfe2acSJaegeuk Kim 
50436951b38SChao Yu 	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
50536951b38SChao Yu 
506ebf7c522SThomas Meyer 	/* set submitted = true as a return value */
507ebf7c522SThomas Meyer 	fio->submitted = true;
508d68f735bSJaegeuk Kim 
50936951b38SChao Yu 	inc_page_count(sbi, WB_DATA_TYPE(bio_page));
51036951b38SChao Yu 
5117a9d7548SChao Yu 	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
5123c62be17SJaegeuk Kim 	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
5133c62be17SJaegeuk Kim 			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
514458e6197SJaegeuk Kim 		__submit_merged_bio(io);
51593dfe2acSJaegeuk Kim alloc_new:
51693dfe2acSJaegeuk Kim 	if (io->bio == NULL) {
5170a595ebaSJaegeuk Kim 		if ((fio->type == DATA || fio->type == NODE) &&
5180a595ebaSJaegeuk Kim 				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
5190a595ebaSJaegeuk Kim 			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
520fe16efe6SChao Yu 			fio->retry = true;
521fe16efe6SChao Yu 			goto skip;
5220a595ebaSJaegeuk Kim 		}
523578c6478SYufen Yu 		io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
5240cdd3195SHyunchul Lee 						BIO_MAX_PAGES, false,
5250cdd3195SHyunchul Lee 						fio->type, fio->temp);
526458e6197SJaegeuk Kim 		io->fio = *fio;
52793dfe2acSJaegeuk Kim 	}
52893dfe2acSJaegeuk Kim 
529a912b54dSJaegeuk Kim 	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
530458e6197SJaegeuk Kim 		__submit_merged_bio(io);
53193dfe2acSJaegeuk Kim 		goto alloc_new;
53293dfe2acSJaegeuk Kim 	}
53393dfe2acSJaegeuk Kim 
534578c6478SYufen Yu 	if (fio->io_wbc)
535578c6478SYufen Yu 		wbc_account_io(fio->io_wbc, bio_page, PAGE_SIZE);
536578c6478SYufen Yu 
5377a9d7548SChao Yu 	io->last_block_in_bio = fio->new_blkaddr;
53805ca3632SJaegeuk Kim 	f2fs_trace_ios(fio, 0);
539fb830fc5SChao Yu 
540fb830fc5SChao Yu 	trace_f2fs_submit_page_write(fio->page, fio);
541fe16efe6SChao Yu skip:
542fb830fc5SChao Yu 	if (fio->in_list)
543fb830fc5SChao Yu 		goto next;
544fe16efe6SChao Yu out:
5454354994fSDaniel Rosenberg 	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
5464354994fSDaniel Rosenberg 				f2fs_is_checkpoint_ready(sbi))
5475ce80586SJaegeuk Kim 		__submit_merged_bio(io);
548df0f8dc0SChao Yu 	up_write(&io->io_rwsem);
54993dfe2acSJaegeuk Kim }
55093dfe2acSJaegeuk Kim 
/*
 * Allocate a read bio of up to @nr_pages targeting @blkaddr, with
 * f2fs_read_end_io completion.  For encrypted files a post-read ctx
 * enabling STEP_DECRYPT is attached via bi_private.  Returns the bio
 * or an ERR_PTR (-EFAULT on a bad address, -ENOMEM on allocation
 * failure).
 */
55113ba41e3SJaegeuk Kim static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
552e2e59414SJaegeuk Kim 					unsigned nr_pages, unsigned op_flag)
55313ba41e3SJaegeuk Kim {
55413ba41e3SJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
55513ba41e3SJaegeuk Kim 	struct bio *bio;
5566dbb1796SEric Biggers 	struct bio_post_read_ctx *ctx;
5576dbb1796SEric Biggers 	unsigned int post_read_steps = 0;
55813ba41e3SJaegeuk Kim 
55991291e99SChao Yu 	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
56091291e99SChao Yu 		return ERR_PTR(-EFAULT);
56191291e99SChao Yu 
5626dbb1796SEric Biggers 	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
5636dbb1796SEric Biggers 	if (!bio)
5646dbb1796SEric Biggers 		return ERR_PTR(-ENOMEM);
5656dbb1796SEric Biggers 	f2fs_target_device(sbi, blkaddr, bio);
5666dbb1796SEric Biggers 	bio->bi_end_io = f2fs_read_end_io;
567e2e59414SJaegeuk Kim 	bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
5686dbb1796SEric Biggers 
5696dbb1796SEric Biggers 	if (f2fs_encrypted_file(inode))
5706dbb1796SEric Biggers 		post_read_steps |= 1 << STEP_DECRYPT;
5716dbb1796SEric Biggers 	if (post_read_steps) {
5726dbb1796SEric Biggers 		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
5736dbb1796SEric Biggers 		if (!ctx) {
5746dbb1796SEric Biggers 			bio_put(bio);
5756dbb1796SEric Biggers 			return ERR_PTR(-ENOMEM);
5766dbb1796SEric Biggers 		}
5776dbb1796SEric Biggers 		ctx->bio = bio;
5786dbb1796SEric Biggers 		ctx->enabled_steps = post_read_steps;
5796dbb1796SEric Biggers 		bio->bi_private = ctx;
58013ba41e3SJaegeuk Kim 	}
58113ba41e3SJaegeuk Kim 
58213ba41e3SJaegeuk Kim 	return bio;
58313ba41e3SJaegeuk Kim }
58413ba41e3SJaegeuk Kim 
58513ba41e3SJaegeuk Kim /* Submit a one-page read bio for @blkaddr; encrypted files are handled via the post-read machinery */
58613ba41e3SJaegeuk Kim static int f2fs_submit_page_read(struct inode *inode, struct page *page,
58713ba41e3SJaegeuk Kim 							block_t blkaddr)
58813ba41e3SJaegeuk Kim {
589e2e59414SJaegeuk Kim 	struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0);
59013ba41e3SJaegeuk Kim 
59113ba41e3SJaegeuk Kim 	if (IS_ERR(bio))
59213ba41e3SJaegeuk Kim 		return PTR_ERR(bio);
59313ba41e3SJaegeuk Kim 
5940ded69f6SJaegeuk Kim 	/* wait for GCed page writeback via META_MAPPING */
5950ded69f6SJaegeuk Kim 	f2fs_wait_on_block_writeback(inode, blkaddr);
5960ded69f6SJaegeuk Kim 
59713ba41e3SJaegeuk Kim 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
59813ba41e3SJaegeuk Kim 		bio_put(bio);
59913ba41e3SJaegeuk Kim 		return -EFAULT;
60013ba41e3SJaegeuk Kim 	}
601fb7d70dbSJaegeuk Kim 	ClearPageError(page);
6025f9abab4SJaegeuk Kim 	inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
60313ba41e3SJaegeuk Kim 	__submit_bio(F2FS_I_SB(inode), bio, DATA);
60413ba41e3SJaegeuk Kim 	return 0;
60513ba41e3SJaegeuk Kim }
60613ba41e3SJaegeuk Kim 
/*
 * Store dn->data_blkaddr into dn's slot of the node page, offset by the
 * extra attribute size when the node page is an inode with extra attrs.
 */
60746008c6dSChao Yu static void __set_data_blkaddr(struct dnode_of_data *dn)
60846008c6dSChao Yu {
60946008c6dSChao Yu 	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
61046008c6dSChao Yu 	__le32 *addr_array;
6117a2af766SChao Yu 	int base = 0;
6127a2af766SChao Yu 
6137a2af766SChao Yu 	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
6147a2af766SChao Yu 		base = get_extra_isize(dn->inode);
61546008c6dSChao Yu 
61646008c6dSChao Yu 	/* Get physical address of data block */
61746008c6dSChao Yu 	addr_array = blkaddr_in_node(rn);
6187a2af766SChao Yu 	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
61946008c6dSChao Yu }
62046008c6dSChao Yu 
62193dfe2acSJaegeuk Kim /*
622eb47b800SJaegeuk Kim  * Lock ordering for the change of data block address:
623eb47b800SJaegeuk Kim  * ->data_page
624eb47b800SJaegeuk Kim  *  ->node_page
625eb47b800SJaegeuk Kim  *    update block addresses in the node page
626eb47b800SJaegeuk Kim  */
/*
 * Publish dn->data_blkaddr into the node page: wait for any writeback
 * of the node page, update the slot, and mark the node page dirty
 * (recording in dn->node_changed whether it was newly dirtied).
 */
6274d57b86dSChao Yu void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
628eb47b800SJaegeuk Kim {
62946008c6dSChao Yu 	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
63046008c6dSChao Yu 	__set_data_blkaddr(dn);
63146008c6dSChao Yu 	if (set_page_dirty(dn->node_page))
63293bae099SJaegeuk Kim 		dn->node_changed = true;
633eb47b800SJaegeuk Kim }
634eb47b800SJaegeuk Kim 
/* Set @blkaddr as dn's block address and refresh the extent cache. */
635f28b3434SChao Yu void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
636f28b3434SChao Yu {
637f28b3434SChao Yu 	dn->data_blkaddr = blkaddr;
6384d57b86dSChao Yu 	f2fs_set_data_blkaddr(dn);
639f28b3434SChao Yu 	f2fs_update_extent_cache(dn);
640f28b3434SChao Yu }
641f28b3434SChao Yu 
64246008c6dSChao Yu /* dn->ofs_in_node will be returned with up-to-date last block pointer */
/*
 * Reserve up to @count new blocks starting at dn->ofs_in_node, marking
 * each NULL_ADDR slot as NEW_ADDR after charging quota/space through
 * inc_valid_block_count().  Returns 0 on success, -EPERM when the inode
 * forbids allocation (FI_NO_ALLOC), or the block-count error.
 */
6434d57b86dSChao Yu int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
644eb47b800SJaegeuk Kim {
6454081363fSJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
6460abd675eSChao Yu 	int err;
647eb47b800SJaegeuk Kim 
64846008c6dSChao Yu 	if (!count)
64946008c6dSChao Yu 		return 0;
65046008c6dSChao Yu 
65191942321SJaegeuk Kim 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
652eb47b800SJaegeuk Kim 		return -EPERM;
6530abd675eSChao Yu 	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
6540abd675eSChao Yu 		return err;
655eb47b800SJaegeuk Kim 
65646008c6dSChao Yu 	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
65746008c6dSChao Yu 						dn->ofs_in_node, count);
658c01e2853SNamjae Jeon 
65946008c6dSChao Yu 	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
66046008c6dSChao Yu 
66146008c6dSChao Yu 	for (; count > 0; dn->ofs_in_node++) {
6627a2af766SChao Yu 		block_t blkaddr = datablock_addr(dn->inode,
6637a2af766SChao Yu 					dn->node_page, dn->ofs_in_node);
66446008c6dSChao Yu 		if (blkaddr == NULL_ADDR) {
665eb47b800SJaegeuk Kim 			dn->data_blkaddr = NEW_ADDR;
66646008c6dSChao Yu 			__set_data_blkaddr(dn);
66746008c6dSChao Yu 			count--;
66846008c6dSChao Yu 		}
66946008c6dSChao Yu 	}
67046008c6dSChao Yu 
67146008c6dSChao Yu 	if (set_page_dirty(dn->node_page))
67246008c6dSChao Yu 		dn->node_changed = true;
673eb47b800SJaegeuk Kim 	return 0;
674eb47b800SJaegeuk Kim }
675eb47b800SJaegeuk Kim 
67646008c6dSChao Yu /* Should keep dn->ofs_in_node unchanged */
6774d57b86dSChao Yu int f2fs_reserve_new_block(struct dnode_of_data *dn)
67846008c6dSChao Yu {
67946008c6dSChao Yu 	unsigned int ofs_in_node = dn->ofs_in_node;
68046008c6dSChao Yu 	int ret;
68146008c6dSChao Yu 
6824d57b86dSChao Yu 	ret = f2fs_reserve_new_blocks(dn, 1);
68346008c6dSChao Yu 	dn->ofs_in_node = ofs_in_node;
68446008c6dSChao Yu 	return ret;
68546008c6dSChao Yu }
68646008c6dSChao Yu 
/*
 * Make sure a data block is reserved for @index: walk to its dnode
 * (allocating node pages if needed) and reserve a new block when the
 * slot is still a hole.  If the caller did not pass in an inode page
 * (dn->inode_page == NULL), the dnode grabbed here is released again
 * before returning; it is also released on error.
 */
687b600965cSHuajun Li int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
688b600965cSHuajun Li {
	/* if the caller supplied inode_page, it keeps ownership of the dnode */
689b600965cSHuajun Li 	bool need_put = dn->inode_page ? false : true;
690b600965cSHuajun Li 	int err;
691b600965cSHuajun Li 
6924d57b86dSChao Yu 	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
693b600965cSHuajun Li 	if (err)
694b600965cSHuajun Li 		return err;
695a8865372SJaegeuk Kim 
696b600965cSHuajun Li 	if (dn->data_blkaddr == NULL_ADDR)
6974d57b86dSChao Yu 		err = f2fs_reserve_new_block(dn);
698a8865372SJaegeuk Kim 	if (err || need_put)
699b600965cSHuajun Li 		f2fs_put_dnode(dn);
700b600965cSHuajun Li 	return err;
701b600965cSHuajun Li }
702b600965cSHuajun Li 
703759af1c9SFan Li int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
704eb47b800SJaegeuk Kim {
705e15882b6SHou Pengyang 	struct extent_info ei  = {0,0,0};
706759af1c9SFan Li 	struct inode *inode = dn->inode;
707028a41e8SChao Yu 
708759af1c9SFan Li 	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
709759af1c9SFan Li 		dn->data_blkaddr = ei.blk + index - ei.fofs;
710759af1c9SFan Li 		return 0;
711028a41e8SChao Yu 	}
712028a41e8SChao Yu 
713759af1c9SFan Li 	return f2fs_reserve_block(dn, index);
714eb47b800SJaegeuk Kim }
715eb47b800SJaegeuk Kim 
/*
 * Grab the page-cache page for @index and, if it is not yet uptodate,
 * resolve its block address (extent cache first, then the dnode) and
 * submit a read for it.  Uptodate and NEW_ADDR (zero-filled) pages are
 * returned unlocked; on the read-submitted path the page may still be
 * locked until I/O completes — callers such as f2fs_find_data_page()
 * and f2fs_get_lock_data_page() wait for it.  Returns ERR_PTR on
 * failure (-ENOENT for a hole).
 */
7164d57b86dSChao Yu struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
71704d328deSMike Christie 						int op_flags, bool for_write)
718eb47b800SJaegeuk Kim {
719eb47b800SJaegeuk Kim 	struct address_space *mapping = inode->i_mapping;
720eb47b800SJaegeuk Kim 	struct dnode_of_data dn;
721eb47b800SJaegeuk Kim 	struct page *page;
722e15882b6SHou Pengyang 	struct extent_info ei = {0,0,0};
723eb47b800SJaegeuk Kim 	int err;
7244375a336SJaegeuk Kim 
725a56c7c6fSJaegeuk Kim 	page = f2fs_grab_cache_page(mapping, index, for_write);
726eb47b800SJaegeuk Kim 	if (!page)
727eb47b800SJaegeuk Kim 		return ERR_PTR(-ENOMEM);
728eb47b800SJaegeuk Kim 
729cb3bc9eeSChao Yu 	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
730cb3bc9eeSChao Yu 		dn.data_blkaddr = ei.blk + index - ei.fofs;
731cb3bc9eeSChao Yu 		goto got_it;
732cb3bc9eeSChao Yu 	}
733cb3bc9eeSChao Yu 
734650495deSJaegeuk Kim 	set_new_dnode(&dn, inode, NULL, NULL, 0);
7354d57b86dSChao Yu 	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
73686531d6bSJaegeuk Kim 	if (err)
73786531d6bSJaegeuk Kim 		goto put_err;
738650495deSJaegeuk Kim 	f2fs_put_dnode(&dn);
739650495deSJaegeuk Kim 
	/* a hole: report -ENOENT so dir.c/GC callers can tell absence apart */
7406bacf52fSJaegeuk Kim 	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
74186531d6bSJaegeuk Kim 		err = -ENOENT;
74286531d6bSJaegeuk Kim 		goto put_err;
743650495deSJaegeuk Kim 	}
744cb3bc9eeSChao Yu got_it:
74543f3eae1SJaegeuk Kim 	if (PageUptodate(page)) {
74643f3eae1SJaegeuk Kim 		unlock_page(page);
747eb47b800SJaegeuk Kim 		return page;
74843f3eae1SJaegeuk Kim 	}
749eb47b800SJaegeuk Kim 
750d59ff4dfSJaegeuk Kim 	/*
751d59ff4dfSJaegeuk Kim 	 * A new dentry page is allocated but not able to be written, since its
752d59ff4dfSJaegeuk Kim 	 * new inode page couldn't be allocated due to -ENOSPC.
753d59ff4dfSJaegeuk Kim 	 * In such the case, its blkaddr can be remained as NEW_ADDR.
7544d57b86dSChao Yu 	 * see, f2fs_add_link -> f2fs_get_new_data_page ->
7554d57b86dSChao Yu 	 * f2fs_init_inode_metadata.
756d59ff4dfSJaegeuk Kim 	 */
757d59ff4dfSJaegeuk Kim 	if (dn.data_blkaddr == NEW_ADDR) {
75809cbfeafSKirill A. Shutemov 		zero_user_segment(page, 0, PAGE_SIZE);
759237c0790SJaegeuk Kim 		if (!PageUptodate(page))
760d59ff4dfSJaegeuk Kim 			SetPageUptodate(page);
76143f3eae1SJaegeuk Kim 		unlock_page(page);
762d59ff4dfSJaegeuk Kim 		return page;
763d59ff4dfSJaegeuk Kim 	}
764eb47b800SJaegeuk Kim 
76513ba41e3SJaegeuk Kim 	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
766393ff91fSJaegeuk Kim 	if (err)
76786531d6bSJaegeuk Kim 		goto put_err;
76843f3eae1SJaegeuk Kim 	return page;
76986531d6bSJaegeuk Kim 
77086531d6bSJaegeuk Kim put_err:
77186531d6bSJaegeuk Kim 	f2fs_put_page(page, 1);
77286531d6bSJaegeuk Kim 	return ERR_PTR(err);
77343f3eae1SJaegeuk Kim }
774393ff91fSJaegeuk Kim 
/*
 * Return an uptodate, unlocked data page for @index, reading it from
 * disk if it is not already cached.  Waits for a pending read to finish
 * and returns ERR_PTR(-EIO) if the read failed.
 */
7754d57b86dSChao Yu struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
77643f3eae1SJaegeuk Kim {
77743f3eae1SJaegeuk Kim 	struct address_space *mapping = inode->i_mapping;
77843f3eae1SJaegeuk Kim 	struct page *page;
77943f3eae1SJaegeuk Kim 
78043f3eae1SJaegeuk Kim 	page = find_get_page(mapping, index);
78143f3eae1SJaegeuk Kim 	if (page && PageUptodate(page))
78243f3eae1SJaegeuk Kim 		return page;
	/* f2fs_put_page tolerates NULL, so no separate check is needed */
78343f3eae1SJaegeuk Kim 	f2fs_put_page(page, 0);
78443f3eae1SJaegeuk Kim 
7854d57b86dSChao Yu 	page = f2fs_get_read_data_page(inode, index, 0, false);
78643f3eae1SJaegeuk Kim 	if (IS_ERR(page))
78743f3eae1SJaegeuk Kim 		return page;
78843f3eae1SJaegeuk Kim 
78943f3eae1SJaegeuk Kim 	if (PageUptodate(page))
79043f3eae1SJaegeuk Kim 		return page;
79143f3eae1SJaegeuk Kim 
	/* the read path unlocks the page at I/O completion; wait for it */
79243f3eae1SJaegeuk Kim 	wait_on_page_locked(page);
79343f3eae1SJaegeuk Kim 	if (unlikely(!PageUptodate(page))) {
79443f3eae1SJaegeuk Kim 		f2fs_put_page(page, 0);
79543f3eae1SJaegeuk Kim 		return ERR_PTR(-EIO);
79643f3eae1SJaegeuk Kim 	}
79743f3eae1SJaegeuk Kim 	return page;
79843f3eae1SJaegeuk Kim }
79943f3eae1SJaegeuk Kim 
80043f3eae1SJaegeuk Kim /*
80143f3eae1SJaegeuk Kim  * If it tries to access a hole, return an error.
80243f3eae1SJaegeuk Kim  * Because, the callers, functions in dir.c and GC, should be able to know
80343f3eae1SJaegeuk Kim  * whether this page exists or not.
 *
 * Returns the page LOCKED and uptodate, or ERR_PTR on failure
 * (-ENOENT for a hole, -EIO for a failed read).
80443f3eae1SJaegeuk Kim  */
8054d57b86dSChao Yu struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
806a56c7c6fSJaegeuk Kim 							bool for_write)
80743f3eae1SJaegeuk Kim {
80843f3eae1SJaegeuk Kim 	struct address_space *mapping = inode->i_mapping;
80943f3eae1SJaegeuk Kim 	struct page *page;
81043f3eae1SJaegeuk Kim repeat:
8114d57b86dSChao Yu 	page = f2fs_get_read_data_page(inode, index, 0, for_write);
81243f3eae1SJaegeuk Kim 	if (IS_ERR(page))
81343f3eae1SJaegeuk Kim 		return page;
81443f3eae1SJaegeuk Kim 
81543f3eae1SJaegeuk Kim 	/* wait for read completion */
816393ff91fSJaegeuk Kim 	lock_page(page);
	/* the page may have been truncated/migrated while unlocked; retry */
8176bacf52fSJaegeuk Kim 	if (unlikely(page->mapping != mapping)) {
818afcb7ca0SJaegeuk Kim 		f2fs_put_page(page, 1);
819afcb7ca0SJaegeuk Kim 		goto repeat;
820eb47b800SJaegeuk Kim 	}
8211563ac75SChao Yu 	if (unlikely(!PageUptodate(page))) {
8221563ac75SChao Yu 		f2fs_put_page(page, 1);
8231563ac75SChao Yu 		return ERR_PTR(-EIO);
8241563ac75SChao Yu 	}
825eb47b800SJaegeuk Kim 	return page;
826eb47b800SJaegeuk Kim }
827eb47b800SJaegeuk Kim 
8280a8165d7SJaegeuk Kim /*
829eb47b800SJaegeuk Kim  * Caller ensures that this data page is never allocated.
830eb47b800SJaegeuk Kim  * A new zero-filled data page is allocated in the page cache.
83139936837SJaegeuk Kim  *
8324f4124d0SChao Yu  * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
8334f4124d0SChao Yu  * f2fs_unlock_op().
834470f00e9SChao Yu  * Note that, ipage is set only by make_empty_dir, and if any error occur,
835470f00e9SChao Yu  * ipage should be released by this function.
 *
 * Returns a locked page (via the f2fs_get_lock_data_page() fallback when
 * the block already exists) or ERR_PTR.  When @new_i_size is true, i_size
 * is extended to cover the page.
836eb47b800SJaegeuk Kim  */
8374d57b86dSChao Yu struct page *f2fs_get_new_data_page(struct inode *inode,
838a8865372SJaegeuk Kim 		struct page *ipage, pgoff_t index, bool new_i_size)
839eb47b800SJaegeuk Kim {
840eb47b800SJaegeuk Kim 	struct address_space *mapping = inode->i_mapping;
841eb47b800SJaegeuk Kim 	struct page *page;
842eb47b800SJaegeuk Kim 	struct dnode_of_data dn;
843eb47b800SJaegeuk Kim 	int err;
8447612118aSJaegeuk Kim 
845a56c7c6fSJaegeuk Kim 	page = f2fs_grab_cache_page(mapping, index, true);
846470f00e9SChao Yu 	if (!page) {
847470f00e9SChao Yu 		/*
848470f00e9SChao Yu 		 * before exiting, we should make sure ipage will be released
849470f00e9SChao Yu 		 * if any error occur.
850470f00e9SChao Yu 		 */
851470f00e9SChao Yu 		f2fs_put_page(ipage, 1);
85201f28610SJaegeuk Kim 		return ERR_PTR(-ENOMEM);
853470f00e9SChao Yu 	}
854eb47b800SJaegeuk Kim 
	/* f2fs_reserve_block() releases the dnode (and thus ipage) on error */
855a8865372SJaegeuk Kim 	set_new_dnode(&dn, inode, ipage, NULL, 0);
856b600965cSHuajun Li 	err = f2fs_reserve_block(&dn, index);
85701f28610SJaegeuk Kim 	if (err) {
85801f28610SJaegeuk Kim 		f2fs_put_page(page, 1);
859eb47b800SJaegeuk Kim 		return ERR_PTR(err);
860a8865372SJaegeuk Kim 	}
86101f28610SJaegeuk Kim 	if (!ipage)
86201f28610SJaegeuk Kim 		f2fs_put_dnode(&dn);
863eb47b800SJaegeuk Kim 
864eb47b800SJaegeuk Kim 	if (PageUptodate(page))
86501f28610SJaegeuk Kim 		goto got_it;
866eb47b800SJaegeuk Kim 
867eb47b800SJaegeuk Kim 	if (dn.data_blkaddr == NEW_ADDR) {
86809cbfeafSKirill A. Shutemov 		zero_user_segment(page, 0, PAGE_SIZE);
869237c0790SJaegeuk Kim 		if (!PageUptodate(page))
870393ff91fSJaegeuk Kim 			SetPageUptodate(page);
871eb47b800SJaegeuk Kim 	} else {
8724375a336SJaegeuk Kim 		f2fs_put_page(page, 1);
873a8865372SJaegeuk Kim 
8747612118aSJaegeuk Kim 		/* if ipage exists, blkaddr should be NEW_ADDR */
8757612118aSJaegeuk Kim 		f2fs_bug_on(F2FS_I_SB(inode), ipage);
8764d57b86dSChao Yu 		page = f2fs_get_lock_data_page(inode, index, true);
8774375a336SJaegeuk Kim 		if (IS_ERR(page))
8787612118aSJaegeuk Kim 			return page;
879eb47b800SJaegeuk Kim 	}
88001f28610SJaegeuk Kim got_it:
8819edcdabfSChao Yu 	if (new_i_size && i_size_read(inode) <
882ee6d182fSJaegeuk Kim 				((loff_t)(index + 1) << PAGE_SHIFT))
883fc9581c8SJaegeuk Kim 		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
884eb47b800SJaegeuk Kim 	return page;
885eb47b800SJaegeuk Kim }
886eb47b800SJaegeuk Kim 
/*
 * Allocate a new on-disk location for the dnode slot in @dn, in segment
 * type @seg_type, and record it via f2fs_set_data_blkaddr().  When the
 * slot is still a hole (NULL_ADDR) this consumes one block of quota via
 * inc_valid_block_count(); a slot that already has a block only gets a
 * new location (out-of-place update) and keeps its quota.
 */
887d5097be5SHyunchul Lee static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
888bfad7c2dSJaegeuk Kim {
8894081363fSJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
890bfad7c2dSJaegeuk Kim 	struct f2fs_summary sum;
891bfad7c2dSJaegeuk Kim 	struct node_info ni;
8926aa58d8aSChao Yu 	block_t old_blkaddr;
89346008c6dSChao Yu 	blkcnt_t count = 1;
8940abd675eSChao Yu 	int err;
895bfad7c2dSJaegeuk Kim 
89691942321SJaegeuk Kim 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
897bfad7c2dSJaegeuk Kim 		return -EPERM;
898df6136efSChao Yu 
8997735730dSChao Yu 	err = f2fs_get_node_info(sbi, dn->nid, &ni);
9007735730dSChao Yu 	if (err)
9017735730dSChao Yu 		return err;
9027735730dSChao Yu 
9037a2af766SChao Yu 	dn->data_blkaddr = datablock_addr(dn->inode,
9047a2af766SChao Yu 				dn->node_page, dn->ofs_in_node);
905f847c699SChao Yu 	if (dn->data_blkaddr != NULL_ADDR)
906df6136efSChao Yu 		goto alloc;
907df6136efSChao Yu 
9080abd675eSChao Yu 	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
9090abd675eSChao Yu 		return err;
910bfad7c2dSJaegeuk Kim 
911df6136efSChao Yu alloc:
912bfad7c2dSJaegeuk Kim 	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
9136aa58d8aSChao Yu 	old_blkaddr = dn->data_blkaddr;
9146aa58d8aSChao Yu 	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
915d5097be5SHyunchul Lee 					&sum, seg_type, NULL, false);
	/* drop any cached meta-mapping page for the superseded address */
9166aa58d8aSChao Yu 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
9176aa58d8aSChao Yu 		invalidate_mapping_pages(META_MAPPING(sbi),
9186aa58d8aSChao Yu 					old_blkaddr, old_blkaddr);
9194d57b86dSChao Yu 	f2fs_set_data_blkaddr(dn);
920bfad7c2dSJaegeuk Kim 
9210a4daae5SJaegeuk Kim 	/*
9220a4daae5SJaegeuk Kim 	 * i_size will be updated by direct_IO. Otherwise, we'll get stale
9230a4daae5SJaegeuk Kim 	 * data from unwritten block via dio_read.
9240a4daae5SJaegeuk Kim 	 */
925bfad7c2dSJaegeuk Kim 	return 0;
926bfad7c2dSJaegeuk Kim }
927bfad7c2dSJaegeuk Kim 
/*
 * Preallocate the blocks backing the write described by @iocb/@from,
 * converting inline data first where needed.  Direct I/O uses
 * PRE_DIO/PRE_AIO mapping depending on whether buffered fallback is
 * forced; buffered writes use PRE_AIO.  -ENOSPC during preallocation is
 * swallowed (err = 0) so the write itself can proceed and fail per-block.
 */
928a7de6086SJaegeuk Kim int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
92959b802e5SJaegeuk Kim {
930b439b103SJaegeuk Kim 	struct inode *inode = file_inode(iocb->ki_filp);
9315b8db7faSChao Yu 	struct f2fs_map_blocks map;
932d6d478a1SChao Yu 	int flag;
933a7de6086SJaegeuk Kim 	int err = 0;
934d6d478a1SChao Yu 	bool direct_io = iocb->ki_flags & IOCB_DIRECT;
93559b802e5SJaegeuk Kim 
93671ad682cSWeichao Guo 	/* convert inline data for Direct I/O */
937d6d478a1SChao Yu 	if (direct_io) {
93871ad682cSWeichao Guo 		err = f2fs_convert_inline_inode(inode);
93971ad682cSWeichao Guo 		if (err)
94071ad682cSWeichao Guo 			return err;
94171ad682cSWeichao Guo 	}
94271ad682cSWeichao Guo 
	/* out-of-place DIO allocates at write time; no preallocation needed */
943f9d6d059SChao Yu 	if (direct_io && allow_outplace_dio(inode, iocb, from))
944f9d6d059SChao Yu 		return 0;
945f9d6d059SChao Yu 
946dc91de78SJaegeuk Kim 	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
947dc91de78SJaegeuk Kim 		return 0;
948dc91de78SJaegeuk Kim 
9490080c507SJaegeuk Kim 	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
950dfd02e4dSChao Yu 	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
951dfd02e4dSChao Yu 	if (map.m_len > map.m_lblk)
952dfd02e4dSChao Yu 		map.m_len -= map.m_lblk;
953dfd02e4dSChao Yu 	else
954dfd02e4dSChao Yu 		map.m_len = 0;
955dfd02e4dSChao Yu 
956da85985cSChao Yu 	map.m_next_pgofs = NULL;
957c4020b2dSChao Yu 	map.m_next_extent = NULL;
958d5097be5SHyunchul Lee 	map.m_seg_type = NO_CHECK_TYPE;
959f9d6d059SChao Yu 	map.m_may_create = true;
96059b802e5SJaegeuk Kim 
961d6d478a1SChao Yu 	if (direct_io) {
9624d57b86dSChao Yu 		map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
963f847c699SChao Yu 		flag = f2fs_force_buffered_io(inode, iocb, from) ?
964c040ff9dSJaegeuk Kim 					F2FS_GET_BLOCK_PRE_AIO :
965d6d478a1SChao Yu 					F2FS_GET_BLOCK_PRE_DIO;
966d6d478a1SChao Yu 		goto map_blocks;
967d5097be5SHyunchul Lee 	}
968f2470371SChao Yu 	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
969a7de6086SJaegeuk Kim 		err = f2fs_convert_inline_inode(inode);
970a7de6086SJaegeuk Kim 		if (err)
971a7de6086SJaegeuk Kim 			return err;
97224b84912SJaegeuk Kim 	}
	/* still-inline data has no blocks to preallocate */
973d6d478a1SChao Yu 	if (f2fs_has_inline_data(inode))
974d6d478a1SChao Yu 		return err;
975d6d478a1SChao Yu 
976d6d478a1SChao Yu 	flag = F2FS_GET_BLOCK_PRE_AIO;
977d6d478a1SChao Yu 
978d6d478a1SChao Yu map_blocks:
979d6d478a1SChao Yu 	err = f2fs_map_blocks(inode, &map, 1, flag);
98025006645SSheng Yong 	if (map.m_len > 0 && err == -ENOSPC) {
981d6d478a1SChao Yu 		if (!direct_io)
98225006645SSheng Yong 			set_inode_flag(inode, FI_NO_PREALLOC);
98325006645SSheng Yong 		err = 0;
98425006645SSheng Yong 	}
985a7de6086SJaegeuk Kim 	return err;
98659b802e5SJaegeuk Kim }
98759b802e5SJaegeuk Kim 
98839a86958SChao Yu void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
98959c9081bSYunlei He {
99059c9081bSYunlei He 	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
99159c9081bSYunlei He 		if (lock)
99259c9081bSYunlei He 			down_read(&sbi->node_change);
99359c9081bSYunlei He 		else
99459c9081bSYunlei He 			up_read(&sbi->node_change);
99559c9081bSYunlei He 	} else {
99659c9081bSYunlei He 		if (lock)
99759c9081bSYunlei He 			f2fs_lock_op(sbi);
99859c9081bSYunlei He 		else
99959c9081bSYunlei He 			f2fs_unlock_op(sbi);
100059c9081bSYunlei He 	}
100159c9081bSYunlei He }
100259c9081bSYunlei He 
10030a8165d7SJaegeuk Kim /*
1004003a3e1dSJaegeuk Kim  * f2fs_map_blocks() now supported readahead/bmap/rw direct_IO with
1005003a3e1dSJaegeuk Kim  * f2fs_map_blocks structure.
10064f4124d0SChao Yu  * If original data blocks are allocated, then give them to blockdev.
10074f4124d0SChao Yu  * Otherwise,
10084f4124d0SChao Yu  *     a. preallocate requested block addresses
10094f4124d0SChao Yu  *     b. do not use extent cache for better performance
10104f4124d0SChao Yu  *     c. give the block addresses to blockdev
 *
 * Maps the range [m_lblk, m_lblk + m_len) to contiguous physical blocks,
 * returning the mapped extent in m_pblk/m_len/m_flags; the exact policy
 * (allocate, report holes, stop early) is selected by @flag
 * (F2FS_GET_BLOCK_*) and @create.  Returns 0 or a negative errno.
1011eb47b800SJaegeuk Kim  */
1012d323d005SChao Yu int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
1013e2b4e2bcSChao Yu 						int create, int flag)
1014eb47b800SJaegeuk Kim {
1015003a3e1dSJaegeuk Kim 	unsigned int maxblocks = map->m_len;
1016eb47b800SJaegeuk Kim 	struct dnode_of_data dn;
1017f9811703SChao Yu 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1018f9d6d059SChao Yu 	int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
101946008c6dSChao Yu 	pgoff_t pgofs, end_offset, end;
1020bfad7c2dSJaegeuk Kim 	int err = 0, ofs = 1;
102146008c6dSChao Yu 	unsigned int ofs_in_node, last_ofs_in_node;
102246008c6dSChao Yu 	blkcnt_t prealloc;
1023e15882b6SHou Pengyang 	struct extent_info ei = {0,0,0};
10247df3a431SFan Li 	block_t blkaddr;
1025c4020b2dSChao Yu 	unsigned int start_pgofs;
1026eb47b800SJaegeuk Kim 
1027dfd02e4dSChao Yu 	if (!maxblocks)
1028dfd02e4dSChao Yu 		return 0;
1029dfd02e4dSChao Yu 
1030003a3e1dSJaegeuk Kim 	map->m_len = 0;
1031003a3e1dSJaegeuk Kim 	map->m_flags = 0;
1032003a3e1dSJaegeuk Kim 
1033003a3e1dSJaegeuk Kim 	/* it only supports block size == page size */
1034003a3e1dSJaegeuk Kim 	pgofs =	(pgoff_t)map->m_lblk;
103546008c6dSChao Yu 	end = pgofs + maxblocks;
1036eb47b800SJaegeuk Kim 
	/* read-only fast path: answer straight from the extent cache */
103724b84912SJaegeuk Kim 	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
1038003a3e1dSJaegeuk Kim 		map->m_pblk = ei.blk + pgofs - ei.fofs;
1039003a3e1dSJaegeuk Kim 		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
1040003a3e1dSJaegeuk Kim 		map->m_flags = F2FS_MAP_MAPPED;
1041c4020b2dSChao Yu 		if (map->m_next_extent)
1042c4020b2dSChao Yu 			*map->m_next_extent = pgofs + map->m_len;
10431e78e8bdSSahitya Tummala 
10441e78e8bdSSahitya Tummala 		/* for hardware encryption, but to avoid potential issue in future */
10451e78e8bdSSahitya Tummala 		if (flag == F2FS_GET_BLOCK_DIO)
10461e78e8bdSSahitya Tummala 			f2fs_wait_on_block_writeback_range(inode,
10471e78e8bdSSahitya Tummala 						map->m_pblk, map->m_len);
1048bfad7c2dSJaegeuk Kim 		goto out;
1049a2e7d1bfSChao Yu 	}
1050bfad7c2dSJaegeuk Kim 
	/* walk the range one dnode page at a time */
10514fe71e88SChao Yu next_dnode:
1052f9d6d059SChao Yu 	if (map->m_may_create)
105359c9081bSYunlei He 		__do_map_lock(sbi, flag, true);
1054eb47b800SJaegeuk Kim 
1055eb47b800SJaegeuk Kim 	/* When reading holes, we need its node page */
1056eb47b800SJaegeuk Kim 	set_new_dnode(&dn, inode, NULL, NULL, 0);
10574d57b86dSChao Yu 	err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
10581ec79083SJaegeuk Kim 	if (err) {
105943473f96SChao Yu 		if (flag == F2FS_GET_BLOCK_BMAP)
106043473f96SChao Yu 			map->m_pblk = 0;
	/* -ENOENT means a missing dnode, i.e. a hole: not an error for lookup */
1061da85985cSChao Yu 		if (err == -ENOENT) {
1062bfad7c2dSJaegeuk Kim 			err = 0;
1063da85985cSChao Yu 			if (map->m_next_pgofs)
1064da85985cSChao Yu 				*map->m_next_pgofs =
10654d57b86dSChao Yu 					f2fs_get_next_page_offset(&dn, pgofs);
1066c4020b2dSChao Yu 			if (map->m_next_extent)
1067c4020b2dSChao Yu 				*map->m_next_extent =
10684d57b86dSChao Yu 					f2fs_get_next_page_offset(&dn, pgofs);
1069da85985cSChao Yu 		}
1070bfad7c2dSJaegeuk Kim 		goto unlock_out;
1071848753aaSNamjae Jeon 	}
1072eb47b800SJaegeuk Kim 
1073c4020b2dSChao Yu 	start_pgofs = pgofs;
107446008c6dSChao Yu 	prealloc = 0;
1075230436b3SArnd Bergmann 	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
107681ca7350SChao Yu 	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1077eb47b800SJaegeuk Kim 
10784fe71e88SChao Yu next_block:
10797a2af766SChao Yu 	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
1080973163fcSChao Yu 
	/* a "valid" address outside the main area indicates fs corruption */
1081c9b60788SChao Yu 	if (__is_valid_data_blkaddr(blkaddr) &&
1082c9b60788SChao Yu 		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
1083c9b60788SChao Yu 		err = -EFAULT;
1084c9b60788SChao Yu 		goto sync_out;
1085c9b60788SChao Yu 	}
1086c9b60788SChao Yu 
1087f847c699SChao Yu 	if (is_valid_data_blkaddr(sbi, blkaddr)) {
1088f847c699SChao Yu 		/* use out-place-update for direct IO under LFS mode */
1089f9d6d059SChao Yu 		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
1090f9d6d059SChao Yu 							map->m_may_create) {
1091f847c699SChao Yu 			err = __allocate_data_block(&dn, map->m_seg_type);
1092f847c699SChao Yu 			if (!err)
1093f847c699SChao Yu 				set_inode_flag(inode, FI_APPEND_WRITE);
1094f847c699SChao Yu 		}
1095f847c699SChao Yu 	} else {
1096973163fcSChao Yu 		if (create) {
1097f9811703SChao Yu 			if (unlikely(f2fs_cp_error(sbi))) {
1098f9811703SChao Yu 				err = -EIO;
1099f9811703SChao Yu 				goto sync_out;
1100f9811703SChao Yu 			}
	/* PRE_AIO only counts holes here; they are reserved in batch below */
110124b84912SJaegeuk Kim 			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
110246008c6dSChao Yu 				if (blkaddr == NULL_ADDR) {
110346008c6dSChao Yu 					prealloc++;
110446008c6dSChao Yu 					last_ofs_in_node = dn.ofs_in_node;
110546008c6dSChao Yu 				}
110624b84912SJaegeuk Kim 			} else {
11070a4daae5SJaegeuk Kim 				WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
11080a4daae5SJaegeuk Kim 					flag != F2FS_GET_BLOCK_DIO);
1109d5097be5SHyunchul Lee 				err = __allocate_data_block(&dn,
1110d5097be5SHyunchul Lee 							map->m_seg_type);
11116f2d8ed6SChao Yu 				if (!err)
111291942321SJaegeuk Kim 					set_inode_flag(inode, FI_APPEND_WRITE);
111324b84912SJaegeuk Kim 			}
1114bfad7c2dSJaegeuk Kim 			if (err)
1115bfad7c2dSJaegeuk Kim 				goto sync_out;
11163f2be043SKinglong Mee 			map->m_flags |= F2FS_MAP_NEW;
1117bfad7c2dSJaegeuk Kim 			blkaddr = dn.data_blkaddr;
1118973163fcSChao Yu 		} else {
111943473f96SChao Yu 			if (flag == F2FS_GET_BLOCK_BMAP) {
112043473f96SChao Yu 				map->m_pblk = 0;
112143473f96SChao Yu 				goto sync_out;
112243473f96SChao Yu 			}
1123c4020b2dSChao Yu 			if (flag == F2FS_GET_BLOCK_PRECACHE)
1124c4020b2dSChao Yu 				goto sync_out;
1125da85985cSChao Yu 			if (flag == F2FS_GET_BLOCK_FIEMAP &&
1126da85985cSChao Yu 						blkaddr == NULL_ADDR) {
1127da85985cSChao Yu 				if (map->m_next_pgofs)
1128da85985cSChao Yu 					*map->m_next_pgofs = pgofs + 1;
1129973163fcSChao Yu 				goto sync_out;
1130bfad7c2dSJaegeuk Kim 			}
1131f3d98e74SChao Yu 			if (flag != F2FS_GET_BLOCK_FIEMAP) {
1132f3d98e74SChao Yu 				/* for defragment case */
1133f3d98e74SChao Yu 				if (map->m_next_pgofs)
1134f3d98e74SChao Yu 					*map->m_next_pgofs = pgofs + 1;
1135bfad7c2dSJaegeuk Kim 				goto sync_out;
1136bfad7c2dSJaegeuk Kim 			}
1137973163fcSChao Yu 		}
1138973163fcSChao Yu 	}
1139973163fcSChao Yu 
114046008c6dSChao Yu 	if (flag == F2FS_GET_BLOCK_PRE_AIO)
114146008c6dSChao Yu 		goto skip;
11427f63eb77SJaegeuk Kim 
	/* start a new extent, or extend the current one if blkaddr is contiguous */
11434fe71e88SChao Yu 	if (map->m_len == 0) {
11444fe71e88SChao Yu 		/* preallocated unwritten block should be mapped for fiemap. */
11454fe71e88SChao Yu 		if (blkaddr == NEW_ADDR)
11464fe71e88SChao Yu 			map->m_flags |= F2FS_MAP_UNWRITTEN;
11474fe71e88SChao Yu 		map->m_flags |= F2FS_MAP_MAPPED;
11484fe71e88SChao Yu 
11494fe71e88SChao Yu 		map->m_pblk = blkaddr;
11504fe71e88SChao Yu 		map->m_len = 1;
11514fe71e88SChao Yu 	} else if ((map->m_pblk != NEW_ADDR &&
11527f63eb77SJaegeuk Kim 			blkaddr == (map->m_pblk + ofs)) ||
1153b439b103SJaegeuk Kim 			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
115446008c6dSChao Yu 			flag == F2FS_GET_BLOCK_PRE_DIO) {
1155bfad7c2dSJaegeuk Kim 		ofs++;
11564fe71e88SChao Yu 		map->m_len++;
11574fe71e88SChao Yu 	} else {
11584fe71e88SChao Yu 		goto sync_out;
11594fe71e88SChao Yu 	}
11604fe71e88SChao Yu 
116146008c6dSChao Yu skip:
1162bfad7c2dSJaegeuk Kim 	dn.ofs_in_node++;
1163bfad7c2dSJaegeuk Kim 	pgofs++;
11644fe71e88SChao Yu 
116546008c6dSChao Yu 	/* preallocate blocks in batch for one dnode page */
116646008c6dSChao Yu 	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
116746008c6dSChao Yu 			(pgofs == end || dn.ofs_in_node == end_offset)) {
116846008c6dSChao Yu 
116946008c6dSChao Yu 		dn.ofs_in_node = ofs_in_node;
11704d57b86dSChao Yu 		err = f2fs_reserve_new_blocks(&dn, prealloc);
117146008c6dSChao Yu 		if (err)
117246008c6dSChao Yu 			goto sync_out;
117346008c6dSChao Yu 
117446008c6dSChao Yu 		map->m_len += dn.ofs_in_node - ofs_in_node;
117546008c6dSChao Yu 		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
117646008c6dSChao Yu 			err = -ENOSPC;
117746008c6dSChao Yu 			goto sync_out;
117846008c6dSChao Yu 		}
117946008c6dSChao Yu 		dn.ofs_in_node = end_offset;
118046008c6dSChao Yu 	}
118146008c6dSChao Yu 
118246008c6dSChao Yu 	if (pgofs >= end)
118346008c6dSChao Yu 		goto sync_out;
118446008c6dSChao Yu 	else if (dn.ofs_in_node < end_offset)
11854fe71e88SChao Yu 		goto next_block;
11864fe71e88SChao Yu 
1187c4020b2dSChao Yu 	if (flag == F2FS_GET_BLOCK_PRECACHE) {
1188c4020b2dSChao Yu 		if (map->m_flags & F2FS_MAP_MAPPED) {
1189c4020b2dSChao Yu 			unsigned int ofs = start_pgofs - map->m_lblk;
1190c4020b2dSChao Yu 
1191c4020b2dSChao Yu 			f2fs_update_extent_cache_range(&dn,
1192c4020b2dSChao Yu 				start_pgofs, map->m_pblk + ofs,
1193c4020b2dSChao Yu 				map->m_len - ofs);
1194c4020b2dSChao Yu 		}
1195c4020b2dSChao Yu 	}
1196c4020b2dSChao Yu 
11974fe71e88SChao Yu 	f2fs_put_dnode(&dn);
11984fe71e88SChao Yu 
	/* drop locks between dnode pages so writers are not starved */
1199f9d6d059SChao Yu 	if (map->m_may_create) {
120059c9081bSYunlei He 		__do_map_lock(sbi, flag, false);
12016f2d8ed6SChao Yu 		f2fs_balance_fs(sbi, dn.node_changed);
12024fe71e88SChao Yu 	}
12034fe71e88SChao Yu 	goto next_dnode;
12047df3a431SFan Li 
1205bfad7c2dSJaegeuk Kim sync_out:
12061e78e8bdSSahitya Tummala 
12071e78e8bdSSahitya Tummala 	/* for hardware encryption, but to avoid potential issue in future */
12081e78e8bdSSahitya Tummala 	if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
12091e78e8bdSSahitya Tummala 		f2fs_wait_on_block_writeback_range(inode,
12101e78e8bdSSahitya Tummala 						map->m_pblk, map->m_len);
12111e78e8bdSSahitya Tummala 
1212c4020b2dSChao Yu 	if (flag == F2FS_GET_BLOCK_PRECACHE) {
1213c4020b2dSChao Yu 		if (map->m_flags & F2FS_MAP_MAPPED) {
1214c4020b2dSChao Yu 			unsigned int ofs = start_pgofs - map->m_lblk;
1215c4020b2dSChao Yu 
1216c4020b2dSChao Yu 			f2fs_update_extent_cache_range(&dn,
1217c4020b2dSChao Yu 				start_pgofs, map->m_pblk + ofs,
1218c4020b2dSChao Yu 				map->m_len - ofs);
1219c4020b2dSChao Yu 		}
1220c4020b2dSChao Yu 		if (map->m_next_extent)
1221c4020b2dSChao Yu 			*map->m_next_extent = pgofs + 1;
1222c4020b2dSChao Yu 	}
1223bfad7c2dSJaegeuk Kim 	f2fs_put_dnode(&dn);
1224bfad7c2dSJaegeuk Kim unlock_out:
1225f9d6d059SChao Yu 	if (map->m_may_create) {
122659c9081bSYunlei He 		__do_map_lock(sbi, flag, false);
12276f2d8ed6SChao Yu 		f2fs_balance_fs(sbi, dn.node_changed);
12282a340760SJaegeuk Kim 	}
1229bfad7c2dSJaegeuk Kim out:
1230003a3e1dSJaegeuk Kim 	trace_f2fs_map_blocks(inode, map, err);
1231bfad7c2dSJaegeuk Kim 	return err;
1232eb47b800SJaegeuk Kim }
1233eb47b800SJaegeuk Kim 
1234b91050a8SHyunchul Lee bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1235b91050a8SHyunchul Lee {
1236b91050a8SHyunchul Lee 	struct f2fs_map_blocks map;
1237b91050a8SHyunchul Lee 	block_t last_lblk;
1238b91050a8SHyunchul Lee 	int err;
1239b91050a8SHyunchul Lee 
1240b91050a8SHyunchul Lee 	if (pos + len > i_size_read(inode))
1241b91050a8SHyunchul Lee 		return false;
1242b91050a8SHyunchul Lee 
1243b91050a8SHyunchul Lee 	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1244b91050a8SHyunchul Lee 	map.m_next_pgofs = NULL;
1245b91050a8SHyunchul Lee 	map.m_next_extent = NULL;
1246b91050a8SHyunchul Lee 	map.m_seg_type = NO_CHECK_TYPE;
1247b91050a8SHyunchul Lee 	last_lblk = F2FS_BLK_ALIGN(pos + len);
1248b91050a8SHyunchul Lee 
1249b91050a8SHyunchul Lee 	while (map.m_lblk < last_lblk) {
1250b91050a8SHyunchul Lee 		map.m_len = last_lblk - map.m_lblk;
1251b91050a8SHyunchul Lee 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
1252b91050a8SHyunchul Lee 		if (err || map.m_len == 0)
1253b91050a8SHyunchul Lee 			return false;
1254b91050a8SHyunchul Lee 		map.m_lblk += map.m_len;
1255b91050a8SHyunchul Lee 	}
1256b91050a8SHyunchul Lee 	return true;
1257b91050a8SHyunchul Lee }
1258b91050a8SHyunchul Lee 
/*
 * buffer_head adapter around f2fs_map_blocks(): translate @bh (logical
 * block @iblock, size b_size) into an f2fs_map_blocks request and copy
 * the resulting extent back into @bh on success.
 */
1259003a3e1dSJaegeuk Kim static int __get_data_block(struct inode *inode, sector_t iblock,
1260da85985cSChao Yu 			struct buffer_head *bh, int create, int flag,
1261f9d6d059SChao Yu 			pgoff_t *next_pgofs, int seg_type, bool may_write)
1262003a3e1dSJaegeuk Kim {
1263003a3e1dSJaegeuk Kim 	struct f2fs_map_blocks map;
1264a7de6086SJaegeuk Kim 	int err;
1265003a3e1dSJaegeuk Kim 
1266003a3e1dSJaegeuk Kim 	map.m_lblk = iblock;
1267003a3e1dSJaegeuk Kim 	map.m_len = bh->b_size >> inode->i_blkbits;
1268da85985cSChao Yu 	map.m_next_pgofs = next_pgofs;
1269c4020b2dSChao Yu 	map.m_next_extent = NULL;
1270d5097be5SHyunchul Lee 	map.m_seg_type = seg_type;
1271f9d6d059SChao Yu 	map.m_may_create = may_write;
1272003a3e1dSJaegeuk Kim 
1273a7de6086SJaegeuk Kim 	err = f2fs_map_blocks(inode, &map, create, flag);
1274a7de6086SJaegeuk Kim 	if (!err) {
1275003a3e1dSJaegeuk Kim 		map_bh(bh, inode->i_sb, map.m_pblk);
		/* propagate F2FS_MAP_* into the bh state bits without clobbering others */
1276003a3e1dSJaegeuk Kim 		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
1277b86e3307SWei Fang 		bh->b_size = (u64)map.m_len << inode->i_blkbits;
1278003a3e1dSJaegeuk Kim 	}
1279a7de6086SJaegeuk Kim 	return err;
1280003a3e1dSJaegeuk Kim }
1281003a3e1dSJaegeuk Kim 
/*
 * Generic get_block_t-style wrapper: no segment-type hint; allocation is
 * allowed exactly when @create is set (passed as may_write too).
 */
1282ccfb3000SJaegeuk Kim static int get_data_block(struct inode *inode, sector_t iblock,
1283da85985cSChao Yu 			struct buffer_head *bh_result, int create, int flag,
1284da85985cSChao Yu 			pgoff_t *next_pgofs)
1285ccfb3000SJaegeuk Kim {
1286da85985cSChao Yu 	return __get_data_block(inode, iblock, bh_result, create,
1287d5097be5SHyunchul Lee 							flag, next_pgofs,
1288f9d6d059SChao Yu 							NO_CHECK_TYPE, create);
1289f9d6d059SChao Yu }
1290f9d6d059SChao Yu 
/*
 * get_block_t for direct-I/O writes: may allocate blocks (may_write =
 * true) and places them per the inode's write hint.
 */
1291f9d6d059SChao Yu static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
1292f9d6d059SChao Yu 			struct buffer_head *bh_result, int create)
1293f9d6d059SChao Yu {
1294f9d6d059SChao Yu 	return __get_data_block(inode, iblock, bh_result, create,
1295f9d6d059SChao Yu 				F2FS_GET_BLOCK_DIO, NULL,
1296f9d6d059SChao Yu 				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1297f9d6d059SChao Yu 				true);
1298ccfb3000SJaegeuk Kim }
1299ccfb3000SJaegeuk Kim 
/*
 * get_block_t for direct-I/O reads: lookup only (may_write = false),
 * same DIO flag and write-hint segment type as the write variant.
 */
1300e2b4e2bcSChao Yu static int get_data_block_dio(struct inode *inode, sector_t iblock,
1301ccfb3000SJaegeuk Kim 			struct buffer_head *bh_result, int create)
1302ccfb3000SJaegeuk Kim {
1303e2b4e2bcSChao Yu 	return __get_data_block(inode, iblock, bh_result, create,
13040a4daae5SJaegeuk Kim 				F2FS_GET_BLOCK_DIO, NULL,
1305f9d6d059SChao Yu 				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1306f9d6d059SChao Yu 				false);
1307e2b4e2bcSChao Yu }
1308e2b4e2bcSChao Yu 
/*
 * get_block_t for the bmap ioctl: rejects blocks beyond the filesystem's
 * maximum file size with -EFBIG, then performs a BMAP-mode lookup.
 */
1309e2b4e2bcSChao Yu static int get_data_block_bmap(struct inode *inode, sector_t iblock,
1310e2b4e2bcSChao Yu 			struct buffer_head *bh_result, int create)
1311e2b4e2bcSChao Yu {
1312179448bfSYunlei He 	/* Block number less than F2FS MAX BLOCKS */
1313e0afc4d6SChao Yu 	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
1314179448bfSYunlei He 		return -EFBIG;
1315179448bfSYunlei He 
1316e2b4e2bcSChao Yu 	return __get_data_block(inode, iblock, bh_result, create,
1317d5097be5SHyunchul Lee 						F2FS_GET_BLOCK_BMAP, NULL,
1318f9d6d059SChao Yu 						NO_CHECK_TYPE, create);
1319ccfb3000SJaegeuk Kim }
1320ccfb3000SJaegeuk Kim 
/* Convert a byte offset in the file to its logical block number. */
13217f63eb77SJaegeuk Kim static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
13227f63eb77SJaegeuk Kim {
13237f63eb77SJaegeuk Kim 	return (offset >> inode->i_blkbits);
13247f63eb77SJaegeuk Kim }
13257f63eb77SJaegeuk Kim 
/* Convert a logical block number back to a byte offset in the file. */
13267f63eb77SJaegeuk Kim static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
13277f63eb77SJaegeuk Kim {
13287f63eb77SJaegeuk Kim 	return (blk << inode->i_blkbits);
13297f63eb77SJaegeuk Kim }
13307f63eb77SJaegeuk Kim 
/*
 * Report the on-disk location of the inode's xattr data for
 * FIEMAP_FLAG_XATTR requests.  Emits up to two extents: one for the
 * inline xattr area inside the inode block, and one for the separate
 * xattr node block (if i_xattr_nid is set).
 *
 * Returns 0 on success (including the "extent array full" case signalled
 * by fiemap_fill_next_extent() returning 1), or a negative errno.
 */
static int f2fs_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;
	struct node_info ni;
	__u64 phys = 0, len;
	__u32 flags;
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	if (f2fs_has_inline_xattr(inode)) {
		int offset;

		/* pin the inode's node page while we read its block address */
		page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
						inode->i_ino, false);
		if (!page)
			return -ENOMEM;

		err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
		if (err) {
			f2fs_put_page(page, 1);
			return err;
		}

		/*
		 * The inline xattr region lives at the tail of the inode
		 * block: physical address of the block plus the byte offset
		 * of the reserved inline-xattr slots inside i_addr[].
		 */
		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
		offset = offsetof(struct f2fs_inode, i_addr) +
					sizeof(__le32) * (DEF_ADDRS_PER_INODE -
					get_inline_xattr_addrs(inode));

		phys += offset;
		len = inline_xattr_size(inode);

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;

		/* no separate xattr node -> this is the last extent */
		if (!xnid)
			flags |= FIEMAP_EXTENT_LAST;

		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
		/* err == 1 means the user's extent array is full: stop */
		if (err || err == 1)
			return err;
	}

	if (xnid) {
		page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
		if (!page)
			return -ENOMEM;

		err = f2fs_get_node_info(sbi, xnid, &ni);
		if (err) {
			f2fs_put_page(page, 1);
			return err;
		}

		/* the xattr node occupies one full block */
		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
		len = inode->i_sb->s_blocksize;

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_LAST;
	}

	/* phys != 0 iff one of the branches above prepared an extent */
	if (phys)
		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);

	return (err < 0 ? err : 0);
}
1400442a9dbdSChao Yu 
/*
 * ->fiemap implementation: walk the file's block mappings and report
 * extents to userspace.  Extent emission is deferred by one iteration:
 * each pass maps the next range, and the *previous* range (logical/phys/
 * size/flags) is flushed once we know whether it was the last one.
 *
 * Returns 0 on success (fiemap_fill_next_extent()'s "array full" return
 * of 1 is translated to 0 at 'out'), or a negative errno.
 */
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	/* populate the extent cache first if the caller asked for it */
	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
		ret = f2fs_precache_extents(inode);
		if (ret)
			return ret;
	}

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR);
	if (ret)
		return ret;

	inode_lock(inode);

	/* xattr mapping is handled by a dedicated helper */
	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		ret = f2fs_xattr_fiemap(inode, fieinfo);
		goto out;
	}

	/* inline data has its own reporting path; -EAGAIN means "not inline" */
	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			goto out;
	}

	/* round a sub-block request up to one full block */
	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		/* skip ahead to the next mapped offset reported by the map */
		start_blk = next_pgofs;

		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
					F2FS_I_SB(inode)->max_file_blocks))
			goto prep_next;

		/* hole extends to EOF: the pending extent is the last one */
		flags |= FIEMAP_EXTENT_LAST;
	}

	/* flush the extent collected on the previous iteration, if any */
	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	/* ret == 1 (array full) or past the requested range: stop */
	if (start_blk > last_blk || ret)
		goto out;

	/* stash the freshly mapped range; it is emitted next iteration */
	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	/* 1 from fiemap_fill_next_extent() just means "stop", not failure */
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}
14939ab70134SJaegeuk Kim 
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 *
 * Note that the aops->readpages() function is ONLY used for read-ahead. If
 * this function ever deviates from doing just read-ahead, it should either
 * use ->readpage() or do the necessary surgery to decouple ->readpages()
 * from read-ahead.
 *
 * Serves both ->readpage (pages == NULL, single @page) and ->readpages
 * (@pages list, @page == NULL).  Contiguous blocks are batched into one
 * bio; a bio is submitted whenever the next block is discontiguous or
 * lands on a different device.  Returns 0; per-page errors are reported
 * via SetPageError on the affected page.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages, bool is_readahead)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;

	for (; nr_pages; nr_pages--) {
		if (pages) {
			/* readpages path: pull pages off the LRU list */
			page = list_last_entry(pages, struct page, lru);

			prefetchw(&page->flags);
			list_del(&page->lru);
			/* skip pages that raced into the page cache already */
			if (add_to_page_cache_lru(page, mapping,
						  page->index,
						  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		/* never read past EOF */
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_DEFAULT))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			/* try cleancache before issuing real I/O */
			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}

			/* sanity-check the block address against the sb */
			if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
								DATA_GENERIC))
				goto set_error_page;
		} else {
			/* hole: hand back a zero-filled, uptodate page */
			zero_user_segment(page, 0, PAGE_SIZE);
			if (!PageUptodate(page))
				SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1 ||
			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		if (bio == NULL) {
			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
					is_readahead ? REQ_RAHEAD : 0);
			if (IS_ERR(bio)) {
				bio = NULL;
				goto set_error_page;
			}
		}

		/*
		 * If the page is under writeback, we need to wait for
		 * its completion to see the correct decrypted data.
		 */
		f2fs_wait_on_block_writeback(inode, block_nr);

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
		ClearPageError(page);
		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		/* page became uptodate without I/O: flush any pending bio */
		if (bio) {
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}
1641f1e88660SJaegeuk Kim 
1642eb47b800SJaegeuk Kim static int f2fs_read_data_page(struct file *file, struct page *page)
1643eb47b800SJaegeuk Kim {
16449ffe0fb5SHuajun Li 	struct inode *inode = page->mapping->host;
1645b3d208f9SJaegeuk Kim 	int ret = -EAGAIN;
16469ffe0fb5SHuajun Li 
1647c20e89cdSChao Yu 	trace_f2fs_readpage(page, DATA);
1648c20e89cdSChao Yu 
1649e1c42045Sarter97 	/* If the file has inline data, try to read it directly */
16509ffe0fb5SHuajun Li 	if (f2fs_has_inline_data(inode))
16519ffe0fb5SHuajun Li 		ret = f2fs_read_inline_data(inode, page);
1652b3d208f9SJaegeuk Kim 	if (ret == -EAGAIN)
1653e2e59414SJaegeuk Kim 		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1, false);
16549ffe0fb5SHuajun Li 	return ret;
1655eb47b800SJaegeuk Kim }
1656eb47b800SJaegeuk Kim 
1657eb47b800SJaegeuk Kim static int f2fs_read_data_pages(struct file *file,
1658eb47b800SJaegeuk Kim 			struct address_space *mapping,
1659eb47b800SJaegeuk Kim 			struct list_head *pages, unsigned nr_pages)
1660eb47b800SJaegeuk Kim {
166171cb4affSHsiang Kao 	struct inode *inode = mapping->host;
1662939afa94SChao Yu 	struct page *page = list_last_entry(pages, struct page, lru);
1663b8c29400SChao Yu 
1664b8c29400SChao Yu 	trace_f2fs_readpages(inode, page, nr_pages);
16659ffe0fb5SHuajun Li 
16669ffe0fb5SHuajun Li 	/* If the file has inline data, skip readpages */
16679ffe0fb5SHuajun Li 	if (f2fs_has_inline_data(inode))
16689ffe0fb5SHuajun Li 		return 0;
16699ffe0fb5SHuajun Li 
1670e2e59414SJaegeuk Kim 	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true);
1671eb47b800SJaegeuk Kim }
1672eb47b800SJaegeuk Kim 
/*
 * Encrypt fio->page into a bounce page (fio->encrypted_page) before
 * writeback.  No-op for unencrypted files.  On -ENOMEM the allocation is
 * retried with __GFP_NOFAIL after flushing merged writes and backing off.
 *
 * If the old block still has a cached copy in META_MAPPING (left there by
 * GC), the fresh ciphertext is copied over it so that copy stays coherent.
 *
 * Returns 0 on success or a negative errno from the encryption layer.
 */
static int encrypt_one_page(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;
	struct page *mpage;
	gfp_t gfp_flags = GFP_NOFS;

	if (!f2fs_encrypted_file(inode))
		return 0;

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);

retry_encrypt:
	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
			PAGE_SIZE, 0, fio->page->index, gfp_flags);
	if (IS_ERR(fio->encrypted_page)) {
		/* flush pending IOs and wait for a while in the ENOMEM case */
		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
			f2fs_flush_merged_writes(fio->sbi);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			/* second attempt must not fail on allocation */
			gfp_flags |= __GFP_NOFAIL;
			goto retry_encrypt;
		}
		return PTR_ERR(fio->encrypted_page);
	}

	/* keep any GC-cached meta copy of the old block up to date */
	mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
	if (mpage) {
		if (PageUptodate(mpage))
			memcpy(page_address(mpage),
				page_address(fio->encrypted_page), PAGE_SIZE);
		f2fs_put_page(mpage, 1);
	}
	return 0;
}
17086aa58d8aSChao Yu 
1709bb9e3bb8SChao Yu static inline bool check_inplace_update_policy(struct inode *inode,
1710bb9e3bb8SChao Yu 				struct f2fs_io_info *fio)
1711bb9e3bb8SChao Yu {
1712bb9e3bb8SChao Yu 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1713bb9e3bb8SChao Yu 	unsigned int policy = SM_I(sbi)->ipu_policy;
1714bb9e3bb8SChao Yu 
1715bb9e3bb8SChao Yu 	if (policy & (0x1 << F2FS_IPU_FORCE))
1716bb9e3bb8SChao Yu 		return true;
17174d57b86dSChao Yu 	if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
1718bb9e3bb8SChao Yu 		return true;
1719bb9e3bb8SChao Yu 	if (policy & (0x1 << F2FS_IPU_UTIL) &&
1720bb9e3bb8SChao Yu 			utilization(sbi) > SM_I(sbi)->min_ipu_util)
1721bb9e3bb8SChao Yu 		return true;
17224d57b86dSChao Yu 	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
1723bb9e3bb8SChao Yu 			utilization(sbi) > SM_I(sbi)->min_ipu_util)
1724bb9e3bb8SChao Yu 		return true;
1725bb9e3bb8SChao Yu 
1726bb9e3bb8SChao Yu 	/*
1727bb9e3bb8SChao Yu 	 * IPU for rewrite async pages
1728bb9e3bb8SChao Yu 	 */
1729bb9e3bb8SChao Yu 	if (policy & (0x1 << F2FS_IPU_ASYNC) &&
1730bb9e3bb8SChao Yu 			fio && fio->op == REQ_OP_WRITE &&
1731bb9e3bb8SChao Yu 			!(fio->op_flags & REQ_SYNC) &&
1732bb9e3bb8SChao Yu 			!f2fs_encrypted_inode(inode))
1733bb9e3bb8SChao Yu 		return true;
1734bb9e3bb8SChao Yu 
1735bb9e3bb8SChao Yu 	/* this is only set during fdatasync */
1736bb9e3bb8SChao Yu 	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
1737bb9e3bb8SChao Yu 			is_inode_flag_set(inode, FI_NEED_IPU))
1738bb9e3bb8SChao Yu 		return true;
1739bb9e3bb8SChao Yu 
17404354994fSDaniel Rosenberg 	if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
17414354994fSDaniel Rosenberg 			!f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
17424354994fSDaniel Rosenberg 		return true;
17434354994fSDaniel Rosenberg 
1744bb9e3bb8SChao Yu 	return false;
1745bb9e3bb8SChao Yu }
1746bb9e3bb8SChao Yu 
17474d57b86dSChao Yu bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
1748bb9e3bb8SChao Yu {
1749bb9e3bb8SChao Yu 	if (f2fs_is_pinned_file(inode))
1750bb9e3bb8SChao Yu 		return true;
1751bb9e3bb8SChao Yu 
1752bb9e3bb8SChao Yu 	/* if this is cold file, we should overwrite to avoid fragmentation */
1753bb9e3bb8SChao Yu 	if (file_is_cold(inode))
1754bb9e3bb8SChao Yu 		return true;
1755bb9e3bb8SChao Yu 
1756bb9e3bb8SChao Yu 	return check_inplace_update_policy(inode, fio);
1757bb9e3bb8SChao Yu }
1758bb9e3bb8SChao Yu 
17594d57b86dSChao Yu bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
1760bb9e3bb8SChao Yu {
1761bb9e3bb8SChao Yu 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1762bb9e3bb8SChao Yu 
1763bb9e3bb8SChao Yu 	if (test_opt(sbi, LFS))
1764bb9e3bb8SChao Yu 		return true;
1765bb9e3bb8SChao Yu 	if (S_ISDIR(inode->i_mode))
1766bb9e3bb8SChao Yu 		return true;
1767af033b2aSChao Yu 	if (IS_NOQUOTA(inode))
1768af033b2aSChao Yu 		return true;
1769bb9e3bb8SChao Yu 	if (f2fs_is_atomic_file(inode))
1770bb9e3bb8SChao Yu 		return true;
1771bb9e3bb8SChao Yu 	if (fio) {
1772bb9e3bb8SChao Yu 		if (is_cold_data(fio->page))
1773bb9e3bb8SChao Yu 			return true;
1774bb9e3bb8SChao Yu 		if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
1775bb9e3bb8SChao Yu 			return true;
17764354994fSDaniel Rosenberg 		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
17774354994fSDaniel Rosenberg 			f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
17784354994fSDaniel Rosenberg 			return true;
1779bb9e3bb8SChao Yu 	}
1780bb9e3bb8SChao Yu 	return false;
1781bb9e3bb8SChao Yu }
1782bb9e3bb8SChao Yu 
17837eab0c0dSHou Pengyang static inline bool need_inplace_update(struct f2fs_io_info *fio)
17847eab0c0dSHou Pengyang {
17857eab0c0dSHou Pengyang 	struct inode *inode = fio->page->mapping->host;
17867eab0c0dSHou Pengyang 
17874d57b86dSChao Yu 	if (f2fs_should_update_outplace(inode, fio))
17887eab0c0dSHou Pengyang 		return false;
17897eab0c0dSHou Pengyang 
17904d57b86dSChao Yu 	return f2fs_should_update_inplace(inode, fio);
17917eab0c0dSHou Pengyang }
17927eab0c0dSHou Pengyang 
/*
 * Write one dirty data page, choosing between in-place update (IPU) and
 * out-of-place update (OPU, the LFS path).  fio->need_lock encodes the
 * f2fs_lock_op() state machine: LOCK_REQ means we must take it here,
 * LOCK_RETRY means try without it first, LOCK_DONE means the caller (or
 * the extent-cache fast path) already settled it.
 *
 * Returns 0 on success, -EAGAIN when the op lock could not be taken
 * (caller retries), -EFAULT on an invalid block address, or another
 * negative errno.
 */
int f2fs_do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	struct extent_info ei = {0,0,0};
	struct node_info ni;
	bool ipu_force = false;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	/*
	 * Fast path: if the extent cache already knows the old block
	 * address and policy says IPU, skip the dnode lookup entirely.
	 */
	if (need_inplace_update(fio) &&
			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
		fio->old_blkaddr = ei.blk + page->index - ei.fofs;

		if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
							DATA_GENERIC))
			return -EFAULT;

		ipu_force = true;
		fio->need_lock = LOCK_DONE;
		goto got_it;
	}

	/* Deadlock due to between page->lock and f2fs_lock_op */
	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
		return -EAGAIN;

	err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		goto out;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		clear_cold_data(page);
		goto out_writepage;
	}
got_it:
	/* reject addresses that fall outside the valid data area */
	if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
		!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
							DATA_GENERIC)) {
		err = -EFAULT;
		goto out_writepage;
	}
	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
	if (ipu_force || (is_valid_data_blkaddr(fio->sbi, fio->old_blkaddr) &&
					need_inplace_update(fio))) {
		err = encrypt_one_page(fio);
		if (err)
			goto out_writepage;

		set_page_writeback(page);
		ClearPageError(page);
		/* IPU needs neither the dnode nor the op lock any longer */
		f2fs_put_dnode(&dn);
		if (fio->need_lock == LOCK_REQ)
			f2fs_unlock_op(fio->sbi);
		err = f2fs_inplace_write_data(fio);
		trace_f2fs_do_write_data_page(fio->page, IPU);
		set_inode_flag(inode, FI_UPDATE_WRITE);
		return err;
	}

	/* OPU path requires the op lock; upgrade a LOCK_RETRY attempt */
	if (fio->need_lock == LOCK_RETRY) {
		if (!f2fs_trylock_op(fio->sbi)) {
			err = -EAGAIN;
			goto out_writepage;
		}
		fio->need_lock = LOCK_REQ;
	}

	err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
	if (err)
		goto out_writepage;

	fio->version = ni.version;

	err = encrypt_one_page(fio);
	if (err)
		goto out_writepage;

	set_page_writeback(page);
	ClearPageError(page);

	/* LFS mode write path */
	f2fs_outplace_write_data(&dn, fio);
	trace_f2fs_do_write_data_page(page, OPU);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
out_writepage:
	f2fs_put_dnode(&dn);
out:
	if (fio->need_lock == LOCK_REQ)
		f2fs_unlock_op(fio->sbi);
	return err;
}
1895eb47b800SJaegeuk Kim 
1896d68f735bSJaegeuk Kim static int __write_data_page(struct page *page, bool *submitted,
1897b0af6d49SChao Yu 				struct writeback_control *wbc,
1898b0af6d49SChao Yu 				enum iostat_type io_type)
1899eb47b800SJaegeuk Kim {
1900eb47b800SJaegeuk Kim 	struct inode *inode = page->mapping->host;
19014081363fSJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1902eb47b800SJaegeuk Kim 	loff_t i_size = i_size_read(inode);
1903eb47b800SJaegeuk Kim 	const pgoff_t end_index = ((unsigned long long) i_size)
190409cbfeafSKirill A. Shutemov 							>> PAGE_SHIFT;
190526de9b11SJaegeuk Kim 	loff_t psize = (page->index + 1) << PAGE_SHIFT;
19069ffe0fb5SHuajun Li 	unsigned offset = 0;
190739936837SJaegeuk Kim 	bool need_balance_fs = false;
1908eb47b800SJaegeuk Kim 	int err = 0;
1909458e6197SJaegeuk Kim 	struct f2fs_io_info fio = {
191005ca3632SJaegeuk Kim 		.sbi = sbi,
191139d787beSChao Yu 		.ino = inode->i_ino,
1912458e6197SJaegeuk Kim 		.type = DATA,
191304d328deSMike Christie 		.op = REQ_OP_WRITE,
19147637241eSJens Axboe 		.op_flags = wbc_to_write_flags(wbc),
1915e959c8f5SHou Pengyang 		.old_blkaddr = NULL_ADDR,
191605ca3632SJaegeuk Kim 		.page = page,
19174375a336SJaegeuk Kim 		.encrypted_page = NULL,
1918d68f735bSJaegeuk Kim 		.submitted = false,
1919cc15620bSJaegeuk Kim 		.need_lock = LOCK_RETRY,
1920b0af6d49SChao Yu 		.io_type = io_type,
1921578c6478SYufen Yu 		.io_wbc = wbc,
1922458e6197SJaegeuk Kim 	};
1923eb47b800SJaegeuk Kim 
1924ecda0de3SChao Yu 	trace_f2fs_writepage(page, DATA);
1925ecda0de3SChao Yu 
1926db198ae0SChao Yu 	/* we should bypass data pages to proceed the kworkder jobs */
1927db198ae0SChao Yu 	if (unlikely(f2fs_cp_error(sbi))) {
1928db198ae0SChao Yu 		mapping_set_error(page->mapping, -EIO);
19291174abfdSChao Yu 		/*
19301174abfdSChao Yu 		 * don't drop any dirty dentry pages for keeping lastest
19311174abfdSChao Yu 		 * directory structure.
19321174abfdSChao Yu 		 */
19331174abfdSChao Yu 		if (S_ISDIR(inode->i_mode))
19341174abfdSChao Yu 			goto redirty_out;
1935db198ae0SChao Yu 		goto out;
1936db198ae0SChao Yu 	}
1937db198ae0SChao Yu 
19380771fcc7SChao Yu 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
19390771fcc7SChao Yu 		goto redirty_out;
19400771fcc7SChao Yu 
1941eb47b800SJaegeuk Kim 	if (page->index < end_index)
194239936837SJaegeuk Kim 		goto write;
1943eb47b800SJaegeuk Kim 
1944eb47b800SJaegeuk Kim 	/*
1945eb47b800SJaegeuk Kim 	 * If the offset is out-of-range of file size,
1946eb47b800SJaegeuk Kim 	 * this page does not have to be written to disk.
1947eb47b800SJaegeuk Kim 	 */
194809cbfeafSKirill A. Shutemov 	offset = i_size & (PAGE_SIZE - 1);
194976f60268SJaegeuk Kim 	if ((page->index >= end_index + 1) || !offset)
195039936837SJaegeuk Kim 		goto out;
1951eb47b800SJaegeuk Kim 
195209cbfeafSKirill A. Shutemov 	zero_user_segment(page, offset, PAGE_SIZE);
195339936837SJaegeuk Kim write:
19541e84371fSJaegeuk Kim 	if (f2fs_is_drop_cache(inode))
19551e84371fSJaegeuk Kim 		goto out;
1956e6e5f561SJaegeuk Kim 	/* we should not write 0'th page having journal header */
1957e6e5f561SJaegeuk Kim 	if (f2fs_is_volatile_file(inode) && (!page->index ||
1958e6e5f561SJaegeuk Kim 			(!wbc->for_reclaim &&
19594d57b86dSChao Yu 			f2fs_available_free_memory(sbi, BASE_CHECK))))
19601e84371fSJaegeuk Kim 		goto redirty_out;
1961eb47b800SJaegeuk Kim 
1962b230e6caSJaegeuk Kim 	/* Dentry blocks are controlled by checkpoint */
1963b230e6caSJaegeuk Kim 	if (S_ISDIR(inode->i_mode)) {
1964cc15620bSJaegeuk Kim 		fio.need_lock = LOCK_DONE;
19654d57b86dSChao Yu 		err = f2fs_do_write_data_page(&fio);
1966b230e6caSJaegeuk Kim 		goto done;
1967b230e6caSJaegeuk Kim 	}
1968b230e6caSJaegeuk Kim 
19698618b881SJaegeuk Kim 	if (!wbc->for_reclaim)
197039936837SJaegeuk Kim 		need_balance_fs = true;
19717f3037a5SJaegeuk Kim 	else if (has_not_enough_free_secs(sbi, 0, 0))
197239936837SJaegeuk Kim 		goto redirty_out;
1973ef095d19SJaegeuk Kim 	else
1974ef095d19SJaegeuk Kim 		set_inode_flag(inode, FI_HOT_DATA);
1975eb47b800SJaegeuk Kim 
1976b3d208f9SJaegeuk Kim 	err = -EAGAIN;
1977dd7b2333SYunlei He 	if (f2fs_has_inline_data(inode)) {
1978b3d208f9SJaegeuk Kim 		err = f2fs_write_inline_data(inode, page);
1979dd7b2333SYunlei He 		if (!err)
1980dd7b2333SYunlei He 			goto out;
1981dd7b2333SYunlei He 	}
1982279d6df2SHou Pengyang 
1983cc15620bSJaegeuk Kim 	if (err == -EAGAIN) {
19844d57b86dSChao Yu 		err = f2fs_do_write_data_page(&fio);
1985cc15620bSJaegeuk Kim 		if (err == -EAGAIN) {
1986cc15620bSJaegeuk Kim 			fio.need_lock = LOCK_REQ;
19874d57b86dSChao Yu 			err = f2fs_do_write_data_page(&fio);
1988cc15620bSJaegeuk Kim 		}
1989cc15620bSJaegeuk Kim 	}
1990a0d00fadSChao Yu 
1991eb449797SChao Yu 	if (err) {
1992eb449797SChao Yu 		file_set_keep_isize(inode);
1993eb449797SChao Yu 	} else {
1994a0d00fadSChao Yu 		down_write(&F2FS_I(inode)->i_sem);
199526de9b11SJaegeuk Kim 		if (F2FS_I(inode)->last_disk_size < psize)
199626de9b11SJaegeuk Kim 			F2FS_I(inode)->last_disk_size = psize;
1997a0d00fadSChao Yu 		up_write(&F2FS_I(inode)->i_sem);
1998eb449797SChao Yu 	}
1999279d6df2SHou Pengyang 
20008618b881SJaegeuk Kim done:
20018618b881SJaegeuk Kim 	if (err && err != -ENOENT)
20028618b881SJaegeuk Kim 		goto redirty_out;
2003eb47b800SJaegeuk Kim 
200439936837SJaegeuk Kim out:
2005a7ffdbe2SJaegeuk Kim 	inode_dec_dirty_pages(inode);
20062baf0781SChao Yu 	if (err) {
20072bca1e23SJaegeuk Kim 		ClearPageUptodate(page);
20082baf0781SChao Yu 		clear_cold_data(page);
20092baf0781SChao Yu 	}
20100c3a5797SChao Yu 
20110c3a5797SChao Yu 	if (wbc->for_reclaim) {
2012bab475c5SChao Yu 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
2013ef095d19SJaegeuk Kim 		clear_inode_flag(inode, FI_HOT_DATA);
20144d57b86dSChao Yu 		f2fs_remove_dirty_inode(inode);
2015d68f735bSJaegeuk Kim 		submitted = NULL;
2016eb7e813cSChao Yu 	}
20170c3a5797SChao Yu 
20180c3a5797SChao Yu 	unlock_page(page);
2019af033b2aSChao Yu 	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode))
20200c3a5797SChao Yu 		f2fs_balance_fs(sbi, need_balance_fs);
20210c3a5797SChao Yu 
2022d68f735bSJaegeuk Kim 	if (unlikely(f2fs_cp_error(sbi))) {
2023b9109b0eSJaegeuk Kim 		f2fs_submit_merged_write(sbi, DATA);
2024d68f735bSJaegeuk Kim 		submitted = NULL;
2025d68f735bSJaegeuk Kim 	}
2026d68f735bSJaegeuk Kim 
2027d68f735bSJaegeuk Kim 	if (submitted)
2028d68f735bSJaegeuk Kim 		*submitted = fio.submitted;
20290c3a5797SChao Yu 
2030eb47b800SJaegeuk Kim 	return 0;
2031eb47b800SJaegeuk Kim 
2032eb47b800SJaegeuk Kim redirty_out:
203376f60268SJaegeuk Kim 	redirty_page_for_writepage(wbc, page);
20345b19d284SJaegeuk Kim 	/*
20355b19d284SJaegeuk Kim 	 * pageout() in MM translates EAGAIN, so calls handle_write_error()
20365b19d284SJaegeuk Kim 	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
20375b19d284SJaegeuk Kim 	 * file_write_and_wait_range() will see EIO error, which is critical
20385b19d284SJaegeuk Kim 	 * to return value of fsync() followed by atomic_write failure to user.
20395b19d284SJaegeuk Kim 	 */
20405b19d284SJaegeuk Kim 	if (!err || wbc->for_reclaim)
20410002b61bSChao Yu 		return AOP_WRITEPAGE_ACTIVATE;
2042b230e6caSJaegeuk Kim 	unlock_page(page);
2043b230e6caSJaegeuk Kim 	return err;
2044fa9150a8SNamjae Jeon }
2045fa9150a8SNamjae Jeon 
/*
 * ->writepage hook for regular data pages: delegates to __write_data_page(),
 * charging the IO to the FS_DATA_IO iostat bucket and discarding the
 * per-page "submitted" result (only ->writepages callers need it).
 */
2046f566bae8SJaegeuk Kim static int f2fs_write_data_page(struct page *page,
2047f566bae8SJaegeuk Kim 					struct writeback_control *wbc)
2048f566bae8SJaegeuk Kim {
2049b0af6d49SChao Yu 	return __write_data_page(page, NULL, wbc, FS_DATA_IO);
2050f566bae8SJaegeuk Kim }
2051f566bae8SJaegeuk Kim 
20528f46dcaeSChao Yu /*
20538f46dcaeSChao Yu  * This function was copied from write_cache_pages in mm/page-writeback.c.
20548f46dcaeSChao Yu  * The major change is making write step of cold data page separately from
20558f46dcaeSChao Yu  * warm/hot data page.
 *
 * Walks the dirty pages of @mapping (cyclically when wbc->range_cyclic),
 * writes each via __write_data_page() and finally submits any merged write
 * bios this walk produced.  WB_SYNC_NONE writers bail out early while a
 * WB_SYNC_ALL writer is pending (sbi->wb_sync_req[DATA]) so sync writeback
 * keeps priority.
20568f46dcaeSChao Yu  */
20578f46dcaeSChao Yu static int f2fs_write_cache_pages(struct address_space *mapping,
2058b0af6d49SChao Yu 					struct writeback_control *wbc,
2059b0af6d49SChao Yu 					enum iostat_type io_type)
20608f46dcaeSChao Yu {
20618f46dcaeSChao Yu 	int ret = 0;
20628f46dcaeSChao Yu 	int done = 0;
20638f46dcaeSChao Yu 	struct pagevec pvec;
2064c29fd0c0SChao Yu 	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
20658f46dcaeSChao Yu 	int nr_pages;
20668f46dcaeSChao Yu 	pgoff_t uninitialized_var(writeback_index);
20678f46dcaeSChao Yu 	pgoff_t index;
20688f46dcaeSChao Yu 	pgoff_t end;		/* Inclusive */
20698f46dcaeSChao Yu 	pgoff_t done_index;
20708f46dcaeSChao Yu 	int cycled;
20718f46dcaeSChao Yu 	int range_whole = 0;
207210bbd235SMatthew Wilcox 	xa_mark_t tag;
2073bab475c5SChao Yu 	int nwritten = 0;
20748f46dcaeSChao Yu 
207586679820SMel Gorman 	pagevec_init(&pvec);
207646ae957fSJaegeuk Kim 
	/* inodes with few dirty pages are treated as hot data */
2077ef095d19SJaegeuk Kim 	if (get_dirty_pages(mapping->host) <=
2078ef095d19SJaegeuk Kim 				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
2079ef095d19SJaegeuk Kim 		set_inode_flag(mapping->host, FI_HOT_DATA);
2080ef095d19SJaegeuk Kim 	else
2081ef095d19SJaegeuk Kim 		clear_inode_flag(mapping->host, FI_HOT_DATA);
2082ef095d19SJaegeuk Kim 
20838f46dcaeSChao Yu 	if (wbc->range_cyclic) {
20848f46dcaeSChao Yu 		writeback_index = mapping->writeback_index; /* prev offset */
20858f46dcaeSChao Yu 		index = writeback_index;
20868f46dcaeSChao Yu 		if (index == 0)
20878f46dcaeSChao Yu 			cycled = 1;
20888f46dcaeSChao Yu 		else
20898f46dcaeSChao Yu 			cycled = 0;
20908f46dcaeSChao Yu 		end = -1;
20918f46dcaeSChao Yu 	} else {
209209cbfeafSKirill A. Shutemov 		index = wbc->range_start >> PAGE_SHIFT;
209309cbfeafSKirill A. Shutemov 		end = wbc->range_end >> PAGE_SHIFT;
20948f46dcaeSChao Yu 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
20958f46dcaeSChao Yu 			range_whole = 1;
20968f46dcaeSChao Yu 		cycled = 1; /* ignore range_cyclic tests */
20978f46dcaeSChao Yu 	}
20988f46dcaeSChao Yu 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
20998f46dcaeSChao Yu 		tag = PAGECACHE_TAG_TOWRITE;
21008f46dcaeSChao Yu 	else
21018f46dcaeSChao Yu 		tag = PAGECACHE_TAG_DIRTY;
21028f46dcaeSChao Yu retry:
21038f46dcaeSChao Yu 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
21048f46dcaeSChao Yu 		tag_pages_for_writeback(mapping, index, end);
21058f46dcaeSChao Yu 	done_index = index;
21068f46dcaeSChao Yu 	while (!done && (index <= end)) {
21078f46dcaeSChao Yu 		int i;
21088f46dcaeSChao Yu 
210969c4f35dSJan Kara 		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
211067fd707fSJan Kara 				tag);
21118f46dcaeSChao Yu 		if (nr_pages == 0)
21128f46dcaeSChao Yu 			break;
21138f46dcaeSChao Yu 
21148f46dcaeSChao Yu 		for (i = 0; i < nr_pages; i++) {
21158f46dcaeSChao Yu 			struct page *page = pvec.pages[i];
2116d68f735bSJaegeuk Kim 			bool submitted = false;
21178f46dcaeSChao Yu 
2118f8de4331SChao Yu 			/* give a priority to WB_SYNC threads */
2119c29fd0c0SChao Yu 			if (atomic_read(&sbi->wb_sync_req[DATA]) &&
2120f8de4331SChao Yu 					wbc->sync_mode == WB_SYNC_NONE) {
2121f8de4331SChao Yu 				done = 1;
2122f8de4331SChao Yu 				break;
2123f8de4331SChao Yu 			}
2124f8de4331SChao Yu 
21258f46dcaeSChao Yu 			done_index = page->index;
2126d29460e5SJaegeuk Kim retry_write:
21278f46dcaeSChao Yu 			lock_page(page);
21288f46dcaeSChao Yu 
21298f46dcaeSChao Yu 			if (unlikely(page->mapping != mapping)) {
21308f46dcaeSChao Yu continue_unlock:
21318f46dcaeSChao Yu 				unlock_page(page);
21328f46dcaeSChao Yu 				continue;
21338f46dcaeSChao Yu 			}
21348f46dcaeSChao Yu 
21358f46dcaeSChao Yu 			if (!PageDirty(page)) {
21368f46dcaeSChao Yu 				/* someone wrote it for us */
21378f46dcaeSChao Yu 				goto continue_unlock;
21388f46dcaeSChao Yu 			}
21398f46dcaeSChao Yu 
21408f46dcaeSChao Yu 			if (PageWriteback(page)) {
21418f46dcaeSChao Yu 				if (wbc->sync_mode != WB_SYNC_NONE)
2142fec1d657SJaegeuk Kim 					f2fs_wait_on_page_writeback(page,
2143fec1d657SJaegeuk Kim 								DATA, true);
21448f46dcaeSChao Yu 				else
21458f46dcaeSChao Yu 					goto continue_unlock;
21468f46dcaeSChao Yu 			}
21478f46dcaeSChao Yu 
21488f46dcaeSChao Yu 			BUG_ON(PageWriteback(page));
21498f46dcaeSChao Yu 			if (!clear_page_dirty_for_io(page))
21508f46dcaeSChao Yu 				goto continue_unlock;
21518f46dcaeSChao Yu 
2152b0af6d49SChao Yu 			ret = __write_data_page(page, &submitted, wbc, io_type);
21538f46dcaeSChao Yu 			if (unlikely(ret)) {
21540002b61bSChao Yu 				/*
21550002b61bSChao Yu 				 * keep nr_to_write, since vfs uses this to
21560002b61bSChao Yu 				 * get # of written pages.
21570002b61bSChao Yu 				 */
21580002b61bSChao Yu 				if (ret == AOP_WRITEPAGE_ACTIVATE) {
21590002b61bSChao Yu 					unlock_page(page);
21600002b61bSChao Yu 					ret = 0;
21610002b61bSChao Yu 					continue;
2162d29460e5SJaegeuk Kim 				} else if (ret == -EAGAIN) {
2163d29460e5SJaegeuk Kim 					ret = 0;
2164d29460e5SJaegeuk Kim 					if (wbc->sync_mode == WB_SYNC_ALL) {
2165d29460e5SJaegeuk Kim 						cond_resched();
2166d29460e5SJaegeuk Kim 						congestion_wait(BLK_RW_ASYNC,
2167d29460e5SJaegeuk Kim 									HZ/50);
2168d29460e5SJaegeuk Kim 						goto retry_write;
2169d29460e5SJaegeuk Kim 					}
2170d29460e5SJaegeuk Kim 					continue;
21710002b61bSChao Yu 				}
21728f46dcaeSChao Yu 				done_index = page->index + 1;
21738f46dcaeSChao Yu 				done = 1;
21748f46dcaeSChao Yu 				break;
2175d68f735bSJaegeuk Kim 			} else if (submitted) {
2176bab475c5SChao Yu 				nwritten++;
21778f46dcaeSChao Yu 			}
21788f46dcaeSChao Yu 
2179f8de4331SChao Yu 			if (--wbc->nr_to_write <= 0 &&
21808f46dcaeSChao Yu 					wbc->sync_mode == WB_SYNC_NONE) {
21818f46dcaeSChao Yu 				done = 1;
21828f46dcaeSChao Yu 				break;
21838f46dcaeSChao Yu 			}
21848f46dcaeSChao Yu 		}
21858f46dcaeSChao Yu 		pagevec_release(&pvec);
21868f46dcaeSChao Yu 		cond_resched();
21878f46dcaeSChao Yu 	}
21888f46dcaeSChao Yu 
	/* second pass for a cyclic walk that started past index 0 */
21898f46dcaeSChao Yu 	if (!cycled && !done) {
21908f46dcaeSChao Yu 		cycled = 1;
21918f46dcaeSChao Yu 		index = 0;
21928f46dcaeSChao Yu 		end = writeback_index - 1;
21938f46dcaeSChao Yu 		goto retry;
21948f46dcaeSChao Yu 	}
21958f46dcaeSChao Yu 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
21968f46dcaeSChao Yu 		mapping->writeback_index = done_index;
21978f46dcaeSChao Yu 
	/* flush bios merged while writing the pages above */
2198bab475c5SChao Yu 	if (nwritten)
2199b9109b0eSJaegeuk Kim 		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
2200bab475c5SChao Yu 								NULL, 0, DATA);
22016ca56ca4SChao Yu 
22028f46dcaeSChao Yu 	return ret;
22038f46dcaeSChao Yu }
22048f46dcaeSChao Yu 
/*
 * Decide whether this writeback pass should serialize against other data
 * writers (by holding sbi->writepages in the caller): only regular,
 * non-quota files qualify; any async (non-WB_SYNC_ALL) pass serializes,
 * and a sync pass serializes once the inode has >= min_seq_blocks dirty
 * pages.
 */
2205853137ceSJaegeuk Kim static inline bool __should_serialize_io(struct inode *inode,
2206853137ceSJaegeuk Kim 					struct writeback_control *wbc)
2207853137ceSJaegeuk Kim {
2208853137ceSJaegeuk Kim 	if (!S_ISREG(inode->i_mode))
2209853137ceSJaegeuk Kim 		return false;
2210af033b2aSChao Yu 	if (IS_NOQUOTA(inode))
2211af033b2aSChao Yu 		return false;
2212853137ceSJaegeuk Kim 	if (wbc->sync_mode != WB_SYNC_ALL)
2213853137ceSJaegeuk Kim 		return true;
2214853137ceSJaegeuk Kim 	if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
2215853137ceSJaegeuk Kim 		return true;
2216853137ceSJaegeuk Kim 	return false;
2217853137ceSJaegeuk Kim }
2218853137ceSJaegeuk Kim 
/*
 * Core of ->writepages for data: applies skip heuristics (no writepage op,
 * no dirty pages, power-on recovery in flight, small dir/quota write-outs,
 * defragment in flight), accounts WB_SYNC_ALL writers in wb_sync_req[DATA],
 * optionally serializes writers via sbi->writepages, then flushes dirty
 * pages under a block plug.
 */
2219fc99fe27SChao Yu static int __f2fs_write_data_pages(struct address_space *mapping,
2220b0af6d49SChao Yu 						struct writeback_control *wbc,
2221b0af6d49SChao Yu 						enum iostat_type io_type)
2222eb47b800SJaegeuk Kim {
2223eb47b800SJaegeuk Kim 	struct inode *inode = mapping->host;
22244081363fSJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
22259dfa1bafSJaegeuk Kim 	struct blk_plug plug;
2226eb47b800SJaegeuk Kim 	int ret;
2227853137ceSJaegeuk Kim 	bool locked = false;
2228eb47b800SJaegeuk Kim 
2229cfb185a1SP J P 	/* deal with chardevs and other special file */
2230cfb185a1SP J P 	if (!mapping->a_ops->writepage)
2231cfb185a1SP J P 		return 0;
2232cfb185a1SP J P 
22336a290544SChao Yu 	/* skip writing if there is no dirty page in this inode */
22346a290544SChao Yu 	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
22356a290544SChao Yu 		return 0;
22366a290544SChao Yu 
22370771fcc7SChao Yu 	/* during POR, we don't need to trigger writepage at all. */
22380771fcc7SChao Yu 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
22390771fcc7SChao Yu 		goto skip_write;
22400771fcc7SChao Yu 
	/* small async dir/quota write-outs can wait while memory allows */
2241af033b2aSChao Yu 	if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
2242af033b2aSChao Yu 			wbc->sync_mode == WB_SYNC_NONE &&
2243a1257023SJaegeuk Kim 			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
22444d57b86dSChao Yu 			f2fs_available_free_memory(sbi, DIRTY_DENTS))
2245a1257023SJaegeuk Kim 		goto skip_write;
2246a1257023SJaegeuk Kim 
2247d323d005SChao Yu 	/* skip writing during file defragment */
224891942321SJaegeuk Kim 	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
2249d323d005SChao Yu 		goto skip_write;
2250d323d005SChao Yu 
2251d31c7c3fSYunlei He 	trace_f2fs_writepages(mapping->host, wbc, DATA);
2252d31c7c3fSYunlei He 
2253687de7f1SJaegeuk Kim 	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
2254687de7f1SJaegeuk Kim 	if (wbc->sync_mode == WB_SYNC_ALL)
2255c29fd0c0SChao Yu 		atomic_inc(&sbi->wb_sync_req[DATA]);
2256c29fd0c0SChao Yu 	else if (atomic_read(&sbi->wb_sync_req[DATA]))
2257687de7f1SJaegeuk Kim 		goto skip_write;
2258687de7f1SJaegeuk Kim 
2259853137ceSJaegeuk Kim 	if (__should_serialize_io(inode, wbc)) {
2260853137ceSJaegeuk Kim 		mutex_lock(&sbi->writepages);
2261853137ceSJaegeuk Kim 		locked = true;
2262853137ceSJaegeuk Kim 	}
2263853137ceSJaegeuk Kim 
22649dfa1bafSJaegeuk Kim 	blk_start_plug(&plug);
2265b0af6d49SChao Yu 	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
22669dfa1bafSJaegeuk Kim 	blk_finish_plug(&plug);
2267687de7f1SJaegeuk Kim 
2268853137ceSJaegeuk Kim 	if (locked)
2269853137ceSJaegeuk Kim 		mutex_unlock(&sbi->writepages);
2270853137ceSJaegeuk Kim 
2271687de7f1SJaegeuk Kim 	if (wbc->sync_mode == WB_SYNC_ALL)
2272c29fd0c0SChao Yu 		atomic_dec(&sbi->wb_sync_req[DATA]);
227328ea6162SJaegeuk Kim 	/*
227428ea6162SJaegeuk Kim 	 * if some pages were truncated, we cannot guarantee its mapping->host
227528ea6162SJaegeuk Kim 	 * to detect pending bios.
227628ea6162SJaegeuk Kim 	 */
2277458e6197SJaegeuk Kim 
22784d57b86dSChao Yu 	f2fs_remove_dirty_inode(inode);
2279eb47b800SJaegeuk Kim 	return ret;
2280d3baf95dSJaegeuk Kim 
2281d3baf95dSJaegeuk Kim skip_write:
2282a7ffdbe2SJaegeuk Kim 	wbc->pages_skipped += get_dirty_pages(inode);
2283d31c7c3fSYunlei He 	trace_f2fs_writepages(mapping->host, wbc, DATA);
2284d3baf95dSJaegeuk Kim 	return 0;
2285eb47b800SJaegeuk Kim }
2286eb47b800SJaegeuk Kim 
/*
 * ->writepages entry: attributes the IO to checkpoint traffic
 * (FS_CP_DATA_IO) when issued by the checkpoint task, otherwise to
 * regular data IO (FS_DATA_IO).
 */
2287b0af6d49SChao Yu static int f2fs_write_data_pages(struct address_space *mapping,
2288b0af6d49SChao Yu 			    struct writeback_control *wbc)
2289b0af6d49SChao Yu {
2290b0af6d49SChao Yu 	struct inode *inode = mapping->host;
2291b0af6d49SChao Yu 
2292b0af6d49SChao Yu 	return __f2fs_write_data_pages(mapping, wbc,
2293b0af6d49SChao Yu 			F2FS_I(inode)->cp_task == current ?
2294b0af6d49SChao Yu 			FS_CP_DATA_IO : FS_DATA_IO);
2295b0af6d49SChao Yu }
2296b0af6d49SChao Yu 
/*
 * Undo a failed write that extended past i_size: drop the pagecache beyond
 * i_size and free the blocks preallocated for the failed range, holding
 * i_gc_rwsem[WRITE] and i_mmap_sem to fence off GC and mmap faults while
 * truncating.
 */
22973aab8f82SChao Yu static void f2fs_write_failed(struct address_space *mapping, loff_t to)
22983aab8f82SChao Yu {
22993aab8f82SChao Yu 	struct inode *inode = mapping->host;
2300819d9153SJaegeuk Kim 	loff_t i_size = i_size_read(inode);
23013aab8f82SChao Yu 
2302819d9153SJaegeuk Kim 	if (to > i_size) {
2303a33c1502SChao Yu 		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
23046f8d4455SJaegeuk Kim 		down_write(&F2FS_I(inode)->i_mmap_sem);
2305a33c1502SChao Yu 
2306819d9153SJaegeuk Kim 		truncate_pagecache(inode, i_size);
2307af033b2aSChao Yu 		f2fs_truncate_blocks(inode, i_size, true, true);
2308a33c1502SChao Yu 
23095a3a2d83SQiuyang Sun 		up_write(&F2FS_I(inode)->i_mmap_sem);
23106f8d4455SJaegeuk Kim 		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
23113aab8f82SChao Yu 	}
23123aab8f82SChao Yu }
23133aab8f82SChao Yu 
/*
 * Locate (or reserve) the block backing @page for f2fs_write_begin().
 * Returns the block address in *blk_addr and whether the node page was
 * dirtied in *node_changed.  Takes the map lock via __do_map_lock() when
 * allocation may occur (inline data, or writing past i_size); fully
 * preallocated whole-page writes return immediately without any lookup.
 */
23142aadac08SJaegeuk Kim static int prepare_write_begin(struct f2fs_sb_info *sbi,
23152aadac08SJaegeuk Kim 			struct page *page, loff_t pos, unsigned len,
23162aadac08SJaegeuk Kim 			block_t *blk_addr, bool *node_changed)
23172aadac08SJaegeuk Kim {
23182aadac08SJaegeuk Kim 	struct inode *inode = page->mapping->host;
23192aadac08SJaegeuk Kim 	pgoff_t index = page->index;
23202aadac08SJaegeuk Kim 	struct dnode_of_data dn;
23212aadac08SJaegeuk Kim 	struct page *ipage;
2322b4d07a3eSJaegeuk Kim 	bool locked = false;
2323e15882b6SHou Pengyang 	struct extent_info ei = {0,0,0};
23242aadac08SJaegeuk Kim 	int err = 0;
2325*2866fb16SSheng Yong 	int flag;
23262aadac08SJaegeuk Kim 
232724b84912SJaegeuk Kim 	/*
232824b84912SJaegeuk Kim 	 * we already allocated all the blocks, so we don't need to get
232924b84912SJaegeuk Kim 	 * the block addresses when there is no need to fill the page.
233024b84912SJaegeuk Kim 	 */
2331dc91de78SJaegeuk Kim 	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
2332dc91de78SJaegeuk Kim 			!is_inode_flag_set(inode, FI_NO_PREALLOC))
233324b84912SJaegeuk Kim 		return 0;
233424b84912SJaegeuk Kim 
2335*2866fb16SSheng Yong 	/* f2fs_lock_op avoids race between write CP and convert_inline_page */
2336*2866fb16SSheng Yong 	if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
2337*2866fb16SSheng Yong 		flag = F2FS_GET_BLOCK_DEFAULT;
2338*2866fb16SSheng Yong 	else
2339*2866fb16SSheng Yong 		flag = F2FS_GET_BLOCK_PRE_AIO;
2340*2866fb16SSheng Yong 
2341b4d07a3eSJaegeuk Kim 	if (f2fs_has_inline_data(inode) ||
234209cbfeafSKirill A. Shutemov 			(pos & PAGE_MASK) >= i_size_read(inode)) {
2343*2866fb16SSheng Yong 		__do_map_lock(sbi, flag, true);
2344b4d07a3eSJaegeuk Kim 		locked = true;
2345b4d07a3eSJaegeuk Kim 	}
2346b4d07a3eSJaegeuk Kim restart:
23472aadac08SJaegeuk Kim 	/* check inline_data */
23484d57b86dSChao Yu 	ipage = f2fs_get_node_page(sbi, inode->i_ino);
23492aadac08SJaegeuk Kim 	if (IS_ERR(ipage)) {
23502aadac08SJaegeuk Kim 		err = PTR_ERR(ipage);
23512aadac08SJaegeuk Kim 		goto unlock_out;
23522aadac08SJaegeuk Kim 	}
23532aadac08SJaegeuk Kim 
23542aadac08SJaegeuk Kim 	set_new_dnode(&dn, inode, ipage, ipage, 0);
23552aadac08SJaegeuk Kim 
23562aadac08SJaegeuk Kim 	if (f2fs_has_inline_data(inode)) {
2357f2470371SChao Yu 		if (pos + len <= MAX_INLINE_DATA(inode)) {
23584d57b86dSChao Yu 			f2fs_do_read_inline_data(page, ipage);
235991942321SJaegeuk Kim 			set_inode_flag(inode, FI_DATA_EXIST);
2360ab47036dSChao Yu 			if (inode->i_nlink)
23612049d4fcSJaegeuk Kim 				set_inline_node(ipage);
23622aadac08SJaegeuk Kim 		} else {
23632aadac08SJaegeuk Kim 			err = f2fs_convert_inline_page(&dn, page);
23642aadac08SJaegeuk Kim 			if (err)
2365b4d07a3eSJaegeuk Kim 				goto out;
2366b4d07a3eSJaegeuk Kim 			if (dn.data_blkaddr == NULL_ADDR)
23672aadac08SJaegeuk Kim 				err = f2fs_get_block(&dn, index);
2368b4d07a3eSJaegeuk Kim 		}
2369b4d07a3eSJaegeuk Kim 	} else if (locked) {
2370b4d07a3eSJaegeuk Kim 		err = f2fs_get_block(&dn, index);
2371b4d07a3eSJaegeuk Kim 	} else {
2372b4d07a3eSJaegeuk Kim 		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
2373b4d07a3eSJaegeuk Kim 			dn.data_blkaddr = ei.blk + index - ei.fofs;
2374b4d07a3eSJaegeuk Kim 		} else {
2375b4d07a3eSJaegeuk Kim 			/* hole case */
23764d57b86dSChao Yu 			err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
23774da7bf5aSJaegeuk Kim 			if (err || dn.data_blkaddr == NULL_ADDR) {
2378b4d07a3eSJaegeuk Kim 				f2fs_put_dnode(&dn);
237959c9081bSYunlei He 				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
238059c9081bSYunlei He 								true);
2381*2866fb16SSheng Yong 				WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
2382b4d07a3eSJaegeuk Kim 				locked = true;
2383b4d07a3eSJaegeuk Kim 				goto restart;
2384b4d07a3eSJaegeuk Kim 			}
2385b4d07a3eSJaegeuk Kim 		}
2386b4d07a3eSJaegeuk Kim 	}
2387b4d07a3eSJaegeuk Kim 
23882aadac08SJaegeuk Kim 	/* convert_inline_page can make node_changed */
23892aadac08SJaegeuk Kim 	*blk_addr = dn.data_blkaddr;
23902aadac08SJaegeuk Kim 	*node_changed = dn.node_changed;
2391b4d07a3eSJaegeuk Kim out:
23922aadac08SJaegeuk Kim 	f2fs_put_dnode(&dn);
23932aadac08SJaegeuk Kim unlock_out:
2394b4d07a3eSJaegeuk Kim 	if (locked)
2395*2866fb16SSheng Yong 		__do_map_lock(sbi, flag, false);
23962aadac08SJaegeuk Kim 	return err;
23972aadac08SJaegeuk Kim }
23982aadac08SJaegeuk Kim 
/*
 * ->write_begin: lock the target page, reserve/locate its block address via
 * prepare_write_begin(), and bring the page uptodate when the write does
 * not cover the whole page (zeroing NEW_ADDR pages, reading others).  On
 * failure the page is dropped and blocks preallocated past i_size are
 * released by f2fs_write_failed().
 */
2399eb47b800SJaegeuk Kim static int f2fs_write_begin(struct file *file, struct address_space *mapping,
2400eb47b800SJaegeuk Kim 		loff_t pos, unsigned len, unsigned flags,
2401eb47b800SJaegeuk Kim 		struct page **pagep, void **fsdata)
2402eb47b800SJaegeuk Kim {
2403eb47b800SJaegeuk Kim 	struct inode *inode = mapping->host;
24044081363fSJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
240586531d6bSJaegeuk Kim 	struct page *page = NULL;
240609cbfeafSKirill A. Shutemov 	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
2407a2e2e76bSChao Yu 	bool need_balance = false, drop_atomic = false;
24082aadac08SJaegeuk Kim 	block_t blkaddr = NULL_ADDR;
2409eb47b800SJaegeuk Kim 	int err = 0;
2410eb47b800SJaegeuk Kim 
241162aed044SChao Yu 	trace_f2fs_write_begin(inode, pos, len, flags);
241262aed044SChao Yu 
24134354994fSDaniel Rosenberg 	err = f2fs_is_checkpoint_ready(sbi);
24144354994fSDaniel Rosenberg 	if (err)
24154354994fSDaniel Rosenberg 		goto fail;
24164354994fSDaniel Rosenberg 
	/* atomic file under memory pressure (or revoke requested): drop in-mem pages */
2417455e3a58SJaegeuk Kim 	if ((f2fs_is_atomic_file(inode) &&
2418455e3a58SJaegeuk Kim 			!f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
2419455e3a58SJaegeuk Kim 			is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
242057864ae5SJaegeuk Kim 		err = -ENOMEM;
2421a2e2e76bSChao Yu 		drop_atomic = true;
242257864ae5SJaegeuk Kim 		goto fail;
242357864ae5SJaegeuk Kim 	}
242457864ae5SJaegeuk Kim 
24255f727395SJaegeuk Kim 	/*
24265f727395SJaegeuk Kim 	 * We should check this at this moment to avoid deadlock on inode page
24275f727395SJaegeuk Kim 	 * and #0 page. The locking rule for inline_data conversion should be:
24285f727395SJaegeuk Kim 	 * lock_page(page #0) -> lock_page(inode_page)
24295f727395SJaegeuk Kim 	 */
24305f727395SJaegeuk Kim 	if (index != 0) {
24315f727395SJaegeuk Kim 		err = f2fs_convert_inline_inode(inode);
24325f727395SJaegeuk Kim 		if (err)
24335f727395SJaegeuk Kim 			goto fail;
24345f727395SJaegeuk Kim 	}
2435afcb7ca0SJaegeuk Kim repeat:
243686d54795SJaegeuk Kim 	/*
243786d54795SJaegeuk Kim 	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
243886d54795SJaegeuk Kim 	 * wait_for_stable_page. Will wait that below with our IO control.
243986d54795SJaegeuk Kim 	 */
244001eccef7SChao Yu 	page = f2fs_pagecache_get_page(mapping, index,
244186d54795SJaegeuk Kim 				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
24423aab8f82SChao Yu 	if (!page) {
24433aab8f82SChao Yu 		err = -ENOMEM;
24443aab8f82SChao Yu 		goto fail;
24453aab8f82SChao Yu 	}
2446d5f66990SJaegeuk Kim 
2447eb47b800SJaegeuk Kim 	*pagep = page;
2448eb47b800SJaegeuk Kim 
24492aadac08SJaegeuk Kim 	err = prepare_write_begin(sbi, page, pos, len,
24502aadac08SJaegeuk Kim 					&blkaddr, &need_balance);
2451b3d208f9SJaegeuk Kim 	if (err)
24522aadac08SJaegeuk Kim 		goto fail;
2453759af1c9SFan Li 
	/* balancing may sleep, so drop the page lock and revalidate after */
2454af033b2aSChao Yu 	if (need_balance && !IS_NOQUOTA(inode) &&
2455af033b2aSChao Yu 			has_not_enough_free_secs(sbi, 0, 0)) {
24562a340760SJaegeuk Kim 		unlock_page(page);
24572c4db1a6SJaegeuk Kim 		f2fs_balance_fs(sbi, true);
24582a340760SJaegeuk Kim 		lock_page(page);
24592a340760SJaegeuk Kim 		if (page->mapping != mapping) {
24602a340760SJaegeuk Kim 			/* The page got truncated from under us */
24612a340760SJaegeuk Kim 			f2fs_put_page(page, 1);
24622a340760SJaegeuk Kim 			goto repeat;
24632a340760SJaegeuk Kim 		}
24642a340760SJaegeuk Kim 	}
24652a340760SJaegeuk Kim 
2466fec1d657SJaegeuk Kim 	f2fs_wait_on_page_writeback(page, DATA, false);
2467b3d208f9SJaegeuk Kim 
	/* whole-page write or already uptodate: no need to read old data */
2468649d7df2SJaegeuk Kim 	if (len == PAGE_SIZE || PageUptodate(page))
2469649d7df2SJaegeuk Kim 		return 0;
2470eb47b800SJaegeuk Kim 
2471746e2403SYunlei He 	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
2472746e2403SYunlei He 		zero_user_segment(page, len, PAGE_SIZE);
2473746e2403SYunlei He 		return 0;
2474746e2403SYunlei He 	}
2475746e2403SYunlei He 
24762aadac08SJaegeuk Kim 	if (blkaddr == NEW_ADDR) {
247709cbfeafSKirill A. Shutemov 		zero_user_segment(page, 0, PAGE_SIZE);
2478649d7df2SJaegeuk Kim 		SetPageUptodate(page);
2479d54c795bSChao Yu 	} else {
248013ba41e3SJaegeuk Kim 		err = f2fs_submit_page_read(inode, page, blkaddr);
248113ba41e3SJaegeuk Kim 		if (err)
24823aab8f82SChao Yu 			goto fail;
2483d54c795bSChao Yu 
2484393ff91fSJaegeuk Kim 		lock_page(page);
24856bacf52fSJaegeuk Kim 		if (unlikely(page->mapping != mapping)) {
2486afcb7ca0SJaegeuk Kim 			f2fs_put_page(page, 1);
2487afcb7ca0SJaegeuk Kim 			goto repeat;
2488eb47b800SJaegeuk Kim 		}
24891563ac75SChao Yu 		if (unlikely(!PageUptodate(page))) {
24901563ac75SChao Yu 			err = -EIO;
24914375a336SJaegeuk Kim 			goto fail;
24924375a336SJaegeuk Kim 		}
24934375a336SJaegeuk Kim 	}
2494eb47b800SJaegeuk Kim 	return 0;
24959ba69cf9SJaegeuk Kim 
24963aab8f82SChao Yu fail:
249786531d6bSJaegeuk Kim 	f2fs_put_page(page, 1);
24983aab8f82SChao Yu 	f2fs_write_failed(mapping, pos + len);
2499a2e2e76bSChao Yu 	if (drop_atomic)
25004d57b86dSChao Yu 		f2fs_drop_inmem_pages_all(sbi, false);
25013aab8f82SChao Yu 	return err;
2502eb47b800SJaegeuk Kim }
2503eb47b800SJaegeuk Kim 
/*
 * ->write_end: mark the copied page dirty and extend i_size if the write
 * went past it.  A short copy into a not-yet-uptodate page is rejected
 * (copied forced to 0) so generic_perform_write() will retry the copy.
 * Returns the number of bytes accepted.
 */
2504a1dd3c13SJaegeuk Kim static int f2fs_write_end(struct file *file,
2505a1dd3c13SJaegeuk Kim 			struct address_space *mapping,
2506a1dd3c13SJaegeuk Kim 			loff_t pos, unsigned len, unsigned copied,
2507a1dd3c13SJaegeuk Kim 			struct page *page, void *fsdata)
2508a1dd3c13SJaegeuk Kim {
2509a1dd3c13SJaegeuk Kim 	struct inode *inode = page->mapping->host;
2510a1dd3c13SJaegeuk Kim 
2511dfb2bf38SChao Yu 	trace_f2fs_write_end(inode, pos, len, copied);
2512dfb2bf38SChao Yu 
2513649d7df2SJaegeuk Kim 	/*
2514649d7df2SJaegeuk Kim 	 * This should be come from len == PAGE_SIZE, and we expect copied
2515649d7df2SJaegeuk Kim 	 * should be PAGE_SIZE. Otherwise, we treat it with zero copied and
2516649d7df2SJaegeuk Kim 	 * let generic_perform_write() try to copy data again through copied=0.
2517649d7df2SJaegeuk Kim 	 */
2518649d7df2SJaegeuk Kim 	if (!PageUptodate(page)) {
2519746e2403SYunlei He 		if (unlikely(copied != len))
2520649d7df2SJaegeuk Kim 			copied = 0;
2521649d7df2SJaegeuk Kim 		else
2522649d7df2SJaegeuk Kim 			SetPageUptodate(page);
2523649d7df2SJaegeuk Kim 	}
2524649d7df2SJaegeuk Kim 	if (!copied)
2525649d7df2SJaegeuk Kim 		goto unlock_out;
2526649d7df2SJaegeuk Kim 
2527a1dd3c13SJaegeuk Kim 	set_page_dirty(page);
2528a1dd3c13SJaegeuk Kim 
2529fc9581c8SJaegeuk Kim 	if (pos + copied > i_size_read(inode))
2530fc9581c8SJaegeuk Kim 		f2fs_i_size_write(inode, pos + copied);
2531649d7df2SJaegeuk Kim unlock_out:
25323024c9a1SChao Yu 	f2fs_put_page(page, 1);
2533d0239e1bSJaegeuk Kim 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2534a1dd3c13SJaegeuk Kim 	return copied;
2535a1dd3c13SJaegeuk Kim }
2536a1dd3c13SJaegeuk Kim 
/*
 * Validate alignment for a direct IO request.  Returns 0 when @offset and
 * all iov segments are aligned to the fs block size, 1 when they are only
 * aligned to the bdev's logical block size (caller decides what to do with
 * that), and -EINVAL when not even logically-block aligned.
 */
25376f673763SOmar Sandoval static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
25386f673763SOmar Sandoval 			   loff_t offset)
2539944fcfc1SJaegeuk Kim {
25408a56dd96SJaegeuk Kim 	unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
25418a56dd96SJaegeuk Kim 	unsigned blkbits = i_blkbits;
25428a56dd96SJaegeuk Kim 	unsigned blocksize_mask = (1 << blkbits) - 1;
25438a56dd96SJaegeuk Kim 	unsigned long align = offset | iov_iter_alignment(iter);
25448a56dd96SJaegeuk Kim 	struct block_device *bdev = inode->i_sb->s_bdev;
2545944fcfc1SJaegeuk Kim 
25468a56dd96SJaegeuk Kim 	if (align & blocksize_mask) {
	        /* retry the check against the device's logical block size */
25478a56dd96SJaegeuk Kim 		if (bdev)
25488a56dd96SJaegeuk Kim 			blkbits = blksize_bits(bdev_logical_block_size(bdev));
25498a56dd96SJaegeuk Kim 		blocksize_mask = (1 << blkbits) - 1;
25508a56dd96SJaegeuk Kim 		if (align & blocksize_mask)
2551944fcfc1SJaegeuk Kim 			return -EINVAL;
25528a56dd96SJaegeuk Kim 		return 1;
25538a56dd96SJaegeuk Kim 	}
2554944fcfc1SJaegeuk Kim 	return 0;
2555944fcfc1SJaegeuk Kim }
2556944fcfc1SJaegeuk Kim 
/*
 * bio completion for direct IO: drop the in-flight DIO page count, restore
 * the bio's original private data and completion callback, free the
 * f2fs_private_dio wrapper and complete the bio.
 */
255702b16d0aSChao Yu static void f2fs_dio_end_io(struct bio *bio)
255802b16d0aSChao Yu {
255902b16d0aSChao Yu 	struct f2fs_private_dio *dio = bio->bi_private;
256002b16d0aSChao Yu 
256102b16d0aSChao Yu 	dec_page_count(F2FS_I_SB(dio->inode),
256202b16d0aSChao Yu 			dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
256302b16d0aSChao Yu 
256402b16d0aSChao Yu 	bio->bi_private = dio->orig_private;
256502b16d0aSChao Yu 	bio->bi_end_io = dio->orig_end_io;
256602b16d0aSChao Yu 
256702b16d0aSChao Yu 	kfree(dio);
256802b16d0aSChao Yu 
256902b16d0aSChao Yu 	bio_endio(bio);
257002b16d0aSChao Yu }
257102b16d0aSChao Yu 
/*
 * Submit hook passed to __blockdev_direct_IO: wraps the bio's completion in
 * a f2fs_private_dio so in-flight DIO reads/writes can be counted, then
 * submits the bio.  On allocation failure the bio is failed with
 * BLK_STS_IOERR instead of being submitted.
 */
257202b16d0aSChao Yu static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
257302b16d0aSChao Yu 							loff_t file_offset)
257402b16d0aSChao Yu {
257502b16d0aSChao Yu 	struct f2fs_private_dio *dio;
257602b16d0aSChao Yu 	bool write = (bio_op(bio) == REQ_OP_WRITE);
257702b16d0aSChao Yu 	int err;
257802b16d0aSChao Yu 
257902b16d0aSChao Yu 	dio = f2fs_kzalloc(F2FS_I_SB(inode),
258002b16d0aSChao Yu 			sizeof(struct f2fs_private_dio), GFP_NOFS);
258102b16d0aSChao Yu 	if (!dio) {
		/* NOTE(review): err is assigned but never read on this path */
258202b16d0aSChao Yu 		err = -ENOMEM;
258302b16d0aSChao Yu 		goto out;
258402b16d0aSChao Yu 	}
258502b16d0aSChao Yu 
258602b16d0aSChao Yu 	dio->inode = inode;
258702b16d0aSChao Yu 	dio->orig_end_io = bio->bi_end_io;
258802b16d0aSChao Yu 	dio->orig_private = bio->bi_private;
258902b16d0aSChao Yu 	dio->write = write;
259002b16d0aSChao Yu 
259102b16d0aSChao Yu 	bio->bi_end_io = f2fs_dio_end_io;
259202b16d0aSChao Yu 	bio->bi_private = dio;
259302b16d0aSChao Yu 
259402b16d0aSChao Yu 	inc_page_count(F2FS_I_SB(inode),
259502b16d0aSChao Yu 			write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
259602b16d0aSChao Yu 
259702b16d0aSChao Yu 	submit_bio(bio);
259802b16d0aSChao Yu 	return;
259902b16d0aSChao Yu out:
260002b16d0aSChao Yu 	bio->bi_status = BLK_STS_IOERR;
260102b16d0aSChao Yu 	bio_endio(bio);
260202b16d0aSChao Yu }
260302b16d0aSChao Yu 
/*
 * ->direct_IO: hand aligned requests to __blockdev_direct_IO, holding
 * i_gc_rwsem[rw] (and i_gc_rwsem[READ] as well for out-of-place writes)
 * to exclude GC; IOCB_NOWAIT callers only trylock and get -EAGAIN.
 * Returns 0 (fall back to buffered IO) for misaligned requests or when
 * f2fs_force_buffered_io() demands it.  A failed write truncates the
 * blocks preallocated for the range via f2fs_write_failed().
 */
2604c8b8e32dSChristoph Hellwig static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
2605eb47b800SJaegeuk Kim {
2606b439b103SJaegeuk Kim 	struct address_space *mapping = iocb->ki_filp->f_mapping;
26073aab8f82SChao Yu 	struct inode *inode = mapping->host;
26080cdd3195SHyunchul Lee 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2609f847c699SChao Yu 	struct f2fs_inode_info *fi = F2FS_I(inode);
26103aab8f82SChao Yu 	size_t count = iov_iter_count(iter);
2611c8b8e32dSChristoph Hellwig 	loff_t offset = iocb->ki_pos;
261282e0a5aaSChao Yu 	int rw = iov_iter_rw(iter);
26133aab8f82SChao Yu 	int err;
26140cdd3195SHyunchul Lee 	enum rw_hint hint = iocb->ki_hint;
261563189b78SChao Yu 	int whint_mode = F2FS_OPTION(sbi).whint_mode;
2616f847c699SChao Yu 	bool do_opu;
2617944fcfc1SJaegeuk Kim 
2618b439b103SJaegeuk Kim 	err = check_direct_IO(inode, iter, offset);
2619b3d208f9SJaegeuk Kim 	if (err)
26208a56dd96SJaegeuk Kim 		return err < 0 ? err : 0;
26219ffe0fb5SHuajun Li 
2622f847c699SChao Yu 	if (f2fs_force_buffered_io(inode, iocb, iter))
262336abef4eSJaegeuk Kim 		return 0;
2624fcc85a4dSJaegeuk Kim 
2625f847c699SChao Yu 	do_opu = allow_outplace_dio(inode, iocb, iter);
2626f847c699SChao Yu 
26275302fb00SJaegeuk Kim 	trace_f2fs_direct_IO_enter(inode, offset, count, rw);
262870407fadSChao Yu 
26290cdd3195SHyunchul Lee 	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
26300cdd3195SHyunchul Lee 		iocb->ki_hint = WRITE_LIFE_NOT_SET;
26310cdd3195SHyunchul Lee 
2632b91050a8SHyunchul Lee 	if (iocb->ki_flags & IOCB_NOWAIT) {
2633f847c699SChao Yu 		if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
2634b91050a8SHyunchul Lee 			iocb->ki_hint = hint;
2635b91050a8SHyunchul Lee 			err = -EAGAIN;
2636b91050a8SHyunchul Lee 			goto out;
2637b91050a8SHyunchul Lee 		}
2638f847c699SChao Yu 		if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
2639f847c699SChao Yu 			up_read(&fi->i_gc_rwsem[rw]);
2640f847c699SChao Yu 			iocb->ki_hint = hint;
2641f847c699SChao Yu 			err = -EAGAIN;
2642f847c699SChao Yu 			goto out;
2643f847c699SChao Yu 		}
2644f847c699SChao Yu 	} else {
2645f847c699SChao Yu 		down_read(&fi->i_gc_rwsem[rw]);
2646f847c699SChao Yu 		if (do_opu)
2647f847c699SChao Yu 			down_read(&fi->i_gc_rwsem[READ]);
2648b91050a8SHyunchul Lee 	}
2649b91050a8SHyunchul Lee 
265002b16d0aSChao Yu 	err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
2651f9d6d059SChao Yu 			iter, rw == WRITE ? get_data_block_dio_write :
2652f9d6d059SChao Yu 			get_data_block_dio, NULL, f2fs_dio_submit_bio,
265302b16d0aSChao Yu 			DIO_LOCKING | DIO_SKIP_HOLES);
2654f847c699SChao Yu 
2655f847c699SChao Yu 	if (do_opu)
2656f847c699SChao Yu 		up_read(&fi->i_gc_rwsem[READ]);
2657f847c699SChao Yu 
2658f847c699SChao Yu 	up_read(&fi->i_gc_rwsem[rw]);
265982e0a5aaSChao Yu 
266082e0a5aaSChao Yu 	if (rw == WRITE) {
26610cdd3195SHyunchul Lee 		if (whint_mode == WHINT_MODE_OFF)
26620cdd3195SHyunchul Lee 			iocb->ki_hint = hint;
2663b0af6d49SChao Yu 		if (err > 0) {
2664b0af6d49SChao Yu 			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
2665b0af6d49SChao Yu 									err);
2666f847c699SChao Yu 			if (!do_opu)
266791942321SJaegeuk Kim 				set_inode_flag(inode, FI_UPDATE_WRITE);
2668b0af6d49SChao Yu 		} else if (err < 0) {
26693aab8f82SChao Yu 			f2fs_write_failed(mapping, offset + count);
26706bfc4919SJaegeuk Kim 		}
2671b0af6d49SChao Yu 	}
267270407fadSChao Yu 
2673b91050a8SHyunchul Lee out:
26745302fb00SJaegeuk Kim 	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
267570407fadSChao Yu 
26763aab8f82SChao Yu 	return err;
2677eb47b800SJaegeuk Kim }
2678eb47b800SJaegeuk Kim 
/*
 * ->invalidatepage: drop the dirty accounting for a page being removed from
 * the page cache (meta, node, or regular data counters as appropriate).
 * Atomic-written pages are handed to f2fs_drop_inmem_page() and keep their
 * private data; all other pages lose PagePrivate.
 */
2679487261f3SChao Yu void f2fs_invalidate_page(struct page *page, unsigned int offset,
2680d47992f8SLukas Czerner 							unsigned int length)
2681eb47b800SJaegeuk Kim {
2682eb47b800SJaegeuk Kim 	struct inode *inode = page->mapping->host;
2683487261f3SChao Yu 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2684a7ffdbe2SJaegeuk Kim 
	/* partial invalidation of a non-meta page: nothing to tear down */
2685487261f3SChao Yu 	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
268609cbfeafSKirill A. Shutemov 		(offset % PAGE_SIZE || length != PAGE_SIZE))
2687a7ffdbe2SJaegeuk Kim 		return;
2688a7ffdbe2SJaegeuk Kim 
2689487261f3SChao Yu 	if (PageDirty(page)) {
2690933439c8SChao Yu 		if (inode->i_ino == F2FS_META_INO(sbi)) {
2691487261f3SChao Yu 			dec_page_count(sbi, F2FS_DIRTY_META);
2692933439c8SChao Yu 		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
2693487261f3SChao Yu 			dec_page_count(sbi, F2FS_DIRTY_NODES);
2694933439c8SChao Yu 		} else {
2695a7ffdbe2SJaegeuk Kim 			inode_dec_dirty_pages(inode);
26964d57b86dSChao Yu 			f2fs_remove_dirty_inode(inode);
2697933439c8SChao Yu 		}
2698487261f3SChao Yu 	}
2699decd36b6SChao Yu 
27002baf0781SChao Yu 	clear_cold_data(page);
27012baf0781SChao Yu 
2702decd36b6SChao Yu 	/* This is atomic written page, keep Private */
2703decd36b6SChao Yu 	if (IS_ATOMIC_WRITTEN_PAGE(page))
27044d57b86dSChao Yu 		return f2fs_drop_inmem_page(inode, page);
2705decd36b6SChao Yu 
270623dc974eSChao Yu 	set_page_private(page, 0);
2707eb47b800SJaegeuk Kim 	ClearPagePrivate(page);
2708eb47b800SJaegeuk Kim }
2709eb47b800SJaegeuk Kim 
2710487261f3SChao Yu int f2fs_release_page(struct page *page, gfp_t wait)
2711eb47b800SJaegeuk Kim {
2712f68daeebSJaegeuk Kim 	/* If this is dirty page, keep PagePrivate */
2713f68daeebSJaegeuk Kim 	if (PageDirty(page))
2714f68daeebSJaegeuk Kim 		return 0;
2715f68daeebSJaegeuk Kim 
2716decd36b6SChao Yu 	/* This is atomic written page, keep Private */
2717decd36b6SChao Yu 	if (IS_ATOMIC_WRITTEN_PAGE(page))
2718decd36b6SChao Yu 		return 0;
2719decd36b6SChao Yu 
27202baf0781SChao Yu 	clear_cold_data(page);
272123dc974eSChao Yu 	set_page_private(page, 0);
2722eb47b800SJaegeuk Kim 	ClearPagePrivate(page);
2723c3850aa1SJaegeuk Kim 	return 1;
2724eb47b800SJaegeuk Kim }
2725eb47b800SJaegeuk Kim 
2726eb47b800SJaegeuk Kim static int f2fs_set_data_page_dirty(struct page *page)
2727eb47b800SJaegeuk Kim {
2728eb47b800SJaegeuk Kim 	struct address_space *mapping = page->mapping;
2729eb47b800SJaegeuk Kim 	struct inode *inode = mapping->host;
2730eb47b800SJaegeuk Kim 
273126c6b887SJaegeuk Kim 	trace_f2fs_set_page_dirty(page, DATA);
273226c6b887SJaegeuk Kim 
2733237c0790SJaegeuk Kim 	if (!PageUptodate(page))
2734eb47b800SJaegeuk Kim 		SetPageUptodate(page);
273534ba94baSJaegeuk Kim 
27365fe45743SChao Yu 	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
2737decd36b6SChao Yu 		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
27384d57b86dSChao Yu 			f2fs_register_inmem_page(inode, page);
273934ba94baSJaegeuk Kim 			return 1;
274034ba94baSJaegeuk Kim 		}
2741decd36b6SChao Yu 		/*
2742decd36b6SChao Yu 		 * Previously, this page has been registered, we just
2743decd36b6SChao Yu 		 * return here.
2744decd36b6SChao Yu 		 */
2745decd36b6SChao Yu 		return 0;
2746decd36b6SChao Yu 	}
274734ba94baSJaegeuk Kim 
2748eb47b800SJaegeuk Kim 	if (!PageDirty(page)) {
2749b87078adSJaegeuk Kim 		__set_page_dirty_nobuffers(page);
27504d57b86dSChao Yu 		f2fs_update_dirty_page(inode, page);
2751eb47b800SJaegeuk Kim 		return 1;
2752eb47b800SJaegeuk Kim 	}
2753eb47b800SJaegeuk Kim 	return 0;
2754eb47b800SJaegeuk Kim }
2755eb47b800SJaegeuk Kim 
2756c01e54b7SJaegeuk Kim static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
2757c01e54b7SJaegeuk Kim {
2758454ae7e5SChao Yu 	struct inode *inode = mapping->host;
2759454ae7e5SChao Yu 
27601d373a0eSJaegeuk Kim 	if (f2fs_has_inline_data(inode))
27611d373a0eSJaegeuk Kim 		return 0;
27621d373a0eSJaegeuk Kim 
27631d373a0eSJaegeuk Kim 	/* make sure allocating whole blocks */
27641d373a0eSJaegeuk Kim 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
27651d373a0eSJaegeuk Kim 		filemap_write_and_wait(mapping);
27661d373a0eSJaegeuk Kim 
2767e2b4e2bcSChao Yu 	return generic_block_bmap(mapping, block, get_data_block_bmap);
2768429511cdSChao Yu }
2769429511cdSChao Yu 
#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

/*
 * ->migratepage() hook: move the mapping entry, private state and (for
 * most modes) contents of @page to @newpage for memory migration.
 *
 * Returns MIGRATEPAGE_SUCCESS on success, -EBUSY/-EAGAIN when an
 * atomically-written page cannot be migrated right now, or the error
 * from migrate_page_move_mapping().
 */
int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with the inmem_lock hold */
	if (atomic_written) {
		/* only fully-synchronous migration may wait on inmem_lock */
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		if (!mutex_trylock(&fi->inmem_lock))
			return -EAGAIN;
	}

	/*
	 * A reference is expected if PagePrivate set when move mapping,
	 * however F2FS breaks this for maintaining dirty page counts when
	 * truncating pages. So here adjusting the 'extra_count' make it work.
	 */
	extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
	rc = migrate_page_move_mapping(mapping, newpage,
				page, NULL, mode, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		/*
		 * Re-point the inmem list entry from the old page to the
		 * new one, then transfer the list's page reference.
		 */
		struct inmem_pages *cur;
		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	/* carry the PagePrivate flag and private data over to the new page */
	if (PagePrivate(page))
		SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));

	/* MIGRATE_SYNC_NO_COPY: caller copies the data, only move the state */
	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif
28285b7a487cSWeichao Guo 
/* address_space operations for regular-file data pages */
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
#ifdef CONFIG_MIGRATION
	.migratepage    = f2fs_migrate_page,
#endif
};
28456dbb1796SEric Biggers 
28465ec2d99dSMatthew Wilcox void f2fs_clear_page_cache_dirty_tag(struct page *page)
2847aec2f729SChao Yu {
2848aec2f729SChao Yu 	struct address_space *mapping = page_mapping(page);
2849aec2f729SChao Yu 	unsigned long flags;
2850aec2f729SChao Yu 
2851aec2f729SChao Yu 	xa_lock_irqsave(&mapping->i_pages, flags);
28525ec2d99dSMatthew Wilcox 	__xa_clear_mark(&mapping->i_pages, page_index(page),
2853aec2f729SChao Yu 						PAGECACHE_TAG_DIRTY);
2854aec2f729SChao Yu 	xa_unlock_irqrestore(&mapping->i_pages, flags);
2855aec2f729SChao Yu }
2856aec2f729SChao Yu 
28576dbb1796SEric Biggers int __init f2fs_init_post_read_processing(void)
28586dbb1796SEric Biggers {
28596dbb1796SEric Biggers 	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, 0);
28606dbb1796SEric Biggers 	if (!bio_post_read_ctx_cache)
28616dbb1796SEric Biggers 		goto fail;
28626dbb1796SEric Biggers 	bio_post_read_ctx_pool =
28636dbb1796SEric Biggers 		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
28646dbb1796SEric Biggers 					 bio_post_read_ctx_cache);
28656dbb1796SEric Biggers 	if (!bio_post_read_ctx_pool)
28666dbb1796SEric Biggers 		goto fail_free_cache;
28676dbb1796SEric Biggers 	return 0;
28686dbb1796SEric Biggers 
28696dbb1796SEric Biggers fail_free_cache:
28706dbb1796SEric Biggers 	kmem_cache_destroy(bio_post_read_ctx_cache);
28716dbb1796SEric Biggers fail:
28726dbb1796SEric Biggers 	return -ENOMEM;
28736dbb1796SEric Biggers }
28746dbb1796SEric Biggers 
/*
 * Tear down the post-read context allocator.  The mempool must be
 * destroyed before the slab cache backing it.
 */
void __exit f2fs_destroy_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}
2880