// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/sched/mm.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blk-crypto.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>
#include <linux/iomap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static struct kmem_cache *bio_entry_slab;
static mempool_t *bio_post_read_ctx_pool;
static struct bio_set f2fs_bioset;

#define F2FS_BIO_POOL_SIZE	NR_CURSEG_TYPE

int __init f2fs_init_bioset(void)
{
	return bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
					0, BIOSET_NEED_BVECS);
}

void f2fs_destroy_bioset(void)
{
	bioset_exit(&f2fs_bioset);
}

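/*
 * Return true if the page's writeback is guaranteed to complete before the
 * ongoing checkpoint finishes: meta/node/directory pages, pages of quota
 * files, and pages tagged for GC writes all fall into this class.
 */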
bool f2fs_is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode))
		return true;

	if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
			page_private_gcing(page))
		return true;
	return false;
}

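/* Map a page under read I/O to the count_type used for read accounting. */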
static enum count_type __read_io_type(struct page *page)
{
	struct address_space *mapping = page_file_mapping(page);

	if (mapping) {
		struct inode *inode = mapping->host;
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		if (inode->i_ino == F2FS_META_INO(sbi))
			return F2FS_RD_META;

		if (inode->i_ino == F2FS_NODE_INO(sbi))
			return F2FS_RD_NODE;
	}
	return F2FS_RD_DATA;
}

/* postprocessing steps for read bios */
enum bio_post_read_step {
#ifdef CONFIG_FS_ENCRYPTION
	STEP_DECRYPT	= BIT(0),
#else
	STEP_DECRYPT	= 0,	/* compile out the decryption-related code */
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
	STEP_DECOMPRESS	= BIT(1),
#else
	STEP_DECOMPRESS	= 0,	/* compile out the decompression-related code */
#endif
#ifdef CONFIG_FS_VERITY
	STEP_VERITY	= BIT(2),
#else
	STEP_VERITY	= 0,	/* compile out the verity-related code */
#endif
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct f2fs_sb_info *sbi;
	struct work_struct work;
	unsigned int enabled_steps;
	/*
	 * decompression_attempted keeps track of whether
	 * f2fs_end_read_compressed_page() has been called on the pages in the
	 * bio that belong to a compressed cluster yet.
	 */
	bool decompression_attempted;
	block_t fs_blkaddr;
};

/*
 * Update and unlock a bio's pages, and free the bio.
 *
 * This marks pages up-to-date only if there was no error in the bio (I/O error,
 * decryption error, or verity error), as indicated by bio->bi_status.
 *
 * "Compressed pages" (pagecache pages backed by a compressed cluster on-disk)
 * aren't marked up-to-date here, as decompression is done on a per-compression-
 * cluster basis rather than a per-bio basis.  Instead, we must do only two
 * things for each compressed page here: call f2fs_end_read_compressed_page()
 * with failed=true if an error occurred before it would have normally gotten
 * called (i.e., I/O error or decryption error, but *not* verity error), and
 * release the bio's reference to the decompress_io_ctx of the page's cluster.
 */
static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;
	struct bio_post_read_ctx *ctx = bio->bi_private;

	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;

		if (f2fs_is_compressed_page(page)) {
			if (ctx && !ctx->decompression_attempted)
				f2fs_end_read_compressed_page(page, true, 0,
							in_task);
			f2fs_put_page_dic(page, in_task);
			continue;
		}

		if (bio->bi_status)
			ClearPageUptodate(page);
		else
			SetPageUptodate(page);
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);
	}

	if (ctx)
		mempool_free(ctx, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void f2fs_verify_bio(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;
	bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);

	/*
	 * fsverity_verify_bio() may call readahead() again, and while verity
	 * will be disabled for this, decryption and/or decompression may still
	 * be needed, resulting in another bio_post_read_ctx being allocated.
	 * So to prevent deadlocks we need to release the current ctx to the
	 * mempool first.  This assumes that verity is the last post-read step.
	 */
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	/*
	 * Verify the bio's pages with fs-verity.  Exclude compressed pages,
	 * as those were handled separately by f2fs_end_read_compressed_page().
	 */
	if (may_have_compressed_pages) {
		struct bio_vec *bv;
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bv, bio, iter_all) {
			struct page *page = bv->bv_page;

			if (!f2fs_is_compressed_page(page) &&
			    !fsverity_verify_page(page)) {
				bio->bi_status = BLK_STS_IOERR;
				break;
			}
		}
	} else {
		fsverity_verify_bio(bio);
	}

	f2fs_finish_read_bio(bio, true);
}

/*
 * If the bio's data needs to be verified with fs-verity, then enqueue the
 * verity work for the bio.  Otherwise finish the bio now.
 *
 * Note that to avoid deadlocks, the verity work can't be done on the
 * decryption/decompression workqueue.  This is because verifying the data pages
 * can involve reading verity metadata pages from the file, and these verity
 * metadata pages may be encrypted and/or compressed.
 */
static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
{
	struct bio_post_read_ctx *ctx = bio->bi_private;

	if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
		INIT_WORK(&ctx->work, f2fs_verify_bio);
		fsverity_enqueue_verify_work(&ctx->work);
	} else {
		f2fs_finish_read_bio(bio, in_task);
	}
}

/*
 * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
 * remaining page was read by @ctx->bio.
 *
 * Note that a bio may span clusters (even a mix of compressed and uncompressed
 * clusters) or be for just part of a cluster.  STEP_DECOMPRESS just indicates
 * that the bio includes at least one compressed page.  The actual decompression
 * is done on a per-cluster basis, not a per-bio basis.
 */
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
		bool in_task)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;
	bool all_compressed = true;
	block_t blkaddr = ctx->fs_blkaddr;

	bio_for_each_segment_all(bv, ctx->bio, iter_all) {
		struct page *page = bv->bv_page;

		if (f2fs_is_compressed_page(page))
			f2fs_end_read_compressed_page(page, false, blkaddr,
						      in_task);
		else
			all_compressed = false;

		blkaddr++;
	}

	ctx->decompression_attempted = true;

	/*
	 * Optimization: if all the bio's pages are compressed, then scheduling
	 * the per-bio verity work is unnecessary, as verity will be fully
	 * handled at the compression cluster level.
	 */
	if (all_compressed)
		ctx->enabled_steps &= ~STEP_VERITY;
}

static void f2fs_post_read_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	if ((ctx->enabled_steps & STEP_DECRYPT) && !fscrypt_decrypt_bio(bio)) {
		f2fs_finish_read_bio(bio, true);
		return;
	}

	if (ctx->enabled_steps & STEP_DECOMPRESS)
		f2fs_handle_step_decompress(ctx, true);

	f2fs_verify_and_finish_bio(bio, true);
}

static void f2fs_read_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
	struct bio_post_read_ctx *ctx;
	bool intask = in_task();

	iostat_update_and_unbind_ctx(bio);
	ctx = bio->bi_private;

	if (time_to_inject(sbi, FAULT_READ_IO))
		bio->bi_status = BLK_STS_IOERR;

	if (bio->bi_status) {
		f2fs_finish_read_bio(bio, intask);
		return;
	}

	if (ctx) {
		unsigned int enabled_steps = ctx->enabled_steps &
					(STEP_DECRYPT | STEP_DECOMPRESS);

		/*
		 * If decompression is the only enabled post-read step, handle
		 * it inline here rather than bouncing through the post-read
		 * workqueue (unless low-memory mode requires offloading).
		 */
		if (enabled_steps == STEP_DECOMPRESS &&
				!f2fs_low_mem_mode(sbi)) {
			f2fs_handle_step_decompress(ctx, intask);
		} else if (enabled_steps) {
			INIT_WORK(&ctx->work, f2fs_post_read_work);
			queue_work(ctx->sbi->post_read_wq, &ctx->work);
			return;
		}
	}

	f2fs_verify_and_finish_bio(bio, intask);
}

static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	iostat_update_and_unbind_ctx(bio);
	sbi = bio->bi_private;

	if (time_to_inject(sbi, FAULT_WRITE_IO))
		bio->bi_status = BLK_STS_IOERR;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page, false);

		fscrypt_finalize_bounce_page(&page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (f2fs_is_compressed_page(page)) {
			f2fs_compress_write_end_io(bio, page);
			continue;
		}
#endif

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true,
						STOP_CP_REASON_WRITE_FAIL);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		if (f2fs_in_warm_node_list(sbi, page))
			f2fs_del_fsync_node_entry(sbi, page);
		clear_page_private_gcing(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

#ifdef CONFIG_BLK_DEV_ZONED
static void f2fs_zone_write_end_io(struct bio *bio)
{
	struct f2fs_bio_info *io = (struct f2fs_bio_info *)bio->bi_private;

	bio->bi_private = io->bi_private;
	complete(&io->zone_wait);
	f2fs_write_end_io(bio);
}
#endif

struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, sector_t *sector)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	if (f2fs_is_multi_device(sbi)) {
		for (i = 0; i < sbi->s_ndevs; i++) {
			if (FDEV(i).start_blk <= blk_addr &&
			    FDEV(i).end_blk >= blk_addr) {
				blk_addr -= FDEV(i).start_blk;
				bdev = FDEV(i).bdev;
				break;
			}
		}
	}

	if (sector)
		*sector = SECTOR_FROM_BLOCK(blk_addr);
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}

static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
{
	unsigned int temp_mask = GENMASK(NR_TEMP_TYPE - 1, 0);
	unsigned int fua_flag, meta_flag, io_flag;
	blk_opf_t op_flags = 0;

	if (fio->op != REQ_OP_WRITE)
		return 0;
	if (fio->type == DATA)
		io_flag = fio->sbi->data_io_flag;
	else if (fio->type == NODE)
		io_flag = fio->sbi->node_io_flag;
	else
		return 0;

	fua_flag = io_flag & temp_mask;
	meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;

	/*
	 * data/node io flag bits per temp:
	 *      REQ_META     |      REQ_FUA      |
	 *    5 |    4 |   3 |    2 |    1 |   0 |
	 * Cold | Warm | Hot | Cold | Warm | Hot |
	 */
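	/*
	 * For example, io_flag = 0b001010 gives fua_flag = 0b010 and
	 * meta_flag = 0b001, i.e. REQ_FUA is set on warm writes and REQ_META
	 * on hot writes.
	 */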
	if (BIT(fio->temp) & meta_flag)
		op_flags |= REQ_META;
	if (BIT(fio->temp) & fua_flag)
		op_flags |= REQ_FUA;
	return op_flags;
}

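/*
 * Allocate a bio aimed at fio->new_blkaddr and wire up the read or write
 * completion handler plus iostat and writeback-cgroup bookkeeping.
 */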
static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct block_device *bdev;
	sector_t sector;
	struct bio *bio;

	bdev = f2fs_target_device(sbi, fio->new_blkaddr, &sector);
	bio = bio_alloc_bioset(bdev, npages,
				fio->op | fio->op_flags | f2fs_io_flags(fio),
				GFP_NOIO, &f2fs_bioset);
	bio->bi_iter.bi_sector = sector;
	if (is_read_io(fio->op)) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
	}
	iostat_alloc_and_bind_ctx(sbi, bio, NULL);

	if (fio->io_wbc)
		wbc_init_bio(fio->io_wbc, bio);

	return bio;
}

static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
				  pgoff_t first_idx,
				  const struct f2fs_io_info *fio,
				  gfp_t gfp_mask)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (!fio || !fio->encrypted_page)
		fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
}

static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
				     pgoff_t next_idx,
				     const struct f2fs_io_info *fio)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (fio && fio->encrypted_page)
		return !bio_has_crypt_ctx(bio);

	return fscrypt_mergeable_bio(bio, inode, next_idx);
}

void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
				 enum page_type type)
{
	WARN_ON_ONCE(!is_read_io(bio_op(bio)));
	trace_f2fs_submit_read_bio(sbi->sb, type, bio);

	iostat_update_submit_ctx(bio, type);
	submit_bio(bio);
}

static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
				  enum page_type type)
{
	WARN_ON_ONCE(is_read_io(bio_op(bio)));

	if (f2fs_lfs_mode(sbi) && current->plug && PAGE_TYPE_ON_MAIN(type))
		blk_finish_plug(current->plug);

	trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	iostat_update_submit_ctx(bio, type);
	submit_bio(bio);
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->op)) {
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
		f2fs_submit_read_bio(io->sbi, io->bio, fio->type);
	} else {
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
		f2fs_submit_write_bio(io->sbi, io->bio, fio->type);
	}
	io->bio = NULL;
}

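/*
 * Return true if @bio contains a page of @inode, the specific @page, or a
 * node page owned by inode number @ino; with @inode, @page and @ino all
 * unset, any non-empty bio counts as a match.
 */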
static bool __has_merged_page(struct bio *bio, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (!bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *target = bvec->bv_page;

		if (fscrypt_is_bounce_page(target)) {
			target = fscrypt_pagecache_page(target);
			if (IS_ERR(target))
				continue;
		}
		if (f2fs_is_compressed_page(target)) {
			target = f2fs_compress_control_page(target);
			if (IS_ERR(target))
				continue;
		}

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < NR_PAGE_TYPE; i++) {
		int n = (i == META) ? 1 : NR_TEMP_TYPE;
		int j;

		sbi->write_io[i] = f2fs_kmalloc(sbi,
				array_size(n, sizeof(struct f2fs_bio_info)),
				GFP_KERNEL);
		if (!sbi->write_io[i])
			return -ENOMEM;

		for (j = HOT; j < n; j++) {
			init_f2fs_rwsem(&sbi->write_io[i][j].io_rwsem);
			sbi->write_io[i][j].sbi = sbi;
			sbi->write_io[i][j].bio = NULL;
			spin_lock_init(&sbi->write_io[i][j].io_lock);
			INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
			INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
			init_f2fs_rwsem(&sbi->write_io[i][j].bio_list_lock);
#ifdef CONFIG_BLK_DEV_ZONED
			init_completion(&sbi->write_io[i][j].zone_wait);
			sbi->write_io[i][j].zone_pending_bio = NULL;
			sbi->write_io[i][j].bi_private = NULL;
#endif
		}
	}

	return 0;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	f2fs_down_write(&io->io_rwsem);

	if (!io->bio)
		goto unlock_out;

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->bio->bi_opf |= REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->bio->bi_opf |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
unlock_out:
	f2fs_up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, bool force)
{
	enum temp_type temp;
	bool ret = true;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		if (!force) {
			enum page_type btype = PAGE_TYPE_OF_BIO(type);
			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

			f2fs_down_read(&io->io_rwsem);
			ret = __has_merged_page(io->bio, inode, page, ino);
			f2fs_up_read(&io->io_rwsem);
		}
		if (ret)
			__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			fio->is_por ? META_POR : (__is_meta_io(fio) ?
			META_GENERIC : DATA_GENERIC_ENHANCE))) {
		f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
		return -EFSCORRUPTED;
	}

	trace_f2fs_submit_page_bio(page, fio);

	/* Allocate a new bio */
	bio = __bio_alloc(fio, 1);

	f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
			       fio->page->index, fio, GFP_NOIO);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	if (fio->io_wbc && !is_read_io(fio->op))
		wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);

	inc_page_count(fio->sbi, is_read_io(fio->op) ?
			__read_io_type(page) : WB_DATA_TYPE(fio->page, false));

	if (is_read_io(bio_op(bio)))
		f2fs_submit_read_bio(fio->sbi, bio, fio->type);
	else
		f2fs_submit_write_bio(fio->sbi, bio, fio->type);
	return 0;
}

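/*
 * A page may be appended to the pending bio only if the bio stays under the
 * configured max_io_bytes cap, the block address is contiguous with the last
 * one, and the target block device is unchanged.
 */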
static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
				block_t last_blkaddr, block_t cur_blkaddr)
{
	if (unlikely(sbi->max_io_bytes &&
			bio->bi_iter.bi_size >= sbi->max_io_bytes))
		return false;
	if (last_blkaddr + 1 != cur_blkaddr)
		return false;
	return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
}

static bool io_type_is_mergeable(struct f2fs_bio_info *io,
						struct f2fs_io_info *fio)
{
	if (io->fio.op != fio->op)
		return false;
	return io->fio.op_flags == fio->op_flags;
}

static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
					struct f2fs_bio_info *io,
					struct f2fs_io_info *fio,
					block_t last_blkaddr,
					block_t cur_blkaddr)
{
	if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
		return false;
	return io_type_is_mergeable(io, fio);
}

static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
				struct page *page, enum temp_type temp)
{
	struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
	struct bio_entry *be;

	be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS, true, NULL);
	be->bio = bio;
	bio_get(bio);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
		f2fs_bug_on(sbi, 1);

	f2fs_down_write(&io->bio_list_lock);
	list_add_tail(&be->list, &io->bio_list);
	f2fs_up_write(&io->bio_list_lock);
}

static void del_bio_entry(struct bio_entry *be)
{
	list_del(&be->list);
	kmem_cache_free(bio_entry_slab, be);
}

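/*
 * Try to merge @page into the in-place-update bio tracked by *bio in the
 * per-temperature bio lists.  If the crypto context or block range doesn't
 * allow the merge, the pending bio is submitted and -EAGAIN tells the caller
 * to allocate a fresh one.
 */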
static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
							struct page *page)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum temp_type temp;
	bool found = false;
	int ret = -EAGAIN;

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		f2fs_down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (be->bio != *bio)
				continue;

			found = true;

			f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
							    *fio->last_block,
							    fio->new_blkaddr));
			if (f2fs_crypt_mergeable_bio(*bio,
					fio->page->mapping->host,
					fio->page->index, fio) &&
			    bio_add_page(*bio, page, PAGE_SIZE, 0) ==
					PAGE_SIZE) {
				ret = 0;
				break;
			}

			/* page can't be merged into bio; submit the bio */
			del_bio_entry(be);
			f2fs_submit_write_bio(sbi, *bio, DATA);
			break;
		}
		f2fs_up_write(&io->bio_list_lock);
	}

	if (ret) {
		bio_put(*bio);
		*bio = NULL;
	}

	return ret;
}

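/*
 * Flush the pending IPU bio that matches @bio (or, with a NULL target, the
 * one containing @page).  The bio lists are scanned under a read lock first;
 * only on a hit is the write lock taken to unlink and submit the bio.
 */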
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
					struct bio **bio, struct page *page)
{
	enum temp_type temp;
	bool found = false;
	struct bio *target = bio ? *bio : NULL;

	f2fs_bug_on(sbi, !target && !page);

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		if (list_empty(head))
			continue;

		f2fs_down_read(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (target)
				found = (target == be->bio);
			else
				found = __has_merged_page(be->bio, NULL,
								page, 0);
			if (found)
				break;
		}
		f2fs_up_read(&io->bio_list_lock);

		if (!found)
			continue;

		found = false;

		f2fs_down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (target)
				found = (target == be->bio);
			else
				found = __has_merged_page(be->bio, NULL,
								page, 0);
			if (found) {
				target = be->bio;
				del_bio_entry(be);
				break;
			}
		}
		f2fs_up_write(&io->bio_list_lock);
	}

	if (found)
		f2fs_submit_write_bio(sbi, target, DATA);
	if (bio && *bio) {
		bio_put(*bio);
		*bio = NULL;
	}
}

int f2fs_merge_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio = *fio->bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC)) {
		f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
		return -EFSCORRUPTED;
	}

	trace_f2fs_submit_page_bio(page, fio);

	if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
						fio->new_blkaddr))
		f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
alloc_new:
	if (!bio) {
		bio = __bio_alloc(fio, BIO_MAX_VECS);
		f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
				       fio->page->index, fio, GFP_NOIO);

		add_bio_entry(fio->sbi, bio, page, fio->temp);
	} else {
		if (add_ipu_page(fio, &bio, page))
			goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);

	inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));

	*fio->last_block = fio->new_blkaddr;
	*fio->bio = bio;

	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
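/*
 * Return true if @blkaddr is the last block of a sequential zone on its
 * (possibly per-device) zoned block device.
 */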
static bool is_end_zone_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int devi = 0;

	if (f2fs_is_multi_device(sbi)) {
		devi = f2fs_target_device_index(sbi, blkaddr);
		if (blkaddr < FDEV(devi).start_blk ||
		    blkaddr > FDEV(devi).end_blk) {
			f2fs_err(sbi, "Invalid block %x", blkaddr);
			return false;
		}
		blkaddr -= FDEV(devi).start_blk;
		bdev = FDEV(devi).bdev;
	}
	return bdev_is_zoned(bdev) &&
		f2fs_blkz_is_seq(sbi, devi, blkaddr) &&
		(blkaddr % sbi->blocks_per_blkz == sbi->blocks_per_blkz - 1);
}
#endif

void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;
	enum count_type type;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	f2fs_down_write(&io->io_rwsem);
next:
#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi) && btype < META && io->zone_pending_bio) {
		wait_for_completion_io(&io->zone_wait);
		bio_put(io->zone_pending_bio);
		io->zone_pending_bio = NULL;
		io->bi_private = NULL;
	}
#endif

	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	verify_fio_blkaddr(fio);

	if (fio->encrypted_page)
		bio_page = fio->encrypted_page;
	else if (fio->compressed_page)
		bio_page = fio->compressed_page;
	else
		bio_page = fio->page;

	/* set submitted = true as a return value */
	fio->submitted = 1;

	type = WB_DATA_TYPE(bio_page, fio->compressed_page);
	inc_page_count(sbi, type);

	if (io->bio &&
	    (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
			      fio->new_blkaddr) ||
	     !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
				       bio_page->index, fio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		io->bio = __bio_alloc(fio, BIO_MAX_VECS);
		f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
				       bio_page->index, fio, GFP_NOIO);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);

	io->last_block_in_bio = fio->new_blkaddr;

	trace_f2fs_submit_page_write(fio->page, fio);
#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi) && btype < META &&
			is_end_zone_blkaddr(sbi, fio->new_blkaddr)) {
		bio_get(io->bio);
		reinit_completion(&io->zone_wait);
		io->bi_private = io->bio->bi_private;
		io->bio->bi_private = io;
		io->bio->bi_end_io = f2fs_zone_write_end_io;
		io->zone_pending_bio = io->bio;
		__submit_merged_bio(io);
	}
#endif
	if (fio->in_list)
		goto next;
out:
	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
				!f2fs_is_checkpoint_ready(sbi))
		__submit_merged_bio(io);
	f2fs_up_write(&io->io_rwsem);
}

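/*
 * Allocate a read bio for @blkaddr and, when decryption, fs-verity, or
 * decompression may be needed, attach a bio_post_read_ctx describing the
 * enabled post-read steps.
 */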
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
				      unsigned nr_pages, blk_opf_t op_flag,
				      pgoff_t first_idx, bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;
	struct bio_post_read_ctx *ctx = NULL;
	unsigned int post_read_steps = 0;
	sector_t sector;
	struct block_device *bdev = f2fs_target_device(sbi, blkaddr, &sector);

	bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages),
			       REQ_OP_READ | op_flag,
			       for_write ? GFP_NOIO : GFP_KERNEL, &f2fs_bioset);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio->bi_iter.bi_sector = sector;
	f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
	bio->bi_end_io = f2fs_read_end_io;

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= STEP_DECRYPT;

	if (f2fs_need_verity(inode, first_idx))
		post_read_steps |= STEP_VERITY;

	/*
	 * STEP_DECOMPRESS is handled specially, since a compressed file might
	 * contain both compressed and uncompressed clusters.  We'll allocate a
	 * bio_post_read_ctx if the file is compressed, but the caller is
	 * responsible for enabling STEP_DECOMPRESS if it's actually needed.
	 */

	if (post_read_steps || f2fs_compressed_file(inode)) {
		/* Due to the mempool, this never fails. */
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		ctx->bio = bio;
		ctx->sbi = sbi;
		ctx->enabled_steps = post_read_steps;
		ctx->fs_blkaddr = blkaddr;
		ctx->decompression_attempted = false;
		bio->bi_private = ctx;
	}
	iostat_alloc_and_bind_ctx(sbi, bio, ctx);

	return bio;
}

/* This can handle encrypted pages. */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
				 block_t blkaddr, blk_opf_t op_flags,
				 bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;

	bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
					page->index, for_write);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, blkaddr);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		iostat_update_and_unbind_ctx(bio);
		if (bio->bi_private)
			mempool_free(bio->bi_private, bio_post_read_ctx_pool);
		bio_put(bio);
		return -EFAULT;
	}
	inc_page_count(sbi, F2FS_RD_DATA);
	f2fs_update_iostat(sbi, NULL, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_submit_read_bio(sbi, bio, DATA);
	return 0;
}

static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	__le32 *addr = get_dnode_addr(dn->inode, dn->node_page);

	dn->data_blkaddr = blkaddr;
	addr[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

112593dfe2acSJaegeuk Kim /*
1126eb47b800SJaegeuk Kim * Lock ordering for the change of data block address:
1127eb47b800SJaegeuk Kim * ->data_page
1128eb47b800SJaegeuk Kim * ->node_page
1129eb47b800SJaegeuk Kim * update block addresses in the node page
1130eb47b800SJaegeuk Kim */
f2fs_set_data_blkaddr(struct dnode_of_data * dn,block_t blkaddr)1131eb6d30bcSChao Yu void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
1132eb47b800SJaegeuk Kim {
1133bae0ee7aSChao Yu f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
1134eb6d30bcSChao Yu __set_data_blkaddr(dn, blkaddr);
113546008c6dSChao Yu if (set_page_dirty(dn->node_page))
113693bae099SJaegeuk Kim dn->node_changed = true;
1137eb47b800SJaegeuk Kim }
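/*
 * Illustrative sketch (not part of the original file): the typical
 * calling pattern around f2fs_set_data_blkaddr(). The dnode must be
 * looked up first so that dn->node_page is valid; the caller shown
 * here is hypothetical.
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (!err) {
 *		f2fs_set_data_blkaddr(&dn, NEW_ADDR);
 *		f2fs_put_dnode(&dn);
 *	}
 */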
1138eb47b800SJaegeuk Kim
1139f28b3434SChao Yu void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
1140f28b3434SChao Yu {
1141eb6d30bcSChao Yu f2fs_set_data_blkaddr(dn, blkaddr);
1142e7547dacSJaegeuk Kim f2fs_update_read_extent_cache(dn);
1143f28b3434SChao Yu }
1144f28b3434SChao Yu
114546008c6dSChao Yu /* dn->ofs_in_node will be returned with an up-to-date pointer to the last block */
11464d57b86dSChao Yu int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
1147eb47b800SJaegeuk Kim {
11484081363fSJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
11490abd675eSChao Yu int err;
1150eb47b800SJaegeuk Kim
115146008c6dSChao Yu if (!count)
115246008c6dSChao Yu return 0;
115346008c6dSChao Yu
115491942321SJaegeuk Kim if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1155eb47b800SJaegeuk Kim return -EPERM;
1156bc1e3992SChao Yu err = inc_valid_block_count(sbi, dn->inode, &count, true);
1157bc1e3992SChao Yu if (unlikely(err))
11580abd675eSChao Yu return err;
1159eb47b800SJaegeuk Kim
116046008c6dSChao Yu trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
116146008c6dSChao Yu dn->ofs_in_node, count);
1162c01e2853SNamjae Jeon
1163bae0ee7aSChao Yu f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
116446008c6dSChao Yu
116546008c6dSChao Yu for (; count > 0; dn->ofs_in_node++) {
1166a2ced1ceSChao Yu block_t blkaddr = f2fs_data_blkaddr(dn);
11675f029c04SYi Zhuang
116846008c6dSChao Yu if (blkaddr == NULL_ADDR) {
1169eb6d30bcSChao Yu __set_data_blkaddr(dn, NEW_ADDR);
117046008c6dSChao Yu count--;
117146008c6dSChao Yu }
117246008c6dSChao Yu }
117346008c6dSChao Yu
117446008c6dSChao Yu if (set_page_dirty(dn->node_page))
117546008c6dSChao Yu dn->node_changed = true;
1176eb47b800SJaegeuk Kim return 0;
1177eb47b800SJaegeuk Kim }
1178eb47b800SJaegeuk Kim
117946008c6dSChao Yu /* Should keep dn->ofs_in_node unchanged */
11804d57b86dSChao Yu int f2fs_reserve_new_block(struct dnode_of_data *dn)
118146008c6dSChao Yu {
118246008c6dSChao Yu unsigned int ofs_in_node = dn->ofs_in_node;
118346008c6dSChao Yu int ret;
118446008c6dSChao Yu
11854d57b86dSChao Yu ret = f2fs_reserve_new_blocks(dn, 1);
118646008c6dSChao Yu dn->ofs_in_node = ofs_in_node;
118746008c6dSChao Yu return ret;
118846008c6dSChao Yu }
118946008c6dSChao Yu
1190b600965cSHuajun Li int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
1191b600965cSHuajun Li {
1192b600965cSHuajun Li bool need_put = dn->inode_page ? false : true;
1193b600965cSHuajun Li int err;
1194b600965cSHuajun Li
11954d57b86dSChao Yu err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
1196b600965cSHuajun Li if (err)
1197b600965cSHuajun Li return err;
1198a8865372SJaegeuk Kim
1199b600965cSHuajun Li if (dn->data_blkaddr == NULL_ADDR)
12004d57b86dSChao Yu err = f2fs_reserve_new_block(dn);
1201a8865372SJaegeuk Kim if (err || need_put)
1202b600965cSHuajun Li f2fs_put_dnode(dn);
1203b600965cSHuajun Li return err;
1204b600965cSHuajun Li }
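/*
 * Illustrative sketch (hypothetical caller): reserving one block at a
 * page index. Per the logic above, when the caller passes no inode
 * page (or on error), the dnode is put internally, so no explicit
 * f2fs_put_dnode() is needed here.
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_reserve_block(&dn, index);
 */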
1205b600965cSHuajun Li
12064d57b86dSChao Yu struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
120759237a21SChao Yu blk_opf_t op_flags, bool for_write,
120859237a21SChao Yu pgoff_t *next_pgofs)
1209eb47b800SJaegeuk Kim {
1210eb47b800SJaegeuk Kim struct address_space *mapping = inode->i_mapping;
1211eb47b800SJaegeuk Kim struct dnode_of_data dn;
1212eb47b800SJaegeuk Kim struct page *page;
1213eb47b800SJaegeuk Kim int err;
12144375a336SJaegeuk Kim
1215a56c7c6fSJaegeuk Kim page = f2fs_grab_cache_page(mapping, index, for_write);
1216eb47b800SJaegeuk Kim if (!page)
1217eb47b800SJaegeuk Kim return ERR_PTR(-ENOMEM);
1218eb47b800SJaegeuk Kim
121904a91ab0SChristoph Hellwig if (f2fs_lookup_read_extent_cache_block(inode, index,
122004a91ab0SChristoph Hellwig &dn.data_blkaddr)) {
122193770ab7SChao Yu if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
122293770ab7SChao Yu DATA_GENERIC_ENHANCE_READ)) {
122310f966bbSChao Yu err = -EFSCORRUPTED;
122495fa90c9SChao Yu f2fs_handle_error(F2FS_I_SB(inode),
122595fa90c9SChao Yu ERROR_INVALID_BLKADDR);
122693770ab7SChao Yu goto put_err;
122793770ab7SChao Yu }
1228cb3bc9eeSChao Yu goto got_it;
1229cb3bc9eeSChao Yu }
1230cb3bc9eeSChao Yu
1231650495deSJaegeuk Kim set_new_dnode(&dn, inode, NULL, NULL, 0);
12324d57b86dSChao Yu err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
123359237a21SChao Yu if (err) {
123459237a21SChao Yu if (err == -ENOENT && next_pgofs)
123559237a21SChao Yu *next_pgofs = f2fs_get_next_page_offset(&dn, index);
123686531d6bSJaegeuk Kim goto put_err;
123759237a21SChao Yu }
1238650495deSJaegeuk Kim f2fs_put_dnode(&dn);
1239650495deSJaegeuk Kim
12406bacf52fSJaegeuk Kim if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
124186531d6bSJaegeuk Kim err = -ENOENT;
124259237a21SChao Yu if (next_pgofs)
124359237a21SChao Yu *next_pgofs = index + 1;
124486531d6bSJaegeuk Kim goto put_err;
1245650495deSJaegeuk Kim }
124693770ab7SChao Yu if (dn.data_blkaddr != NEW_ADDR &&
124793770ab7SChao Yu !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
124893770ab7SChao Yu dn.data_blkaddr,
124993770ab7SChao Yu DATA_GENERIC_ENHANCE)) {
125010f966bbSChao Yu err = -EFSCORRUPTED;
125195fa90c9SChao Yu f2fs_handle_error(F2FS_I_SB(inode),
125295fa90c9SChao Yu ERROR_INVALID_BLKADDR);
125393770ab7SChao Yu goto put_err;
125493770ab7SChao Yu }
1255cb3bc9eeSChao Yu got_it:
125643f3eae1SJaegeuk Kim if (PageUptodate(page)) {
125743f3eae1SJaegeuk Kim unlock_page(page);
1258eb47b800SJaegeuk Kim return page;
125943f3eae1SJaegeuk Kim }
1260eb47b800SJaegeuk Kim
1261d59ff4dfSJaegeuk Kim /*
1262d59ff4dfSJaegeuk Kim * A new dentry page may be allocated but not able to be written, since
1263d59ff4dfSJaegeuk Kim * its new inode page couldn't be allocated due to -ENOSPC.
1264d59ff4dfSJaegeuk Kim * In such a case, its blkaddr can remain NEW_ADDR;
12654d57b86dSChao Yu * see f2fs_add_link -> f2fs_get_new_data_page ->
12664d57b86dSChao Yu * f2fs_init_inode_metadata.
1267d59ff4dfSJaegeuk Kim */
1268d59ff4dfSJaegeuk Kim if (dn.data_blkaddr == NEW_ADDR) {
126909cbfeafSKirill A. Shutemov zero_user_segment(page, 0, PAGE_SIZE);
1270237c0790SJaegeuk Kim if (!PageUptodate(page))
1271d59ff4dfSJaegeuk Kim SetPageUptodate(page);
127243f3eae1SJaegeuk Kim unlock_page(page);
1273d59ff4dfSJaegeuk Kim return page;
1274d59ff4dfSJaegeuk Kim }
1275eb47b800SJaegeuk Kim
1276b7973091SJia Yang err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
1277b7973091SJia Yang op_flags, for_write);
1278393ff91fSJaegeuk Kim if (err)
127986531d6bSJaegeuk Kim goto put_err;
128043f3eae1SJaegeuk Kim return page;
128186531d6bSJaegeuk Kim
128286531d6bSJaegeuk Kim put_err:
128386531d6bSJaegeuk Kim f2fs_put_page(page, 1);
128486531d6bSJaegeuk Kim return ERR_PTR(err);
128543f3eae1SJaegeuk Kim }
1286393ff91fSJaegeuk Kim
128759237a21SChao Yu struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
128859237a21SChao Yu pgoff_t *next_pgofs)
128943f3eae1SJaegeuk Kim {
129043f3eae1SJaegeuk Kim struct address_space *mapping = inode->i_mapping;
129143f3eae1SJaegeuk Kim struct page *page;
129243f3eae1SJaegeuk Kim
129343f3eae1SJaegeuk Kim page = find_get_page(mapping, index);
129443f3eae1SJaegeuk Kim if (page && PageUptodate(page))
129543f3eae1SJaegeuk Kim return page;
129643f3eae1SJaegeuk Kim f2fs_put_page(page, 0);
129743f3eae1SJaegeuk Kim
129859237a21SChao Yu page = f2fs_get_read_data_page(inode, index, 0, false, next_pgofs);
129943f3eae1SJaegeuk Kim if (IS_ERR(page))
130043f3eae1SJaegeuk Kim return page;
130143f3eae1SJaegeuk Kim
130243f3eae1SJaegeuk Kim if (PageUptodate(page))
130343f3eae1SJaegeuk Kim return page;
130443f3eae1SJaegeuk Kim
130543f3eae1SJaegeuk Kim wait_on_page_locked(page);
130643f3eae1SJaegeuk Kim if (unlikely(!PageUptodate(page))) {
130743f3eae1SJaegeuk Kim f2fs_put_page(page, 0);
130843f3eae1SJaegeuk Kim return ERR_PTR(-EIO);
130943f3eae1SJaegeuk Kim }
131043f3eae1SJaegeuk Kim return page;
131143f3eae1SJaegeuk Kim }
131243f3eae1SJaegeuk Kim
131343f3eae1SJaegeuk Kim /*
131443f3eae1SJaegeuk Kim * If it tries to access a hole, return an error, because the callers
131543f3eae1SJaegeuk Kim * (functions in dir.c and GC) need to be able to tell
131643f3eae1SJaegeuk Kim * whether this page exists or not.
131743f3eae1SJaegeuk Kim */
13184d57b86dSChao Yu struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
1319a56c7c6fSJaegeuk Kim bool for_write)
132043f3eae1SJaegeuk Kim {
132143f3eae1SJaegeuk Kim struct address_space *mapping = inode->i_mapping;
132243f3eae1SJaegeuk Kim struct page *page;
1323d2d9bb3bSJaegeuk Kim
132459237a21SChao Yu page = f2fs_get_read_data_page(inode, index, 0, for_write, NULL);
132543f3eae1SJaegeuk Kim if (IS_ERR(page))
132643f3eae1SJaegeuk Kim return page;
132743f3eae1SJaegeuk Kim
132843f3eae1SJaegeuk Kim /* wait for read completion */
1329393ff91fSJaegeuk Kim lock_page(page);
1330d2d9bb3bSJaegeuk Kim if (unlikely(page->mapping != mapping || !PageUptodate(page))) {
13311563ac75SChao Yu f2fs_put_page(page, 1);
13321563ac75SChao Yu return ERR_PTR(-EIO);
13331563ac75SChao Yu }
1334eb47b800SJaegeuk Kim return page;
1335eb47b800SJaegeuk Kim }
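/*
 * Illustrative sketch (hypothetical caller, in the style of dir.c):
 * f2fs_get_lock_data_page() returns the page locked and uptodate, so
 * the caller must drop both the lock and the reference when done.
 *
 *	page = f2fs_get_lock_data_page(dir, n, false);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... read entries from the page ...
 *	f2fs_put_page(page, 1);
 */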
1336eb47b800SJaegeuk Kim
13370a8165d7SJaegeuk Kim /*
1338eb47b800SJaegeuk Kim * Caller ensures that this data page is never allocated.
1339eb47b800SJaegeuk Kim * A new zero-filled data page is allocated in the page cache.
134039936837SJaegeuk Kim *
13414f4124d0SChao Yu * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
13424f4124d0SChao Yu * f2fs_unlock_op().
1343470f00e9SChao Yu * Note that ipage is set only by make_empty_dir, and if any error occurs,
1344470f00e9SChao Yu * ipage is released by this function.
1345eb47b800SJaegeuk Kim */
13464d57b86dSChao Yu struct page *f2fs_get_new_data_page(struct inode *inode,
1347a8865372SJaegeuk Kim struct page *ipage, pgoff_t index, bool new_i_size)
1348eb47b800SJaegeuk Kim {
1349eb47b800SJaegeuk Kim struct address_space *mapping = inode->i_mapping;
1350eb47b800SJaegeuk Kim struct page *page;
1351eb47b800SJaegeuk Kim struct dnode_of_data dn;
1352eb47b800SJaegeuk Kim int err;
13537612118aSJaegeuk Kim
1354a56c7c6fSJaegeuk Kim page = f2fs_grab_cache_page(mapping, index, true);
1355470f00e9SChao Yu if (!page) {
1356470f00e9SChao Yu /*
1357470f00e9SChao Yu * before exiting, we should make sure ipage will be released
1358470f00e9SChao Yu * if any error occurs.
1359470f00e9SChao Yu */
1360470f00e9SChao Yu f2fs_put_page(ipage, 1);
136101f28610SJaegeuk Kim return ERR_PTR(-ENOMEM);
1362470f00e9SChao Yu }
1363eb47b800SJaegeuk Kim
1364a8865372SJaegeuk Kim set_new_dnode(&dn, inode, ipage, NULL, 0);
1365b600965cSHuajun Li err = f2fs_reserve_block(&dn, index);
136601f28610SJaegeuk Kim if (err) {
136701f28610SJaegeuk Kim f2fs_put_page(page, 1);
1368eb47b800SJaegeuk Kim return ERR_PTR(err);
1369a8865372SJaegeuk Kim }
137001f28610SJaegeuk Kim if (!ipage)
137101f28610SJaegeuk Kim f2fs_put_dnode(&dn);
1372eb47b800SJaegeuk Kim
1373eb47b800SJaegeuk Kim if (PageUptodate(page))
137401f28610SJaegeuk Kim goto got_it;
1375eb47b800SJaegeuk Kim
1376eb47b800SJaegeuk Kim if (dn.data_blkaddr == NEW_ADDR) {
137709cbfeafSKirill A. Shutemov zero_user_segment(page, 0, PAGE_SIZE);
1378237c0790SJaegeuk Kim if (!PageUptodate(page))
1379393ff91fSJaegeuk Kim SetPageUptodate(page);
1380eb47b800SJaegeuk Kim } else {
13814375a336SJaegeuk Kim f2fs_put_page(page, 1);
1382a8865372SJaegeuk Kim
13837612118aSJaegeuk Kim /* if ipage exists, blkaddr should be NEW_ADDR */
13847612118aSJaegeuk Kim f2fs_bug_on(F2FS_I_SB(inode), ipage);
13854d57b86dSChao Yu page = f2fs_get_lock_data_page(inode, index, true);
13864375a336SJaegeuk Kim if (IS_ERR(page))
13877612118aSJaegeuk Kim return page;
1388eb47b800SJaegeuk Kim }
138901f28610SJaegeuk Kim got_it:
13909edcdabfSChao Yu if (new_i_size && i_size_read(inode) <
1391ee6d182fSJaegeuk Kim ((loff_t)(index + 1) << PAGE_SHIFT))
1392fc9581c8SJaegeuk Kim f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
1393eb47b800SJaegeuk Kim return page;
1394eb47b800SJaegeuk Kim }
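/*
 * Illustrative sketch (hypothetical caller): note the asymmetric ipage
 * ownership documented above. On failure, ipage has already been
 * released inside f2fs_get_new_data_page(), so the caller must not put
 * it again.
 *
 *	page = f2fs_get_new_data_page(dir, ipage, 0, true);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);	(ipage already released)
 */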
1395eb47b800SJaegeuk Kim
1396d5097be5SHyunchul Lee static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
1397bfad7c2dSJaegeuk Kim {
13984081363fSJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1399bfad7c2dSJaegeuk Kim struct f2fs_summary sum;
1400bfad7c2dSJaegeuk Kim struct node_info ni;
14016aa58d8aSChao Yu block_t old_blkaddr;
140246008c6dSChao Yu blkcnt_t count = 1;
14030abd675eSChao Yu int err;
1404bfad7c2dSJaegeuk Kim
140591942321SJaegeuk Kim if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1406bfad7c2dSJaegeuk Kim return -EPERM;
1407df6136efSChao Yu
1408a9419b63SJaegeuk Kim err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
14097735730dSChao Yu if (err)
14107735730dSChao Yu return err;
14117735730dSChao Yu
1412a2ced1ceSChao Yu dn->data_blkaddr = f2fs_data_blkaddr(dn);
14133cf684f2SChristoph Hellwig if (dn->data_blkaddr == NULL_ADDR) {
1414bc1e3992SChao Yu err = inc_valid_block_count(sbi, dn->inode, &count, true);
14153cf684f2SChristoph Hellwig if (unlikely(err))
14160abd675eSChao Yu return err;
14173cf684f2SChristoph Hellwig }
1418bfad7c2dSJaegeuk Kim
1419bfad7c2dSJaegeuk Kim set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
14206aa58d8aSChao Yu old_blkaddr = dn->data_blkaddr;
14216aa58d8aSChao Yu f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
1422093749e2SChao Yu &sum, seg_type, NULL);
1423cfd217f6SChao Yu if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
1424cfd217f6SChao Yu f2fs_invalidate_internal_cache(sbi, old_blkaddr);
1425cfd217f6SChao Yu
142686f35dc3SChao Yu f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
1427bfad7c2dSJaegeuk Kim return 0;
1428bfad7c2dSJaegeuk Kim }
1429bfad7c2dSJaegeuk Kim
14302f51ade9SChristoph Hellwig static void f2fs_map_lock(struct f2fs_sb_info *sbi, int flag)
143159c9081bSYunlei He {
14322f51ade9SChristoph Hellwig if (flag == F2FS_GET_BLOCK_PRE_AIO)
1433e4544b63STim Murray f2fs_down_read(&sbi->node_change);
143459c9081bSYunlei He else
143559c9081bSYunlei He f2fs_lock_op(sbi);
14362f51ade9SChristoph Hellwig }
14372f51ade9SChristoph Hellwig
14382f51ade9SChristoph Hellwig static void f2fs_map_unlock(struct f2fs_sb_info *sbi, int flag)
14392f51ade9SChristoph Hellwig {
14402f51ade9SChristoph Hellwig if (flag == F2FS_GET_BLOCK_PRE_AIO)
14412f51ade9SChristoph Hellwig f2fs_up_read(&sbi->node_change);
144259c9081bSYunlei He else
144359c9081bSYunlei He f2fs_unlock_op(sbi);
144459c9081bSYunlei He }
144559c9081bSYunlei He
1446cf342d3bSChristoph Hellwig int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index)
1447cf342d3bSChristoph Hellwig {
1448cf342d3bSChristoph Hellwig struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1449ffdeab71SChristoph Hellwig int err = 0;
1450cf342d3bSChristoph Hellwig
14512f51ade9SChristoph Hellwig f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
1452ffdeab71SChristoph Hellwig if (!f2fs_lookup_read_extent_cache_block(dn->inode, index,
1453ffdeab71SChristoph Hellwig &dn->data_blkaddr))
1454ffdeab71SChristoph Hellwig err = f2fs_reserve_block(dn, index);
14552f51ade9SChristoph Hellwig f2fs_map_unlock(sbi, F2FS_GET_BLOCK_PRE_AIO);
1456cf342d3bSChristoph Hellwig
1457cf342d3bSChristoph Hellwig return err;
1458cf342d3bSChristoph Hellwig }
1459cf342d3bSChristoph Hellwig
1460817c968bSChristoph Hellwig static int f2fs_map_no_dnode(struct inode *inode,
1461817c968bSChristoph Hellwig struct f2fs_map_blocks *map, struct dnode_of_data *dn,
1462817c968bSChristoph Hellwig pgoff_t pgoff)
1463817c968bSChristoph Hellwig {
1464817c968bSChristoph Hellwig struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1465817c968bSChristoph Hellwig
1466817c968bSChristoph Hellwig /*
1467817c968bSChristoph Hellwig * There is one exceptional case where read_node_page() may return
1468817c968bSChristoph Hellwig * -ENOENT because the filesystem has been shut down or hit cp_error;
1469817c968bSChristoph Hellwig * return -EIO in that case.
1470817c968bSChristoph Hellwig */
1471817c968bSChristoph Hellwig if (map->m_may_create &&
1472817c968bSChristoph Hellwig (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) || f2fs_cp_error(sbi)))
1473817c968bSChristoph Hellwig return -EIO;
1474817c968bSChristoph Hellwig
1475817c968bSChristoph Hellwig if (map->m_next_pgofs)
1476817c968bSChristoph Hellwig *map->m_next_pgofs = f2fs_get_next_page_offset(dn, pgoff);
1477817c968bSChristoph Hellwig if (map->m_next_extent)
1478817c968bSChristoph Hellwig *map->m_next_extent = f2fs_get_next_page_offset(dn, pgoff);
1479817c968bSChristoph Hellwig return 0;
1480817c968bSChristoph Hellwig }
1481817c968bSChristoph Hellwig
14820094e98bSChristoph Hellwig static bool f2fs_map_blocks_cached(struct inode *inode,
14830094e98bSChristoph Hellwig struct f2fs_map_blocks *map, int flag)
14840094e98bSChristoph Hellwig {
14850094e98bSChristoph Hellwig struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
14860094e98bSChristoph Hellwig unsigned int maxblocks = map->m_len;
14870094e98bSChristoph Hellwig pgoff_t pgoff = (pgoff_t)map->m_lblk;
14880094e98bSChristoph Hellwig struct extent_info ei = {};
14890094e98bSChristoph Hellwig
14900094e98bSChristoph Hellwig if (!f2fs_lookup_read_extent_cache(inode, pgoff, &ei))
14910094e98bSChristoph Hellwig return false;
14920094e98bSChristoph Hellwig
14930094e98bSChristoph Hellwig map->m_pblk = ei.blk + pgoff - ei.fofs;
14940094e98bSChristoph Hellwig map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgoff);
14950094e98bSChristoph Hellwig map->m_flags = F2FS_MAP_MAPPED;
14960094e98bSChristoph Hellwig if (map->m_next_extent)
14970094e98bSChristoph Hellwig *map->m_next_extent = pgoff + map->m_len;
14980094e98bSChristoph Hellwig
14990094e98bSChristoph Hellwig /* needed for hardware encryption, and to avoid potential issues in the future */
15000094e98bSChristoph Hellwig if (flag == F2FS_GET_BLOCK_DIO)
15010094e98bSChristoph Hellwig f2fs_wait_on_block_writeback_range(inode,
15020094e98bSChristoph Hellwig map->m_pblk, map->m_len);
15030094e98bSChristoph Hellwig
15040094e98bSChristoph Hellwig if (f2fs_allow_multi_device_dio(sbi, flag)) {
15050094e98bSChristoph Hellwig int bidx = f2fs_target_device_index(sbi, map->m_pblk);
15060094e98bSChristoph Hellwig struct f2fs_dev_info *dev = &sbi->devs[bidx];
15070094e98bSChristoph Hellwig
15080094e98bSChristoph Hellwig map->m_bdev = dev->bdev;
15090094e98bSChristoph Hellwig map->m_pblk -= dev->start_blk;
15100094e98bSChristoph Hellwig map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk);
15110094e98bSChristoph Hellwig } else {
15120094e98bSChristoph Hellwig map->m_bdev = inode->i_sb->s_bdev;
15130094e98bSChristoph Hellwig }
15140094e98bSChristoph Hellwig return true;
151559b802e5SJaegeuk Kim }
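/*
 * Worked example for the extent math above (illustrative numbers):
 * with a cached extent { fofs = 100, blk = 5000, len = 8 } and a
 * request for m_lblk = 103, m_len = 16, the lookup yields
 * m_pblk = 5000 + 103 - 100 = 5003 and
 * m_len = min(16, 100 + 8 - 103) = 5 blocks.
 */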
151659b802e5SJaegeuk Kim
15170a8165d7SJaegeuk Kim /*
15187a88ddb5SChao Yu * f2fs_map_blocks() tries to find or build a mapping relationship which
15197a88ddb5SChao Yu * maps contiguous logical blocks to physical blocks, and returns such
15207a88ddb5SChao Yu * info via the f2fs_map_blocks structure.
1521eb47b800SJaegeuk Kim */
1522cd8fc522SChristoph Hellwig int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
1523eb47b800SJaegeuk Kim {
1524003a3e1dSJaegeuk Kim unsigned int maxblocks = map->m_len;
1525eb47b800SJaegeuk Kim struct dnode_of_data dn;
1526f9811703SChao Yu struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1527f9d6d059SChao Yu int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
152846008c6dSChao Yu pgoff_t pgofs, end_offset, end;
1529bfad7c2dSJaegeuk Kim int err = 0, ofs = 1;
153046008c6dSChao Yu unsigned int ofs_in_node, last_ofs_in_node;
153146008c6dSChao Yu blkcnt_t prealloc;
15327df3a431SFan Li block_t blkaddr;
1533c4020b2dSChao Yu unsigned int start_pgofs;
153471f2c820SChao Yu int bidx = 0;
1535fdbf69a7SChristoph Hellwig bool is_hole;
1536eb47b800SJaegeuk Kim
1537dfd02e4dSChao Yu if (!maxblocks)
1538dfd02e4dSChao Yu return 0;
1539dfd02e4dSChao Yu
15400094e98bSChristoph Hellwig if (!map->m_may_create && f2fs_map_blocks_cached(inode, map, flag))
15410094e98bSChristoph Hellwig goto out;
15420094e98bSChristoph Hellwig
154371f2c820SChao Yu map->m_bdev = inode->i_sb->s_bdev;
154471f2c820SChao Yu map->m_multidev_dio =
154571f2c820SChao Yu f2fs_allow_multi_device_dio(F2FS_I_SB(inode), flag);
154671f2c820SChao Yu
1547003a3e1dSJaegeuk Kim map->m_len = 0;
1548003a3e1dSJaegeuk Kim map->m_flags = 0;
1549003a3e1dSJaegeuk Kim
1550003a3e1dSJaegeuk Kim /* it only supports block size == page size */
1551003a3e1dSJaegeuk Kim pgofs = (pgoff_t)map->m_lblk;
155246008c6dSChao Yu end = pgofs + maxblocks;
1553eb47b800SJaegeuk Kim
15544fe71e88SChao Yu next_dnode:
1555f9d6d059SChao Yu if (map->m_may_create)
15562f51ade9SChristoph Hellwig f2fs_map_lock(sbi, flag);
1557eb47b800SJaegeuk Kim
1558eb47b800SJaegeuk Kim /* When reading holes, we need its node page */
1559eb47b800SJaegeuk Kim set_new_dnode(&dn, inode, NULL, NULL, 0);
15604d57b86dSChao Yu err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
15611ec79083SJaegeuk Kim if (err) {
156243473f96SChao Yu if (flag == F2FS_GET_BLOCK_BMAP)
156343473f96SChao Yu map->m_pblk = 0;
1564817c968bSChristoph Hellwig if (err == -ENOENT)
1565817c968bSChristoph Hellwig err = f2fs_map_no_dnode(inode, map, &dn, pgofs);
1566bfad7c2dSJaegeuk Kim goto unlock_out;
1567848753aaSNamjae Jeon }
1568eb47b800SJaegeuk Kim
1569c4020b2dSChao Yu start_pgofs = pgofs;
157046008c6dSChao Yu prealloc = 0;
1571230436b3SArnd Bergmann last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
157281ca7350SChao Yu end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1573eb47b800SJaegeuk Kim
15744fe71e88SChao Yu next_block:
1575a2ced1ceSChao Yu blkaddr = f2fs_data_blkaddr(&dn);
1576fdbf69a7SChristoph Hellwig is_hole = !__is_valid_data_blkaddr(blkaddr);
1577fdbf69a7SChristoph Hellwig if (!is_hole &&
157893770ab7SChao Yu !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
157910f966bbSChao Yu err = -EFSCORRUPTED;
158095fa90c9SChao Yu f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
1581c9b60788SChao Yu goto sync_out;
1582c9b60788SChao Yu }
1583c9b60788SChao Yu
1584fdbf69a7SChristoph Hellwig /* use out-of-place update for direct IO under LFS mode */
1585fdbf69a7SChristoph Hellwig if (map->m_may_create &&
1586fdbf69a7SChristoph Hellwig (is_hole || (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO))) {
1587f9811703SChao Yu if (unlikely(f2fs_cp_error(sbi))) {
1588f9811703SChao Yu err = -EIO;
1589f9811703SChao Yu goto sync_out;
1590f9811703SChao Yu }
1591fdbf69a7SChristoph Hellwig
1592fdbf69a7SChristoph Hellwig switch (flag) {
1593fdbf69a7SChristoph Hellwig case F2FS_GET_BLOCK_PRE_AIO:
159446008c6dSChao Yu if (blkaddr == NULL_ADDR) {
159546008c6dSChao Yu prealloc++;
159646008c6dSChao Yu last_ofs_in_node = dn.ofs_in_node;
159746008c6dSChao Yu }
1598fdbf69a7SChristoph Hellwig break;
1599fdbf69a7SChristoph Hellwig case F2FS_GET_BLOCK_PRE_DIO:
1600fdbf69a7SChristoph Hellwig case F2FS_GET_BLOCK_DIO:
1601fdbf69a7SChristoph Hellwig err = __allocate_data_block(&dn, map->m_seg_type);
1602fdbf69a7SChristoph Hellwig if (err)
1603fdbf69a7SChristoph Hellwig goto sync_out;
1604d4dd19ecSJaegeuk Kim if (flag == F2FS_GET_BLOCK_PRE_DIO)
1605d4dd19ecSJaegeuk Kim file_need_truncate(inode);
160691942321SJaegeuk Kim set_inode_flag(inode, FI_APPEND_WRITE);
1607fdbf69a7SChristoph Hellwig break;
1608fdbf69a7SChristoph Hellwig default:
1609fdbf69a7SChristoph Hellwig WARN_ON_ONCE(1);
1610fdbf69a7SChristoph Hellwig err = -EIO;
1611bfad7c2dSJaegeuk Kim goto sync_out;
1612fdbf69a7SChristoph Hellwig }
1613fdbf69a7SChristoph Hellwig
1614bfad7c2dSJaegeuk Kim blkaddr = dn.data_blkaddr;
1615fdbf69a7SChristoph Hellwig if (is_hole)
1616fdbf69a7SChristoph Hellwig map->m_flags |= F2FS_MAP_NEW;
1617fdbf69a7SChristoph Hellwig } else if (is_hole) {
1618bbe1da7eSChao Yu if (f2fs_compressed_file(inode) &&
16194263b3efSChao Yu f2fs_sanity_check_cluster(&dn)) {
1620bbe1da7eSChao Yu err = -EFSCORRUPTED;
162195fa90c9SChao Yu f2fs_handle_error(sbi,
162295fa90c9SChao Yu ERROR_CORRUPTED_CLUSTER);
1623bbe1da7eSChao Yu goto sync_out;
1624bbe1da7eSChao Yu }
1625fdbf69a7SChristoph Hellwig
1626fdbf69a7SChristoph Hellwig switch (flag) {
1627fdbf69a7SChristoph Hellwig case F2FS_GET_BLOCK_PRECACHE:
1628fdbf69a7SChristoph Hellwig goto sync_out;
1629fdbf69a7SChristoph Hellwig case F2FS_GET_BLOCK_BMAP:
163043473f96SChao Yu map->m_pblk = 0;
163143473f96SChao Yu goto sync_out;
1632fdbf69a7SChristoph Hellwig case F2FS_GET_BLOCK_FIEMAP:
1633fdbf69a7SChristoph Hellwig if (blkaddr == NULL_ADDR) {
1634da85985cSChao Yu if (map->m_next_pgofs)
1635da85985cSChao Yu *map->m_next_pgofs = pgofs + 1;
1636973163fcSChao Yu goto sync_out;
1637bfad7c2dSJaegeuk Kim }
1638fdbf69a7SChristoph Hellwig break;
1639fdbf69a7SChristoph Hellwig default:
1640f3d98e74SChao Yu /* for defragment case */
1641f3d98e74SChao Yu if (map->m_next_pgofs)
1642f3d98e74SChao Yu *map->m_next_pgofs = pgofs + 1;
1643bfad7c2dSJaegeuk Kim goto sync_out;
1644bfad7c2dSJaegeuk Kim }
1645973163fcSChao Yu }
1646973163fcSChao Yu
164746008c6dSChao Yu if (flag == F2FS_GET_BLOCK_PRE_AIO)
164846008c6dSChao Yu goto skip;
16497f63eb77SJaegeuk Kim
165071f2c820SChao Yu if (map->m_multidev_dio)
165171f2c820SChao Yu bidx = f2fs_target_device_index(sbi, blkaddr);
165271f2c820SChao Yu
16534fe71e88SChao Yu if (map->m_len == 0) {
1654da8c7fecSChristoph Hellwig /* reserved delalloc block should be mapped for fiemap. */
16554fe71e88SChao Yu if (blkaddr == NEW_ADDR)
1656da8c7fecSChristoph Hellwig map->m_flags |= F2FS_MAP_DELALLOC;
16574fe71e88SChao Yu map->m_flags |= F2FS_MAP_MAPPED;
16584fe71e88SChao Yu
16594fe71e88SChao Yu map->m_pblk = blkaddr;
16604fe71e88SChao Yu map->m_len = 1;
166171f2c820SChao Yu
166271f2c820SChao Yu if (map->m_multidev_dio)
166371f2c820SChao Yu map->m_bdev = FDEV(bidx).bdev;
16644fe71e88SChao Yu } else if ((map->m_pblk != NEW_ADDR &&
16657f63eb77SJaegeuk Kim blkaddr == (map->m_pblk + ofs)) ||
1666b439b103SJaegeuk Kim (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
166746008c6dSChao Yu flag == F2FS_GET_BLOCK_PRE_DIO) {
166871f2c820SChao Yu if (map->m_multidev_dio && map->m_bdev != FDEV(bidx).bdev)
166971f2c820SChao Yu goto sync_out;
1670bfad7c2dSJaegeuk Kim ofs++;
16714fe71e88SChao Yu map->m_len++;
16724fe71e88SChao Yu } else {
16734fe71e88SChao Yu goto sync_out;
16744fe71e88SChao Yu }
16754fe71e88SChao Yu
167646008c6dSChao Yu skip:
1677bfad7c2dSJaegeuk Kim dn.ofs_in_node++;
1678bfad7c2dSJaegeuk Kim pgofs++;
16794fe71e88SChao Yu
168046008c6dSChao Yu /* preallocate blocks in batch for one dnode page */
168146008c6dSChao Yu if (flag == F2FS_GET_BLOCK_PRE_AIO &&
168246008c6dSChao Yu (pgofs == end || dn.ofs_in_node == end_offset)) {
168346008c6dSChao Yu
168446008c6dSChao Yu dn.ofs_in_node = ofs_in_node;
16854d57b86dSChao Yu err = f2fs_reserve_new_blocks(&dn, prealloc);
168646008c6dSChao Yu if (err)
168746008c6dSChao Yu goto sync_out;
168846008c6dSChao Yu
168946008c6dSChao Yu map->m_len += dn.ofs_in_node - ofs_in_node;
169046008c6dSChao Yu if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
169146008c6dSChao Yu err = -ENOSPC;
169246008c6dSChao Yu goto sync_out;
169346008c6dSChao Yu }
169446008c6dSChao Yu dn.ofs_in_node = end_offset;
169546008c6dSChao Yu }
169646008c6dSChao Yu
169746008c6dSChao Yu if (pgofs >= end)
169846008c6dSChao Yu goto sync_out;
169946008c6dSChao Yu else if (dn.ofs_in_node < end_offset)
17004fe71e88SChao Yu goto next_block;
17014fe71e88SChao Yu
1702c4020b2dSChao Yu if (flag == F2FS_GET_BLOCK_PRECACHE) {
1703c4020b2dSChao Yu if (map->m_flags & F2FS_MAP_MAPPED) {
1704c4020b2dSChao Yu unsigned int ofs = start_pgofs - map->m_lblk;
1705c4020b2dSChao Yu
1706e7547dacSJaegeuk Kim f2fs_update_read_extent_cache_range(&dn,
1707c4020b2dSChao Yu start_pgofs, map->m_pblk + ofs,
1708c4020b2dSChao Yu map->m_len - ofs);
1709c4020b2dSChao Yu }
1710c4020b2dSChao Yu }
1711c4020b2dSChao Yu
17124fe71e88SChao Yu f2fs_put_dnode(&dn);
17134fe71e88SChao Yu
1714f9d6d059SChao Yu if (map->m_may_create) {
17152f51ade9SChristoph Hellwig f2fs_map_unlock(sbi, flag);
17166f2d8ed6SChao Yu f2fs_balance_fs(sbi, dn.node_changed);
17174fe71e88SChao Yu }
17184fe71e88SChao Yu goto next_dnode;
17197df3a431SFan Li
1720bfad7c2dSJaegeuk Kim sync_out:
17211e78e8bdSSahitya Tummala
172271f2c820SChao Yu if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) {
172371f2c820SChao Yu /*
172471f2c820SChao Yu * needed for hardware encryption, and to avoid potential
172571f2c820SChao Yu * issues in the future
172671f2c820SChao Yu */
17271e78e8bdSSahitya Tummala f2fs_wait_on_block_writeback_range(inode,
17281e78e8bdSSahitya Tummala map->m_pblk, map->m_len);
17291e78e8bdSSahitya Tummala
173071f2c820SChao Yu if (map->m_multidev_dio) {
173171f2c820SChao Yu block_t blk_addr = map->m_pblk;
173271f2c820SChao Yu
173371f2c820SChao Yu bidx = f2fs_target_device_index(sbi, map->m_pblk);
173471f2c820SChao Yu
173571f2c820SChao Yu map->m_bdev = FDEV(bidx).bdev;
173671f2c820SChao Yu map->m_pblk -= FDEV(bidx).start_blk;
173771f2c820SChao Yu
173871f2c820SChao Yu if (map->m_may_create)
173971f2c820SChao Yu f2fs_update_device_state(sbi, inode->i_ino,
174071f2c820SChao Yu blk_addr, map->m_len);
174171f2c820SChao Yu
174271f2c820SChao Yu f2fs_bug_on(sbi, blk_addr + map->m_len >
174371f2c820SChao Yu FDEV(bidx).end_blk + 1);
174471f2c820SChao Yu }
174571f2c820SChao Yu }
174671f2c820SChao Yu
1747c4020b2dSChao Yu if (flag == F2FS_GET_BLOCK_PRECACHE) {
1748c4020b2dSChao Yu if (map->m_flags & F2FS_MAP_MAPPED) {
1749c4020b2dSChao Yu unsigned int ofs = start_pgofs - map->m_lblk;
1750c4020b2dSChao Yu
1751e7547dacSJaegeuk Kim f2fs_update_read_extent_cache_range(&dn,
1752c4020b2dSChao Yu start_pgofs, map->m_pblk + ofs,
1753c4020b2dSChao Yu map->m_len - ofs);
1754c4020b2dSChao Yu }
1755c4020b2dSChao Yu if (map->m_next_extent)
1756c4020b2dSChao Yu *map->m_next_extent = pgofs + 1;
1757c4020b2dSChao Yu }
1758bfad7c2dSJaegeuk Kim f2fs_put_dnode(&dn);
1759bfad7c2dSJaegeuk Kim unlock_out:
1760f9d6d059SChao Yu if (map->m_may_create) {
17612f51ade9SChristoph Hellwig f2fs_map_unlock(sbi, flag);
17626f2d8ed6SChao Yu f2fs_balance_fs(sbi, dn.node_changed);
17632a340760SJaegeuk Kim }
1764bfad7c2dSJaegeuk Kim out:
1765cd8fc522SChristoph Hellwig trace_f2fs_map_blocks(inode, map, flag, err);
1766bfad7c2dSJaegeuk Kim return err;
1767eb47b800SJaegeuk Kim }
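/*
 * Illustrative sketch (hypothetical caller): a minimal read-only lookup
 * through f2fs_map_blocks(); f2fs_overwrite_io() below is a real
 * in-tree caller following the same pattern.
 *
 *	struct f2fs_map_blocks map = {};
 *
 *	map.m_lblk = lblk;
 *	map.m_len = 1;
 *	map.m_seg_type = NO_CHECK_TYPE;
 *	map.m_may_create = false;
 *	err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
 *	if (!err && (map.m_flags & F2FS_MAP_MAPPED))
 *		blkaddr = map.m_pblk;
 */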
1768eb47b800SJaegeuk Kim
1769b91050a8SHyunchul Lee bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1770b91050a8SHyunchul Lee {
1771b91050a8SHyunchul Lee struct f2fs_map_blocks map;
1772b91050a8SHyunchul Lee block_t last_lblk;
1773b91050a8SHyunchul Lee int err;
1774b91050a8SHyunchul Lee
1775b91050a8SHyunchul Lee if (pos + len > i_size_read(inode))
1776b91050a8SHyunchul Lee return false;
1777b91050a8SHyunchul Lee
1778b91050a8SHyunchul Lee map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1779b91050a8SHyunchul Lee map.m_next_pgofs = NULL;
1780b91050a8SHyunchul Lee map.m_next_extent = NULL;
1781b91050a8SHyunchul Lee map.m_seg_type = NO_CHECK_TYPE;
1782f4f0b677SJia Zhu map.m_may_create = false;
1783b91050a8SHyunchul Lee last_lblk = F2FS_BLK_ALIGN(pos + len);
1784b91050a8SHyunchul Lee
1785b91050a8SHyunchul Lee while (map.m_lblk < last_lblk) {
1786b91050a8SHyunchul Lee map.m_len = last_lblk - map.m_lblk;
1787cd8fc522SChristoph Hellwig err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
1788b91050a8SHyunchul Lee if (err || map.m_len == 0)
1789b91050a8SHyunchul Lee return false;
1790b91050a8SHyunchul Lee map.m_lblk += map.m_len;
1791b91050a8SHyunchul Lee }
1792b91050a8SHyunchul Lee return true;
1793b91050a8SHyunchul Lee }
1794b91050a8SHyunchul Lee
179543b9d4b4SJaegeuk Kim static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
179643b9d4b4SJaegeuk Kim {
179743b9d4b4SJaegeuk Kim return (bytes >> inode->i_blkbits);
179843b9d4b4SJaegeuk Kim }
179943b9d4b4SJaegeuk Kim
180043b9d4b4SJaegeuk Kim static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
180143b9d4b4SJaegeuk Kim {
180243b9d4b4SJaegeuk Kim return (blks << inode->i_blkbits);
180343b9d4b4SJaegeuk Kim }
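/*
 * Worked example (illustrative): with the default 4KiB block size
 * (inode->i_blkbits == 12), bytes_to_blks(inode, 8192) == 2 and
 * blks_to_bytes(inode, 3) == 12288. bytes_to_blks() rounds down, which
 * is why readers below add "blocksize - 1" before converting when a
 * round-up is intended.
 */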
180443b9d4b4SJaegeuk Kim
1805442a9dbdSChao Yu static int f2fs_xattr_fiemap(struct inode *inode,
1806442a9dbdSChao Yu struct fiemap_extent_info *fieinfo)
1807442a9dbdSChao Yu {
1808442a9dbdSChao Yu struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1809442a9dbdSChao Yu struct page *page;
1810442a9dbdSChao Yu struct node_info ni;
1811442a9dbdSChao Yu __u64 phys = 0, len;
1812442a9dbdSChao Yu __u32 flags;
1813442a9dbdSChao Yu nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1814442a9dbdSChao Yu int err = 0;
1815442a9dbdSChao Yu
1816442a9dbdSChao Yu if (f2fs_has_inline_xattr(inode)) {
1817442a9dbdSChao Yu int offset;
1818442a9dbdSChao Yu
1819442a9dbdSChao Yu page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1820442a9dbdSChao Yu inode->i_ino, false);
1821442a9dbdSChao Yu if (!page)
1822442a9dbdSChao Yu return -ENOMEM;
1823442a9dbdSChao Yu
1824a9419b63SJaegeuk Kim err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
18257735730dSChao Yu if (err) {
18267735730dSChao Yu f2fs_put_page(page, 1);
18277735730dSChao Yu return err;
18287735730dSChao Yu }
1829442a9dbdSChao Yu
18306cbfcab5SJaegeuk Kim phys = blks_to_bytes(inode, ni.blk_addr);
1831442a9dbdSChao Yu offset = offsetof(struct f2fs_inode, i_addr) +
1832442a9dbdSChao Yu sizeof(__le32) * (DEF_ADDRS_PER_INODE -
1833b323fd28SChao Yu get_inline_xattr_addrs(inode));
1834442a9dbdSChao Yu
1835442a9dbdSChao Yu phys += offset;
1836442a9dbdSChao Yu len = inline_xattr_size(inode);
1837442a9dbdSChao Yu
1838442a9dbdSChao Yu f2fs_put_page(page, 1);
1839442a9dbdSChao Yu
1840442a9dbdSChao Yu flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1841442a9dbdSChao Yu
1842442a9dbdSChao Yu if (!xnid)
1843442a9dbdSChao Yu flags |= FIEMAP_EXTENT_LAST;
1844442a9dbdSChao Yu
1845442a9dbdSChao Yu err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1846dd5a09bdSChao Yu trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1847ca7efd71SZhang Qilong if (err)
1848442a9dbdSChao Yu return err;
1849442a9dbdSChao Yu }
1850442a9dbdSChao Yu
1851442a9dbdSChao Yu if (xnid) {
1852442a9dbdSChao Yu page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1853442a9dbdSChao Yu if (!page)
1854442a9dbdSChao Yu return -ENOMEM;
1855442a9dbdSChao Yu
1856a9419b63SJaegeuk Kim err = f2fs_get_node_info(sbi, xnid, &ni, false);
18577735730dSChao Yu if (err) {
18587735730dSChao Yu f2fs_put_page(page, 1);
18597735730dSChao Yu return err;
18607735730dSChao Yu }
1861442a9dbdSChao Yu
18626cbfcab5SJaegeuk Kim phys = blks_to_bytes(inode, ni.blk_addr);
1863442a9dbdSChao Yu len = inode->i_sb->s_blocksize;
1864442a9dbdSChao Yu
1865442a9dbdSChao Yu f2fs_put_page(page, 1);
1866442a9dbdSChao Yu
1867442a9dbdSChao Yu flags = FIEMAP_EXTENT_LAST;
1868442a9dbdSChao Yu }
1869442a9dbdSChao Yu
1870dd5a09bdSChao Yu if (phys) {
1871442a9dbdSChao Yu err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1872dd5a09bdSChao Yu trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1873dd5a09bdSChao Yu }
1874442a9dbdSChao Yu
1875442a9dbdSChao Yu return (err < 0 ? err : 0);
1876442a9dbdSChao Yu }
1877442a9dbdSChao Yu
18789ab70134SJaegeuk Kim int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
18799ab70134SJaegeuk Kim u64 start, u64 len)
18809ab70134SJaegeuk Kim {
1881b876f4c9SJaegeuk Kim struct f2fs_map_blocks map;
18827f63eb77SJaegeuk Kim sector_t start_blk, last_blk;
1883da85985cSChao Yu pgoff_t next_pgofs;
18847f63eb77SJaegeuk Kim u64 logical = 0, phys = 0, size = 0;
18857f63eb77SJaegeuk Kim u32 flags = 0;
18867f63eb77SJaegeuk Kim int ret = 0;
1887093f0bacSDaeho Jeong bool compr_cluster = false, compr_appended;
1888bf38fbadSChao Yu unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
1889093f0bacSDaeho Jeong unsigned int count_in_cluster = 0;
18900bb2045cSChengguang Xu loff_t maxbytes;
18917f63eb77SJaegeuk Kim
1892c4020b2dSChao Yu if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1893c4020b2dSChao Yu ret = f2fs_precache_extents(inode);
18947f63eb77SJaegeuk Kim if (ret)
18957f63eb77SJaegeuk Kim return ret;
1896c4020b2dSChao Yu }
1897c4020b2dSChao Yu
189845dd052eSChristoph Hellwig ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
18997f63eb77SJaegeuk Kim if (ret)
19007f63eb77SJaegeuk Kim return ret;
19017f63eb77SJaegeuk Kim
1902f1b43d4cSChao Yu inode_lock(inode);
1903f1b43d4cSChao Yu
19040bb2045cSChengguang Xu maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
19050bb2045cSChengguang Xu if (start > maxbytes) {
19060bb2045cSChengguang Xu ret = -EFBIG;
19070bb2045cSChengguang Xu goto out;
19080bb2045cSChengguang Xu }
19090bb2045cSChengguang Xu
19100bb2045cSChengguang Xu if (len > maxbytes || (maxbytes - len) < start)
19110bb2045cSChengguang Xu len = maxbytes - start;
19120bb2045cSChengguang Xu
1913442a9dbdSChao Yu if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1914442a9dbdSChao Yu ret = f2fs_xattr_fiemap(inode, fieinfo);
1915442a9dbdSChao Yu goto out;
1916442a9dbdSChao Yu }
19177f63eb77SJaegeuk Kim
19187975f349SChao Yu if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
191967f8cf3cSJaegeuk Kim ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
192067f8cf3cSJaegeuk Kim if (ret != -EAGAIN)
1921f1b43d4cSChao Yu goto out;
192267f8cf3cSJaegeuk Kim }
192367f8cf3cSJaegeuk Kim
19246cbfcab5SJaegeuk Kim if (bytes_to_blks(inode, len) == 0)
19256cbfcab5SJaegeuk Kim len = blks_to_bytes(inode, 1);
19267f63eb77SJaegeuk Kim
19276cbfcab5SJaegeuk Kim start_blk = bytes_to_blks(inode, start);
19286cbfcab5SJaegeuk Kim last_blk = bytes_to_blks(inode, start + len - 1);
19299a950d52SFan Li
19307f63eb77SJaegeuk Kim next:
1931b876f4c9SJaegeuk Kim memset(&map, 0, sizeof(map));
1932b876f4c9SJaegeuk Kim map.m_lblk = start_blk;
1933b876f4c9SJaegeuk Kim map.m_len = bytes_to_blks(inode, len);
1934b876f4c9SJaegeuk Kim map.m_next_pgofs = &next_pgofs;
1935b876f4c9SJaegeuk Kim map.m_seg_type = NO_CHECK_TYPE;
19367f63eb77SJaegeuk Kim
1937093f0bacSDaeho Jeong if (compr_cluster) {
1938093f0bacSDaeho Jeong map.m_lblk += 1;
1939093f0bacSDaeho Jeong map.m_len = cluster_size - count_in_cluster;
1940093f0bacSDaeho Jeong }
1941bf38fbadSChao Yu
1942cd8fc522SChristoph Hellwig ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
19437f63eb77SJaegeuk Kim if (ret)
19447f63eb77SJaegeuk Kim goto out;
19457f63eb77SJaegeuk Kim
19467f63eb77SJaegeuk Kim /* HOLE */
1947093f0bacSDaeho Jeong if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) {
1948da85985cSChao Yu start_blk = next_pgofs;
194958736fa6SChao Yu
1950a90cb059SXiuhong Wang if (blks_to_bytes(inode, start_blk) < maxbytes)
19519a950d52SFan Li goto prep_next;
195258736fa6SChao Yu
19539a950d52SFan Li flags |= FIEMAP_EXTENT_LAST;
19549a950d52SFan Li }
19559a950d52SFan Li
1956093f0bacSDaeho Jeong compr_appended = false;
1957093f0bacSDaeho Jeong /* In the case of a compressed cluster, append this to the last extent */
1958da8c7fecSChristoph Hellwig if (compr_cluster && ((map.m_flags & F2FS_MAP_DELALLOC) ||
1959093f0bacSDaeho Jeong !(map.m_flags & F2FS_MAP_FLAGS))) {
1960093f0bacSDaeho Jeong compr_appended = true;
1961093f0bacSDaeho Jeong goto skip_fill;
1962093f0bacSDaeho Jeong }
1963093f0bacSDaeho Jeong
1964da5af127SChao Yu if (size) {
19650953fe86SChao Yu flags |= FIEMAP_EXTENT_MERGED;
196662230e0dSChandan Rajendra if (IS_ENCRYPTED(inode))
1967da5af127SChao Yu flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
1968da5af127SChao Yu
19697f63eb77SJaegeuk Kim ret = fiemap_fill_next_extent(fieinfo, logical,
19707f63eb77SJaegeuk Kim phys, size, flags);
1971dd5a09bdSChao Yu trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
1972bf38fbadSChao Yu if (ret)
1973bf38fbadSChao Yu goto out;
1974bf38fbadSChao Yu size = 0;
1975da5af127SChao Yu }
19769a950d52SFan Li
1977bf38fbadSChao Yu if (start_blk > last_blk)
19787f63eb77SJaegeuk Kim goto out;
19797f63eb77SJaegeuk Kim
1980093f0bacSDaeho Jeong skip_fill:
1981b876f4c9SJaegeuk Kim if (map.m_pblk == COMPRESS_ADDR) {
1982bf38fbadSChao Yu compr_cluster = true;
1983093f0bacSDaeho Jeong count_in_cluster = 1;
1984093f0bacSDaeho Jeong } else if (compr_appended) {
1985093f0bacSDaeho Jeong unsigned int appended_blks = cluster_size -
1986093f0bacSDaeho Jeong count_in_cluster + 1;
1987093f0bacSDaeho Jeong size += blks_to_bytes(inode, appended_blks);
1988093f0bacSDaeho Jeong start_blk += appended_blks;
1989093f0bacSDaeho Jeong compr_cluster = false;
1990093f0bacSDaeho Jeong } else {
19916cbfcab5SJaegeuk Kim logical = blks_to_bytes(inode, start_blk);
1992093f0bacSDaeho Jeong phys = __is_valid_data_blkaddr(map.m_pblk) ?
1993093f0bacSDaeho Jeong blks_to_bytes(inode, map.m_pblk) : 0;
1994b876f4c9SJaegeuk Kim size = blks_to_bytes(inode, map.m_len);
19957f63eb77SJaegeuk Kim flags = 0;
1996093f0bacSDaeho Jeong
1997093f0bacSDaeho Jeong if (compr_cluster) {
1998093f0bacSDaeho Jeong flags = FIEMAP_EXTENT_ENCODED;
1999093f0bacSDaeho Jeong count_in_cluster += map.m_len;
2000093f0bacSDaeho Jeong if (count_in_cluster == cluster_size) {
2001093f0bacSDaeho Jeong compr_cluster = false;
2002093f0bacSDaeho Jeong size += blks_to_bytes(inode, 1);
2003093f0bacSDaeho Jeong }
2004da8c7fecSChristoph Hellwig } else if (map.m_flags & F2FS_MAP_DELALLOC) {
20057f63eb77SJaegeuk Kim flags = FIEMAP_EXTENT_UNWRITTEN;
2006093f0bacSDaeho Jeong }
20077f63eb77SJaegeuk Kim
20086cbfcab5SJaegeuk Kim start_blk += bytes_to_blks(inode, size);
2009093f0bacSDaeho Jeong }
20107f63eb77SJaegeuk Kim
20119a950d52SFan Li prep_next:
20127f63eb77SJaegeuk Kim cond_resched();
20137f63eb77SJaegeuk Kim if (fatal_signal_pending(current))
20147f63eb77SJaegeuk Kim ret = -EINTR;
20157f63eb77SJaegeuk Kim else
20167f63eb77SJaegeuk Kim goto next;
20177f63eb77SJaegeuk Kim out:
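	/*
	 * fiemap_fill_next_extent() returns 1 once the user-supplied
	 * extent array is full; treat that as a normal stop rather than
	 * an error.
	 */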
20187f63eb77SJaegeuk Kim if (ret == 1)
20197f63eb77SJaegeuk Kim ret = 0;
20207f63eb77SJaegeuk Kim
20215955102cSAl Viro inode_unlock(inode);
20227f63eb77SJaegeuk Kim return ret;
20239ab70134SJaegeuk Kim }
20249ab70134SJaegeuk Kim
202595ae251fSEric Biggers static inline loff_t f2fs_readpage_limit(struct inode *inode)
202695ae251fSEric Biggers {
2027feb0576aSEric Biggers if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
202895ae251fSEric Biggers return inode->i_sb->s_maxbytes;
202995ae251fSEric Biggers
203095ae251fSEric Biggers return i_size_read(inode);
203195ae251fSEric Biggers }
203295ae251fSEric Biggers
20332df0ab04SChao Yu static int f2fs_read_single_page(struct inode *inode, struct page *page,
20342df0ab04SChao Yu unsigned nr_pages,
20352df0ab04SChao Yu struct f2fs_map_blocks *map,
20362df0ab04SChao Yu struct bio **bio_ret,
20372df0ab04SChao Yu sector_t *last_block_in_bio,
20382df0ab04SChao Yu bool is_readahead)
20392df0ab04SChao Yu {
20402df0ab04SChao Yu struct bio *bio = *bio_ret;
204143b9d4b4SJaegeuk Kim const unsigned blocksize = blks_to_bytes(inode, 1);
20422df0ab04SChao Yu sector_t block_in_file;
20432df0ab04SChao Yu sector_t last_block;
20442df0ab04SChao Yu sector_t last_block_in_file;
20452df0ab04SChao Yu sector_t block_nr;
20462df0ab04SChao Yu int ret = 0;
20472df0ab04SChao Yu
20484969c06aSJaegeuk Kim block_in_file = (sector_t)page_index(page);
20492df0ab04SChao Yu last_block = block_in_file + nr_pages;
205043b9d4b4SJaegeuk Kim last_block_in_file = bytes_to_blks(inode,
205143b9d4b4SJaegeuk Kim f2fs_readpage_limit(inode) + blocksize - 1);
20522df0ab04SChao Yu if (last_block > last_block_in_file)
20532df0ab04SChao Yu last_block = last_block_in_file;
20542df0ab04SChao Yu
20552df0ab04SChao Yu /* just zero out the page which is beyond EOF */
20562df0ab04SChao Yu if (block_in_file >= last_block)
20572df0ab04SChao Yu goto zero_out;
20582df0ab04SChao Yu /*
20592df0ab04SChao Yu * Map blocks using the previous result first.
20602df0ab04SChao Yu */
20612df0ab04SChao Yu if ((map->m_flags & F2FS_MAP_MAPPED) &&
20622df0ab04SChao Yu block_in_file > map->m_lblk &&
20632df0ab04SChao Yu block_in_file < (map->m_lblk + map->m_len))
20642df0ab04SChao Yu goto got_it;
20652df0ab04SChao Yu
20662df0ab04SChao Yu /*
20672df0ab04SChao Yu * Then do more f2fs_map_blocks() calls until we are
20682df0ab04SChao Yu * done with this page.
20692df0ab04SChao Yu */
20702df0ab04SChao Yu map->m_lblk = block_in_file;
20712df0ab04SChao Yu map->m_len = last_block - block_in_file;
20722df0ab04SChao Yu
2073cd8fc522SChristoph Hellwig ret = f2fs_map_blocks(inode, map, F2FS_GET_BLOCK_DEFAULT);
20742df0ab04SChao Yu if (ret)
20752df0ab04SChao Yu goto out;
20762df0ab04SChao Yu got_it:
20772df0ab04SChao Yu if ((map->m_flags & F2FS_MAP_MAPPED)) {
20782df0ab04SChao Yu block_nr = map->m_pblk + block_in_file - map->m_lblk;
20792df0ab04SChao Yu SetPageMappedToDisk(page);
20802df0ab04SChao Yu
20812df0ab04SChao Yu if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
208293770ab7SChao Yu DATA_GENERIC_ENHANCE_READ)) {
208310f966bbSChao Yu ret = -EFSCORRUPTED;
208495fa90c9SChao Yu f2fs_handle_error(F2FS_I_SB(inode),
208595fa90c9SChao Yu ERROR_INVALID_BLKADDR);
20862df0ab04SChao Yu goto out;
20872df0ab04SChao Yu }
20882df0ab04SChao Yu } else {
20892df0ab04SChao Yu zero_out:
20902df0ab04SChao Yu zero_user_segment(page, 0, PAGE_SIZE);
209195ae251fSEric Biggers if (f2fs_need_verity(inode, page->index) &&
209295ae251fSEric Biggers !fsverity_verify_page(page)) {
209395ae251fSEric Biggers ret = -EIO;
209495ae251fSEric Biggers goto out;
209595ae251fSEric Biggers }
20962df0ab04SChao Yu if (!PageUptodate(page))
20972df0ab04SChao Yu SetPageUptodate(page);
20982df0ab04SChao Yu unlock_page(page);
20992df0ab04SChao Yu goto out;
21002df0ab04SChao Yu }
21012df0ab04SChao Yu
21022df0ab04SChao Yu /*
21032df0ab04SChao Yu * This page will go to BIO. Do we need to send this
21042df0ab04SChao Yu * BIO off first?
21052df0ab04SChao Yu */
210627aacd28SSatya Tangirala if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
210727aacd28SSatya Tangirala *last_block_in_bio, block_nr) ||
210827aacd28SSatya Tangirala !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
21092df0ab04SChao Yu submit_and_realloc:
2110bc29835aSChristoph Hellwig f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
21112df0ab04SChao Yu bio = NULL;
21122df0ab04SChao Yu }
21132df0ab04SChao Yu if (bio == NULL) {
21142df0ab04SChao Yu bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
21150683728aSChao Yu is_readahead ? REQ_RAHEAD : 0, page->index,
21167f59b277SEric Biggers false);
21172df0ab04SChao Yu if (IS_ERR(bio)) {
21182df0ab04SChao Yu ret = PTR_ERR(bio);
21192df0ab04SChao Yu bio = NULL;
21202df0ab04SChao Yu goto out;
21212df0ab04SChao Yu }
21222df0ab04SChao Yu }
21232df0ab04SChao Yu
21242df0ab04SChao Yu /*
21252df0ab04SChao Yu * If the page is under writeback, we need to wait for
21262df0ab04SChao Yu * its completion to see the correct decrypted data.
21272df0ab04SChao Yu */
21282df0ab04SChao Yu f2fs_wait_on_block_writeback(inode, block_nr);
21292df0ab04SChao Yu
21302df0ab04SChao Yu if (bio_add_page(bio, page, blocksize, 0) < blocksize)
21312df0ab04SChao Yu goto submit_and_realloc;
21322df0ab04SChao Yu
21332df0ab04SChao Yu inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
213434a23525SChao Yu f2fs_update_iostat(F2FS_I_SB(inode), NULL, FS_DATA_READ_IO,
213534a23525SChao Yu F2FS_BLKSIZE);
21362df0ab04SChao Yu *last_block_in_bio = block_nr;
21372df0ab04SChao Yu out:
21382df0ab04SChao Yu *bio_ret = bio;
21392df0ab04SChao Yu return ret;
21402df0ab04SChao Yu }
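/*
 * Illustrative note (restating the logic above): f2fs_read_single_page()
 * keeps one bio open across calls via *bio_ret and *last_block_in_bio,
 * so a sequential read of blocks N, N+1, N+2 lands all three pages in a
 * single bio, while a discontiguous or crypto-incompatible block takes
 * the submit_and_realloc path, flushing the bio and starting a new one.
 */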
21412df0ab04SChao Yu
21424c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
21434c8ff709SChao Yu int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
21444c8ff709SChao Yu unsigned nr_pages, sector_t *last_block_in_bio,
21450683728aSChao Yu bool is_readahead, bool for_write)
21464c8ff709SChao Yu {
21474c8ff709SChao Yu struct dnode_of_data dn;
21484c8ff709SChao Yu struct inode *inode = cc->inode;
21494c8ff709SChao Yu struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
21504c8ff709SChao Yu struct bio *bio = *bio_ret;
21514c8ff709SChao Yu unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
21524c8ff709SChao Yu sector_t last_block_in_file;
215343b9d4b4SJaegeuk Kim const unsigned blocksize = blks_to_bytes(inode, 1);
21544c8ff709SChao Yu struct decompress_io_ctx *dic = NULL;
2155fe59109aSJaegeuk Kim struct extent_info ei = {};
215694afd6d6SChao Yu bool from_dnode = true;
21574c8ff709SChao Yu int i;
21584c8ff709SChao Yu int ret = 0;
21594c8ff709SChao Yu
21604c8ff709SChao Yu f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
21614c8ff709SChao Yu
216243b9d4b4SJaegeuk Kim last_block_in_file = bytes_to_blks(inode,
216343b9d4b4SJaegeuk Kim f2fs_readpage_limit(inode) + blocksize - 1);
21644c8ff709SChao Yu
21654c8ff709SChao Yu /* get rid of pages beyond EOF */
21664c8ff709SChao Yu for (i = 0; i < cc->cluster_size; i++) {
21674c8ff709SChao Yu struct page *page = cc->rpages[i];
21684c8ff709SChao Yu
21694c8ff709SChao Yu if (!page)
21704c8ff709SChao Yu continue;
21714c8ff709SChao Yu if ((sector_t)page->index >= last_block_in_file) {
21724c8ff709SChao Yu zero_user_segment(page, 0, PAGE_SIZE);
21734c8ff709SChao Yu if (!PageUptodate(page))
21744c8ff709SChao Yu SetPageUptodate(page);
21754c8ff709SChao Yu } else if (!PageUptodate(page)) {
21764c8ff709SChao Yu continue;
21774c8ff709SChao Yu }
21784c8ff709SChao Yu unlock_page(page);
21799605f75cSJaegeuk Kim if (for_write)
21809605f75cSJaegeuk Kim put_page(page);
21814c8ff709SChao Yu cc->rpages[i] = NULL;
21824c8ff709SChao Yu cc->nr_rpages--;
21834c8ff709SChao Yu }
21844c8ff709SChao Yu
21854c8ff709SChao Yu /* we are done since all pages are beyond EOF */
21864c8ff709SChao Yu if (f2fs_cluster_is_empty(cc))
21874c8ff709SChao Yu goto out;
21884c8ff709SChao Yu
2189e7547dacSJaegeuk Kim if (f2fs_lookup_read_extent_cache(inode, start_idx, &ei))
219094afd6d6SChao Yu from_dnode = false;
219194afd6d6SChao Yu
219294afd6d6SChao Yu if (!from_dnode)
219394afd6d6SChao Yu goto skip_reading_dnode;
219494afd6d6SChao Yu
21954c8ff709SChao Yu set_new_dnode(&dn, inode, NULL, NULL, 0);
21964c8ff709SChao Yu ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
21974c8ff709SChao Yu if (ret)
21984c8ff709SChao Yu goto out;
21994c8ff709SChao Yu
2200bd90c5cdSJaegeuk Kim if (unlikely(f2fs_cp_error(sbi))) {
2201bd90c5cdSJaegeuk Kim ret = -EIO;
2202bd90c5cdSJaegeuk Kim goto out_put_dnode;
2203bd90c5cdSJaegeuk Kim }
2204a86d27ddSChao Yu f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
22054c8ff709SChao Yu
220694afd6d6SChao Yu skip_reading_dnode:
22074c8ff709SChao Yu for (i = 1; i < cc->cluster_size; i++) {
22084c8ff709SChao Yu block_t blkaddr;
22094c8ff709SChao Yu
221094afd6d6SChao Yu blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
221194afd6d6SChao Yu dn.ofs_in_node + i) :
221294afd6d6SChao Yu ei.blk + i - 1;
22134c8ff709SChao Yu
22144c8ff709SChao Yu if (!__is_valid_data_blkaddr(blkaddr))
22154c8ff709SChao Yu break;
22164c8ff709SChao Yu
22174c8ff709SChao Yu if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
22184c8ff709SChao Yu ret = -EFAULT;
22194c8ff709SChao Yu goto out_put_dnode;
22204c8ff709SChao Yu }
22214c8ff709SChao Yu cc->nr_cpages++;
222294afd6d6SChao Yu
222394afd6d6SChao Yu if (!from_dnode && i >= ei.c_len)
222494afd6d6SChao Yu break;
22254c8ff709SChao Yu }
22264c8ff709SChao Yu
22274c8ff709SChao Yu /* nothing to decompress */
22284c8ff709SChao Yu if (cc->nr_cpages == 0) {
22294c8ff709SChao Yu ret = 0;
22304c8ff709SChao Yu goto out_put_dnode;
22314c8ff709SChao Yu }
22324c8ff709SChao Yu
22334c8ff709SChao Yu dic = f2fs_alloc_dic(cc);
22344c8ff709SChao Yu if (IS_ERR(dic)) {
22354c8ff709SChao Yu ret = PTR_ERR(dic);
22364c8ff709SChao Yu goto out_put_dnode;
22374c8ff709SChao Yu }
22384c8ff709SChao Yu
22396ce19affSChao Yu for (i = 0; i < cc->nr_cpages; i++) {
22404c8ff709SChao Yu struct page *page = dic->cpages[i];
22414c8ff709SChao Yu block_t blkaddr;
22427f59b277SEric Biggers struct bio_post_read_ctx *ctx;
22434c8ff709SChao Yu
224494afd6d6SChao Yu blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
224594afd6d6SChao Yu dn.ofs_in_node + i + 1) :
224694afd6d6SChao Yu ei.blk + i;
22474c8ff709SChao Yu
22486ce19affSChao Yu f2fs_wait_on_block_writeback(inode, blkaddr);
22496ce19affSChao Yu
22506ce19affSChao Yu if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
22519d065aa5SChao Yu if (atomic_dec_and_test(&dic->remaining_pages)) {
2252bff139b4SDaeho Jeong f2fs_decompress_cluster(dic, true);
22539d065aa5SChao Yu break;
22549d065aa5SChao Yu }
22556ce19affSChao Yu continue;
22566ce19affSChao Yu }
22576ce19affSChao Yu
225827aacd28SSatya Tangirala if (bio && (!page_is_mergeable(sbi, bio,
225927aacd28SSatya Tangirala *last_block_in_bio, blkaddr) ||
226027aacd28SSatya Tangirala !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
22614c8ff709SChao Yu submit_and_realloc:
2262bc29835aSChristoph Hellwig f2fs_submit_read_bio(sbi, bio, DATA);
22634c8ff709SChao Yu bio = NULL;
22644c8ff709SChao Yu }
22654c8ff709SChao Yu
22664c8ff709SChao Yu if (!bio) {
22674c8ff709SChao Yu bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
22684c8ff709SChao Yu is_readahead ? REQ_RAHEAD : 0,
22697f59b277SEric Biggers page->index, for_write);
22704c8ff709SChao Yu if (IS_ERR(bio)) {
22714c8ff709SChao Yu ret = PTR_ERR(bio);
2272bff139b4SDaeho Jeong f2fs_decompress_end_io(dic, ret, true);
22734c8ff709SChao Yu f2fs_put_dnode(&dn);
2274f3494345SChao Yu *bio_ret = NULL;
22754c8ff709SChao Yu return ret;
22764c8ff709SChao Yu }
22774c8ff709SChao Yu }
22784c8ff709SChao Yu
22794c8ff709SChao Yu if (bio_add_page(bio, page, blocksize, 0) < blocksize)
22804c8ff709SChao Yu goto submit_and_realloc;
22814c8ff709SChao Yu
2282a4b68176SDaeho Jeong ctx = get_post_read_ctx(bio);
22837f59b277SEric Biggers ctx->enabled_steps |= STEP_DECOMPRESS;
22847f59b277SEric Biggers refcount_inc(&dic->refcnt);
228503382f1aSChao Yu
22864c8ff709SChao Yu inc_page_count(sbi, F2FS_RD_DATA);
228734a23525SChao Yu f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
22884c8ff709SChao Yu *last_block_in_bio = blkaddr;
22894c8ff709SChao Yu }
22904c8ff709SChao Yu
229194afd6d6SChao Yu if (from_dnode)
22924c8ff709SChao Yu f2fs_put_dnode(&dn);
22934c8ff709SChao Yu
22944c8ff709SChao Yu *bio_ret = bio;
22954c8ff709SChao Yu return 0;
22964c8ff709SChao Yu
22974c8ff709SChao Yu out_put_dnode:
229894afd6d6SChao Yu if (from_dnode)
22994c8ff709SChao Yu f2fs_put_dnode(&dn);
23004c8ff709SChao Yu out:
23017f59b277SEric Biggers for (i = 0; i < cc->cluster_size; i++) {
23027f59b277SEric Biggers if (cc->rpages[i]) {
23037f59b277SEric Biggers ClearPageUptodate(cc->rpages[i]);
23047f59b277SEric Biggers unlock_page(cc->rpages[i]);
23057f59b277SEric Biggers }
23067f59b277SEric Biggers }
23074c8ff709SChao Yu *bio_ret = bio;
23084c8ff709SChao Yu return ret;
23094c8ff709SChao Yu }
23104c8ff709SChao Yu #endif
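/*
 * Illustrative note (as read from the code above): every compressed page
 * queued into a bio pins the decompress_io_ctx via
 * refcount_inc(&dic->refcnt) alongside STEP_DECOMPRESS, while pages
 * served from the compressed-page cache only drop remaining_pages; the
 * last such drop kicks f2fs_decompress_cluster() directly.
 */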
23114c8ff709SChao Yu
2312f1e88660SJaegeuk Kim /*
2313f1e88660SJaegeuk Kim * This function was originally taken from fs/mpage.c, and customized for f2fs.
2314f1e88660SJaegeuk Kim * The major change is that block_size == page_size in f2fs by default.
2315f1e88660SJaegeuk Kim */
2316e20a7693SMatthew Wilcox (Oracle) static int f2fs_mpage_readpages(struct inode *inode,
231723323196SMatthew Wilcox (Oracle) struct readahead_control *rac, struct page *page)
2318f1e88660SJaegeuk Kim {
2319f1e88660SJaegeuk Kim struct bio *bio = NULL;
2320f1e88660SJaegeuk Kim sector_t last_block_in_bio = 0;
2321f1e88660SJaegeuk Kim struct f2fs_map_blocks map;
23224c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
23234c8ff709SChao Yu struct compress_ctx cc = {
23244c8ff709SChao Yu .inode = inode,
23254c8ff709SChao Yu .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
23264c8ff709SChao Yu .cluster_size = F2FS_I(inode)->i_cluster_size,
23274c8ff709SChao Yu .cluster_idx = NULL_CLUSTER,
23284c8ff709SChao Yu .rpages = NULL,
23294c8ff709SChao Yu .cpages = NULL,
23304c8ff709SChao Yu .nr_rpages = 0,
23314c8ff709SChao Yu .nr_cpages = 0,
23324c8ff709SChao Yu };
2333a2649315SFengnan Chang pgoff_t nc_cluster_idx = NULL_CLUSTER;
23344c8ff709SChao Yu #endif
233523323196SMatthew Wilcox (Oracle) unsigned nr_pages = rac ? readahead_count(rac) : 1;
23364c8ff709SChao Yu unsigned max_nr_pages = nr_pages;
23372df0ab04SChao Yu int ret = 0;
2338f1e88660SJaegeuk Kim
2339f1e88660SJaegeuk Kim map.m_pblk = 0;
2340f1e88660SJaegeuk Kim map.m_lblk = 0;
2341f1e88660SJaegeuk Kim map.m_len = 0;
2342f1e88660SJaegeuk Kim map.m_flags = 0;
2343da85985cSChao Yu map.m_next_pgofs = NULL;
2344c4020b2dSChao Yu map.m_next_extent = NULL;
2345d5097be5SHyunchul Lee map.m_seg_type = NO_CHECK_TYPE;
2346f9d6d059SChao Yu map.m_may_create = false;
2347f1e88660SJaegeuk Kim
2348736c0a74SLiFan for (; nr_pages; nr_pages--) {
234923323196SMatthew Wilcox (Oracle) if (rac) {
235023323196SMatthew Wilcox (Oracle) page = readahead_page(rac);
2351a83d50bcSKinglong Mee prefetchw(&page->flags);
2352f1e88660SJaegeuk Kim }
2353f1e88660SJaegeuk Kim
23544c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
23554c8ff709SChao Yu if (f2fs_compressed_file(inode)) {
2356146949deSJinyoung CHOI /* there are remaining compressed pages, submit them */
23574c8ff709SChao Yu if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
23584c8ff709SChao Yu ret = f2fs_read_multi_pages(&cc, &bio,
23594c8ff709SChao Yu max_nr_pages,
23604c8ff709SChao Yu &last_block_in_bio,
236123323196SMatthew Wilcox (Oracle) rac != NULL, false);
23628bfbfb0dSChao Yu f2fs_destroy_compress_ctx(&cc, false);
23634c8ff709SChao Yu if (ret)
23644c8ff709SChao Yu goto set_error_page;
23654c8ff709SChao Yu }
2366a2649315SFengnan Chang if (cc.cluster_idx == NULL_CLUSTER) {
2367a2649315SFengnan Chang if (nc_cluster_idx ==
2368a2649315SFengnan Chang page->index >> cc.log_cluster_size) {
2369a2649315SFengnan Chang goto read_single_page;
2370a2649315SFengnan Chang }
2371a2649315SFengnan Chang
23724c8ff709SChao Yu ret = f2fs_is_compressed_cluster(inode, page->index);
23734c8ff709SChao Yu if (ret < 0)
23744c8ff709SChao Yu goto set_error_page;
2375a2649315SFengnan Chang else if (!ret) {
2376a2649315SFengnan Chang nc_cluster_idx =
2377a2649315SFengnan Chang page->index >> cc.log_cluster_size;
23784c8ff709SChao Yu goto read_single_page;
2379a2649315SFengnan Chang }
23804c8ff709SChao Yu
2381a2649315SFengnan Chang nc_cluster_idx = NULL_CLUSTER;
2382a2649315SFengnan Chang }
23834c8ff709SChao Yu ret = f2fs_init_compress_ctx(&cc);
23844c8ff709SChao Yu if (ret)
23854c8ff709SChao Yu goto set_error_page;
23864c8ff709SChao Yu
23874c8ff709SChao Yu f2fs_compress_ctx_add_page(&cc, page);
23884c8ff709SChao Yu
23894c8ff709SChao Yu goto next_page;
23904c8ff709SChao Yu }
23914c8ff709SChao Yu read_single_page:
23924c8ff709SChao Yu #endif
23934c8ff709SChao Yu
23944c8ff709SChao Yu ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
239523323196SMatthew Wilcox (Oracle) &bio, &last_block_in_bio, rac);
23962df0ab04SChao Yu if (ret) {
23974c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
23984c8ff709SChao Yu set_error_page:
23994c8ff709SChao Yu #endif
240009cbfeafSKirill A. Shutemov zero_user_segment(page, 0, PAGE_SIZE);
2401f1e88660SJaegeuk Kim unlock_page(page);
2402f1e88660SJaegeuk Kim }
240323323196SMatthew Wilcox (Oracle) #ifdef CONFIG_F2FS_FS_COMPRESSION
2404f1e88660SJaegeuk Kim next_page:
240523323196SMatthew Wilcox (Oracle) #endif
240623323196SMatthew Wilcox (Oracle) if (rac)
240709cbfeafSKirill A. Shutemov put_page(page);
24084c8ff709SChao Yu
24094c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
24104c8ff709SChao Yu if (f2fs_compressed_file(inode)) {
24114c8ff709SChao Yu /* last page */
24124c8ff709SChao Yu if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
24134c8ff709SChao Yu ret = f2fs_read_multi_pages(&cc, &bio,
24144c8ff709SChao Yu max_nr_pages,
24154c8ff709SChao Yu &last_block_in_bio,
241623323196SMatthew Wilcox (Oracle) rac != NULL, false);
24178bfbfb0dSChao Yu f2fs_destroy_compress_ctx(&cc, false);
24184c8ff709SChao Yu }
24194c8ff709SChao Yu }
24204c8ff709SChao Yu #endif
2421f1e88660SJaegeuk Kim }
2422f1e88660SJaegeuk Kim if (bio)
2423bc29835aSChristoph Hellwig f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
242423323196SMatthew Wilcox (Oracle) return ret;
2425f1e88660SJaegeuk Kim }
2426f1e88660SJaegeuk Kim
2427be05584fSMatthew Wilcox (Oracle) static int f2fs_read_data_folio(struct file *file, struct folio *folio)
2428eb47b800SJaegeuk Kim {
2429be05584fSMatthew Wilcox (Oracle) struct page *page = &folio->page;
24304969c06aSJaegeuk Kim struct inode *inode = page_file_mapping(page)->host;
2431b3d208f9SJaegeuk Kim int ret = -EAGAIN;
24329ffe0fb5SHuajun Li
2433c20e89cdSChao Yu trace_f2fs_readpage(page, DATA);
2434c20e89cdSChao Yu
24354c8ff709SChao Yu if (!f2fs_is_compress_backend_ready(inode)) {
24364c8ff709SChao Yu unlock_page(page);
24374c8ff709SChao Yu return -EOPNOTSUPP;
24384c8ff709SChao Yu }
24394c8ff709SChao Yu
2440e1c42045Sarter97 /* If the file has inline data, try to read it directly */
24419ffe0fb5SHuajun Li if (f2fs_has_inline_data(inode))
24429ffe0fb5SHuajun Li ret = f2fs_read_inline_data(inode, page);
2443b3d208f9SJaegeuk Kim if (ret == -EAGAIN)
2444e20a7693SMatthew Wilcox (Oracle) ret = f2fs_mpage_readpages(inode, NULL, page);
24459ffe0fb5SHuajun Li return ret;
2446eb47b800SJaegeuk Kim }
2447eb47b800SJaegeuk Kim
244823323196SMatthew Wilcox (Oracle) static void f2fs_readahead(struct readahead_control *rac)
2449eb47b800SJaegeuk Kim {
245023323196SMatthew Wilcox (Oracle) struct inode *inode = rac->mapping->host;
2451b8c29400SChao Yu
245223323196SMatthew Wilcox (Oracle) trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
24539ffe0fb5SHuajun Li
24544c8ff709SChao Yu if (!f2fs_is_compress_backend_ready(inode))
245523323196SMatthew Wilcox (Oracle) return;
24564c8ff709SChao Yu
2457704528d8SMatthew Wilcox (Oracle) /* If the file has inline data, skip readahead */
24589ffe0fb5SHuajun Li if (f2fs_has_inline_data(inode))
245923323196SMatthew Wilcox (Oracle) return;
24609ffe0fb5SHuajun Li
2461e20a7693SMatthew Wilcox (Oracle) f2fs_mpage_readpages(inode, rac, NULL);
2462eb47b800SJaegeuk Kim }
2463eb47b800SJaegeuk Kim
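/*
 * Encrypt one page with fscrypt before it is written back. For compressed
 * clusters, the compressed page is encrypted instead of the original page,
 * and inodes using inline crypto need no bounce page at all. On -ENOMEM,
 * pending merged writes are flushed and the allocation is retried with
 * __GFP_NOFAIL. If META_MAPPING caches a copy of the old block, it is
 * refreshed with the encrypted contents.
 */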
24644c8ff709SChao Yu int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
24657eab0c0dSHou Pengyang {
24667eab0c0dSHou Pengyang struct inode *inode = fio->page->mapping->host;
24674c8ff709SChao Yu struct page *mpage, *page;
24687eab0c0dSHou Pengyang gfp_t gfp_flags = GFP_NOFS;
24697eab0c0dSHou Pengyang
24701958593eSJaegeuk Kim if (!f2fs_encrypted_file(inode))
24717eab0c0dSHou Pengyang return 0;
24727eab0c0dSHou Pengyang
24734c8ff709SChao Yu page = fio->compressed_page ? fio->compressed_page : fio->page;
24744c8ff709SChao Yu
247527aacd28SSatya Tangirala if (fscrypt_inode_uses_inline_crypto(inode))
247627aacd28SSatya Tangirala return 0;
247727aacd28SSatya Tangirala
24787eab0c0dSHou Pengyang retry_encrypt:
24794c8ff709SChao Yu fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
24804c8ff709SChao Yu PAGE_SIZE, 0, gfp_flags);
24816aa58d8aSChao Yu if (IS_ERR(fio->encrypted_page)) {
24827eab0c0dSHou Pengyang /* flush pending IOs and wait for a while in the ENOMEM case */
24837eab0c0dSHou Pengyang if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
2484b9109b0eSJaegeuk Kim f2fs_flush_merged_writes(fio->sbi);
24854034247aSNeilBrown memalloc_retry_wait(GFP_NOFS);
24867eab0c0dSHou Pengyang gfp_flags |= __GFP_NOFAIL;
24877eab0c0dSHou Pengyang goto retry_encrypt;
24887eab0c0dSHou Pengyang }
24897eab0c0dSHou Pengyang return PTR_ERR(fio->encrypted_page);
24907eab0c0dSHou Pengyang }
24917eab0c0dSHou Pengyang
24926aa58d8aSChao Yu mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
24936aa58d8aSChao Yu if (mpage) {
24946aa58d8aSChao Yu if (PageUptodate(mpage))
24956aa58d8aSChao Yu memcpy(page_address(mpage),
24966aa58d8aSChao Yu page_address(fio->encrypted_page), PAGE_SIZE);
24976aa58d8aSChao Yu f2fs_put_page(mpage, 1);
24986aa58d8aSChao Yu }
24996aa58d8aSChao Yu return 0;
25006aa58d8aSChao Yu }
25016aa58d8aSChao Yu
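/*
 * Evaluate the mount-time in-place-update (IPU) policy: FORCE always allows
 * IPU; SSR/UTIL/SSR_UTIL gate it on SSR need and device utilization; ASYNC
 * allows it for async rewrites of unencrypted data; FSYNC applies only
 * during fdatasync. FI_OPU_WRITE wins when HONOR_OPU_WRITE is set, and
 * non-checkpointed data must stay in place while checkpointing is disabled.
 */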
2502bb9e3bb8SChao Yu static inline bool check_inplace_update_policy(struct inode *inode,
2503bb9e3bb8SChao Yu struct f2fs_io_info *fio)
2504bb9e3bb8SChao Yu {
2505bb9e3bb8SChao Yu struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2506bb9e3bb8SChao Yu
2507fdb7ccc3SYangtao Li if (IS_F2FS_IPU_HONOR_OPU_WRITE(sbi) &&
25081018a546SChao Yu is_inode_flag_set(inode, FI_OPU_WRITE))
25091018a546SChao Yu return false;
2510fdb7ccc3SYangtao Li if (IS_F2FS_IPU_FORCE(sbi))
2511bb9e3bb8SChao Yu return true;
2512fdb7ccc3SYangtao Li if (IS_F2FS_IPU_SSR(sbi) && f2fs_need_SSR(sbi))
2513bb9e3bb8SChao Yu return true;
2514fdb7ccc3SYangtao Li if (IS_F2FS_IPU_UTIL(sbi) && utilization(sbi) > SM_I(sbi)->min_ipu_util)
2515bb9e3bb8SChao Yu return true;
2516fdb7ccc3SYangtao Li if (IS_F2FS_IPU_SSR_UTIL(sbi) && f2fs_need_SSR(sbi) &&
2517bb9e3bb8SChao Yu utilization(sbi) > SM_I(sbi)->min_ipu_util)
2518bb9e3bb8SChao Yu return true;
2519bb9e3bb8SChao Yu
2520bb9e3bb8SChao Yu /*
2521bb9e3bb8SChao Yu * IPU for rewriting async pages
2522bb9e3bb8SChao Yu */
2523fdb7ccc3SYangtao Li if (IS_F2FS_IPU_ASYNC(sbi) && fio && fio->op == REQ_OP_WRITE &&
2524fdb7ccc3SYangtao Li !(fio->op_flags & REQ_SYNC) && !IS_ENCRYPTED(inode))
2525bb9e3bb8SChao Yu return true;
2526bb9e3bb8SChao Yu
2527bb9e3bb8SChao Yu /* this is only set during fdatasync */
2528fdb7ccc3SYangtao Li if (IS_F2FS_IPU_FSYNC(sbi) && is_inode_flag_set(inode, FI_NEED_IPU))
2529bb9e3bb8SChao Yu return true;
2530bb9e3bb8SChao Yu
25314354994fSDaniel Rosenberg if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
25324354994fSDaniel Rosenberg !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
25334354994fSDaniel Rosenberg return true;
25344354994fSDaniel Rosenberg
2535bb9e3bb8SChao Yu return false;
2536bb9e3bb8SChao Yu }
2537bb9e3bb8SChao Yu
25384d57b86dSChao Yu bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
2539bb9e3bb8SChao Yu {
2540859fca6bSChao Yu /* swap file is migrating in aligned write mode */
2541859fca6bSChao Yu if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2542859fca6bSChao Yu return false;
2543859fca6bSChao Yu
2544bb9e3bb8SChao Yu if (f2fs_is_pinned_file(inode))
2545bb9e3bb8SChao Yu return true;
2546bb9e3bb8SChao Yu
2547bb9e3bb8SChao Yu /* if this is a cold file, we should overwrite it to avoid fragmentation */
2548f3b23c78SWeichao Guo if (file_is_cold(inode) && !is_inode_flag_set(inode, FI_OPU_WRITE))
2549bb9e3bb8SChao Yu return true;
2550bb9e3bb8SChao Yu
2551bb9e3bb8SChao Yu return check_inplace_update_policy(inode, fio);
2552bb9e3bb8SChao Yu }
2553bb9e3bb8SChao Yu
25544d57b86dSChao Yu bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
2555bb9e3bb8SChao Yu {
2556bb9e3bb8SChao Yu struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2557bb9e3bb8SChao Yu
255819bdba52SJaegeuk Kim /* The below cases were already checked when the pin flag was set. */
255919bdba52SJaegeuk Kim if (f2fs_is_pinned_file(inode))
256019bdba52SJaegeuk Kim return false;
256119bdba52SJaegeuk Kim if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK))
256219bdba52SJaegeuk Kim return true;
2563b0332a0fSChao Yu if (f2fs_lfs_mode(sbi))
2564bb9e3bb8SChao Yu return true;
2565bb9e3bb8SChao Yu if (S_ISDIR(inode->i_mode))
2566bb9e3bb8SChao Yu return true;
2567af033b2aSChao Yu if (IS_NOQUOTA(inode))
2568af033b2aSChao Yu return true;
2569b82d4300SSunmin Jeong if (f2fs_used_in_atomic_write(inode))
2570bb9e3bb8SChao Yu return true;
2571859fca6bSChao Yu
2572859fca6bSChao Yu /* swap file is migrating in aligned write mode */
2573859fca6bSChao Yu if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2574859fca6bSChao Yu return true;
2575859fca6bSChao Yu
25761018a546SChao Yu if (is_inode_flag_set(inode, FI_OPU_WRITE))
25771018a546SChao Yu return true;
25781018a546SChao Yu
2579bb9e3bb8SChao Yu if (fio) {
2580b763f3beSChao Yu if (page_private_gcing(fio->page))
2581bb9e3bb8SChao Yu return true;
25824354994fSDaniel Rosenberg if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
25834354994fSDaniel Rosenberg f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
25844354994fSDaniel Rosenberg return true;
2585bb9e3bb8SChao Yu }
2586bb9e3bb8SChao Yu return false;
2587bb9e3bb8SChao Yu }
2588bb9e3bb8SChao Yu
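/*
 * Out-of-place conditions take precedence: only when no OPU rule applies is
 * the IPU policy above consulted.
 */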
25897eab0c0dSHou Pengyang static inline bool need_inplace_update(struct f2fs_io_info *fio)
25907eab0c0dSHou Pengyang {
25917eab0c0dSHou Pengyang struct inode *inode = fio->page->mapping->host;
25927eab0c0dSHou Pengyang
25934d57b86dSChao Yu if (f2fs_should_update_outplace(inode, fio))
25947eab0c0dSHou Pengyang return false;
25957eab0c0dSHou Pengyang
25964d57b86dSChao Yu return f2fs_should_update_inplace(inode, fio);
25977eab0c0dSHou Pengyang }
25987eab0c0dSHou Pengyang
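/*
 * Write one dirty data page either in place (IPU) at its old block address
 * or out of place (OPU) to a newly allocated block. Atomic commits build
 * their dnode from the COW inode, and an extent-cache hit combined with a
 * positive IPU decision skips the dnode lookup entirely.
 */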
25994d57b86dSChao Yu int f2fs_do_write_data_page(struct f2fs_io_info *fio)
2600eb47b800SJaegeuk Kim {
260105ca3632SJaegeuk Kim struct page *page = fio->page;
2602eb47b800SJaegeuk Kim struct inode *inode = page->mapping->host;
2603eb47b800SJaegeuk Kim struct dnode_of_data dn;
26047735730dSChao Yu struct node_info ni;
2605e959c8f5SHou Pengyang bool ipu_force = false;
2606d889928bSChao Yu bool atomic_commit;
2607eb47b800SJaegeuk Kim int err = 0;
2608eb47b800SJaegeuk Kim
26093db1de0eSDaeho Jeong /* Use the COW inode to build dnode_of_data for an atomic write */
2610d889928bSChao Yu atomic_commit = f2fs_is_atomic_file(inode) &&
2611d889928bSChao Yu page_private_atomic(fio->page);
2612d889928bSChao Yu if (atomic_commit)
26133db1de0eSDaeho Jeong set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
26143db1de0eSDaeho Jeong else
2615eb47b800SJaegeuk Kim set_new_dnode(&dn, inode, NULL, NULL, 0);
26163db1de0eSDaeho Jeong
2617e959c8f5SHou Pengyang if (need_inplace_update(fio) &&
261804a91ab0SChristoph Hellwig f2fs_lookup_read_extent_cache_block(inode, page->index,
261904a91ab0SChristoph Hellwig &fio->old_blkaddr)) {
2620c9b60788SChao Yu if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
262195fa90c9SChao Yu DATA_GENERIC_ENHANCE)) {
262295fa90c9SChao Yu f2fs_handle_error(fio->sbi,
262395fa90c9SChao Yu ERROR_INVALID_BLKADDR);
262410f966bbSChao Yu return -EFSCORRUPTED;
262595fa90c9SChao Yu }
2626c9b60788SChao Yu
2627e959c8f5SHou Pengyang ipu_force = true;
2628cc15620bSJaegeuk Kim fio->need_lock = LOCK_DONE;
2629e959c8f5SHou Pengyang goto got_it;
2630e959c8f5SHou Pengyang }
2631279d6df2SHou Pengyang
2632d29460e5SJaegeuk Kim /* Avoid deadlock between page->lock and f2fs_lock_op */
2633d29460e5SJaegeuk Kim if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
2634d29460e5SJaegeuk Kim return -EAGAIN;
2635279d6df2SHou Pengyang
26364d57b86dSChao Yu err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
2637eb47b800SJaegeuk Kim if (err)
2638279d6df2SHou Pengyang goto out;
2639eb47b800SJaegeuk Kim
264028bc106bSChao Yu fio->old_blkaddr = dn.data_blkaddr;
2641eb47b800SJaegeuk Kim
2642eb47b800SJaegeuk Kim /* This page is already truncated */
26437a9d7548SChao Yu if (fio->old_blkaddr == NULL_ADDR) {
26442bca1e23SJaegeuk Kim ClearPageUptodate(page);
2645b763f3beSChao Yu clear_page_private_gcing(page);
2646eb47b800SJaegeuk Kim goto out_writepage;
26472bca1e23SJaegeuk Kim }
2648e959c8f5SHou Pengyang got_it:
2649c9b60788SChao Yu if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2650c9b60788SChao Yu !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
265193770ab7SChao Yu DATA_GENERIC_ENHANCE)) {
265210f966bbSChao Yu err = -EFSCORRUPTED;
265395fa90c9SChao Yu f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
2654c9b60788SChao Yu goto out_writepage;
2655c9b60788SChao Yu }
26563db1de0eSDaeho Jeong
26574535be48SChao Yu /* wait for GCed page writeback via META_MAPPING */
2658271fda62SSunmin Jeong if (fio->meta_gc)
26594535be48SChao Yu f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
26604535be48SChao Yu
2661eb47b800SJaegeuk Kim /*
2662eb47b800SJaegeuk Kim * If the current allocation needs SSR,
2663eb47b800SJaegeuk Kim * it is better to do in-place writes for the updated data.
2664eb47b800SJaegeuk Kim */
266593770ab7SChao Yu if (ipu_force ||
266693770ab7SChao Yu (__is_valid_data_blkaddr(fio->old_blkaddr) &&
26677b525dd0SChao Yu need_inplace_update(fio))) {
26684c8ff709SChao Yu err = f2fs_encrypt_one_page(fio);
2669cc15620bSJaegeuk Kim if (err)
2670cc15620bSJaegeuk Kim goto out_writepage;
2671cc15620bSJaegeuk Kim
2672cc15620bSJaegeuk Kim set_page_writeback(page);
2673279d6df2SHou Pengyang f2fs_put_dnode(&dn);
2674cc15620bSJaegeuk Kim if (fio->need_lock == LOCK_REQ)
26757eab0c0dSHou Pengyang f2fs_unlock_op(fio->sbi);
26764d57b86dSChao Yu err = f2fs_inplace_write_data(fio);
26776492a335SChao Yu if (err) {
267827aacd28SSatya Tangirala if (fscrypt_inode_uses_fs_layer_crypto(inode))
2679d2d0727bSEric Biggers fscrypt_finalize_bounce_page(&fio->encrypted_page);
26806492a335SChao Yu if (PageWriteback(page))
26812062e0c3SSheng Yong end_page_writeback(page);
2682cd23ffa9SChao Yu } else {
2683cd23ffa9SChao Yu set_inode_flag(inode, FI_UPDATE_WRITE);
26846492a335SChao Yu }
26857eab0c0dSHou Pengyang trace_f2fs_do_write_data_page(fio->page, IPU);
2686279d6df2SHou Pengyang return err;
2687279d6df2SHou Pengyang }
2688279d6df2SHou Pengyang
2689cc15620bSJaegeuk Kim if (fio->need_lock == LOCK_RETRY) {
2690cc15620bSJaegeuk Kim if (!f2fs_trylock_op(fio->sbi)) {
2691cc15620bSJaegeuk Kim err = -EAGAIN;
2692cc15620bSJaegeuk Kim goto out_writepage;
2693cc15620bSJaegeuk Kim }
2694cc15620bSJaegeuk Kim fio->need_lock = LOCK_REQ;
2695cc15620bSJaegeuk Kim }
2696cc15620bSJaegeuk Kim
2697a9419b63SJaegeuk Kim err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false);
26987735730dSChao Yu if (err)
26997735730dSChao Yu goto out_writepage;
27007735730dSChao Yu
27017735730dSChao Yu fio->version = ni.version;
27027735730dSChao Yu
27034c8ff709SChao Yu err = f2fs_encrypt_one_page(fio);
2704cc15620bSJaegeuk Kim if (err)
2705cc15620bSJaegeuk Kim goto out_writepage;
2706cc15620bSJaegeuk Kim
2707cc15620bSJaegeuk Kim set_page_writeback(page);
2708cc15620bSJaegeuk Kim
27094c8ff709SChao Yu if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
27104c8ff709SChao Yu f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
27114c8ff709SChao Yu
2712279d6df2SHou Pengyang /* LFS mode write path */
27134d57b86dSChao Yu f2fs_outplace_write_data(&dn, fio);
27148ce67cb0SJaegeuk Kim trace_f2fs_do_write_data_page(page, OPU);
271591942321SJaegeuk Kim set_inode_flag(inode, FI_APPEND_WRITE);
2716d889928bSChao Yu if (atomic_commit)
2717d889928bSChao Yu clear_page_private_atomic(page);
2718eb47b800SJaegeuk Kim out_writepage:
2719eb47b800SJaegeuk Kim f2fs_put_dnode(&dn);
2720279d6df2SHou Pengyang out:
2721cc15620bSJaegeuk Kim if (fio->need_lock == LOCK_REQ)
2722279d6df2SHou Pengyang f2fs_unlock_op(fio->sbi);
2723eb47b800SJaegeuk Kim return err;
2724eb47b800SJaegeuk Kim }
2725eb47b800SJaegeuk Kim
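/*
 * Write a single data page under @wbc. Pages entirely beyond i_size are
 * dropped, the EOF page is zeroed past the file tail, and dentry/quota
 * pages are written under checkpoint serialization. An -EAGAIN from the
 * trylock attempt (LOCK_RETRY) is retried while holding f2fs_lock_op()
 * (LOCK_REQ).
 */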
27264c8ff709SChao Yu int f2fs_write_single_data_page(struct page *page, int *submitted,
27278648de2cSChao Yu struct bio **bio,
27288648de2cSChao Yu sector_t *last_block,
2729b0af6d49SChao Yu struct writeback_control *wbc,
27304c8ff709SChao Yu enum iostat_type io_type,
27313afae09fSChao Yu int compr_blocks,
27323afae09fSChao Yu bool allow_balance)
2733eb47b800SJaegeuk Kim {
2734eb47b800SJaegeuk Kim struct inode *inode = page->mapping->host;
27354081363fSJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2736eb47b800SJaegeuk Kim loff_t i_size = i_size_read(inode);
2737eb47b800SJaegeuk Kim const pgoff_t end_index = ((unsigned long long)i_size)
273809cbfeafSKirill A. Shutemov >> PAGE_SHIFT;
27391f0d5c91SChao Yu loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
27409ffe0fb5SHuajun Li unsigned offset = 0;
274139936837SJaegeuk Kim bool need_balance_fs = false;
2742f082c6b2SChao Yu bool quota_inode = IS_NOQUOTA(inode);
2743eb47b800SJaegeuk Kim int err = 0;
2744458e6197SJaegeuk Kim struct f2fs_io_info fio = {
274505ca3632SJaegeuk Kim .sbi = sbi,
274639d787beSChao Yu .ino = inode->i_ino,
2747458e6197SJaegeuk Kim .type = DATA,
274804d328deSMike Christie .op = REQ_OP_WRITE,
27497637241eSJens Axboe .op_flags = wbc_to_write_flags(wbc),
2750e959c8f5SHou Pengyang .old_blkaddr = NULL_ADDR,
275105ca3632SJaegeuk Kim .page = page,
27524375a336SJaegeuk Kim .encrypted_page = NULL,
27532eae077eSChao Yu .submitted = 0,
27544c8ff709SChao Yu .compr_blocks = compr_blocks,
275575abfd61SChao Yu .need_lock = compr_blocks ? LOCK_DONE : LOCK_RETRY,
2756271fda62SSunmin Jeong .meta_gc = f2fs_meta_inode_gc_required(inode) ? 1 : 0,
2757b0af6d49SChao Yu .io_type = io_type,
2758578c6478SYufen Yu .io_wbc = wbc,
27598648de2cSChao Yu .bio = bio,
27608648de2cSChao Yu .last_block = last_block,
2761458e6197SJaegeuk Kim };
2762eb47b800SJaegeuk Kim
2763ecda0de3SChao Yu trace_f2fs_writepage(page, DATA);
2764ecda0de3SChao Yu
2765146949deSJinyoung CHOI /* we should bypass data pages to let the kworker jobs proceed */
2766db198ae0SChao Yu if (unlikely(f2fs_cp_error(sbi))) {
2767db198ae0SChao Yu mapping_set_error(page->mapping, -EIO);
27681174abfdSChao Yu /*
27691174abfdSChao Yu * don't drop any dirty dentry pages, to keep the latest
27701174abfdSChao Yu * directory structure.
27711174abfdSChao Yu */
2772c9b3649aSChao Yu if (S_ISDIR(inode->i_mode) &&
2773c9b3649aSChao Yu !is_sbi_flag_set(sbi, SBI_IS_CLOSE))
27741174abfdSChao Yu goto redirty_out;
2775b62e71beSChao Yu
2776b62e71beSChao Yu /* keep data pages in remount-ro mode */
2777b62e71beSChao Yu if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
2778b62e71beSChao Yu goto redirty_out;
2779db198ae0SChao Yu goto out;
2780db198ae0SChao Yu }
2781db198ae0SChao Yu
27820771fcc7SChao Yu if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
27830771fcc7SChao Yu goto redirty_out;
27840771fcc7SChao Yu
27854c8ff709SChao Yu if (page->index < end_index ||
27864c8ff709SChao Yu f2fs_verity_in_progress(inode) ||
27874c8ff709SChao Yu compr_blocks)
278839936837SJaegeuk Kim goto write;
2789eb47b800SJaegeuk Kim
2790eb47b800SJaegeuk Kim /*
2791eb47b800SJaegeuk Kim * If the offset is out of range of the file size,
2792eb47b800SJaegeuk Kim * this page does not have to be written to disk.
2793eb47b800SJaegeuk Kim */
279409cbfeafSKirill A. Shutemov offset = i_size & (PAGE_SIZE - 1);
279576f60268SJaegeuk Kim if ((page->index >= end_index + 1) || !offset)
279639936837SJaegeuk Kim goto out;
2797eb47b800SJaegeuk Kim
279809cbfeafSKirill A. Shutemov zero_user_segment(page, offset, PAGE_SIZE);
279939936837SJaegeuk Kim write:
2800435cbab9SJaegeuk Kim /* Dentry/quota blocks are controlled by checkpoint */
2801f082c6b2SChao Yu if (S_ISDIR(inode->i_mode) || quota_inode) {
280279963d96SChao Yu /*
280379963d96SChao Yu * We need to wait for node_write to avoid block allocation during
280479963d96SChao Yu * checkpoint. This can only happen for quota writes, which can cause
280579963d96SChao Yu * the below discard race condition.
280679963d96SChao Yu */
2807f082c6b2SChao Yu if (quota_inode)
2808e4544b63STim Murray f2fs_down_read(&sbi->node_write);
280979963d96SChao Yu
2810cc15620bSJaegeuk Kim fio.need_lock = LOCK_DONE;
28114d57b86dSChao Yu err = f2fs_do_write_data_page(&fio);
281279963d96SChao Yu
2813f082c6b2SChao Yu if (quota_inode)
2814e4544b63STim Murray f2fs_up_read(&sbi->node_write);
281579963d96SChao Yu
2816b230e6caSJaegeuk Kim goto done;
2817b230e6caSJaegeuk Kim }
2818b230e6caSJaegeuk Kim
28198618b881SJaegeuk Kim if (!wbc->for_reclaim)
282039936837SJaegeuk Kim need_balance_fs = true;
28217f3037a5SJaegeuk Kim else if (has_not_enough_free_secs(sbi, 0, 0))
282239936837SJaegeuk Kim goto redirty_out;
2823ef095d19SJaegeuk Kim else
2824ef095d19SJaegeuk Kim set_inode_flag(inode, FI_HOT_DATA);
2825eb47b800SJaegeuk Kim
2826b3d208f9SJaegeuk Kim err = -EAGAIN;
2827dd7b2333SYunlei He if (f2fs_has_inline_data(inode)) {
2828b3d208f9SJaegeuk Kim err = f2fs_write_inline_data(inode, page);
2829dd7b2333SYunlei He if (!err)
2830dd7b2333SYunlei He goto out;
2831dd7b2333SYunlei He }
2832279d6df2SHou Pengyang
2833cc15620bSJaegeuk Kim if (err == -EAGAIN) {
28344d57b86dSChao Yu err = f2fs_do_write_data_page(&fio);
2835cc15620bSJaegeuk Kim if (err == -EAGAIN) {
283675abfd61SChao Yu f2fs_bug_on(sbi, compr_blocks);
2837cc15620bSJaegeuk Kim fio.need_lock = LOCK_REQ;
28384d57b86dSChao Yu err = f2fs_do_write_data_page(&fio);
2839cc15620bSJaegeuk Kim }
2840cc15620bSJaegeuk Kim }
2841a0d00fadSChao Yu
2842eb449797SChao Yu if (err) {
2843eb449797SChao Yu file_set_keep_isize(inode);
2844eb449797SChao Yu } else {
2845c10c9820SChao Yu spin_lock(&F2FS_I(inode)->i_size_lock);
284626de9b11SJaegeuk Kim if (F2FS_I(inode)->last_disk_size < psize)
284726de9b11SJaegeuk Kim F2FS_I(inode)->last_disk_size = psize;
2848c10c9820SChao Yu spin_unlock(&F2FS_I(inode)->i_size_lock);
2849eb449797SChao Yu }
2850279d6df2SHou Pengyang
28518618b881SJaegeuk Kim done:
28528618b881SJaegeuk Kim if (err && err != -ENOENT)
28538618b881SJaegeuk Kim goto redirty_out;
2854eb47b800SJaegeuk Kim
285539936837SJaegeuk Kim out:
2856a7ffdbe2SJaegeuk Kim inode_dec_dirty_pages(inode);
28572baf0781SChao Yu if (err) {
28582bca1e23SJaegeuk Kim ClearPageUptodate(page);
2859b763f3beSChao Yu clear_page_private_gcing(page);
28602baf0781SChao Yu }
28610c3a5797SChao Yu
28620c3a5797SChao Yu if (wbc->for_reclaim) {
2863bab475c5SChao Yu f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
2864ef095d19SJaegeuk Kim clear_inode_flag(inode, FI_HOT_DATA);
28654d57b86dSChao Yu f2fs_remove_dirty_inode(inode);
2866d68f735bSJaegeuk Kim submitted = NULL;
2867eb7e813cSChao Yu }
28680c3a5797SChao Yu unlock_page(page);
2869186857c5SChao Yu if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
2870d80afefbSChao Yu !F2FS_I(inode)->wb_task && allow_balance)
28710c3a5797SChao Yu f2fs_balance_fs(sbi, need_balance_fs);
28720c3a5797SChao Yu
2873d68f735bSJaegeuk Kim if (unlikely(f2fs_cp_error(sbi))) {
2874b9109b0eSJaegeuk Kim f2fs_submit_merged_write(sbi, DATA);
28755cdb422cSChao Yu if (bio && *bio)
28760b20fcecSChao Yu f2fs_submit_merged_ipu_write(sbi, bio, NULL);
2877d68f735bSJaegeuk Kim submitted = NULL;
2878d68f735bSJaegeuk Kim }
2879d68f735bSJaegeuk Kim
2880d68f735bSJaegeuk Kim if (submitted)
28812eae077eSChao Yu *submitted = fio.submitted;
28820c3a5797SChao Yu
2883eb47b800SJaegeuk Kim return 0;
2884eb47b800SJaegeuk Kim
2885eb47b800SJaegeuk Kim redirty_out:
288676f60268SJaegeuk Kim redirty_page_for_writepage(wbc, page);
28875b19d284SJaegeuk Kim /*
2888146949deSJinyoung CHOI * pageout() in MM translates EAGAIN, so it calls handle_write_error()
28895b19d284SJaegeuk Kim * -> mapping_set_error() -> set_bit(AS_EIO, ...).
28905b19d284SJaegeuk Kim * file_write_and_wait_range() will see the EIO error, which is critical
28915b19d284SJaegeuk Kim * for the return value of fsync() and for reporting atomic_write failure to the user.
28925b19d284SJaegeuk Kim */
28935b19d284SJaegeuk Kim if (!err || wbc->for_reclaim)
28940002b61bSChao Yu return AOP_WRITEPAGE_ACTIVATE;
2895b230e6caSJaegeuk Kim unlock_page(page);
2896b230e6caSJaegeuk Kim return err;
2897fa9150a8SNamjae Jeon }
2898fa9150a8SNamjae Jeon
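/*
 * ->writepage(): a page that belongs to a compressed cluster cannot be
 * written on its own, so redirty it and let ->writepages() handle the
 * whole cluster.
 */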
2899f566bae8SJaegeuk Kim static int f2fs_write_data_page(struct page *page,
2900f566bae8SJaegeuk Kim struct writeback_control *wbc)
2901f566bae8SJaegeuk Kim {
29024c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
29034c8ff709SChao Yu struct inode *inode = page->mapping->host;
29044c8ff709SChao Yu
29054c8ff709SChao Yu if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
29064c8ff709SChao Yu goto out;
29074c8ff709SChao Yu
29084c8ff709SChao Yu if (f2fs_compressed_file(inode)) {
29094c8ff709SChao Yu if (f2fs_is_compressed_cluster(inode, page->index)) {
29104c8ff709SChao Yu redirty_page_for_writepage(wbc, page);
29114c8ff709SChao Yu return AOP_WRITEPAGE_ACTIVATE;
29124c8ff709SChao Yu }
29134c8ff709SChao Yu }
29144c8ff709SChao Yu out:
29154c8ff709SChao Yu #endif
29164c8ff709SChao Yu
29174c8ff709SChao Yu return f2fs_write_single_data_page(page, NULL, NULL, NULL,
29183afae09fSChao Yu wbc, FS_DATA_IO, 0, true);
2919f566bae8SJaegeuk Kim }
2920f566bae8SJaegeuk Kim
29218f46dcaeSChao Yu /*
2922146949deSJinyoung CHOI * This function was copied from write_cache_pages() in mm/page-writeback.c.
29238f46dcaeSChao Yu * The major change is performing the write step for cold data pages
29248f46dcaeSChao Yu * separately from warm/hot data pages.
29258f46dcaeSChao Yu */
29268f46dcaeSChao Yu static int f2fs_write_cache_pages(struct address_space *mapping,
2927b0af6d49SChao Yu struct writeback_control *wbc,
2928b0af6d49SChao Yu enum iostat_type io_type)
29298f46dcaeSChao Yu {
29308f46dcaeSChao Yu int ret = 0;
29314c8ff709SChao Yu int done = 0, retry = 0;
29328deca179SChao Yu struct page *pages_local[F2FS_ONSTACK_PAGES];
29338deca179SChao Yu struct page **pages = pages_local;
29341cd98ee7SVishal Moola (Oracle) struct folio_batch fbatch;
2935c29fd0c0SChao Yu struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
29368648de2cSChao Yu struct bio *bio = NULL;
29378648de2cSChao Yu sector_t last_block;
29384c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
29394c8ff709SChao Yu struct inode *inode = mapping->host;
29404c8ff709SChao Yu struct compress_ctx cc = {
29414c8ff709SChao Yu .inode = inode,
29424c8ff709SChao Yu .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
29434c8ff709SChao Yu .cluster_size = F2FS_I(inode)->i_cluster_size,
29444c8ff709SChao Yu .cluster_idx = NULL_CLUSTER,
29454c8ff709SChao Yu .rpages = NULL,
29464c8ff709SChao Yu .nr_rpages = 0,
29474c8ff709SChao Yu .cpages = NULL,
29483271d7ebSFengnan Chang .valid_nr_cpages = 0,
29494c8ff709SChao Yu .rbuf = NULL,
29504c8ff709SChao Yu .cbuf = NULL,
29514c8ff709SChao Yu .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
29524c8ff709SChao Yu .private = NULL,
29534c8ff709SChao Yu };
29544c8ff709SChao Yu #endif
29551cd98ee7SVishal Moola (Oracle) int nr_folios, p, idx;
29568f46dcaeSChao Yu int nr_pages;
29578deca179SChao Yu unsigned int max_pages = F2FS_ONSTACK_PAGES;
29588f46dcaeSChao Yu pgoff_t index;
29598f46dcaeSChao Yu pgoff_t end; /* Inclusive */
29608f46dcaeSChao Yu pgoff_t done_index;
29618f46dcaeSChao Yu int range_whole = 0;
296210bbd235SMatthew Wilcox xa_mark_t tag;
2963bab475c5SChao Yu int nwritten = 0;
29644c8ff709SChao Yu int submitted = 0;
29654c8ff709SChao Yu int i;
29668f46dcaeSChao Yu
29678deca179SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
29688deca179SChao Yu if (f2fs_compressed_file(inode) &&
29698deca179SChao Yu 1 << cc.log_cluster_size > F2FS_ONSTACK_PAGES) {
29708deca179SChao Yu pages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
29718deca179SChao Yu cc.log_cluster_size, GFP_NOFS | __GFP_NOFAIL);
29728deca179SChao Yu max_pages = 1 << cc.log_cluster_size;
29738deca179SChao Yu }
29748deca179SChao Yu #endif
29758deca179SChao Yu
29761cd98ee7SVishal Moola (Oracle) folio_batch_init(&fbatch);
29771cd98ee7SVishal Moola (Oracle)
2978ef095d19SJaegeuk Kim if (get_dirty_pages(mapping->host) <=
2979ef095d19SJaegeuk Kim SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
2980ef095d19SJaegeuk Kim set_inode_flag(mapping->host, FI_HOT_DATA);
2981ef095d19SJaegeuk Kim else
2982ef095d19SJaegeuk Kim clear_inode_flag(mapping->host, FI_HOT_DATA);
2983ef095d19SJaegeuk Kim
29848f46dcaeSChao Yu if (wbc->range_cyclic) {
29854df7a75fSJason Yan index = mapping->writeback_index; /* prev offset */
29868f46dcaeSChao Yu end = -1;
29878f46dcaeSChao Yu } else {
298809cbfeafSKirill A. Shutemov index = wbc->range_start >> PAGE_SHIFT;
298909cbfeafSKirill A. Shutemov end = wbc->range_end >> PAGE_SHIFT;
29908f46dcaeSChao Yu if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
29918f46dcaeSChao Yu range_whole = 1;
29928f46dcaeSChao Yu }
29938f46dcaeSChao Yu if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
29948f46dcaeSChao Yu tag = PAGECACHE_TAG_TOWRITE;
29958f46dcaeSChao Yu else
29968f46dcaeSChao Yu tag = PAGECACHE_TAG_DIRTY;
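/*
 * Dirty folios are gathered into the pages[] array one batch at a time
 * (heap-allocated above when a compress cluster is larger than
 * F2FS_ONSTACK_PAGES), so the compression path always sees a whole
 * cluster at once.
 */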
29978f46dcaeSChao Yu retry:
29984c8ff709SChao Yu retry = 0;
29998f46dcaeSChao Yu if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
30008f46dcaeSChao Yu tag_pages_for_writeback(mapping, index, end);
30018f46dcaeSChao Yu done_index = index;
30024c8ff709SChao Yu while (!done && !retry && (index <= end)) {
30031cd98ee7SVishal Moola (Oracle) nr_pages = 0;
30041cd98ee7SVishal Moola (Oracle) again:
30051cd98ee7SVishal Moola (Oracle) nr_folios = filemap_get_folios_tag(mapping, &index, end,
30061cd98ee7SVishal Moola (Oracle) tag, &fbatch);
30071cd98ee7SVishal Moola (Oracle) if (nr_folios == 0) {
30081cd98ee7SVishal Moola (Oracle) if (nr_pages)
30091cd98ee7SVishal Moola (Oracle) goto write;
30108f46dcaeSChao Yu break;
30111cd98ee7SVishal Moola (Oracle) }
30128f46dcaeSChao Yu
30131cd98ee7SVishal Moola (Oracle) for (i = 0; i < nr_folios; i++) {
30141cd98ee7SVishal Moola (Oracle) struct folio *folio = fbatch.folios[i];
30151cd98ee7SVishal Moola (Oracle)
30161cd98ee7SVishal Moola (Oracle) idx = 0;
30171cd98ee7SVishal Moola (Oracle) p = folio_nr_pages(folio);
30181cd98ee7SVishal Moola (Oracle) add_more:
30191cd98ee7SVishal Moola (Oracle) pages[nr_pages] = folio_page(folio, idx);
30201cd98ee7SVishal Moola (Oracle) folio_get(folio);
30218deca179SChao Yu if (++nr_pages == max_pages) {
30221cd98ee7SVishal Moola (Oracle) index = folio->index + idx + 1;
30231cd98ee7SVishal Moola (Oracle) folio_batch_release(&fbatch);
30241cd98ee7SVishal Moola (Oracle) goto write;
30251cd98ee7SVishal Moola (Oracle) }
30261cd98ee7SVishal Moola (Oracle) if (++idx < p)
30271cd98ee7SVishal Moola (Oracle) goto add_more;
30281cd98ee7SVishal Moola (Oracle) }
30291cd98ee7SVishal Moola (Oracle) folio_batch_release(&fbatch);
30301cd98ee7SVishal Moola (Oracle) goto again;
30311cd98ee7SVishal Moola (Oracle) write:
30328f46dcaeSChao Yu for (i = 0; i < nr_pages; i++) {
303301fc4b9aSFengnan Chang struct page *page = pages[i];
30341cd98ee7SVishal Moola (Oracle) struct folio *folio = page_folio(page);
30354c8ff709SChao Yu bool need_readd;
30364c8ff709SChao Yu readd:
30374c8ff709SChao Yu need_readd = false;
30384c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
30394c8ff709SChao Yu if (f2fs_compressed_file(inode)) {
3040b368cc5eSFengnan Chang void *fsdata = NULL;
3041b368cc5eSFengnan Chang struct page *pagep;
3042b368cc5eSFengnan Chang int ret2;
3043b368cc5eSFengnan Chang
30444c8ff709SChao Yu ret = f2fs_init_compress_ctx(&cc);
30454c8ff709SChao Yu if (ret) {
30464c8ff709SChao Yu done = 1;
30474c8ff709SChao Yu break;
30484c8ff709SChao Yu }
30498f46dcaeSChao Yu
30504c8ff709SChao Yu if (!f2fs_cluster_can_merge_page(&cc,
30511cd98ee7SVishal Moola (Oracle) folio->index)) {
30524c8ff709SChao Yu ret = f2fs_write_multi_pages(&cc,
30534c8ff709SChao Yu &submitted, wbc, io_type);
30544c8ff709SChao Yu if (!ret)
30554c8ff709SChao Yu need_readd = true;
30564c8ff709SChao Yu goto result;
30574c8ff709SChao Yu }
30584c8ff709SChao Yu
30594c8ff709SChao Yu if (unlikely(f2fs_cp_error(sbi)))
30601cd98ee7SVishal Moola (Oracle) goto lock_folio;
30614c8ff709SChao Yu
3062b368cc5eSFengnan Chang if (!f2fs_cluster_is_empty(&cc))
30631cd98ee7SVishal Moola (Oracle) goto lock_folio;
30644c8ff709SChao Yu
30654f8219f8SFengnan Chang if (f2fs_all_cluster_page_ready(&cc,
306601fc4b9aSFengnan Chang pages, i, nr_pages, true))
30671cd98ee7SVishal Moola (Oracle) goto lock_folio;
30684f8219f8SFengnan Chang
30694c8ff709SChao Yu ret2 = f2fs_prepare_compress_overwrite(
30704c8ff709SChao Yu inode, &pagep,
30711cd98ee7SVishal Moola (Oracle) folio->index, &fsdata);
30724c8ff709SChao Yu if (ret2 < 0) {
30734c8ff709SChao Yu ret = ret2;
30744c8ff709SChao Yu done = 1;
30754c8ff709SChao Yu break;
30764c8ff709SChao Yu } else if (ret2 &&
3077b368cc5eSFengnan Chang (!f2fs_compress_write_end(inode,
30781cd98ee7SVishal Moola (Oracle) fsdata, folio->index, 1) ||
30794f8219f8SFengnan Chang !f2fs_all_cluster_page_ready(&cc,
30801cd98ee7SVishal Moola (Oracle) pages, i, nr_pages,
30811cd98ee7SVishal Moola (Oracle) false))) {
30824c8ff709SChao Yu retry = 1;
30834c8ff709SChao Yu break;
30844c8ff709SChao Yu }
30854c8ff709SChao Yu }
30864c8ff709SChao Yu #endif
3087f8de4331SChao Yu /* give priority to WB_SYNC threads */
3088c29fd0c0SChao Yu if (atomic_read(&sbi->wb_sync_req[DATA]) &&
3089f8de4331SChao Yu wbc->sync_mode == WB_SYNC_NONE) {
3090f8de4331SChao Yu done = 1;
3091f8de4331SChao Yu break;
3092f8de4331SChao Yu }
30934c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
30941cd98ee7SVishal Moola (Oracle) lock_folio:
30954c8ff709SChao Yu #endif
30961cd98ee7SVishal Moola (Oracle) done_index = folio->index;
3097d29460e5SJaegeuk Kim retry_write:
30981cd98ee7SVishal Moola (Oracle) folio_lock(folio);
30998f46dcaeSChao Yu
31001cd98ee7SVishal Moola (Oracle) if (unlikely(folio->mapping != mapping)) {
31018f46dcaeSChao Yu continue_unlock:
31021cd98ee7SVishal Moola (Oracle) folio_unlock(folio);
31038f46dcaeSChao Yu continue;
31048f46dcaeSChao Yu }
31058f46dcaeSChao Yu
31061cd98ee7SVishal Moola (Oracle) if (!folio_test_dirty(folio)) {
31078f46dcaeSChao Yu /* someone wrote it for us */
31088f46dcaeSChao Yu goto continue_unlock;
31098f46dcaeSChao Yu }
31108f46dcaeSChao Yu
31111cd98ee7SVishal Moola (Oracle) if (folio_test_writeback(folio)) {
3112c948be79SYangtao Li if (wbc->sync_mode == WB_SYNC_NONE)
31138f46dcaeSChao Yu goto continue_unlock;
3114c948be79SYangtao Li f2fs_wait_on_page_writeback(&folio->page, DATA, true, true);
31158f46dcaeSChao Yu }
31168f46dcaeSChao Yu
31171cd98ee7SVishal Moola (Oracle) if (!folio_clear_dirty_for_io(folio))
31188f46dcaeSChao Yu goto continue_unlock;
31198f46dcaeSChao Yu
31204c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
31214c8ff709SChao Yu if (f2fs_compressed_file(inode)) {
31221cd98ee7SVishal Moola (Oracle) folio_get(folio);
31231cd98ee7SVishal Moola (Oracle) f2fs_compress_ctx_add_page(&cc, &folio->page);
31244c8ff709SChao Yu continue;
31254c8ff709SChao Yu }
31264c8ff709SChao Yu #endif
31271cd98ee7SVishal Moola (Oracle) ret = f2fs_write_single_data_page(&folio->page,
31281cd98ee7SVishal Moola (Oracle) &submitted, &bio, &last_block,
31291cd98ee7SVishal Moola (Oracle) wbc, io_type, 0, true);
31304c8ff709SChao Yu if (ret == AOP_WRITEPAGE_ACTIVATE)
31311cd98ee7SVishal Moola (Oracle) folio_unlock(folio);
31324c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
31334c8ff709SChao Yu result:
31344c8ff709SChao Yu #endif
31354c8ff709SChao Yu nwritten += submitted;
31364c8ff709SChao Yu wbc->nr_to_write -= submitted;
31374c8ff709SChao Yu
31388f46dcaeSChao Yu if (unlikely(ret)) {
31390002b61bSChao Yu /*
31400002b61bSChao Yu * keep nr_to_write, since vfs uses this to
31410002b61bSChao Yu * get # of written pages.
31420002b61bSChao Yu */
31430002b61bSChao Yu if (ret == AOP_WRITEPAGE_ACTIVATE) {
31440002b61bSChao Yu ret = 0;
31454c8ff709SChao Yu goto next;
3146d29460e5SJaegeuk Kim } else if (ret == -EAGAIN) {
3147d29460e5SJaegeuk Kim ret = 0;
3148d29460e5SJaegeuk Kim if (wbc->sync_mode == WB_SYNC_ALL) {
3149a64239d0SNeilBrown f2fs_io_schedule_timeout(
31505df7731fSChao Yu DEFAULT_IO_TIMEOUT);
3151d29460e5SJaegeuk Kim goto retry_write;
3152d29460e5SJaegeuk Kim }
31534c8ff709SChao Yu goto next;
31540002b61bSChao Yu }
3155a842a909SMinjie Du done_index = folio_next_index(folio);
31568f46dcaeSChao Yu done = 1;
31578f46dcaeSChao Yu break;
31588f46dcaeSChao Yu }
31598f46dcaeSChao Yu
31604c8ff709SChao Yu if (wbc->nr_to_write <= 0 &&
31618f46dcaeSChao Yu wbc->sync_mode == WB_SYNC_NONE) {
31628f46dcaeSChao Yu done = 1;
31638f46dcaeSChao Yu break;
31648f46dcaeSChao Yu }
31654c8ff709SChao Yu next:
31664c8ff709SChao Yu if (need_readd)
31674c8ff709SChao Yu goto readd;
31688f46dcaeSChao Yu }
316901fc4b9aSFengnan Chang release_pages(pages, nr_pages);
31708f46dcaeSChao Yu cond_resched();
31718f46dcaeSChao Yu }
31724c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
31734c8ff709SChao Yu /* flush remaining pages in the compress cluster */
31744c8ff709SChao Yu if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
31754c8ff709SChao Yu ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
31764c8ff709SChao Yu nwritten += submitted;
31774c8ff709SChao Yu wbc->nr_to_write -= submitted;
31784c8ff709SChao Yu if (ret) {
31794c8ff709SChao Yu done = 1;
31804c8ff709SChao Yu retry = 0;
31814c8ff709SChao Yu }
31824c8ff709SChao Yu }
3183adfc6943SJaegeuk Kim if (f2fs_compressed_file(inode))
31848bfbfb0dSChao Yu f2fs_destroy_compress_ctx(&cc, false);
31854c8ff709SChao Yu #endif
3186e78790f8SSahitya Tummala if (retry) {
31878f46dcaeSChao Yu index = 0;
3188e78790f8SSahitya Tummala end = -1;
31898f46dcaeSChao Yu goto retry;
31908f46dcaeSChao Yu }
3191e78790f8SSahitya Tummala if (wbc->range_cyclic && !done)
3192e78790f8SSahitya Tummala done_index = 0;
31938f46dcaeSChao Yu if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
31948f46dcaeSChao Yu mapping->writeback_index = done_index;
31958f46dcaeSChao Yu
3196bab475c5SChao Yu if (nwritten)
3197b9109b0eSJaegeuk Kim f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
3198bab475c5SChao Yu NULL, 0, DATA);
31998648de2cSChao Yu /* submit cached bio of IPU write */
32008648de2cSChao Yu if (bio)
32010b20fcecSChao Yu f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
32026ca56ca4SChao Yu
32038deca179SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
32048deca179SChao Yu if (pages != pages_local)
32058deca179SChao Yu kfree(pages);
32068deca179SChao Yu #endif
32078deca179SChao Yu
32088f46dcaeSChao Yu return ret;
32098f46dcaeSChao Yu }
32108f46dcaeSChao Yu
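/*
 * Serialize ->writepages() under sbi->writepages for regular, non-quota
 * files whose writeback should not interleave: compressed data, non-sync
 * writeback, and inodes with at least min_seq_blocks dirty pages.
 */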
3211853137ceSJaegeuk Kim static inline bool __should_serialize_io(struct inode *inode,
3212853137ceSJaegeuk Kim struct writeback_control *wbc)
3213853137ceSJaegeuk Kim {
3214040d2bb3SChao Yu /* to avoid deadlock in the data flush path */
3215d80afefbSChao Yu if (F2FS_I(inode)->wb_task)
3216040d2bb3SChao Yu return false;
3217b13f67ffSChao Yu
3218b13f67ffSChao Yu if (!S_ISREG(inode->i_mode))
3219b13f67ffSChao Yu return false;
3220b13f67ffSChao Yu if (IS_NOQUOTA(inode))
3221b13f67ffSChao Yu return false;
3222b13f67ffSChao Yu
3223602a16d5SDaeho Jeong if (f2fs_need_compress_data(inode))
3224b13f67ffSChao Yu return true;
3225853137ceSJaegeuk Kim if (wbc->sync_mode != WB_SYNC_ALL)
3226853137ceSJaegeuk Kim return true;
3227853137ceSJaegeuk Kim if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
3228853137ceSJaegeuk Kim return true;
3229853137ceSJaegeuk Kim return false;
3230853137ceSJaegeuk Kim }
3231853137ceSJaegeuk Kim
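/*
 * Top-level data writeback: skip during POR, when nothing is dirty, or when
 * a WB_SYNC_ALL writer already holds wb_sync_req; otherwise run
 * f2fs_write_cache_pages() under a block plug, optionally serialized by
 * sbi->writepages.
 */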
3232fc99fe27SChao Yu static int __f2fs_write_data_pages(struct address_space *mapping,
3233b0af6d49SChao Yu struct writeback_control *wbc,
3234b0af6d49SChao Yu enum iostat_type io_type)
3235eb47b800SJaegeuk Kim {
3236eb47b800SJaegeuk Kim struct inode *inode = mapping->host;
32374081363fSJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
32389dfa1bafSJaegeuk Kim struct blk_plug plug;
3239eb47b800SJaegeuk Kim int ret;
3240853137ceSJaegeuk Kim bool locked = false;
3241eb47b800SJaegeuk Kim
3242cfb185a1SP J P /* deal with chardevs and other special files */
3243cfb185a1SP J P if (!mapping->a_ops->writepage)
3244cfb185a1SP J P return 0;
3245cfb185a1SP J P
32466a290544SChao Yu /* skip writing if there is no dirty page in this inode */
32476a290544SChao Yu if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
32486a290544SChao Yu return 0;
32496a290544SChao Yu
32500771fcc7SChao Yu /* during POR, we don't need to trigger writepage at all. */
32510771fcc7SChao Yu if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
32520771fcc7SChao Yu goto skip_write;
32530771fcc7SChao Yu
3254af033b2aSChao Yu if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
3255af033b2aSChao Yu wbc->sync_mode == WB_SYNC_NONE &&
3256a1257023SJaegeuk Kim get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
32574d57b86dSChao Yu f2fs_available_free_memory(sbi, DIRTY_DENTS))
3258a1257023SJaegeuk Kim goto skip_write;
3259a1257023SJaegeuk Kim
32601018a546SChao Yu /* skip writing during the file defragment preparation stage */
32611018a546SChao Yu if (is_inode_flag_set(inode, FI_SKIP_WRITES))
3262d323d005SChao Yu goto skip_write;
3263d323d005SChao Yu
3264d31c7c3fSYunlei He trace_f2fs_writepages(mapping->host, wbc, DATA);
3265d31c7c3fSYunlei He
3266687de7f1SJaegeuk Kim /* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
3267687de7f1SJaegeuk Kim if (wbc->sync_mode == WB_SYNC_ALL)
3268c29fd0c0SChao Yu atomic_inc(&sbi->wb_sync_req[DATA]);
326934415099SChao Yu else if (atomic_read(&sbi->wb_sync_req[DATA])) {
327034415099SChao Yu /* to avoid potential deadlock */
327134415099SChao Yu if (current->plug)
327234415099SChao Yu blk_finish_plug(current->plug);
3273687de7f1SJaegeuk Kim goto skip_write;
327434415099SChao Yu }
3275687de7f1SJaegeuk Kim
3276853137ceSJaegeuk Kim if (__should_serialize_io(inode, wbc)) {
3277853137ceSJaegeuk Kim mutex_lock(&sbi->writepages);
3278853137ceSJaegeuk Kim locked = true;
3279853137ceSJaegeuk Kim }
3280853137ceSJaegeuk Kim
32819dfa1bafSJaegeuk Kim blk_start_plug(&plug);
3282b0af6d49SChao Yu ret = f2fs_write_cache_pages(mapping, wbc, io_type);
32839dfa1bafSJaegeuk Kim blk_finish_plug(&plug);
3284687de7f1SJaegeuk Kim
3285853137ceSJaegeuk Kim if (locked)
3286853137ceSJaegeuk Kim mutex_unlock(&sbi->writepages);
3287853137ceSJaegeuk Kim
3288687de7f1SJaegeuk Kim if (wbc->sync_mode == WB_SYNC_ALL)
3289c29fd0c0SChao Yu atomic_dec(&sbi->wb_sync_req[DATA]);
329028ea6162SJaegeuk Kim /*
329128ea6162SJaegeuk Kim * if some pages were truncated, we cannot guarantee that their mapping->host
329228ea6162SJaegeuk Kim * can detect pending bios.
329328ea6162SJaegeuk Kim */
3294458e6197SJaegeuk Kim
32954d57b86dSChao Yu f2fs_remove_dirty_inode(inode);
3296eb47b800SJaegeuk Kim return ret;
3297d3baf95dSJaegeuk Kim
3298d3baf95dSJaegeuk Kim skip_write:
3299a7ffdbe2SJaegeuk Kim wbc->pages_skipped += get_dirty_pages(inode);
3300d31c7c3fSYunlei He trace_f2fs_writepages(mapping->host, wbc, DATA);
3301d3baf95dSJaegeuk Kim return 0;
3302eb47b800SJaegeuk Kim }
3303eb47b800SJaegeuk Kim
3304b0af6d49SChao Yu static int f2fs_write_data_pages(struct address_space *mapping,
3305b0af6d49SChao Yu struct writeback_control *wbc)
3306b0af6d49SChao Yu {
3307b0af6d49SChao Yu struct inode *inode = mapping->host;
3308b0af6d49SChao Yu
3309b0af6d49SChao Yu return __f2fs_write_data_pages(mapping, wbc,
3310b0af6d49SChao Yu F2FS_I(inode)->cp_task == current ?
3311b0af6d49SChao Yu FS_CP_DATA_IO : FS_DATA_IO);
3312b0af6d49SChao Yu }
3313b0af6d49SChao Yu
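/*
 * Undo a failed or partial write that extended past i_size by truncating
 * the page cache and preallocated blocks back; quota inodes are left alone
 * and fs-verity performs its own truncation.
 */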
3314a1e09b03SEric Biggers void f2fs_write_failed(struct inode *inode, loff_t to)
33153aab8f82SChao Yu {
3316819d9153SJaegeuk Kim loff_t i_size = i_size_read(inode);
33173aab8f82SChao Yu
33183f188c23SJaegeuk Kim if (IS_NOQUOTA(inode))
33193f188c23SJaegeuk Kim return;
33203f188c23SJaegeuk Kim
332195ae251fSEric Biggers /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
332295ae251fSEric Biggers if (to > i_size && !f2fs_verity_in_progress(inode)) {
3323e4544b63STim Murray f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
33246abaa83cSLinus Torvalds filemap_invalidate_lock(inode->i_mapping);
3325a33c1502SChao Yu
3326819d9153SJaegeuk Kim truncate_pagecache(inode, i_size);
3327c42d28ceSChao Yu f2fs_truncate_blocks(inode, i_size, true);
3328a33c1502SChao Yu
33296abaa83cSLinus Torvalds filemap_invalidate_unlock(inode->i_mapping);
3330e4544b63STim Murray f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
33313aab8f82SChao Yu }
33323aab8f82SChao Yu }
33333aab8f82SChao Yu
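/*
 * Resolve the block address backing the page given to ->write_begin().
 * Inline data is either served directly or converted first; otherwise the
 * extent cache is consulted, then the dnode, and a new block is reserved
 * for holes under F2FS_GET_BLOCK_PRE_AIO locking.
 */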
33342aadac08SJaegeuk Kim static int prepare_write_begin(struct f2fs_sb_info *sbi,
33352aadac08SJaegeuk Kim struct page *page, loff_t pos, unsigned len,
33362aadac08SJaegeuk Kim block_t *blk_addr, bool *node_changed)
33372aadac08SJaegeuk Kim {
33382aadac08SJaegeuk Kim struct inode *inode = page->mapping->host;
33392aadac08SJaegeuk Kim pgoff_t index = page->index;
33402aadac08SJaegeuk Kim struct dnode_of_data dn;
33412aadac08SJaegeuk Kim struct page *ipage;
3342b4d07a3eSJaegeuk Kim bool locked = false;
334344b0dfebSChristoph Hellwig int flag = F2FS_GET_BLOCK_PRE_AIO;
33442aadac08SJaegeuk Kim int err = 0;
33452aadac08SJaegeuk Kim
334624b84912SJaegeuk Kim /*
33473d697a4aSEric Biggers * If a whole page is being written and we already preallocated all the
33483d697a4aSEric Biggers * blocks, then there is no need to get a block address now.
334924b84912SJaegeuk Kim */
33503d697a4aSEric Biggers if (len == PAGE_SIZE && is_inode_flag_set(inode, FI_PREALLOCATED_ALL))
335124b84912SJaegeuk Kim return 0;
335224b84912SJaegeuk Kim
33532866fb16SSheng Yong /* f2fs_lock_op avoids race between write CP and convert_inline_page */
335444b0dfebSChristoph Hellwig if (f2fs_has_inline_data(inode)) {
335544b0dfebSChristoph Hellwig if (pos + len > MAX_INLINE_DATA(inode))
33562866fb16SSheng Yong flag = F2FS_GET_BLOCK_DEFAULT;
335744b0dfebSChristoph Hellwig f2fs_map_lock(sbi, flag);
335844b0dfebSChristoph Hellwig locked = true;
335944b0dfebSChristoph Hellwig } else if ((pos & PAGE_MASK) >= i_size_read(inode)) {
33602f51ade9SChristoph Hellwig f2fs_map_lock(sbi, flag);
3361b4d07a3eSJaegeuk Kim locked = true;
3362b4d07a3eSJaegeuk Kim }
33634c8ff709SChao Yu
3364b4d07a3eSJaegeuk Kim restart:
33652aadac08SJaegeuk Kim /* check inline_data */
33664d57b86dSChao Yu ipage = f2fs_get_node_page(sbi, inode->i_ino);
33672aadac08SJaegeuk Kim if (IS_ERR(ipage)) {
33682aadac08SJaegeuk Kim err = PTR_ERR(ipage);
33692aadac08SJaegeuk Kim goto unlock_out;
33702aadac08SJaegeuk Kim }
33712aadac08SJaegeuk Kim
33722aadac08SJaegeuk Kim set_new_dnode(&dn, inode, ipage, ipage, 0);
33732aadac08SJaegeuk Kim
33742aadac08SJaegeuk Kim if (f2fs_has_inline_data(inode)) {
3375f2470371SChao Yu if (pos + len <= MAX_INLINE_DATA(inode)) {
33764d57b86dSChao Yu f2fs_do_read_inline_data(page, ipage);
337791942321SJaegeuk Kim set_inode_flag(inode, FI_DATA_EXIST);
3378ab47036dSChao Yu if (inode->i_nlink)
3379b763f3beSChao Yu set_page_private_inline(ipage);
3380b4d07a3eSJaegeuk Kim goto out;
3381b4d07a3eSJaegeuk Kim }
338244b0dfebSChristoph Hellwig err = f2fs_convert_inline_page(&dn, page);
338344b0dfebSChristoph Hellwig if (err || dn.data_blkaddr != NULL_ADDR)
338444b0dfebSChristoph Hellwig goto out;
338544b0dfebSChristoph Hellwig }
338644b0dfebSChristoph Hellwig
338744b0dfebSChristoph Hellwig if (!f2fs_lookup_read_extent_cache_block(inode, index,
338804a91ab0SChristoph Hellwig &dn.data_blkaddr)) {
338944b0dfebSChristoph Hellwig if (locked) {
339044b0dfebSChristoph Hellwig err = f2fs_reserve_block(&dn, index);
339144b0dfebSChristoph Hellwig goto out;
339244b0dfebSChristoph Hellwig }
339344b0dfebSChristoph Hellwig
3394b4d07a3eSJaegeuk Kim /* hole case */
33954d57b86dSChao Yu err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
339644b0dfebSChristoph Hellwig if (!err && dn.data_blkaddr != NULL_ADDR)
339744b0dfebSChristoph Hellwig goto out;
3398b4d07a3eSJaegeuk Kim f2fs_put_dnode(&dn);
33992f51ade9SChristoph Hellwig f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
34002866fb16SSheng Yong WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
3401b4d07a3eSJaegeuk Kim locked = true;
3402b4d07a3eSJaegeuk Kim goto restart;
3403b4d07a3eSJaegeuk Kim }
340444b0dfebSChristoph Hellwig out:
340544b0dfebSChristoph Hellwig if (!err) {
34062aadac08SJaegeuk Kim /* convert_inline_page can make node_changed */
34072aadac08SJaegeuk Kim *blk_addr = dn.data_blkaddr;
34082aadac08SJaegeuk Kim *node_changed = dn.node_changed;
340944b0dfebSChristoph Hellwig }
34102aadac08SJaegeuk Kim f2fs_put_dnode(&dn);
34112aadac08SJaegeuk Kim unlock_out:
3412b4d07a3eSJaegeuk Kim if (locked)
34132f51ade9SChristoph Hellwig f2fs_map_unlock(sbi, flag);
34142aadac08SJaegeuk Kim return err;
34152aadac08SJaegeuk Kim }
34162aadac08SJaegeuk Kim
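/*
 * Look up the block address at @index without reserving anything; lookup
 * failures are reported as a hole (NULL_ADDR) rather than an error.
 */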
34173db1de0eSDaeho Jeong static int __find_data_block(struct inode *inode, pgoff_t index,
34183db1de0eSDaeho Jeong block_t *blk_addr)
34193db1de0eSDaeho Jeong {
34203db1de0eSDaeho Jeong struct dnode_of_data dn;
34213db1de0eSDaeho Jeong struct page *ipage;
34223db1de0eSDaeho Jeong int err = 0;
34233db1de0eSDaeho Jeong
34243db1de0eSDaeho Jeong ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
34253db1de0eSDaeho Jeong if (IS_ERR(ipage))
34263db1de0eSDaeho Jeong return PTR_ERR(ipage);
34273db1de0eSDaeho Jeong
34283db1de0eSDaeho Jeong set_new_dnode(&dn, inode, ipage, ipage, 0);
34293db1de0eSDaeho Jeong
343004a91ab0SChristoph Hellwig if (!f2fs_lookup_read_extent_cache_block(inode, index,
343104a91ab0SChristoph Hellwig &dn.data_blkaddr)) {
34323db1de0eSDaeho Jeong /* hole case */
34333db1de0eSDaeho Jeong err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
34343db1de0eSDaeho Jeong if (err) {
34353db1de0eSDaeho Jeong dn.data_blkaddr = NULL_ADDR;
34363db1de0eSDaeho Jeong err = 0;
34373db1de0eSDaeho Jeong }
34383db1de0eSDaeho Jeong }
34393db1de0eSDaeho Jeong *blk_addr = dn.data_blkaddr;
34403db1de0eSDaeho Jeong f2fs_put_dnode(&dn);
34413db1de0eSDaeho Jeong return err;
34423db1de0eSDaeho Jeong }
34433db1de0eSDaeho Jeong
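/*
 * Reserve a new block at @index unless the extent cache already maps one,
 * holding the PRE_AIO map lock for the duration; callers pass the COW
 * inode when preparing atomic writes.
 */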
34443db1de0eSDaeho Jeong static int __reserve_data_block(struct inode *inode, pgoff_t index,
34453db1de0eSDaeho Jeong block_t *blk_addr, bool *node_changed)
34463db1de0eSDaeho Jeong {
34473db1de0eSDaeho Jeong struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
34483db1de0eSDaeho Jeong struct dnode_of_data dn;
34493db1de0eSDaeho Jeong struct page *ipage;
34503db1de0eSDaeho Jeong int err = 0;
34513db1de0eSDaeho Jeong
34522f51ade9SChristoph Hellwig f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
34533db1de0eSDaeho Jeong
34543db1de0eSDaeho Jeong ipage = f2fs_get_node_page(sbi, inode->i_ino);
34553db1de0eSDaeho Jeong if (IS_ERR(ipage)) {
34563db1de0eSDaeho Jeong err = PTR_ERR(ipage);
34573db1de0eSDaeho Jeong goto unlock_out;
34583db1de0eSDaeho Jeong }
34593db1de0eSDaeho Jeong set_new_dnode(&dn, inode, ipage, ipage, 0);
34603db1de0eSDaeho Jeong
3461ffdeab71SChristoph Hellwig if (!f2fs_lookup_read_extent_cache_block(dn.inode, index,
3462ffdeab71SChristoph Hellwig &dn.data_blkaddr))
3463ffdeab71SChristoph Hellwig err = f2fs_reserve_block(&dn, index);
34643db1de0eSDaeho Jeong
34653db1de0eSDaeho Jeong *blk_addr = dn.data_blkaddr;
34663db1de0eSDaeho Jeong *node_changed = dn.node_changed;
34673db1de0eSDaeho Jeong f2fs_put_dnode(&dn);
34683db1de0eSDaeho Jeong
34693db1de0eSDaeho Jeong unlock_out:
34702f51ade9SChristoph Hellwig f2fs_map_unlock(sbi, F2FS_GET_BLOCK_PRE_AIO);
34713db1de0eSDaeho Jeong return err;
34723db1de0eSDaeho Jeong }
34733db1de0eSDaeho Jeong
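/*
 * For an atomic write, decide which block backs the page: reuse a block
 * already present in the COW inode if one exists; otherwise reserve a new
 * COW block and, unless FI_ATOMIC_REPLACE is set, report the original
 * inode's block so the existing data can be brought up to date.
 */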
34743db1de0eSDaeho Jeong static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
34753db1de0eSDaeho Jeong struct page *page, loff_t pos, unsigned int len,
3476591fc34eSDaeho Jeong block_t *blk_addr, bool *node_changed, bool *use_cow)
34773db1de0eSDaeho Jeong {
34783db1de0eSDaeho Jeong struct inode *inode = page->mapping->host;
34793db1de0eSDaeho Jeong struct inode *cow_inode = F2FS_I(inode)->cow_inode;
34803db1de0eSDaeho Jeong pgoff_t index = page->index;
34813db1de0eSDaeho Jeong int err = 0;
3482f8e2f32bSDaeho Jeong block_t ori_blk_addr = NULL_ADDR;
34833db1de0eSDaeho Jeong
34843db1de0eSDaeho Jeong /* If pos is beyond the end of file, reserve a new block in COW inode */
34853db1de0eSDaeho Jeong if ((pos & PAGE_MASK) >= i_size_read(inode))
3486f8e2f32bSDaeho Jeong goto reserve_block;
34873db1de0eSDaeho Jeong
34883db1de0eSDaeho Jeong /* Look for the block in COW inode first */
34893db1de0eSDaeho Jeong err = __find_data_block(cow_inode, index, blk_addr);
3490591fc34eSDaeho Jeong if (err) {
34913db1de0eSDaeho Jeong return err;
3492591fc34eSDaeho Jeong } else if (*blk_addr != NULL_ADDR) {
3493591fc34eSDaeho Jeong *use_cow = true;
34943db1de0eSDaeho Jeong return 0;
3495591fc34eSDaeho Jeong }
34963db1de0eSDaeho Jeong
349741e8f85aSDaeho Jeong if (is_inode_flag_set(inode, FI_ATOMIC_REPLACE))
349841e8f85aSDaeho Jeong goto reserve_block;
349941e8f85aSDaeho Jeong
35003db1de0eSDaeho Jeong /* Look for the block in the original inode */
35013db1de0eSDaeho Jeong err = __find_data_block(inode, index, &ori_blk_addr);
35023db1de0eSDaeho Jeong if (err)
35033db1de0eSDaeho Jeong return err;
35043db1de0eSDaeho Jeong
3505f8e2f32bSDaeho Jeong reserve_block:
35063db1de0eSDaeho Jeong /* Finally, we should reserve a new block in the COW inode for the update */
35073db1de0eSDaeho Jeong err = __reserve_data_block(cow_inode, index, blk_addr, node_changed);
35083db1de0eSDaeho Jeong if (err)
35093db1de0eSDaeho Jeong return err;
3510f8e2f32bSDaeho Jeong inc_atomic_write_cnt(inode);
35113db1de0eSDaeho Jeong
35123db1de0eSDaeho Jeong if (ori_blk_addr != NULL_ADDR)
35133db1de0eSDaeho Jeong *blk_addr = ori_blk_addr;
35143db1de0eSDaeho Jeong return 0;
35153db1de0eSDaeho Jeong }
35163db1de0eSDaeho Jeong
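/*
 * Editor's summary of the function below: .write_begin locks and prepares
 * the page the caller will copy user data into. On success the page is
 * returned locked via *pagep and is later unlocked and released by
 * f2fs_write_end().
 */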
3517eb47b800SJaegeuk Kim static int f2fs_write_begin(struct file *file, struct address_space *mapping,
35189d6b0cd7SMatthew Wilcox (Oracle) loff_t pos, unsigned len, struct page **pagep, void **fsdata)
3519eb47b800SJaegeuk Kim {
3520eb47b800SJaegeuk Kim struct inode *inode = mapping->host;
35214081363fSJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
352286531d6bSJaegeuk Kim struct page *page = NULL;
352309cbfeafSKirill A. Shutemov pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
35243db1de0eSDaeho Jeong bool need_balance = false;
3525591fc34eSDaeho Jeong bool use_cow = false;
35262aadac08SJaegeuk Kim block_t blkaddr = NULL_ADDR;
3527eb47b800SJaegeuk Kim int err = 0;
3528eb47b800SJaegeuk Kim
35299d6b0cd7SMatthew Wilcox (Oracle) trace_f2fs_write_begin(inode, pos, len);
353062aed044SChao Yu
353100e09c0bSChao Yu if (!f2fs_is_checkpoint_ready(sbi)) {
353200e09c0bSChao Yu err = -ENOSPC;
35334354994fSDaniel Rosenberg goto fail;
353400e09c0bSChao Yu }
35354354994fSDaniel Rosenberg
35365f727395SJaegeuk Kim /*
35375f727395SJaegeuk Kim * We should check this at this moment to avoid a deadlock between the
35385f727395SJaegeuk Kim * inode page and page #0. The locking rule for inline_data conversion should be:
35395f727395SJaegeuk Kim * lock_page(page #0) -> lock_page(inode_page)
35405f727395SJaegeuk Kim */
35415f727395SJaegeuk Kim if (index != 0) {
35425f727395SJaegeuk Kim err = f2fs_convert_inline_inode(inode);
35435f727395SJaegeuk Kim if (err)
35445f727395SJaegeuk Kim goto fail;
35455f727395SJaegeuk Kim }
35464c8ff709SChao Yu
35474c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
35484c8ff709SChao Yu if (f2fs_compressed_file(inode)) {
35494c8ff709SChao Yu int ret;
35504c8ff709SChao Yu
35514c8ff709SChao Yu *fsdata = NULL;
35524c8ff709SChao Yu
35539b56adcfSFengnan Chang if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
35547eab7a69SFengnan Chang goto repeat;
35557eab7a69SFengnan Chang
35564c8ff709SChao Yu ret = f2fs_prepare_compress_overwrite(inode, pagep,
35574c8ff709SChao Yu index, fsdata);
35584c8ff709SChao Yu if (ret < 0) {
35594c8ff709SChao Yu err = ret;
35604c8ff709SChao Yu goto fail;
35614c8ff709SChao Yu } else if (ret) {
35624c8ff709SChao Yu return 0;
35634c8ff709SChao Yu }
35644c8ff709SChao Yu }
35654c8ff709SChao Yu #endif
35664c8ff709SChao Yu
3567afcb7ca0SJaegeuk Kim repeat:
356886d54795SJaegeuk Kim /*
356986d54795SJaegeuk Kim * Do not use grab_cache_page_write_begin() to avoid a deadlock due to
357086d54795SJaegeuk Kim * wait_for_stable_page. We will wait on it below with our IO control.
357186d54795SJaegeuk Kim */
357201eccef7SChao Yu page = f2fs_pagecache_get_page(mapping, index,
357386d54795SJaegeuk Kim FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
35743aab8f82SChao Yu if (!page) {
35753aab8f82SChao Yu err = -ENOMEM;
35763aab8f82SChao Yu goto fail;
35773aab8f82SChao Yu }
3578d5f66990SJaegeuk Kim
35794c8ff709SChao Yu /* TODO: cluster can be compressed due to race with .writepage */
35804c8ff709SChao Yu
3581eb47b800SJaegeuk Kim *pagep = page;
3582eb47b800SJaegeuk Kim
35833db1de0eSDaeho Jeong if (f2fs_is_atomic_file(inode))
35843db1de0eSDaeho Jeong err = prepare_atomic_write_begin(sbi, page, pos, len,
3585591fc34eSDaeho Jeong &blkaddr, &need_balance, &use_cow);
35863db1de0eSDaeho Jeong else
35872aadac08SJaegeuk Kim err = prepare_write_begin(sbi, page, pos, len,
35882aadac08SJaegeuk Kim &blkaddr, &need_balance);
3589b3d208f9SJaegeuk Kim if (err)
35902aadac08SJaegeuk Kim goto fail;
3591759af1c9SFan Li
3592af033b2aSChao Yu if (need_balance && !IS_NOQUOTA(inode) &&
3593af033b2aSChao Yu has_not_enough_free_secs(sbi, 0, 0)) {
35942a340760SJaegeuk Kim unlock_page(page);
35952c4db1a6SJaegeuk Kim f2fs_balance_fs(sbi, true);
35962a340760SJaegeuk Kim lock_page(page);
35972a340760SJaegeuk Kim if (page->mapping != mapping) {
35982a340760SJaegeuk Kim /* The page got truncated from under us */
35992a340760SJaegeuk Kim f2fs_put_page(page, 1);
36002a340760SJaegeuk Kim goto repeat;
36012a340760SJaegeuk Kim }
36022a340760SJaegeuk Kim }
36032a340760SJaegeuk Kim
3604bae0ee7aSChao Yu f2fs_wait_on_page_writeback(page, DATA, false, true);
3605b3d208f9SJaegeuk Kim
3606649d7df2SJaegeuk Kim if (len == PAGE_SIZE || PageUptodate(page))
3607649d7df2SJaegeuk Kim return 0;
3608eb47b800SJaegeuk Kim
360995ae251fSEric Biggers if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
361095ae251fSEric Biggers !f2fs_verity_in_progress(inode)) {
3611746e2403SYunlei He zero_user_segment(page, len, PAGE_SIZE);
3612746e2403SYunlei He return 0;
3613746e2403SYunlei He }
3614746e2403SYunlei He
36152aadac08SJaegeuk Kim if (blkaddr == NEW_ADDR) {
361609cbfeafSKirill A. Shutemov zero_user_segment(page, 0, PAGE_SIZE);
3617649d7df2SJaegeuk Kim SetPageUptodate(page);
3618d54c795bSChao Yu } else {
361993770ab7SChao Yu if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
362093770ab7SChao Yu DATA_GENERIC_ENHANCE_READ)) {
362110f966bbSChao Yu err = -EFSCORRUPTED;
362295fa90c9SChao Yu f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
362393770ab7SChao Yu goto fail;
362493770ab7SChao Yu }
3625591fc34eSDaeho Jeong err = f2fs_submit_page_read(use_cow ?
3626591fc34eSDaeho Jeong F2FS_I(inode)->cow_inode : inode, page,
3627591fc34eSDaeho Jeong blkaddr, 0, true);
362813ba41e3SJaegeuk Kim if (err)
36293aab8f82SChao Yu goto fail;
3630d54c795bSChao Yu
3631393ff91fSJaegeuk Kim lock_page(page);
36326bacf52fSJaegeuk Kim if (unlikely(page->mapping != mapping)) {
3633afcb7ca0SJaegeuk Kim f2fs_put_page(page, 1);
3634afcb7ca0SJaegeuk Kim goto repeat;
3635eb47b800SJaegeuk Kim }
36361563ac75SChao Yu if (unlikely(!PageUptodate(page))) {
36371563ac75SChao Yu err = -EIO;
36384375a336SJaegeuk Kim goto fail;
36394375a336SJaegeuk Kim }
36404375a336SJaegeuk Kim }
3641eb47b800SJaegeuk Kim return 0;
36429ba69cf9SJaegeuk Kim
36433aab8f82SChao Yu fail:
364486531d6bSJaegeuk Kim f2fs_put_page(page, 1);
36453e679dc7SEric Biggers f2fs_write_failed(inode, pos + len);
36463aab8f82SChao Yu return err;
3647eb47b800SJaegeuk Kim }
3648eb47b800SJaegeuk Kim
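/*
 * Editor's summary of the function below: .write_end marks the copied range
 * dirty and uptodate, extends i_size (and the COW inode's i_size for atomic
 * files) when the write passed EOF, then unlocks and releases the page
 * pinned by f2fs_write_begin().
 */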
3649a1dd3c13SJaegeuk Kim static int f2fs_write_end(struct file *file,
3650a1dd3c13SJaegeuk Kim struct address_space *mapping,
3651a1dd3c13SJaegeuk Kim loff_t pos, unsigned len, unsigned copied,
3652a1dd3c13SJaegeuk Kim struct page *page, void *fsdata)
3653a1dd3c13SJaegeuk Kim {
3654a1dd3c13SJaegeuk Kim struct inode *inode = page->mapping->host;
3655a1dd3c13SJaegeuk Kim
3656dfb2bf38SChao Yu trace_f2fs_write_end(inode, pos, len, copied);
3657dfb2bf38SChao Yu
3658649d7df2SJaegeuk Kim /*
3659649d7df2SJaegeuk Kim * This should have come from len == PAGE_SIZE, and we expect copied
3660649d7df2SJaegeuk Kim * to be PAGE_SIZE. Otherwise, we treat it as a zero-byte copy and
3661649d7df2SJaegeuk Kim * let generic_perform_write() retry the copy by returning copied=0.
3662649d7df2SJaegeuk Kim */
3663649d7df2SJaegeuk Kim if (!PageUptodate(page)) {
3664746e2403SYunlei He if (unlikely(copied != len))
3665649d7df2SJaegeuk Kim copied = 0;
3666649d7df2SJaegeuk Kim else
3667649d7df2SJaegeuk Kim SetPageUptodate(page);
3668649d7df2SJaegeuk Kim }
36694c8ff709SChao Yu
36704c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
36714c8ff709SChao Yu /* overwrite compressed file */
36724c8ff709SChao Yu if (f2fs_compressed_file(inode) && fsdata) {
36734c8ff709SChao Yu f2fs_compress_write_end(inode, fsdata, page->index, copied);
36744c8ff709SChao Yu f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3675944dd22eSChao Yu
3676944dd22eSChao Yu if (pos + copied > i_size_read(inode) &&
3677944dd22eSChao Yu !f2fs_verity_in_progress(inode))
3678944dd22eSChao Yu f2fs_i_size_write(inode, pos + copied);
36794c8ff709SChao Yu return copied;
36804c8ff709SChao Yu }
36814c8ff709SChao Yu #endif
36824c8ff709SChao Yu
3683649d7df2SJaegeuk Kim if (!copied)
3684649d7df2SJaegeuk Kim goto unlock_out;
3685649d7df2SJaegeuk Kim
3686a1dd3c13SJaegeuk Kim set_page_dirty(page);
3687a1dd3c13SJaegeuk Kim
3688d889928bSChao Yu if (f2fs_is_atomic_file(inode))
3689d889928bSChao Yu set_page_private_atomic(page);
3690d889928bSChao Yu
369195ae251fSEric Biggers if (pos + copied > i_size_read(inode) &&
36923db1de0eSDaeho Jeong !f2fs_verity_in_progress(inode)) {
3693fc9581c8SJaegeuk Kim f2fs_i_size_write(inode, pos + copied);
36943db1de0eSDaeho Jeong if (f2fs_is_atomic_file(inode))
36953db1de0eSDaeho Jeong f2fs_i_size_write(F2FS_I(inode)->cow_inode,
36963db1de0eSDaeho Jeong pos + copied);
36973db1de0eSDaeho Jeong }
3698649d7df2SJaegeuk Kim unlock_out:
36993024c9a1SChao Yu f2fs_put_page(page, 1);
3700d0239e1bSJaegeuk Kim f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3701a1dd3c13SJaegeuk Kim return copied;
3702a1dd3c13SJaegeuk Kim }
3703a1dd3c13SJaegeuk Kim
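/*
 * Editor's summary of the function below: called when (part of) a folio is
 * dropped from the page cache; fixes up the dirty page accounting and
 * clears any f2fs private page state.
 */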
370491503996SMatthew Wilcox (Oracle) void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
3705eb47b800SJaegeuk Kim {
370691503996SMatthew Wilcox (Oracle) struct inode *inode = folio->mapping->host;
3707487261f3SChao Yu struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3708a7ffdbe2SJaegeuk Kim
3709487261f3SChao Yu if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
371091503996SMatthew Wilcox (Oracle) (offset || length != folio_size(folio)))
3711a7ffdbe2SJaegeuk Kim return;
3712a7ffdbe2SJaegeuk Kim
371391503996SMatthew Wilcox (Oracle) if (folio_test_dirty(folio)) {
3714933439c8SChao Yu if (inode->i_ino == F2FS_META_INO(sbi)) {
3715487261f3SChao Yu dec_page_count(sbi, F2FS_DIRTY_META);
3716933439c8SChao Yu } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
3717487261f3SChao Yu dec_page_count(sbi, F2FS_DIRTY_NODES);
3718933439c8SChao Yu } else {
3719a7ffdbe2SJaegeuk Kim inode_dec_dirty_pages(inode);
37204d57b86dSChao Yu f2fs_remove_dirty_inode(inode);
3721933439c8SChao Yu }
3722487261f3SChao Yu }
3723635a52daSChao Yu clear_page_private_all(&folio->page);
3724eb47b800SJaegeuk Kim }
3725eb47b800SJaegeuk Kim
3726c26cd045SMatthew Wilcox (Oracle) bool f2fs_release_folio(struct folio *folio, gfp_t wait)
3727eb47b800SJaegeuk Kim {
3728c26cd045SMatthew Wilcox (Oracle) /* If this is a dirty folio, keep its private data */
3729c26cd045SMatthew Wilcox (Oracle) if (folio_test_dirty(folio))
3730c26cd045SMatthew Wilcox (Oracle) return false;
3731f68daeebSJaegeuk Kim
3732635a52daSChao Yu clear_page_private_all(&folio->page);
3733c26cd045SMatthew Wilcox (Oracle) return true;
3734eb47b800SJaegeuk Kim }
3735eb47b800SJaegeuk Kim
37364f5e34f7SMatthew Wilcox (Oracle) static bool f2fs_dirty_data_folio(struct address_space *mapping,
37374f5e34f7SMatthew Wilcox (Oracle) struct folio *folio)
3738eb47b800SJaegeuk Kim {
37394f5e34f7SMatthew Wilcox (Oracle) struct inode *inode = mapping->host;
3740eb47b800SJaegeuk Kim
37414f5e34f7SMatthew Wilcox (Oracle) trace_f2fs_set_page_dirty(&folio->page, DATA);
374226c6b887SJaegeuk Kim
37434f5e34f7SMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio))
37444f5e34f7SMatthew Wilcox (Oracle) folio_mark_uptodate(folio);
37454f5e34f7SMatthew Wilcox (Oracle) BUG_ON(folio_test_swapcache(folio));
374634ba94baSJaegeuk Kim
37479b7eadd9SShuqi Zhang if (filemap_dirty_folio(mapping, folio)) {
37484f5e34f7SMatthew Wilcox (Oracle) f2fs_update_dirty_folio(inode, folio);
37494f5e34f7SMatthew Wilcox (Oracle) return true;
3750eb47b800SJaegeuk Kim }
37510fb5b2ebSMatthew Wilcox (Oracle) return false;
3752eb47b800SJaegeuk Kim }
3753eb47b800SJaegeuk Kim
3754c1c63387SChao Yu
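/*
 * Editor's summary of the function below: map one logical block of a
 * compression-enabled file to its physical block. Only blocks in
 * uncompressed clusters have a stable physical address; compressed
 * clusters, holes, and invalid addresses map to 0.
 */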
3755c1c63387SChao Yu static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
3756c1c63387SChao Yu {
3757c1c63387SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
3758c1c63387SChao Yu struct dnode_of_data dn;
3759c1c63387SChao Yu sector_t start_idx, blknr = 0;
3760c1c63387SChao Yu int ret;
3761c1c63387SChao Yu
3762c1c63387SChao Yu start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
3763c1c63387SChao Yu
3764c1c63387SChao Yu set_new_dnode(&dn, inode, NULL, NULL, 0);
3765c1c63387SChao Yu ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
3766c1c63387SChao Yu if (ret)
3767c1c63387SChao Yu return 0;
3768c1c63387SChao Yu
3769c1c63387SChao Yu if (dn.data_blkaddr != COMPRESS_ADDR) {
3770c1c63387SChao Yu dn.ofs_in_node += block - start_idx;
3771c1c63387SChao Yu blknr = f2fs_data_blkaddr(&dn);
3772c1c63387SChao Yu if (!__is_valid_data_blkaddr(blknr))
3773c1c63387SChao Yu blknr = 0;
3774c1c63387SChao Yu }
3775c1c63387SChao Yu
3776c1c63387SChao Yu f2fs_put_dnode(&dn);
3777c1c63387SChao Yu return blknr;
3778c1c63387SChao Yu #else
3779250e84d7SChao Yu return 0;
3780c1c63387SChao Yu #endif
3781c1c63387SChao Yu }
3782c1c63387SChao Yu
3783c1c63387SChao Yu
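/*
 * Editor's summary of the function below: .bmap translates a logical file
 * block into a physical block number. Dirty pages are flushed first so the
 * answer reflects the on-disk layout; inline-data inodes and out-of-range
 * blocks report 0.
 */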
3784c01e54b7SJaegeuk Kim static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
3785c01e54b7SJaegeuk Kim {
3786454ae7e5SChao Yu struct inode *inode = mapping->host;
3787b79b0a31SChao Yu sector_t blknr = 0;
3788454ae7e5SChao Yu
37891d373a0eSJaegeuk Kim if (f2fs_has_inline_data(inode))
3790b79b0a31SChao Yu goto out;
37911d373a0eSJaegeuk Kim
37921d373a0eSJaegeuk Kim /* make sure all the blocks are allocated */
37931d373a0eSJaegeuk Kim if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
37941d373a0eSJaegeuk Kim filemap_write_and_wait(mapping);
37951d373a0eSJaegeuk Kim
37964eda1682SDaeho Jeong /* Block number must be less than max_file_blocks(inode) */
37976d1451bfSChengguang Xu if (unlikely(block >= max_file_blocks(inode)))
37984eda1682SDaeho Jeong goto out;
3799c1c63387SChao Yu
38004eda1682SDaeho Jeong if (f2fs_compressed_file(inode)) {
38014eda1682SDaeho Jeong blknr = f2fs_bmap_compress(inode, block);
38024eda1682SDaeho Jeong } else {
3803b876f4c9SJaegeuk Kim struct f2fs_map_blocks map;
3804b876f4c9SJaegeuk Kim
3805b876f4c9SJaegeuk Kim memset(&map, 0, sizeof(map));
3806b876f4c9SJaegeuk Kim map.m_lblk = block;
3807b876f4c9SJaegeuk Kim map.m_len = 1;
3808b876f4c9SJaegeuk Kim map.m_next_pgofs = NULL;
3809b876f4c9SJaegeuk Kim map.m_seg_type = NO_CHECK_TYPE;
3810b876f4c9SJaegeuk Kim
3811cd8fc522SChristoph Hellwig if (!f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_BMAP))
3812b876f4c9SJaegeuk Kim blknr = map.m_pblk;
38134eda1682SDaeho Jeong }
3814b79b0a31SChao Yu out:
3815b79b0a31SChao Yu trace_f2fs_bmap(inode, block, blknr);
3816b79b0a31SChao Yu return blknr;
3817429511cdSChao Yu }
3818429511cdSChao Yu
38194969c06aSJaegeuk Kim #ifdef CONFIG_SWAP
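/*
 * Editor's summary of the function below: rewrite @blkcnt blocks from
 * @start_blk into freshly allocated pinned sections, one section at a time.
 * Each page in the range is dirtied and written back under pin_sem so the
 * file data ends up in section-aligned extents suitable for swap.
 */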
3820859fca6bSChao Yu static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
3821859fca6bSChao Yu unsigned int blkcnt)
3822859fca6bSChao Yu {
3823859fca6bSChao Yu struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3824859fca6bSChao Yu unsigned int blkofs;
3825859fca6bSChao Yu unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
3826859fca6bSChao Yu unsigned int secidx = start_blk / blk_per_sec;
382740d76c39SDaeho Jeong unsigned int end_sec;
3828859fca6bSChao Yu int ret = 0;
3829859fca6bSChao Yu
383040d76c39SDaeho Jeong if (!blkcnt)
383140d76c39SDaeho Jeong return 0;
383240d76c39SDaeho Jeong end_sec = secidx + (blkcnt - 1) / blk_per_sec;
383340d76c39SDaeho Jeong
3834e4544b63STim Murray f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3835edc6d01bSJan Kara filemap_invalidate_lock(inode->i_mapping);
3836859fca6bSChao Yu
3837859fca6bSChao Yu set_inode_flag(inode, FI_ALIGNED_WRITE);
38381018a546SChao Yu set_inode_flag(inode, FI_OPU_WRITE);
3839859fca6bSChao Yu
384040d76c39SDaeho Jeong for (; secidx <= end_sec; secidx++) {
384140d76c39SDaeho Jeong unsigned int blkofs_end = secidx == end_sec ?
384240d76c39SDaeho Jeong (blkcnt - 1) % blk_per_sec : blk_per_sec - 1;
384340d76c39SDaeho Jeong
3844e4544b63STim Murray f2fs_down_write(&sbi->pin_sem);
3845859fca6bSChao Yu
384640d76c39SDaeho Jeong ret = f2fs_allocate_pinning_section(sbi);
384740d76c39SDaeho Jeong if (ret) {
384840d76c39SDaeho Jeong f2fs_up_write(&sbi->pin_sem);
384940d76c39SDaeho Jeong break;
385040d76c39SDaeho Jeong }
3851859fca6bSChao Yu
38521018a546SChao Yu set_inode_flag(inode, FI_SKIP_WRITES);
3853859fca6bSChao Yu
385440d76c39SDaeho Jeong for (blkofs = 0; blkofs <= blkofs_end; blkofs++) {
3855859fca6bSChao Yu struct page *page;
3856859fca6bSChao Yu unsigned int blkidx = secidx * blk_per_sec + blkofs;
3857859fca6bSChao Yu
3858859fca6bSChao Yu page = f2fs_get_lock_data_page(inode, blkidx, true);
3859859fca6bSChao Yu if (IS_ERR(page)) {
3860e4544b63STim Murray f2fs_up_write(&sbi->pin_sem);
3861859fca6bSChao Yu ret = PTR_ERR(page);
3862859fca6bSChao Yu goto done;
3863859fca6bSChao Yu }
3864859fca6bSChao Yu
3865859fca6bSChao Yu set_page_dirty(page);
3866859fca6bSChao Yu f2fs_put_page(page, 1);
3867859fca6bSChao Yu }
3868859fca6bSChao Yu
38691018a546SChao Yu clear_inode_flag(inode, FI_SKIP_WRITES);
3870859fca6bSChao Yu
3871859fca6bSChao Yu ret = filemap_fdatawrite(inode->i_mapping);
3872859fca6bSChao Yu
3873e4544b63STim Murray f2fs_up_write(&sbi->pin_sem);
3874859fca6bSChao Yu
3875859fca6bSChao Yu if (ret)
3876859fca6bSChao Yu break;
3877859fca6bSChao Yu }
3878859fca6bSChao Yu
3879859fca6bSChao Yu done:
38801018a546SChao Yu clear_inode_flag(inode, FI_SKIP_WRITES);
38811018a546SChao Yu clear_inode_flag(inode, FI_OPU_WRITE);
3882859fca6bSChao Yu clear_inode_flag(inode, FI_ALIGNED_WRITE);
3883859fca6bSChao Yu
3884edc6d01bSJan Kara filemap_invalidate_unlock(inode->i_mapping);
3885e4544b63STim Murray f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3886859fca6bSChao Yu
3887859fca6bSChao Yu return ret;
3888859fca6bSChao Yu }
3889859fca6bSChao Yu
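/*
 * Editor's summary of the function below: build the swap extent map for the
 * file by walking it with f2fs_map_blocks(), rejecting holes, migrating
 * extents that are not section-aligned, and feeding each aligned extent to
 * add_swap_extent().
 */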
38900b8fc006SChao Yu static int check_swap_activate(struct swap_info_struct *sis,
3891af4b6b8eSChao Yu struct file *swap_file, sector_t *span)
3892af4b6b8eSChao Yu {
3893af4b6b8eSChao Yu struct address_space *mapping = swap_file->f_mapping;
3894af4b6b8eSChao Yu struct inode *inode = mapping->host;
389536e4d958Shuangjianan@oppo.com struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
38966902179aSWu Bo block_t cur_lblock;
38976902179aSWu Bo block_t last_lblock;
38986902179aSWu Bo block_t pblock;
38996902179aSWu Bo block_t lowest_pblock = -1;
39006902179aSWu Bo block_t highest_pblock = 0;
3901af4b6b8eSChao Yu int nr_extents = 0;
39026902179aSWu Bo unsigned int nr_pblocks;
3903859fca6bSChao Yu unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
3904ca298241SJaegeuk Kim unsigned int not_aligned = 0;
390536e4d958Shuangjianan@oppo.com int ret = 0;
3906af4b6b8eSChao Yu
3907af4b6b8eSChao Yu /*
3908af4b6b8eSChao Yu * Map all the blocks into the extent list. This code doesn't try
3909af4b6b8eSChao Yu * to be very smart.
3910af4b6b8eSChao Yu */
3911af4b6b8eSChao Yu cur_lblock = 0;
39126cbfcab5SJaegeuk Kim last_lblock = bytes_to_blks(inode, i_size_read(inode));
3913af4b6b8eSChao Yu
39141da66103Shuangjianan@oppo.com while (cur_lblock < last_lblock && cur_lblock < sis->max) {
3915b876f4c9SJaegeuk Kim struct f2fs_map_blocks map;
3916859fca6bSChao Yu retry:
3917af4b6b8eSChao Yu cond_resched();
3918af4b6b8eSChao Yu
3919b876f4c9SJaegeuk Kim memset(&map, 0, sizeof(map));
3920b876f4c9SJaegeuk Kim map.m_lblk = cur_lblock;
392136e4d958Shuangjianan@oppo.com map.m_len = last_lblock - cur_lblock;
392236e4d958Shuangjianan@oppo.com map.m_next_pgofs = NULL;
392336e4d958Shuangjianan@oppo.com map.m_next_extent = NULL;
3924b876f4c9SJaegeuk Kim map.m_seg_type = NO_CHECK_TYPE;
392536e4d958Shuangjianan@oppo.com map.m_may_create = false;
3926af4b6b8eSChao Yu
3927cd8fc522SChristoph Hellwig ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
3928af4b6b8eSChao Yu if (ret)
392936e4d958Shuangjianan@oppo.com goto out;
3930af4b6b8eSChao Yu
3931af4b6b8eSChao Yu /* hole */
393236e4d958Shuangjianan@oppo.com if (!(map.m_flags & F2FS_MAP_FLAGS)) {
3933833dcd35SJoe Perches f2fs_err(sbi, "Swapfile has holes");
3934f395183fSJaegeuk Kim ret = -EINVAL;
393536e4d958Shuangjianan@oppo.com goto out;
393636e4d958Shuangjianan@oppo.com }
3937af4b6b8eSChao Yu
3938b876f4c9SJaegeuk Kim pblock = map.m_pblk;
3939b876f4c9SJaegeuk Kim nr_pblocks = map.m_len;
3940af4b6b8eSChao Yu
39416902179aSWu Bo if ((pblock - SM_I(sbi)->main_blkaddr) % blks_per_sec ||
39426902179aSWu Bo nr_pblocks % blks_per_sec ||
394340d76c39SDaeho Jeong !f2fs_valid_pinned_area(sbi, pblock)) {
394440d76c39SDaeho Jeong bool last_extent = false;
394540d76c39SDaeho Jeong
3946ca298241SJaegeuk Kim not_aligned++;
3947859fca6bSChao Yu
3948859fca6bSChao Yu nr_pblocks = roundup(nr_pblocks, blks_per_sec);
3949859fca6bSChao Yu if (cur_lblock + nr_pblocks > sis->max)
3950859fca6bSChao Yu nr_pblocks -= blks_per_sec;
3951859fca6bSChao Yu
3952859fca6bSChao Yu /* this extent is the last one */
395340d76c39SDaeho Jeong if (!nr_pblocks) {
395440d76c39SDaeho Jeong nr_pblocks = last_lblock - cur_lblock;
395540d76c39SDaeho Jeong last_extent = true;
3956ca298241SJaegeuk Kim }
395736e4d958Shuangjianan@oppo.com
3958859fca6bSChao Yu ret = f2fs_migrate_blocks(inode, cur_lblock,
3959859fca6bSChao Yu nr_pblocks);
396040d76c39SDaeho Jeong if (ret) {
396140d76c39SDaeho Jeong if (ret == -ENOENT)
396240d76c39SDaeho Jeong ret = -EINVAL;
3963859fca6bSChao Yu goto out;
396440d76c39SDaeho Jeong }
396540d76c39SDaeho Jeong
396640d76c39SDaeho Jeong if (!last_extent)
3967859fca6bSChao Yu goto retry;
3968859fca6bSChao Yu }
396940d76c39SDaeho Jeong
3970af4b6b8eSChao Yu if (cur_lblock + nr_pblocks >= sis->max)
3971af4b6b8eSChao Yu nr_pblocks = sis->max - cur_lblock;
3972af4b6b8eSChao Yu
3973af4b6b8eSChao Yu if (cur_lblock) { /* exclude the header page */
3974af4b6b8eSChao Yu if (pblock < lowest_pblock)
3975af4b6b8eSChao Yu lowest_pblock = pblock;
3976af4b6b8eSChao Yu if (pblock + nr_pblocks - 1 > highest_pblock)
3977af4b6b8eSChao Yu highest_pblock = pblock + nr_pblocks - 1;
3978af4b6b8eSChao Yu }
3979af4b6b8eSChao Yu
3980af4b6b8eSChao Yu /*
3981af4b6b8eSChao Yu * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
3982af4b6b8eSChao Yu */
3983af4b6b8eSChao Yu ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
3984af4b6b8eSChao Yu if (ret < 0)
3985af4b6b8eSChao Yu goto out;
3986af4b6b8eSChao Yu nr_extents += ret;
3987af4b6b8eSChao Yu cur_lblock += nr_pblocks;
3988af4b6b8eSChao Yu }
3989af4b6b8eSChao Yu ret = nr_extents;
3990af4b6b8eSChao Yu *span = 1 + highest_pblock - lowest_pblock;
3991af4b6b8eSChao Yu if (cur_lblock == 0)
3992af4b6b8eSChao Yu cur_lblock = 1; /* force Empty message */
3993af4b6b8eSChao Yu sis->max = cur_lblock;
3994af4b6b8eSChao Yu sis->pages = cur_lblock - 1;
3995af4b6b8eSChao Yu sis->highest_bit = cur_lblock - 1;
3996af4b6b8eSChao Yu out:
3997859fca6bSChao Yu if (not_aligned)
3998859fca6bSChao Yu f2fs_warn(sbi, "Swapfile (%u) is not aligned to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%u * N)",
3999859fca6bSChao Yu not_aligned, blks_per_sec * F2FS_BLKSIZE);
4000af4b6b8eSChao Yu return ret;
4001af4b6b8eSChao Yu }
4002af4b6b8eSChao Yu
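/*
 * Editor's summary of the function below: .swap_activate validates the file
 * (regular, writable fs, not pure LFS mode, not inline or compressed),
 * flushes it, builds the swap extent map, and pins the inode so its blocks
 * stay in place while swapping.
 */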
40034969c06aSJaegeuk Kim static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
40044969c06aSJaegeuk Kim sector_t *span)
40054969c06aSJaegeuk Kim {
40064969c06aSJaegeuk Kim struct inode *inode = file_inode(file);
400740d76c39SDaeho Jeong struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
40084969c06aSJaegeuk Kim int ret;
40094969c06aSJaegeuk Kim
40104969c06aSJaegeuk Kim if (!S_ISREG(inode->i_mode))
40114969c06aSJaegeuk Kim return -EINVAL;
40124969c06aSJaegeuk Kim
401340d76c39SDaeho Jeong if (f2fs_readonly(sbi->sb))
40144969c06aSJaegeuk Kim return -EROFS;
40154969c06aSJaegeuk Kim
401640d76c39SDaeho Jeong if (f2fs_lfs_mode(sbi) && !f2fs_sb_has_blkzoned(sbi)) {
401740d76c39SDaeho Jeong f2fs_err(sbi, "Swapfile not supported in LFS mode");
4018d927ccfcSShin'ichiro Kawasaki return -EINVAL;
4019d927ccfcSShin'ichiro Kawasaki }
4020d927ccfcSShin'ichiro Kawasaki
40214969c06aSJaegeuk Kim ret = f2fs_convert_inline_inode(inode);
40224969c06aSJaegeuk Kim if (ret)
40234969c06aSJaegeuk Kim return ret;
40244969c06aSJaegeuk Kim
402578134d03SDaeho Jeong if (!f2fs_disable_compressed_file(inode))
40264c8ff709SChao Yu return -EINVAL;
40274c8ff709SChao Yu
40280b979f1bSChao Yu f2fs_precache_extents(inode);
40290b979f1bSChao Yu
403040d76c39SDaeho Jeong ret = filemap_fdatawrite(inode->i_mapping);
403140d76c39SDaeho Jeong if (ret < 0)
403240d76c39SDaeho Jeong return ret;
403340d76c39SDaeho Jeong
40343e5e479aSChao Yu ret = check_swap_activate(sis, file, span);
40353e5e479aSChao Yu if (ret < 0)
40364969c06aSJaegeuk Kim return ret;
40374969c06aSJaegeuk Kim
40388ec071c3SChao Yu stat_inc_swapfile_inode(inode);
40394969c06aSJaegeuk Kim set_inode_flag(inode, FI_PIN_FILE);
404040d76c39SDaeho Jeong f2fs_update_time(sbi, REQ_TIME);
40413e5e479aSChao Yu return ret;
40424969c06aSJaegeuk Kim }
40434969c06aSJaegeuk Kim
40444969c06aSJaegeuk Kim static void f2fs_swap_deactivate(struct file *file)
40454969c06aSJaegeuk Kim {
40464969c06aSJaegeuk Kim struct inode *inode = file_inode(file);
40474969c06aSJaegeuk Kim
40488ec071c3SChao Yu stat_dec_swapfile_inode(inode);
40494969c06aSJaegeuk Kim clear_inode_flag(inode, FI_PIN_FILE);
40504969c06aSJaegeuk Kim }
40514969c06aSJaegeuk Kim #else
40524969c06aSJaegeuk Kim static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
40534969c06aSJaegeuk Kim sector_t *span)
40544969c06aSJaegeuk Kim {
40554969c06aSJaegeuk Kim return -EOPNOTSUPP;
40564969c06aSJaegeuk Kim }
40574969c06aSJaegeuk Kim
40584969c06aSJaegeuk Kim static void f2fs_swap_deactivate(struct file *file)
40594969c06aSJaegeuk Kim {
40604969c06aSJaegeuk Kim }
40614969c06aSJaegeuk Kim #endif
40624969c06aSJaegeuk Kim
4063eb47b800SJaegeuk Kim const struct address_space_operations f2fs_dblock_aops = {
4064be05584fSMatthew Wilcox (Oracle) .read_folio = f2fs_read_data_folio,
406523323196SMatthew Wilcox (Oracle) .readahead = f2fs_readahead,
4066eb47b800SJaegeuk Kim .writepage = f2fs_write_data_page,
4067eb47b800SJaegeuk Kim .writepages = f2fs_write_data_pages,
4068eb47b800SJaegeuk Kim .write_begin = f2fs_write_begin,
4069a1dd3c13SJaegeuk Kim .write_end = f2fs_write_end,
40704f5e34f7SMatthew Wilcox (Oracle) .dirty_folio = f2fs_dirty_data_folio,
40711d5b9bd6SMatthew Wilcox (Oracle) .migrate_folio = filemap_migrate_folio,
407291503996SMatthew Wilcox (Oracle) .invalidate_folio = f2fs_invalidate_folio,
4073c26cd045SMatthew Wilcox (Oracle) .release_folio = f2fs_release_folio,
4074c01e54b7SJaegeuk Kim .bmap = f2fs_bmap,
40754969c06aSJaegeuk Kim .swap_activate = f2fs_swap_activate,
40764969c06aSJaegeuk Kim .swap_deactivate = f2fs_swap_deactivate,
4077eb47b800SJaegeuk Kim };
40786dbb1796SEric Biggers
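/*
 * Editor's summary of the function below: clear the PAGECACHE_TAG_DIRTY
 * mark for @page in the page cache xarray so tag-based writeback lookups
 * skip it; the page flags themselves are left untouched.
 */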
40795ec2d99dSMatthew Wilcox void f2fs_clear_page_cache_dirty_tag(struct page *page)
4080aec2f729SChao Yu {
4081aec2f729SChao Yu struct address_space *mapping = page_mapping(page);
4082aec2f729SChao Yu unsigned long flags;
4083aec2f729SChao Yu
4084aec2f729SChao Yu xa_lock_irqsave(&mapping->i_pages, flags);
40855ec2d99dSMatthew Wilcox __xa_clear_mark(&mapping->i_pages, page_index(page),
4086aec2f729SChao Yu PAGECACHE_TAG_DIRTY);
4087aec2f729SChao Yu xa_unlock_irqrestore(&mapping->i_pages, flags);
4088aec2f729SChao Yu }
4089aec2f729SChao Yu
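/*
 * Editor's note on the setup below: post-read contexts drive the work done
 * after read bios complete (decryption, verity, decompression); backing
 * them with a mempool keeps those completions from failing on transient
 * memory shortage.
 */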
40906dbb1796SEric Biggers int __init f2fs_init_post_read_processing(void)
40916dbb1796SEric Biggers {
409295ae251fSEric Biggers bio_post_read_ctx_cache =
409395ae251fSEric Biggers kmem_cache_create("f2fs_bio_post_read_ctx",
409495ae251fSEric Biggers sizeof(struct bio_post_read_ctx), 0, 0, NULL);
40956dbb1796SEric Biggers if (!bio_post_read_ctx_cache)
40966dbb1796SEric Biggers goto fail;
40976dbb1796SEric Biggers bio_post_read_ctx_pool =
40986dbb1796SEric Biggers mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
40996dbb1796SEric Biggers bio_post_read_ctx_cache);
41006dbb1796SEric Biggers if (!bio_post_read_ctx_pool)
41016dbb1796SEric Biggers goto fail_free_cache;
41026dbb1796SEric Biggers return 0;
41036dbb1796SEric Biggers
41046dbb1796SEric Biggers fail_free_cache:
41056dbb1796SEric Biggers kmem_cache_destroy(bio_post_read_ctx_cache);
41066dbb1796SEric Biggers fail:
41076dbb1796SEric Biggers return -ENOMEM;
41086dbb1796SEric Biggers }
41096dbb1796SEric Biggers
41100b20fcecSChao Yu void f2fs_destroy_post_read_processing(void)
41116dbb1796SEric Biggers {
41126dbb1796SEric Biggers mempool_destroy(bio_post_read_ctx_pool);
41136dbb1796SEric Biggers kmem_cache_destroy(bio_post_read_ctx_cache);
41146dbb1796SEric Biggers }
41150b20fcecSChao Yu
41164c8ff709SChao Yu int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
41174c8ff709SChao Yu {
41184c8ff709SChao Yu if (!f2fs_sb_has_encrypt(sbi) &&
41194c8ff709SChao Yu !f2fs_sb_has_verity(sbi) &&
41204c8ff709SChao Yu !f2fs_sb_has_compression(sbi))
41214c8ff709SChao Yu return 0;
41224c8ff709SChao Yu
41234c8ff709SChao Yu sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
41244c8ff709SChao Yu WQ_UNBOUND | WQ_HIGHPRI,
41254c8ff709SChao Yu num_online_cpus());
4126870af777SYangtao Li return sbi->post_read_wq ? 0 : -ENOMEM;
41274c8ff709SChao Yu }
41284c8ff709SChao Yu
41294c8ff709SChao Yu void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
41304c8ff709SChao Yu {
41314c8ff709SChao Yu if (sbi->post_read_wq)
41324c8ff709SChao Yu destroy_workqueue(sbi->post_read_wq);
41334c8ff709SChao Yu }
41344c8ff709SChao Yu
41350b20fcecSChao Yu int __init f2fs_init_bio_entry_cache(void)
41360b20fcecSChao Yu {
413798510003SChao Yu bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
41380b20fcecSChao Yu sizeof(struct bio_entry));
4139870af777SYangtao Li return bio_entry_slab ? 0 : -ENOMEM;
41400b20fcecSChao Yu }
41410b20fcecSChao Yu
4142f543805fSChao Yu void f2fs_destroy_bio_entry_cache(void)
41430b20fcecSChao Yu {
41440b20fcecSChao Yu kmem_cache_destroy(bio_entry_slab);
41450b20fcecSChao Yu }
41461517c1a7SEric Biggers
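/*
 * Editor's summary of the function below: iomap_begin reports one extent
 * covering (part of) [offset, offset+length) to the iomap core, which f2fs
 * uses for direct I/O. Mapped extents may be shortened to keep inline-crypto
 * DUNs contiguous; a write that lands in a hole returns -ENOTBLK so the
 * caller can fall back to buffered I/O.
 */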
41471517c1a7SEric Biggers static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
41481517c1a7SEric Biggers unsigned int flags, struct iomap *iomap,
41491517c1a7SEric Biggers struct iomap *srcmap)
41501517c1a7SEric Biggers {
41511517c1a7SEric Biggers struct f2fs_map_blocks map = {};
41521517c1a7SEric Biggers pgoff_t next_pgofs = 0;
41531517c1a7SEric Biggers int err;
41541517c1a7SEric Biggers
41551517c1a7SEric Biggers map.m_lblk = bytes_to_blks(inode, offset);
41561517c1a7SEric Biggers map.m_len = bytes_to_blks(inode, offset + length - 1) - map.m_lblk + 1;
41571517c1a7SEric Biggers map.m_next_pgofs = &next_pgofs;
41581517c1a7SEric Biggers map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
41591517c1a7SEric Biggers if (flags & IOMAP_WRITE)
41601517c1a7SEric Biggers map.m_may_create = true;
41611517c1a7SEric Biggers
4162cd8fc522SChristoph Hellwig err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DIO);
41631517c1a7SEric Biggers if (err)
41641517c1a7SEric Biggers return err;
41651517c1a7SEric Biggers
41661517c1a7SEric Biggers iomap->offset = blks_to_bytes(inode, map.m_lblk);
41671517c1a7SEric Biggers
41688a2c77bcSEric Biggers /*
41698a2c77bcSEric Biggers * When inline encryption is enabled, sometimes I/O to an encrypted file
41708a2c77bcSEric Biggers * has to be broken up to guarantee DUN contiguity. Handle this by
41718a2c77bcSEric Biggers * limiting the length of the mapping returned.
41728a2c77bcSEric Biggers */
41738a2c77bcSEric Biggers map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
41748a2c77bcSEric Biggers
41758d3c1fa3SChristoph Hellwig /*
41768d3c1fa3SChristoph Hellwig * We should never see delalloc or compressed extents here based on
41778d3c1fa3SChristoph Hellwig * prior flushing and checks.
41788d3c1fa3SChristoph Hellwig */
41798d3c1fa3SChristoph Hellwig if (WARN_ON_ONCE(map.m_pblk == NEW_ADDR))
41808d3c1fa3SChristoph Hellwig return -EINVAL;
41818d3c1fa3SChristoph Hellwig if (WARN_ON_ONCE(map.m_pblk == COMPRESS_ADDR))
41821517c1a7SEric Biggers return -EINVAL;
41831517c1a7SEric Biggers
41842b2611a4SChao Yu if (map.m_flags & F2FS_MAP_MAPPED) {
41858d3c1fa3SChristoph Hellwig iomap->length = blks_to_bytes(inode, map.m_len);
41868d3c1fa3SChristoph Hellwig iomap->type = IOMAP_MAPPED;
41878d3c1fa3SChristoph Hellwig iomap->flags |= IOMAP_F_MERGED;
41881517c1a7SEric Biggers iomap->bdev = map.m_bdev;
41891517c1a7SEric Biggers iomap->addr = blks_to_bytes(inode, map.m_pblk);
41901517c1a7SEric Biggers } else {
41918d3c1fa3SChristoph Hellwig if (flags & IOMAP_WRITE)
41928d3c1fa3SChristoph Hellwig return -ENOTBLK;
41931517c1a7SEric Biggers iomap->length = blks_to_bytes(inode, next_pgofs) -
41941517c1a7SEric Biggers iomap->offset;
41951517c1a7SEric Biggers iomap->type = IOMAP_HOLE;
41961517c1a7SEric Biggers iomap->addr = IOMAP_NULL_ADDR;
41971517c1a7SEric Biggers }
41981517c1a7SEric Biggers
41991517c1a7SEric Biggers if (map.m_flags & F2FS_MAP_NEW)
42001517c1a7SEric Biggers iomap->flags |= IOMAP_F_NEW;
42011517c1a7SEric Biggers if ((inode->i_state & I_DIRTY_DATASYNC) ||
42021517c1a7SEric Biggers offset + length > i_size_read(inode))
42031517c1a7SEric Biggers iomap->flags |= IOMAP_F_DIRTY;
42041517c1a7SEric Biggers
42051517c1a7SEric Biggers return 0;
42061517c1a7SEric Biggers }
42071517c1a7SEric Biggers
42081517c1a7SEric Biggers const struct iomap_ops f2fs_iomap_ops = {
42091517c1a7SEric Biggers .iomap_begin = f2fs_iomap_begin,
42101517c1a7SEric Biggers };