17c1a000dSChao Yu // SPDX-License-Identifier: GPL-2.0
20a8165d7SJaegeuk Kim /*
3eb47b800SJaegeuk Kim * fs/f2fs/data.c
4eb47b800SJaegeuk Kim *
5eb47b800SJaegeuk Kim * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6eb47b800SJaegeuk Kim * http://www.samsung.com/
7eb47b800SJaegeuk Kim */
8eb47b800SJaegeuk Kim #include <linux/fs.h>
9eb47b800SJaegeuk Kim #include <linux/f2fs_fs.h>
10eb47b800SJaegeuk Kim #include <linux/buffer_head.h>
114034247aSNeilBrown #include <linux/sched/mm.h>
12eb47b800SJaegeuk Kim #include <linux/mpage.h>
13eb47b800SJaegeuk Kim #include <linux/writeback.h>
148f46dcaeSChao Yu #include <linux/pagevec.h>
15eb47b800SJaegeuk Kim #include <linux/blkdev.h>
16eb47b800SJaegeuk Kim #include <linux/bio.h>
1727aacd28SSatya Tangirala #include <linux/blk-crypto.h>
184969c06aSJaegeuk Kim #include <linux/swap.h>
19690e4a3eSGeert Uytterhoeven #include <linux/prefetch.h>
20e2e40f2cSChristoph Hellwig #include <linux/uio.h>
21174cd4b1SIngo Molnar #include <linux/sched/signal.h>
2210c5db28SChristoph Hellwig #include <linux/fiemap.h>
231517c1a7SEric Biggers #include <linux/iomap.h>
24eb47b800SJaegeuk Kim
25eb47b800SJaegeuk Kim #include "f2fs.h"
26eb47b800SJaegeuk Kim #include "node.h"
27eb47b800SJaegeuk Kim #include "segment.h"
2852118743SDaeho Jeong #include "iostat.h"
29848753aaSNamjae Jeon #include <trace/events/f2fs.h>
30eb47b800SJaegeuk Kim
316dbb1796SEric Biggers #define NUM_PREALLOC_POST_READ_CTXS 128
326dbb1796SEric Biggers
336dbb1796SEric Biggers static struct kmem_cache *bio_post_read_ctx_cache;
340b20fcecSChao Yu static struct kmem_cache *bio_entry_slab;
356dbb1796SEric Biggers static mempool_t *bio_post_read_ctx_pool;
36f543805fSChao Yu static struct bio_set f2fs_bioset;
37f543805fSChao Yu
38f543805fSChao Yu #define F2FS_BIO_POOL_SIZE NR_CURSEG_TYPE
39f543805fSChao Yu
/*
 * Allocate the dedicated bio_set used for all f2fs-issued bios.
 * Called once at module init time.
 */
int __init f2fs_init_bioset(void)
{
	return bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
					0, BIOSET_NEED_BVECS);
}
45f543805fSChao Yu
/* Tear down the f2fs bio_set at module exit. */
void f2fs_destroy_bioset(void)
{
	bioset_exit(&f2fs_bioset);
}
50f543805fSChao Yu
f2fs_is_cp_guaranteed(struct page * page)5182704e59SChao Yu bool f2fs_is_cp_guaranteed(struct page *page)
5236951b38SChao Yu {
5336951b38SChao Yu struct address_space *mapping = page->mapping;
5436951b38SChao Yu struct inode *inode;
5536951b38SChao Yu struct f2fs_sb_info *sbi;
5636951b38SChao Yu
5736951b38SChao Yu if (!mapping)
5836951b38SChao Yu return false;
5936951b38SChao Yu
6036951b38SChao Yu inode = mapping->host;
6136951b38SChao Yu sbi = F2FS_I_SB(inode);
6236951b38SChao Yu
6336951b38SChao Yu if (inode->i_ino == F2FS_META_INO(sbi) ||
6436951b38SChao Yu inode->i_ino == F2FS_NODE_INO(sbi) ||
65b763f3beSChao Yu S_ISDIR(inode->i_mode))
66b763f3beSChao Yu return true;
67b763f3beSChao Yu
683db1de0eSDaeho Jeong if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
69b763f3beSChao Yu page_private_gcing(page))
7036951b38SChao Yu return true;
7136951b38SChao Yu return false;
7236951b38SChao Yu }
7336951b38SChao Yu
__read_io_type(struct page * page)745f9abab4SJaegeuk Kim static enum count_type __read_io_type(struct page *page)
755f9abab4SJaegeuk Kim {
764969c06aSJaegeuk Kim struct address_space *mapping = page_file_mapping(page);
775f9abab4SJaegeuk Kim
785f9abab4SJaegeuk Kim if (mapping) {
795f9abab4SJaegeuk Kim struct inode *inode = mapping->host;
805f9abab4SJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
815f9abab4SJaegeuk Kim
825f9abab4SJaegeuk Kim if (inode->i_ino == F2FS_META_INO(sbi))
835f9abab4SJaegeuk Kim return F2FS_RD_META;
845f9abab4SJaegeuk Kim
855f9abab4SJaegeuk Kim if (inode->i_ino == F2FS_NODE_INO(sbi))
865f9abab4SJaegeuk Kim return F2FS_RD_NODE;
875f9abab4SJaegeuk Kim }
885f9abab4SJaegeuk Kim return F2FS_RD_DATA;
895f9abab4SJaegeuk Kim }
905f9abab4SJaegeuk Kim
/*
 * Postprocessing steps for read bios, encoded as a bitmask in
 * bio_post_read_ctx->enabled_steps.  A step whose kconfig option is
 * disabled collapses to 0 so the related code compiles away.
 */
enum bio_post_read_step {
#ifdef CONFIG_FS_ENCRYPTION
	STEP_DECRYPT	= BIT(0),
#else
	STEP_DECRYPT	= 0,	/* compile out the decryption-related code */
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
	STEP_DECOMPRESS	= BIT(1),
#else
	STEP_DECOMPRESS	= 0,	/* compile out the decompression-related code */
#endif
#ifdef CONFIG_FS_VERITY
	STEP_VERITY	= BIT(2),
#else
	STEP_VERITY	= 0,	/* compile out the verity-related code */
#endif
};
1096dbb1796SEric Biggers
/* Per-bio private state for read bios that need postprocessing. */
struct bio_post_read_ctx {
	struct bio *bio;		/* the bio this context belongs to */
	struct f2fs_sb_info *sbi;	/* owning superblock info */
	struct work_struct work;	/* deferred postprocessing work item */
	unsigned int enabled_steps;	/* bitmask of enum bio_post_read_step */
	/*
	 * decompression_attempted keeps track of whether
	 * f2fs_end_read_compressed_page() has been called on the pages in the
	 * bio that belong to a compressed cluster yet.
	 */
	bool decompression_attempted;
	block_t fs_blkaddr;		/* fs block address of the bio's first page */
};
1236dbb1796SEric Biggers
12498dc08baSEric Biggers /*
12598dc08baSEric Biggers * Update and unlock a bio's pages, and free the bio.
12698dc08baSEric Biggers *
12798dc08baSEric Biggers * This marks pages up-to-date only if there was no error in the bio (I/O error,
12898dc08baSEric Biggers * decryption error, or verity error), as indicated by bio->bi_status.
12998dc08baSEric Biggers *
13098dc08baSEric Biggers * "Compressed pages" (pagecache pages backed by a compressed cluster on-disk)
13198dc08baSEric Biggers * aren't marked up-to-date here, as decompression is done on a per-compression-
13298dc08baSEric Biggers * cluster basis rather than a per-bio basis. Instead, we only must do two
13398dc08baSEric Biggers * things for each compressed page here: call f2fs_end_read_compressed_page()
13498dc08baSEric Biggers * with failed=true if an error occurred before it would have normally gotten
13598dc08baSEric Biggers * called (i.e., I/O error or decryption error, but *not* verity error), and
13698dc08baSEric Biggers * release the bio's reference to the decompress_io_ctx of the page's cluster.
13798dc08baSEric Biggers */
static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;
	struct bio_post_read_ctx *ctx = bio->bi_private;

	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;

		if (f2fs_is_compressed_page(page)) {
			/* cluster never reached decompression: flag failure */
			if (ctx && !ctx->decompression_attempted)
				f2fs_end_read_compressed_page(page, true, 0,
							in_task);
			/* drop this bio's reference to the cluster's dic */
			f2fs_put_page_dic(page, in_task);
			continue;
		}

		/* uncompressed pages: uptodate iff the whole bio succeeded */
		if (bio->bi_status)
			ClearPageUptodate(page);
		else
			SetPageUptodate(page);
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);
	}

	if (ctx)
		mempool_free(ctx, bio_post_read_ctx_pool);
	bio_put(bio);
}
1674c8ff709SChao Yu
/*
 * Verity work function: verify the bio's data pages with fs-verity and
 * then finish the bio.  Runs from the fs-verity workqueue.
 */
static void f2fs_verify_bio(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;
	bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);

	/*
	 * fsverity_verify_bio() may call readahead() again, and while verity
	 * will be disabled for this, decryption and/or decompression may still
	 * be needed, resulting in another bio_post_read_ctx being allocated.
	 * So to prevent deadlocks we need to release the current ctx to the
	 * mempool first. This assumes that verity is the last post-read step.
	 */
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	/*
	 * Verify the bio's pages with fs-verity. Exclude compressed pages,
	 * as those were handled separately by f2fs_end_read_compressed_page().
	 */
	if (may_have_compressed_pages) {
		struct bio_vec *bv;
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bv, bio, iter_all) {
			struct page *page = bv->bv_page;

			if (!f2fs_is_compressed_page(page) &&
			    !fsverity_verify_page(page)) {
				/* record the verity failure on the bio */
				bio->bi_status = BLK_STS_IOERR;
				break;
			}
		}
	} else {
		fsverity_verify_bio(bio);
	}

	f2fs_finish_read_bio(bio, true);
}
208644c8c92SEric Biggers
209644c8c92SEric Biggers /*
2107f59b277SEric Biggers * If the bio's data needs to be verified with fs-verity, then enqueue the
2117f59b277SEric Biggers * verity work for the bio. Otherwise finish the bio now.
2127f59b277SEric Biggers *
2137f59b277SEric Biggers * Note that to avoid deadlocks, the verity work can't be done on the
2147f59b277SEric Biggers * decryption/decompression workqueue. This is because verifying the data pages
2157f59b277SEric Biggers * can involve reading verity metadata pages from the file, and these verity
2167f59b277SEric Biggers * metadata pages may be encrypted and/or compressed.
217644c8c92SEric Biggers */
static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
{
	struct bio_post_read_ctx *ctx = bio->bi_private;

	/* verity runs on its own workqueue to avoid deadlock (see above) */
	if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
		INIT_WORK(&ctx->work, f2fs_verify_bio);
		fsverity_enqueue_verify_work(&ctx->work);
	} else {
		f2fs_finish_read_bio(bio, in_task);
	}
}
2294c8ff709SChao Yu
2307f59b277SEric Biggers /*
2317f59b277SEric Biggers * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
2327f59b277SEric Biggers * remaining page was read by @ctx->bio.
2337f59b277SEric Biggers *
2347f59b277SEric Biggers * Note that a bio may span clusters (even a mix of compressed and uncompressed
2357f59b277SEric Biggers * clusters) or be for just part of a cluster. STEP_DECOMPRESS just indicates
2367f59b277SEric Biggers * that the bio includes at least one compressed page. The actual decompression
2377f59b277SEric Biggers * is done on a per-cluster basis, not a per-bio basis.
2387f59b277SEric Biggers */
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
		bool in_task)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;
	bool all_compressed = true;
	block_t blkaddr = ctx->fs_blkaddr;

	bio_for_each_segment_all(bv, ctx->bio, iter_all) {
		struct page *page = bv->bv_page;

		if (f2fs_is_compressed_page(page))
			f2fs_end_read_compressed_page(page, false, blkaddr,
						      in_task);
		else
			all_compressed = false;

		/* the bio's pages map to consecutive fs block addresses */
		blkaddr++;
	}

	ctx->decompression_attempted = true;

	/*
	 * Optimization: if all the bio's pages are compressed, then scheduling
	 * the per-bio verity work is unnecessary, as verity will be fully
	 * handled at the compression cluster level.
	 */
	if (all_compressed)
		ctx->enabled_steps &= ~STEP_VERITY;
}
2694c8ff709SChao Yu
/*
 * Workqueue function handling the decrypt and decompress post-read steps.
 * On decryption failure the bio is finished immediately with its error
 * status; otherwise verity is handled (or the bio finished) afterwards.
 */
static void f2fs_post_read_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	if ((ctx->enabled_steps & STEP_DECRYPT) && !fscrypt_decrypt_bio(bio)) {
		f2fs_finish_read_bio(bio, true);
		return;
	}

	if (ctx->enabled_steps & STEP_DECOMPRESS)
		f2fs_handle_step_decompress(ctx, true);

	f2fs_verify_and_finish_bio(bio, true);
}
2866dbb1796SEric Biggers
/*
 * bi_end_io handler for read bios: dispatch any required postprocessing
 * (decrypt/decompress/verity), either inline or via the post-read
 * workqueue.
 */
static void f2fs_read_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
	struct bio_post_read_ctx *ctx;
	bool intask = in_task();

	iostat_update_and_unbind_ctx(bio);
	ctx = bio->bi_private;

	/* fault injection hook for testing read-error handling */
	if (time_to_inject(sbi, FAULT_READ_IO))
		bio->bi_status = BLK_STS_IOERR;

	if (bio->bi_status) {
		f2fs_finish_read_bio(bio, intask);
		return;
	}

	if (ctx) {
		unsigned int enabled_steps = ctx->enabled_steps &
					(STEP_DECRYPT | STEP_DECOMPRESS);

		/*
		 * If we have only decompression step between decompression and
		 * decrypt, we don't need post processing for this.
		 */
		if (enabled_steps == STEP_DECOMPRESS &&
				!f2fs_low_mem_mode(sbi)) {
			f2fs_handle_step_decompress(ctx, intask);
		} else if (enabled_steps) {
			INIT_WORK(&ctx->work, f2fs_post_read_work);
			queue_work(ctx->sbi->post_read_wq, &ctx->work);
			return;
		}
	}

	f2fs_verify_and_finish_bio(bio, intask);
}
324bff139b4SDaeho Jeong
/*
 * bi_end_io handler for write bios: release dummy alignment pages,
 * finalize encryption bounce pages, complete compressed-cluster writes,
 * propagate errors, and end page writeback.
 */
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	iostat_update_and_unbind_ctx(bio);
	sbi = bio->bi_private;

	/* fault injection hook for testing write-error handling */
	if (time_to_inject(sbi, FAULT_WRITE_IO))
		bio->bi_status = BLK_STS_IOERR;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page, false);

		/* dummy pages only existed for IO alignment: free them */
		if (page_private_dummy(page)) {
			clear_page_private_dummy(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true,
						STOP_CP_REASON_WRITE_FAIL);
			continue;
		}

		fscrypt_finalize_bounce_page(&page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (f2fs_is_compressed_page(page)) {
			f2fs_compress_write_end_io(bio, page);
			continue;
		}
#endif

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			/* failed checkpoint data is unrecoverable: stop cp */
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true,
						STOP_CP_REASON_WRITE_FAIL);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		if (f2fs_in_warm_node_list(sbi, page))
			f2fs_del_fsync_node_entry(sbi, page);
		clear_page_private_gcing(page);
		end_page_writeback(page);
	}
	/* wake checkpoint waiters once the last cp data page is written */
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}
38393dfe2acSJaegeuk Kim
#ifdef CONFIG_BLK_DEV_ZONED
/*
 * Write completion for zoned devices: restore the stashed bi_private,
 * signal zone_wait so the next bio to this zone may be issued, then run
 * the normal write completion path.
 */
static void f2fs_zone_write_end_io(struct bio *bio)
{
	struct f2fs_bio_info *io = (struct f2fs_bio_info *)bio->bi_private;

	bio->bi_private = io->bi_private;
	complete(&io->zone_wait);
	f2fs_write_end_io(bio);
}
#endif
394e067dc3cSDaeho Jeong
/*
 * Map a filesystem block address to its backing block device and,
 * optionally, the device-relative sector.  On multi-device setups the
 * address is translated into the range of the device that contains it.
 */
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, sector_t *sector)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int idx;

	if (f2fs_is_multi_device(sbi)) {
		for (idx = 0; idx < sbi->s_ndevs; idx++) {
			if (blk_addr < FDEV(idx).start_blk ||
			    blk_addr > FDEV(idx).end_blk)
				continue;
			/* make the address relative to the owning device */
			blk_addr -= FDEV(idx).start_blk;
			bdev = FDEV(idx).bdev;
			break;
		}
	}

	if (sector)
		*sector = SECTOR_FROM_BLOCK(blk_addr);
	return bdev;
}
4163c62be17SJaegeuk Kim
/*
 * Return the index of the device whose block range contains @blkaddr,
 * or 0 for single-device filesystems (and when nothing matches).
 */
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int idx;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (idx = 0; idx < sbi->s_ndevs; idx++) {
		if (blkaddr >= FDEV(idx).start_blk &&
		    blkaddr <= FDEV(idx).end_blk)
			return idx;
	}
	return 0;
}
4293c62be17SJaegeuk Kim
f2fs_io_flags(struct f2fs_io_info * fio)4307649c873SBart Van Assche static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
43164bf0eefSChristoph Hellwig {
432447286ebSYangtao Li unsigned int temp_mask = GENMASK(NR_TEMP_TYPE - 1, 0);
4330adc2ab0SJaegeuk Kim unsigned int fua_flag, meta_flag, io_flag;
4347649c873SBart Van Assche blk_opf_t op_flags = 0;
4350adc2ab0SJaegeuk Kim
4360adc2ab0SJaegeuk Kim if (fio->op != REQ_OP_WRITE)
4370adc2ab0SJaegeuk Kim return 0;
4380adc2ab0SJaegeuk Kim if (fio->type == DATA)
4390adc2ab0SJaegeuk Kim io_flag = fio->sbi->data_io_flag;
4400adc2ab0SJaegeuk Kim else if (fio->type == NODE)
4410adc2ab0SJaegeuk Kim io_flag = fio->sbi->node_io_flag;
4420adc2ab0SJaegeuk Kim else
4430adc2ab0SJaegeuk Kim return 0;
4440adc2ab0SJaegeuk Kim
4450adc2ab0SJaegeuk Kim fua_flag = io_flag & temp_mask;
4460adc2ab0SJaegeuk Kim meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;
44764bf0eefSChristoph Hellwig
44864bf0eefSChristoph Hellwig /*
44964bf0eefSChristoph Hellwig * data/node io flag bits per temp:
45064bf0eefSChristoph Hellwig * REQ_META | REQ_FUA |
45164bf0eefSChristoph Hellwig * 5 | 4 | 3 | 2 | 1 | 0 |
45264bf0eefSChristoph Hellwig * Cold | Warm | Hot | Cold | Warm | Hot |
45364bf0eefSChristoph Hellwig */
454447286ebSYangtao Li if (BIT(fio->temp) & meta_flag)
4550adc2ab0SJaegeuk Kim op_flags |= REQ_META;
456447286ebSYangtao Li if (BIT(fio->temp) & fua_flag)
4570adc2ab0SJaegeuk Kim op_flags |= REQ_FUA;
4580adc2ab0SJaegeuk Kim return op_flags;
45964bf0eefSChristoph Hellwig }
46064bf0eefSChristoph Hellwig
/*
 * Allocate a bio from the f2fs bio_set, targeted at the device holding
 * fio->new_blkaddr, with end_io/private wired up for read or write.
 */
static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct block_device *bdev;
	sector_t sector;
	struct bio *bio;

	bdev = f2fs_target_device(sbi, fio->new_blkaddr, &sector);
	bio = bio_alloc_bioset(bdev, npages,
				fio->op | fio->op_flags | f2fs_io_flags(fio),
				GFP_NOIO, &f2fs_bioset);
	bio->bi_iter.bi_sector = sector;
	if (is_read_io(fio->op)) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
	}
	iostat_alloc_and_bind_ctx(sbi, bio, NULL);

	if (fio->io_wbc)
		wbc_init_bio(fio->io_wbc, bio);

	return bio;
}
487940a6d34SGu Zheng
/* Attach an inline-encryption context to @bio for @inode, if needed. */
static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
				  pgoff_t first_idx,
				  const struct f2fs_io_info *fio,
				  gfp_t gfp_mask)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (!fio || !fio->encrypted_page)
		fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
}
50027aacd28SSatya Tangirala
/*
 * Return true if a page of @inode at @next_idx may be merged into @bio
 * as far as inline encryption is concerned.
 */
static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
				    pgoff_t next_idx,
				    const struct f2fs_io_info *fio)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (fio && fio->encrypted_page)
		return !bio_has_crypt_ctx(bio);

	return fscrypt_mergeable_bio(bio, inode, next_idx);
}
51427aacd28SSatya Tangirala
/* Submit a read bio, with tracing and iostat accounting. */
void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
				enum page_type type)
{
	WARN_ON_ONCE(!is_read_io(bio_op(bio)));
	trace_f2fs_submit_read_bio(sbi->sb, type, bio);

	iostat_update_submit_ctx(bio, type);
	submit_bio(bio);
}
5240a595ebaSJaegeuk Kim
/*
 * Pad a write bio out to the IO-alignment unit with zeroed dummy pages
 * from the write_io_dummy mempool (used when F2FS_IO_ALIGNED applies).
 */
static void f2fs_align_write_bio(struct f2fs_sb_info *sbi, struct bio *bio)
{
	unsigned int start =
		(bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS) % F2FS_IO_SIZE(sbi);

	/* already aligned: nothing to pad */
	if (start == 0)
		return;

	/* fill dummy pages */
	for (; start < F2FS_IO_SIZE(sbi); start++) {
		struct page *page =
			mempool_alloc(sbi->write_io_dummy,
				      GFP_NOIO | __GFP_NOFAIL);
		f2fs_bug_on(sbi, !page);

		lock_page(page);

		zero_user_segment(page, 0, PAGE_SIZE);
		/* marked so f2fs_write_end_io() can recognize and free it */
		set_page_private_dummy(page);

		if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
			f2fs_bug_on(sbi, 1);
	}
}
549bc29835aSChristoph Hellwig
/*
 * Submit a write bio: finish any current plug for LFS-mode data/node IO,
 * pad the bio when IO alignment is required, then trace and submit.
 */
static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
				enum page_type type)
{
	WARN_ON_ONCE(is_read_io(bio_op(bio)));

	if (type == DATA || type == NODE) {
		if (f2fs_lfs_mode(sbi) && current->plug)
			blk_finish_plug(current->plug);

		if (F2FS_IO_ALIGNED(sbi)) {
			f2fs_align_write_bio(sbi, bio);
			/*
			 * In the NODE case, we lose next block address chain.
			 * So, we need to do checkpoint in f2fs_sync_file.
			 */
			if (type == NODE)
				set_sbi_flag(sbi, SBI_NEED_CP);
		}
	}

	trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	iostat_update_submit_ctx(bio, type);
	submit_bio(bio);
}
5744c8ff709SChao Yu
/* Submit the bio currently being merged in @io, if any, and reset it. */
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->op)) {
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
		f2fs_submit_read_bio(io->sbi, io->bio, fio->type);
	} else {
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
		f2fs_submit_write_bio(io->sbi, io->bio, fio->type);
	}
	io->bio = NULL;
}
59193dfe2acSJaegeuk Kim
/*
 * Return true if @bio contains a page matching the given filter:
 * any page of @inode, exactly @page, or a node page belonging to @ino.
 * With no filter given at all, any non-NULL bio matches.
 */
static bool __has_merged_page(struct bio *bio, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (!bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *target = bvec->bv_page;

		/* look through encryption bounce pages */
		if (fscrypt_is_bounce_page(target)) {
			target = fscrypt_pagecache_page(target);
			if (IS_ERR(target))
				continue;
		}
		/* look through compression control pages */
		if (f2fs_is_compressed_page(target)) {
			target = f2fs_compress_control_page(target);
			if (IS_ERR(target))
				continue;
		}

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}
6280fd785ebSChao Yu
/*
 * Allocate and initialize the per-page-type write merge state: one
 * f2fs_bio_info per temperature for DATA/NODE, a single one for META.
 * Returns 0 on success or -ENOMEM.
 */
int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < NR_PAGE_TYPE; i++) {
		int n = (i == META) ? 1 : NR_TEMP_TYPE;
		int j;

		sbi->write_io[i] = f2fs_kmalloc(sbi,
				array_size(n, sizeof(struct f2fs_bio_info)),
				GFP_KERNEL);
		if (!sbi->write_io[i])
			return -ENOMEM;

		for (j = HOT; j < n; j++) {
			init_f2fs_rwsem(&sbi->write_io[i][j].io_rwsem);
			sbi->write_io[i][j].sbi = sbi;
			sbi->write_io[i][j].bio = NULL;
			spin_lock_init(&sbi->write_io[i][j].io_lock);
			INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
			INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
			init_f2fs_rwsem(&sbi->write_io[i][j].bio_list_lock);
#ifdef CONFIG_BLK_DEV_ZONED
			init_completion(&sbi->write_io[i][j].zone_wait);
			sbi->write_io[i][j].zone_pending_bio = NULL;
			sbi->write_io[i][j].bi_private = NULL;
#endif
		}
	}

	return 0;
}
661908ea654SYufen Yu
__f2fs_submit_merged_write(struct f2fs_sb_info * sbi,enum page_type type,enum temp_type temp)662b9109b0eSJaegeuk Kim static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
663a912b54dSJaegeuk Kim enum page_type type, enum temp_type temp)
66493dfe2acSJaegeuk Kim {
66593dfe2acSJaegeuk Kim enum page_type btype = PAGE_TYPE_OF_BIO(type);
666a912b54dSJaegeuk Kim struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
66793dfe2acSJaegeuk Kim
668e4544b63STim Murray f2fs_down_write(&io->io_rwsem);
669458e6197SJaegeuk Kim
670267c159fSJaegeuk Kim if (!io->bio)
671267c159fSJaegeuk Kim goto unlock_out;
672267c159fSJaegeuk Kim
673458e6197SJaegeuk Kim /* change META to META_FLUSH in the checkpoint procedure */
674458e6197SJaegeuk Kim if (type >= META_FLUSH) {
675458e6197SJaegeuk Kim io->fio.type = META_FLUSH;
67664bf0eefSChristoph Hellwig io->bio->bi_opf |= REQ_META | REQ_PRIO | REQ_SYNC;
67770fd7614SChristoph Hellwig if (!test_opt(sbi, NOBARRIER))
67864bf0eefSChristoph Hellwig io->bio->bi_opf |= REQ_PREFLUSH | REQ_FUA;
679458e6197SJaegeuk Kim }
680458e6197SJaegeuk Kim __submit_merged_bio(io);
681267c159fSJaegeuk Kim unlock_out:
682e4544b63STim Murray f2fs_up_write(&io->io_rwsem);
68393dfe2acSJaegeuk Kim }
68493dfe2acSJaegeuk Kim
/*
 * Submit merged write bios over all temperatures for @type.  Unless
 * @force is set, a temperature slot is only submitted when its cached
 * bio contains a page matching @inode / @page / @ino.
 */
static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, bool force)
{
	enum temp_type temp;
	bool ret = true;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		if (!force) {
			enum page_type btype = PAGE_TYPE_OF_BIO(type);
			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

			f2fs_down_read(&io->io_rwsem);
			ret = __has_merged_page(io->bio, inode, page, ino);
			f2fs_up_read(&io->io_rwsem);
		}
		if (ret)
			__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}
7090c3a5797SChao Yu
f2fs_submit_merged_write(struct f2fs_sb_info * sbi,enum page_type type)710b9109b0eSJaegeuk Kim void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
711406657ddSChao Yu {
712adcc00f7SHariprasad Kelam __submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
71393dfe2acSJaegeuk Kim }
71493dfe2acSJaegeuk Kim
/*
 * Submit merged write bios of @type that contain a page matching
 * @inode / @page / @ino (see __has_merged_page()).
 */
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
}
72193dfe2acSJaegeuk Kim
f2fs_flush_merged_writes(struct f2fs_sb_info * sbi)722b9109b0eSJaegeuk Kim void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
723406657ddSChao Yu {
724b9109b0eSJaegeuk Kim f2fs_submit_merged_write(sbi, DATA);
725b9109b0eSJaegeuk Kim f2fs_submit_merged_write(sbi, NODE);
726b9109b0eSJaegeuk Kim f2fs_submit_merged_write(sbi, META);
727406657ddSChao Yu }
728406657ddSChao Yu
72993dfe2acSJaegeuk Kim /*
73093dfe2acSJaegeuk Kim * Fill the locked page with data located in the block address.
731771a9a71STomohiro Kusumi * A caller needs to unlock the page on failure.
73293dfe2acSJaegeuk Kim */
f2fs_submit_page_bio(struct f2fs_io_info * fio)73305ca3632SJaegeuk Kim int f2fs_submit_page_bio(struct f2fs_io_info *fio)
73493dfe2acSJaegeuk Kim {
73593dfe2acSJaegeuk Kim struct bio *bio;
7360b81d077SJaegeuk Kim struct page *page = fio->encrypted_page ?
7370b81d077SJaegeuk Kim fio->encrypted_page : fio->page;
73893dfe2acSJaegeuk Kim
739c9b60788SChao Yu if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
74093770ab7SChao Yu fio->is_por ? META_POR : (__is_meta_io(fio) ?
74195fa90c9SChao Yu META_GENERIC : DATA_GENERIC_ENHANCE))) {
74295fa90c9SChao Yu f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
74310f966bbSChao Yu return -EFSCORRUPTED;
74495fa90c9SChao Yu }
745c9b60788SChao Yu
7462ace38e0SChao Yu trace_f2fs_submit_page_bio(page, fio);
74793dfe2acSJaegeuk Kim
74893dfe2acSJaegeuk Kim /* Allocate a new bio */
749b757f6edSChao Yu bio = __bio_alloc(fio, 1);
75093dfe2acSJaegeuk Kim
75127aacd28SSatya Tangirala f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
75227aacd28SSatya Tangirala fio->page->index, fio, GFP_NOIO);
75327aacd28SSatya Tangirala
75409cbfeafSKirill A. Shutemov if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
75593dfe2acSJaegeuk Kim bio_put(bio);
75693dfe2acSJaegeuk Kim return -EFAULT;
75793dfe2acSJaegeuk Kim }
75878efac53SChao Yu
75978efac53SChao Yu if (fio->io_wbc && !is_read_io(fio->op))
760844545c5SEric Biggers wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
76178efac53SChao Yu
7625f9abab4SJaegeuk Kim inc_page_count(fio->sbi, is_read_io(fio->op) ?
76382704e59SChao Yu __read_io_type(page) : WB_DATA_TYPE(fio->page, false));
7644c58ed07SChao Yu
765bc29835aSChristoph Hellwig if (is_read_io(bio_op(bio)))
766bc29835aSChristoph Hellwig f2fs_submit_read_bio(fio->sbi, bio, fio->type);
767bc29835aSChristoph Hellwig else
768bc29835aSChristoph Hellwig f2fs_submit_write_bio(fio->sbi, bio, fio->type);
76993dfe2acSJaegeuk Kim return 0;
77093dfe2acSJaegeuk Kim }
77193dfe2acSJaegeuk Kim
page_is_mergeable(struct f2fs_sb_info * sbi,struct bio * bio,block_t last_blkaddr,block_t cur_blkaddr)7728896cbdfSChao Yu static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
7738896cbdfSChao Yu block_t last_blkaddr, block_t cur_blkaddr)
7748896cbdfSChao Yu {
77510208567SJaegeuk Kim if (unlikely(sbi->max_io_bytes &&
77610208567SJaegeuk Kim bio->bi_iter.bi_size >= sbi->max_io_bytes))
77710208567SJaegeuk Kim return false;
7788896cbdfSChao Yu if (last_blkaddr + 1 != cur_blkaddr)
7798896cbdfSChao Yu return false;
780309dca30SChristoph Hellwig return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
7818896cbdfSChao Yu }
7828896cbdfSChao Yu
io_type_is_mergeable(struct f2fs_bio_info * io,struct f2fs_io_info * fio)7838896cbdfSChao Yu static bool io_type_is_mergeable(struct f2fs_bio_info *io,
7848896cbdfSChao Yu struct f2fs_io_info *fio)
7858896cbdfSChao Yu {
7868896cbdfSChao Yu if (io->fio.op != fio->op)
7878896cbdfSChao Yu return false;
7888896cbdfSChao Yu return io->fio.op_flags == fio->op_flags;
7898896cbdfSChao Yu }
7908896cbdfSChao Yu
io_is_mergeable(struct f2fs_sb_info * sbi,struct bio * bio,struct f2fs_bio_info * io,struct f2fs_io_info * fio,block_t last_blkaddr,block_t cur_blkaddr)7918896cbdfSChao Yu static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
7928896cbdfSChao Yu struct f2fs_bio_info *io,
7938896cbdfSChao Yu struct f2fs_io_info *fio,
7948896cbdfSChao Yu block_t last_blkaddr,
7958896cbdfSChao Yu block_t cur_blkaddr)
7968896cbdfSChao Yu {
797c72db71eSChao Yu if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
798c72db71eSChao Yu unsigned int filled_blocks =
799c72db71eSChao Yu F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
800c72db71eSChao Yu unsigned int io_size = F2FS_IO_SIZE(sbi);
801c72db71eSChao Yu unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;
802c72db71eSChao Yu
803c72db71eSChao Yu /* IOs in bio is aligned and left space of vectors is not enough */
804c72db71eSChao Yu if (!(filled_blocks % io_size) && left_vecs < io_size)
805c72db71eSChao Yu return false;
806c72db71eSChao Yu }
8078896cbdfSChao Yu if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
8088896cbdfSChao Yu return false;
8098896cbdfSChao Yu return io_type_is_mergeable(io, fio);
8108896cbdfSChao Yu }
8118896cbdfSChao Yu
add_bio_entry(struct f2fs_sb_info * sbi,struct bio * bio,struct page * page,enum temp_type temp)8120b20fcecSChao Yu static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
8130b20fcecSChao Yu struct page *page, enum temp_type temp)
8140b20fcecSChao Yu {
8150b20fcecSChao Yu struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
8160b20fcecSChao Yu struct bio_entry *be;
8170b20fcecSChao Yu
81832410577SChao Yu be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS, true, NULL);
8190b20fcecSChao Yu be->bio = bio;
8200b20fcecSChao Yu bio_get(bio);
8210b20fcecSChao Yu
8220b20fcecSChao Yu if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
8230b20fcecSChao Yu f2fs_bug_on(sbi, 1);
8240b20fcecSChao Yu
825e4544b63STim Murray f2fs_down_write(&io->bio_list_lock);
8260b20fcecSChao Yu list_add_tail(&be->list, &io->bio_list);
827e4544b63STim Murray f2fs_up_write(&io->bio_list_lock);
8280b20fcecSChao Yu }
8290b20fcecSChao Yu
del_bio_entry(struct bio_entry * be)8300b20fcecSChao Yu static void del_bio_entry(struct bio_entry *be)
8310b20fcecSChao Yu {
8320b20fcecSChao Yu list_del(&be->list);
8330b20fcecSChao Yu kmem_cache_free(bio_entry_slab, be);
8340b20fcecSChao Yu }
8350b20fcecSChao Yu
add_ipu_page(struct f2fs_io_info * fio,struct bio ** bio,struct page * page)83627aacd28SSatya Tangirala static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
8370b20fcecSChao Yu struct page *page)
8380b20fcecSChao Yu {
83927aacd28SSatya Tangirala struct f2fs_sb_info *sbi = fio->sbi;
8400b20fcecSChao Yu enum temp_type temp;
8410b20fcecSChao Yu bool found = false;
8420b20fcecSChao Yu int ret = -EAGAIN;
8430b20fcecSChao Yu
8440b20fcecSChao Yu for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
8450b20fcecSChao Yu struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
8460b20fcecSChao Yu struct list_head *head = &io->bio_list;
8470b20fcecSChao Yu struct bio_entry *be;
8480b20fcecSChao Yu
849e4544b63STim Murray f2fs_down_write(&io->bio_list_lock);
8500b20fcecSChao Yu list_for_each_entry(be, head, list) {
8510b20fcecSChao Yu if (be->bio != *bio)
8520b20fcecSChao Yu continue;
8530b20fcecSChao Yu
8540b20fcecSChao Yu found = true;
8550b20fcecSChao Yu
85627aacd28SSatya Tangirala f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
85727aacd28SSatya Tangirala *fio->last_block,
85827aacd28SSatya Tangirala fio->new_blkaddr));
85927aacd28SSatya Tangirala if (f2fs_crypt_mergeable_bio(*bio,
86027aacd28SSatya Tangirala fio->page->mapping->host,
86127aacd28SSatya Tangirala fio->page->index, fio) &&
86227aacd28SSatya Tangirala bio_add_page(*bio, page, PAGE_SIZE, 0) ==
8634c8ff709SChao Yu PAGE_SIZE) {
8640b20fcecSChao Yu ret = 0;
8650b20fcecSChao Yu break;
8660b20fcecSChao Yu }
8670b20fcecSChao Yu
86827aacd28SSatya Tangirala /* page can't be merged into bio; submit the bio */
8690b20fcecSChao Yu del_bio_entry(be);
870bc29835aSChristoph Hellwig f2fs_submit_write_bio(sbi, *bio, DATA);
8710b20fcecSChao Yu break;
8720b20fcecSChao Yu }
873e4544b63STim Murray f2fs_up_write(&io->bio_list_lock);
8740b20fcecSChao Yu }
8750b20fcecSChao Yu
8760b20fcecSChao Yu if (ret) {
8770b20fcecSChao Yu bio_put(*bio);
8780b20fcecSChao Yu *bio = NULL;
8790b20fcecSChao Yu }
8800b20fcecSChao Yu
8810b20fcecSChao Yu return ret;
8820b20fcecSChao Yu }
8830b20fcecSChao Yu
f2fs_submit_merged_ipu_write(struct f2fs_sb_info * sbi,struct bio ** bio,struct page * page)8840b20fcecSChao Yu void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
8850b20fcecSChao Yu struct bio **bio, struct page *page)
8860b20fcecSChao Yu {
8870b20fcecSChao Yu enum temp_type temp;
8880b20fcecSChao Yu bool found = false;
8890b20fcecSChao Yu struct bio *target = bio ? *bio : NULL;
8900b20fcecSChao Yu
8915cdb422cSChao Yu f2fs_bug_on(sbi, !target && !page);
8925cdb422cSChao Yu
8930b20fcecSChao Yu for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
8940b20fcecSChao Yu struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
8950b20fcecSChao Yu struct list_head *head = &io->bio_list;
8960b20fcecSChao Yu struct bio_entry *be;
8970b20fcecSChao Yu
8980b20fcecSChao Yu if (list_empty(head))
8990b20fcecSChao Yu continue;
9000b20fcecSChao Yu
901e4544b63STim Murray f2fs_down_read(&io->bio_list_lock);
9020b20fcecSChao Yu list_for_each_entry(be, head, list) {
9030b20fcecSChao Yu if (target)
9040b20fcecSChao Yu found = (target == be->bio);
9050b20fcecSChao Yu else
9060b20fcecSChao Yu found = __has_merged_page(be->bio, NULL,
9070b20fcecSChao Yu page, 0);
9080b20fcecSChao Yu if (found)
9090b20fcecSChao Yu break;
9100b20fcecSChao Yu }
911e4544b63STim Murray f2fs_up_read(&io->bio_list_lock);
9120b20fcecSChao Yu
9130b20fcecSChao Yu if (!found)
9140b20fcecSChao Yu continue;
9150b20fcecSChao Yu
9160b20fcecSChao Yu found = false;
9170b20fcecSChao Yu
918e4544b63STim Murray f2fs_down_write(&io->bio_list_lock);
9190b20fcecSChao Yu list_for_each_entry(be, head, list) {
9200b20fcecSChao Yu if (target)
9210b20fcecSChao Yu found = (target == be->bio);
9220b20fcecSChao Yu else
9230b20fcecSChao Yu found = __has_merged_page(be->bio, NULL,
9240b20fcecSChao Yu page, 0);
9250b20fcecSChao Yu if (found) {
9260b20fcecSChao Yu target = be->bio;
9270b20fcecSChao Yu del_bio_entry(be);
9280b20fcecSChao Yu break;
9290b20fcecSChao Yu }
9300b20fcecSChao Yu }
931e4544b63STim Murray f2fs_up_write(&io->bio_list_lock);
9320b20fcecSChao Yu }
9330b20fcecSChao Yu
9340b20fcecSChao Yu if (found)
935bc29835aSChristoph Hellwig f2fs_submit_write_bio(sbi, target, DATA);
9360b20fcecSChao Yu if (bio && *bio) {
9370b20fcecSChao Yu bio_put(*bio);
9380b20fcecSChao Yu *bio = NULL;
9390b20fcecSChao Yu }
9400b20fcecSChao Yu }
9410b20fcecSChao Yu
f2fs_merge_page_bio(struct f2fs_io_info * fio)9428648de2cSChao Yu int f2fs_merge_page_bio(struct f2fs_io_info *fio)
9438648de2cSChao Yu {
9448648de2cSChao Yu struct bio *bio = *fio->bio;
9458648de2cSChao Yu struct page *page = fio->encrypted_page ?
9468648de2cSChao Yu fio->encrypted_page : fio->page;
9478648de2cSChao Yu
9488648de2cSChao Yu if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
94995fa90c9SChao Yu __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC)) {
95095fa90c9SChao Yu f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
95110f966bbSChao Yu return -EFSCORRUPTED;
95295fa90c9SChao Yu }
9538648de2cSChao Yu
9548648de2cSChao Yu trace_f2fs_submit_page_bio(page, fio);
9558648de2cSChao Yu
9568896cbdfSChao Yu if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
9570b20fcecSChao Yu fio->new_blkaddr))
9580b20fcecSChao Yu f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
9598648de2cSChao Yu alloc_new:
9608648de2cSChao Yu if (!bio) {
961a8affc03SChristoph Hellwig bio = __bio_alloc(fio, BIO_MAX_VECS);
96227aacd28SSatya Tangirala f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
96327aacd28SSatya Tangirala fio->page->index, fio, GFP_NOIO);
9648648de2cSChao Yu
9650b20fcecSChao Yu add_bio_entry(fio->sbi, bio, page, fio->temp);
9660b20fcecSChao Yu } else {
96727aacd28SSatya Tangirala if (add_ipu_page(fio, &bio, page))
9688648de2cSChao Yu goto alloc_new;
9698648de2cSChao Yu }
9708648de2cSChao Yu
9718648de2cSChao Yu if (fio->io_wbc)
972844545c5SEric Biggers wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
9738648de2cSChao Yu
97482704e59SChao Yu inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));
9758648de2cSChao Yu
9768648de2cSChao Yu *fio->last_block = fio->new_blkaddr;
9778648de2cSChao Yu *fio->bio = bio;
9788648de2cSChao Yu
9798648de2cSChao Yu return 0;
9808648de2cSChao Yu }
9818648de2cSChao Yu
#ifdef CONFIG_BLK_DEV_ZONED
/*
 * Return true if @blkaddr is the last block of a sequential zone on a
 * host-managed zoned device, after mapping a multi-device address onto
 * its target device.
 */
static bool is_end_zone_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int devi = 0;

	if (f2fs_is_multi_device(sbi)) {
		devi = f2fs_target_device_index(sbi, blkaddr);
		if (blkaddr < FDEV(devi).start_blk ||
		    blkaddr > FDEV(devi).end_blk) {
			f2fs_err(sbi, "Invalid block %x", blkaddr);
			return false;
		}
		blkaddr -= FDEV(devi).start_blk;
	}
	return bdev_zoned_model(FDEV(devi).bdev) == BLK_ZONED_HM &&
		f2fs_blkz_is_seq(sbi, devi, blkaddr) &&
		(blkaddr % sbi->blocks_per_blkz == sbi->blocks_per_blkz - 1);
}
#endif
1001e067dc3cSDaeho Jeong
f2fs_submit_page_write(struct f2fs_io_info * fio)1002fe16efe6SChao Yu void f2fs_submit_page_write(struct f2fs_io_info *fio)
100393dfe2acSJaegeuk Kim {
100405ca3632SJaegeuk Kim struct f2fs_sb_info *sbi = fio->sbi;
1005458e6197SJaegeuk Kim enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
1006a912b54dSJaegeuk Kim struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
10074375a336SJaegeuk Kim struct page *bio_page;
100882704e59SChao Yu enum count_type type;
100993dfe2acSJaegeuk Kim
1010b9109b0eSJaegeuk Kim f2fs_bug_on(sbi, is_read_io(fio->op));
101193dfe2acSJaegeuk Kim
1012e4544b63STim Murray f2fs_down_write(&io->io_rwsem);
101310b19ea7SChao Yu next:
1014e067dc3cSDaeho Jeong #ifdef CONFIG_BLK_DEV_ZONED
1015e067dc3cSDaeho Jeong if (f2fs_sb_has_blkzoned(sbi) && btype < META && io->zone_pending_bio) {
1016e067dc3cSDaeho Jeong wait_for_completion_io(&io->zone_wait);
1017e067dc3cSDaeho Jeong bio_put(io->zone_pending_bio);
1018e067dc3cSDaeho Jeong io->zone_pending_bio = NULL;
1019e067dc3cSDaeho Jeong io->bi_private = NULL;
1020e067dc3cSDaeho Jeong }
1021e067dc3cSDaeho Jeong #endif
1022e067dc3cSDaeho Jeong
1023fb830fc5SChao Yu if (fio->in_list) {
1024fb830fc5SChao Yu spin_lock(&io->io_lock);
1025fb830fc5SChao Yu if (list_empty(&io->io_list)) {
1026fb830fc5SChao Yu spin_unlock(&io->io_lock);
1027fe16efe6SChao Yu goto out;
1028fb830fc5SChao Yu }
1029fb830fc5SChao Yu fio = list_first_entry(&io->io_list,
1030fb830fc5SChao Yu struct f2fs_io_info, list);
1031fb830fc5SChao Yu list_del(&fio->list);
1032fb830fc5SChao Yu spin_unlock(&io->io_lock);
1033fb830fc5SChao Yu }
103493dfe2acSJaegeuk Kim
103593770ab7SChao Yu verify_fio_blkaddr(fio);
103693dfe2acSJaegeuk Kim
10374c8ff709SChao Yu if (fio->encrypted_page)
10384c8ff709SChao Yu bio_page = fio->encrypted_page;
10394c8ff709SChao Yu else if (fio->compressed_page)
10404c8ff709SChao Yu bio_page = fio->compressed_page;
10414c8ff709SChao Yu else
10424c8ff709SChao Yu bio_page = fio->page;
104336951b38SChao Yu
1044ebf7c522SThomas Meyer /* set submitted = true as a return value */
10452eae077eSChao Yu fio->submitted = 1;
1046d68f735bSJaegeuk Kim
104782704e59SChao Yu type = WB_DATA_TYPE(bio_page, fio->compressed_page);
104882704e59SChao Yu inc_page_count(sbi, type);
104936951b38SChao Yu
105027aacd28SSatya Tangirala if (io->bio &&
105127aacd28SSatya Tangirala (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
105227aacd28SSatya Tangirala fio->new_blkaddr) ||
105327aacd28SSatya Tangirala !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
105427aacd28SSatya Tangirala bio_page->index, fio)))
1055458e6197SJaegeuk Kim __submit_merged_bio(io);
105693dfe2acSJaegeuk Kim alloc_new:
105793dfe2acSJaegeuk Kim if (io->bio == NULL) {
10588223ecc4SChao Yu if (F2FS_IO_ALIGNED(sbi) &&
10598223ecc4SChao Yu (fio->type == DATA || fio->type == NODE) &&
10600a595ebaSJaegeuk Kim fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
106182704e59SChao Yu dec_page_count(sbi, WB_DATA_TYPE(bio_page,
106282704e59SChao Yu fio->compressed_page));
10632eae077eSChao Yu fio->retry = 1;
1064fe16efe6SChao Yu goto skip;
10650a595ebaSJaegeuk Kim }
1066a8affc03SChristoph Hellwig io->bio = __bio_alloc(fio, BIO_MAX_VECS);
106727aacd28SSatya Tangirala f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
106827aacd28SSatya Tangirala bio_page->index, fio, GFP_NOIO);
1069458e6197SJaegeuk Kim io->fio = *fio;
107093dfe2acSJaegeuk Kim }
107193dfe2acSJaegeuk Kim
1072a912b54dSJaegeuk Kim if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
1073458e6197SJaegeuk Kim __submit_merged_bio(io);
107493dfe2acSJaegeuk Kim goto alloc_new;
107593dfe2acSJaegeuk Kim }
107693dfe2acSJaegeuk Kim
1077578c6478SYufen Yu if (fio->io_wbc)
1078844545c5SEric Biggers wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
1079578c6478SYufen Yu
10807a9d7548SChao Yu io->last_block_in_bio = fio->new_blkaddr;
1081fb830fc5SChao Yu
1082fb830fc5SChao Yu trace_f2fs_submit_page_write(fio->page, fio);
1083e067dc3cSDaeho Jeong #ifdef CONFIG_BLK_DEV_ZONED
1084e067dc3cSDaeho Jeong if (f2fs_sb_has_blkzoned(sbi) && btype < META &&
1085e067dc3cSDaeho Jeong is_end_zone_blkaddr(sbi, fio->new_blkaddr)) {
1086e067dc3cSDaeho Jeong bio_get(io->bio);
1087e067dc3cSDaeho Jeong reinit_completion(&io->zone_wait);
1088e067dc3cSDaeho Jeong io->bi_private = io->bio->bi_private;
1089e067dc3cSDaeho Jeong io->bio->bi_private = io;
1090e067dc3cSDaeho Jeong io->bio->bi_end_io = f2fs_zone_write_end_io;
1091e067dc3cSDaeho Jeong io->zone_pending_bio = io->bio;
1092e067dc3cSDaeho Jeong __submit_merged_bio(io);
1093e067dc3cSDaeho Jeong }
1094e067dc3cSDaeho Jeong #endif
10958e2ea8b0SWenjie Qi skip:
10968e2ea8b0SWenjie Qi if (fio->in_list)
10978e2ea8b0SWenjie Qi goto next;
10988e2ea8b0SWenjie Qi out:
10994354994fSDaniel Rosenberg if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
110000e09c0bSChao Yu !f2fs_is_checkpoint_ready(sbi))
11015ce80586SJaegeuk Kim __submit_merged_bio(io);
1102e4544b63STim Murray f2fs_up_write(&io->io_rwsem);
110393dfe2acSJaegeuk Kim }
110493dfe2acSJaegeuk Kim
/*
 * Allocate a read bio targeting @blkaddr with room for @nr_pages pages,
 * attaching the crypto context and, when post-read processing (decrypt,
 * verity, decompress) is needed, a bio_post_read_ctx as bi_private.
 * Returns the bio or ERR_PTR(-ENOMEM).
 */
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
				      unsigned nr_pages, blk_opf_t op_flag,
				      pgoff_t first_idx, bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;
	struct bio_post_read_ctx *ctx = NULL;
	unsigned int post_read_steps = 0;
	sector_t sector;
	struct block_device *bdev = f2fs_target_device(sbi, blkaddr, &sector);

	bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages),
			       REQ_OP_READ | op_flag,
			       for_write ? GFP_NOIO : GFP_KERNEL, &f2fs_bioset);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio->bi_iter.bi_sector = sector;
	f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
	bio->bi_end_io = f2fs_read_end_io;

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= STEP_DECRYPT;

	if (f2fs_need_verity(inode, first_idx))
		post_read_steps |= STEP_VERITY;

	/*
	 * STEP_DECOMPRESS is handled specially, since a compressed file might
	 * contain both compressed and uncompressed clusters.  We'll allocate a
	 * bio_post_read_ctx if the file is compressed, but the caller is
	 * responsible for enabling STEP_DECOMPRESS if it's actually needed.
	 */

	if (post_read_steps || f2fs_compressed_file(inode)) {
		/* Due to the mempool, this never fails. */
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		ctx->bio = bio;
		ctx->sbi = sbi;
		ctx->enabled_steps = post_read_steps;
		ctx->fs_blkaddr = blkaddr;
		ctx->decompression_attempted = false;
		bio->bi_private = ctx;
	}
	iostat_alloc_and_bind_ctx(sbi, bio, ctx);

	return bio;
}
115213ba41e3SJaegeuk Kim
115313ba41e3SJaegeuk Kim /* This can handle encryption stuffs */
f2fs_submit_page_read(struct inode * inode,struct page * page,block_t blkaddr,blk_opf_t op_flags,bool for_write)115413ba41e3SJaegeuk Kim static int f2fs_submit_page_read(struct inode *inode, struct page *page,
11557649c873SBart Van Assche block_t blkaddr, blk_opf_t op_flags,
11567649c873SBart Van Assche bool for_write)
115713ba41e3SJaegeuk Kim {
115893770ab7SChao Yu struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
115993770ab7SChao Yu struct bio *bio;
116013ba41e3SJaegeuk Kim
1161b7973091SJia Yang bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
11627f59b277SEric Biggers page->index, for_write);
116313ba41e3SJaegeuk Kim if (IS_ERR(bio))
116413ba41e3SJaegeuk Kim return PTR_ERR(bio);
116513ba41e3SJaegeuk Kim
11660ded69f6SJaegeuk Kim /* wait for GCed page writeback via META_MAPPING */
11670ded69f6SJaegeuk Kim f2fs_wait_on_block_writeback(inode, blkaddr);
11680ded69f6SJaegeuk Kim
116913ba41e3SJaegeuk Kim if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
11705118697fSChao Yu iostat_update_and_unbind_ctx(bio);
11715118697fSChao Yu if (bio->bi_private)
11725118697fSChao Yu mempool_free(bio->bi_private, bio_post_read_ctx_pool);
117313ba41e3SJaegeuk Kim bio_put(bio);
117413ba41e3SJaegeuk Kim return -EFAULT;
117513ba41e3SJaegeuk Kim }
117693770ab7SChao Yu inc_page_count(sbi, F2FS_RD_DATA);
117734a23525SChao Yu f2fs_update_iostat(sbi, NULL, FS_DATA_READ_IO, F2FS_BLKSIZE);
1178bc29835aSChristoph Hellwig f2fs_submit_read_bio(sbi, bio, DATA);
117913ba41e3SJaegeuk Kim return 0;
118013ba41e3SJaegeuk Kim }
118113ba41e3SJaegeuk Kim
/* Record @blkaddr in the dnode and in the on-disk node page slot. */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	__le32 *addr = get_dnode_addr(dn->inode, dn->node_page);

	dn->data_blkaddr = blkaddr;
	addr[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}
118946008c6dSChao Yu
119093dfe2acSJaegeuk Kim /*
1191eb47b800SJaegeuk Kim * Lock ordering for the change of data block address:
1192eb47b800SJaegeuk Kim * ->data_page
1193eb47b800SJaegeuk Kim * ->node_page
1194eb47b800SJaegeuk Kim * update block addresses in the node page
1195eb47b800SJaegeuk Kim */
f2fs_set_data_blkaddr(struct dnode_of_data * dn,block_t blkaddr)1196eb6d30bcSChao Yu void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
1197eb47b800SJaegeuk Kim {
1198bae0ee7aSChao Yu f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
1199eb6d30bcSChao Yu __set_data_blkaddr(dn, blkaddr);
120046008c6dSChao Yu if (set_page_dirty(dn->node_page))
120193bae099SJaegeuk Kim dn->node_changed = true;
1202eb47b800SJaegeuk Kim }
1203eb47b800SJaegeuk Kim
/* Update the block address and keep the read extent cache in sync. */
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	f2fs_set_data_blkaddr(dn, blkaddr);
	f2fs_update_read_extent_cache(dn);
}
1209f28b3434SChao Yu
121046008c6dSChao Yu /* dn->ofs_in_node will be returned with up-to-date last block pointer */
f2fs_reserve_new_blocks(struct dnode_of_data * dn,blkcnt_t count)12114d57b86dSChao Yu int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
1212eb47b800SJaegeuk Kim {
12134081363fSJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
12140abd675eSChao Yu int err;
1215eb47b800SJaegeuk Kim
121646008c6dSChao Yu if (!count)
121746008c6dSChao Yu return 0;
121846008c6dSChao Yu
121991942321SJaegeuk Kim if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1220eb47b800SJaegeuk Kim return -EPERM;
1221bc1e3992SChao Yu err = inc_valid_block_count(sbi, dn->inode, &count, true);
1222bc1e3992SChao Yu if (unlikely(err))
12230abd675eSChao Yu return err;
1224eb47b800SJaegeuk Kim
122546008c6dSChao Yu trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
122646008c6dSChao Yu dn->ofs_in_node, count);
1227c01e2853SNamjae Jeon
1228bae0ee7aSChao Yu f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
122946008c6dSChao Yu
123046008c6dSChao Yu for (; count > 0; dn->ofs_in_node++) {
1231a2ced1ceSChao Yu block_t blkaddr = f2fs_data_blkaddr(dn);
12325f029c04SYi Zhuang
123346008c6dSChao Yu if (blkaddr == NULL_ADDR) {
1234eb6d30bcSChao Yu __set_data_blkaddr(dn, NEW_ADDR);
123546008c6dSChao Yu count--;
123646008c6dSChao Yu }
123746008c6dSChao Yu }
123846008c6dSChao Yu
123946008c6dSChao Yu if (set_page_dirty(dn->node_page))
124046008c6dSChao Yu dn->node_changed = true;
1241eb47b800SJaegeuk Kim return 0;
1242eb47b800SJaegeuk Kim }
1243eb47b800SJaegeuk Kim
124446008c6dSChao Yu /* Should keep dn->ofs_in_node unchanged */
f2fs_reserve_new_block(struct dnode_of_data * dn)12454d57b86dSChao Yu int f2fs_reserve_new_block(struct dnode_of_data *dn)
124646008c6dSChao Yu {
124746008c6dSChao Yu unsigned int ofs_in_node = dn->ofs_in_node;
124846008c6dSChao Yu int ret;
124946008c6dSChao Yu
12504d57b86dSChao Yu ret = f2fs_reserve_new_blocks(dn, 1);
125146008c6dSChao Yu dn->ofs_in_node = ofs_in_node;
125246008c6dSChao Yu return ret;
125346008c6dSChao Yu }
125446008c6dSChao Yu
/*
 * Look up the dnode for @index and reserve a new block there if the slot
 * is unallocated.  The dnode is released on error, or when this call
 * acquired the inode page itself.
 */
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = f2fs_reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}
1270b600965cSHuajun Li
/*
 * Grab the page cache page of @inode at @index and make sure its data is
 * read in.  On success the page is returned either unlocked (when it was
 * already uptodate, or was a NEW_ADDR page zero-filled here) or still
 * locked with a read in flight (caller must wait/lock — see
 * f2fs_find_data_page()/f2fs_get_lock_data_page()).  On failure an
 * ERR_PTR is returned and, for -ENOENT holes, *next_pgofs (if non-NULL)
 * is set to the next offset worth probing.
 */
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
			blk_opf_t op_flags, bool for_write,
			pgoff_t *next_pgofs)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	/* fast path: block address is cached in the read extent tree */
	if (f2fs_lookup_read_extent_cache_block(inode, index,
						&dn.data_blkaddr)) {
		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
					DATA_GENERIC_ENHANCE_READ)) {
			err = -EFSCORRUPTED;
			f2fs_handle_error(F2FS_I_SB(inode),
						ERROR_INVALID_BLKADDR);
			goto put_err;
		}
		goto got_it;
	}

	/* slow path: walk the node tree to find the block address */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		/* no dnode: tell the caller where to probe next */
		if (err == -ENOENT && next_pgofs)
			*next_pgofs = f2fs_get_next_page_offset(&dn, index);
		goto put_err;
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		/* hole: the very next offset may still be mapped */
		err = -ENOENT;
		if (next_pgofs)
			*next_pgofs = index + 1;
		goto put_err;
	}
	if (dn.data_blkaddr != NEW_ADDR &&
			!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
						dn.data_blkaddr,
						DATA_GENERIC_ENHANCE)) {
		err = -EFSCORRUPTED;
		f2fs_handle_error(F2FS_I_SB(inode),
				ERROR_INVALID_BLKADDR);
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such the case, its blkaddr can be remained as NEW_ADDR.
	 * see, f2fs_add_link -> f2fs_get_new_data_page ->
	 * f2fs_init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	/* submit the read; page stays locked until IO completion */
	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
						op_flags, for_write);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
1351393ff91fSJaegeuk Kim
/*
 * Return an uptodate, unlocked page of @inode at @index, reading it in
 * if necessary.  *next_pgofs (if non-NULL) is filled on -ENOENT holes by
 * f2fs_get_read_data_page().
 */
struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
					pgoff_t *next_pgofs)
{
	struct page *page = find_get_page(inode->i_mapping, index);

	/* cache hit with valid contents needs no IO at all */
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = f2fs_get_read_data_page(inode, index, 0, false, next_pgofs);
	if (IS_ERR(page) || PageUptodate(page))
		return page;

	/* a read was submitted; IO completion unlocks the page */
	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		page = ERR_PTR(-EIO);
	}
	return page;
}
137743f3eae1SJaegeuk Kim
137843f3eae1SJaegeuk Kim /*
137943f3eae1SJaegeuk Kim * If it tries to access a hole, return an error.
138043f3eae1SJaegeuk Kim * Because, the callers, functions in dir.c and GC, should be able to know
138143f3eae1SJaegeuk Kim * whether this page exists or not.
138243f3eae1SJaegeuk Kim */
f2fs_get_lock_data_page(struct inode * inode,pgoff_t index,bool for_write)13834d57b86dSChao Yu struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
1384a56c7c6fSJaegeuk Kim bool for_write)
138543f3eae1SJaegeuk Kim {
138643f3eae1SJaegeuk Kim struct address_space *mapping = inode->i_mapping;
138743f3eae1SJaegeuk Kim struct page *page;
1388d2d9bb3bSJaegeuk Kim
138959237a21SChao Yu page = f2fs_get_read_data_page(inode, index, 0, for_write, NULL);
139043f3eae1SJaegeuk Kim if (IS_ERR(page))
139143f3eae1SJaegeuk Kim return page;
139243f3eae1SJaegeuk Kim
139343f3eae1SJaegeuk Kim /* wait for read completion */
1394393ff91fSJaegeuk Kim lock_page(page);
1395d2d9bb3bSJaegeuk Kim if (unlikely(page->mapping != mapping || !PageUptodate(page))) {
13961563ac75SChao Yu f2fs_put_page(page, 1);
13971563ac75SChao Yu return ERR_PTR(-EIO);
13981563ac75SChao Yu }
1399eb47b800SJaegeuk Kim return page;
1400eb47b800SJaegeuk Kim }
1401eb47b800SJaegeuk Kim
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir, and if any error occur,
 * ipage should be released by this function.
 */
struct page *f2fs_get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occur.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	/* ipage (if any) is consumed by the dnode from here on */
	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		/* f2fs_reserve_block() already released the dnode/ipage */
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		/* freshly reserved block: hand back a zeroed page */
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = f2fs_get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	/* extend i_size to cover the new page when requested */
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}
1460eb47b800SJaegeuk Kim
/*
 * Allocate a new physical block for the data slot at dn->ofs_in_node
 * (out-of-place update) and record the new address in the dnode.
 * @seg_type selects the target segment type for the allocation.
 */
static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	block_t old_blkaddr;
	blkcnt_t count = 1;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
	if (err)
		return err;

	dn->data_blkaddr = f2fs_data_blkaddr(dn);
	if (dn->data_blkaddr == NULL_ADDR) {
		/* brand-new block: account it before allocating */
		err = inc_valid_block_count(sbi, dn->inode, &count, true);
		if (unlikely(err))
			return err;
	}

	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
	old_blkaddr = dn->data_blkaddr;
	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
				&sum, seg_type, NULL);
	/* drop stale internal-cache state for the replaced block, if any */
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
		f2fs_invalidate_internal_cache(sbi, old_blkaddr);

	f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
	return 0;
}
1494bfad7c2dSJaegeuk Kim
f2fs_map_lock(struct f2fs_sb_info * sbi,int flag)14952f51ade9SChristoph Hellwig static void f2fs_map_lock(struct f2fs_sb_info *sbi, int flag)
149659c9081bSYunlei He {
14972f51ade9SChristoph Hellwig if (flag == F2FS_GET_BLOCK_PRE_AIO)
1498e4544b63STim Murray f2fs_down_read(&sbi->node_change);
149959c9081bSYunlei He else
150059c9081bSYunlei He f2fs_lock_op(sbi);
15012f51ade9SChristoph Hellwig }
15022f51ade9SChristoph Hellwig
f2fs_map_unlock(struct f2fs_sb_info * sbi,int flag)15032f51ade9SChristoph Hellwig static void f2fs_map_unlock(struct f2fs_sb_info *sbi, int flag)
15042f51ade9SChristoph Hellwig {
15052f51ade9SChristoph Hellwig if (flag == F2FS_GET_BLOCK_PRE_AIO)
15062f51ade9SChristoph Hellwig f2fs_up_read(&sbi->node_change);
150759c9081bSYunlei He else
150859c9081bSYunlei He f2fs_unlock_op(sbi);
150959c9081bSYunlei He }
151059c9081bSYunlei He
/*
 * Under the PRE_AIO mapping lock, make sure a block exists at @index:
 * use the cached address if the extent cache has one, otherwise reserve
 * a new block.
 */
int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	bool cached;
	int err = 0;

	f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
	cached = f2fs_lookup_read_extent_cache_block(dn->inode, index,
						&dn->data_blkaddr);
	if (!cached)
		err = f2fs_reserve_block(dn, index);
	f2fs_map_unlock(sbi, F2FS_GET_BLOCK_PRE_AIO);

	return err;
}
1524cf342d3bSChristoph Hellwig
/* Handle a mapping request that found no dnode (a hole in the node tree). */
static int f2fs_map_no_dnode(struct inode *inode,
		struct f2fs_map_blocks *map, struct dnode_of_data *dn,
		pgoff_t pgoff)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/*
	 * There is one exceptional case that read_node_page() may return
	 * -ENOENT due to filesystem has been shutdown or cp_error; writers
	 * must see -EIO in that case.
	 */
	if (map->m_may_create &&
	    (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) || f2fs_cp_error(sbi)))
		return -EIO;

	/* report where a mapping might next exist so callers can skip ahead */
	if (map->m_next_pgofs)
		*map->m_next_pgofs = f2fs_get_next_page_offset(dn, pgoff);
	if (map->m_next_extent)
		*map->m_next_extent = f2fs_get_next_page_offset(dn, pgoff);
	return 0;
}
1546817c968bSChristoph Hellwig
/*
 * Try to satisfy a read-only mapping request entirely from the read
 * extent cache.  Returns true and fills @map on a cache hit, false when
 * the caller must walk the node tree instead.
 */
static bool f2fs_map_blocks_cached(struct inode *inode,
		struct f2fs_map_blocks *map, int flag)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int maxblocks = map->m_len;
	pgoff_t pgoff = (pgoff_t)map->m_lblk;
	struct extent_info ei = {};

	if (!f2fs_lookup_read_extent_cache(inode, pgoff, &ei))
		return false;

	/* clamp the mapped range to what the cached extent covers */
	map->m_pblk = ei.blk + pgoff - ei.fofs;
	map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgoff);
	map->m_flags = F2FS_MAP_MAPPED;
	if (map->m_next_extent)
		*map->m_next_extent = pgoff + map->m_len;

	/* for hardware encryption, but to avoid potential issue in future */
	if (flag == F2FS_GET_BLOCK_DIO)
		f2fs_wait_on_block_writeback_range(inode,
					map->m_pblk, map->m_len);

	if (f2fs_allow_multi_device_dio(sbi, flag)) {
		/* translate to device-relative block and clamp to the device */
		int bidx = f2fs_target_device_index(sbi, map->m_pblk);
		struct f2fs_dev_info *dev = &sbi->devs[bidx];

		map->m_bdev = dev->bdev;
		map->m_pblk -= dev->start_blk;
		map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk);
	} else {
		map->m_bdev = inode->i_sb->s_bdev;
	}
	return true;
}
158159b802e5SJaegeuk Kim
/*
 * f2fs_map_blocks() tries to find or build mapping relationship which
 * maps continuous logical blocks to physical blocks, and return such
 * info via f2fs_map_blocks structure.
 *
 * Iterates dnode by dnode (next_dnode) and block by block within each
 * dnode (next_block), extending map->m_len while physical blocks stay
 * contiguous.  @flag (F2FS_GET_BLOCK_*) selects the caller's policy for
 * holes and allocation; map->m_may_create enables block allocation.
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	block_t blkaddr;
	unsigned int start_pgofs;
	int bidx = 0;
	bool is_hole;

	if (!maxblocks)
		return 0;

	/* read-only requests may be answered from the extent cache */
	if (!map->m_may_create && f2fs_map_blocks_cached(inode, map, flag))
		goto out;

	map->m_bdev = inode->i_sb->s_bdev;
	map->m_multidev_dio =
		f2fs_allow_multi_device_dio(F2FS_I_SB(inode), flag);

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs =	(pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

next_dnode:
	if (map->m_may_create)
		f2fs_map_lock(sbi, flag);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT)
			err = f2fs_map_no_dnode(inode, map, &dn, pgofs);
		goto unlock_out;
	}

	start_pgofs = pgofs;
	prealloc = 0;
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = f2fs_data_blkaddr(&dn);
	is_hole = !__is_valid_data_blkaddr(blkaddr);
	if (!is_hole &&
	    !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
		err = -EFSCORRUPTED;
		f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
		goto sync_out;
	}

	/* use out-place-update for direct IO under LFS mode */
	if (map->m_may_create &&
	    (is_hole || (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO))) {
		if (unlikely(f2fs_cp_error(sbi))) {
			err = -EIO;
			goto sync_out;
		}

		switch (flag) {
		case F2FS_GET_BLOCK_PRE_AIO:
			/* defer allocation: count holes, reserve in batch below */
			if (blkaddr == NULL_ADDR) {
				prealloc++;
				last_ofs_in_node = dn.ofs_in_node;
			}
			break;
		case F2FS_GET_BLOCK_PRE_DIO:
		case F2FS_GET_BLOCK_DIO:
			err = __allocate_data_block(&dn, map->m_seg_type);
			if (err)
				goto sync_out;
			if (flag == F2FS_GET_BLOCK_PRE_DIO)
				file_need_truncate(inode);
			set_inode_flag(inode, FI_APPEND_WRITE);
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EIO;
			goto sync_out;
		}

		blkaddr = dn.data_blkaddr;
		if (is_hole)
			map->m_flags |= F2FS_MAP_NEW;
	} else if (is_hole) {
		if (f2fs_compressed_file(inode) &&
		    f2fs_sanity_check_cluster(&dn) &&
		    (flag != F2FS_GET_BLOCK_FIEMAP ||
		     IS_ENABLED(CONFIG_F2FS_CHECK_FS))) {
			err = -EFSCORRUPTED;
			f2fs_handle_error(sbi,
					ERROR_CORRUPTED_CLUSTER);
			goto sync_out;
		}

		/* no allocation allowed: hole handling depends on the caller */
		switch (flag) {
		case F2FS_GET_BLOCK_PRECACHE:
			goto sync_out;
		case F2FS_GET_BLOCK_BMAP:
			map->m_pblk = 0;
			goto sync_out;
		case F2FS_GET_BLOCK_FIEMAP:
			if (blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
			break;
		default:
			/* for defragment case */
			if (map->m_next_pgofs)
				*map->m_next_pgofs = pgofs + 1;
			goto sync_out;
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_multidev_dio)
		bidx = f2fs_target_device_index(sbi, blkaddr);

	if (map->m_len == 0) {
		/* reserved delalloc block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_DELALLOC;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;

		if (map->m_multidev_dio)
			map->m_bdev = FDEV(bidx).bdev;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		/* physically contiguous: extend the current extent */
		if (map->m_multidev_dio && map->m_bdev != FDEV(bidx).bdev)
			goto sync_out;
		ofs++;
		map->m_len++;
	} else {
		/* discontiguous: stop and report what we have */
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = f2fs_reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	/* current dnode exhausted; flush cache state and move to the next */
	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_read_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
	}

	f2fs_put_dnode(&dn);

	if (map->m_may_create) {
		f2fs_map_unlock(sbi, flag);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
	goto next_dnode;

sync_out:

	if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) {
		/*
		 * for hardware encryption, but to avoid potential issue
		 * in future
		 */
		f2fs_wait_on_block_writeback_range(inode,
						map->m_pblk, map->m_len);

		if (map->m_multidev_dio) {
			/* convert to device-relative addressing for DIO */
			block_t blk_addr = map->m_pblk;

			bidx = f2fs_target_device_index(sbi, map->m_pblk);

			map->m_bdev = FDEV(bidx).bdev;
			map->m_pblk -= FDEV(bidx).start_blk;

			if (map->m_may_create)
				f2fs_update_device_state(sbi, inode->i_ino,
							blk_addr, map->m_len);

			f2fs_bug_on(sbi, blk_addr + map->m_len >
						FDEV(bidx).end_blk + 1);
		}
	}

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_read_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + 1;
	}
	f2fs_put_dnode(&dn);
unlock_out:
	if (map->m_may_create) {
		f2fs_map_unlock(sbi, flag);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
out:
	trace_f2fs_map_blocks(inode, map, flag, err);
	return err;
}
1835eb47b800SJaegeuk Kim
f2fs_overwrite_io(struct inode * inode,loff_t pos,size_t len)1836b91050a8SHyunchul Lee bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1837b91050a8SHyunchul Lee {
1838b91050a8SHyunchul Lee struct f2fs_map_blocks map;
1839b91050a8SHyunchul Lee block_t last_lblk;
1840b91050a8SHyunchul Lee int err;
1841b91050a8SHyunchul Lee
1842b91050a8SHyunchul Lee if (pos + len > i_size_read(inode))
1843b91050a8SHyunchul Lee return false;
1844b91050a8SHyunchul Lee
1845b91050a8SHyunchul Lee map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1846b91050a8SHyunchul Lee map.m_next_pgofs = NULL;
1847b91050a8SHyunchul Lee map.m_next_extent = NULL;
1848b91050a8SHyunchul Lee map.m_seg_type = NO_CHECK_TYPE;
1849f4f0b677SJia Zhu map.m_may_create = false;
1850b91050a8SHyunchul Lee last_lblk = F2FS_BLK_ALIGN(pos + len);
1851b91050a8SHyunchul Lee
1852b91050a8SHyunchul Lee while (map.m_lblk < last_lblk) {
1853b91050a8SHyunchul Lee map.m_len = last_lblk - map.m_lblk;
1854cd8fc522SChristoph Hellwig err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
1855b91050a8SHyunchul Lee if (err || map.m_len == 0)
1856b91050a8SHyunchul Lee return false;
1857b91050a8SHyunchul Lee map.m_lblk += map.m_len;
1858b91050a8SHyunchul Lee }
1859b91050a8SHyunchul Lee return true;
1860b91050a8SHyunchul Lee }
1861b91050a8SHyunchul Lee
/* Convert a byte count to a block count using the inode's block shift. */
static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
{
	return bytes >> inode->i_blkbits;
}
186643b9d4b4SJaegeuk Kim
/* Convert a block count to a byte count using the inode's block shift. */
static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
{
	return blks << inode->i_blkbits;
}
187143b9d4b4SJaegeuk Kim
/*
 * Report the inode's xattr storage to fiemap: the inline xattr area in
 * the inode block (if any) and/or the dedicated xattr node block
 * (i_xattr_nid), whichever exist.
 */
static int f2fs_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;
	struct node_info ni;
	__u64 phys = 0, len;
	__u32 flags;
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* inline xattrs live at the tail of the inode block itself */
	if (f2fs_has_inline_xattr(inode)) {
		int offset;

		page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
						inode->i_ino, false);
		if (!page)
			return -ENOMEM;

		err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
		if (err) {
			f2fs_put_page(page, 1);
			return err;
		}

		/* byte offset of the inline xattr area within the inode block */
		phys = blks_to_bytes(inode, ni.blk_addr);
		offset = offsetof(struct f2fs_inode, i_addr) +
					sizeof(__le32) * (DEF_ADDRS_PER_INODE -
					get_inline_xattr_addrs(inode));

		phys += offset;
		len = inline_xattr_size(inode);

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;

		/* no separate xattr node -> this is the last extent */
		if (!xnid)
			flags |= FIEMAP_EXTENT_LAST;

		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
		trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
		if (err)
			return err;
	}

	/* the separate xattr node block, emitted via the tail path below */
	if (xnid) {
		page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
		if (!page)
			return -ENOMEM;

		err = f2fs_get_node_info(sbi, xnid, &ni, false);
		if (err) {
			f2fs_put_page(page, 1);
			return err;
		}

		phys = blks_to_bytes(inode, ni.blk_addr);
		len = inode->i_sb->s_blocksize;

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_LAST;
	}

	if (phys) {
		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
		trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
	}

	/* fiemap_fill_next_extent() returns 1 when the extent array is full */
	return (err < 0 ? err : 0);
}
1944442a9dbdSChao Yu
max_inode_blocks(struct inode * inode)1945bf38fbadSChao Yu static loff_t max_inode_blocks(struct inode *inode)
1946bf38fbadSChao Yu {
1947bf38fbadSChao Yu loff_t result = ADDRS_PER_INODE(inode);
1948bf38fbadSChao Yu loff_t leaf_count = ADDRS_PER_BLOCK(inode);
1949bf38fbadSChao Yu
1950bf38fbadSChao Yu /* two direct node blocks */
1951bf38fbadSChao Yu result += (leaf_count * 2);
1952bf38fbadSChao Yu
1953bf38fbadSChao Yu /* two indirect node blocks */
1954bf38fbadSChao Yu leaf_count *= NIDS_PER_BLOCK;
1955bf38fbadSChao Yu result += (leaf_count * 2);
1956bf38fbadSChao Yu
1957bf38fbadSChao Yu /* one double indirect node block */
1958bf38fbadSChao Yu leaf_count *= NIDS_PER_BLOCK;
1959bf38fbadSChao Yu result += leaf_count;
1960bf38fbadSChao Yu
1961bf38fbadSChao Yu return result;
1962bf38fbadSChao Yu }
1963bf38fbadSChao Yu
/*
 * f2fs implementation of the FIEMAP ioctl: walk the file's block mappings
 * with f2fs_map_blocks() and report extents via fiemap_fill_next_extent(),
 * merging adjacent runs and giving compressed clusters special handling
 * (the whole cluster is reported as one FIEMAP_EXTENT_ENCODED extent).
 *
 * @inode:   file being queried
 * @fieinfo: fiemap request/response descriptor from the VFS
 * @start:   byte offset to start mapping from
 * @len:     number of bytes to map
 *
 * Returns 0 on success or a negative errno (-EFBIG if @start is beyond the
 * maximum file size, -EINTR on a fatal signal, or an error from the
 * mapping/fill helpers).
 */
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct f2fs_map_blocks map;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	/* pending (not yet reported) extent; size == 0 means "none pending" */
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;
	bool compr_cluster = false, compr_appended;
	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
	unsigned int count_in_cluster = 0;
	loff_t maxbytes;

	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
		ret = f2fs_precache_extents(inode);
		if (ret)
			return ret;
	}

	ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
	if (ret)
		return ret;

	inode_lock(inode);

	/* clamp the request to the filesystem's maximum file size */
	maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
	if (start > maxbytes) {
		ret = -EFBIG;
		goto out;
	}

	if (len > maxbytes || (maxbytes - len) < start)
		len = maxbytes - start;

	/* xattr mapping is handled by a dedicated helper */
	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		ret = f2fs_xattr_fiemap(inode, fieinfo);
		goto out;
	}

	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		/* -EAGAIN means the data is not inline; fall through */
		if (ret != -EAGAIN)
			goto out;
	}

	/* map at least one block */
	if (bytes_to_blks(inode, len) == 0)
		len = blks_to_bytes(inode, 1);

	start_blk = bytes_to_blks(inode, start);
	last_blk = bytes_to_blks(inode, start + len - 1);

next:
	memset(&map, 0, sizeof(map));
	map.m_lblk = start_blk;
	map.m_len = bytes_to_blks(inode, len);
	map.m_next_pgofs = &next_pgofs;
	map.m_seg_type = NO_CHECK_TYPE;

	/* inside a compressed cluster: map the remaining cluster blocks */
	if (compr_cluster) {
		map.m_lblk += 1;
		map.m_len = cluster_size - count_in_cluster;
	}

	ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
	if (ret)
		goto out;

	/* HOLE */
	if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) {
		/* skip to the next mapped offset reported by the mapper */
		start_blk = next_pgofs;

		if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
					max_inode_blocks(inode)))
			goto prep_next;

		flags |= FIEMAP_EXTENT_LAST;
	}

	compr_appended = false;
	/* In a case of compressed cluster, append this to the last extent */
	if (compr_cluster && ((map.m_flags & F2FS_MAP_DELALLOC) ||
			!(map.m_flags & F2FS_MAP_FLAGS))) {
		compr_appended = true;
		goto skip_fill;
	}

	/* flush the previously accumulated extent before starting a new one */
	if (size) {
		flags |= FIEMAP_EXTENT_MERGED;
		if (IS_ENCRYPTED(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
		trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
		/* nonzero here is either an error or "extent array full" (1) */
		if (ret)
			goto out;
		size = 0;
	}

	if (start_blk > last_blk)
		goto out;

skip_fill:
	if (map.m_pblk == COMPRESS_ADDR) {
		/* COMPRESS_ADDR marks the start of a compressed cluster */
		compr_cluster = true;
		count_in_cluster = 1;
	} else if (compr_appended) {
		/* fold the rest of the cluster into the pending extent */
		unsigned int appended_blks = cluster_size -
				count_in_cluster + 1;
		size += blks_to_bytes(inode, appended_blks);
		start_blk += appended_blks;
		compr_cluster = false;
	} else {
		logical = blks_to_bytes(inode, start_blk);
		phys = __is_valid_data_blkaddr(map.m_pblk) ?
			blks_to_bytes(inode, map.m_pblk) : 0;
		size = blks_to_bytes(inode, map.m_len);
		flags = 0;

		if (compr_cluster) {
			flags = FIEMAP_EXTENT_ENCODED;
			count_in_cluster += map.m_len;
			if (count_in_cluster == cluster_size) {
				compr_cluster = false;
				size += blks_to_bytes(inode, 1);
			}
		} else if (map.m_flags & F2FS_MAP_DELALLOC) {
			flags = FIEMAP_EXTENT_UNWRITTEN;
		}

		start_blk += bytes_to_blks(inode, size);
	}

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	/* fiemap_fill_next_extent() returns 1 when the user buffer is full */
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}
21119ab70134SJaegeuk Kim
f2fs_readpage_limit(struct inode * inode)211295ae251fSEric Biggers static inline loff_t f2fs_readpage_limit(struct inode *inode)
211395ae251fSEric Biggers {
2114feb0576aSEric Biggers if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
211595ae251fSEric Biggers return inode->i_sb->s_maxbytes;
211695ae251fSEric Biggers
211795ae251fSEric Biggers return i_size_read(inode);
211895ae251fSEric Biggers }
211995ae251fSEric Biggers
/*
 * Read one uncompressed data page: resolve its block address (reusing the
 * caller's cached f2fs_map_blocks result when it covers this page), zero
 * pages beyond EOF, and add mapped pages to the shared read bio, submitting
 * and reallocating the bio when the new block cannot be merged.
 *
 * @inode:             file being read
 * @page:              locked page to fill
 * @nr_pages:          remaining pages in this read batch (bio sizing hint)
 * @map:               shared block-mapping state, updated across calls
 * @bio_ret:           in/out: current read bio (may be replaced or cleared)
 * @last_block_in_bio: in/out: last block added to *bio_ret, for merge checks
 * @is_readahead:      true if called from readahead (sets REQ_RAHEAD)
 *
 * Returns 0 on success or a negative errno; on success the page is either
 * queued in *bio_ret or already unlocked (zeroed/uptodate).
 */
static int f2fs_read_single_page(struct inode *inode, struct page *page,
					unsigned nr_pages,
					struct f2fs_map_blocks *map,
					struct bio **bio_ret,
					sector_t *last_block_in_bio,
					bool is_readahead)
{
	struct bio *bio = *bio_ret;
	const unsigned blocksize = blks_to_bytes(inode, 1);
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	int ret = 0;

	block_in_file = (sector_t)page_index(page);
	last_block = block_in_file + nr_pages;
	/* round the read limit up to a whole block */
	last_block_in_file = bytes_to_blks(inode,
			f2fs_readpage_limit(inode) + blocksize - 1);
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;

	/* just zeroing out page which is beyond EOF */
	if (block_in_file >= last_block)
		goto zero_out;
	/*
	 * Map blocks using the previous result first.
	 */
	if ((map->m_flags & F2FS_MAP_MAPPED) &&
			block_in_file > map->m_lblk &&
			block_in_file < (map->m_lblk + map->m_len))
		goto got_it;

	/*
	 * Then do more f2fs_map_blocks() calls until we are
	 * done with this page.
	 */
	map->m_lblk = block_in_file;
	map->m_len = last_block - block_in_file;

	ret = f2fs_map_blocks(inode, map, F2FS_GET_BLOCK_DEFAULT);
	if (ret)
		goto out;
got_it:
	if ((map->m_flags & F2FS_MAP_MAPPED)) {
		block_nr = map->m_pblk + block_in_file - map->m_lblk;
		SetPageMappedToDisk(page);

		/* reject block addresses outside the valid on-disk range */
		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
						DATA_GENERIC_ENHANCE_READ)) {
			ret = -EFSCORRUPTED;
			f2fs_handle_error(F2FS_I_SB(inode),
						ERROR_INVALID_BLKADDR);
			goto out;
		}
	} else {
zero_out:
		/* hole or beyond EOF: hand back a zeroed, uptodate page */
		zero_user_segment(page, 0, PAGE_SIZE);
		if (f2fs_need_verity(inode, page->index) &&
		    !fsverity_verify_page(page)) {
			ret = -EIO;
			goto out;
		}
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	/*
	 * This page will go to BIO. Do we need to send this
	 * BIO off first?
	 */
	if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
				*last_block_in_bio, block_nr) ||
		    !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
submit_and_realloc:
		f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
		bio = NULL;
	}
	if (bio == NULL) {
		bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
				is_readahead ? REQ_RAHEAD : 0, page->index,
				false);
		if (IS_ERR(bio)) {
			ret = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}
	}

	/*
	 * If the page is under writeback, we need to wait for
	 * its completion to see the correct decrypted data.
	 */
	f2fs_wait_on_block_writeback(inode, block_nr);

	if (bio_add_page(bio, page, blocksize, 0) < blocksize)
		goto submit_and_realloc;

	inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
	f2fs_update_iostat(F2FS_I_SB(inode), NULL, FS_DATA_READ_IO,
							F2FS_BLKSIZE);
	*last_block_in_bio = block_nr;
out:
	*bio_ret = bio;
	return ret;
}
22282df0ab04SChao Yu
22294c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
/*
 * Read the compressed pages backing one cluster: discard rpages beyond EOF,
 * discover the cluster's block addresses (from the read extent cache when
 * possible, otherwise by walking the dnode), allocate a decompress context,
 * and queue each compressed page into the shared read bio. Pages already
 * present in the compressed-page cache skip I/O entirely.
 *
 * @cc:                compress context describing the cluster (cc->rpages)
 * @bio_ret:           in/out: current read bio (may be replaced or cleared)
 * @nr_pages:          bio sizing hint for newly allocated bios
 * @last_block_in_bio: in/out: last block queued, for bio merge checks
 * @is_readahead:      true if called from readahead (sets REQ_RAHEAD)
 * @for_write:         true on the writeback path; rpages were pinned by the
 *                     caller and are released here when dropped
 *
 * Returns 0 on success or a negative errno; on error all remaining rpages
 * are marked !uptodate and unlocked.
 */
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
				unsigned nr_pages, sector_t *last_block_in_bio,
				bool is_readahead, bool for_write)
{
	struct dnode_of_data dn;
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio = *bio_ret;
	unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
	sector_t last_block_in_file;
	const unsigned blocksize = blks_to_bytes(inode, 1);
	struct decompress_io_ctx *dic = NULL;
	struct extent_info ei = {};
	bool from_dnode = true;
	int i;
	int ret = 0;

	f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));

	last_block_in_file = bytes_to_blks(inode,
			f2fs_readpage_limit(inode) + blocksize - 1);

	/* get rid of pages beyond EOF */
	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		if (!page)
			continue;
		if ((sector_t)page->index >= last_block_in_file) {
			zero_user_segment(page, 0, PAGE_SIZE);
			if (!PageUptodate(page))
				SetPageUptodate(page);
		} else if (!PageUptodate(page)) {
			continue;
		}
		unlock_page(page);
		if (for_write)
			put_page(page);
		cc->rpages[i] = NULL;
		cc->nr_rpages--;
	}

	/* we are done since all pages are beyond EOF */
	if (f2fs_cluster_is_empty(cc))
		goto out;

	/* prefer the cached extent over a dnode walk for block addresses */
	if (f2fs_lookup_read_extent_cache(inode, start_idx, &ei))
		from_dnode = false;

	if (!from_dnode)
		goto skip_reading_dnode;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (ret)
		goto out;

	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out_put_dnode;
	}
	/* slot 0 of a compressed cluster must hold COMPRESS_ADDR */
	f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);

skip_reading_dnode:
	/* count the valid compressed blocks (cluster slots 1..size-1) */
	for (i = 1; i < cc->cluster_size; i++) {
		block_t blkaddr;

		blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) :
					ei.blk + i - 1;

		if (!__is_valid_data_blkaddr(blkaddr))
			break;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
			ret = -EFAULT;
			goto out_put_dnode;
		}
		cc->nr_cpages++;

		if (!from_dnode && i >= ei.c_len)
			break;
	}

	/* nothing to decompress */
	if (cc->nr_cpages == 0) {
		ret = 0;
		goto out_put_dnode;
	}

	dic = f2fs_alloc_dic(cc);
	if (IS_ERR(dic)) {
		ret = PTR_ERR(dic);
		goto out_put_dnode;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		struct page *page = dic->cpages[i];
		block_t blkaddr;
		struct bio_post_read_ctx *ctx;

		blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i + 1) :
					ei.blk + i;

		f2fs_wait_on_block_writeback(inode, blkaddr);

		/* served from the compressed-page cache: no I/O needed */
		if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
			if (atomic_dec_and_test(&dic->remaining_pages)) {
				/* all pages in hand: decompress in-place */
				f2fs_decompress_cluster(dic, true);
				break;
			}
			continue;
		}

		if (bio && (!page_is_mergeable(sbi, bio,
					*last_block_in_bio, blkaddr) ||
		    !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
submit_and_realloc:
			f2fs_submit_read_bio(sbi, bio, DATA);
			bio = NULL;
		}

		if (!bio) {
			bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
					is_readahead ? REQ_RAHEAD : 0,
					page->index, for_write);
			if (IS_ERR(bio)) {
				ret = PTR_ERR(bio);
				f2fs_decompress_end_io(dic, ret, true);
				f2fs_put_dnode(&dn);
				*bio_ret = NULL;
				return ret;
			}
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		/* the bio completion path will decompress this cluster */
		ctx = get_post_read_ctx(bio);
		ctx->enabled_steps |= STEP_DECOMPRESS;
		refcount_inc(&dic->refcnt);

		inc_page_count(sbi, F2FS_RD_DATA);
		f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
		*last_block_in_bio = blkaddr;
	}

	if (from_dnode)
		f2fs_put_dnode(&dn);

	*bio_ret = bio;
	return 0;

out_put_dnode:
	if (from_dnode)
		f2fs_put_dnode(&dn);
out:
	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i]) {
			ClearPageUptodate(cc->rpages[i]);
			unlock_page(cc->rpages[i]);
		}
	}
	*bio_ret = bio;
	return ret;
}
23974c8ff709SChao Yu #endif
23984c8ff709SChao Yu
2399f1e88660SJaegeuk Kim /*
2400f1e88660SJaegeuk Kim * This function was originally taken from fs/mpage.c, and customized for f2fs.
2401f1e88660SJaegeuk Kim * Major change was from block_size == page_size in f2fs by default.
2402f1e88660SJaegeuk Kim */
static int f2fs_mpage_readpages(struct inode *inode,
		struct readahead_control *rac, struct page *page)
{
	/*
	 * Core multi-page read loop. Called either with @rac (readahead:
	 * pages are pulled from the readahead control) or with a single
	 * @page and rac == NULL (->read_folio path). Uncompressed pages go
	 * through f2fs_read_single_page(); pages of compressed files are
	 * accumulated into @cc per cluster and flushed via
	 * f2fs_read_multi_pages() when the cluster is complete.
	 * Returns 0 or the last error from the per-page/per-cluster helpers.
	 */
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct f2fs_map_blocks map;
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = NULL_CLUSTER,
		.rpages = NULL,
		.cpages = NULL,
		.nr_rpages = 0,
		.nr_cpages = 0,
	};
	/* last cluster known to be non-compressed; avoids repeated lookups */
	pgoff_t nc_cluster_idx = NULL_CLUSTER;
#endif
	unsigned nr_pages = rac ? readahead_count(rac) : 1;
	unsigned max_nr_pages = nr_pages;
	int ret = 0;

	/* mapping state shared across f2fs_read_single_page() calls */
	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;

	for (; nr_pages; nr_pages--) {
		if (rac) {
			page = readahead_page(rac);
			prefetchw(&page->flags);
		}

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (f2fs_compressed_file(inode)) {
			/* there are remained compressed pages, submit them */
			if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
				ret = f2fs_read_multi_pages(&cc, &bio,
							max_nr_pages,
							&last_block_in_bio,
							rac != NULL, false);
				f2fs_destroy_compress_ctx(&cc, false);
				if (ret)
					goto set_error_page;
			}
			if (cc.cluster_idx == NULL_CLUSTER) {
				if (nc_cluster_idx ==
					page->index >> cc.log_cluster_size) {
					goto read_single_page;
				}

				ret = f2fs_is_compressed_cluster(inode, page->index);
				if (ret < 0)
					goto set_error_page;
				else if (!ret) {
					/* remember this non-compressed cluster */
					nc_cluster_idx =
						page->index >> cc.log_cluster_size;
					goto read_single_page;
				}

				nc_cluster_idx = NULL_CLUSTER;
			}
			ret = f2fs_init_compress_ctx(&cc);
			if (ret)
				goto set_error_page;

			f2fs_compress_ctx_add_page(&cc, page);

			goto next_page;
		}
read_single_page:
#endif

		ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
					&bio, &last_block_in_bio, rac);
		if (ret) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
set_error_page:
#endif
			/* error: hand back a zeroed, non-uptodate page */
			zero_user_segment(page, 0, PAGE_SIZE);
			unlock_page(page);
		}
#ifdef CONFIG_F2FS_FS_COMPRESSION
next_page:
#endif
		if (rac)
			put_page(page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (f2fs_compressed_file(inode)) {
			/* last page */
			if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
				ret = f2fs_read_multi_pages(&cc, &bio,
							max_nr_pages,
							&last_block_in_bio,
							rac != NULL, false);
				f2fs_destroy_compress_ctx(&cc, false);
			}
		}
#endif
	}
	if (bio)
		f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
	return ret;
}
2513f1e88660SJaegeuk Kim
f2fs_read_data_folio(struct file * file,struct folio * folio)2514be05584fSMatthew Wilcox (Oracle) static int f2fs_read_data_folio(struct file *file, struct folio *folio)
2515eb47b800SJaegeuk Kim {
2516be05584fSMatthew Wilcox (Oracle) struct page *page = &folio->page;
25174969c06aSJaegeuk Kim struct inode *inode = page_file_mapping(page)->host;
2518b3d208f9SJaegeuk Kim int ret = -EAGAIN;
25199ffe0fb5SHuajun Li
2520c20e89cdSChao Yu trace_f2fs_readpage(page, DATA);
2521c20e89cdSChao Yu
25224c8ff709SChao Yu if (!f2fs_is_compress_backend_ready(inode)) {
25234c8ff709SChao Yu unlock_page(page);
25244c8ff709SChao Yu return -EOPNOTSUPP;
25254c8ff709SChao Yu }
25264c8ff709SChao Yu
2527e1c42045Sarter97 /* If the file has inline data, try to read it directly */
25289ffe0fb5SHuajun Li if (f2fs_has_inline_data(inode))
25299ffe0fb5SHuajun Li ret = f2fs_read_inline_data(inode, page);
2530b3d208f9SJaegeuk Kim if (ret == -EAGAIN)
2531e20a7693SMatthew Wilcox (Oracle) ret = f2fs_mpage_readpages(inode, NULL, page);
25329ffe0fb5SHuajun Li return ret;
2533eb47b800SJaegeuk Kim }
2534eb47b800SJaegeuk Kim
f2fs_readahead(struct readahead_control * rac)253523323196SMatthew Wilcox (Oracle) static void f2fs_readahead(struct readahead_control *rac)
2536eb47b800SJaegeuk Kim {
253723323196SMatthew Wilcox (Oracle) struct inode *inode = rac->mapping->host;
2538b8c29400SChao Yu
253923323196SMatthew Wilcox (Oracle) trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
25409ffe0fb5SHuajun Li
25414c8ff709SChao Yu if (!f2fs_is_compress_backend_ready(inode))
254223323196SMatthew Wilcox (Oracle) return;
25434c8ff709SChao Yu
2544704528d8SMatthew Wilcox (Oracle) /* If the file has inline data, skip readahead */
25459ffe0fb5SHuajun Li if (f2fs_has_inline_data(inode))
254623323196SMatthew Wilcox (Oracle) return;
25479ffe0fb5SHuajun Li
2548e20a7693SMatthew Wilcox (Oracle) f2fs_mpage_readpages(inode, rac, NULL);
2549eb47b800SJaegeuk Kim }
2550eb47b800SJaegeuk Kim
/*
 * Encrypt the data page of a write request into fio->encrypted_page.
 * No-op for unencrypted files and for inline (hardware) crypto, where the
 * block layer encrypts in-flight. On -ENOMEM the pending writes are flushed
 * and the allocation is retried with __GFP_NOFAIL. If the old block address
 * has a cached meta page (encrypted_page migration target), its contents
 * are refreshed from the new ciphertext.
 *
 * Returns 0 on success or the PTR_ERR from the encryption helper.
 */
int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;
	struct page *mpage, *page;
	gfp_t gfp_flags = GFP_NOFS;

	if (!f2fs_encrypted_file(inode))
		return 0;

	/* compressed writes encrypt the compressed page, not the raw page */
	page = fio->compressed_page ? fio->compressed_page : fio->page;

	/* inline crypto encrypts in the block layer; nothing to do here */
	if (fscrypt_inode_uses_inline_crypto(inode))
		return 0;

retry_encrypt:
	fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
				PAGE_SIZE, 0, gfp_flags);
	if (IS_ERR(fio->encrypted_page)) {
		/* flush pending IOs and wait for a while in the ENOMEM case */
		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
			f2fs_flush_merged_writes(fio->sbi);
			memalloc_retry_wait(GFP_NOFS);
			gfp_flags |= __GFP_NOFAIL;
			goto retry_encrypt;
		}
		return PTR_ERR(fio->encrypted_page);
	}

	/* keep a cached meta copy of the old block in sync, if any */
	mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
	if (mpage) {
		if (PageUptodate(mpage))
			memcpy(page_address(mpage),
				page_address(fio->encrypted_page), PAGE_SIZE);
		f2fs_put_page(mpage, 1);
	}
	return 0;
}
25886aa58d8aSChao Yu
check_inplace_update_policy(struct inode * inode,struct f2fs_io_info * fio)2589bb9e3bb8SChao Yu static inline bool check_inplace_update_policy(struct inode *inode,
2590bb9e3bb8SChao Yu struct f2fs_io_info *fio)
2591bb9e3bb8SChao Yu {
2592bb9e3bb8SChao Yu struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2593bb9e3bb8SChao Yu
2594fdb7ccc3SYangtao Li if (IS_F2FS_IPU_HONOR_OPU_WRITE(sbi) &&
25951018a546SChao Yu is_inode_flag_set(inode, FI_OPU_WRITE))
25961018a546SChao Yu return false;
2597fdb7ccc3SYangtao Li if (IS_F2FS_IPU_FORCE(sbi))
2598bb9e3bb8SChao Yu return true;
2599fdb7ccc3SYangtao Li if (IS_F2FS_IPU_SSR(sbi) && f2fs_need_SSR(sbi))
2600bb9e3bb8SChao Yu return true;
2601fdb7ccc3SYangtao Li if (IS_F2FS_IPU_UTIL(sbi) && utilization(sbi) > SM_I(sbi)->min_ipu_util)
2602bb9e3bb8SChao Yu return true;
2603fdb7ccc3SYangtao Li if (IS_F2FS_IPU_SSR_UTIL(sbi) && f2fs_need_SSR(sbi) &&
2604bb9e3bb8SChao Yu utilization(sbi) > SM_I(sbi)->min_ipu_util)
2605bb9e3bb8SChao Yu return true;
2606bb9e3bb8SChao Yu
2607bb9e3bb8SChao Yu /*
2608bb9e3bb8SChao Yu * IPU for rewrite async pages
2609bb9e3bb8SChao Yu */
2610fdb7ccc3SYangtao Li if (IS_F2FS_IPU_ASYNC(sbi) && fio && fio->op == REQ_OP_WRITE &&
2611fdb7ccc3SYangtao Li !(fio->op_flags & REQ_SYNC) && !IS_ENCRYPTED(inode))
2612bb9e3bb8SChao Yu return true;
2613bb9e3bb8SChao Yu
2614bb9e3bb8SChao Yu /* this is only set during fdatasync */
2615fdb7ccc3SYangtao Li if (IS_F2FS_IPU_FSYNC(sbi) && is_inode_flag_set(inode, FI_NEED_IPU))
2616bb9e3bb8SChao Yu return true;
2617bb9e3bb8SChao Yu
26184354994fSDaniel Rosenberg if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
26194354994fSDaniel Rosenberg !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
26204354994fSDaniel Rosenberg return true;
26214354994fSDaniel Rosenberg
2622bb9e3bb8SChao Yu return false;
2623bb9e3bb8SChao Yu }
2624bb9e3bb8SChao Yu
f2fs_should_update_inplace(struct inode * inode,struct f2fs_io_info * fio)26254d57b86dSChao Yu bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
2626bb9e3bb8SChao Yu {
2627859fca6bSChao Yu /* swap file is migrating in aligned write mode */
2628859fca6bSChao Yu if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2629859fca6bSChao Yu return false;
2630859fca6bSChao Yu
2631bb9e3bb8SChao Yu if (f2fs_is_pinned_file(inode))
2632bb9e3bb8SChao Yu return true;
2633bb9e3bb8SChao Yu
2634bb9e3bb8SChao Yu /* if this is cold file, we should overwrite to avoid fragmentation */
2635f3b23c78SWeichao Guo if (file_is_cold(inode) && !is_inode_flag_set(inode, FI_OPU_WRITE))
2636bb9e3bb8SChao Yu return true;
2637bb9e3bb8SChao Yu
2638bb9e3bb8SChao Yu return check_inplace_update_policy(inode, fio);
2639bb9e3bb8SChao Yu }
2640bb9e3bb8SChao Yu
f2fs_should_update_outplace(struct inode * inode,struct f2fs_io_info * fio)26414d57b86dSChao Yu bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
2642bb9e3bb8SChao Yu {
2643bb9e3bb8SChao Yu struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2644bb9e3bb8SChao Yu
264519bdba52SJaegeuk Kim /* The below cases were checked when setting it. */
264619bdba52SJaegeuk Kim if (f2fs_is_pinned_file(inode))
264719bdba52SJaegeuk Kim return false;
264819bdba52SJaegeuk Kim if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK))
264919bdba52SJaegeuk Kim return true;
2650b0332a0fSChao Yu if (f2fs_lfs_mode(sbi))
2651bb9e3bb8SChao Yu return true;
2652bb9e3bb8SChao Yu if (S_ISDIR(inode->i_mode))
2653bb9e3bb8SChao Yu return true;
2654af033b2aSChao Yu if (IS_NOQUOTA(inode))
2655af033b2aSChao Yu return true;
2656bb9e3bb8SChao Yu if (f2fs_is_atomic_file(inode))
2657bb9e3bb8SChao Yu return true;
2658859fca6bSChao Yu
2659859fca6bSChao Yu /* swap file is migrating in aligned write mode */
2660859fca6bSChao Yu if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2661859fca6bSChao Yu return true;
2662859fca6bSChao Yu
26631018a546SChao Yu if (is_inode_flag_set(inode, FI_OPU_WRITE))
26641018a546SChao Yu return true;
26651018a546SChao Yu
2666bb9e3bb8SChao Yu if (fio) {
2667b763f3beSChao Yu if (page_private_gcing(fio->page))
2668bb9e3bb8SChao Yu return true;
2669b763f3beSChao Yu if (page_private_dummy(fio->page))
2670bb9e3bb8SChao Yu return true;
26714354994fSDaniel Rosenberg if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
26724354994fSDaniel Rosenberg f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
26734354994fSDaniel Rosenberg return true;
2674bb9e3bb8SChao Yu }
2675bb9e3bb8SChao Yu return false;
2676bb9e3bb8SChao Yu }
2677bb9e3bb8SChao Yu
need_inplace_update(struct f2fs_io_info * fio)26787eab0c0dSHou Pengyang static inline bool need_inplace_update(struct f2fs_io_info *fio)
26797eab0c0dSHou Pengyang {
26807eab0c0dSHou Pengyang struct inode *inode = fio->page->mapping->host;
26817eab0c0dSHou Pengyang
26824d57b86dSChao Yu if (f2fs_should_update_outplace(inode, fio))
26837eab0c0dSHou Pengyang return false;
26847eab0c0dSHou Pengyang
26854d57b86dSChao Yu return f2fs_should_update_inplace(inode, fio);
26867eab0c0dSHou Pengyang }
26877eab0c0dSHou Pengyang
/*
 * Write one dirty data page described by @fio, choosing between an
 * in-place update (IPU, rewrite the old block address) and an
 * out-of-place write (OPU, allocate a new block via the dnode).
 *
 * Locking contract is driven by fio->need_lock:
 *   LOCK_REQ   - take f2fs_trylock_op() here (returns -EAGAIN if busy);
 *   LOCK_RETRY - try to upgrade to LOCK_REQ before the OPU path;
 *   LOCK_DONE  - caller already holds (or does not need) the op lock.
 *
 * Returns 0 on success, -EAGAIN when the op lock could not be taken,
 * -EFSCORRUPTED on an invalid block address, or another negative errno.
 */
int f2fs_do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	struct node_info ni;
	bool ipu_force = false;
	int err = 0;

	/* Use COW inode to make dnode_of_data for atomic write */
	if (f2fs_is_atomic_file(inode))
		set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
	else
		set_new_dnode(&dn, inode, NULL, NULL, 0);

	/*
	 * Fast path: if the old block address is in the extent cache and
	 * IPU applies, skip the dnode lookup (and the op lock) entirely.
	 */
	if (need_inplace_update(fio) &&
			f2fs_lookup_read_extent_cache_block(inode, page->index,
						&fio->old_blkaddr)) {
		if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
						DATA_GENERIC_ENHANCE)) {
			f2fs_handle_error(fio->sbi,
						ERROR_INVALID_BLKADDR);
			return -EFSCORRUPTED;
		}

		ipu_force = true;
		fio->need_lock = LOCK_DONE;
		goto got_it;
	}

	/* Deadlock due to between page->lock and f2fs_lock_op */
	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
		return -EAGAIN;

	err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		goto out;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		clear_page_private_gcing(page);
		goto out_writepage;
	}
got_it:
	if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
		!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
						DATA_GENERIC_ENHANCE)) {
		err = -EFSCORRUPTED;
		f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
		goto out_writepage;
	}

	/* wait for GCed page writeback via META_MAPPING */
	if (fio->post_read)
		f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);

	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
	if (ipu_force ||
		(__is_valid_data_blkaddr(fio->old_blkaddr) &&
					need_inplace_update(fio))) {
		err = f2fs_encrypt_one_page(fio);
		if (err)
			goto out_writepage;

		set_page_writeback(page);
		/* dnode and op lock are released before the IPU submission */
		f2fs_put_dnode(&dn);
		if (fio->need_lock == LOCK_REQ)
			f2fs_unlock_op(fio->sbi);
		err = f2fs_inplace_write_data(fio);
		if (err) {
			/* undo encryption bounce page and writeback state */
			if (fscrypt_inode_uses_fs_layer_crypto(inode))
				fscrypt_finalize_bounce_page(&fio->encrypted_page);
			if (PageWriteback(page))
				end_page_writeback(page);
		} else {
			set_inode_flag(inode, FI_UPDATE_WRITE);
		}
		trace_f2fs_do_write_data_page(fio->page, IPU);
		return err;
	}

	/* OPU path needs the op lock; upgrade LOCK_RETRY to LOCK_REQ. */
	if (fio->need_lock == LOCK_RETRY) {
		if (!f2fs_trylock_op(fio->sbi)) {
			err = -EAGAIN;
			goto out_writepage;
		}
		fio->need_lock = LOCK_REQ;
	}

	err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false);
	if (err)
		goto out_writepage;

	fio->version = ni.version;

	err = f2fs_encrypt_one_page(fio);
	if (err)
		goto out_writepage;

	set_page_writeback(page);

	/* overwriting a compressed cluster head releases one saved block */
	if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
		f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);

	/* LFS mode write path */
	f2fs_outplace_write_data(&dn, fio);
	trace_f2fs_do_write_data_page(page, OPU);
	set_inode_flag(inode, FI_APPEND_WRITE);
out_writepage:
	f2fs_put_dnode(&dn);
out:
	if (fio->need_lock == LOCK_REQ)
		f2fs_unlock_op(fio->sbi);
	return err;
}
2809eb47b800SJaegeuk Kim
/*
 * Write back a single locked data page.
 *
 * Handles EOF partial-page zeroing, inline data, checkpoint-controlled
 * writes (dir/quota), and falls through to f2fs_do_write_data_page()
 * for the ordinary IPU/OPU path.  On return the page is unlocked unless
 * AOP_WRITEPAGE_ACTIVATE is returned from the redirty path.
 *
 * @submitted:     out-param, set to the number of submitted bios (may be
 *                 cleared internally on reclaim/cp-error paths).
 * @bio/@last_block: cached IPU bio state shared across a writeback pass.
 * @compr_blocks:  non-zero when called for a compressed cluster; also
 *                 selects LOCK_DONE locking.
 * @allow_balance: whether f2fs_balance_fs() may be invoked afterwards.
 */
int f2fs_write_single_data_page(struct page *page, int *submitted,
				struct bio **bio,
				sector_t *last_block,
				struct writeback_control *wbc,
				enum iostat_type io_type,
				int compr_blocks,
				bool allow_balance)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long)i_size)
							>> PAGE_SHIFT;
	loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	bool quota_inode = IS_NOQUOTA(inode);
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NULL_ADDR,
		.page = page,
		.encrypted_page = NULL,
		.submitted = 0,
		.compr_blocks = compr_blocks,
		.need_lock = compr_blocks ? LOCK_DONE : LOCK_RETRY,
		.post_read = f2fs_post_read_required(inode) ? 1 : 0,
		.io_type = io_type,
		.io_wbc = wbc,
		.bio = bio,
		.last_block = last_block,
	};

	trace_f2fs_writepage(page, DATA);

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		/*
		 * don't drop any dirty dentry pages for keeping latest
		 * directory structure.
		 */
		if (S_ISDIR(inode->i_mode) &&
				!is_sbi_flag_set(sbi, SBI_IS_CLOSE))
			goto redirty_out;

		/* keep data pages in remount-ro mode */
		if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
			goto redirty_out;
		goto out;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (page->index < end_index ||
			f2fs_verity_in_progress(inode) ||
			compr_blocks)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	/* zero the tail of the last partial page beyond i_size */
	zero_user_segment(page, offset, PAGE_SIZE);
write:
	/* Dentry/quota blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode) || quota_inode) {
		/*
		 * We need to wait for node_write to avoid block allocation during
		 * checkpoint. This can only happen to quota writes which can cause
		 * the below discard race condition.
		 */
		if (quota_inode)
			f2fs_down_read(&sbi->node_write);

		fio.need_lock = LOCK_DONE;
		err = f2fs_do_write_data_page(&fio);

		if (quota_inode)
			f2fs_up_read(&sbi->node_write);

		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;
	else
		set_inode_flag(inode, FI_HOT_DATA);

	/* -EAGAIN sentinel: fall through to the regular write path below */
	err = -EAGAIN;
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_write_inline_data(inode, page);
		if (!err)
			goto out;
	}

	if (err == -EAGAIN) {
		err = f2fs_do_write_data_page(&fio);
		if (err == -EAGAIN) {
			/* retry once more while holding the op lock */
			f2fs_bug_on(sbi, compr_blocks);
			fio.need_lock = LOCK_REQ;
			err = f2fs_do_write_data_page(&fio);
		}
	}

	if (err) {
		file_set_keep_isize(inode);
	} else {
		/* track the largest on-disk size ever written */
		spin_lock(&F2FS_I(inode)->i_size_lock);
		if (F2FS_I(inode)->last_disk_size < psize)
			F2FS_I(inode)->last_disk_size = psize;
		spin_unlock(&F2FS_I(inode)->i_size_lock);
	}

done:
	if (err && err != -ENOENT)
		goto redirty_out;

out:
	inode_dec_dirty_pages(inode);
	if (err) {
		ClearPageUptodate(page);
		clear_page_private_gcing(page);
	}

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
		clear_inode_flag(inode, FI_HOT_DATA);
		f2fs_remove_dirty_inode(inode);
		/* drop the local out-pointer; caller gets no submit count */
		submitted = NULL;
	}
	unlock_page(page);
	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
			!F2FS_I(inode)->wb_task && allow_balance)
		f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, DATA);
		if (bio && *bio)
			f2fs_submit_merged_ipu_write(sbi, bio, NULL);
		submitted = NULL;
	}

	if (submitted)
		*submitted = fio.submitted;

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	/*
	 * pageout() in MM translates EAGAIN, so calls handle_write_error()
	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
	 * file_write_and_wait_range() will see EIO error, which is critical
	 * to return value of fsync() followed by atomic_write failure to user.
	 */
	if (!err || wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return err;
}
2982fa9150a8SNamjae Jeon
f2fs_write_data_page(struct page * page,struct writeback_control * wbc)2983f566bae8SJaegeuk Kim static int f2fs_write_data_page(struct page *page,
2984f566bae8SJaegeuk Kim struct writeback_control *wbc)
2985f566bae8SJaegeuk Kim {
29864c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
29874c8ff709SChao Yu struct inode *inode = page->mapping->host;
29884c8ff709SChao Yu
29894c8ff709SChao Yu if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
29904c8ff709SChao Yu goto out;
29914c8ff709SChao Yu
29924c8ff709SChao Yu if (f2fs_compressed_file(inode)) {
29934c8ff709SChao Yu if (f2fs_is_compressed_cluster(inode, page->index)) {
29944c8ff709SChao Yu redirty_page_for_writepage(wbc, page);
29954c8ff709SChao Yu return AOP_WRITEPAGE_ACTIVATE;
29964c8ff709SChao Yu }
29974c8ff709SChao Yu }
29984c8ff709SChao Yu out:
29994c8ff709SChao Yu #endif
30004c8ff709SChao Yu
30014c8ff709SChao Yu return f2fs_write_single_data_page(page, NULL, NULL, NULL,
30023afae09fSChao Yu wbc, FS_DATA_IO, 0, true);
3003f566bae8SJaegeuk Kim }
3004f566bae8SJaegeuk Kim
30058f46dcaeSChao Yu /*
3006146949deSJinyoung CHOI * This function was copied from write_cache_pages from mm/page-writeback.c.
30078f46dcaeSChao Yu * The major change is making write step of cold data page separately from
30088f46dcaeSChao Yu * warm/hot data page.
30098f46dcaeSChao Yu */
f2fs_write_cache_pages(struct address_space * mapping,struct writeback_control * wbc,enum iostat_type io_type)30108f46dcaeSChao Yu static int f2fs_write_cache_pages(struct address_space *mapping,
3011b0af6d49SChao Yu struct writeback_control *wbc,
3012b0af6d49SChao Yu enum iostat_type io_type)
30138f46dcaeSChao Yu {
30148f46dcaeSChao Yu int ret = 0;
30154c8ff709SChao Yu int done = 0, retry = 0;
30168deca179SChao Yu struct page *pages_local[F2FS_ONSTACK_PAGES];
30178deca179SChao Yu struct page **pages = pages_local;
30181cd98ee7SVishal Moola (Oracle) struct folio_batch fbatch;
3019c29fd0c0SChao Yu struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
30208648de2cSChao Yu struct bio *bio = NULL;
30218648de2cSChao Yu sector_t last_block;
30224c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
30234c8ff709SChao Yu struct inode *inode = mapping->host;
30244c8ff709SChao Yu struct compress_ctx cc = {
30254c8ff709SChao Yu .inode = inode,
30264c8ff709SChao Yu .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
30274c8ff709SChao Yu .cluster_size = F2FS_I(inode)->i_cluster_size,
30284c8ff709SChao Yu .cluster_idx = NULL_CLUSTER,
30294c8ff709SChao Yu .rpages = NULL,
30304c8ff709SChao Yu .nr_rpages = 0,
30314c8ff709SChao Yu .cpages = NULL,
30323271d7ebSFengnan Chang .valid_nr_cpages = 0,
30334c8ff709SChao Yu .rbuf = NULL,
30344c8ff709SChao Yu .cbuf = NULL,
30354c8ff709SChao Yu .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
30364c8ff709SChao Yu .private = NULL,
30374c8ff709SChao Yu };
30384c8ff709SChao Yu #endif
30391cd98ee7SVishal Moola (Oracle) int nr_folios, p, idx;
30408f46dcaeSChao Yu int nr_pages;
30418deca179SChao Yu unsigned int max_pages = F2FS_ONSTACK_PAGES;
30428f46dcaeSChao Yu pgoff_t index;
30438f46dcaeSChao Yu pgoff_t end; /* Inclusive */
30448f46dcaeSChao Yu pgoff_t done_index;
30458f46dcaeSChao Yu int range_whole = 0;
304610bbd235SMatthew Wilcox xa_mark_t tag;
3047bab475c5SChao Yu int nwritten = 0;
30484c8ff709SChao Yu int submitted = 0;
30494c8ff709SChao Yu int i;
30508f46dcaeSChao Yu
30518deca179SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
30528deca179SChao Yu if (f2fs_compressed_file(inode) &&
30538deca179SChao Yu 1 << cc.log_cluster_size > F2FS_ONSTACK_PAGES) {
30548deca179SChao Yu pages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
30558deca179SChao Yu cc.log_cluster_size, GFP_NOFS | __GFP_NOFAIL);
30568deca179SChao Yu max_pages = 1 << cc.log_cluster_size;
30578deca179SChao Yu }
30588deca179SChao Yu #endif
30598deca179SChao Yu
30601cd98ee7SVishal Moola (Oracle) folio_batch_init(&fbatch);
30611cd98ee7SVishal Moola (Oracle)
3062ef095d19SJaegeuk Kim if (get_dirty_pages(mapping->host) <=
3063ef095d19SJaegeuk Kim SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
3064ef095d19SJaegeuk Kim set_inode_flag(mapping->host, FI_HOT_DATA);
3065ef095d19SJaegeuk Kim else
3066ef095d19SJaegeuk Kim clear_inode_flag(mapping->host, FI_HOT_DATA);
3067ef095d19SJaegeuk Kim
30688f46dcaeSChao Yu if (wbc->range_cyclic) {
30694df7a75fSJason Yan index = mapping->writeback_index; /* prev offset */
30708f46dcaeSChao Yu end = -1;
30718f46dcaeSChao Yu } else {
307209cbfeafSKirill A. Shutemov index = wbc->range_start >> PAGE_SHIFT;
307309cbfeafSKirill A. Shutemov end = wbc->range_end >> PAGE_SHIFT;
30748f46dcaeSChao Yu if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
30758f46dcaeSChao Yu range_whole = 1;
30768f46dcaeSChao Yu }
30778f46dcaeSChao Yu if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
30788f46dcaeSChao Yu tag = PAGECACHE_TAG_TOWRITE;
30798f46dcaeSChao Yu else
30808f46dcaeSChao Yu tag = PAGECACHE_TAG_DIRTY;
30818f46dcaeSChao Yu retry:
30824c8ff709SChao Yu retry = 0;
30838f46dcaeSChao Yu if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
30848f46dcaeSChao Yu tag_pages_for_writeback(mapping, index, end);
30858f46dcaeSChao Yu done_index = index;
30864c8ff709SChao Yu while (!done && !retry && (index <= end)) {
30871cd98ee7SVishal Moola (Oracle) nr_pages = 0;
30881cd98ee7SVishal Moola (Oracle) again:
30891cd98ee7SVishal Moola (Oracle) nr_folios = filemap_get_folios_tag(mapping, &index, end,
30901cd98ee7SVishal Moola (Oracle) tag, &fbatch);
30911cd98ee7SVishal Moola (Oracle) if (nr_folios == 0) {
30921cd98ee7SVishal Moola (Oracle) if (nr_pages)
30931cd98ee7SVishal Moola (Oracle) goto write;
30948f46dcaeSChao Yu break;
30951cd98ee7SVishal Moola (Oracle) }
30968f46dcaeSChao Yu
30971cd98ee7SVishal Moola (Oracle) for (i = 0; i < nr_folios; i++) {
30981cd98ee7SVishal Moola (Oracle) struct folio *folio = fbatch.folios[i];
30991cd98ee7SVishal Moola (Oracle)
31001cd98ee7SVishal Moola (Oracle) idx = 0;
31011cd98ee7SVishal Moola (Oracle) p = folio_nr_pages(folio);
31021cd98ee7SVishal Moola (Oracle) add_more:
31031cd98ee7SVishal Moola (Oracle) pages[nr_pages] = folio_page(folio, idx);
31041cd98ee7SVishal Moola (Oracle) folio_get(folio);
31058deca179SChao Yu if (++nr_pages == max_pages) {
31061cd98ee7SVishal Moola (Oracle) index = folio->index + idx + 1;
31071cd98ee7SVishal Moola (Oracle) folio_batch_release(&fbatch);
31081cd98ee7SVishal Moola (Oracle) goto write;
31091cd98ee7SVishal Moola (Oracle) }
31101cd98ee7SVishal Moola (Oracle) if (++idx < p)
31111cd98ee7SVishal Moola (Oracle) goto add_more;
31121cd98ee7SVishal Moola (Oracle) }
31131cd98ee7SVishal Moola (Oracle) folio_batch_release(&fbatch);
31141cd98ee7SVishal Moola (Oracle) goto again;
31151cd98ee7SVishal Moola (Oracle) write:
31168f46dcaeSChao Yu for (i = 0; i < nr_pages; i++) {
311701fc4b9aSFengnan Chang struct page *page = pages[i];
31181cd98ee7SVishal Moola (Oracle) struct folio *folio = page_folio(page);
31194c8ff709SChao Yu bool need_readd;
31204c8ff709SChao Yu readd:
31214c8ff709SChao Yu need_readd = false;
31224c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
31234c8ff709SChao Yu if (f2fs_compressed_file(inode)) {
3124b368cc5eSFengnan Chang void *fsdata = NULL;
3125b368cc5eSFengnan Chang struct page *pagep;
3126b368cc5eSFengnan Chang int ret2;
3127b368cc5eSFengnan Chang
31284c8ff709SChao Yu ret = f2fs_init_compress_ctx(&cc);
31294c8ff709SChao Yu if (ret) {
31304c8ff709SChao Yu done = 1;
31314c8ff709SChao Yu break;
31324c8ff709SChao Yu }
31338f46dcaeSChao Yu
31344c8ff709SChao Yu if (!f2fs_cluster_can_merge_page(&cc,
31351cd98ee7SVishal Moola (Oracle) folio->index)) {
31364c8ff709SChao Yu ret = f2fs_write_multi_pages(&cc,
31374c8ff709SChao Yu &submitted, wbc, io_type);
31384c8ff709SChao Yu if (!ret)
31394c8ff709SChao Yu need_readd = true;
31404c8ff709SChao Yu goto result;
31414c8ff709SChao Yu }
31424c8ff709SChao Yu
31434c8ff709SChao Yu if (unlikely(f2fs_cp_error(sbi)))
31441cd98ee7SVishal Moola (Oracle) goto lock_folio;
31454c8ff709SChao Yu
3146b368cc5eSFengnan Chang if (!f2fs_cluster_is_empty(&cc))
31471cd98ee7SVishal Moola (Oracle) goto lock_folio;
31484c8ff709SChao Yu
31494f8219f8SFengnan Chang if (f2fs_all_cluster_page_ready(&cc,
315001fc4b9aSFengnan Chang pages, i, nr_pages, true))
31511cd98ee7SVishal Moola (Oracle) goto lock_folio;
31524f8219f8SFengnan Chang
31534c8ff709SChao Yu ret2 = f2fs_prepare_compress_overwrite(
31544c8ff709SChao Yu inode, &pagep,
31551cd98ee7SVishal Moola (Oracle) folio->index, &fsdata);
31564c8ff709SChao Yu if (ret2 < 0) {
31574c8ff709SChao Yu ret = ret2;
31584c8ff709SChao Yu done = 1;
31594c8ff709SChao Yu break;
31604c8ff709SChao Yu } else if (ret2 &&
3161b368cc5eSFengnan Chang (!f2fs_compress_write_end(inode,
31621cd98ee7SVishal Moola (Oracle) fsdata, folio->index, 1) ||
31634f8219f8SFengnan Chang !f2fs_all_cluster_page_ready(&cc,
31641cd98ee7SVishal Moola (Oracle) pages, i, nr_pages,
31651cd98ee7SVishal Moola (Oracle) false))) {
31664c8ff709SChao Yu retry = 1;
31674c8ff709SChao Yu break;
31684c8ff709SChao Yu }
31694c8ff709SChao Yu }
31704c8ff709SChao Yu #endif
3171f8de4331SChao Yu /* give a priority to WB_SYNC threads */
3172c29fd0c0SChao Yu if (atomic_read(&sbi->wb_sync_req[DATA]) &&
3173f8de4331SChao Yu wbc->sync_mode == WB_SYNC_NONE) {
3174f8de4331SChao Yu done = 1;
3175f8de4331SChao Yu break;
3176f8de4331SChao Yu }
31774c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
31781cd98ee7SVishal Moola (Oracle) lock_folio:
31794c8ff709SChao Yu #endif
31801cd98ee7SVishal Moola (Oracle) done_index = folio->index;
3181d29460e5SJaegeuk Kim retry_write:
31821cd98ee7SVishal Moola (Oracle) folio_lock(folio);
31838f46dcaeSChao Yu
31841cd98ee7SVishal Moola (Oracle) if (unlikely(folio->mapping != mapping)) {
31858f46dcaeSChao Yu continue_unlock:
31861cd98ee7SVishal Moola (Oracle) folio_unlock(folio);
31878f46dcaeSChao Yu continue;
31888f46dcaeSChao Yu }
31898f46dcaeSChao Yu
31901cd98ee7SVishal Moola (Oracle) if (!folio_test_dirty(folio)) {
31918f46dcaeSChao Yu /* someone wrote it for us */
31928f46dcaeSChao Yu goto continue_unlock;
31938f46dcaeSChao Yu }
31948f46dcaeSChao Yu
31951cd98ee7SVishal Moola (Oracle) if (folio_test_writeback(folio)) {
3196c948be79SYangtao Li if (wbc->sync_mode == WB_SYNC_NONE)
31978f46dcaeSChao Yu goto continue_unlock;
3198c948be79SYangtao Li f2fs_wait_on_page_writeback(&folio->page, DATA, true, true);
31998f46dcaeSChao Yu }
32008f46dcaeSChao Yu
32011cd98ee7SVishal Moola (Oracle) if (!folio_clear_dirty_for_io(folio))
32028f46dcaeSChao Yu goto continue_unlock;
32038f46dcaeSChao Yu
32044c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
32054c8ff709SChao Yu if (f2fs_compressed_file(inode)) {
32061cd98ee7SVishal Moola (Oracle) folio_get(folio);
32071cd98ee7SVishal Moola (Oracle) f2fs_compress_ctx_add_page(&cc, &folio->page);
32084c8ff709SChao Yu continue;
32094c8ff709SChao Yu }
32104c8ff709SChao Yu #endif
32111cd98ee7SVishal Moola (Oracle) ret = f2fs_write_single_data_page(&folio->page,
32121cd98ee7SVishal Moola (Oracle) &submitted, &bio, &last_block,
32131cd98ee7SVishal Moola (Oracle) wbc, io_type, 0, true);
32144c8ff709SChao Yu if (ret == AOP_WRITEPAGE_ACTIVATE)
32151cd98ee7SVishal Moola (Oracle) folio_unlock(folio);
32164c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
32174c8ff709SChao Yu result:
32184c8ff709SChao Yu #endif
32194c8ff709SChao Yu nwritten += submitted;
32204c8ff709SChao Yu wbc->nr_to_write -= submitted;
32214c8ff709SChao Yu
32228f46dcaeSChao Yu if (unlikely(ret)) {
32230002b61bSChao Yu /*
32240002b61bSChao Yu * keep nr_to_write, since vfs uses this to
32250002b61bSChao Yu * get # of written pages.
32260002b61bSChao Yu */
32270002b61bSChao Yu if (ret == AOP_WRITEPAGE_ACTIVATE) {
32280002b61bSChao Yu ret = 0;
32294c8ff709SChao Yu goto next;
3230d29460e5SJaegeuk Kim } else if (ret == -EAGAIN) {
3231d29460e5SJaegeuk Kim ret = 0;
3232d29460e5SJaegeuk Kim if (wbc->sync_mode == WB_SYNC_ALL) {
3233a64239d0SNeilBrown f2fs_io_schedule_timeout(
32345df7731fSChao Yu DEFAULT_IO_TIMEOUT);
3235d29460e5SJaegeuk Kim goto retry_write;
3236d29460e5SJaegeuk Kim }
32374c8ff709SChao Yu goto next;
32380002b61bSChao Yu }
3239a842a909SMinjie Du done_index = folio_next_index(folio);
32408f46dcaeSChao Yu done = 1;
32418f46dcaeSChao Yu break;
32428f46dcaeSChao Yu }
32438f46dcaeSChao Yu
32444c8ff709SChao Yu if (wbc->nr_to_write <= 0 &&
32458f46dcaeSChao Yu wbc->sync_mode == WB_SYNC_NONE) {
32468f46dcaeSChao Yu done = 1;
32478f46dcaeSChao Yu break;
32488f46dcaeSChao Yu }
32494c8ff709SChao Yu next:
32504c8ff709SChao Yu if (need_readd)
32514c8ff709SChao Yu goto readd;
32528f46dcaeSChao Yu }
325301fc4b9aSFengnan Chang release_pages(pages, nr_pages);
32548f46dcaeSChao Yu cond_resched();
32558f46dcaeSChao Yu }
32564c8ff709SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
32574c8ff709SChao Yu /* flush remained pages in compress cluster */
32584c8ff709SChao Yu if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
32594c8ff709SChao Yu ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
32604c8ff709SChao Yu nwritten += submitted;
32614c8ff709SChao Yu wbc->nr_to_write -= submitted;
32624c8ff709SChao Yu if (ret) {
32634c8ff709SChao Yu done = 1;
32644c8ff709SChao Yu retry = 0;
32654c8ff709SChao Yu }
32664c8ff709SChao Yu }
3267adfc6943SJaegeuk Kim if (f2fs_compressed_file(inode))
32688bfbfb0dSChao Yu f2fs_destroy_compress_ctx(&cc, false);
32694c8ff709SChao Yu #endif
3270e78790f8SSahitya Tummala if (retry) {
32718f46dcaeSChao Yu index = 0;
3272e78790f8SSahitya Tummala end = -1;
32738f46dcaeSChao Yu goto retry;
32748f46dcaeSChao Yu }
3275e78790f8SSahitya Tummala if (wbc->range_cyclic && !done)
3276e78790f8SSahitya Tummala done_index = 0;
32778f46dcaeSChao Yu if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
32788f46dcaeSChao Yu mapping->writeback_index = done_index;
32798f46dcaeSChao Yu
3280bab475c5SChao Yu if (nwritten)
3281b9109b0eSJaegeuk Kim f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
3282bab475c5SChao Yu NULL, 0, DATA);
32838648de2cSChao Yu /* submit cached bio of IPU write */
32848648de2cSChao Yu if (bio)
32850b20fcecSChao Yu f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
32866ca56ca4SChao Yu
32878deca179SChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
32888deca179SChao Yu if (pages != pages_local)
32898deca179SChao Yu kfree(pages);
32908deca179SChao Yu #endif
32918deca179SChao Yu
32928f46dcaeSChao Yu return ret;
32938f46dcaeSChao Yu }
32948f46dcaeSChao Yu
__should_serialize_io(struct inode * inode,struct writeback_control * wbc)3295853137ceSJaegeuk Kim static inline bool __should_serialize_io(struct inode *inode,
3296853137ceSJaegeuk Kim struct writeback_control *wbc)
3297853137ceSJaegeuk Kim {
3298040d2bb3SChao Yu /* to avoid deadlock in path of data flush */
3299d80afefbSChao Yu if (F2FS_I(inode)->wb_task)
3300040d2bb3SChao Yu return false;
3301b13f67ffSChao Yu
3302b13f67ffSChao Yu if (!S_ISREG(inode->i_mode))
3303b13f67ffSChao Yu return false;
3304b13f67ffSChao Yu if (IS_NOQUOTA(inode))
3305b13f67ffSChao Yu return false;
3306b13f67ffSChao Yu
3307602a16d5SDaeho Jeong if (f2fs_need_compress_data(inode))
3308b13f67ffSChao Yu return true;
3309853137ceSJaegeuk Kim if (wbc->sync_mode != WB_SYNC_ALL)
3310853137ceSJaegeuk Kim return true;
3311853137ceSJaegeuk Kim if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
3312853137ceSJaegeuk Kim return true;
3313853137ceSJaegeuk Kim return false;
3314853137ceSJaegeuk Kim }
3315853137ceSJaegeuk Kim
/*
 * Write back dirty data pages of @mapping, with f2fs-specific gating:
 * several fast-path skip conditions, WB_SYNC_ALL vs WB_SYNC_NONE
 * arbitration via wb_sync_req, and optional serialization under
 * sbi->writepages (see __should_serialize_io()).
 * @io_type feeds iostat accounting (FS_DATA_IO or FS_CP_DATA_IO).
 */
static int __f2fs_write_data_pages(struct address_space *mapping,
						struct writeback_control *wbc,
						enum iostat_type io_type)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;
	bool locked = false;

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/*
	 * Background writeback of dirs/quota files can be deferred while
	 * the dirty count is small and dentry cache memory is available.
	 */
	if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			f2fs_available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing in file defragment preparing stage */
	if (is_inode_flag_set(inode, FI_SKIP_WRITES))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* to avoid spliting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[DATA]);
	else if (atomic_read(&sbi->wb_sync_req[DATA])) {
		/* to avoid potential deadlock */
		if (current->plug)
			blk_finish_plug(current->plug);
		goto skip_write;
	}

	if (__should_serialize_io(inode, wbc)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}

	/* plug bios so contiguous writes are merged before submission */
	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
	blk_finish_plug(&plug);

	if (locked)
		mutex_unlock(&sbi->writepages);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[DATA]);
	/*
	 * if some pages were truncated, we cannot guarantee its mapping->host
	 * to detect pending bios.
	 */

	f2fs_remove_dirty_inode(inode);
	return ret;

skip_write:
	/* report the skipped pages so the flusher can reschedule us */
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}
3387eb47b800SJaegeuk Kim
f2fs_write_data_pages(struct address_space * mapping,struct writeback_control * wbc)3388b0af6d49SChao Yu static int f2fs_write_data_pages(struct address_space *mapping,
3389b0af6d49SChao Yu struct writeback_control *wbc)
3390b0af6d49SChao Yu {
3391b0af6d49SChao Yu struct inode *inode = mapping->host;
3392b0af6d49SChao Yu
3393b0af6d49SChao Yu return __f2fs_write_data_pages(mapping, wbc,
3394b0af6d49SChao Yu F2FS_I(inode)->cp_task == current ?
3395b0af6d49SChao Yu FS_CP_DATA_IO : FS_DATA_IO);
3396b0af6d49SChao Yu }
3397b0af6d49SChao Yu
/*
 * Undo the effects of a failed write that would have extended the file:
 * drop page cache and truncate blocks back to the current i_size.
 */
void f2fs_write_failed(struct inode *inode, loff_t to)
{
	loff_t old_size = i_size_read(inode);

	if (IS_NOQUOTA(inode))
		return;

	/* nothing to undo unless the failed write reached past i_size */
	if (to <= old_size)
		return;

	/* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
	if (f2fs_verity_in_progress(inode))
		return;

	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	filemap_invalidate_lock(inode->i_mapping);

	truncate_pagecache(inode, old_size);
	f2fs_truncate_blocks(inode, old_size, true);

	filemap_invalidate_unlock(inode->i_mapping);
	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
}
34173aab8f82SChao Yu
/*
 * Resolve the block address backing @page before a buffered write,
 * handling inline-data conversion and hole reservation.
 * On success, *blk_addr holds the existing/reserved block address and
 * *node_changed tells the caller whether node pages were dirtied
 * (so it can rebalance). May take f2fs_map_lock() internally.
 */
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	int flag = F2FS_GET_BLOCK_PRE_AIO;
	int err = 0;

	/*
	 * If a whole page is being written and we already preallocated all the
	 * blocks, then there is no need to get a block address now.
	 */
	if (len == PAGE_SIZE && is_inode_flag_set(inode, FI_PREALLOCATED_ALL))
		return 0;

	/* f2fs_lock_op avoids race between write CP and convert_inline_page */
	if (f2fs_has_inline_data(inode)) {
		/* a write past the inline limit forces conversion below */
		if (pos + len > MAX_INLINE_DATA(inode))
			flag = F2FS_GET_BLOCK_DEFAULT;
		f2fs_map_lock(sbi, flag);
		locked = true;
	} else if ((pos & PAGE_MASK) >= i_size_read(inode)) {
		/* appending write: lock up front, block must be reserved */
		f2fs_map_lock(sbi, flag);
		locked = true;
	}

restart:
	/* check inline_data */
	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA(inode)) {
			/* write fits inline: copy inline data into the page */
			f2fs_do_read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_page_private_inline(ipage);
			goto out;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err || dn.data_blkaddr != NULL_ADDR)
			goto out;
	}

	if (!f2fs_lookup_read_extent_cache_block(inode, index,
						&dn.data_blkaddr)) {
		if (locked) {
			err = f2fs_reserve_block(&dn, index);
			goto out;
		}

		/* hole case */
		err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
		if (!err && dn.data_blkaddr != NULL_ADDR)
			goto out;
		/*
		 * Hit a hole without the map lock held: drop the dnode,
		 * take the lock, and retry so we can reserve a block.
		 */
		f2fs_put_dnode(&dn);
		f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
		WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
		locked = true;
		goto restart;
	}
out:
	if (!err) {
		/* convert_inline_page can make node_changed */
		*blk_addr = dn.data_blkaddr;
		*node_changed = dn.node_changed;
	}
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_map_unlock(sbi, flag);
	return err;
}
35002aadac08SJaegeuk Kim
/*
 * Look up the on-disk block address of @index in @inode without
 * allocating anything. A hole is reported as NULL_ADDR with return 0;
 * only a failure to read the inode page itself is returned as an error.
 */
static int __find_data_block(struct inode *inode, pgoff_t index,
				block_t *blk_addr)
{
	struct dnode_of_data dn;
	struct page *ipage;
	int err;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_lookup_read_extent_cache_block(inode, index,
						&dn.data_blkaddr))
		goto found;

	/* not in the extent cache: walk the node tree; failure means hole */
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		dn.data_blkaddr = NULL_ADDR;
found:
	*blk_addr = dn.data_blkaddr;
	f2fs_put_dnode(&dn);
	return 0;
}
35273db1de0eSDaeho Jeong
/*
 * Reserve a block for @index in @inode (unless the extent cache already
 * maps it), reporting the resulting address and whether node pages were
 * dirtied. Serialized with the PRE_AIO map lock.
 */
static int __reserve_data_block(struct inode *inode, pgoff_t index,
				block_t *blk_addr, bool *node_changed)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage;
	int ret = 0;

	f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		ret = PTR_ERR(ipage);
	} else {
		set_new_dnode(&dn, inode, ipage, ipage, 0);

		/* allocate only when no mapping is cached for this index */
		if (!f2fs_lookup_read_extent_cache_block(dn.inode, index,
							&dn.data_blkaddr))
			ret = f2fs_reserve_block(&dn, index);

		*blk_addr = dn.data_blkaddr;
		*node_changed = dn.node_changed;
		f2fs_put_dnode(&dn);
	}

	f2fs_map_unlock(sbi, F2FS_GET_BLOCK_PRE_AIO);
	return ret;
}
35573db1de0eSDaeho Jeong
/*
 * write_begin preparation for an atomic-write file: locate or reserve the
 * target block in the COW inode. *use_cow is set when the data to read
 * comes from the COW inode; otherwise *blk_addr may be redirected to the
 * original inode's block so the old data can be read before overwrite.
 */
static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned int len,
			block_t *blk_addr, bool *node_changed, bool *use_cow)
{
	struct inode *inode = page->mapping->host;
	struct inode *cow_inode = F2FS_I(inode)->cow_inode;
	pgoff_t index = page->index;
	int err = 0;
	block_t ori_blk_addr = NULL_ADDR;

	/* If pos is beyond the end of file, reserve a new block in COW inode */
	if ((pos & PAGE_MASK) >= i_size_read(inode))
		goto reserve_block;

	/* Look for the block in COW inode first */
	err = __find_data_block(cow_inode, index, blk_addr);
	if (err) {
		return err;
	} else if (*blk_addr != NULL_ADDR) {
		/* already COWed once: keep using the COW copy */
		*use_cow = true;
		return 0;
	}

	/* replace-mode atomic writes never read the original data back */
	if (is_inode_flag_set(inode, FI_ATOMIC_REPLACE))
		goto reserve_block;

	/* Look for the block in the original inode */
	err = __find_data_block(inode, index, &ori_blk_addr);
	if (err)
		return err;

reserve_block:
	/* Finally, we should reserve a new block in COW inode for the update */
	err = __reserve_data_block(cow_inode, index, blk_addr, node_changed);
	if (err)
		return err;
	inc_atomic_write_cnt(inode);

	/* read old data from the original block, if one exists */
	if (ori_blk_addr != NULL_ADDR)
		*blk_addr = ori_blk_addr;
	return 0;
}
36003db1de0eSDaeho Jeong
/*
 * .write_begin: lock the target page and make it ready for a buffered
 * write — converting inline data, reserving/looking up the backing block
 * (COW path for atomic files), rebalancing free sections if needed, and
 * reading in the old block when the write is partial.
 * Returns 0 with *pagep locked, or a negative errno after cleanup.
 */
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false;
	bool use_cow = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len);

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto fail;
	}

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret;

		*fsdata = NULL;

		/* a full-page overwrite needs no compress-cluster preparation */
		if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
			goto repeat;

		ret = f2fs_prepare_compress_overwrite(inode, pagep,
							index, fsdata);
		if (ret < 0) {
			err = ret;
			goto fail;
		} else if (ret) {
			/* cluster prepared; *pagep/*fsdata already set */
			return 0;
		}
	}
#endif

repeat:
	/*
	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
	 * wait_for_stable_page. Will wait that below with our IO control.
	 */
	page = f2fs_pagecache_get_page(mapping, index,
				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	/* TODO: cluster can be compressed due to race with .writepage */

	*pagep = page;

	if (f2fs_is_atomic_file(inode))
		err = prepare_atomic_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance, &use_cow);
	else
		err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && !IS_NOQUOTA(inode) &&
			has_not_enough_free_secs(sbi, 0, 0)) {
		/* drop the page lock while rebalancing to avoid deadlock */
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* full-page or already-cached writes need no read-modify-write */
	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	/* write starts at page boundary and extends past EOF: just zero tail */
	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
	    !f2fs_verity_in_progress(inode)) {
		zero_user_segment(page, len, PAGE_SIZE);
		return 0;
	}

	if (blkaddr == NEW_ADDR) {
		/* freshly reserved block: page content starts as zeros */
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
				DATA_GENERIC_ENHANCE_READ)) {
			err = -EFSCORRUPTED;
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			goto fail;
		}
		/* partial write: read the existing block in first */
		err = f2fs_submit_page_read(use_cow ?
				F2FS_I(inode)->cow_inode : inode, page,
				blkaddr, 0, true);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			/* truncated while the read was in flight; retry */
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(inode, pos + len);
	return err;
}
3732eb47b800SJaegeuk Kim
/*
 * .write_end: commit the bytes copied into @page by the generic write
 * path — mark the page dirty, extend i_size (and the COW inode's size
 * for atomic files), and release the page lock taken by write_begin.
 * Returns the number of bytes accepted (0 forces a retry upstream).
 */
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should be come from len == PAGE_SIZE, and we expect copied
	 * should be PAGE_SIZE. Otherwise, we treat it with zero copied and
	 * let generic_perform_write() try to copy data again through copied=0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != len))
			copied = 0;
		else
			SetPageUptodate(page);
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/* overwrite compressed file */
	if (f2fs_compressed_file(inode) && fsdata) {
		/* the cluster pages were set up by prepare_compress_overwrite */
		f2fs_compress_write_end(inode, fsdata, page->index, copied);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

		if (pos + copied > i_size_read(inode) &&
				!f2fs_verity_in_progress(inode))
			f2fs_i_size_write(inode, pos + copied);
		return copied;
	}
#endif

	if (!copied)
		goto unlock_out;

	set_page_dirty(page);

	/* fs-verity extends i_size itself in f2fs_end_enable_verity() */
	if (pos + copied > i_size_read(inode) &&
	    !f2fs_verity_in_progress(inode)) {
		f2fs_i_size_write(inode, pos + copied);
		/* keep the COW inode's size in sync for atomic commits */
		if (f2fs_is_atomic_file(inode))
			f2fs_i_size_write(F2FS_I(inode)->cow_inode,
					pos + copied);
	}
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}
3784a1dd3c13SJaegeuk Kim
/*
 * .invalidate_folio: drop f2fs private state and dirty accounting when a
 * folio is removed from the page cache (truncate/punch-hole path).
 */
void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct inode *inode = folio->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool partial = offset || length != folio_size(folio);

	/* for non-meta inodes, a partial invalidation keeps the folio */
	if (partial && inode->i_ino >= F2FS_ROOT_INO(sbi))
		return;

	if (folio_test_dirty(folio)) {
		nid_t ino = inode->i_ino;

		if (ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}
	}

	clear_page_private_all(&folio->page);
}
3806eb47b800SJaegeuk Kim
f2fs_release_folio(struct folio * folio,gfp_t wait)3807c26cd045SMatthew Wilcox (Oracle) bool f2fs_release_folio(struct folio *folio, gfp_t wait)
3808eb47b800SJaegeuk Kim {
3809c26cd045SMatthew Wilcox (Oracle) /* If this is dirty folio, keep private data */
3810c26cd045SMatthew Wilcox (Oracle) if (folio_test_dirty(folio))
3811c26cd045SMatthew Wilcox (Oracle) return false;
3812f68daeebSJaegeuk Kim
3813635a52daSChao Yu clear_page_private_all(&folio->page);
3814c26cd045SMatthew Wilcox (Oracle) return true;
3815eb47b800SJaegeuk Kim }
3816eb47b800SJaegeuk Kim
f2fs_dirty_data_folio(struct address_space * mapping,struct folio * folio)38174f5e34f7SMatthew Wilcox (Oracle) static bool f2fs_dirty_data_folio(struct address_space *mapping,
38184f5e34f7SMatthew Wilcox (Oracle) struct folio *folio)
3819eb47b800SJaegeuk Kim {
38204f5e34f7SMatthew Wilcox (Oracle) struct inode *inode = mapping->host;
3821eb47b800SJaegeuk Kim
38224f5e34f7SMatthew Wilcox (Oracle) trace_f2fs_set_page_dirty(&folio->page, DATA);
382326c6b887SJaegeuk Kim
38244f5e34f7SMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio))
38254f5e34f7SMatthew Wilcox (Oracle) folio_mark_uptodate(folio);
38264f5e34f7SMatthew Wilcox (Oracle) BUG_ON(folio_test_swapcache(folio));
382734ba94baSJaegeuk Kim
38289b7eadd9SShuqi Zhang if (filemap_dirty_folio(mapping, folio)) {
38294f5e34f7SMatthew Wilcox (Oracle) f2fs_update_dirty_folio(inode, folio);
38304f5e34f7SMatthew Wilcox (Oracle) return true;
3831eb47b800SJaegeuk Kim }
38320fb5b2ebSMatthew Wilcox (Oracle) return false;
3833eb47b800SJaegeuk Kim }
3834eb47b800SJaegeuk Kim
3835c1c63387SChao Yu
/*
 * bmap helper for compressed files: resolve @block to a physical block
 * number, or 0 when the cluster is compressed (no 1:1 mapping exists),
 * the address is invalid, or the dnode lookup fails.
 */
static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct dnode_of_data dn;
	sector_t cluster_start;
	sector_t pblk = 0;

	/* look up the dnode at the start of the containing cluster */
	cluster_start = round_down(block, F2FS_I(inode)->i_cluster_size);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (f2fs_get_dnode_of_data(&dn, cluster_start, LOOKUP_NODE))
		return 0;

	/* a COMPRESS_ADDR cluster has no per-block physical address */
	if (dn.data_blkaddr != COMPRESS_ADDR) {
		dn.ofs_in_node += block - cluster_start;
		pblk = f2fs_data_blkaddr(&dn);
		if (!__is_valid_data_blkaddr(pblk))
			pblk = 0;
	}

	f2fs_put_dnode(&dn);
	return pblk;
#else
	return 0;
#endif
}
3863c1c63387SChao Yu
3864c1c63387SChao Yu
/*
 * .bmap: map a logical file block to its physical block number, flushing
 * dirty pages first so the mapping is stable. Returns 0 for inline data,
 * out-of-range blocks, and unmapped/compressed locations.
 */
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	sector_t pblk = 0;

	/* inline data lives in the inode block: no mappable address */
	if (f2fs_has_inline_data(inode))
		goto done;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(block >= max_file_blocks(inode)))
		goto done;

	if (f2fs_compressed_file(inode)) {
		pblk = f2fs_bmap_compress(inode, block);
	} else {
		struct f2fs_map_blocks map = {
			.m_lblk = block,
			.m_len = 1,
			.m_next_pgofs = NULL,
			.m_seg_type = NO_CHECK_TYPE,
		};

		if (!f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_BMAP))
			pblk = map.m_pblk;
	}
done:
	trace_f2fs_bmap(inode, block, pblk);
	return pblk;
}
3899429511cdSChao Yu
39004969c06aSJaegeuk Kim #ifdef CONFIG_SWAP
/*
 * Rewrite [start_blk, start_blk + blkcnt) section by section into freshly
 * allocated pinned cold-data sections, so the range becomes physically
 * contiguous/aligned (used by the swapfile activation path).
 * Each section is redirtied page-by-page and then written out under
 * pin_sem so the allocation target cannot change mid-section.
 */
static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
						unsigned int blkcnt)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blkofs;
	unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
	unsigned int secidx = start_blk / blk_per_sec;
	unsigned int end_sec = secidx + blkcnt / blk_per_sec;
	int ret = 0;

	/* block GC and concurrent invalidation for the whole migration */
	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	filemap_invalidate_lock(inode->i_mapping);

	set_inode_flag(inode, FI_ALIGNED_WRITE);
	set_inode_flag(inode, FI_OPU_WRITE);

	for (; secidx < end_sec; secidx++) {
		f2fs_down_write(&sbi->pin_sem);

		/* open a fresh pinned section to receive this batch */
		f2fs_lock_op(sbi);
		f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
		f2fs_unlock_op(sbi);

		/* keep writeback away while we redirty the section's pages */
		set_inode_flag(inode, FI_SKIP_WRITES);

		for (blkofs = 0; blkofs < blk_per_sec; blkofs++) {
			struct page *page;
			unsigned int blkidx = secidx * blk_per_sec + blkofs;

			page = f2fs_get_lock_data_page(inode, blkidx, true);
			if (IS_ERR(page)) {
				f2fs_up_write(&sbi->pin_sem);
				ret = PTR_ERR(page);
				goto done;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);
		}

		clear_inode_flag(inode, FI_SKIP_WRITES);

		/* flush the section into the newly allocated pinned segment */
		ret = filemap_fdatawrite(inode->i_mapping);

		f2fs_up_write(&sbi->pin_sem);

		if (ret)
			break;
	}

done:
	clear_inode_flag(inode, FI_SKIP_WRITES);
	clear_inode_flag(inode, FI_OPU_WRITE);
	clear_inode_flag(inode, FI_ALIGNED_WRITE);

	filemap_invalidate_unlock(inode->i_mapping);
	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	return ret;
}
3961859fca6bSChao Yu
/*
 * Walk the swap file and register each physically contiguous,
 * section-aligned extent with the swap core through add_swap_extent().
 * A misaligned extent is rewritten in place by f2fs_migrate_blocks() and
 * then re-mapped (goto retry).  Any hole in the file is fatal.
 *
 * On success returns the number of extents added and fills *span with the
 * physical block span; on failure returns a negative errno.
 */
static int check_swap_activate(struct swap_info_struct *sis,
				struct file *swap_file, sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	sector_t cur_lblock;
	sector_t last_lblock;
	sector_t pblock;
	sector_t lowest_pblock = -1;
	sector_t highest_pblock = 0;
	int nr_extents = 0;
	unsigned long nr_pblocks;
	unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
	unsigned int sec_blks_mask = BLKS_PER_SEC(sbi) - 1;
	unsigned int not_aligned = 0;
	int ret = 0;

	/*
	 * Map all the blocks into the extent list. This code doesn't try
	 * to be very smart.
	 */
	cur_lblock = 0;
	last_lblock = bytes_to_blks(inode, i_size_read(inode));

	while (cur_lblock < last_lblock && cur_lblock < sis->max) {
		struct f2fs_map_blocks map;
retry:
		cond_resched();

		memset(&map, 0, sizeof(map));
		map.m_lblk = cur_lblock;
		map.m_len = last_lblock - cur_lblock;
		map.m_next_pgofs = NULL;
		map.m_next_extent = NULL;
		map.m_seg_type = NO_CHECK_TYPE;
		map.m_may_create = false;

		ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
		if (ret)
			goto out;

		/* hole */
		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			f2fs_err(sbi, "Swapfile has holes");
			ret = -EINVAL;
			goto out;
		}

		pblock = map.m_pblk;
		nr_pblocks = map.m_len;

		/* extent start or length not aligned to a section? */
		if ((pblock - SM_I(sbi)->main_blkaddr) & sec_blks_mask ||
				nr_pblocks & sec_blks_mask) {
			not_aligned++;

			nr_pblocks = roundup(nr_pblocks, blks_per_sec);
			if (cur_lblock + nr_pblocks > sis->max)
				nr_pblocks -= blks_per_sec;

			if (!nr_pblocks) {
				/* this extent is last one */
				nr_pblocks = map.m_len;
				f2fs_warn(sbi, "Swapfile: last extent is not aligned to section");
				goto next;
			}

			/* rewrite the run in place, then map it again */
			ret = f2fs_migrate_blocks(inode, cur_lblock,
							nr_pblocks);
			if (ret)
				goto out;
			goto retry;
		}
next:
		if (cur_lblock + nr_pblocks >= sis->max)
			nr_pblocks = sis->max - cur_lblock;

		if (cur_lblock) {	/* exclude the header page */
			if (pblock < lowest_pblock)
				lowest_pblock = pblock;
			if (pblock + nr_pblocks - 1 > highest_pblock)
				highest_pblock = pblock + nr_pblocks - 1;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		cur_lblock += nr_pblocks;
	}
	ret = nr_extents;
	*span = 1 + highest_pblock - lowest_pblock;
	if (cur_lblock == 0)
		cur_lblock = 1;	/* force Empty message */
	sis->max = cur_lblock;
	sis->pages = cur_lblock - 1;
	sis->highest_bit = cur_lblock - 1;
out:
	if (not_aligned)
		f2fs_warn(sbi, "Swapfile (%u) is not align to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%u * N)",
			  not_aligned, blks_per_sec * F2FS_BLKSIZE);
	return ret;
}
4068af4b6b8eSChao Yu
/*
 * ->swap_activate() for f2fs: validate that @file may back a swap area
 * (regular file, writable fs, not LFS mode, not inline, not compressed),
 * register its extents via check_swap_activate(), then pin the file so
 * its blocks stay in place while swap is active.
 *
 * Returns the number of swap extents on success, or a negative errno.
 */
static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
				sector_t *span)
{
	struct inode *inode = file_inode(file);
	int ret;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
		f2fs_err(F2FS_I_SB(inode),
			"Swapfile not supported in LFS mode");
		return -EINVAL;
	}

	/* swap needs real block mappings; inline data has none */
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	if (!f2fs_disable_compressed_file(inode))
		return -EINVAL;

	f2fs_precache_extents(inode);

	ret = check_swap_activate(sis, file, span);
	if (ret < 0)
		return ret;

	stat_inc_swapfile_inode(inode);
	set_inode_flag(inode, FI_PIN_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
41054969c06aSJaegeuk Kim
/* ->swap_deactivate(): drop the pin taken by f2fs_swap_activate(). */
static void f2fs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);

	stat_dec_swapfile_inode(inode);
	clear_inode_flag(inode, FI_PIN_FILE);
}
41134969c06aSJaegeuk Kim #else
/* !CONFIG_SWAP stub: swap files can never be activated on f2fs */
static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
				sector_t *span)
{
	return -EOPNOTSUPP;
}
41194969c06aSJaegeuk Kim
/* !CONFIG_SWAP stub: nothing was pinned, nothing to undo */
static void f2fs_swap_deactivate(struct file *file)
{
}
41234969c06aSJaegeuk Kim #endif
41244969c06aSJaegeuk Kim
/* address_space operations for f2fs data (dblock) pages */
const struct address_space_operations f2fs_dblock_aops = {
	.read_folio	= f2fs_read_data_folio,
	.readahead	= f2fs_readahead,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.dirty_folio	= f2fs_dirty_data_folio,
	.migrate_folio	= filemap_migrate_folio,
	.invalidate_folio = f2fs_invalidate_folio,
	.release_folio	= f2fs_release_folio,
	.bmap		= f2fs_bmap,
	.swap_activate	= f2fs_swap_activate,
	.swap_deactivate = f2fs_swap_deactivate,
};
41406dbb1796SEric Biggers
/*
 * Clear PAGECACHE_TAG_DIRTY for @page in its mapping's xarray, under the
 * i_pages lock with interrupts disabled, so writeback no longer treats
 * the page as dirty.
 */
void f2fs_clear_page_cache_dirty_tag(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	__xa_clear_mark(&mapping->i_pages, page_index(page),
						PAGECACHE_TAG_DIRTY);
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}
4151aec2f729SChao Yu
f2fs_init_post_read_processing(void)41526dbb1796SEric Biggers int __init f2fs_init_post_read_processing(void)
41536dbb1796SEric Biggers {
415495ae251fSEric Biggers bio_post_read_ctx_cache =
415595ae251fSEric Biggers kmem_cache_create("f2fs_bio_post_read_ctx",
415695ae251fSEric Biggers sizeof(struct bio_post_read_ctx), 0, 0, NULL);
41576dbb1796SEric Biggers if (!bio_post_read_ctx_cache)
41586dbb1796SEric Biggers goto fail;
41596dbb1796SEric Biggers bio_post_read_ctx_pool =
41606dbb1796SEric Biggers mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
41616dbb1796SEric Biggers bio_post_read_ctx_cache);
41626dbb1796SEric Biggers if (!bio_post_read_ctx_pool)
41636dbb1796SEric Biggers goto fail_free_cache;
41646dbb1796SEric Biggers return 0;
41656dbb1796SEric Biggers
41666dbb1796SEric Biggers fail_free_cache:
41676dbb1796SEric Biggers kmem_cache_destroy(bio_post_read_ctx_cache);
41686dbb1796SEric Biggers fail:
41696dbb1796SEric Biggers return -ENOMEM;
41706dbb1796SEric Biggers }
41716dbb1796SEric Biggers
/* Tear down what f2fs_init_post_read_processing() created. */
void f2fs_destroy_post_read_processing(void)
{
	/* the pool is destroyed first; its elements come from the cache */
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}
41770b20fcecSChao Yu
f2fs_init_post_read_wq(struct f2fs_sb_info * sbi)41784c8ff709SChao Yu int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
41794c8ff709SChao Yu {
41804c8ff709SChao Yu if (!f2fs_sb_has_encrypt(sbi) &&
41814c8ff709SChao Yu !f2fs_sb_has_verity(sbi) &&
41824c8ff709SChao Yu !f2fs_sb_has_compression(sbi))
41834c8ff709SChao Yu return 0;
41844c8ff709SChao Yu
41854c8ff709SChao Yu sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
41864c8ff709SChao Yu WQ_UNBOUND | WQ_HIGHPRI,
41874c8ff709SChao Yu num_online_cpus());
4188870af777SYangtao Li return sbi->post_read_wq ? 0 : -ENOMEM;
41894c8ff709SChao Yu }
41904c8ff709SChao Yu
/* Destroy the post-read workqueue, if one was ever allocated. */
void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
{
	if (sbi->post_read_wq)
		destroy_workqueue(sbi->post_read_wq);
}
41964c8ff709SChao Yu
f2fs_init_bio_entry_cache(void)41970b20fcecSChao Yu int __init f2fs_init_bio_entry_cache(void)
41980b20fcecSChao Yu {
419998510003SChao Yu bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
42000b20fcecSChao Yu sizeof(struct bio_entry));
4201870af777SYangtao Li return bio_entry_slab ? 0 : -ENOMEM;
42020b20fcecSChao Yu }
42030b20fcecSChao Yu
/* Free the slab cache created by f2fs_init_bio_entry_cache(). */
void f2fs_destroy_bio_entry_cache(void)
{
	kmem_cache_destroy(bio_entry_slab);
}
42081517c1a7SEric Biggers
/*
 * ->iomap_begin() for f2fs direct I/O: translate the byte range
 * [offset, offset + length) into one mapped extent or one hole in @iomap.
 *
 * Returns 0 on success; -ENOTBLK for a write into a hole (caller falls
 * back to buffered I/O); -EINVAL on an unexpected delalloc/compressed
 * block; or the error from f2fs_map_blocks().
 */
static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap,
		struct iomap *srcmap)
{
	struct f2fs_map_blocks map = {};
	pgoff_t next_pgofs = 0;
	int err;

	map.m_lblk = bytes_to_blks(inode, offset);
	map.m_len = bytes_to_blks(inode, offset + length - 1) - map.m_lblk + 1;
	map.m_next_pgofs = &next_pgofs;
	map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
	if (flags & IOMAP_WRITE)
		map.m_may_create = true;	/* allow block allocation */

	err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DIO);
	if (err)
		return err;

	iomap->offset = blks_to_bytes(inode, map.m_lblk);

	/*
	 * When inline encryption is enabled, sometimes I/O to an encrypted file
	 * has to be broken up to guarantee DUN contiguity. Handle this by
	 * limiting the length of the mapping returned.
	 */
	map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);

	/*
	 * We should never see delalloc or compressed extents here based on
	 * prior flushing and checks.
	 */
	if (WARN_ON_ONCE(map.m_pblk == NEW_ADDR))
		return -EINVAL;
	if (WARN_ON_ONCE(map.m_pblk == COMPRESS_ADDR))
		return -EINVAL;

	if (map.m_pblk != NULL_ADDR) {
		iomap->length = blks_to_bytes(inode, map.m_len);
		iomap->type = IOMAP_MAPPED;
		/* the mapping may cover several merged f2fs extents */
		iomap->flags |= IOMAP_F_MERGED;
		iomap->bdev = map.m_bdev;
		iomap->addr = blks_to_bytes(inode, map.m_pblk);
	} else {
		if (flags & IOMAP_WRITE)
			return -ENOTBLK;
		/* hole runs up to next_pgofs, filled by f2fs_map_blocks() */
		iomap->length = blks_to_bytes(inode, next_pgofs) -
				iomap->offset;
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
	}

	if (map.m_flags & F2FS_MAP_NEW)
		iomap->flags |= IOMAP_F_NEW;
	if ((inode->i_state & I_DIRTY_DATASYNC) ||
	    offset + length > i_size_read(inode))
		iomap->flags |= IOMAP_F_DIRTY;

	return 0;
}
42691517c1a7SEric Biggers
/* iomap operations used by the f2fs direct I/O path */
const struct iomap_ops f2fs_iomap_ops = {
	.iomap_begin	= f2fs_iomap_begin,
};
4273