1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * fs/f2fs/data.c
4 *
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
7 */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/buffer_head.h>
11 #include <linux/sched/mm.h>
12 #include <linux/mpage.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include <linux/blkdev.h>
16 #include <linux/bio.h>
17 #include <linux/blk-crypto.h>
18 #include <linux/swap.h>
19 #include <linux/prefetch.h>
20 #include <linux/uio.h>
21 #include <linux/sched/signal.h>
22 #include <linux/fiemap.h>
23 #include <linux/iomap.h>
24
25 #include "f2fs.h"
26 #include "node.h"
27 #include "segment.h"
28 #include "iostat.h"
29 #include <trace/events/f2fs.h>
30
31 #define NUM_PREALLOC_POST_READ_CTXS 128
32
33 static struct kmem_cache *bio_post_read_ctx_cache;
34 static struct kmem_cache *bio_entry_slab;
35 static mempool_t *bio_post_read_ctx_pool;
36 static struct bio_set f2fs_bioset;
37
38 #define F2FS_BIO_POOL_SIZE NR_CURSEG_TYPE
39
40 int __init f2fs_init_bioset(void)
41 {
42 return bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
43 0, BIOSET_NEED_BVECS);
44 }
45
46 void f2fs_destroy_bioset(void)
47 {
48 bioset_exit(&f2fs_bioset);
49 }
50
51 bool f2fs_is_cp_guaranteed(struct page *page)
52 {
53 struct address_space *mapping = page->mapping;
54 struct inode *inode;
55 struct f2fs_sb_info *sbi;
56
57 if (!mapping)
58 return false;
59
60 inode = mapping->host;
61 sbi = F2FS_I_SB(inode);
62
63 if (inode->i_ino == F2FS_META_INO(sbi) ||
64 inode->i_ino == F2FS_NODE_INO(sbi) ||
65 S_ISDIR(inode->i_mode))
66 return true;
67
68 if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
69 page_private_gcing(page))
70 return true;
71 return false;
72 }
73
74 static enum count_type __read_io_type(struct page *page)
75 {
76 struct address_space *mapping = page_file_mapping(page);
77
78 if (mapping) {
79 struct inode *inode = mapping->host;
80 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
81
82 if (inode->i_ino == F2FS_META_INO(sbi))
83 return F2FS_RD_META;
84
85 if (inode->i_ino == F2FS_NODE_INO(sbi))
86 return F2FS_RD_NODE;
87 }
88 return F2FS_RD_DATA;
89 }
90
91 /* postprocessing steps for read bios */
92 enum bio_post_read_step {
93 #ifdef CONFIG_FS_ENCRYPTION
94 STEP_DECRYPT = BIT(0),
95 #else
96 STEP_DECRYPT = 0, /* compile out the decryption-related code */
97 #endif
98 #ifdef CONFIG_F2FS_FS_COMPRESSION
99 STEP_DECOMPRESS = BIT(1),
100 #else
101 STEP_DECOMPRESS = 0, /* compile out the decompression-related code */
102 #endif
103 #ifdef CONFIG_FS_VERITY
104 STEP_VERITY = BIT(2),
105 #else
106 STEP_VERITY = 0, /* compile out the verity-related code */
107 #endif
108 };
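/*
 * These step bits are OR-ed together per bio. As an illustration, a read
 * from a file that is encrypted, compressed and verity-protected (with all
 * three options compiled in) would carry
 * STEP_DECRYPT | STEP_DECOMPRESS | STEP_VERITY in its bio_post_read_ctx.
 */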
109
110 struct bio_post_read_ctx {
111 struct bio *bio;
112 struct f2fs_sb_info *sbi;
113 struct work_struct work;
114 unsigned int enabled_steps;
115 /*
116 * decompression_attempted keeps track of whether
117 * f2fs_end_read_compressed_page() has been called on the pages in the
118 * bio that belong to a compressed cluster yet.
119 */
120 bool decompression_attempted;
121 block_t fs_blkaddr;
122 };
123
124 /*
125 * Update and unlock a bio's pages, and free the bio.
126 *
127 * This marks pages up-to-date only if there was no error in the bio (I/O error,
128 * decryption error, or verity error), as indicated by bio->bi_status.
129 *
130 * "Compressed pages" (pagecache pages backed by a compressed cluster on-disk)
131 * aren't marked up-to-date here, as decompression is done on a per-compression-
132 * cluster basis rather than a per-bio basis. Instead, we only need to do two
133 * things for each compressed page here: call f2fs_end_read_compressed_page()
134 * with failed=true if an error occurred before it would have normally gotten
135 * called (i.e., I/O error or decryption error, but *not* verity error), and
136 * release the bio's reference to the decompress_io_ctx of the page's cluster.
137 */
138 static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
139 {
140 struct bio_vec *bv;
141 struct bvec_iter_all iter_all;
142 struct bio_post_read_ctx *ctx = bio->bi_private;
143
144 bio_for_each_segment_all(bv, bio, iter_all) {
145 struct page *page = bv->bv_page;
146
147 if (f2fs_is_compressed_page(page)) {
148 if (ctx && !ctx->decompression_attempted)
149 f2fs_end_read_compressed_page(page, true, 0,
150 in_task);
151 f2fs_put_page_dic(page, in_task);
152 continue;
153 }
154
155 if (bio->bi_status)
156 ClearPageUptodate(page);
157 else
158 SetPageUptodate(page);
159 dec_page_count(F2FS_P_SB(page), __read_io_type(page));
160 unlock_page(page);
161 }
162
163 if (ctx)
164 mempool_free(ctx, bio_post_read_ctx_pool);
165 bio_put(bio);
166 }
167
168 static void f2fs_verify_bio(struct work_struct *work)
169 {
170 struct bio_post_read_ctx *ctx =
171 container_of(work, struct bio_post_read_ctx, work);
172 struct bio *bio = ctx->bio;
173 bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);
174
175 /*
176 * fsverity_verify_bio() may call readahead() again, and while verity
177 * will be disabled for this, decryption and/or decompression may still
178 * be needed, resulting in another bio_post_read_ctx being allocated.
179 * So to prevent deadlocks we need to release the current ctx to the
180 * mempool first. This assumes that verity is the last post-read step.
181 */
182 mempool_free(ctx, bio_post_read_ctx_pool);
183 bio->bi_private = NULL;
184
185 /*
186 * Verify the bio's pages with fs-verity. Exclude compressed pages,
187 * as those were handled separately by f2fs_end_read_compressed_page().
188 */
189 if (may_have_compressed_pages) {
190 struct bio_vec *bv;
191 struct bvec_iter_all iter_all;
192
193 bio_for_each_segment_all(bv, bio, iter_all) {
194 struct page *page = bv->bv_page;
195
196 if (!f2fs_is_compressed_page(page) &&
197 !fsverity_verify_page(page)) {
198 bio->bi_status = BLK_STS_IOERR;
199 break;
200 }
201 }
202 } else {
203 fsverity_verify_bio(bio);
204 }
205
206 f2fs_finish_read_bio(bio, true);
207 }
208
209 /*
210 * If the bio's data needs to be verified with fs-verity, then enqueue the
211 * verity work for the bio. Otherwise finish the bio now.
212 *
213 * Note that to avoid deadlocks, the verity work can't be done on the
214 * decryption/decompression workqueue. This is because verifying the data pages
215 * can involve reading verity metadata pages from the file, and these verity
216 * metadata pages may be encrypted and/or compressed.
217 */
218 static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
219 {
220 struct bio_post_read_ctx *ctx = bio->bi_private;
221
222 if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
223 INIT_WORK(&ctx->work, f2fs_verify_bio);
224 fsverity_enqueue_verify_work(&ctx->work);
225 } else {
226 f2fs_finish_read_bio(bio, in_task);
227 }
228 }
229
230 /*
231 * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
232 * remaining page was read by @ctx->bio.
233 *
234 * Note that a bio may span clusters (even a mix of compressed and uncompressed
235 * clusters) or be for just part of a cluster. STEP_DECOMPRESS just indicates
236 * that the bio includes at least one compressed page. The actual decompression
237 * is done on a per-cluster basis, not a per-bio basis.
238 */
239 static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
240 bool in_task)
241 {
242 struct bio_vec *bv;
243 struct bvec_iter_all iter_all;
244 bool all_compressed = true;
245 block_t blkaddr = ctx->fs_blkaddr;
246
247 bio_for_each_segment_all(bv, ctx->bio, iter_all) {
248 struct page *page = bv->bv_page;
249
250 if (f2fs_is_compressed_page(page))
251 f2fs_end_read_compressed_page(page, false, blkaddr,
252 in_task);
253 else
254 all_compressed = false;
255
256 blkaddr++;
257 }
258
259 ctx->decompression_attempted = true;
260
261 /*
262 * Optimization: if all the bio's pages are compressed, then scheduling
263 * the per-bio verity work is unnecessary, as verity will be fully
264 * handled at the compression cluster level.
265 */
266 if (all_compressed)
267 ctx->enabled_steps &= ~STEP_VERITY;
268 }
269
270 static void f2fs_post_read_work(struct work_struct *work)
271 {
272 struct bio_post_read_ctx *ctx =
273 container_of(work, struct bio_post_read_ctx, work);
274 struct bio *bio = ctx->bio;
275
276 if ((ctx->enabled_steps & STEP_DECRYPT) && !fscrypt_decrypt_bio(bio)) {
277 f2fs_finish_read_bio(bio, true);
278 return;
279 }
280
281 if (ctx->enabled_steps & STEP_DECOMPRESS)
282 f2fs_handle_step_decompress(ctx, true);
283
284 f2fs_verify_and_finish_bio(bio, true);
285 }
286
287 static void f2fs_read_end_io(struct bio *bio)
288 {
289 struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
290 struct bio_post_read_ctx *ctx;
291 bool intask = in_task();
292
293 iostat_update_and_unbind_ctx(bio);
294 ctx = bio->bi_private;
295
296 if (time_to_inject(sbi, FAULT_READ_IO))
297 bio->bi_status = BLK_STS_IOERR;
298
299 if (bio->bi_status) {
300 f2fs_finish_read_bio(bio, intask);
301 return;
302 }
303
304 if (ctx) {
305 unsigned int enabled_steps = ctx->enabled_steps &
306 (STEP_DECRYPT | STEP_DECOMPRESS);
307
308 /*
309 * If decompression is the only post-read step required (no decryption),
310 * handle it here inline, unless low-memory mode forces using the workqueue.
311 */
312 if (enabled_steps == STEP_DECOMPRESS &&
313 !f2fs_low_mem_mode(sbi)) {
314 f2fs_handle_step_decompress(ctx, intask);
315 } else if (enabled_steps) {
316 INIT_WORK(&ctx->work, f2fs_post_read_work);
317 queue_work(ctx->sbi->post_read_wq, &ctx->work);
318 return;
319 }
320 }
321
322 f2fs_verify_and_finish_bio(bio, intask);
323 }
324
325 static void f2fs_write_end_io(struct bio *bio)
326 {
327 struct f2fs_sb_info *sbi;
328 struct bio_vec *bvec;
329 struct bvec_iter_all iter_all;
330
331 iostat_update_and_unbind_ctx(bio);
332 sbi = bio->bi_private;
333
334 if (time_to_inject(sbi, FAULT_WRITE_IO))
335 bio->bi_status = BLK_STS_IOERR;
336
337 bio_for_each_segment_all(bvec, bio, iter_all) {
338 struct page *page = bvec->bv_page;
339 enum count_type type = WB_DATA_TYPE(page, false);
340
341 fscrypt_finalize_bounce_page(&page);
342
343 #ifdef CONFIG_F2FS_FS_COMPRESSION
344 if (f2fs_is_compressed_page(page)) {
345 f2fs_compress_write_end_io(bio, page);
346 continue;
347 }
348 #endif
349
350 if (unlikely(bio->bi_status)) {
351 mapping_set_error(page->mapping, -EIO);
352 if (type == F2FS_WB_CP_DATA)
353 f2fs_stop_checkpoint(sbi, true,
354 STOP_CP_REASON_WRITE_FAIL);
355 }
356
357 f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
358 page->index != nid_of_node(page));
359
360 dec_page_count(sbi, type);
361 if (f2fs_in_warm_node_list(sbi, page))
362 f2fs_del_fsync_node_entry(sbi, page);
363 clear_page_private_gcing(page);
364 end_page_writeback(page);
365 }
366 if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
367 wq_has_sleeper(&sbi->cp_wait))
368 wake_up(&sbi->cp_wait);
369
370 bio_put(bio);
371 }
372
373 #ifdef CONFIG_BLK_DEV_ZONED
374 static void f2fs_zone_write_end_io(struct bio *bio)
375 {
376 struct f2fs_bio_info *io = (struct f2fs_bio_info *)bio->bi_private;
377
378 bio->bi_private = io->bi_private;
379 complete(&io->zone_wait);
380 f2fs_write_end_io(bio);
381 }
382 #endif
383
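/*
 * Translate a filesystem block address into the backing block device and,
 * optionally, the sector on that device. Illustrative example (hypothetical
 * multi-device layout): if FDEV(1) covers blocks 0x10000..0x1ffff, then
 * blk_addr 0x10010 maps to FDEV(1).bdev at an offset of 0x10 blocks from the
 * start of that device.
 */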
384 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
385 block_t blk_addr, sector_t *sector)
386 {
387 struct block_device *bdev = sbi->sb->s_bdev;
388 int i;
389
390 if (f2fs_is_multi_device(sbi)) {
391 for (i = 0; i < sbi->s_ndevs; i++) {
392 if (FDEV(i).start_blk <= blk_addr &&
393 FDEV(i).end_blk >= blk_addr) {
394 blk_addr -= FDEV(i).start_blk;
395 bdev = FDEV(i).bdev;
396 break;
397 }
398 }
399 }
400
401 if (sector)
402 *sector = SECTOR_FROM_BLOCK(blk_addr);
403 return bdev;
404 }
405
406 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
407 {
408 int i;
409
410 if (!f2fs_is_multi_device(sbi))
411 return 0;
412
413 for (i = 0; i < sbi->s_ndevs; i++)
414 if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
415 return i;
416 return 0;
417 }
418
419 static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
420 {
421 unsigned int temp_mask = GENMASK(NR_TEMP_TYPE - 1, 0);
422 unsigned int fua_flag, meta_flag, io_flag;
423 blk_opf_t op_flags = 0;
424
425 if (fio->op != REQ_OP_WRITE)
426 return 0;
427 if (fio->type == DATA)
428 io_flag = fio->sbi->data_io_flag;
429 else if (fio->type == NODE)
430 io_flag = fio->sbi->node_io_flag;
431 else
432 return 0;
433
434 fua_flag = io_flag & temp_mask;
435 meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;
436
437 /*
438 * data/node io flag bits per temp:
439 * REQ_META | REQ_FUA |
440 * 5 | 4 | 3 | 2 | 1 | 0 |
441 * Cold | Warm | Hot | Cold | Warm | Hot |
442 */
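/*
 * Worked example (hypothetical sysfs setting): io_flag = 0x09 = 0b001001
 * sets bit 0 (FUA for hot) and bit 3 (META for hot), so hot writes get
 * REQ_FUA | REQ_META while warm and cold writes get neither flag.
 */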
443 if (BIT(fio->temp) & meta_flag)
444 op_flags |= REQ_META;
445 if (BIT(fio->temp) & fua_flag)
446 op_flags |= REQ_FUA;
447 return op_flags;
448 }
449
450 static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
451 {
452 struct f2fs_sb_info *sbi = fio->sbi;
453 struct block_device *bdev;
454 sector_t sector;
455 struct bio *bio;
456
457 bdev = f2fs_target_device(sbi, fio->new_blkaddr, &sector);
458 bio = bio_alloc_bioset(bdev, npages,
459 fio->op | fio->op_flags | f2fs_io_flags(fio),
460 GFP_NOIO, &f2fs_bioset);
461 bio->bi_iter.bi_sector = sector;
462 if (is_read_io(fio->op)) {
463 bio->bi_end_io = f2fs_read_end_io;
464 bio->bi_private = NULL;
465 } else {
466 bio->bi_end_io = f2fs_write_end_io;
467 bio->bi_private = sbi;
468 }
469 iostat_alloc_and_bind_ctx(sbi, bio, NULL);
470
471 if (fio->io_wbc)
472 wbc_init_bio(fio->io_wbc, bio);
473
474 return bio;
475 }
476
477 static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
478 pgoff_t first_idx,
479 const struct f2fs_io_info *fio,
480 gfp_t gfp_mask)
481 {
482 /*
483 * The f2fs garbage collector sets ->encrypted_page when it wants to
484 * read/write raw data without encryption.
485 */
486 if (!fio || !fio->encrypted_page)
487 fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
488 }
489
490 static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
491 pgoff_t next_idx,
492 const struct f2fs_io_info *fio)
493 {
494 /*
495 * The f2fs garbage collector sets ->encrypted_page when it wants to
496 * read/write raw data without encryption.
497 */
498 if (fio && fio->encrypted_page)
499 return !bio_has_crypt_ctx(bio);
500
501 return fscrypt_mergeable_bio(bio, inode, next_idx);
502 }
503
504 void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
505 enum page_type type)
506 {
507 WARN_ON_ONCE(!is_read_io(bio_op(bio)));
508 trace_f2fs_submit_read_bio(sbi->sb, type, bio);
509
510 iostat_update_submit_ctx(bio, type);
511 submit_bio(bio);
512 }
513
514 static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
515 enum page_type type)
516 {
517 WARN_ON_ONCE(is_read_io(bio_op(bio)));
518
519 if (f2fs_lfs_mode(sbi) && current->plug && PAGE_TYPE_ON_MAIN(type))
520 blk_finish_plug(current->plug);
521
522 trace_f2fs_submit_write_bio(sbi->sb, type, bio);
523 iostat_update_submit_ctx(bio, type);
524 submit_bio(bio);
525 }
526
527 static void __submit_merged_bio(struct f2fs_bio_info *io)
528 {
529 struct f2fs_io_info *fio = &io->fio;
530
531 if (!io->bio)
532 return;
533
534 if (is_read_io(fio->op)) {
535 trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
536 f2fs_submit_read_bio(io->sbi, io->bio, fio->type);
537 } else {
538 trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
539 f2fs_submit_write_bio(io->sbi, io->bio, fio->type);
540 }
541 io->bio = NULL;
542 }
543
544 static bool __has_merged_page(struct bio *bio, struct inode *inode,
545 struct page *page, nid_t ino)
546 {
547 struct bio_vec *bvec;
548 struct bvec_iter_all iter_all;
549
550 if (!bio)
551 return false;
552
553 if (!inode && !page && !ino)
554 return true;
555
556 bio_for_each_segment_all(bvec, bio, iter_all) {
557 struct page *target = bvec->bv_page;
558
559 if (fscrypt_is_bounce_page(target)) {
560 target = fscrypt_pagecache_page(target);
561 if (IS_ERR(target))
562 continue;
563 }
564 if (f2fs_is_compressed_page(target)) {
565 target = f2fs_compress_control_page(target);
566 if (IS_ERR(target))
567 continue;
568 }
569
570 if (inode && inode == target->mapping->host)
571 return true;
572 if (page && page == target)
573 return true;
574 if (ino && ino == ino_of_node(target))
575 return true;
576 }
577
578 return false;
579 }
580
581 int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi)
582 {
583 int i;
584
585 for (i = 0; i < NR_PAGE_TYPE; i++) {
586 int n = (i == META) ? 1 : NR_TEMP_TYPE;
587 int j;
588
589 sbi->write_io[i] = f2fs_kmalloc(sbi,
590 array_size(n, sizeof(struct f2fs_bio_info)),
591 GFP_KERNEL);
592 if (!sbi->write_io[i])
593 return -ENOMEM;
594
595 for (j = HOT; j < n; j++) {
596 init_f2fs_rwsem(&sbi->write_io[i][j].io_rwsem);
597 sbi->write_io[i][j].sbi = sbi;
598 sbi->write_io[i][j].bio = NULL;
599 spin_lock_init(&sbi->write_io[i][j].io_lock);
600 INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
601 INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
602 init_f2fs_rwsem(&sbi->write_io[i][j].bio_list_lock);
603 #ifdef CONFIG_BLK_DEV_ZONED
604 init_completion(&sbi->write_io[i][j].zone_wait);
605 sbi->write_io[i][j].zone_pending_bio = NULL;
606 sbi->write_io[i][j].bi_private = NULL;
607 #endif
608 }
609 }
610
611 return 0;
612 }
613
614 static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
615 enum page_type type, enum temp_type temp)
616 {
617 enum page_type btype = PAGE_TYPE_OF_BIO(type);
618 struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
619
620 f2fs_down_write(&io->io_rwsem);
621
622 if (!io->bio)
623 goto unlock_out;
624
625 /* change META to META_FLUSH in the checkpoint procedure */
626 if (type >= META_FLUSH) {
627 io->fio.type = META_FLUSH;
628 io->bio->bi_opf |= REQ_META | REQ_PRIO | REQ_SYNC;
629 if (!test_opt(sbi, NOBARRIER))
630 io->bio->bi_opf |= REQ_PREFLUSH | REQ_FUA;
631 }
632 __submit_merged_bio(io);
633 unlock_out:
634 f2fs_up_write(&io->io_rwsem);
635 }
636
637 static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
638 struct inode *inode, struct page *page,
639 nid_t ino, enum page_type type, bool force)
640 {
641 enum temp_type temp;
642 bool ret = true;
643
644 for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
645 if (!force) {
646 enum page_type btype = PAGE_TYPE_OF_BIO(type);
647 struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
648
649 f2fs_down_read(&io->io_rwsem);
650 ret = __has_merged_page(io->bio, inode, page, ino);
651 f2fs_up_read(&io->io_rwsem);
652 }
653 if (ret)
654 __f2fs_submit_merged_write(sbi, type, temp);
655
656 /* TODO: use HOT temp only for meta pages now. */
657 if (type >= META)
658 break;
659 }
660 }
661
662 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
663 {
664 __submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
665 }
666
667 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
668 struct inode *inode, struct page *page,
669 nid_t ino, enum page_type type)
670 {
671 __submit_merged_write_cond(sbi, inode, page, ino, type, false);
672 }
673
674 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
675 {
676 f2fs_submit_merged_write(sbi, DATA);
677 f2fs_submit_merged_write(sbi, NODE);
678 f2fs_submit_merged_write(sbi, META);
679 }
680
681 /*
682 * Fill the locked page with data located in the block address.
683 * A caller needs to unlock the page on failure.
684 */
685 int f2fs_submit_page_bio(struct f2fs_io_info *fio)
686 {
687 struct bio *bio;
688 struct page *page = fio->encrypted_page ?
689 fio->encrypted_page : fio->page;
690
691 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
692 fio->is_por ? META_POR : (__is_meta_io(fio) ?
693 META_GENERIC : DATA_GENERIC_ENHANCE))) {
694 f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
695 return -EFSCORRUPTED;
696 }
697
698 trace_f2fs_submit_page_bio(page, fio);
699
700 /* Allocate a new bio */
701 bio = __bio_alloc(fio, 1);
702
703 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
704 fio->page->index, fio, GFP_NOIO);
705
706 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
707 bio_put(bio);
708 return -EFAULT;
709 }
710
711 if (fio->io_wbc && !is_read_io(fio->op))
712 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
713
714 inc_page_count(fio->sbi, is_read_io(fio->op) ?
715 __read_io_type(page) : WB_DATA_TYPE(fio->page, false));
716
717 if (is_read_io(bio_op(bio)))
718 f2fs_submit_read_bio(fio->sbi, bio, fio->type);
719 else
720 f2fs_submit_write_bio(fio->sbi, bio, fio->type);
721 return 0;
722 }
723
724 static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
725 block_t last_blkaddr, block_t cur_blkaddr)
726 {
727 if (unlikely(sbi->max_io_bytes &&
728 bio->bi_iter.bi_size >= sbi->max_io_bytes))
729 return false;
730 if (last_blkaddr + 1 != cur_blkaddr)
731 return false;
732 return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
733 }
734
735 static bool io_type_is_mergeable(struct f2fs_bio_info *io,
736 struct f2fs_io_info *fio)
737 {
738 if (io->fio.op != fio->op)
739 return false;
740 return io->fio.op_flags == fio->op_flags;
741 }
742
743 static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
744 struct f2fs_bio_info *io,
745 struct f2fs_io_info *fio,
746 block_t last_blkaddr,
747 block_t cur_blkaddr)
748 {
749 if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
750 return false;
751 return io_type_is_mergeable(io, fio);
752 }
753
754 static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
755 struct page *page, enum temp_type temp)
756 {
757 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
758 struct bio_entry *be;
759
760 be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS, true, NULL);
761 be->bio = bio;
762 bio_get(bio);
763
764 if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
765 f2fs_bug_on(sbi, 1);
766
767 f2fs_down_write(&io->bio_list_lock);
768 list_add_tail(&be->list, &io->bio_list);
769 f2fs_up_write(&io->bio_list_lock);
770 }
771
772 static void del_bio_entry(struct bio_entry *be)
773 {
774 list_del(&be->list);
775 kmem_cache_free(bio_entry_slab, be);
776 }
777
778 static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
779 struct page *page)
780 {
781 struct f2fs_sb_info *sbi = fio->sbi;
782 enum temp_type temp;
783 bool found = false;
784 int ret = -EAGAIN;
785
786 for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
787 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
788 struct list_head *head = &io->bio_list;
789 struct bio_entry *be;
790
791 f2fs_down_write(&io->bio_list_lock);
792 list_for_each_entry(be, head, list) {
793 if (be->bio != *bio)
794 continue;
795
796 found = true;
797
798 f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
799 *fio->last_block,
800 fio->new_blkaddr));
801 if (f2fs_crypt_mergeable_bio(*bio,
802 fio->page->mapping->host,
803 fio->page->index, fio) &&
804 bio_add_page(*bio, page, PAGE_SIZE, 0) ==
805 PAGE_SIZE) {
806 ret = 0;
807 break;
808 }
809
810 /* page can't be merged into bio; submit the bio */
811 del_bio_entry(be);
812 f2fs_submit_write_bio(sbi, *bio, DATA);
813 break;
814 }
815 f2fs_up_write(&io->bio_list_lock);
816 }
817
818 if (ret) {
819 bio_put(*bio);
820 *bio = NULL;
821 }
822
823 return ret;
824 }
825
826 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
827 struct bio **bio, struct page *page)
828 {
829 enum temp_type temp;
830 bool found = false;
831 struct bio *target = bio ? *bio : NULL;
832
833 f2fs_bug_on(sbi, !target && !page);
834
835 for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
836 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
837 struct list_head *head = &io->bio_list;
838 struct bio_entry *be;
839
840 if (list_empty(head))
841 continue;
842
843 f2fs_down_read(&io->bio_list_lock);
844 list_for_each_entry(be, head, list) {
845 if (target)
846 found = (target == be->bio);
847 else
848 found = __has_merged_page(be->bio, NULL,
849 page, 0);
850 if (found)
851 break;
852 }
853 f2fs_up_read(&io->bio_list_lock);
854
855 if (!found)
856 continue;
857
858 found = false;
859
860 f2fs_down_write(&io->bio_list_lock);
861 list_for_each_entry(be, head, list) {
862 if (target)
863 found = (target == be->bio);
864 else
865 found = __has_merged_page(be->bio, NULL,
866 page, 0);
867 if (found) {
868 target = be->bio;
869 del_bio_entry(be);
870 break;
871 }
872 }
873 f2fs_up_write(&io->bio_list_lock);
874 }
875
876 if (found)
877 f2fs_submit_write_bio(sbi, target, DATA);
878 if (bio && *bio) {
879 bio_put(*bio);
880 *bio = NULL;
881 }
882 }
883
884 int f2fs_merge_page_bio(struct f2fs_io_info *fio)
885 {
886 struct bio *bio = *fio->bio;
887 struct page *page = fio->encrypted_page ?
888 fio->encrypted_page : fio->page;
889
890 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
891 __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC)) {
892 f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
893 return -EFSCORRUPTED;
894 }
895
896 trace_f2fs_submit_page_bio(page, fio);
897
898 if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
899 fio->new_blkaddr))
900 f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
901 alloc_new:
902 if (!bio) {
903 bio = __bio_alloc(fio, BIO_MAX_VECS);
904 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
905 fio->page->index, fio, GFP_NOIO);
906
907 add_bio_entry(fio->sbi, bio, page, fio->temp);
908 } else {
909 if (add_ipu_page(fio, &bio, page))
910 goto alloc_new;
911 }
912
913 if (fio->io_wbc)
914 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
915
916 inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));
917
918 *fio->last_block = fio->new_blkaddr;
919 *fio->bio = bio;
920
921 return 0;
922 }
923
924 #ifdef CONFIG_BLK_DEV_ZONED
925 static bool is_end_zone_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr)
926 {
927 int devi = 0;
928
929 if (f2fs_is_multi_device(sbi)) {
930 devi = f2fs_target_device_index(sbi, blkaddr);
931 if (blkaddr < FDEV(devi).start_blk ||
932 blkaddr > FDEV(devi).end_blk) {
933 f2fs_err(sbi, "Invalid block %x", blkaddr);
934 return false;
935 }
936 blkaddr -= FDEV(devi).start_blk;
937 }
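/*
 * A minimal sketch of the check below, assuming 2 MiB zones and 4 KiB
 * blocks (blocks_per_blkz == 512): device-relative block addresses
 * 511, 1023, 1535, ... are the last writable block of their zone on a
 * host-managed zoned device.
 */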
938 return bdev_zoned_model(FDEV(devi).bdev) == BLK_ZONED_HM &&
939 f2fs_blkz_is_seq(sbi, devi, blkaddr) &&
940 (blkaddr % sbi->blocks_per_blkz == sbi->blocks_per_blkz - 1);
941 }
942 #endif
943
944 void f2fs_submit_page_write(struct f2fs_io_info *fio)
945 {
946 struct f2fs_sb_info *sbi = fio->sbi;
947 enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
948 struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
949 struct page *bio_page;
950 enum count_type type;
951
952 f2fs_bug_on(sbi, is_read_io(fio->op));
953
954 f2fs_down_write(&io->io_rwsem);
955 next:
956 #ifdef CONFIG_BLK_DEV_ZONED
957 if (f2fs_sb_has_blkzoned(sbi) && btype < META && io->zone_pending_bio) {
958 wait_for_completion_io(&io->zone_wait);
959 bio_put(io->zone_pending_bio);
960 io->zone_pending_bio = NULL;
961 io->bi_private = NULL;
962 }
963 #endif
964
965 if (fio->in_list) {
966 spin_lock(&io->io_lock);
967 if (list_empty(&io->io_list)) {
968 spin_unlock(&io->io_lock);
969 goto out;
970 }
971 fio = list_first_entry(&io->io_list,
972 struct f2fs_io_info, list);
973 list_del(&fio->list);
974 spin_unlock(&io->io_lock);
975 }
976
977 verify_fio_blkaddr(fio);
978
979 if (fio->encrypted_page)
980 bio_page = fio->encrypted_page;
981 else if (fio->compressed_page)
982 bio_page = fio->compressed_page;
983 else
984 bio_page = fio->page;
985
986 /* set submitted = true as a return value */
987 fio->submitted = 1;
988
989 type = WB_DATA_TYPE(bio_page, fio->compressed_page);
990 inc_page_count(sbi, type);
991
992 if (io->bio &&
993 (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
994 fio->new_blkaddr) ||
995 !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
996 bio_page->index, fio)))
997 __submit_merged_bio(io);
998 alloc_new:
999 if (io->bio == NULL) {
1000 io->bio = __bio_alloc(fio, BIO_MAX_VECS);
1001 f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
1002 bio_page->index, fio, GFP_NOIO);
1003 io->fio = *fio;
1004 }
1005
1006 if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
1007 __submit_merged_bio(io);
1008 goto alloc_new;
1009 }
1010
1011 if (fio->io_wbc)
1012 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
1013
1014 io->last_block_in_bio = fio->new_blkaddr;
1015
1016 trace_f2fs_submit_page_write(fio->page, fio);
1017 #ifdef CONFIG_BLK_DEV_ZONED
1018 if (f2fs_sb_has_blkzoned(sbi) && btype < META &&
1019 is_end_zone_blkaddr(sbi, fio->new_blkaddr)) {
1020 bio_get(io->bio);
1021 reinit_completion(&io->zone_wait);
1022 io->bi_private = io->bio->bi_private;
1023 io->bio->bi_private = io;
1024 io->bio->bi_end_io = f2fs_zone_write_end_io;
1025 io->zone_pending_bio = io->bio;
1026 __submit_merged_bio(io);
1027 }
1028 #endif
1029 if (fio->in_list)
1030 goto next;
1031 out:
1032 if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
1033 !f2fs_is_checkpoint_ready(sbi))
1034 __submit_merged_bio(io);
1035 f2fs_up_write(&io->io_rwsem);
1036 }
1037
1038 static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
1039 unsigned nr_pages, blk_opf_t op_flag,
1040 pgoff_t first_idx, bool for_write)
1041 {
1042 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1043 struct bio *bio;
1044 struct bio_post_read_ctx *ctx = NULL;
1045 unsigned int post_read_steps = 0;
1046 sector_t sector;
1047 struct block_device *bdev = f2fs_target_device(sbi, blkaddr, &sector);
1048
1049 bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages),
1050 REQ_OP_READ | op_flag,
1051 for_write ? GFP_NOIO : GFP_KERNEL, &f2fs_bioset);
1052 if (!bio)
1053 return ERR_PTR(-ENOMEM);
1054 bio->bi_iter.bi_sector = sector;
1055 f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
1056 bio->bi_end_io = f2fs_read_end_io;
1057
1058 if (fscrypt_inode_uses_fs_layer_crypto(inode))
1059 post_read_steps |= STEP_DECRYPT;
1060
1061 if (f2fs_need_verity(inode, first_idx))
1062 post_read_steps |= STEP_VERITY;
1063
1064 /*
1065 * STEP_DECOMPRESS is handled specially, since a compressed file might
1066 * contain both compressed and uncompressed clusters. We'll allocate a
1067 * bio_post_read_ctx if the file is compressed, but the caller is
1068 * responsible for enabling STEP_DECOMPRESS if it's actually needed.
1069 */
1070
1071 if (post_read_steps || f2fs_compressed_file(inode)) {
1072 /* Due to the mempool, this never fails. */
1073 ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
1074 ctx->bio = bio;
1075 ctx->sbi = sbi;
1076 ctx->enabled_steps = post_read_steps;
1077 ctx->fs_blkaddr = blkaddr;
1078 ctx->decompression_attempted = false;
1079 bio->bi_private = ctx;
1080 }
1081 iostat_alloc_and_bind_ctx(sbi, bio, ctx);
1082
1083 return bio;
1084 }
1085
1086 /* This can handle encryption stuff */
1087 static int f2fs_submit_page_read(struct inode *inode, struct page *page,
1088 block_t blkaddr, blk_opf_t op_flags,
1089 bool for_write)
1090 {
1091 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1092 struct bio *bio;
1093
1094 bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
1095 page->index, for_write);
1096 if (IS_ERR(bio))
1097 return PTR_ERR(bio);
1098
1099 /* wait for GCed page writeback via META_MAPPING */
1100 f2fs_wait_on_block_writeback(inode, blkaddr);
1101
1102 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
1103 iostat_update_and_unbind_ctx(bio);
1104 if (bio->bi_private)
1105 mempool_free(bio->bi_private, bio_post_read_ctx_pool);
1106 bio_put(bio);
1107 return -EFAULT;
1108 }
1109 inc_page_count(sbi, F2FS_RD_DATA);
1110 f2fs_update_iostat(sbi, NULL, FS_DATA_READ_IO, F2FS_BLKSIZE);
1111 f2fs_submit_read_bio(sbi, bio, DATA);
1112 return 0;
1113 }
1114
1115 static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
1116 {
1117 __le32 *addr = get_dnode_addr(dn->inode, dn->node_page);
1118
1119 dn->data_blkaddr = blkaddr;
1120 addr[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
1121 }
1122
1123 /*
1124 * Lock ordering for the change of data block address:
1125 * ->data_page
1126 * ->node_page
1127 * update block addresses in the node page
1128 */
1129 void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
1130 {
1131 f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
1132 __set_data_blkaddr(dn, blkaddr);
1133 if (set_page_dirty(dn->node_page))
1134 dn->node_changed = true;
1135 }
1136
1137 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
1138 {
1139 f2fs_set_data_blkaddr(dn, blkaddr);
1140 f2fs_update_read_extent_cache(dn);
1141 }
1142
1143 /* dn->ofs_in_node will be returned with up-to-date last block pointer */
1144 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
1145 {
1146 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1147 int err;
1148
1149 if (!count)
1150 return 0;
1151
1152 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1153 return -EPERM;
1154 err = inc_valid_block_count(sbi, dn->inode, &count, true);
1155 if (unlikely(err))
1156 return err;
1157
1158 trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
1159 dn->ofs_in_node, count);
1160
1161 f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
1162
1163 for (; count > 0; dn->ofs_in_node++) {
1164 block_t blkaddr = f2fs_data_blkaddr(dn);
1165
1166 if (blkaddr == NULL_ADDR) {
1167 __set_data_blkaddr(dn, NEW_ADDR);
1168 count--;
1169 }
1170 }
1171
1172 if (set_page_dirty(dn->node_page))
1173 dn->node_changed = true;
1174 return 0;
1175 }
1176
1177 /* Should keep dn->ofs_in_node unchanged */
1178 int f2fs_reserve_new_block(struct dnode_of_data *dn)
1179 {
1180 unsigned int ofs_in_node = dn->ofs_in_node;
1181 int ret;
1182
1183 ret = f2fs_reserve_new_blocks(dn, 1);
1184 dn->ofs_in_node = ofs_in_node;
1185 return ret;
1186 }
1187
1188 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
1189 {
1190 bool need_put = dn->inode_page ? false : true;
1191 int err;
1192
1193 err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
1194 if (err)
1195 return err;
1196
1197 if (dn->data_blkaddr == NULL_ADDR)
1198 err = f2fs_reserve_new_block(dn);
1199 if (err || need_put)
1200 f2fs_put_dnode(dn);
1201 return err;
1202 }
1203
1204 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
1205 blk_opf_t op_flags, bool for_write,
1206 pgoff_t *next_pgofs)
1207 {
1208 struct address_space *mapping = inode->i_mapping;
1209 struct dnode_of_data dn;
1210 struct page *page;
1211 int err;
1212
1213 page = f2fs_grab_cache_page(mapping, index, for_write);
1214 if (!page)
1215 return ERR_PTR(-ENOMEM);
1216
1217 if (f2fs_lookup_read_extent_cache_block(inode, index,
1218 &dn.data_blkaddr)) {
1219 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
1220 DATA_GENERIC_ENHANCE_READ)) {
1221 err = -EFSCORRUPTED;
1222 f2fs_handle_error(F2FS_I_SB(inode),
1223 ERROR_INVALID_BLKADDR);
1224 goto put_err;
1225 }
1226 goto got_it;
1227 }
1228
1229 set_new_dnode(&dn, inode, NULL, NULL, 0);
1230 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
1231 if (err) {
1232 if (err == -ENOENT && next_pgofs)
1233 *next_pgofs = f2fs_get_next_page_offset(&dn, index);
1234 goto put_err;
1235 }
1236 f2fs_put_dnode(&dn);
1237
1238 if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
1239 err = -ENOENT;
1240 if (next_pgofs)
1241 *next_pgofs = index + 1;
1242 goto put_err;
1243 }
1244 if (dn.data_blkaddr != NEW_ADDR &&
1245 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
1246 dn.data_blkaddr,
1247 DATA_GENERIC_ENHANCE)) {
1248 err = -EFSCORRUPTED;
1249 f2fs_handle_error(F2FS_I_SB(inode),
1250 ERROR_INVALID_BLKADDR);
1251 goto put_err;
1252 }
1253 got_it:
1254 if (PageUptodate(page)) {
1255 unlock_page(page);
1256 return page;
1257 }
1258
1259 /*
1260 * A new dentry page is allocated but not able to be written, since its
1261 * new inode page couldn't be allocated due to -ENOSPC.
1262 * In such a case, its blkaddr remains NEW_ADDR.
1263 * see, f2fs_add_link -> f2fs_get_new_data_page ->
1264 * f2fs_init_inode_metadata.
1265 */
1266 if (dn.data_blkaddr == NEW_ADDR) {
1267 zero_user_segment(page, 0, PAGE_SIZE);
1268 if (!PageUptodate(page))
1269 SetPageUptodate(page);
1270 unlock_page(page);
1271 return page;
1272 }
1273
1274 err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
1275 op_flags, for_write);
1276 if (err)
1277 goto put_err;
1278 return page;
1279
1280 put_err:
1281 f2fs_put_page(page, 1);
1282 return ERR_PTR(err);
1283 }
1284
1285 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
1286 pgoff_t *next_pgofs)
1287 {
1288 struct address_space *mapping = inode->i_mapping;
1289 struct page *page;
1290
1291 page = find_get_page(mapping, index);
1292 if (page && PageUptodate(page))
1293 return page;
1294 f2fs_put_page(page, 0);
1295
1296 page = f2fs_get_read_data_page(inode, index, 0, false, next_pgofs);
1297 if (IS_ERR(page))
1298 return page;
1299
1300 if (PageUptodate(page))
1301 return page;
1302
1303 wait_on_page_locked(page);
1304 if (unlikely(!PageUptodate(page))) {
1305 f2fs_put_page(page, 0);
1306 return ERR_PTR(-EIO);
1307 }
1308 return page;
1309 }
1310
1311 /*
1312 * If it tries to access a hole, return an error, because the callers
1313 * (functions in dir.c and GC) need to know whether this page exists
1314 * or not.
1315 */
1316 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
1317 bool for_write)
1318 {
1319 struct address_space *mapping = inode->i_mapping;
1320 struct page *page;
1321
1322 page = f2fs_get_read_data_page(inode, index, 0, for_write, NULL);
1323 if (IS_ERR(page))
1324 return page;
1325
1326 /* wait for read completion */
1327 lock_page(page);
1328 if (unlikely(page->mapping != mapping || !PageUptodate(page))) {
1329 f2fs_put_page(page, 1);
1330 return ERR_PTR(-EIO);
1331 }
1332 return page;
1333 }
1334
1335 /*
1336 * Caller ensures that this data page is never allocated.
1337 * A new zero-filled data page is allocated in the page cache.
1338 *
1339 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
1340 * f2fs_unlock_op().
1341 * Note that ipage is set only by make_empty_dir, and if any error
1342 * occurs, ipage should be released by this function.
1343 */
1344 struct page *f2fs_get_new_data_page(struct inode *inode,
1345 struct page *ipage, pgoff_t index, bool new_i_size)
1346 {
1347 struct address_space *mapping = inode->i_mapping;
1348 struct page *page;
1349 struct dnode_of_data dn;
1350 int err;
1351
1352 page = f2fs_grab_cache_page(mapping, index, true);
1353 if (!page) {
1354 /*
1355 * before exiting, we should make sure ipage will be released
1356 * if any error occurs.
1357 */
1358 f2fs_put_page(ipage, 1);
1359 return ERR_PTR(-ENOMEM);
1360 }
1361
1362 set_new_dnode(&dn, inode, ipage, NULL, 0);
1363 err = f2fs_reserve_block(&dn, index);
1364 if (err) {
1365 f2fs_put_page(page, 1);
1366 return ERR_PTR(err);
1367 }
1368 if (!ipage)
1369 f2fs_put_dnode(&dn);
1370
1371 if (PageUptodate(page))
1372 goto got_it;
1373
1374 if (dn.data_blkaddr == NEW_ADDR) {
1375 zero_user_segment(page, 0, PAGE_SIZE);
1376 if (!PageUptodate(page))
1377 SetPageUptodate(page);
1378 } else {
1379 f2fs_put_page(page, 1);
1380
1381 /* if ipage exists, blkaddr should be NEW_ADDR */
1382 f2fs_bug_on(F2FS_I_SB(inode), ipage);
1383 page = f2fs_get_lock_data_page(inode, index, true);
1384 if (IS_ERR(page))
1385 return page;
1386 }
1387 got_it:
1388 if (new_i_size && i_size_read(inode) <
1389 ((loff_t)(index + 1) << PAGE_SHIFT))
1390 f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
1391 return page;
1392 }
1393
1394 static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
1395 {
1396 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1397 struct f2fs_summary sum;
1398 struct node_info ni;
1399 block_t old_blkaddr;
1400 blkcnt_t count = 1;
1401 int err;
1402
1403 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1404 return -EPERM;
1405
1406 err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
1407 if (err)
1408 return err;
1409
1410 dn->data_blkaddr = f2fs_data_blkaddr(dn);
1411 if (dn->data_blkaddr == NULL_ADDR) {
1412 err = inc_valid_block_count(sbi, dn->inode, &count, true);
1413 if (unlikely(err))
1414 return err;
1415 }
1416
1417 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
1418 old_blkaddr = dn->data_blkaddr;
1419 f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
1420 &sum, seg_type, NULL);
1421 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
1422 f2fs_invalidate_internal_cache(sbi, old_blkaddr);
1423
1424 f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
1425 return 0;
1426 }
1427
1428 static void f2fs_map_lock(struct f2fs_sb_info *sbi, int flag)
1429 {
1430 if (flag == F2FS_GET_BLOCK_PRE_AIO)
1431 f2fs_down_read(&sbi->node_change);
1432 else
1433 f2fs_lock_op(sbi);
1434 }
1435
1436 static void f2fs_map_unlock(struct f2fs_sb_info *sbi, int flag)
1437 {
1438 if (flag == F2FS_GET_BLOCK_PRE_AIO)
1439 f2fs_up_read(&sbi->node_change);
1440 else
1441 f2fs_unlock_op(sbi);
1442 }
1443
1444 int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index)
1445 {
1446 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1447 int err = 0;
1448
1449 f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
1450 if (!f2fs_lookup_read_extent_cache_block(dn->inode, index,
1451 &dn->data_blkaddr))
1452 err = f2fs_reserve_block(dn, index);
1453 f2fs_map_unlock(sbi, F2FS_GET_BLOCK_PRE_AIO);
1454
1455 return err;
1456 }
1457
1458 static int f2fs_map_no_dnode(struct inode *inode,
1459 struct f2fs_map_blocks *map, struct dnode_of_data *dn,
1460 pgoff_t pgoff)
1461 {
1462 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1463
1464 /*
1465 * There is one exceptional case in which read_node_page() may return
1466 * -ENOENT because the filesystem has been shut down or hit cp_error;
1467 * return -EIO in that case.
1468 */
1469 if (map->m_may_create &&
1470 (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) || f2fs_cp_error(sbi)))
1471 return -EIO;
1472
1473 if (map->m_next_pgofs)
1474 *map->m_next_pgofs = f2fs_get_next_page_offset(dn, pgoff);
1475 if (map->m_next_extent)
1476 *map->m_next_extent = f2fs_get_next_page_offset(dn, pgoff);
1477 return 0;
1478 }
1479
1480 static bool f2fs_map_blocks_cached(struct inode *inode,
1481 struct f2fs_map_blocks *map, int flag)
1482 {
1483 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1484 unsigned int maxblocks = map->m_len;
1485 pgoff_t pgoff = (pgoff_t)map->m_lblk;
1486 struct extent_info ei = {};
1487
1488 if (!f2fs_lookup_read_extent_cache(inode, pgoff, &ei))
1489 return false;
1490
1491 map->m_pblk = ei.blk + pgoff - ei.fofs;
1492 map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgoff);
1493 map->m_flags = F2FS_MAP_MAPPED;
1494 if (map->m_next_extent)
1495 *map->m_next_extent = pgoff + map->m_len;
1496
1497 /* for hardware encryption, but to avoid potential issue in future */
1498 if (flag == F2FS_GET_BLOCK_DIO)
1499 f2fs_wait_on_block_writeback_range(inode,
1500 map->m_pblk, map->m_len);
1501
1502 if (f2fs_allow_multi_device_dio(sbi, flag)) {
1503 int bidx = f2fs_target_device_index(sbi, map->m_pblk);
1504 struct f2fs_dev_info *dev = &sbi->devs[bidx];
1505
1506 map->m_bdev = dev->bdev;
1507 map->m_pblk -= dev->start_blk;
1508 map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk);
1509 } else {
1510 map->m_bdev = inode->i_sb->s_bdev;
1511 }
1512 return true;
1513 }
1514
1515 /*
1516 * f2fs_map_blocks() tries to find or build mapping relationship which
1517 * maps continuous logical blocks to physical blocks, and return such
1518 * info via f2fs_map_blocks structure.
1519 */
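/*
 * A minimal illustration (all values hypothetical): a request with
 * map->m_lblk = 100 and map->m_len = 8 may come back as m_pblk = 5000,
 * m_len = 8, m_flags = F2FS_MAP_MAPPED if the eight logical blocks are
 * physically contiguous, or with a shorter m_len if the extent ends earlier.
 */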
1520 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
1521 {
1522 unsigned int maxblocks = map->m_len;
1523 struct dnode_of_data dn;
1524 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1525 int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
1526 pgoff_t pgofs, end_offset, end;
1527 int err = 0, ofs = 1;
1528 unsigned int ofs_in_node, last_ofs_in_node;
1529 blkcnt_t prealloc;
1530 block_t blkaddr;
1531 unsigned int start_pgofs;
1532 int bidx = 0;
1533 bool is_hole;
1534
1535 if (!maxblocks)
1536 return 0;
1537
1538 if (!map->m_may_create && f2fs_map_blocks_cached(inode, map, flag))
1539 goto out;
1540
1541 map->m_bdev = inode->i_sb->s_bdev;
1542 map->m_multidev_dio =
1543 f2fs_allow_multi_device_dio(F2FS_I_SB(inode), flag);
1544
1545 map->m_len = 0;
1546 map->m_flags = 0;
1547
1548 /* it only supports block size == page size */
1549 pgofs = (pgoff_t)map->m_lblk;
1550 end = pgofs + maxblocks;
1551
1552 next_dnode:
1553 if (map->m_may_create)
1554 f2fs_map_lock(sbi, flag);
1555
1556 /* When reading holes, we need its node page */
1557 set_new_dnode(&dn, inode, NULL, NULL, 0);
1558 err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
1559 if (err) {
1560 if (flag == F2FS_GET_BLOCK_BMAP)
1561 map->m_pblk = 0;
1562 if (err == -ENOENT)
1563 err = f2fs_map_no_dnode(inode, map, &dn, pgofs);
1564 goto unlock_out;
1565 }
1566
1567 start_pgofs = pgofs;
1568 prealloc = 0;
1569 last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
1570 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1571
1572 next_block:
1573 blkaddr = f2fs_data_blkaddr(&dn);
1574 is_hole = !__is_valid_data_blkaddr(blkaddr);
1575 if (!is_hole &&
1576 !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
1577 err = -EFSCORRUPTED;
1578 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
1579 goto sync_out;
1580 }
1581
1582 /* use out-place-update for direct IO under LFS mode */
1583 if (map->m_may_create &&
1584 (is_hole || (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO))) {
1585 if (unlikely(f2fs_cp_error(sbi))) {
1586 err = -EIO;
1587 goto sync_out;
1588 }
1589
1590 switch (flag) {
1591 case F2FS_GET_BLOCK_PRE_AIO:
1592 if (blkaddr == NULL_ADDR) {
1593 prealloc++;
1594 last_ofs_in_node = dn.ofs_in_node;
1595 }
1596 break;
1597 case F2FS_GET_BLOCK_PRE_DIO:
1598 case F2FS_GET_BLOCK_DIO:
1599 err = __allocate_data_block(&dn, map->m_seg_type);
1600 if (err)
1601 goto sync_out;
1602 if (flag == F2FS_GET_BLOCK_PRE_DIO)
1603 file_need_truncate(inode);
1604 set_inode_flag(inode, FI_APPEND_WRITE);
1605 break;
1606 default:
1607 WARN_ON_ONCE(1);
1608 err = -EIO;
1609 goto sync_out;
1610 }
1611
1612 blkaddr = dn.data_blkaddr;
1613 if (is_hole)
1614 map->m_flags |= F2FS_MAP_NEW;
1615 } else if (is_hole) {
1616 if (f2fs_compressed_file(inode) &&
1617 f2fs_sanity_check_cluster(&dn) &&
1618 (flag != F2FS_GET_BLOCK_FIEMAP ||
1619 IS_ENABLED(CONFIG_F2FS_CHECK_FS))) {
1620 err = -EFSCORRUPTED;
1621 f2fs_handle_error(sbi,
1622 ERROR_CORRUPTED_CLUSTER);
1623 goto sync_out;
1624 }
1625
1626 switch (flag) {
1627 case F2FS_GET_BLOCK_PRECACHE:
1628 goto sync_out;
1629 case F2FS_GET_BLOCK_BMAP:
1630 map->m_pblk = 0;
1631 goto sync_out;
1632 case F2FS_GET_BLOCK_FIEMAP:
1633 if (blkaddr == NULL_ADDR) {
1634 if (map->m_next_pgofs)
1635 *map->m_next_pgofs = pgofs + 1;
1636 goto sync_out;
1637 }
1638 break;
1639 default:
1640 /* for defragment case */
1641 if (map->m_next_pgofs)
1642 *map->m_next_pgofs = pgofs + 1;
1643 goto sync_out;
1644 }
1645 }
1646
1647 if (flag == F2FS_GET_BLOCK_PRE_AIO)
1648 goto skip;
1649
1650 if (map->m_multidev_dio)
1651 bidx = f2fs_target_device_index(sbi, blkaddr);
1652
1653 if (map->m_len == 0) {
1654 /* reserved delalloc block should be mapped for fiemap. */
1655 if (blkaddr == NEW_ADDR)
1656 map->m_flags |= F2FS_MAP_DELALLOC;
1657 map->m_flags |= F2FS_MAP_MAPPED;
1658
1659 map->m_pblk = blkaddr;
1660 map->m_len = 1;
1661
1662 if (map->m_multidev_dio)
1663 map->m_bdev = FDEV(bidx).bdev;
1664 } else if ((map->m_pblk != NEW_ADDR &&
1665 blkaddr == (map->m_pblk + ofs)) ||
1666 (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
1667 flag == F2FS_GET_BLOCK_PRE_DIO) {
1668 if (map->m_multidev_dio && map->m_bdev != FDEV(bidx).bdev)
1669 goto sync_out;
1670 ofs++;
1671 map->m_len++;
1672 } else {
1673 goto sync_out;
1674 }
1675
1676 skip:
1677 dn.ofs_in_node++;
1678 pgofs++;
1679
1680 /* preallocate blocks in batch for one dnode page */
1681 if (flag == F2FS_GET_BLOCK_PRE_AIO &&
1682 (pgofs == end || dn.ofs_in_node == end_offset)) {
1683
1684 dn.ofs_in_node = ofs_in_node;
1685 err = f2fs_reserve_new_blocks(&dn, prealloc);
1686 if (err)
1687 goto sync_out;
1688
1689 map->m_len += dn.ofs_in_node - ofs_in_node;
1690 if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
1691 err = -ENOSPC;
1692 goto sync_out;
1693 }
1694 dn.ofs_in_node = end_offset;
1695 }
1696
1697 if (pgofs >= end)
1698 goto sync_out;
1699 else if (dn.ofs_in_node < end_offset)
1700 goto next_block;
1701
1702 if (flag == F2FS_GET_BLOCK_PRECACHE) {
1703 if (map->m_flags & F2FS_MAP_MAPPED) {
1704 unsigned int ofs = start_pgofs - map->m_lblk;
1705
1706 f2fs_update_read_extent_cache_range(&dn,
1707 start_pgofs, map->m_pblk + ofs,
1708 map->m_len - ofs);
1709 }
1710 }
1711
1712 f2fs_put_dnode(&dn);
1713
1714 if (map->m_may_create) {
1715 f2fs_map_unlock(sbi, flag);
1716 f2fs_balance_fs(sbi, dn.node_changed);
1717 }
1718 goto next_dnode;
1719
1720 sync_out:
1721
1722 if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) {
1723 /*
1724 * for hardware encryption, but to avoid potential issue
1725 * in future
1726 */
1727 f2fs_wait_on_block_writeback_range(inode,
1728 map->m_pblk, map->m_len);
1729
1730 if (map->m_multidev_dio) {
1731 block_t blk_addr = map->m_pblk;
1732
1733 bidx = f2fs_target_device_index(sbi, map->m_pblk);
1734
1735 map->m_bdev = FDEV(bidx).bdev;
1736 map->m_pblk -= FDEV(bidx).start_blk;
1737
1738 if (map->m_may_create)
1739 f2fs_update_device_state(sbi, inode->i_ino,
1740 blk_addr, map->m_len);
1741
1742 f2fs_bug_on(sbi, blk_addr + map->m_len >
1743 FDEV(bidx).end_blk + 1);
1744 }
1745 }
1746
1747 if (flag == F2FS_GET_BLOCK_PRECACHE) {
1748 if (map->m_flags & F2FS_MAP_MAPPED) {
1749 unsigned int ofs = start_pgofs - map->m_lblk;
1750
1751 f2fs_update_read_extent_cache_range(&dn,
1752 start_pgofs, map->m_pblk + ofs,
1753 map->m_len - ofs);
1754 }
1755 if (map->m_next_extent)
1756 *map->m_next_extent = pgofs + 1;
1757 }
1758 f2fs_put_dnode(&dn);
1759 unlock_out:
1760 if (map->m_may_create) {
1761 f2fs_map_unlock(sbi, flag);
1762 f2fs_balance_fs(sbi, dn.node_changed);
1763 }
1764 out:
1765 trace_f2fs_map_blocks(inode, map, flag, err);
1766 return err;
1767 }
1768
1769 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1770 {
1771 struct f2fs_map_blocks map;
1772 block_t last_lblk;
1773 int err;
1774
1775 if (pos + len > i_size_read(inode))
1776 return false;
1777
1778 map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1779 map.m_next_pgofs = NULL;
1780 map.m_next_extent = NULL;
1781 map.m_seg_type = NO_CHECK_TYPE;
1782 map.m_may_create = false;
1783 last_lblk = F2FS_BLK_ALIGN(pos + len);
1784
1785 while (map.m_lblk < last_lblk) {
1786 map.m_len = last_lblk - map.m_lblk;
1787 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
1788 if (err || map.m_len == 0)
1789 return false;
1790 map.m_lblk += map.m_len;
1791 }
1792 return true;
1793 }
1794
1795 static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
1796 {
1797 return (bytes >> inode->i_blkbits);
1798 }
1799
1800 static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
1801 {
1802 return (blks << inode->i_blkbits);
1803 }
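/*
 * Example, assuming a 4 KiB block size (i_blkbits == 12):
 * bytes_to_blks(inode, 8192) == 2 and blks_to_bytes(inode, 3) == 12288.
 */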
1804
1805 static int f2fs_xattr_fiemap(struct inode *inode,
1806 struct fiemap_extent_info *fieinfo)
1807 {
1808 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1809 struct page *page;
1810 struct node_info ni;
1811 __u64 phys = 0, len;
1812 __u32 flags;
1813 nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1814 int err = 0;
1815
1816 if (f2fs_has_inline_xattr(inode)) {
1817 int offset;
1818
1819 page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1820 inode->i_ino, false);
1821 if (!page)
1822 return -ENOMEM;
1823
1824 err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
1825 if (err) {
1826 f2fs_put_page(page, 1);
1827 return err;
1828 }
1829
1830 phys = blks_to_bytes(inode, ni.blk_addr);
1831 offset = offsetof(struct f2fs_inode, i_addr) +
1832 sizeof(__le32) * (DEF_ADDRS_PER_INODE -
1833 get_inline_xattr_addrs(inode));
1834
1835 phys += offset;
1836 len = inline_xattr_size(inode);
1837
1838 f2fs_put_page(page, 1);
1839
1840 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1841
1842 if (!xnid)
1843 flags |= FIEMAP_EXTENT_LAST;
1844
1845 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1846 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1847 if (err)
1848 return err;
1849 }
1850
1851 if (xnid) {
1852 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1853 if (!page)
1854 return -ENOMEM;
1855
1856 err = f2fs_get_node_info(sbi, xnid, &ni, false);
1857 if (err) {
1858 f2fs_put_page(page, 1);
1859 return err;
1860 }
1861
1862 phys = blks_to_bytes(inode, ni.blk_addr);
1863 len = inode->i_sb->s_blocksize;
1864
1865 f2fs_put_page(page, 1);
1866
1867 flags = FIEMAP_EXTENT_LAST;
1868 }
1869
1870 if (phys) {
1871 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1872 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1873 }
1874
1875 return (err < 0 ? err : 0);
1876 }
1877
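/*
 * Number of data blocks addressable through an inode's direct pointers,
 * its two direct node blocks, two indirect node blocks and one
 * double-indirect node block.  Used by fiemap to decide when a hole has
 * reached the end of the possible file range.
 */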
1878 static loff_t max_inode_blocks(struct inode *inode)
1879 {
1880 loff_t result = ADDRS_PER_INODE(inode);
1881 loff_t leaf_count = ADDRS_PER_BLOCK(inode);
1882
1883 /* two direct node blocks */
1884 result += (leaf_count * 2);
1885
1886 /* two indirect node blocks */
1887 leaf_count *= NIDS_PER_BLOCK;
1888 result += (leaf_count * 2);
1889
1890 /* one double indirect node block */
1891 leaf_count *= NIDS_PER_BLOCK;
1892 result += leaf_count;
1893
1894 return result;
1895 }
1896
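/*
 * FIEMAP handler: walk the file with F2FS_GET_BLOCK_FIEMAP, merging the
 * blocks of a compressed cluster into a single encoded extent and
 * reporting delalloc blocks as unwritten.
 */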
1897 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1898 u64 start, u64 len)
1899 {
1900 struct f2fs_map_blocks map;
1901 sector_t start_blk, last_blk;
1902 pgoff_t next_pgofs;
1903 u64 logical = 0, phys = 0, size = 0;
1904 u32 flags = 0;
1905 int ret = 0;
1906 bool compr_cluster = false, compr_appended;
1907 unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
1908 unsigned int count_in_cluster = 0;
1909 loff_t maxbytes;
1910
1911 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1912 ret = f2fs_precache_extents(inode);
1913 if (ret)
1914 return ret;
1915 }
1916
1917 ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
1918 if (ret)
1919 return ret;
1920
1921 inode_lock(inode);
1922
1923 maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
1924 if (start > maxbytes) {
1925 ret = -EFBIG;
1926 goto out;
1927 }
1928
1929 if (len > maxbytes || (maxbytes - len) < start)
1930 len = maxbytes - start;
1931
1932 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1933 ret = f2fs_xattr_fiemap(inode, fieinfo);
1934 goto out;
1935 }
1936
1937 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
1938 ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
1939 if (ret != -EAGAIN)
1940 goto out;
1941 }
1942
1943 if (bytes_to_blks(inode, len) == 0)
1944 len = blks_to_bytes(inode, 1);
1945
1946 start_blk = bytes_to_blks(inode, start);
1947 last_blk = bytes_to_blks(inode, start + len - 1);
1948
1949 next:
1950 memset(&map, 0, sizeof(map));
1951 map.m_lblk = start_blk;
1952 map.m_len = bytes_to_blks(inode, len);
1953 map.m_next_pgofs = &next_pgofs;
1954 map.m_seg_type = NO_CHECK_TYPE;
1955
1956 if (compr_cluster) {
1957 map.m_lblk += 1;
1958 map.m_len = cluster_size - count_in_cluster;
1959 }
1960
1961 ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
1962 if (ret)
1963 goto out;
1964
1965 /* HOLE */
1966 if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) {
1967 start_blk = next_pgofs;
1968
1969 if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
1970 max_inode_blocks(inode)))
1971 goto prep_next;
1972
1973 flags |= FIEMAP_EXTENT_LAST;
1974 }
1975
1976 compr_appended = false;
1977 /* In the case of a compressed cluster, append this to the last extent */
1978 if (compr_cluster && ((map.m_flags & F2FS_MAP_DELALLOC) ||
1979 !(map.m_flags & F2FS_MAP_FLAGS))) {
1980 compr_appended = true;
1981 goto skip_fill;
1982 }
1983
1984 if (size) {
1985 flags |= FIEMAP_EXTENT_MERGED;
1986 if (IS_ENCRYPTED(inode))
1987 flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
1988
1989 ret = fiemap_fill_next_extent(fieinfo, logical,
1990 phys, size, flags);
1991 trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
1992 if (ret)
1993 goto out;
1994 size = 0;
1995 }
1996
1997 if (start_blk > last_blk)
1998 goto out;
1999
2000 skip_fill:
2001 if (map.m_pblk == COMPRESS_ADDR) {
2002 compr_cluster = true;
2003 count_in_cluster = 1;
2004 } else if (compr_appended) {
2005 unsigned int appended_blks = cluster_size -
2006 count_in_cluster + 1;
2007 size += blks_to_bytes(inode, appended_blks);
2008 start_blk += appended_blks;
2009 compr_cluster = false;
2010 } else {
2011 logical = blks_to_bytes(inode, start_blk);
2012 phys = __is_valid_data_blkaddr(map.m_pblk) ?
2013 blks_to_bytes(inode, map.m_pblk) : 0;
2014 size = blks_to_bytes(inode, map.m_len);
2015 flags = 0;
2016
2017 if (compr_cluster) {
2018 flags = FIEMAP_EXTENT_ENCODED;
2019 count_in_cluster += map.m_len;
2020 if (count_in_cluster == cluster_size) {
2021 compr_cluster = false;
2022 size += blks_to_bytes(inode, 1);
2023 }
2024 } else if (map.m_flags & F2FS_MAP_DELALLOC) {
2025 flags = FIEMAP_EXTENT_UNWRITTEN;
2026 }
2027
2028 start_blk += bytes_to_blks(inode, size);
2029 }
2030
2031 prep_next:
2032 cond_resched();
2033 if (fatal_signal_pending(current))
2034 ret = -EINTR;
2035 else
2036 goto next;
2037 out:
2038 if (ret == 1)
2039 ret = 0;
2040
2041 inode_unlock(inode);
2042 return ret;
2043 }
2044
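/*
 * Verity files keep their Merkle tree blocks in the pagecache past EOF,
 * so reads of such inodes are allowed up to s_maxbytes; all other inodes
 * are limited to i_size.
 */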
2045 static inline loff_t f2fs_readpage_limit(struct inode *inode)
2046 {
2047 if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2048 return inode->i_sb->s_maxbytes;
2049
2050 return i_size_read(inode);
2051 }
2052
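/*
 * Read one uncompressed page: map its block (reusing the caller's last
 * mapping result when possible), zero it if it lies beyond EOF or in a
 * hole, otherwise add it to the shared read bio, submitting and
 * reallocating the bio whenever the new block cannot be merged.
 */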
2053 static int f2fs_read_single_page(struct inode *inode, struct page *page,
2054 unsigned nr_pages,
2055 struct f2fs_map_blocks *map,
2056 struct bio **bio_ret,
2057 sector_t *last_block_in_bio,
2058 bool is_readahead)
2059 {
2060 struct bio *bio = *bio_ret;
2061 const unsigned blocksize = blks_to_bytes(inode, 1);
2062 sector_t block_in_file;
2063 sector_t last_block;
2064 sector_t last_block_in_file;
2065 sector_t block_nr;
2066 int ret = 0;
2067
2068 block_in_file = (sector_t)page_index(page);
2069 last_block = block_in_file + nr_pages;
2070 last_block_in_file = bytes_to_blks(inode,
2071 f2fs_readpage_limit(inode) + blocksize - 1);
2072 if (last_block > last_block_in_file)
2073 last_block = last_block_in_file;
2074
2075 /* just zero out the page, which is beyond EOF */
2076 if (block_in_file >= last_block)
2077 goto zero_out;
2078 /*
2079 * Map blocks using the previous result first.
2080 */
2081 if ((map->m_flags & F2FS_MAP_MAPPED) &&
2082 block_in_file > map->m_lblk &&
2083 block_in_file < (map->m_lblk + map->m_len))
2084 goto got_it;
2085
2086 /*
2087 * Then do more f2fs_map_blocks() calls until we are
2088 * done with this page.
2089 */
2090 map->m_lblk = block_in_file;
2091 map->m_len = last_block - block_in_file;
2092
2093 ret = f2fs_map_blocks(inode, map, F2FS_GET_BLOCK_DEFAULT);
2094 if (ret)
2095 goto out;
2096 got_it:
2097 if ((map->m_flags & F2FS_MAP_MAPPED)) {
2098 block_nr = map->m_pblk + block_in_file - map->m_lblk;
2099 SetPageMappedToDisk(page);
2100
2101 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
2102 DATA_GENERIC_ENHANCE_READ)) {
2103 ret = -EFSCORRUPTED;
2104 f2fs_handle_error(F2FS_I_SB(inode),
2105 ERROR_INVALID_BLKADDR);
2106 goto out;
2107 }
2108 } else {
2109 zero_out:
2110 zero_user_segment(page, 0, PAGE_SIZE);
2111 if (f2fs_need_verity(inode, page->index) &&
2112 !fsverity_verify_page(page)) {
2113 ret = -EIO;
2114 goto out;
2115 }
2116 if (!PageUptodate(page))
2117 SetPageUptodate(page);
2118 unlock_page(page);
2119 goto out;
2120 }
2121
2122 /*
2123 * This page will go to BIO. Do we need to send this
2124 * BIO off first?
2125 */
2126 if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2127 *last_block_in_bio, block_nr) ||
2128 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2129 submit_and_realloc:
2130 f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
2131 bio = NULL;
2132 }
2133 if (bio == NULL) {
2134 bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
2135 is_readahead ? REQ_RAHEAD : 0, page->index,
2136 false);
2137 if (IS_ERR(bio)) {
2138 ret = PTR_ERR(bio);
2139 bio = NULL;
2140 goto out;
2141 }
2142 }
2143
2144 /*
2145 * If the page is under writeback, we need to wait for
2146 * its completion to see the correct decrypted data.
2147 */
2148 f2fs_wait_on_block_writeback(inode, block_nr);
2149
2150 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2151 goto submit_and_realloc;
2152
2153 inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
2154 f2fs_update_iostat(F2FS_I_SB(inode), NULL, FS_DATA_READ_IO,
2155 F2FS_BLKSIZE);
2156 *last_block_in_bio = block_nr;
2157 out:
2158 *bio_ret = bio;
2159 return ret;
2160 }
2161
2162 #ifdef CONFIG_F2FS_FS_COMPRESSION
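/*
 * Read all on-disk pages of one compressed cluster.  Pages beyond EOF are
 * zeroed and dropped from the cluster; valid block addresses are taken
 * from the extent cache or the dnode.  The compressed pages are queued
 * into a decompress_io_ctx and STEP_DECOMPRESS is set so decompression
 * runs once the read bio completes.
 */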
2163 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
2164 unsigned nr_pages, sector_t *last_block_in_bio,
2165 bool is_readahead, bool for_write)
2166 {
2167 struct dnode_of_data dn;
2168 struct inode *inode = cc->inode;
2169 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2170 struct bio *bio = *bio_ret;
2171 unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
2172 sector_t last_block_in_file;
2173 const unsigned blocksize = blks_to_bytes(inode, 1);
2174 struct decompress_io_ctx *dic = NULL;
2175 struct extent_info ei = {};
2176 bool from_dnode = true;
2177 int i;
2178 int ret = 0;
2179
2180 f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
2181
2182 last_block_in_file = bytes_to_blks(inode,
2183 f2fs_readpage_limit(inode) + blocksize - 1);
2184
2185 /* get rid of pages beyond EOF */
2186 for (i = 0; i < cc->cluster_size; i++) {
2187 struct page *page = cc->rpages[i];
2188
2189 if (!page)
2190 continue;
2191 if ((sector_t)page->index >= last_block_in_file) {
2192 zero_user_segment(page, 0, PAGE_SIZE);
2193 if (!PageUptodate(page))
2194 SetPageUptodate(page);
2195 } else if (!PageUptodate(page)) {
2196 continue;
2197 }
2198 unlock_page(page);
2199 if (for_write)
2200 put_page(page);
2201 cc->rpages[i] = NULL;
2202 cc->nr_rpages--;
2203 }
2204
2205 /* we are done since all pages are beyond EOF */
2206 if (f2fs_cluster_is_empty(cc))
2207 goto out;
2208
2209 if (f2fs_lookup_read_extent_cache(inode, start_idx, &ei))
2210 from_dnode = false;
2211
2212 if (!from_dnode)
2213 goto skip_reading_dnode;
2214
2215 set_new_dnode(&dn, inode, NULL, NULL, 0);
2216 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
2217 if (ret)
2218 goto out;
2219
2220 if (unlikely(f2fs_cp_error(sbi))) {
2221 ret = -EIO;
2222 goto out_put_dnode;
2223 }
2224 f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
2225
2226 skip_reading_dnode:
2227 for (i = 1; i < cc->cluster_size; i++) {
2228 block_t blkaddr;
2229
2230 blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
2231 dn.ofs_in_node + i) :
2232 ei.blk + i - 1;
2233
2234 if (!__is_valid_data_blkaddr(blkaddr))
2235 break;
2236
2237 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
2238 ret = -EFAULT;
2239 goto out_put_dnode;
2240 }
2241 cc->nr_cpages++;
2242
2243 if (!from_dnode && i >= ei.c_len)
2244 break;
2245 }
2246
2247 /* nothing to decompress */
2248 if (cc->nr_cpages == 0) {
2249 ret = 0;
2250 goto out_put_dnode;
2251 }
2252
2253 dic = f2fs_alloc_dic(cc);
2254 if (IS_ERR(dic)) {
2255 ret = PTR_ERR(dic);
2256 goto out_put_dnode;
2257 }
2258
2259 for (i = 0; i < cc->nr_cpages; i++) {
2260 struct page *page = dic->cpages[i];
2261 block_t blkaddr;
2262 struct bio_post_read_ctx *ctx;
2263
2264 blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
2265 dn.ofs_in_node + i + 1) :
2266 ei.blk + i;
2267
2268 f2fs_wait_on_block_writeback(inode, blkaddr);
2269
2270 if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
2271 if (atomic_dec_and_test(&dic->remaining_pages)) {
2272 f2fs_decompress_cluster(dic, true);
2273 break;
2274 }
2275 continue;
2276 }
2277
2278 if (bio && (!page_is_mergeable(sbi, bio,
2279 *last_block_in_bio, blkaddr) ||
2280 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2281 submit_and_realloc:
2282 f2fs_submit_read_bio(sbi, bio, DATA);
2283 bio = NULL;
2284 }
2285
2286 if (!bio) {
2287 bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2288 is_readahead ? REQ_RAHEAD : 0,
2289 page->index, for_write);
2290 if (IS_ERR(bio)) {
2291 ret = PTR_ERR(bio);
2292 f2fs_decompress_end_io(dic, ret, true);
2293 f2fs_put_dnode(&dn);
2294 *bio_ret = NULL;
2295 return ret;
2296 }
2297 }
2298
2299 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2300 goto submit_and_realloc;
2301
2302 ctx = get_post_read_ctx(bio);
2303 ctx->enabled_steps |= STEP_DECOMPRESS;
2304 refcount_inc(&dic->refcnt);
2305
2306 inc_page_count(sbi, F2FS_RD_DATA);
2307 f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
2308 *last_block_in_bio = blkaddr;
2309 }
2310
2311 if (from_dnode)
2312 f2fs_put_dnode(&dn);
2313
2314 *bio_ret = bio;
2315 return 0;
2316
2317 out_put_dnode:
2318 if (from_dnode)
2319 f2fs_put_dnode(&dn);
2320 out:
2321 for (i = 0; i < cc->cluster_size; i++) {
2322 if (cc->rpages[i]) {
2323 ClearPageUptodate(cc->rpages[i]);
2324 unlock_page(cc->rpages[i]);
2325 }
2326 }
2327 *bio_ret = bio;
2328 return ret;
2329 }
2330 #endif
2331
2332 /*
2333 * This function was originally taken from fs/mpage.c and customized for f2fs.
2334 * The major change is that it assumes block_size == page_size, the f2fs default.
2335 */
2336 static int f2fs_mpage_readpages(struct inode *inode,
2337 struct readahead_control *rac, struct page *page)
2338 {
2339 struct bio *bio = NULL;
2340 sector_t last_block_in_bio = 0;
2341 struct f2fs_map_blocks map;
2342 #ifdef CONFIG_F2FS_FS_COMPRESSION
2343 struct compress_ctx cc = {
2344 .inode = inode,
2345 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2346 .cluster_size = F2FS_I(inode)->i_cluster_size,
2347 .cluster_idx = NULL_CLUSTER,
2348 .rpages = NULL,
2349 .cpages = NULL,
2350 .nr_rpages = 0,
2351 .nr_cpages = 0,
2352 };
2353 pgoff_t nc_cluster_idx = NULL_CLUSTER;
2354 #endif
2355 unsigned nr_pages = rac ? readahead_count(rac) : 1;
2356 unsigned max_nr_pages = nr_pages;
2357 int ret = 0;
2358
2359 map.m_pblk = 0;
2360 map.m_lblk = 0;
2361 map.m_len = 0;
2362 map.m_flags = 0;
2363 map.m_next_pgofs = NULL;
2364 map.m_next_extent = NULL;
2365 map.m_seg_type = NO_CHECK_TYPE;
2366 map.m_may_create = false;
2367
2368 for (; nr_pages; nr_pages--) {
2369 if (rac) {
2370 page = readahead_page(rac);
2371 prefetchw(&page->flags);
2372 }
2373
2374 #ifdef CONFIG_F2FS_FS_COMPRESSION
2375 if (f2fs_compressed_file(inode)) {
2376 /* there are remaining compressed pages; submit them */
2377 if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
2378 ret = f2fs_read_multi_pages(&cc, &bio,
2379 max_nr_pages,
2380 &last_block_in_bio,
2381 rac != NULL, false);
2382 f2fs_destroy_compress_ctx(&cc, false);
2383 if (ret)
2384 goto set_error_page;
2385 }
2386 if (cc.cluster_idx == NULL_CLUSTER) {
2387 if (nc_cluster_idx ==
2388 page->index >> cc.log_cluster_size) {
2389 goto read_single_page;
2390 }
2391
2392 ret = f2fs_is_compressed_cluster(inode, page->index);
2393 if (ret < 0)
2394 goto set_error_page;
2395 else if (!ret) {
2396 nc_cluster_idx =
2397 page->index >> cc.log_cluster_size;
2398 goto read_single_page;
2399 }
2400
2401 nc_cluster_idx = NULL_CLUSTER;
2402 }
2403 ret = f2fs_init_compress_ctx(&cc);
2404 if (ret)
2405 goto set_error_page;
2406
2407 f2fs_compress_ctx_add_page(&cc, page);
2408
2409 goto next_page;
2410 }
2411 read_single_page:
2412 #endif
2413
2414 ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
2415 &bio, &last_block_in_bio, rac);
2416 if (ret) {
2417 #ifdef CONFIG_F2FS_FS_COMPRESSION
2418 set_error_page:
2419 #endif
2420 zero_user_segment(page, 0, PAGE_SIZE);
2421 unlock_page(page);
2422 }
2423 #ifdef CONFIG_F2FS_FS_COMPRESSION
2424 next_page:
2425 #endif
2426 if (rac)
2427 put_page(page);
2428
2429 #ifdef CONFIG_F2FS_FS_COMPRESSION
2430 if (f2fs_compressed_file(inode)) {
2431 /* last page */
2432 if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
2433 ret = f2fs_read_multi_pages(&cc, &bio,
2434 max_nr_pages,
2435 &last_block_in_bio,
2436 rac != NULL, false);
2437 f2fs_destroy_compress_ctx(&cc, false);
2438 }
2439 }
2440 #endif
2441 }
2442 if (bio)
2443 f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
2444 return ret;
2445 }
2446
2447 static int f2fs_read_data_folio(struct file *file, struct folio *folio)
2448 {
2449 struct page *page = &folio->page;
2450 struct inode *inode = page_file_mapping(page)->host;
2451 int ret = -EAGAIN;
2452
2453 trace_f2fs_readpage(page, DATA);
2454
2455 if (!f2fs_is_compress_backend_ready(inode)) {
2456 unlock_page(page);
2457 return -EOPNOTSUPP;
2458 }
2459
2460 /* If the file has inline data, try to read it directly */
2461 if (f2fs_has_inline_data(inode))
2462 ret = f2fs_read_inline_data(inode, page);
2463 if (ret == -EAGAIN)
2464 ret = f2fs_mpage_readpages(inode, NULL, page);
2465 return ret;
2466 }
2467
2468 static void f2fs_readahead(struct readahead_control *rac)
2469 {
2470 struct inode *inode = rac->mapping->host;
2471
2472 trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
2473
2474 if (!f2fs_is_compress_backend_ready(inode))
2475 return;
2476
2477 /* If the file has inline data, skip readahead */
2478 if (f2fs_has_inline_data(inode))
2479 return;
2480
2481 f2fs_mpage_readpages(inode, rac, NULL);
2482 }
2483
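/*
 * For fs-layer encryption, encrypt the page (or the compressed page) into
 * a bounce page before it is written, retrying with __GFP_NOFAIL after
 * flushing merged writes if memory is tight.  Inline-crypto and
 * unencrypted files need no bounce page.  Any cached copy of the old
 * block in META_MAPPING is refreshed with the new ciphertext.
 */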
2484 int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
2485 {
2486 struct inode *inode = fio->page->mapping->host;
2487 struct page *mpage, *page;
2488 gfp_t gfp_flags = GFP_NOFS;
2489
2490 if (!f2fs_encrypted_file(inode))
2491 return 0;
2492
2493 page = fio->compressed_page ? fio->compressed_page : fio->page;
2494
2495 if (fscrypt_inode_uses_inline_crypto(inode))
2496 return 0;
2497
2498 retry_encrypt:
2499 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2500 PAGE_SIZE, 0, gfp_flags);
2501 if (IS_ERR(fio->encrypted_page)) {
2502 /* flush pending IOs and wait for a while in the ENOMEM case */
2503 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
2504 f2fs_flush_merged_writes(fio->sbi);
2505 memalloc_retry_wait(GFP_NOFS);
2506 gfp_flags |= __GFP_NOFAIL;
2507 goto retry_encrypt;
2508 }
2509 return PTR_ERR(fio->encrypted_page);
2510 }
2511
2512 mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
2513 if (mpage) {
2514 if (PageUptodate(mpage))
2515 memcpy(page_address(mpage),
2516 page_address(fio->encrypted_page), PAGE_SIZE);
2517 f2fs_put_page(mpage, 1);
2518 }
2519 return 0;
2520 }
2521
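/*
 * Evaluate the filesystem's in-place-update (IPU) policy to decide
 * whether this data write may rewrite the existing block instead of
 * allocating a new one.
 */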
2522 static inline bool check_inplace_update_policy(struct inode *inode,
2523 struct f2fs_io_info *fio)
2524 {
2525 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2526
2527 if (IS_F2FS_IPU_HONOR_OPU_WRITE(sbi) &&
2528 is_inode_flag_set(inode, FI_OPU_WRITE))
2529 return false;
2530 if (IS_F2FS_IPU_FORCE(sbi))
2531 return true;
2532 if (IS_F2FS_IPU_SSR(sbi) && f2fs_need_SSR(sbi))
2533 return true;
2534 if (IS_F2FS_IPU_UTIL(sbi) && utilization(sbi) > SM_I(sbi)->min_ipu_util)
2535 return true;
2536 if (IS_F2FS_IPU_SSR_UTIL(sbi) && f2fs_need_SSR(sbi) &&
2537 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2538 return true;
2539
2540 /*
2541 * IPU for rewrite async pages
2542 */
2543 if (IS_F2FS_IPU_ASYNC(sbi) && fio && fio->op == REQ_OP_WRITE &&
2544 !(fio->op_flags & REQ_SYNC) && !IS_ENCRYPTED(inode))
2545 return true;
2546
2547 /* this is only set during fdatasync */
2548 if (IS_F2FS_IPU_FSYNC(sbi) && is_inode_flag_set(inode, FI_NEED_IPU))
2549 return true;
2550
2551 if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2552 !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2553 return true;
2554
2555 return false;
2556 }
2557
2558 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
2559 {
2560 /* swap file is migrating in aligned write mode */
2561 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2562 return false;
2563
2564 if (f2fs_is_pinned_file(inode))
2565 return true;
2566
2567 /* if this is a cold file, we should overwrite it to avoid fragmentation */
2568 if (file_is_cold(inode) && !is_inode_flag_set(inode, FI_OPU_WRITE))
2569 return true;
2570
2571 return check_inplace_update_policy(inode, fio);
2572 }
2573
2574 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
2575 {
2576 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2577
2578 /* The cases below were already checked when it was set. */
2579 if (f2fs_is_pinned_file(inode))
2580 return false;
2581 if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK))
2582 return true;
2583 if (f2fs_lfs_mode(sbi))
2584 return true;
2585 if (S_ISDIR(inode->i_mode))
2586 return true;
2587 if (IS_NOQUOTA(inode))
2588 return true;
2589 if (f2fs_used_in_atomic_write(inode))
2590 return true;
2591
2592 /* swap file is migrating in aligned write mode */
2593 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2594 return true;
2595
2596 if (is_inode_flag_set(inode, FI_OPU_WRITE))
2597 return true;
2598
2599 if (fio) {
2600 if (page_private_gcing(fio->page))
2601 return true;
2602 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2603 f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2604 return true;
2605 }
2606 return false;
2607 }
2608
2609 static inline bool need_inplace_update(struct f2fs_io_info *fio)
2610 {
2611 struct inode *inode = fio->page->mapping->host;
2612
2613 if (f2fs_should_update_outplace(inode, fio))
2614 return false;
2615
2616 return f2fs_should_update_inplace(inode, fio);
2617 }
2618
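/*
 * Write back one dirty data page, either in place (IPU) over its current
 * block address or out of place (OPU) to a newly allocated block,
 * grabbing f2fs_lock_op() as dictated by fio->need_lock.
 */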
2619 int f2fs_do_write_data_page(struct f2fs_io_info *fio)
2620 {
2621 struct page *page = fio->page;
2622 struct inode *inode = page->mapping->host;
2623 struct dnode_of_data dn;
2624 struct node_info ni;
2625 bool ipu_force = false;
2626 int err = 0;
2627
2628 /* Use COW inode to make dnode_of_data for atomic write */
2629 if (f2fs_is_atomic_file(inode))
2630 set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
2631 else
2632 set_new_dnode(&dn, inode, NULL, NULL, 0);
2633
2634 if (need_inplace_update(fio) &&
2635 f2fs_lookup_read_extent_cache_block(inode, page->index,
2636 &fio->old_blkaddr)) {
2637 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2638 DATA_GENERIC_ENHANCE)) {
2639 f2fs_handle_error(fio->sbi,
2640 ERROR_INVALID_BLKADDR);
2641 return -EFSCORRUPTED;
2642 }
2643
2644 ipu_force = true;
2645 fio->need_lock = LOCK_DONE;
2646 goto got_it;
2647 }
2648
2649 /* Avoid deadlock between page->lock and f2fs_lock_op */
2650 if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
2651 return -EAGAIN;
2652
2653 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
2654 if (err)
2655 goto out;
2656
2657 fio->old_blkaddr = dn.data_blkaddr;
2658
2659 /* This page is already truncated */
2660 if (fio->old_blkaddr == NULL_ADDR) {
2661 ClearPageUptodate(page);
2662 clear_page_private_gcing(page);
2663 goto out_writepage;
2664 }
2665 got_it:
2666 if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2667 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2668 DATA_GENERIC_ENHANCE)) {
2669 err = -EFSCORRUPTED;
2670 f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
2671 goto out_writepage;
2672 }
2673
2674 /* wait for GCed page writeback via META_MAPPING */
2675 if (fio->meta_gc)
2676 f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
2677
2678 /*
2679 * If the current allocation needs SSR,
2680 * it is better to write the updated data in place.
2681 */
2682 if (ipu_force ||
2683 (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2684 need_inplace_update(fio))) {
2685 err = f2fs_encrypt_one_page(fio);
2686 if (err)
2687 goto out_writepage;
2688
2689 set_page_writeback(page);
2690 f2fs_put_dnode(&dn);
2691 if (fio->need_lock == LOCK_REQ)
2692 f2fs_unlock_op(fio->sbi);
2693 err = f2fs_inplace_write_data(fio);
2694 if (err) {
2695 if (fscrypt_inode_uses_fs_layer_crypto(inode))
2696 fscrypt_finalize_bounce_page(&fio->encrypted_page);
2697 if (PageWriteback(page))
2698 end_page_writeback(page);
2699 } else {
2700 set_inode_flag(inode, FI_UPDATE_WRITE);
2701 }
2702 trace_f2fs_do_write_data_page(fio->page, IPU);
2703 return err;
2704 }
2705
2706 if (fio->need_lock == LOCK_RETRY) {
2707 if (!f2fs_trylock_op(fio->sbi)) {
2708 err = -EAGAIN;
2709 goto out_writepage;
2710 }
2711 fio->need_lock = LOCK_REQ;
2712 }
2713
2714 err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false);
2715 if (err)
2716 goto out_writepage;
2717
2718 fio->version = ni.version;
2719
2720 err = f2fs_encrypt_one_page(fio);
2721 if (err)
2722 goto out_writepage;
2723
2724 set_page_writeback(page);
2725
2726 if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
2727 f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
2728
2729 /* LFS mode write path */
2730 f2fs_outplace_write_data(&dn, fio);
2731 trace_f2fs_do_write_data_page(page, OPU);
2732 set_inode_flag(inode, FI_APPEND_WRITE);
2733 out_writepage:
2734 f2fs_put_dnode(&dn);
2735 out:
2736 if (fio->need_lock == LOCK_REQ)
2737 f2fs_unlock_op(fio->sbi);
2738 return err;
2739 }
2740
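/*
 * Writeback entry point for a single data page.  Pages wholly beyond
 * i_size are skipped, the tail of the last page is zeroed, and dentry or
 * quota pages are written under checkpoint control.  On failure the page
 * is redirtied for a later retry.
 */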
2741 int f2fs_write_single_data_page(struct page *page, int *submitted,
2742 struct bio **bio,
2743 sector_t *last_block,
2744 struct writeback_control *wbc,
2745 enum iostat_type io_type,
2746 int compr_blocks,
2747 bool allow_balance)
2748 {
2749 struct inode *inode = page->mapping->host;
2750 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2751 loff_t i_size = i_size_read(inode);
2752 const pgoff_t end_index = ((unsigned long long)i_size)
2753 >> PAGE_SHIFT;
2754 loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
2755 unsigned offset = 0;
2756 bool need_balance_fs = false;
2757 bool quota_inode = IS_NOQUOTA(inode);
2758 int err = 0;
2759 struct f2fs_io_info fio = {
2760 .sbi = sbi,
2761 .ino = inode->i_ino,
2762 .type = DATA,
2763 .op = REQ_OP_WRITE,
2764 .op_flags = wbc_to_write_flags(wbc),
2765 .old_blkaddr = NULL_ADDR,
2766 .page = page,
2767 .encrypted_page = NULL,
2768 .submitted = 0,
2769 .compr_blocks = compr_blocks,
2770 .need_lock = compr_blocks ? LOCK_DONE : LOCK_RETRY,
2771 .meta_gc = f2fs_meta_inode_gc_required(inode) ? 1 : 0,
2772 .io_type = io_type,
2773 .io_wbc = wbc,
2774 .bio = bio,
2775 .last_block = last_block,
2776 };
2777
2778 trace_f2fs_writepage(page, DATA);
2779
2780 /* we should bypass data pages to let the kworker jobs proceed */
2781 if (unlikely(f2fs_cp_error(sbi))) {
2782 mapping_set_error(page->mapping, -EIO);
2783 /*
2784 * don't drop any dirty dentry pages, to keep the latest
2785 * directory structure.
2786 */
2787 if (S_ISDIR(inode->i_mode) &&
2788 !is_sbi_flag_set(sbi, SBI_IS_CLOSE))
2789 goto redirty_out;
2790
2791 /* keep data pages in remount-ro mode */
2792 if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
2793 goto redirty_out;
2794 goto out;
2795 }
2796
2797 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2798 goto redirty_out;
2799
2800 if (page->index < end_index ||
2801 f2fs_verity_in_progress(inode) ||
2802 compr_blocks)
2803 goto write;
2804
2805 /*
2806 * If the offset is beyond the end of the file,
2807 * this page does not have to be written to disk.
2808 */
2809 offset = i_size & (PAGE_SIZE - 1);
2810 if ((page->index >= end_index + 1) || !offset)
2811 goto out;
2812
2813 zero_user_segment(page, offset, PAGE_SIZE);
2814 write:
2815 /* Dentry/quota blocks are controlled by checkpoint */
2816 if (S_ISDIR(inode->i_mode) || quota_inode) {
2817 /*
2818 * We need to wait for node_write to avoid block allocation during
2819 * checkpoint. This can only happen with quota writes, which can
2820 * cause the discard race condition below.
2821 */
2822 if (quota_inode)
2823 f2fs_down_read(&sbi->node_write);
2824
2825 fio.need_lock = LOCK_DONE;
2826 err = f2fs_do_write_data_page(&fio);
2827
2828 if (quota_inode)
2829 f2fs_up_read(&sbi->node_write);
2830
2831 goto done;
2832 }
2833
2834 if (!wbc->for_reclaim)
2835 need_balance_fs = true;
2836 else if (has_not_enough_free_secs(sbi, 0, 0))
2837 goto redirty_out;
2838 else
2839 set_inode_flag(inode, FI_HOT_DATA);
2840
2841 err = -EAGAIN;
2842 if (f2fs_has_inline_data(inode)) {
2843 err = f2fs_write_inline_data(inode, page);
2844 if (!err)
2845 goto out;
2846 }
2847
2848 if (err == -EAGAIN) {
2849 err = f2fs_do_write_data_page(&fio);
2850 if (err == -EAGAIN) {
2851 f2fs_bug_on(sbi, compr_blocks);
2852 fio.need_lock = LOCK_REQ;
2853 err = f2fs_do_write_data_page(&fio);
2854 }
2855 }
2856
2857 if (err) {
2858 file_set_keep_isize(inode);
2859 } else {
2860 spin_lock(&F2FS_I(inode)->i_size_lock);
2861 if (F2FS_I(inode)->last_disk_size < psize)
2862 F2FS_I(inode)->last_disk_size = psize;
2863 spin_unlock(&F2FS_I(inode)->i_size_lock);
2864 }
2865
2866 done:
2867 if (err && err != -ENOENT)
2868 goto redirty_out;
2869
2870 out:
2871 inode_dec_dirty_pages(inode);
2872 if (err) {
2873 ClearPageUptodate(page);
2874 clear_page_private_gcing(page);
2875 }
2876
2877 if (wbc->for_reclaim) {
2878 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
2879 clear_inode_flag(inode, FI_HOT_DATA);
2880 f2fs_remove_dirty_inode(inode);
2881 submitted = NULL;
2882 }
2883 unlock_page(page);
2884 if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
2885 !F2FS_I(inode)->wb_task && allow_balance)
2886 f2fs_balance_fs(sbi, need_balance_fs);
2887
2888 if (unlikely(f2fs_cp_error(sbi))) {
2889 f2fs_submit_merged_write(sbi, DATA);
2890 if (bio && *bio)
2891 f2fs_submit_merged_ipu_write(sbi, bio, NULL);
2892 submitted = NULL;
2893 }
2894
2895 if (submitted)
2896 *submitted = fio.submitted;
2897
2898 return 0;
2899
2900 redirty_out:
2901 redirty_page_for_writepage(wbc, page);
2902 /*
2903 * pageout() in MM translates EAGAIN into an error and calls handle_write_error()
2904 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
2905 * file_write_and_wait_range() will then see the EIO error, which is critical
2906 * for the return value of fsync() and for reporting atomic_write failure to the user.
2907 */
2908 if (!err || wbc->for_reclaim)
2909 return AOP_WRITEPAGE_ACTIVATE;
2910 unlock_page(page);
2911 return err;
2912 }
2913
2914 static int f2fs_write_data_page(struct page *page,
2915 struct writeback_control *wbc)
2916 {
2917 #ifdef CONFIG_F2FS_FS_COMPRESSION
2918 struct inode *inode = page->mapping->host;
2919
2920 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
2921 goto out;
2922
2923 if (f2fs_compressed_file(inode)) {
2924 if (f2fs_is_compressed_cluster(inode, page->index)) {
2925 redirty_page_for_writepage(wbc, page);
2926 return AOP_WRITEPAGE_ACTIVATE;
2927 }
2928 }
2929 out:
2930 #endif
2931
2932 return f2fs_write_single_data_page(page, NULL, NULL, NULL,
2933 wbc, FS_DATA_IO, 0, true);
2934 }
2935
2936 /*
2937 * This function was copied from write_cache_pages in mm/page-writeback.c.
2938 * The major change is that cold data pages are written in a separate step
2939 * from warm/hot data pages.
2940 */
2941 static int f2fs_write_cache_pages(struct address_space *mapping,
2942 struct writeback_control *wbc,
2943 enum iostat_type io_type)
2944 {
2945 int ret = 0;
2946 int done = 0, retry = 0;
2947 struct page *pages_local[F2FS_ONSTACK_PAGES];
2948 struct page **pages = pages_local;
2949 struct folio_batch fbatch;
2950 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2951 struct bio *bio = NULL;
2952 sector_t last_block;
2953 #ifdef CONFIG_F2FS_FS_COMPRESSION
2954 struct inode *inode = mapping->host;
2955 struct compress_ctx cc = {
2956 .inode = inode,
2957 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2958 .cluster_size = F2FS_I(inode)->i_cluster_size,
2959 .cluster_idx = NULL_CLUSTER,
2960 .rpages = NULL,
2961 .nr_rpages = 0,
2962 .cpages = NULL,
2963 .valid_nr_cpages = 0,
2964 .rbuf = NULL,
2965 .cbuf = NULL,
2966 .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
2967 .private = NULL,
2968 };
2969 #endif
2970 int nr_folios, p, idx;
2971 int nr_pages;
2972 unsigned int max_pages = F2FS_ONSTACK_PAGES;
2973 pgoff_t index;
2974 pgoff_t end; /* Inclusive */
2975 pgoff_t done_index;
2976 int range_whole = 0;
2977 xa_mark_t tag;
2978 int nwritten = 0;
2979 int submitted = 0;
2980 int i;
2981
2982 #ifdef CONFIG_F2FS_FS_COMPRESSION
2983 if (f2fs_compressed_file(inode) &&
2984 1 << cc.log_cluster_size > F2FS_ONSTACK_PAGES) {
2985 pages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
2986 cc.log_cluster_size, GFP_NOFS | __GFP_NOFAIL);
2987 max_pages = 1 << cc.log_cluster_size;
2988 }
2989 #endif
2990
2991 folio_batch_init(&fbatch);
2992
2993 if (get_dirty_pages(mapping->host) <=
2994 SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
2995 set_inode_flag(mapping->host, FI_HOT_DATA);
2996 else
2997 clear_inode_flag(mapping->host, FI_HOT_DATA);
2998
2999 if (wbc->range_cyclic) {
3000 index = mapping->writeback_index; /* prev offset */
3001 end = -1;
3002 } else {
3003 index = wbc->range_start >> PAGE_SHIFT;
3004 end = wbc->range_end >> PAGE_SHIFT;
3005 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
3006 range_whole = 1;
3007 }
3008 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
3009 tag = PAGECACHE_TAG_TOWRITE;
3010 else
3011 tag = PAGECACHE_TAG_DIRTY;
3012 retry:
3013 retry = 0;
3014 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
3015 tag_pages_for_writeback(mapping, index, end);
3016 done_index = index;
3017 while (!done && !retry && (index <= end)) {
3018 nr_pages = 0;
3019 again:
3020 nr_folios = filemap_get_folios_tag(mapping, &index, end,
3021 tag, &fbatch);
3022 if (nr_folios == 0) {
3023 if (nr_pages)
3024 goto write;
3025 break;
3026 }
3027
3028 for (i = 0; i < nr_folios; i++) {
3029 struct folio *folio = fbatch.folios[i];
3030
3031 idx = 0;
3032 p = folio_nr_pages(folio);
3033 add_more:
3034 pages[nr_pages] = folio_page(folio, idx);
3035 folio_get(folio);
3036 if (++nr_pages == max_pages) {
3037 index = folio->index + idx + 1;
3038 folio_batch_release(&fbatch);
3039 goto write;
3040 }
3041 if (++idx < p)
3042 goto add_more;
3043 }
3044 folio_batch_release(&fbatch);
3045 goto again;
3046 write:
3047 for (i = 0; i < nr_pages; i++) {
3048 struct page *page = pages[i];
3049 struct folio *folio = page_folio(page);
3050 bool need_readd;
3051 readd:
3052 need_readd = false;
3053 #ifdef CONFIG_F2FS_FS_COMPRESSION
3054 if (f2fs_compressed_file(inode)) {
3055 void *fsdata = NULL;
3056 struct page *pagep;
3057 int ret2;
3058
3059 ret = f2fs_init_compress_ctx(&cc);
3060 if (ret) {
3061 done = 1;
3062 break;
3063 }
3064
3065 if (!f2fs_cluster_can_merge_page(&cc,
3066 folio->index)) {
3067 ret = f2fs_write_multi_pages(&cc,
3068 &submitted, wbc, io_type);
3069 if (!ret)
3070 need_readd = true;
3071 goto result;
3072 }
3073
3074 if (unlikely(f2fs_cp_error(sbi)))
3075 goto lock_folio;
3076
3077 if (!f2fs_cluster_is_empty(&cc))
3078 goto lock_folio;
3079
3080 if (f2fs_all_cluster_page_ready(&cc,
3081 pages, i, nr_pages, true))
3082 goto lock_folio;
3083
3084 ret2 = f2fs_prepare_compress_overwrite(
3085 inode, &pagep,
3086 folio->index, &fsdata);
3087 if (ret2 < 0) {
3088 ret = ret2;
3089 done = 1;
3090 break;
3091 } else if (ret2 &&
3092 (!f2fs_compress_write_end(inode,
3093 fsdata, folio->index, 1) ||
3094 !f2fs_all_cluster_page_ready(&cc,
3095 pages, i, nr_pages,
3096 false))) {
3097 retry = 1;
3098 break;
3099 }
3100 }
3101 #endif
3102 /* give priority to WB_SYNC threads */
3103 if (atomic_read(&sbi->wb_sync_req[DATA]) &&
3104 wbc->sync_mode == WB_SYNC_NONE) {
3105 done = 1;
3106 break;
3107 }
3108 #ifdef CONFIG_F2FS_FS_COMPRESSION
3109 lock_folio:
3110 #endif
3111 done_index = folio->index;
3112 retry_write:
3113 folio_lock(folio);
3114
3115 if (unlikely(folio->mapping != mapping)) {
3116 continue_unlock:
3117 folio_unlock(folio);
3118 continue;
3119 }
3120
3121 if (!folio_test_dirty(folio)) {
3122 /* someone wrote it for us */
3123 goto continue_unlock;
3124 }
3125
3126 if (folio_test_writeback(folio)) {
3127 if (wbc->sync_mode == WB_SYNC_NONE)
3128 goto continue_unlock;
3129 f2fs_wait_on_page_writeback(&folio->page, DATA, true, true);
3130 }
3131
3132 if (!folio_clear_dirty_for_io(folio))
3133 goto continue_unlock;
3134
3135 #ifdef CONFIG_F2FS_FS_COMPRESSION
3136 if (f2fs_compressed_file(inode)) {
3137 folio_get(folio);
3138 f2fs_compress_ctx_add_page(&cc, &folio->page);
3139 continue;
3140 }
3141 #endif
3142 ret = f2fs_write_single_data_page(&folio->page,
3143 &submitted, &bio, &last_block,
3144 wbc, io_type, 0, true);
3145 if (ret == AOP_WRITEPAGE_ACTIVATE)
3146 folio_unlock(folio);
3147 #ifdef CONFIG_F2FS_FS_COMPRESSION
3148 result:
3149 #endif
3150 nwritten += submitted;
3151 wbc->nr_to_write -= submitted;
3152
3153 if (unlikely(ret)) {
3154 /*
3155 * keep nr_to_write, since vfs uses this to
3156 * get # of written pages.
3157 */
3158 if (ret == AOP_WRITEPAGE_ACTIVATE) {
3159 ret = 0;
3160 goto next;
3161 } else if (ret == -EAGAIN) {
3162 ret = 0;
3163 if (wbc->sync_mode == WB_SYNC_ALL) {
3164 f2fs_io_schedule_timeout(
3165 DEFAULT_IO_TIMEOUT);
3166 goto retry_write;
3167 }
3168 goto next;
3169 }
3170 done_index = folio_next_index(folio);
3171 done = 1;
3172 break;
3173 }
3174
3175 if (wbc->nr_to_write <= 0 &&
3176 wbc->sync_mode == WB_SYNC_NONE) {
3177 done = 1;
3178 break;
3179 }
3180 next:
3181 if (need_readd)
3182 goto readd;
3183 }
3184 release_pages(pages, nr_pages);
3185 cond_resched();
3186 }
3187 #ifdef CONFIG_F2FS_FS_COMPRESSION
3188 /* flush remaining pages in the compress cluster */
3189 if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
3190 ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
3191 nwritten += submitted;
3192 wbc->nr_to_write -= submitted;
3193 if (ret) {
3194 done = 1;
3195 retry = 0;
3196 }
3197 }
3198 if (f2fs_compressed_file(inode))
3199 f2fs_destroy_compress_ctx(&cc, false);
3200 #endif
3201 if (retry) {
3202 index = 0;
3203 end = -1;
3204 goto retry;
3205 }
3206 if (wbc->range_cyclic && !done)
3207 done_index = 0;
3208 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
3209 mapping->writeback_index = done_index;
3210
3211 if (nwritten)
3212 f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
3213 NULL, 0, DATA);
3214 /* submit cached bio of IPU write */
3215 if (bio)
3216 f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
3217
3218 #ifdef CONFIG_F2FS_FS_COMPRESSION
3219 if (pages != pages_local)
3220 kfree(pages);
3221 #endif
3222
3223 return ret;
3224 }
3225
3226 static inline bool __should_serialize_io(struct inode *inode,
3227 struct writeback_control *wbc)
3228 {
3229 /* to avoid deadlock in the data flush path */
3230 if (F2FS_I(inode)->wb_task)
3231 return false;
3232
3233 if (!S_ISREG(inode->i_mode))
3234 return false;
3235 if (IS_NOQUOTA(inode))
3236 return false;
3237
3238 if (f2fs_need_compress_data(inode))
3239 return true;
3240 if (wbc->sync_mode != WB_SYNC_ALL)
3241 return true;
3242 if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
3243 return true;
3244 return false;
3245 }
3246
3247 static int __f2fs_write_data_pages(struct address_space *mapping,
3248 struct writeback_control *wbc,
3249 enum iostat_type io_type)
3250 {
3251 struct inode *inode = mapping->host;
3252 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3253 struct blk_plug plug;
3254 int ret;
3255 bool locked = false;
3256
3257 /* deal with chardevs and other special files */
3258 if (!mapping->a_ops->writepage)
3259 return 0;
3260
3261 /* skip writing if there is no dirty page in this inode */
3262 if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
3263 return 0;
3264
3265 /* during POR, we don't need to trigger writepage at all. */
3266 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
3267 goto skip_write;
3268
3269 if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
3270 wbc->sync_mode == WB_SYNC_NONE &&
3271 get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
3272 f2fs_available_free_memory(sbi, DIRTY_DENTS))
3273 goto skip_write;
3274
3275 /* skip writing during the file defragment preparation stage */
3276 if (is_inode_flag_set(inode, FI_SKIP_WRITES))
3277 goto skip_write;
3278
3279 trace_f2fs_writepages(mapping->host, wbc, DATA);
3280
3281 /* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
3282 if (wbc->sync_mode == WB_SYNC_ALL)
3283 atomic_inc(&sbi->wb_sync_req[DATA]);
3284 else if (atomic_read(&sbi->wb_sync_req[DATA])) {
3285 /* to avoid potential deadlock */
3286 if (current->plug)
3287 blk_finish_plug(current->plug);
3288 goto skip_write;
3289 }
3290
3291 if (__should_serialize_io(inode, wbc)) {
3292 mutex_lock(&sbi->writepages);
3293 locked = true;
3294 }
3295
3296 blk_start_plug(&plug);
3297 ret = f2fs_write_cache_pages(mapping, wbc, io_type);
3298 blk_finish_plug(&plug);
3299
3300 if (locked)
3301 mutex_unlock(&sbi->writepages);
3302
3303 if (wbc->sync_mode == WB_SYNC_ALL)
3304 atomic_dec(&sbi->wb_sync_req[DATA]);
3305 /*
3306 * if some pages were truncated, we cannot guarantee that
3307 * mapping->host can detect the pending bios.
3308 */
3309
3310 f2fs_remove_dirty_inode(inode);
3311 return ret;
3312
3313 skip_write:
3314 wbc->pages_skipped += get_dirty_pages(inode);
3315 trace_f2fs_writepages(mapping->host, wbc, DATA);
3316 return 0;
3317 }
3318
3319 static int f2fs_write_data_pages(struct address_space *mapping,
3320 struct writeback_control *wbc)
3321 {
3322 struct inode *inode = mapping->host;
3323
3324 return __f2fs_write_data_pages(mapping, wbc,
3325 F2FS_I(inode)->cp_task == current ?
3326 FS_CP_DATA_IO : FS_DATA_IO);
3327 }
3328
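/*
 * A buffered write failed after possibly extending the file: drop the
 * pagecache past the old i_size and truncate any blocks that were
 * allocated beyond it.
 */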
3329 void f2fs_write_failed(struct inode *inode, loff_t to)
3330 {
3331 loff_t i_size = i_size_read(inode);
3332
3333 if (IS_NOQUOTA(inode))
3334 return;
3335
3336 /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
3337 if (to > i_size && !f2fs_verity_in_progress(inode)) {
3338 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3339 filemap_invalidate_lock(inode->i_mapping);
3340
3341 truncate_pagecache(inode, i_size);
3342 f2fs_truncate_blocks(inode, i_size, true);
3343
3344 filemap_invalidate_unlock(inode->i_mapping);
3345 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3346 }
3347 }
3348
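/*
 * Resolve the block address backing the page being written.  Inline data
 * is read or converted as needed, and the mapping lock is taken whenever
 * a new block may have to be reserved.
 */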
3349 static int prepare_write_begin(struct f2fs_sb_info *sbi,
3350 struct page *page, loff_t pos, unsigned len,
3351 block_t *blk_addr, bool *node_changed)
3352 {
3353 struct inode *inode = page->mapping->host;
3354 pgoff_t index = page->index;
3355 struct dnode_of_data dn;
3356 struct page *ipage;
3357 bool locked = false;
3358 int flag = F2FS_GET_BLOCK_PRE_AIO;
3359 int err = 0;
3360
3361 /*
3362 * If a whole page is being written and we already preallocated all the
3363 * blocks, then there is no need to get a block address now.
3364 */
3365 if (len == PAGE_SIZE && is_inode_flag_set(inode, FI_PREALLOCATED_ALL))
3366 return 0;
3367
3368 /* f2fs_lock_op avoids race between write CP and convert_inline_page */
3369 if (f2fs_has_inline_data(inode)) {
3370 if (pos + len > MAX_INLINE_DATA(inode))
3371 flag = F2FS_GET_BLOCK_DEFAULT;
3372 f2fs_map_lock(sbi, flag);
3373 locked = true;
3374 } else if ((pos & PAGE_MASK) >= i_size_read(inode)) {
3375 f2fs_map_lock(sbi, flag);
3376 locked = true;
3377 }
3378
3379 restart:
3380 /* check inline_data */
3381 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3382 if (IS_ERR(ipage)) {
3383 err = PTR_ERR(ipage);
3384 goto unlock_out;
3385 }
3386
3387 set_new_dnode(&dn, inode, ipage, ipage, 0);
3388
3389 if (f2fs_has_inline_data(inode)) {
3390 if (pos + len <= MAX_INLINE_DATA(inode)) {
3391 f2fs_do_read_inline_data(page, ipage);
3392 set_inode_flag(inode, FI_DATA_EXIST);
3393 if (inode->i_nlink)
3394 set_page_private_inline(ipage);
3395 goto out;
3396 }
3397 err = f2fs_convert_inline_page(&dn, page);
3398 if (err || dn.data_blkaddr != NULL_ADDR)
3399 goto out;
3400 }
3401
3402 if (!f2fs_lookup_read_extent_cache_block(inode, index,
3403 &dn.data_blkaddr)) {
3404 if (locked) {
3405 err = f2fs_reserve_block(&dn, index);
3406 goto out;
3407 }
3408
3409 /* hole case */
3410 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3411 if (!err && dn.data_blkaddr != NULL_ADDR)
3412 goto out;
3413 f2fs_put_dnode(&dn);
3414 f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
3415 WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
3416 locked = true;
3417 goto restart;
3418 }
3419 out:
3420 if (!err) {
3421 /* convert_inline_page can make node_changed */
3422 *blk_addr = dn.data_blkaddr;
3423 *node_changed = dn.node_changed;
3424 }
3425 f2fs_put_dnode(&dn);
3426 unlock_out:
3427 if (locked)
3428 f2fs_map_unlock(sbi, flag);
3429 return err;
3430 }
3431
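/*
 * Look up the existing block address for @index without reserving
 * anything; a hole is reported as NULL_ADDR.
 */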
3432 static int __find_data_block(struct inode *inode, pgoff_t index,
3433 block_t *blk_addr)
3434 {
3435 struct dnode_of_data dn;
3436 struct page *ipage;
3437 int err = 0;
3438
3439 ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
3440 if (IS_ERR(ipage))
3441 return PTR_ERR(ipage);
3442
3443 set_new_dnode(&dn, inode, ipage, ipage, 0);
3444
3445 if (!f2fs_lookup_read_extent_cache_block(inode, index,
3446 &dn.data_blkaddr)) {
3447 /* hole case */
3448 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3449 if (err) {
3450 dn.data_blkaddr = NULL_ADDR;
3451 err = 0;
3452 }
3453 }
3454 *blk_addr = dn.data_blkaddr;
3455 f2fs_put_dnode(&dn);
3456 return err;
3457 }
3458
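/*
 * Reserve a block for @index (unless one is already cached in the read
 * extent cache) while holding the PRE_AIO mapping lock.
 */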
3459 static int __reserve_data_block(struct inode *inode, pgoff_t index,
3460 block_t *blk_addr, bool *node_changed)
3461 {
3462 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3463 struct dnode_of_data dn;
3464 struct page *ipage;
3465 int err = 0;
3466
3467 f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
3468
3469 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3470 if (IS_ERR(ipage)) {
3471 err = PTR_ERR(ipage);
3472 goto unlock_out;
3473 }
3474 set_new_dnode(&dn, inode, ipage, ipage, 0);
3475
3476 if (!f2fs_lookup_read_extent_cache_block(dn.inode, index,
3477 &dn.data_blkaddr))
3478 err = f2fs_reserve_block(&dn, index);
3479
3480 *blk_addr = dn.data_blkaddr;
3481 *node_changed = dn.node_changed;
3482 f2fs_put_dnode(&dn);
3483
3484 unlock_out:
3485 f2fs_map_unlock(sbi, F2FS_GET_BLOCK_PRE_AIO);
3486 return err;
3487 }
3488
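/*
 * For an atomic write, decide which block the page should be read from:
 * the COW inode if the block was already copied there, otherwise the
 * original inode.  A new block is reserved in the COW inode for the
 * update when none exists yet.
 */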
3489 static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
3490 struct page *page, loff_t pos, unsigned int len,
3491 block_t *blk_addr, bool *node_changed, bool *use_cow)
3492 {
3493 struct inode *inode = page->mapping->host;
3494 struct inode *cow_inode = F2FS_I(inode)->cow_inode;
3495 pgoff_t index = page->index;
3496 int err = 0;
3497 block_t ori_blk_addr = NULL_ADDR;
3498
3499 /* If pos is beyond the end of file, reserve a new block in COW inode */
3500 if ((pos & PAGE_MASK) >= i_size_read(inode))
3501 goto reserve_block;
3502
3503 /* Look for the block in COW inode first */
3504 err = __find_data_block(cow_inode, index, blk_addr);
3505 if (err) {
3506 return err;
3507 } else if (*blk_addr != NULL_ADDR) {
3508 *use_cow = true;
3509 return 0;
3510 }
3511
3512 if (is_inode_flag_set(inode, FI_ATOMIC_REPLACE))
3513 goto reserve_block;
3514
3515 /* Look for the block in the original inode */
3516 err = __find_data_block(inode, index, &ori_blk_addr);
3517 if (err)
3518 return err;
3519
3520 reserve_block:
3521 /* Finally, we should reserve a new block in COW inode for the update */
3522 err = __reserve_data_block(cow_inode, index, blk_addr, node_changed);
3523 if (err)
3524 return err;
3525 inc_atomic_write_cnt(inode);
3526
3527 if (ori_blk_addr != NULL_ADDR)
3528 *blk_addr = ori_blk_addr;
3529 return 0;
3530 }
3531
3532 static int f2fs_write_begin(struct file *file, struct address_space *mapping,
3533 loff_t pos, unsigned len, struct page **pagep, void **fsdata)
3534 {
3535 struct inode *inode = mapping->host;
3536 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3537 struct page *page = NULL;
3538 pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
3539 bool need_balance = false;
3540 bool use_cow = false;
3541 block_t blkaddr = NULL_ADDR;
3542 int err = 0;
3543
3544 trace_f2fs_write_begin(inode, pos, len);
3545
3546 if (!f2fs_is_checkpoint_ready(sbi)) {
3547 err = -ENOSPC;
3548 goto fail;
3549 }
3550
3551 /*
3552 * We should check this at this moment to avoid deadlock on inode page
3553 * and #0 page. The locking rule for inline_data conversion should be:
3554 * lock_page(page #0) -> lock_page(inode_page)
3555 */
3556 if (index != 0) {
3557 err = f2fs_convert_inline_inode(inode);
3558 if (err)
3559 goto fail;
3560 }
3561
3562 #ifdef CONFIG_F2FS_FS_COMPRESSION
3563 if (f2fs_compressed_file(inode)) {
3564 int ret;
3565
3566 *fsdata = NULL;
3567
3568 if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
3569 goto repeat;
3570
3571 ret = f2fs_prepare_compress_overwrite(inode, pagep,
3572 index, fsdata);
3573 if (ret < 0) {
3574 err = ret;
3575 goto fail;
3576 } else if (ret) {
3577 return 0;
3578 }
3579 }
3580 #endif
3581
3582 repeat:
3583 /*
3584 * Do not use grab_cache_page_write_begin(), to avoid a deadlock due to
3585 * wait_for_stable_page. We will wait for that below with our own IO control.
3586 */
3587 page = f2fs_pagecache_get_page(mapping, index,
3588 FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
3589 if (!page) {
3590 err = -ENOMEM;
3591 goto fail;
3592 }
3593
3594 /* TODO: cluster can be compressed due to race with .writepage */
3595
3596 *pagep = page;
3597
3598 if (f2fs_is_atomic_file(inode))
3599 err = prepare_atomic_write_begin(sbi, page, pos, len,
3600 &blkaddr, &need_balance, &use_cow);
3601 else
3602 err = prepare_write_begin(sbi, page, pos, len,
3603 &blkaddr, &need_balance);
3604 if (err)
3605 goto fail;
3606
3607 if (need_balance && !IS_NOQUOTA(inode) &&
3608 has_not_enough_free_secs(sbi, 0, 0)) {
3609 unlock_page(page);
3610 f2fs_balance_fs(sbi, true);
3611 lock_page(page);
3612 if (page->mapping != mapping) {
3613 /* The page got truncated from under us */
3614 f2fs_put_page(page, 1);
3615 goto repeat;
3616 }
3617 }
3618
3619 f2fs_wait_on_page_writeback(page, DATA, false, true);
3620
3621 if (len == PAGE_SIZE || PageUptodate(page))
3622 return 0;
3623
3624 if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
3625 !f2fs_verity_in_progress(inode)) {
3626 zero_user_segment(page, len, PAGE_SIZE);
3627 return 0;
3628 }
3629
3630 if (blkaddr == NEW_ADDR) {
3631 zero_user_segment(page, 0, PAGE_SIZE);
3632 SetPageUptodate(page);
3633 } else {
3634 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3635 DATA_GENERIC_ENHANCE_READ)) {
3636 err = -EFSCORRUPTED;
3637 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
3638 goto fail;
3639 }
3640 err = f2fs_submit_page_read(use_cow ?
3641 F2FS_I(inode)->cow_inode : inode, page,
3642 blkaddr, 0, true);
3643 if (err)
3644 goto fail;
3645
3646 lock_page(page);
3647 if (unlikely(page->mapping != mapping)) {
3648 f2fs_put_page(page, 1);
3649 goto repeat;
3650 }
3651 if (unlikely(!PageUptodate(page))) {
3652 err = -EIO;
3653 goto fail;
3654 }
3655 }
3656 return 0;
3657
3658 fail:
3659 f2fs_put_page(page, 1);
3660 f2fs_write_failed(inode, pos + len);
3661 return err;
3662 }
3663
3664 static int f2fs_write_end(struct file *file,
3665 struct address_space *mapping,
3666 loff_t pos, unsigned len, unsigned copied,
3667 struct page *page, void *fsdata)
3668 {
3669 struct inode *inode = page->mapping->host;
3670
3671 trace_f2fs_write_end(inode, pos, len, copied);
3672
3673 /*
3674 * This should come from len == PAGE_SIZE, so we expect copied
3675 * to be PAGE_SIZE. Otherwise, we treat it as zero copied and
3676 * let generic_perform_write() try to copy the data again via copied=0.
3677 */
3678 if (!PageUptodate(page)) {
3679 if (unlikely(copied != len))
3680 copied = 0;
3681 else
3682 SetPageUptodate(page);
3683 }
3684
3685 #ifdef CONFIG_F2FS_FS_COMPRESSION
3686 /* overwrite compressed file */
3687 if (f2fs_compressed_file(inode) && fsdata) {
3688 f2fs_compress_write_end(inode, fsdata, page->index, copied);
3689 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3690
3691 if (pos + copied > i_size_read(inode) &&
3692 !f2fs_verity_in_progress(inode))
3693 f2fs_i_size_write(inode, pos + copied);
3694 return copied;
3695 }
3696 #endif
3697
3698 if (!copied)
3699 goto unlock_out;
3700
3701 set_page_dirty(page);
3702
3703 if (pos + copied > i_size_read(inode) &&
3704 !f2fs_verity_in_progress(inode)) {
3705 f2fs_i_size_write(inode, pos + copied);
3706 if (f2fs_is_atomic_file(inode))
3707 f2fs_i_size_write(F2FS_I(inode)->cow_inode,
3708 pos + copied);
3709 }
3710 unlock_out:
3711 f2fs_put_page(page, 1);
3712 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3713 return copied;
3714 }
3715
3716 void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
3717 {
3718 struct inode *inode = folio->mapping->host;
3719 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3720
3721 if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
3722 (offset || length != folio_size(folio)))
3723 return;
3724
3725 if (folio_test_dirty(folio)) {
3726 if (inode->i_ino == F2FS_META_INO(sbi)) {
3727 dec_page_count(sbi, F2FS_DIRTY_META);
3728 } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
3729 dec_page_count(sbi, F2FS_DIRTY_NODES);
3730 } else {
3731 inode_dec_dirty_pages(inode);
3732 f2fs_remove_dirty_inode(inode);
3733 }
3734 }
3735 clear_page_private_all(&folio->page);
3736 }
3737
3738 bool f2fs_release_folio(struct folio *folio, gfp_t wait)
3739 {
3740 /* If this is a dirty folio, keep its private data */
3741 if (folio_test_dirty(folio))
3742 return false;
3743
3744 clear_page_private_all(&folio->page);
3745 return true;
3746 }
3747
3748 static bool f2fs_dirty_data_folio(struct address_space *mapping,
3749 struct folio *folio)
3750 {
3751 struct inode *inode = mapping->host;
3752
3753 trace_f2fs_set_page_dirty(&folio->page, DATA);
3754
3755 if (!folio_test_uptodate(folio))
3756 folio_mark_uptodate(folio);
3757 BUG_ON(folio_test_swapcache(folio));
3758
3759 if (filemap_dirty_folio(mapping, folio)) {
3760 f2fs_update_dirty_folio(inode, folio);
3761 return true;
3762 }
3763 return false;
3764 }
3765
3766
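/*
 * bmap helper for compressed files: walk the dnode of the cluster that
 * contains @block and return its on-disk block address, or 0 if the
 * cluster is compressed or the block is not mapped.
 */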
3767 static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
3768 {
3769 #ifdef CONFIG_F2FS_FS_COMPRESSION
3770 struct dnode_of_data dn;
3771 sector_t start_idx, blknr = 0;
3772 int ret;
3773
3774 start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
3775
3776 set_new_dnode(&dn, inode, NULL, NULL, 0);
3777 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
3778 if (ret)
3779 return 0;
3780
3781 if (dn.data_blkaddr != COMPRESS_ADDR) {
3782 dn.ofs_in_node += block - start_idx;
3783 blknr = f2fs_data_blkaddr(&dn);
3784 if (!__is_valid_data_blkaddr(blknr))
3785 blknr = 0;
3786 }
3787
3788 f2fs_put_dnode(&dn);
3789 return blknr;
3790 #else
3791 return 0;
3792 #endif
3793 }
3794
3795
3796 static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
3797 {
3798 struct inode *inode = mapping->host;
3799 sector_t blknr = 0;
3800
3801 if (f2fs_has_inline_data(inode))
3802 goto out;
3803
3804 /* make sure the blocks are allocated by writing back any dirty pages */
3805 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
3806 filemap_write_and_wait(mapping);
3807
3808 /* Block number less than F2FS MAX BLOCKS */
3809 if (unlikely(block >= max_file_blocks(inode)))
3810 goto out;
3811
3812 if (f2fs_compressed_file(inode)) {
3813 blknr = f2fs_bmap_compress(inode, block);
3814 } else {
3815 struct f2fs_map_blocks map;
3816
3817 memset(&map, 0, sizeof(map));
3818 map.m_lblk = block;
3819 map.m_len = 1;
3820 map.m_next_pgofs = NULL;
3821 map.m_seg_type = NO_CHECK_TYPE;
3822
3823 if (!f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_BMAP))
3824 blknr = map.m_pblk;
3825 }
3826 out:
3827 trace_f2fs_bmap(inode, block, blknr);
3828 return blknr;
3829 }
3830
3831 #ifdef CONFIG_SWAP
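/*
 * Rewrite @blkcnt blocks starting at @start_blk, one section at a time, into
 * freshly allocated pinned sections: the pages are redirtied and written back
 * out-of-place (FI_OPU_WRITE) under the pin semaphore.  Used below to
 * section-align swapfile extents.
 */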
3832 static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
3833 unsigned int blkcnt)
3834 {
3835 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3836 unsigned int blkofs;
3837 unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
3838 unsigned int secidx = start_blk / blk_per_sec;
3839 unsigned int end_sec;
3840 int ret = 0;
3841
3842 if (!blkcnt)
3843 return 0;
3844 end_sec = secidx + (blkcnt - 1) / blk_per_sec;
3845
3846 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3847 filemap_invalidate_lock(inode->i_mapping);
3848
3849 set_inode_flag(inode, FI_ALIGNED_WRITE);
3850 set_inode_flag(inode, FI_OPU_WRITE);
3851
3852 for (; secidx <= end_sec; secidx++) {
3853 unsigned int blkofs_end = secidx == end_sec ?
3854 (blkcnt - 1) % blk_per_sec : blk_per_sec - 1;
3855
3856 f2fs_down_write(&sbi->pin_sem);
3857
3858 ret = f2fs_allocate_pinning_section(sbi);
3859 if (ret) {
3860 f2fs_up_write(&sbi->pin_sem);
3861 break;
3862 }
3863
3864 set_inode_flag(inode, FI_SKIP_WRITES);
3865
3866 for (blkofs = 0; blkofs <= blkofs_end; blkofs++) {
3867 struct page *page;
3868 unsigned int blkidx = secidx * blk_per_sec + blkofs;
3869
3870 page = f2fs_get_lock_data_page(inode, blkidx, true);
3871 if (IS_ERR(page)) {
3872 f2fs_up_write(&sbi->pin_sem);
3873 ret = PTR_ERR(page);
3874 goto done;
3875 }
3876
3877 set_page_dirty(page);
3878 f2fs_put_page(page, 1);
3879 }
3880
3881 clear_inode_flag(inode, FI_SKIP_WRITES);
3882
3883 ret = filemap_fdatawrite(inode->i_mapping);
3884
3885 f2fs_up_write(&sbi->pin_sem);
3886
3887 if (ret)
3888 break;
3889 }
3890
3891 done:
3892 clear_inode_flag(inode, FI_SKIP_WRITES);
3893 clear_inode_flag(inode, FI_OPU_WRITE);
3894 clear_inode_flag(inode, FI_ALIGNED_WRITE);
3895
3896 filemap_invalidate_unlock(inode->i_mapping);
3897 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3898
3899 return ret;
3900 }
3901
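/*
 * Walk the whole swapfile, verify that it has no holes, migrate any extent
 * that is not section-aligned or not in a pinnable area, and register each
 * contiguous run with the swap layer via add_swap_extent(), returning the
 * number of extents added.
 */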
3902 static int check_swap_activate(struct swap_info_struct *sis,
3903 struct file *swap_file, sector_t *span)
3904 {
3905 struct address_space *mapping = swap_file->f_mapping;
3906 struct inode *inode = mapping->host;
3907 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3908 block_t cur_lblock;
3909 block_t last_lblock;
3910 block_t pblock;
3911 block_t lowest_pblock = -1;
3912 block_t highest_pblock = 0;
3913 int nr_extents = 0;
3914 unsigned int nr_pblocks;
3915 unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
3916 unsigned int not_aligned = 0;
3917 int ret = 0;
3918
3919 /*
3920 * Map all the blocks into the extent list. This code doesn't try
3921 * to be very smart.
3922 */
3923 cur_lblock = 0;
3924 last_lblock = bytes_to_blks(inode, i_size_read(inode));
3925
3926 while (cur_lblock < last_lblock && cur_lblock < sis->max) {
3927 struct f2fs_map_blocks map;
3928 retry:
3929 cond_resched();
3930
3931 memset(&map, 0, sizeof(map));
3932 map.m_lblk = cur_lblock;
3933 map.m_len = last_lblock - cur_lblock;
3934 map.m_next_pgofs = NULL;
3935 map.m_next_extent = NULL;
3936 map.m_seg_type = NO_CHECK_TYPE;
3937 map.m_may_create = false;
3938
3939 ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
3940 if (ret)
3941 goto out;
3942
3943 /* hole */
3944 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
3945 f2fs_err(sbi, "Swapfile has holes");
3946 ret = -EINVAL;
3947 goto out;
3948 }
3949
3950 pblock = map.m_pblk;
3951 nr_pblocks = map.m_len;
3952
3953 if ((pblock - SM_I(sbi)->main_blkaddr) % blks_per_sec ||
3954 nr_pblocks % blks_per_sec ||
3955 !f2fs_valid_pinned_area(sbi, pblock)) {
3956 bool last_extent = false;
3957
3958 not_aligned++;
3959
3960 nr_pblocks = roundup(nr_pblocks, blks_per_sec);
3961 if (cur_lblock + nr_pblocks > sis->max)
3962 nr_pblocks -= blks_per_sec;
3963
3964 /* this extent is last one */
3965 if (!nr_pblocks) {
3966 nr_pblocks = last_lblock - cur_lblock;
3967 last_extent = true;
3968 }
3969
3970 ret = f2fs_migrate_blocks(inode, cur_lblock,
3971 nr_pblocks);
3972 if (ret) {
3973 if (ret == -ENOENT)
3974 ret = -EINVAL;
3975 goto out;
3976 }
3977
3978 if (!last_extent)
3979 goto retry;
3980 }
3981
3982 if (cur_lblock + nr_pblocks >= sis->max)
3983 nr_pblocks = sis->max - cur_lblock;
3984
3985 if (cur_lblock) { /* exclude the header page */
3986 if (pblock < lowest_pblock)
3987 lowest_pblock = pblock;
3988 if (pblock + nr_pblocks - 1 > highest_pblock)
3989 highest_pblock = pblock + nr_pblocks - 1;
3990 }
3991
3992 /*
3993 * We found a contiguous run of blocks; add it to the swap extent list.
3994 */
3995 ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
3996 if (ret < 0)
3997 goto out;
3998 nr_extents += ret;
3999 cur_lblock += nr_pblocks;
4000 }
4001 ret = nr_extents;
4002 *span = 1 + highest_pblock - lowest_pblock;
4003 if (cur_lblock == 0)
4004 cur_lblock = 1; /* force Empty message */
4005 sis->max = cur_lblock;
4006 sis->pages = cur_lblock - 1;
4007 sis->highest_bit = cur_lblock - 1;
4008 out:
4009 if (not_aligned)
4010 f2fs_warn(sbi, "Swapfile (%u) is not aligned to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%u * N)",
4011 not_aligned, blks_per_sec * F2FS_BLKSIZE);
4012 return ret;
4013 }
4014
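/*
 * ->swap_activate() hook: reject unsupported setups (non-regular files,
 * read-only filesystems, LFS mode on non-zoned devices), force the file into
 * a plain non-inline, non-compressed layout, flush it, build the swap extent
 * list, and finally pin the file so GC cannot move its blocks.
 */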
4015 static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4016 sector_t *span)
4017 {
4018 struct inode *inode = file_inode(file);
4019 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4020 int ret;
4021
4022 if (!S_ISREG(inode->i_mode))
4023 return -EINVAL;
4024
4025 if (f2fs_readonly(sbi->sb))
4026 return -EROFS;
4027
4028 if (f2fs_lfs_mode(sbi) && !f2fs_sb_has_blkzoned(sbi)) {
4029 f2fs_err(sbi, "Swapfile not supported in LFS mode");
4030 return -EINVAL;
4031 }
4032
4033 ret = f2fs_convert_inline_inode(inode);
4034 if (ret)
4035 return ret;
4036
4037 if (!f2fs_disable_compressed_file(inode))
4038 return -EINVAL;
4039
4040 f2fs_precache_extents(inode);
4041
4042 ret = filemap_fdatawrite(inode->i_mapping);
4043 if (ret < 0)
4044 return ret;
4045
4046 ret = check_swap_activate(sis, file, span);
4047 if (ret < 0)
4048 return ret;
4049
4050 stat_inc_swapfile_inode(inode);
4051 set_inode_flag(inode, FI_PIN_FILE);
4052 f2fs_update_time(sbi, REQ_TIME);
4053 return ret;
4054 }
4055
4056 static void f2fs_swap_deactivate(struct file *file)
4057 {
4058 struct inode *inode = file_inode(file);
4059
4060 stat_dec_swapfile_inode(inode);
4061 clear_inode_flag(inode, FI_PIN_FILE);
4062 }
4063 #else
4064 static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4065 sector_t *span)
4066 {
4067 return -EOPNOTSUPP;
4068 }
4069
4070 static void f2fs_swap_deactivate(struct file *file)
4071 {
4072 }
4073 #endif
4074
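/* address_space operations used for regular file data pages */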
4075 const struct address_space_operations f2fs_dblock_aops = {
4076 .read_folio = f2fs_read_data_folio,
4077 .readahead = f2fs_readahead,
4078 .writepage = f2fs_write_data_page,
4079 .writepages = f2fs_write_data_pages,
4080 .write_begin = f2fs_write_begin,
4081 .write_end = f2fs_write_end,
4082 .dirty_folio = f2fs_dirty_data_folio,
4083 .migrate_folio = filemap_migrate_folio,
4084 .invalidate_folio = f2fs_invalidate_folio,
4085 .release_folio = f2fs_release_folio,
4086 .bmap = f2fs_bmap,
4087 .swap_activate = f2fs_swap_activate,
4088 .swap_deactivate = f2fs_swap_deactivate,
4089 };
4090
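/*
 * Clear the PAGECACHE_TAG_DIRTY tag for @page in its mapping's xarray, under
 * the i_pages lock, so writeback no longer finds the page by the dirty tag.
 */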
4091 void f2fs_clear_page_cache_dirty_tag(struct page *page)
4092 {
4093 struct address_space *mapping = page_mapping(page);
4094 unsigned long flags;
4095
4096 xa_lock_irqsave(&mapping->i_pages, flags);
4097 __xa_clear_mark(&mapping->i_pages, page_index(page),
4098 PAGECACHE_TAG_DIRTY);
4099 xa_unlock_irqrestore(&mapping->i_pages, flags);
4100 }
4101
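/*
 * Set up the slab cache and mempool used to allocate bio_post_read_ctx
 * structures for read bios that need post-processing (decryption,
 * decompression, verity).
 */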
4102 int __init f2fs_init_post_read_processing(void)
4103 {
4104 bio_post_read_ctx_cache =
4105 kmem_cache_create("f2fs_bio_post_read_ctx",
4106 sizeof(struct bio_post_read_ctx), 0, 0, NULL);
4107 if (!bio_post_read_ctx_cache)
4108 goto fail;
4109 bio_post_read_ctx_pool =
4110 mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
4111 bio_post_read_ctx_cache);
4112 if (!bio_post_read_ctx_pool)
4113 goto fail_free_cache;
4114 return 0;
4115
4116 fail_free_cache:
4117 kmem_cache_destroy(bio_post_read_ctx_cache);
4118 fail:
4119 return -ENOMEM;
4120 }
4121
4122 void f2fs_destroy_post_read_processing(void)
4123 {
4124 mempool_destroy(bio_post_read_ctx_pool);
4125 kmem_cache_destroy(bio_post_read_ctx_cache);
4126 }
4127
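/*
 * Allocate the per-superblock workqueue that runs the post-read steps; it is
 * only needed when encryption, verity or compression is enabled on the
 * filesystem.
 */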
4128 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
4129 {
4130 if (!f2fs_sb_has_encrypt(sbi) &&
4131 !f2fs_sb_has_verity(sbi) &&
4132 !f2fs_sb_has_compression(sbi))
4133 return 0;
4134
4135 sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
4136 WQ_UNBOUND | WQ_HIGHPRI,
4137 num_online_cpus());
4138 return sbi->post_read_wq ? 0 : -ENOMEM;
4139 }
4140
4141 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
4142 {
4143 if (sbi->post_read_wq)
4144 destroy_workqueue(sbi->post_read_wq);
4145 }
4146
4147 int __init f2fs_init_bio_entry_cache(void)
4148 {
4149 bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
4150 sizeof(struct bio_entry));
4151 return bio_entry_slab ? 0 : -ENOMEM;
4152 }
4153
4154 void f2fs_destroy_bio_entry_cache(void)
4155 {
4156 kmem_cache_destroy(bio_entry_slab);
4157 }
4158
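/*
 * iomap_begin() callback used for direct I/O: map the requested byte range
 * with f2fs_map_blocks() and translate the result into an iomap.  Holes are
 * only acceptable for reads; a write over an unmapped range returns -ENOTBLK
 * so the caller can fall back to buffered I/O.
 */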
4159 static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
4160 unsigned int flags, struct iomap *iomap,
4161 struct iomap *srcmap)
4162 {
4163 struct f2fs_map_blocks map = {};
4164 pgoff_t next_pgofs = 0;
4165 int err;
4166
4167 map.m_lblk = bytes_to_blks(inode, offset);
4168 map.m_len = bytes_to_blks(inode, offset + length - 1) - map.m_lblk + 1;
4169 map.m_next_pgofs = &next_pgofs;
4170 map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
4171 if (flags & IOMAP_WRITE)
4172 map.m_may_create = true;
4173
4174 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DIO);
4175 if (err)
4176 return err;
4177
4178 iomap->offset = blks_to_bytes(inode, map.m_lblk);
4179
4180 /*
4181 * When inline encryption is enabled, sometimes I/O to an encrypted file
4182 * has to be broken up to guarantee DUN contiguity. Handle this by
4183 * limiting the length of the mapping returned.
4184 */
4185 map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
4186
4187 /*
4188 * We should never see delalloc or compressed extents here based on
4189 * prior flushing and checks.
4190 */
4191 if (WARN_ON_ONCE(map.m_pblk == NEW_ADDR))
4192 return -EINVAL;
4193 if (WARN_ON_ONCE(map.m_pblk == COMPRESS_ADDR))
4194 return -EINVAL;
4195
4196 if (map.m_flags & F2FS_MAP_MAPPED) {
4197 iomap->length = blks_to_bytes(inode, map.m_len);
4198 iomap->type = IOMAP_MAPPED;
4199 iomap->flags |= IOMAP_F_MERGED;
4200 iomap->bdev = map.m_bdev;
4201 iomap->addr = blks_to_bytes(inode, map.m_pblk);
4202 } else {
4203 if (flags & IOMAP_WRITE)
4204 return -ENOTBLK;
4205 iomap->length = blks_to_bytes(inode, next_pgofs) -
4206 iomap->offset;
4207 iomap->type = IOMAP_HOLE;
4208 iomap->addr = IOMAP_NULL_ADDR;
4209 }
4210
4211 if (map.m_flags & F2FS_MAP_NEW)
4212 iomap->flags |= IOMAP_F_NEW;
4213 if ((inode->i_state & I_DIRTY_DATASYNC) ||
4214 offset + length > i_size_read(inode))
4215 iomap->flags |= IOMAP_F_DIRTY;
4216
4217 return 0;
4218 }
4219
4220 const struct iomap_ops f2fs_iomap_ops = {
4221 .iomap_begin = f2fs_iomap_begin,
4222 };
4223