1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * fs/f2fs/segment.c 4 * 5 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 6 * http://www.samsung.com/ 7 */ 8 #include <linux/fs.h> 9 #include <linux/f2fs_fs.h> 10 #include <linux/bio.h> 11 #include <linux/blkdev.h> 12 #include <linux/sched/mm.h> 13 #include <linux/prefetch.h> 14 #include <linux/kthread.h> 15 #include <linux/swap.h> 16 #include <linux/timer.h> 17 #include <linux/freezer.h> 18 #include <linux/sched/signal.h> 19 #include <linux/random.h> 20 21 #include "f2fs.h" 22 #include "segment.h" 23 #include "node.h" 24 #include "gc.h" 25 #include "iostat.h" 26 #include <trace/events/f2fs.h> 27 28 #define __reverse_ffz(x) __reverse_ffs(~(x)) 29 30 static struct kmem_cache *discard_entry_slab; 31 static struct kmem_cache *discard_cmd_slab; 32 static struct kmem_cache *sit_entry_set_slab; 33 static struct kmem_cache *revoke_entry_slab; 34 35 static unsigned long __reverse_ulong(unsigned char *str) 36 { 37 unsigned long tmp = 0; 38 int shift = 24, idx = 0; 39 40 #if BITS_PER_LONG == 64 41 shift = 56; 42 #endif 43 while (shift >= 0) { 44 tmp |= (unsigned long)str[idx++] << shift; 45 shift -= BITS_PER_BYTE; 46 } 47 return tmp; 48 } 49 50 /* 51 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since 52 * MSB and LSB are reversed in a byte by f2fs_set_bit. 53 */ 54 static inline unsigned long __reverse_ffs(unsigned long word) 55 { 56 int num = 0; 57 58 #if BITS_PER_LONG == 64 59 if ((word & 0xffffffff00000000UL) == 0) 60 num += 32; 61 else 62 word >>= 32; 63 #endif 64 if ((word & 0xffff0000) == 0) 65 num += 16; 66 else 67 word >>= 16; 68 69 if ((word & 0xff00) == 0) 70 num += 8; 71 else 72 word >>= 8; 73 74 if ((word & 0xf0) == 0) 75 num += 4; 76 else 77 word >>= 4; 78 79 if ((word & 0xc) == 0) 80 num += 2; 81 else 82 word >>= 2; 83 84 if ((word & 0x2) == 0) 85 num += 1; 86 return num; 87 } 88 89 /* 90 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because 91 * f2fs_set_bit makes MSB and LSB reversed in a byte. 92 * @size must be integral times of unsigned long. 93 * Example: 94 * MSB <--> LSB 95 * f2fs_set_bit(0, bitmap) => 1000 0000 96 * f2fs_set_bit(7, bitmap) => 0000 0001 97 */ 98 static unsigned long __find_rev_next_bit(const unsigned long *addr, 99 unsigned long size, unsigned long offset) 100 { 101 const unsigned long *p = addr + BIT_WORD(offset); 102 unsigned long result = size; 103 unsigned long tmp; 104 105 if (offset >= size) 106 return size; 107 108 size -= (offset & ~(BITS_PER_LONG - 1)); 109 offset %= BITS_PER_LONG; 110 111 while (1) { 112 if (*p == 0) 113 goto pass; 114 115 tmp = __reverse_ulong((unsigned char *)p); 116 117 tmp &= ~0UL >> offset; 118 if (size < BITS_PER_LONG) 119 tmp &= (~0UL << (BITS_PER_LONG - size)); 120 if (tmp) 121 goto found; 122 pass: 123 if (size <= BITS_PER_LONG) 124 break; 125 size -= BITS_PER_LONG; 126 offset = 0; 127 p++; 128 } 129 return result; 130 found: 131 return result - size + __reverse_ffs(tmp); 132 } 133 134 static unsigned long __find_rev_next_zero_bit(const unsigned long *addr, 135 unsigned long size, unsigned long offset) 136 { 137 const unsigned long *p = addr + BIT_WORD(offset); 138 unsigned long result = size; 139 unsigned long tmp; 140 141 if (offset >= size) 142 return size; 143 144 size -= (offset & ~(BITS_PER_LONG - 1)); 145 offset %= BITS_PER_LONG; 146 147 while (1) { 148 if (*p == ~0UL) 149 goto pass; 150 151 tmp = __reverse_ulong((unsigned char *)p); 152 153 if (offset) 154 tmp |= ~0UL << (BITS_PER_LONG - offset); 155 if (size < BITS_PER_LONG) 156 tmp |= ~0UL >> size; 157 if (tmp != ~0UL) 158 goto found; 159 pass: 160 if (size <= BITS_PER_LONG) 161 break; 162 size -= BITS_PER_LONG; 163 offset = 0; 164 p++; 165 } 166 return result; 167 found: 168 return result - size + __reverse_ffz(tmp); 169 } 170 171 bool f2fs_need_SSR(struct f2fs_sb_info *sbi) 172 { 173 int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES); 174 int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS); 175 int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA); 176 177 if (f2fs_lfs_mode(sbi)) 178 return false; 179 if (sbi->gc_mode == GC_URGENT_HIGH) 180 return true; 181 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 182 return true; 183 184 return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs + 185 SM_I(sbi)->min_ssr_sections + reserved_sections(sbi)); 186 } 187 188 void f2fs_abort_atomic_write(struct inode *inode, bool clean) 189 { 190 struct f2fs_inode_info *fi = F2FS_I(inode); 191 192 if (!f2fs_is_atomic_file(inode)) 193 return; 194 195 if (clean) 196 truncate_inode_pages_final(inode->i_mapping); 197 198 release_atomic_write_cnt(inode); 199 clear_inode_flag(inode, FI_ATOMIC_COMMITTED); 200 clear_inode_flag(inode, FI_ATOMIC_REPLACE); 201 clear_inode_flag(inode, FI_ATOMIC_FILE); 202 if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) { 203 clear_inode_flag(inode, FI_ATOMIC_DIRTIED); 204 f2fs_mark_inode_dirty_sync(inode, true); 205 } 206 stat_dec_atomic_inode(inode); 207 208 F2FS_I(inode)->atomic_write_task = NULL; 209 210 if (clean) { 211 f2fs_i_size_write(inode, fi->original_i_size); 212 fi->original_i_size = 0; 213 } 214 /* avoid stale dirty inode during eviction */ 215 sync_inode_metadata(inode, 0); 216 } 217 218 static int __replace_atomic_write_block(struct inode *inode, pgoff_t index, 219 block_t new_addr, block_t *old_addr, bool recover) 220 { 221 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 222 struct dnode_of_data dn; 223 struct node_info ni; 224 int err; 225 226 retry: 227 set_new_dnode(&dn, inode, NULL, NULL, 0); 228 err = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE); 229 if (err) { 230 if (err == -ENOMEM) { 231 f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT); 232 goto retry; 233 } 234 return err; 235 } 236 237 err = f2fs_get_node_info(sbi, dn.nid, &ni, false); 238 if (err) { 239 f2fs_put_dnode(&dn); 240 return err; 241 } 242 243 if (recover) { 244 /* dn.data_blkaddr is always valid */ 245 if (!__is_valid_data_blkaddr(new_addr)) { 246 if (new_addr == NULL_ADDR) 247 dec_valid_block_count(sbi, inode, 1); 248 f2fs_invalidate_blocks(sbi, dn.data_blkaddr); 249 f2fs_update_data_blkaddr(&dn, new_addr); 250 } else { 251 f2fs_replace_block(sbi, &dn, dn.data_blkaddr, 252 new_addr, ni.version, true, true); 253 } 254 } else { 255 blkcnt_t count = 1; 256 257 err = inc_valid_block_count(sbi, inode, &count, true); 258 if (err) { 259 f2fs_put_dnode(&dn); 260 return err; 261 } 262 263 *old_addr = dn.data_blkaddr; 264 f2fs_truncate_data_blocks_range(&dn, 1); 265 dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count); 266 267 f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr, 268 ni.version, true, false); 269 } 270 271 f2fs_put_dnode(&dn); 272 273 trace_f2fs_replace_atomic_write_block(inode, F2FS_I(inode)->cow_inode, 274 index, old_addr ? *old_addr : 0, new_addr, recover); 275 return 0; 276 } 277 278 static void __complete_revoke_list(struct inode *inode, struct list_head *head, 279 bool revoke) 280 { 281 struct revoke_entry *cur, *tmp; 282 pgoff_t start_index = 0; 283 bool truncate = is_inode_flag_set(inode, FI_ATOMIC_REPLACE); 284 285 list_for_each_entry_safe(cur, tmp, head, list) { 286 if (revoke) { 287 __replace_atomic_write_block(inode, cur->index, 288 cur->old_addr, NULL, true); 289 } else if (truncate) { 290 f2fs_truncate_hole(inode, start_index, cur->index); 291 start_index = cur->index + 1; 292 } 293 294 list_del(&cur->list); 295 kmem_cache_free(revoke_entry_slab, cur); 296 } 297 298 if (!revoke && truncate) 299 f2fs_do_truncate_blocks(inode, start_index * PAGE_SIZE, false); 300 } 301 302 static int __f2fs_commit_atomic_write(struct inode *inode) 303 { 304 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 305 struct f2fs_inode_info *fi = F2FS_I(inode); 306 struct inode *cow_inode = fi->cow_inode; 307 struct revoke_entry *new; 308 struct list_head revoke_list; 309 block_t blkaddr; 310 struct dnode_of_data dn; 311 pgoff_t len = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 312 pgoff_t off = 0, blen, index; 313 int ret = 0, i; 314 315 INIT_LIST_HEAD(&revoke_list); 316 317 while (len) { 318 blen = min_t(pgoff_t, ADDRS_PER_BLOCK(cow_inode), len); 319 320 set_new_dnode(&dn, cow_inode, NULL, NULL, 0); 321 ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA); 322 if (ret && ret != -ENOENT) { 323 goto out; 324 } else if (ret == -ENOENT) { 325 ret = 0; 326 if (dn.max_level == 0) 327 goto out; 328 goto next; 329 } 330 331 blen = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, cow_inode), 332 len); 333 index = off; 334 for (i = 0; i < blen; i++, dn.ofs_in_node++, index++) { 335 blkaddr = f2fs_data_blkaddr(&dn); 336 337 if (!__is_valid_data_blkaddr(blkaddr)) { 338 continue; 339 } else if (!f2fs_is_valid_blkaddr(sbi, blkaddr, 340 DATA_GENERIC_ENHANCE)) { 341 f2fs_put_dnode(&dn); 342 ret = -EFSCORRUPTED; 343 f2fs_handle_error(sbi, 344 ERROR_INVALID_BLKADDR); 345 goto out; 346 } 347 348 new = f2fs_kmem_cache_alloc(revoke_entry_slab, GFP_NOFS, 349 true, NULL); 350 351 ret = __replace_atomic_write_block(inode, index, blkaddr, 352 &new->old_addr, false); 353 if (ret) { 354 f2fs_put_dnode(&dn); 355 kmem_cache_free(revoke_entry_slab, new); 356 goto out; 357 } 358 359 f2fs_update_data_blkaddr(&dn, NULL_ADDR); 360 new->index = index; 361 list_add_tail(&new->list, &revoke_list); 362 } 363 f2fs_put_dnode(&dn); 364 next: 365 off += blen; 366 len -= blen; 367 } 368 369 out: 370 if (ret) { 371 sbi->revoked_atomic_block += fi->atomic_write_cnt; 372 } else { 373 sbi->committed_atomic_block += fi->atomic_write_cnt; 374 set_inode_flag(inode, FI_ATOMIC_COMMITTED); 375 if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) { 376 clear_inode_flag(inode, FI_ATOMIC_DIRTIED); 377 f2fs_mark_inode_dirty_sync(inode, true); 378 } 379 } 380 381 __complete_revoke_list(inode, &revoke_list, ret ? true : false); 382 383 return ret; 384 } 385 386 int f2fs_commit_atomic_write(struct inode *inode) 387 { 388 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 389 struct f2fs_inode_info *fi = F2FS_I(inode); 390 int err; 391 392 err = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); 393 if (err) 394 return err; 395 396 f2fs_down_write(&fi->i_gc_rwsem[WRITE]); 397 f2fs_lock_op(sbi); 398 399 err = __f2fs_commit_atomic_write(inode); 400 401 f2fs_unlock_op(sbi); 402 f2fs_up_write(&fi->i_gc_rwsem[WRITE]); 403 404 return err; 405 } 406 407 /* 408 * This function balances dirty node and dentry pages. 409 * In addition, it controls garbage collection. 410 */ 411 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need) 412 { 413 if (time_to_inject(sbi, FAULT_CHECKPOINT)) 414 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_FAULT_INJECT); 415 416 /* balance_fs_bg is able to be pending */ 417 if (need && excess_cached_nats(sbi)) 418 f2fs_balance_fs_bg(sbi, false); 419 420 if (!f2fs_is_checkpoint_ready(sbi)) 421 return; 422 423 /* 424 * We should do GC or end up with checkpoint, if there are so many dirty 425 * dir/node pages without enough free segments. 426 */ 427 if (has_enough_free_secs(sbi, 0, 0)) 428 return; 429 430 if (test_opt(sbi, GC_MERGE) && sbi->gc_thread && 431 sbi->gc_thread->f2fs_gc_task) { 432 DEFINE_WAIT(wait); 433 434 prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait, 435 TASK_UNINTERRUPTIBLE); 436 wake_up(&sbi->gc_thread->gc_wait_queue_head); 437 io_schedule(); 438 finish_wait(&sbi->gc_thread->fggc_wq, &wait); 439 } else { 440 struct f2fs_gc_control gc_control = { 441 .victim_segno = NULL_SEGNO, 442 .init_gc_type = BG_GC, 443 .no_bg_gc = true, 444 .should_migrate_blocks = false, 445 .err_gc_skipped = false, 446 .nr_free_secs = 1 }; 447 f2fs_down_write(&sbi->gc_lock); 448 stat_inc_gc_call_count(sbi, FOREGROUND); 449 f2fs_gc(sbi, &gc_control); 450 } 451 } 452 453 static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi) 454 { 455 int factor = f2fs_rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2; 456 unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS); 457 unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA); 458 unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES); 459 unsigned int meta = get_pages(sbi, F2FS_DIRTY_META); 460 unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA); 461 unsigned int threshold = (factor * DEFAULT_DIRTY_THRESHOLD) << 462 sbi->log_blocks_per_seg; 463 unsigned int global_threshold = threshold * 3 / 2; 464 465 if (dents >= threshold || qdata >= threshold || 466 nodes >= threshold || meta >= threshold || 467 imeta >= threshold) 468 return true; 469 return dents + qdata + nodes + meta + imeta > global_threshold; 470 } 471 472 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg) 473 { 474 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) 475 return; 476 477 /* try to shrink extent cache when there is no enough memory */ 478 if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE)) 479 f2fs_shrink_read_extent_tree(sbi, 480 READ_EXTENT_CACHE_SHRINK_NUMBER); 481 482 /* try to shrink age extent cache when there is no enough memory */ 483 if (!f2fs_available_free_memory(sbi, AGE_EXTENT_CACHE)) 484 f2fs_shrink_age_extent_tree(sbi, 485 AGE_EXTENT_CACHE_SHRINK_NUMBER); 486 487 /* check the # of cached NAT entries */ 488 if (!f2fs_available_free_memory(sbi, NAT_ENTRIES)) 489 f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK); 490 491 if (!f2fs_available_free_memory(sbi, FREE_NIDS)) 492 f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS); 493 else 494 f2fs_build_free_nids(sbi, false, false); 495 496 if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) || 497 excess_prefree_segs(sbi) || !f2fs_space_for_roll_forward(sbi)) 498 goto do_sync; 499 500 /* there is background inflight IO or foreground operation recently */ 501 if (is_inflight_io(sbi, REQ_TIME) || 502 (!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem))) 503 return; 504 505 /* exceed periodical checkpoint timeout threshold */ 506 if (f2fs_time_over(sbi, CP_TIME)) 507 goto do_sync; 508 509 /* checkpoint is the only way to shrink partial cached entries */ 510 if (f2fs_available_free_memory(sbi, NAT_ENTRIES) && 511 f2fs_available_free_memory(sbi, INO_ENTRIES)) 512 return; 513 514 do_sync: 515 if (test_opt(sbi, DATA_FLUSH) && from_bg) { 516 struct blk_plug plug; 517 518 mutex_lock(&sbi->flush_lock); 519 520 blk_start_plug(&plug); 521 f2fs_sync_dirty_inodes(sbi, FILE_INODE, false); 522 blk_finish_plug(&plug); 523 524 mutex_unlock(&sbi->flush_lock); 525 } 526 stat_inc_cp_call_count(sbi, BACKGROUND); 527 f2fs_sync_fs(sbi->sb, 1); 528 } 529 530 static int __submit_flush_wait(struct f2fs_sb_info *sbi, 531 struct block_device *bdev) 532 { 533 int ret = blkdev_issue_flush(bdev); 534 535 trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER), 536 test_opt(sbi, FLUSH_MERGE), ret); 537 if (!ret) 538 f2fs_update_iostat(sbi, NULL, FS_FLUSH_IO, 0); 539 return ret; 540 } 541 542 static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino) 543 { 544 int ret = 0; 545 int i; 546 547 if (!f2fs_is_multi_device(sbi)) 548 return __submit_flush_wait(sbi, sbi->sb->s_bdev); 549 550 for (i = 0; i < sbi->s_ndevs; i++) { 551 if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO)) 552 continue; 553 ret = __submit_flush_wait(sbi, FDEV(i).bdev); 554 if (ret) 555 break; 556 } 557 return ret; 558 } 559 560 static int issue_flush_thread(void *data) 561 { 562 struct f2fs_sb_info *sbi = data; 563 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; 564 wait_queue_head_t *q = &fcc->flush_wait_queue; 565 repeat: 566 if (kthread_should_stop()) 567 return 0; 568 569 if (!llist_empty(&fcc->issue_list)) { 570 struct flush_cmd *cmd, *next; 571 int ret; 572 573 fcc->dispatch_list = llist_del_all(&fcc->issue_list); 574 fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list); 575 576 cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode); 577 578 ret = submit_flush_wait(sbi, cmd->ino); 579 atomic_inc(&fcc->issued_flush); 580 581 llist_for_each_entry_safe(cmd, next, 582 fcc->dispatch_list, llnode) { 583 cmd->ret = ret; 584 complete(&cmd->wait); 585 } 586 fcc->dispatch_list = NULL; 587 } 588 589 wait_event_interruptible(*q, 590 kthread_should_stop() || !llist_empty(&fcc->issue_list)); 591 goto repeat; 592 } 593 594 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino) 595 { 596 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; 597 struct flush_cmd cmd; 598 int ret; 599 600 if (test_opt(sbi, NOBARRIER)) 601 return 0; 602 603 if (!test_opt(sbi, FLUSH_MERGE)) { 604 atomic_inc(&fcc->queued_flush); 605 ret = submit_flush_wait(sbi, ino); 606 atomic_dec(&fcc->queued_flush); 607 atomic_inc(&fcc->issued_flush); 608 return ret; 609 } 610 611 if (atomic_inc_return(&fcc->queued_flush) == 1 || 612 f2fs_is_multi_device(sbi)) { 613 ret = submit_flush_wait(sbi, ino); 614 atomic_dec(&fcc->queued_flush); 615 616 atomic_inc(&fcc->issued_flush); 617 return ret; 618 } 619 620 cmd.ino = ino; 621 init_completion(&cmd.wait); 622 623 llist_add(&cmd.llnode, &fcc->issue_list); 624 625 /* 626 * update issue_list before we wake up issue_flush thread, this 627 * smp_mb() pairs with another barrier in ___wait_event(), see 628 * more details in comments of waitqueue_active(). 629 */ 630 smp_mb(); 631 632 if (waitqueue_active(&fcc->flush_wait_queue)) 633 wake_up(&fcc->flush_wait_queue); 634 635 if (fcc->f2fs_issue_flush) { 636 wait_for_completion(&cmd.wait); 637 atomic_dec(&fcc->queued_flush); 638 } else { 639 struct llist_node *list; 640 641 list = llist_del_all(&fcc->issue_list); 642 if (!list) { 643 wait_for_completion(&cmd.wait); 644 atomic_dec(&fcc->queued_flush); 645 } else { 646 struct flush_cmd *tmp, *next; 647 648 ret = submit_flush_wait(sbi, ino); 649 650 llist_for_each_entry_safe(tmp, next, list, llnode) { 651 if (tmp == &cmd) { 652 cmd.ret = ret; 653 atomic_dec(&fcc->queued_flush); 654 continue; 655 } 656 tmp->ret = ret; 657 complete(&tmp->wait); 658 } 659 } 660 } 661 662 return cmd.ret; 663 } 664 665 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi) 666 { 667 dev_t dev = sbi->sb->s_bdev->bd_dev; 668 struct flush_cmd_control *fcc; 669 670 if (SM_I(sbi)->fcc_info) { 671 fcc = SM_I(sbi)->fcc_info; 672 if (fcc->f2fs_issue_flush) 673 return 0; 674 goto init_thread; 675 } 676 677 fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL); 678 if (!fcc) 679 return -ENOMEM; 680 atomic_set(&fcc->issued_flush, 0); 681 atomic_set(&fcc->queued_flush, 0); 682 init_waitqueue_head(&fcc->flush_wait_queue); 683 init_llist_head(&fcc->issue_list); 684 SM_I(sbi)->fcc_info = fcc; 685 if (!test_opt(sbi, FLUSH_MERGE)) 686 return 0; 687 688 init_thread: 689 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi, 690 "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev)); 691 if (IS_ERR(fcc->f2fs_issue_flush)) { 692 int err = PTR_ERR(fcc->f2fs_issue_flush); 693 694 fcc->f2fs_issue_flush = NULL; 695 return err; 696 } 697 698 return 0; 699 } 700 701 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free) 702 { 703 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; 704 705 if (fcc && fcc->f2fs_issue_flush) { 706 struct task_struct *flush_thread = fcc->f2fs_issue_flush; 707 708 fcc->f2fs_issue_flush = NULL; 709 kthread_stop(flush_thread); 710 } 711 if (free) { 712 kfree(fcc); 713 SM_I(sbi)->fcc_info = NULL; 714 } 715 } 716 717 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi) 718 { 719 int ret = 0, i; 720 721 if (!f2fs_is_multi_device(sbi)) 722 return 0; 723 724 if (test_opt(sbi, NOBARRIER)) 725 return 0; 726 727 for (i = 1; i < sbi->s_ndevs; i++) { 728 int count = DEFAULT_RETRY_IO_COUNT; 729 730 if (!f2fs_test_bit(i, (char *)&sbi->dirty_device)) 731 continue; 732 733 do { 734 ret = __submit_flush_wait(sbi, FDEV(i).bdev); 735 if (ret) 736 f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT); 737 } while (ret && --count); 738 739 if (ret) { 740 f2fs_stop_checkpoint(sbi, false, 741 STOP_CP_REASON_FLUSH_FAIL); 742 break; 743 } 744 745 spin_lock(&sbi->dev_lock); 746 f2fs_clear_bit(i, (char *)&sbi->dirty_device); 747 spin_unlock(&sbi->dev_lock); 748 } 749 750 return ret; 751 } 752 753 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, 754 enum dirty_type dirty_type) 755 { 756 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 757 758 /* need not be added */ 759 if (IS_CURSEG(sbi, segno)) 760 return; 761 762 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type])) 763 dirty_i->nr_dirty[dirty_type]++; 764 765 if (dirty_type == DIRTY) { 766 struct seg_entry *sentry = get_seg_entry(sbi, segno); 767 enum dirty_type t = sentry->type; 768 769 if (unlikely(t >= DIRTY)) { 770 f2fs_bug_on(sbi, 1); 771 return; 772 } 773 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t])) 774 dirty_i->nr_dirty[t]++; 775 776 if (__is_large_section(sbi)) { 777 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); 778 block_t valid_blocks = 779 get_valid_blocks(sbi, segno, true); 780 781 f2fs_bug_on(sbi, unlikely(!valid_blocks || 782 valid_blocks == CAP_BLKS_PER_SEC(sbi))); 783 784 if (!IS_CURSEC(sbi, secno)) 785 set_bit(secno, dirty_i->dirty_secmap); 786 } 787 } 788 } 789 790 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, 791 enum dirty_type dirty_type) 792 { 793 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 794 block_t valid_blocks; 795 796 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type])) 797 dirty_i->nr_dirty[dirty_type]--; 798 799 if (dirty_type == DIRTY) { 800 struct seg_entry *sentry = get_seg_entry(sbi, segno); 801 enum dirty_type t = sentry->type; 802 803 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t])) 804 dirty_i->nr_dirty[t]--; 805 806 valid_blocks = get_valid_blocks(sbi, segno, true); 807 if (valid_blocks == 0) { 808 clear_bit(GET_SEC_FROM_SEG(sbi, segno), 809 dirty_i->victim_secmap); 810 #ifdef CONFIG_F2FS_CHECK_FS 811 clear_bit(segno, SIT_I(sbi)->invalid_segmap); 812 #endif 813 } 814 if (__is_large_section(sbi)) { 815 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); 816 817 if (!valid_blocks || 818 valid_blocks == CAP_BLKS_PER_SEC(sbi)) { 819 clear_bit(secno, dirty_i->dirty_secmap); 820 return; 821 } 822 823 if (!IS_CURSEC(sbi, secno)) 824 set_bit(secno, dirty_i->dirty_secmap); 825 } 826 } 827 } 828 829 /* 830 * Should not occur error such as -ENOMEM. 831 * Adding dirty entry into seglist is not critical operation. 832 * If a given segment is one of current working segments, it won't be added. 833 */ 834 static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) 835 { 836 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 837 unsigned short valid_blocks, ckpt_valid_blocks; 838 unsigned int usable_blocks; 839 840 if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno)) 841 return; 842 843 usable_blocks = f2fs_usable_blks_in_seg(sbi, segno); 844 mutex_lock(&dirty_i->seglist_lock); 845 846 valid_blocks = get_valid_blocks(sbi, segno, false); 847 ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false); 848 849 if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) || 850 ckpt_valid_blocks == usable_blocks)) { 851 __locate_dirty_segment(sbi, segno, PRE); 852 __remove_dirty_segment(sbi, segno, DIRTY); 853 } else if (valid_blocks < usable_blocks) { 854 __locate_dirty_segment(sbi, segno, DIRTY); 855 } else { 856 /* Recovery routine with SSR needs this */ 857 __remove_dirty_segment(sbi, segno, DIRTY); 858 } 859 860 mutex_unlock(&dirty_i->seglist_lock); 861 } 862 863 /* This moves currently empty dirty blocks to prefree. Must hold seglist_lock */ 864 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi) 865 { 866 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 867 unsigned int segno; 868 869 mutex_lock(&dirty_i->seglist_lock); 870 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) { 871 if (get_valid_blocks(sbi, segno, false)) 872 continue; 873 if (IS_CURSEG(sbi, segno)) 874 continue; 875 __locate_dirty_segment(sbi, segno, PRE); 876 __remove_dirty_segment(sbi, segno, DIRTY); 877 } 878 mutex_unlock(&dirty_i->seglist_lock); 879 } 880 881 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi) 882 { 883 int ovp_hole_segs = 884 (overprovision_segments(sbi) - reserved_segments(sbi)); 885 block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg; 886 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 887 block_t holes[2] = {0, 0}; /* DATA and NODE */ 888 block_t unusable; 889 struct seg_entry *se; 890 unsigned int segno; 891 892 mutex_lock(&dirty_i->seglist_lock); 893 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) { 894 se = get_seg_entry(sbi, segno); 895 if (IS_NODESEG(se->type)) 896 holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) - 897 se->valid_blocks; 898 else 899 holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) - 900 se->valid_blocks; 901 } 902 mutex_unlock(&dirty_i->seglist_lock); 903 904 unusable = max(holes[DATA], holes[NODE]); 905 if (unusable > ovp_holes) 906 return unusable - ovp_holes; 907 return 0; 908 } 909 910 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable) 911 { 912 int ovp_hole_segs = 913 (overprovision_segments(sbi) - reserved_segments(sbi)); 914 if (unusable > F2FS_OPTION(sbi).unusable_cap) 915 return -EAGAIN; 916 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) && 917 dirty_segments(sbi) > ovp_hole_segs) 918 return -EAGAIN; 919 return 0; 920 } 921 922 /* This is only used by SBI_CP_DISABLED */ 923 static unsigned int get_free_segment(struct f2fs_sb_info *sbi) 924 { 925 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 926 unsigned int segno = 0; 927 928 mutex_lock(&dirty_i->seglist_lock); 929 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) { 930 if (get_valid_blocks(sbi, segno, false)) 931 continue; 932 if (get_ckpt_valid_blocks(sbi, segno, false)) 933 continue; 934 mutex_unlock(&dirty_i->seglist_lock); 935 return segno; 936 } 937 mutex_unlock(&dirty_i->seglist_lock); 938 return NULL_SEGNO; 939 } 940 941 static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi, 942 struct block_device *bdev, block_t lstart, 943 block_t start, block_t len) 944 { 945 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 946 struct list_head *pend_list; 947 struct discard_cmd *dc; 948 949 f2fs_bug_on(sbi, !len); 950 951 pend_list = &dcc->pend_list[plist_idx(len)]; 952 953 dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS, true, NULL); 954 INIT_LIST_HEAD(&dc->list); 955 dc->bdev = bdev; 956 dc->di.lstart = lstart; 957 dc->di.start = start; 958 dc->di.len = len; 959 dc->ref = 0; 960 dc->state = D_PREP; 961 dc->queued = 0; 962 dc->error = 0; 963 init_completion(&dc->wait); 964 list_add_tail(&dc->list, pend_list); 965 spin_lock_init(&dc->lock); 966 dc->bio_ref = 0; 967 atomic_inc(&dcc->discard_cmd_cnt); 968 dcc->undiscard_blks += len; 969 970 return dc; 971 } 972 973 static bool f2fs_check_discard_tree(struct f2fs_sb_info *sbi) 974 { 975 #ifdef CONFIG_F2FS_CHECK_FS 976 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 977 struct rb_node *cur = rb_first_cached(&dcc->root), *next; 978 struct discard_cmd *cur_dc, *next_dc; 979 980 while (cur) { 981 next = rb_next(cur); 982 if (!next) 983 return true; 984 985 cur_dc = rb_entry(cur, struct discard_cmd, rb_node); 986 next_dc = rb_entry(next, struct discard_cmd, rb_node); 987 988 if (cur_dc->di.lstart + cur_dc->di.len > next_dc->di.lstart) { 989 f2fs_info(sbi, "broken discard_rbtree, " 990 "cur(%u, %u) next(%u, %u)", 991 cur_dc->di.lstart, cur_dc->di.len, 992 next_dc->di.lstart, next_dc->di.len); 993 return false; 994 } 995 cur = next; 996 } 997 #endif 998 return true; 999 } 1000 1001 static struct discard_cmd *__lookup_discard_cmd(struct f2fs_sb_info *sbi, 1002 block_t blkaddr) 1003 { 1004 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1005 struct rb_node *node = dcc->root.rb_root.rb_node; 1006 struct discard_cmd *dc; 1007 1008 while (node) { 1009 dc = rb_entry(node, struct discard_cmd, rb_node); 1010 1011 if (blkaddr < dc->di.lstart) 1012 node = node->rb_left; 1013 else if (blkaddr >= dc->di.lstart + dc->di.len) 1014 node = node->rb_right; 1015 else 1016 return dc; 1017 } 1018 return NULL; 1019 } 1020 1021 static struct discard_cmd *__lookup_discard_cmd_ret(struct rb_root_cached *root, 1022 block_t blkaddr, 1023 struct discard_cmd **prev_entry, 1024 struct discard_cmd **next_entry, 1025 struct rb_node ***insert_p, 1026 struct rb_node **insert_parent) 1027 { 1028 struct rb_node **pnode = &root->rb_root.rb_node; 1029 struct rb_node *parent = NULL, *tmp_node; 1030 struct discard_cmd *dc; 1031 1032 *insert_p = NULL; 1033 *insert_parent = NULL; 1034 *prev_entry = NULL; 1035 *next_entry = NULL; 1036 1037 if (RB_EMPTY_ROOT(&root->rb_root)) 1038 return NULL; 1039 1040 while (*pnode) { 1041 parent = *pnode; 1042 dc = rb_entry(*pnode, struct discard_cmd, rb_node); 1043 1044 if (blkaddr < dc->di.lstart) 1045 pnode = &(*pnode)->rb_left; 1046 else if (blkaddr >= dc->di.lstart + dc->di.len) 1047 pnode = &(*pnode)->rb_right; 1048 else 1049 goto lookup_neighbors; 1050 } 1051 1052 *insert_p = pnode; 1053 *insert_parent = parent; 1054 1055 dc = rb_entry(parent, struct discard_cmd, rb_node); 1056 tmp_node = parent; 1057 if (parent && blkaddr > dc->di.lstart) 1058 tmp_node = rb_next(parent); 1059 *next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node); 1060 1061 tmp_node = parent; 1062 if (parent && blkaddr < dc->di.lstart) 1063 tmp_node = rb_prev(parent); 1064 *prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node); 1065 return NULL; 1066 1067 lookup_neighbors: 1068 /* lookup prev node for merging backward later */ 1069 tmp_node = rb_prev(&dc->rb_node); 1070 *prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node); 1071 1072 /* lookup next node for merging frontward later */ 1073 tmp_node = rb_next(&dc->rb_node); 1074 *next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node); 1075 return dc; 1076 } 1077 1078 static void __detach_discard_cmd(struct discard_cmd_control *dcc, 1079 struct discard_cmd *dc) 1080 { 1081 if (dc->state == D_DONE) 1082 atomic_sub(dc->queued, &dcc->queued_discard); 1083 1084 list_del(&dc->list); 1085 rb_erase_cached(&dc->rb_node, &dcc->root); 1086 dcc->undiscard_blks -= dc->di.len; 1087 1088 kmem_cache_free(discard_cmd_slab, dc); 1089 1090 atomic_dec(&dcc->discard_cmd_cnt); 1091 } 1092 1093 static void __remove_discard_cmd(struct f2fs_sb_info *sbi, 1094 struct discard_cmd *dc) 1095 { 1096 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1097 unsigned long flags; 1098 1099 trace_f2fs_remove_discard(dc->bdev, dc->di.start, dc->di.len); 1100 1101 spin_lock_irqsave(&dc->lock, flags); 1102 if (dc->bio_ref) { 1103 spin_unlock_irqrestore(&dc->lock, flags); 1104 return; 1105 } 1106 spin_unlock_irqrestore(&dc->lock, flags); 1107 1108 f2fs_bug_on(sbi, dc->ref); 1109 1110 if (dc->error == -EOPNOTSUPP) 1111 dc->error = 0; 1112 1113 if (dc->error) 1114 f2fs_info_ratelimited(sbi, 1115 "Issue discard(%u, %u, %u) failed, ret: %d", 1116 dc->di.lstart, dc->di.start, dc->di.len, dc->error); 1117 __detach_discard_cmd(dcc, dc); 1118 } 1119 1120 static void f2fs_submit_discard_endio(struct bio *bio) 1121 { 1122 struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private; 1123 unsigned long flags; 1124 1125 spin_lock_irqsave(&dc->lock, flags); 1126 if (!dc->error) 1127 dc->error = blk_status_to_errno(bio->bi_status); 1128 dc->bio_ref--; 1129 if (!dc->bio_ref && dc->state == D_SUBMIT) { 1130 dc->state = D_DONE; 1131 complete_all(&dc->wait); 1132 } 1133 spin_unlock_irqrestore(&dc->lock, flags); 1134 bio_put(bio); 1135 } 1136 1137 static void __check_sit_bitmap(struct f2fs_sb_info *sbi, 1138 block_t start, block_t end) 1139 { 1140 #ifdef CONFIG_F2FS_CHECK_FS 1141 struct seg_entry *sentry; 1142 unsigned int segno; 1143 block_t blk = start; 1144 unsigned long offset, size, *map; 1145 1146 while (blk < end) { 1147 segno = GET_SEGNO(sbi, blk); 1148 sentry = get_seg_entry(sbi, segno); 1149 offset = GET_BLKOFF_FROM_SEG0(sbi, blk); 1150 1151 if (end < START_BLOCK(sbi, segno + 1)) 1152 size = GET_BLKOFF_FROM_SEG0(sbi, end); 1153 else 1154 size = BLKS_PER_SEG(sbi); 1155 map = (unsigned long *)(sentry->cur_valid_map); 1156 offset = __find_rev_next_bit(map, size, offset); 1157 f2fs_bug_on(sbi, offset != size); 1158 blk = START_BLOCK(sbi, segno + 1); 1159 } 1160 #endif 1161 } 1162 1163 static void __init_discard_policy(struct f2fs_sb_info *sbi, 1164 struct discard_policy *dpolicy, 1165 int discard_type, unsigned int granularity) 1166 { 1167 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1168 1169 /* common policy */ 1170 dpolicy->type = discard_type; 1171 dpolicy->sync = true; 1172 dpolicy->ordered = false; 1173 dpolicy->granularity = granularity; 1174 1175 dpolicy->max_requests = dcc->max_discard_request; 1176 dpolicy->io_aware_gran = dcc->discard_io_aware_gran; 1177 dpolicy->timeout = false; 1178 1179 if (discard_type == DPOLICY_BG) { 1180 dpolicy->min_interval = dcc->min_discard_issue_time; 1181 dpolicy->mid_interval = dcc->mid_discard_issue_time; 1182 dpolicy->max_interval = dcc->max_discard_issue_time; 1183 dpolicy->io_aware = true; 1184 dpolicy->sync = false; 1185 dpolicy->ordered = true; 1186 if (utilization(sbi) > dcc->discard_urgent_util) { 1187 dpolicy->granularity = MIN_DISCARD_GRANULARITY; 1188 if (atomic_read(&dcc->discard_cmd_cnt)) 1189 dpolicy->max_interval = 1190 dcc->min_discard_issue_time; 1191 } 1192 } else if (discard_type == DPOLICY_FORCE) { 1193 dpolicy->min_interval = dcc->min_discard_issue_time; 1194 dpolicy->mid_interval = dcc->mid_discard_issue_time; 1195 dpolicy->max_interval = dcc->max_discard_issue_time; 1196 dpolicy->io_aware = false; 1197 } else if (discard_type == DPOLICY_FSTRIM) { 1198 dpolicy->io_aware = false; 1199 } else if (discard_type == DPOLICY_UMOUNT) { 1200 dpolicy->io_aware = false; 1201 /* we need to issue all to keep CP_TRIMMED_FLAG */ 1202 dpolicy->granularity = MIN_DISCARD_GRANULARITY; 1203 dpolicy->timeout = true; 1204 } 1205 } 1206 1207 static void __update_discard_tree_range(struct f2fs_sb_info *sbi, 1208 struct block_device *bdev, block_t lstart, 1209 block_t start, block_t len); 1210 1211 #ifdef CONFIG_BLK_DEV_ZONED 1212 static void __submit_zone_reset_cmd(struct f2fs_sb_info *sbi, 1213 struct discard_cmd *dc, blk_opf_t flag, 1214 struct list_head *wait_list, 1215 unsigned int *issued) 1216 { 1217 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1218 struct block_device *bdev = dc->bdev; 1219 struct bio *bio = bio_alloc(bdev, 0, REQ_OP_ZONE_RESET | flag, GFP_NOFS); 1220 unsigned long flags; 1221 1222 trace_f2fs_issue_reset_zone(bdev, dc->di.start); 1223 1224 spin_lock_irqsave(&dc->lock, flags); 1225 dc->state = D_SUBMIT; 1226 dc->bio_ref++; 1227 spin_unlock_irqrestore(&dc->lock, flags); 1228 1229 if (issued) 1230 (*issued)++; 1231 1232 atomic_inc(&dcc->queued_discard); 1233 dc->queued++; 1234 list_move_tail(&dc->list, wait_list); 1235 1236 /* sanity check on discard range */ 1237 __check_sit_bitmap(sbi, dc->di.lstart, dc->di.lstart + dc->di.len); 1238 1239 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(dc->di.start); 1240 bio->bi_private = dc; 1241 bio->bi_end_io = f2fs_submit_discard_endio; 1242 submit_bio(bio); 1243 1244 atomic_inc(&dcc->issued_discard); 1245 f2fs_update_iostat(sbi, NULL, FS_ZONE_RESET_IO, dc->di.len * F2FS_BLKSIZE); 1246 } 1247 #endif 1248 1249 /* this function is copied from blkdev_issue_discard from block/blk-lib.c */ 1250 static int __submit_discard_cmd(struct f2fs_sb_info *sbi, 1251 struct discard_policy *dpolicy, 1252 struct discard_cmd *dc, int *issued) 1253 { 1254 struct block_device *bdev = dc->bdev; 1255 unsigned int max_discard_blocks = 1256 SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev)); 1257 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1258 struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ? 1259 &(dcc->fstrim_list) : &(dcc->wait_list); 1260 blk_opf_t flag = dpolicy->sync ? REQ_SYNC : 0; 1261 block_t lstart, start, len, total_len; 1262 int err = 0; 1263 1264 if (dc->state != D_PREP) 1265 return 0; 1266 1267 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) 1268 return 0; 1269 1270 #ifdef CONFIG_BLK_DEV_ZONED 1271 if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) { 1272 int devi = f2fs_bdev_index(sbi, bdev); 1273 1274 if (devi < 0) 1275 return -EINVAL; 1276 1277 if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) { 1278 __submit_zone_reset_cmd(sbi, dc, flag, 1279 wait_list, issued); 1280 return 0; 1281 } 1282 } 1283 #endif 1284 1285 trace_f2fs_issue_discard(bdev, dc->di.start, dc->di.len); 1286 1287 lstart = dc->di.lstart; 1288 start = dc->di.start; 1289 len = dc->di.len; 1290 total_len = len; 1291 1292 dc->di.len = 0; 1293 1294 while (total_len && *issued < dpolicy->max_requests && !err) { 1295 struct bio *bio = NULL; 1296 unsigned long flags; 1297 bool last = true; 1298 1299 if (len > max_discard_blocks) { 1300 len = max_discard_blocks; 1301 last = false; 1302 } 1303 1304 (*issued)++; 1305 if (*issued == dpolicy->max_requests) 1306 last = true; 1307 1308 dc->di.len += len; 1309 1310 if (time_to_inject(sbi, FAULT_DISCARD)) { 1311 err = -EIO; 1312 } else { 1313 err = __blkdev_issue_discard(bdev, 1314 SECTOR_FROM_BLOCK(start), 1315 SECTOR_FROM_BLOCK(len), 1316 GFP_NOFS, &bio); 1317 } 1318 if (err) { 1319 spin_lock_irqsave(&dc->lock, flags); 1320 if (dc->state == D_PARTIAL) 1321 dc->state = D_SUBMIT; 1322 spin_unlock_irqrestore(&dc->lock, flags); 1323 1324 break; 1325 } 1326 1327 f2fs_bug_on(sbi, !bio); 1328 1329 /* 1330 * should keep before submission to avoid D_DONE 1331 * right away 1332 */ 1333 spin_lock_irqsave(&dc->lock, flags); 1334 if (last) 1335 dc->state = D_SUBMIT; 1336 else 1337 dc->state = D_PARTIAL; 1338 dc->bio_ref++; 1339 spin_unlock_irqrestore(&dc->lock, flags); 1340 1341 atomic_inc(&dcc->queued_discard); 1342 dc->queued++; 1343 list_move_tail(&dc->list, wait_list); 1344 1345 /* sanity check on discard range */ 1346 __check_sit_bitmap(sbi, lstart, lstart + len); 1347 1348 bio->bi_private = dc; 1349 bio->bi_end_io = f2fs_submit_discard_endio; 1350 bio->bi_opf |= flag; 1351 submit_bio(bio); 1352 1353 atomic_inc(&dcc->issued_discard); 1354 1355 f2fs_update_iostat(sbi, NULL, FS_DISCARD_IO, len * F2FS_BLKSIZE); 1356 1357 lstart += len; 1358 start += len; 1359 total_len -= len; 1360 len = total_len; 1361 } 1362 1363 if (!err && len) { 1364 dcc->undiscard_blks -= len; 1365 __update_discard_tree_range(sbi, bdev, lstart, start, len); 1366 } 1367 return err; 1368 } 1369 1370 static void __insert_discard_cmd(struct f2fs_sb_info *sbi, 1371 struct block_device *bdev, block_t lstart, 1372 block_t start, block_t len) 1373 { 1374 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1375 struct rb_node **p = &dcc->root.rb_root.rb_node; 1376 struct rb_node *parent = NULL; 1377 struct discard_cmd *dc; 1378 bool leftmost = true; 1379 1380 /* look up rb tree to find parent node */ 1381 while (*p) { 1382 parent = *p; 1383 dc = rb_entry(parent, struct discard_cmd, rb_node); 1384 1385 if (lstart < dc->di.lstart) { 1386 p = &(*p)->rb_left; 1387 } else if (lstart >= dc->di.lstart + dc->di.len) { 1388 p = &(*p)->rb_right; 1389 leftmost = false; 1390 } else { 1391 f2fs_bug_on(sbi, 1); 1392 } 1393 } 1394 1395 dc = __create_discard_cmd(sbi, bdev, lstart, start, len); 1396 1397 rb_link_node(&dc->rb_node, parent, p); 1398 rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost); 1399 } 1400 1401 static void __relocate_discard_cmd(struct discard_cmd_control *dcc, 1402 struct discard_cmd *dc) 1403 { 1404 list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->di.len)]); 1405 } 1406 1407 static void __punch_discard_cmd(struct f2fs_sb_info *sbi, 1408 struct discard_cmd *dc, block_t blkaddr) 1409 { 1410 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1411 struct discard_info di = dc->di; 1412 bool modified = false; 1413 1414 if (dc->state == D_DONE || dc->di.len == 1) { 1415 __remove_discard_cmd(sbi, dc); 1416 return; 1417 } 1418 1419 dcc->undiscard_blks -= di.len; 1420 1421 if (blkaddr > di.lstart) { 1422 dc->di.len = blkaddr - dc->di.lstart; 1423 dcc->undiscard_blks += dc->di.len; 1424 __relocate_discard_cmd(dcc, dc); 1425 modified = true; 1426 } 1427 1428 if (blkaddr < di.lstart + di.len - 1) { 1429 if (modified) { 1430 __insert_discard_cmd(sbi, dc->bdev, blkaddr + 1, 1431 di.start + blkaddr + 1 - di.lstart, 1432 di.lstart + di.len - 1 - blkaddr); 1433 } else { 1434 dc->di.lstart++; 1435 dc->di.len--; 1436 dc->di.start++; 1437 dcc->undiscard_blks += dc->di.len; 1438 __relocate_discard_cmd(dcc, dc); 1439 } 1440 } 1441 } 1442 1443 static void __update_discard_tree_range(struct f2fs_sb_info *sbi, 1444 struct block_device *bdev, block_t lstart, 1445 block_t start, block_t len) 1446 { 1447 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1448 struct discard_cmd *prev_dc = NULL, *next_dc = NULL; 1449 struct discard_cmd *dc; 1450 struct discard_info di = {0}; 1451 struct rb_node **insert_p = NULL, *insert_parent = NULL; 1452 unsigned int max_discard_blocks = 1453 SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev)); 1454 block_t end = lstart + len; 1455 1456 dc = __lookup_discard_cmd_ret(&dcc->root, lstart, 1457 &prev_dc, &next_dc, &insert_p, &insert_parent); 1458 if (dc) 1459 prev_dc = dc; 1460 1461 if (!prev_dc) { 1462 di.lstart = lstart; 1463 di.len = next_dc ? next_dc->di.lstart - lstart : len; 1464 di.len = min(di.len, len); 1465 di.start = start; 1466 } 1467 1468 while (1) { 1469 struct rb_node *node; 1470 bool merged = false; 1471 struct discard_cmd *tdc = NULL; 1472 1473 if (prev_dc) { 1474 di.lstart = prev_dc->di.lstart + prev_dc->di.len; 1475 if (di.lstart < lstart) 1476 di.lstart = lstart; 1477 if (di.lstart >= end) 1478 break; 1479 1480 if (!next_dc || next_dc->di.lstart > end) 1481 di.len = end - di.lstart; 1482 else 1483 di.len = next_dc->di.lstart - di.lstart; 1484 di.start = start + di.lstart - lstart; 1485 } 1486 1487 if (!di.len) 1488 goto next; 1489 1490 if (prev_dc && prev_dc->state == D_PREP && 1491 prev_dc->bdev == bdev && 1492 __is_discard_back_mergeable(&di, &prev_dc->di, 1493 max_discard_blocks)) { 1494 prev_dc->di.len += di.len; 1495 dcc->undiscard_blks += di.len; 1496 __relocate_discard_cmd(dcc, prev_dc); 1497 di = prev_dc->di; 1498 tdc = prev_dc; 1499 merged = true; 1500 } 1501 1502 if (next_dc && next_dc->state == D_PREP && 1503 next_dc->bdev == bdev && 1504 __is_discard_front_mergeable(&di, &next_dc->di, 1505 max_discard_blocks)) { 1506 next_dc->di.lstart = di.lstart; 1507 next_dc->di.len += di.len; 1508 next_dc->di.start = di.start; 1509 dcc->undiscard_blks += di.len; 1510 __relocate_discard_cmd(dcc, next_dc); 1511 if (tdc) 1512 __remove_discard_cmd(sbi, tdc); 1513 merged = true; 1514 } 1515 1516 if (!merged) 1517 __insert_discard_cmd(sbi, bdev, 1518 di.lstart, di.start, di.len); 1519 next: 1520 prev_dc = next_dc; 1521 if (!prev_dc) 1522 break; 1523 1524 node = rb_next(&prev_dc->rb_node); 1525 next_dc = rb_entry_safe(node, struct discard_cmd, rb_node); 1526 } 1527 } 1528 1529 #ifdef CONFIG_BLK_DEV_ZONED 1530 static void __queue_zone_reset_cmd(struct f2fs_sb_info *sbi, 1531 struct block_device *bdev, block_t blkstart, block_t lblkstart, 1532 block_t blklen) 1533 { 1534 trace_f2fs_queue_reset_zone(bdev, blkstart); 1535 1536 mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock); 1537 __insert_discard_cmd(sbi, bdev, lblkstart, blkstart, blklen); 1538 mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock); 1539 } 1540 #endif 1541 1542 static void __queue_discard_cmd(struct f2fs_sb_info *sbi, 1543 struct block_device *bdev, block_t blkstart, block_t blklen) 1544 { 1545 block_t lblkstart = blkstart; 1546 1547 if (!f2fs_bdev_support_discard(bdev)) 1548 return; 1549 1550 trace_f2fs_queue_discard(bdev, blkstart, blklen); 1551 1552 if (f2fs_is_multi_device(sbi)) { 1553 int devi = f2fs_target_device_index(sbi, blkstart); 1554 1555 blkstart -= FDEV(devi).start_blk; 1556 } 1557 mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock); 1558 __update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen); 1559 mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock); 1560 } 1561 1562 static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi, 1563 struct discard_policy *dpolicy, int *issued) 1564 { 1565 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1566 struct discard_cmd *prev_dc = NULL, *next_dc = NULL; 1567 struct rb_node **insert_p = NULL, *insert_parent = NULL; 1568 struct discard_cmd *dc; 1569 struct blk_plug plug; 1570 bool io_interrupted = false; 1571 1572 mutex_lock(&dcc->cmd_lock); 1573 dc = __lookup_discard_cmd_ret(&dcc->root, dcc->next_pos, 1574 &prev_dc, &next_dc, &insert_p, &insert_parent); 1575 if (!dc) 1576 dc = next_dc; 1577 1578 blk_start_plug(&plug); 1579 1580 while (dc) { 1581 struct rb_node *node; 1582 int err = 0; 1583 1584 if (dc->state != D_PREP) 1585 goto next; 1586 1587 if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) { 1588 io_interrupted = true; 1589 break; 1590 } 1591 1592 dcc->next_pos = dc->di.lstart + dc->di.len; 1593 err = __submit_discard_cmd(sbi, dpolicy, dc, issued); 1594 1595 if (*issued >= dpolicy->max_requests) 1596 break; 1597 next: 1598 node = rb_next(&dc->rb_node); 1599 if (err) 1600 __remove_discard_cmd(sbi, dc); 1601 dc = rb_entry_safe(node, struct discard_cmd, rb_node); 1602 } 1603 1604 blk_finish_plug(&plug); 1605 1606 if (!dc) 1607 dcc->next_pos = 0; 1608 1609 mutex_unlock(&dcc->cmd_lock); 1610 1611 if (!(*issued) && io_interrupted) 1612 *issued = -1; 1613 } 1614 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi, 1615 struct discard_policy *dpolicy); 1616 1617 static int __issue_discard_cmd(struct f2fs_sb_info *sbi, 1618 struct discard_policy *dpolicy) 1619 { 1620 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1621 struct list_head *pend_list; 1622 struct discard_cmd *dc, *tmp; 1623 struct blk_plug plug; 1624 int i, issued; 1625 bool io_interrupted = false; 1626 1627 if (dpolicy->timeout) 1628 f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT); 1629 1630 retry: 1631 issued = 0; 1632 for (i = MAX_PLIST_NUM - 1; i >= 0; i--) { 1633 if (dpolicy->timeout && 1634 f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT)) 1635 break; 1636 1637 if (i + 1 < dpolicy->granularity) 1638 break; 1639 1640 if (i + 1 < dcc->max_ordered_discard && dpolicy->ordered) { 1641 __issue_discard_cmd_orderly(sbi, dpolicy, &issued); 1642 return issued; 1643 } 1644 1645 pend_list = &dcc->pend_list[i]; 1646 1647 mutex_lock(&dcc->cmd_lock); 1648 if (list_empty(pend_list)) 1649 goto next; 1650 if (unlikely(dcc->rbtree_check)) 1651 f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi)); 1652 blk_start_plug(&plug); 1653 list_for_each_entry_safe(dc, tmp, pend_list, list) { 1654 f2fs_bug_on(sbi, dc->state != D_PREP); 1655 1656 if (dpolicy->timeout && 1657 f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT)) 1658 break; 1659 1660 if (dpolicy->io_aware && i < dpolicy->io_aware_gran && 1661 !is_idle(sbi, DISCARD_TIME)) { 1662 io_interrupted = true; 1663 break; 1664 } 1665 1666 __submit_discard_cmd(sbi, dpolicy, dc, &issued); 1667 1668 if (issued >= dpolicy->max_requests) 1669 break; 1670 } 1671 blk_finish_plug(&plug); 1672 next: 1673 mutex_unlock(&dcc->cmd_lock); 1674 1675 if (issued >= dpolicy->max_requests || io_interrupted) 1676 break; 1677 } 1678 1679 if (dpolicy->type == DPOLICY_UMOUNT && issued) { 1680 __wait_all_discard_cmd(sbi, dpolicy); 1681 goto retry; 1682 } 1683 1684 if (!issued && io_interrupted) 1685 issued = -1; 1686 1687 return issued; 1688 } 1689 1690 static bool __drop_discard_cmd(struct f2fs_sb_info *sbi) 1691 { 1692 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1693 struct list_head *pend_list; 1694 struct discard_cmd *dc, *tmp; 1695 int i; 1696 bool dropped = false; 1697 1698 mutex_lock(&dcc->cmd_lock); 1699 for (i = MAX_PLIST_NUM - 1; i >= 0; i--) { 1700 pend_list = &dcc->pend_list[i]; 1701 list_for_each_entry_safe(dc, tmp, pend_list, list) { 1702 f2fs_bug_on(sbi, dc->state != D_PREP); 1703 __remove_discard_cmd(sbi, dc); 1704 dropped = true; 1705 } 1706 } 1707 mutex_unlock(&dcc->cmd_lock); 1708 1709 return dropped; 1710 } 1711 1712 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi) 1713 { 1714 __drop_discard_cmd(sbi); 1715 } 1716 1717 static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi, 1718 struct discard_cmd *dc) 1719 { 1720 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1721 unsigned int len = 0; 1722 1723 wait_for_completion_io(&dc->wait); 1724 mutex_lock(&dcc->cmd_lock); 1725 f2fs_bug_on(sbi, dc->state != D_DONE); 1726 dc->ref--; 1727 if (!dc->ref) { 1728 if (!dc->error) 1729 len = dc->di.len; 1730 __remove_discard_cmd(sbi, dc); 1731 } 1732 mutex_unlock(&dcc->cmd_lock); 1733 1734 return len; 1735 } 1736 1737 static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi, 1738 struct discard_policy *dpolicy, 1739 block_t start, block_t end) 1740 { 1741 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1742 struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ? 1743 &(dcc->fstrim_list) : &(dcc->wait_list); 1744 struct discard_cmd *dc = NULL, *iter, *tmp; 1745 unsigned int trimmed = 0; 1746 1747 next: 1748 dc = NULL; 1749 1750 mutex_lock(&dcc->cmd_lock); 1751 list_for_each_entry_safe(iter, tmp, wait_list, list) { 1752 if (iter->di.lstart + iter->di.len <= start || 1753 end <= iter->di.lstart) 1754 continue; 1755 if (iter->di.len < dpolicy->granularity) 1756 continue; 1757 if (iter->state == D_DONE && !iter->ref) { 1758 wait_for_completion_io(&iter->wait); 1759 if (!iter->error) 1760 trimmed += iter->di.len; 1761 __remove_discard_cmd(sbi, iter); 1762 } else { 1763 iter->ref++; 1764 dc = iter; 1765 break; 1766 } 1767 } 1768 mutex_unlock(&dcc->cmd_lock); 1769 1770 if (dc) { 1771 trimmed += __wait_one_discard_bio(sbi, dc); 1772 goto next; 1773 } 1774 1775 return trimmed; 1776 } 1777 1778 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi, 1779 struct discard_policy *dpolicy) 1780 { 1781 struct discard_policy dp; 1782 unsigned int discard_blks; 1783 1784 if (dpolicy) 1785 return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX); 1786 1787 /* wait all */ 1788 __init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, MIN_DISCARD_GRANULARITY); 1789 discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX); 1790 __init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, MIN_DISCARD_GRANULARITY); 1791 discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX); 1792 1793 return discard_blks; 1794 } 1795 1796 /* This should be covered by global mutex, &sit_i->sentry_lock */ 1797 static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr) 1798 { 1799 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1800 struct discard_cmd *dc; 1801 bool need_wait = false; 1802 1803 mutex_lock(&dcc->cmd_lock); 1804 dc = __lookup_discard_cmd(sbi, blkaddr); 1805 #ifdef CONFIG_BLK_DEV_ZONED 1806 if (dc && f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(dc->bdev)) { 1807 int devi = f2fs_bdev_index(sbi, dc->bdev); 1808 1809 if (devi < 0) { 1810 mutex_unlock(&dcc->cmd_lock); 1811 return; 1812 } 1813 1814 if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) { 1815 /* force submit zone reset */ 1816 if (dc->state == D_PREP) 1817 __submit_zone_reset_cmd(sbi, dc, REQ_SYNC, 1818 &dcc->wait_list, NULL); 1819 dc->ref++; 1820 mutex_unlock(&dcc->cmd_lock); 1821 /* wait zone reset */ 1822 __wait_one_discard_bio(sbi, dc); 1823 return; 1824 } 1825 } 1826 #endif 1827 if (dc) { 1828 if (dc->state == D_PREP) { 1829 __punch_discard_cmd(sbi, dc, blkaddr); 1830 } else { 1831 dc->ref++; 1832 need_wait = true; 1833 } 1834 } 1835 mutex_unlock(&dcc->cmd_lock); 1836 1837 if (need_wait) 1838 __wait_one_discard_bio(sbi, dc); 1839 } 1840 1841 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi) 1842 { 1843 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1844 1845 if (dcc && dcc->f2fs_issue_discard) { 1846 struct task_struct *discard_thread = dcc->f2fs_issue_discard; 1847 1848 dcc->f2fs_issue_discard = NULL; 1849 kthread_stop(discard_thread); 1850 } 1851 } 1852 1853 /** 1854 * f2fs_issue_discard_timeout() - Issue all discard cmd within UMOUNT_DISCARD_TIMEOUT 1855 * @sbi: the f2fs_sb_info data for discard cmd to issue 1856 * 1857 * When UMOUNT_DISCARD_TIMEOUT is exceeded, all remaining discard commands will be dropped 1858 * 1859 * Return true if issued all discard cmd or no discard cmd need issue, otherwise return false. 1860 */ 1861 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi) 1862 { 1863 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1864 struct discard_policy dpolicy; 1865 bool dropped; 1866 1867 if (!atomic_read(&dcc->discard_cmd_cnt)) 1868 return true; 1869 1870 __init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT, 1871 dcc->discard_granularity); 1872 __issue_discard_cmd(sbi, &dpolicy); 1873 dropped = __drop_discard_cmd(sbi); 1874 1875 /* just to make sure there is no pending discard commands */ 1876 __wait_all_discard_cmd(sbi, NULL); 1877 1878 f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt)); 1879 return !dropped; 1880 } 1881 1882 static int issue_discard_thread(void *data) 1883 { 1884 struct f2fs_sb_info *sbi = data; 1885 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1886 wait_queue_head_t *q = &dcc->discard_wait_queue; 1887 struct discard_policy dpolicy; 1888 unsigned int wait_ms = dcc->min_discard_issue_time; 1889 int issued; 1890 1891 set_freezable(); 1892 1893 do { 1894 wait_event_interruptible_timeout(*q, 1895 kthread_should_stop() || freezing(current) || 1896 dcc->discard_wake, 1897 msecs_to_jiffies(wait_ms)); 1898 1899 if (sbi->gc_mode == GC_URGENT_HIGH || 1900 !f2fs_available_free_memory(sbi, DISCARD_CACHE)) 1901 __init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1902 MIN_DISCARD_GRANULARITY); 1903 else 1904 __init_discard_policy(sbi, &dpolicy, DPOLICY_BG, 1905 dcc->discard_granularity); 1906 1907 if (dcc->discard_wake) 1908 dcc->discard_wake = false; 1909 1910 /* clean up pending candidates before going to sleep */ 1911 if (atomic_read(&dcc->queued_discard)) 1912 __wait_all_discard_cmd(sbi, NULL); 1913 1914 if (try_to_freeze()) 1915 continue; 1916 if (f2fs_readonly(sbi->sb)) 1917 continue; 1918 if (kthread_should_stop()) 1919 return 0; 1920 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) || 1921 !atomic_read(&dcc->discard_cmd_cnt)) { 1922 wait_ms = dpolicy.max_interval; 1923 continue; 1924 } 1925 1926 sb_start_intwrite(sbi->sb); 1927 1928 issued = __issue_discard_cmd(sbi, &dpolicy); 1929 if (issued > 0) { 1930 __wait_all_discard_cmd(sbi, &dpolicy); 1931 wait_ms = dpolicy.min_interval; 1932 } else if (issued == -1) { 1933 wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME); 1934 if (!wait_ms) 1935 wait_ms = dpolicy.mid_interval; 1936 } else { 1937 wait_ms = dpolicy.max_interval; 1938 } 1939 if (!atomic_read(&dcc->discard_cmd_cnt)) 1940 wait_ms = dpolicy.max_interval; 1941 1942 sb_end_intwrite(sbi->sb); 1943 1944 } while (!kthread_should_stop()); 1945 return 0; 1946 } 1947 1948 #ifdef CONFIG_BLK_DEV_ZONED 1949 static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi, 1950 struct block_device *bdev, block_t blkstart, block_t blklen) 1951 { 1952 sector_t sector, nr_sects; 1953 block_t lblkstart = blkstart; 1954 int devi = 0; 1955 u64 remainder = 0; 1956 1957 if (f2fs_is_multi_device(sbi)) { 1958 devi = f2fs_target_device_index(sbi, blkstart); 1959 if (blkstart < FDEV(devi).start_blk || 1960 blkstart > FDEV(devi).end_blk) { 1961 f2fs_err(sbi, "Invalid block %x", blkstart); 1962 return -EIO; 1963 } 1964 blkstart -= FDEV(devi).start_blk; 1965 } 1966 1967 /* For sequential zones, reset the zone write pointer */ 1968 if (f2fs_blkz_is_seq(sbi, devi, blkstart)) { 1969 sector = SECTOR_FROM_BLOCK(blkstart); 1970 nr_sects = SECTOR_FROM_BLOCK(blklen); 1971 div64_u64_rem(sector, bdev_zone_sectors(bdev), &remainder); 1972 1973 if (remainder || nr_sects != bdev_zone_sectors(bdev)) { 1974 f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)", 1975 devi, sbi->s_ndevs ? FDEV(devi).path : "", 1976 blkstart, blklen); 1977 return -EIO; 1978 } 1979 1980 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) { 1981 trace_f2fs_issue_reset_zone(bdev, blkstart); 1982 return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, 1983 sector, nr_sects, GFP_NOFS); 1984 } 1985 1986 __queue_zone_reset_cmd(sbi, bdev, blkstart, lblkstart, blklen); 1987 return 0; 1988 } 1989 1990 /* For conventional zones, use regular discard if supported */ 1991 __queue_discard_cmd(sbi, bdev, lblkstart, blklen); 1992 return 0; 1993 } 1994 #endif 1995 1996 static int __issue_discard_async(struct f2fs_sb_info *sbi, 1997 struct block_device *bdev, block_t blkstart, block_t blklen) 1998 { 1999 #ifdef CONFIG_BLK_DEV_ZONED 2000 if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) 2001 return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen); 2002 #endif 2003 __queue_discard_cmd(sbi, bdev, blkstart, blklen); 2004 return 0; 2005 } 2006 2007 static int f2fs_issue_discard(struct f2fs_sb_info *sbi, 2008 block_t blkstart, block_t blklen) 2009 { 2010 sector_t start = blkstart, len = 0; 2011 struct block_device *bdev; 2012 struct seg_entry *se; 2013 unsigned int offset; 2014 block_t i; 2015 int err = 0; 2016 2017 bdev = f2fs_target_device(sbi, blkstart, NULL); 2018 2019 for (i = blkstart; i < blkstart + blklen; i++, len++) { 2020 if (i != start) { 2021 struct block_device *bdev2 = 2022 f2fs_target_device(sbi, i, NULL); 2023 2024 if (bdev2 != bdev) { 2025 err = __issue_discard_async(sbi, bdev, 2026 start, len); 2027 if (err) 2028 return err; 2029 bdev = bdev2; 2030 start = i; 2031 len = 0; 2032 } 2033 } 2034 2035 se = get_seg_entry(sbi, GET_SEGNO(sbi, i)); 2036 offset = GET_BLKOFF_FROM_SEG0(sbi, i); 2037 2038 if (f2fs_block_unit_discard(sbi) && 2039 !f2fs_test_and_set_bit(offset, se->discard_map)) 2040 sbi->discard_blks--; 2041 } 2042 2043 if (len) 2044 err = __issue_discard_async(sbi, bdev, start, len); 2045 return err; 2046 } 2047 2048 static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc, 2049 bool check_only) 2050 { 2051 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); 2052 struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start); 2053 unsigned long *cur_map = (unsigned long *)se->cur_valid_map; 2054 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map; 2055 unsigned long *discard_map = (unsigned long *)se->discard_map; 2056 unsigned long *dmap = SIT_I(sbi)->tmp_map; 2057 unsigned int start = 0, end = -1; 2058 bool force = (cpc->reason & CP_DISCARD); 2059 struct discard_entry *de = NULL; 2060 struct list_head *head = &SM_I(sbi)->dcc_info->entry_list; 2061 int i; 2062 2063 if (se->valid_blocks == BLKS_PER_SEG(sbi) || 2064 !f2fs_hw_support_discard(sbi) || 2065 !f2fs_block_unit_discard(sbi)) 2066 return false; 2067 2068 if (!force) { 2069 if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks || 2070 SM_I(sbi)->dcc_info->nr_discards >= 2071 SM_I(sbi)->dcc_info->max_discards) 2072 return false; 2073 } 2074 2075 /* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */ 2076 for (i = 0; i < entries; i++) 2077 dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] : 2078 (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i]; 2079 2080 while (force || SM_I(sbi)->dcc_info->nr_discards <= 2081 SM_I(sbi)->dcc_info->max_discards) { 2082 start = __find_rev_next_bit(dmap, BLKS_PER_SEG(sbi), end + 1); 2083 if (start >= BLKS_PER_SEG(sbi)) 2084 break; 2085 2086 end = __find_rev_next_zero_bit(dmap, 2087 BLKS_PER_SEG(sbi), start + 1); 2088 if (force && start && end != BLKS_PER_SEG(sbi) && 2089 (end - start) < cpc->trim_minlen) 2090 continue; 2091 2092 if (check_only) 2093 return true; 2094 2095 if (!de) { 2096 de = f2fs_kmem_cache_alloc(discard_entry_slab, 2097 GFP_F2FS_ZERO, true, NULL); 2098 de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start); 2099 list_add_tail(&de->list, head); 2100 } 2101 2102 for (i = start; i < end; i++) 2103 __set_bit_le(i, (void *)de->discard_map); 2104 2105 SM_I(sbi)->dcc_info->nr_discards += end - start; 2106 } 2107 return false; 2108 } 2109 2110 static void release_discard_addr(struct discard_entry *entry) 2111 { 2112 list_del(&entry->list); 2113 kmem_cache_free(discard_entry_slab, entry); 2114 } 2115 2116 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi) 2117 { 2118 struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list); 2119 struct discard_entry *entry, *this; 2120 2121 /* drop caches */ 2122 list_for_each_entry_safe(entry, this, head, list) 2123 release_discard_addr(entry); 2124 } 2125 2126 /* 2127 * Should call f2fs_clear_prefree_segments after checkpoint is done. 2128 */ 2129 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi) 2130 { 2131 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 2132 unsigned int segno; 2133 2134 mutex_lock(&dirty_i->seglist_lock); 2135 for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi)) 2136 __set_test_and_free(sbi, segno, false); 2137 mutex_unlock(&dirty_i->seglist_lock); 2138 } 2139 2140 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi, 2141 struct cp_control *cpc) 2142 { 2143 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 2144 struct list_head *head = &dcc->entry_list; 2145 struct discard_entry *entry, *this; 2146 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 2147 unsigned long *prefree_map = dirty_i->dirty_segmap[PRE]; 2148 unsigned int start = 0, end = -1; 2149 unsigned int secno, start_segno; 2150 bool force = (cpc->reason & CP_DISCARD); 2151 bool section_alignment = F2FS_OPTION(sbi).discard_unit == 2152 DISCARD_UNIT_SECTION; 2153 2154 if (f2fs_lfs_mode(sbi) && __is_large_section(sbi)) 2155 section_alignment = true; 2156 2157 mutex_lock(&dirty_i->seglist_lock); 2158 2159 while (1) { 2160 int i; 2161 2162 if (section_alignment && end != -1) 2163 end--; 2164 start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1); 2165 if (start >= MAIN_SEGS(sbi)) 2166 break; 2167 end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi), 2168 start + 1); 2169 2170 if (section_alignment) { 2171 start = rounddown(start, SEGS_PER_SEC(sbi)); 2172 end = roundup(end, SEGS_PER_SEC(sbi)); 2173 } 2174 2175 for (i = start; i < end; i++) { 2176 if (test_and_clear_bit(i, prefree_map)) 2177 dirty_i->nr_dirty[PRE]--; 2178 } 2179 2180 if (!f2fs_realtime_discard_enable(sbi)) 2181 continue; 2182 2183 if (force && start >= cpc->trim_start && 2184 (end - 1) <= cpc->trim_end) 2185 continue; 2186 2187 /* Should cover 2MB zoned device for zone-based reset */ 2188 if (!f2fs_sb_has_blkzoned(sbi) && 2189 (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi))) { 2190 f2fs_issue_discard(sbi, START_BLOCK(sbi, start), 2191 (end - start) << sbi->log_blocks_per_seg); 2192 continue; 2193 } 2194 next: 2195 secno = GET_SEC_FROM_SEG(sbi, start); 2196 start_segno = GET_SEG_FROM_SEC(sbi, secno); 2197 if (!IS_CURSEC(sbi, secno) && 2198 !get_valid_blocks(sbi, start, true)) 2199 f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno), 2200 BLKS_PER_SEC(sbi)); 2201 2202 start = start_segno + SEGS_PER_SEC(sbi); 2203 if (start < end) 2204 goto next; 2205 else 2206 end = start - 1; 2207 } 2208 mutex_unlock(&dirty_i->seglist_lock); 2209 2210 if (!f2fs_block_unit_discard(sbi)) 2211 goto wakeup; 2212 2213 /* send small discards */ 2214 list_for_each_entry_safe(entry, this, head, list) { 2215 unsigned int cur_pos = 0, next_pos, len, total_len = 0; 2216 bool is_valid = test_bit_le(0, entry->discard_map); 2217 2218 find_next: 2219 if (is_valid) { 2220 next_pos = find_next_zero_bit_le(entry->discard_map, 2221 BLKS_PER_SEG(sbi), cur_pos); 2222 len = next_pos - cur_pos; 2223 2224 if (f2fs_sb_has_blkzoned(sbi) || 2225 (force && len < cpc->trim_minlen)) 2226 goto skip; 2227 2228 f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos, 2229 len); 2230 total_len += len; 2231 } else { 2232 next_pos = find_next_bit_le(entry->discard_map, 2233 BLKS_PER_SEG(sbi), cur_pos); 2234 } 2235 skip: 2236 cur_pos = next_pos; 2237 is_valid = !is_valid; 2238 2239 if (cur_pos < BLKS_PER_SEG(sbi)) 2240 goto find_next; 2241 2242 release_discard_addr(entry); 2243 dcc->nr_discards -= total_len; 2244 } 2245 2246 wakeup: 2247 wake_up_discard_thread(sbi, false); 2248 } 2249 2250 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi) 2251 { 2252 dev_t dev = sbi->sb->s_bdev->bd_dev; 2253 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 2254 int err = 0; 2255 2256 if (!f2fs_realtime_discard_enable(sbi)) 2257 return 0; 2258 2259 dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi, 2260 "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev)); 2261 if (IS_ERR(dcc->f2fs_issue_discard)) { 2262 err = PTR_ERR(dcc->f2fs_issue_discard); 2263 dcc->f2fs_issue_discard = NULL; 2264 } 2265 2266 return err; 2267 } 2268 2269 static int create_discard_cmd_control(struct f2fs_sb_info *sbi) 2270 { 2271 struct discard_cmd_control *dcc; 2272 int err = 0, i; 2273 2274 if (SM_I(sbi)->dcc_info) { 2275 dcc = SM_I(sbi)->dcc_info; 2276 goto init_thread; 2277 } 2278 2279 dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL); 2280 if (!dcc) 2281 return -ENOMEM; 2282 2283 dcc->discard_io_aware_gran = MAX_PLIST_NUM; 2284 dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY; 2285 dcc->max_ordered_discard = DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY; 2286 if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT) 2287 dcc->discard_granularity = BLKS_PER_SEG(sbi); 2288 else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION) 2289 dcc->discard_granularity = BLKS_PER_SEC(sbi); 2290 2291 INIT_LIST_HEAD(&dcc->entry_list); 2292 for (i = 0; i < MAX_PLIST_NUM; i++) 2293 INIT_LIST_HEAD(&dcc->pend_list[i]); 2294 INIT_LIST_HEAD(&dcc->wait_list); 2295 INIT_LIST_HEAD(&dcc->fstrim_list); 2296 mutex_init(&dcc->cmd_lock); 2297 atomic_set(&dcc->issued_discard, 0); 2298 atomic_set(&dcc->queued_discard, 0); 2299 atomic_set(&dcc->discard_cmd_cnt, 0); 2300 dcc->nr_discards = 0; 2301 dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg; 2302 dcc->max_discard_request = DEF_MAX_DISCARD_REQUEST; 2303 dcc->min_discard_issue_time = DEF_MIN_DISCARD_ISSUE_TIME; 2304 dcc->mid_discard_issue_time = DEF_MID_DISCARD_ISSUE_TIME; 2305 dcc->max_discard_issue_time = DEF_MAX_DISCARD_ISSUE_TIME; 2306 dcc->discard_urgent_util = DEF_DISCARD_URGENT_UTIL; 2307 dcc->undiscard_blks = 0; 2308 dcc->next_pos = 0; 2309 dcc->root = RB_ROOT_CACHED; 2310 dcc->rbtree_check = false; 2311 2312 init_waitqueue_head(&dcc->discard_wait_queue); 2313 SM_I(sbi)->dcc_info = dcc; 2314 init_thread: 2315 err = f2fs_start_discard_thread(sbi); 2316 if (err) { 2317 kfree(dcc); 2318 SM_I(sbi)->dcc_info = NULL; 2319 } 2320 2321 return err; 2322 } 2323 2324 static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi) 2325 { 2326 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 2327 2328 if (!dcc) 2329 return; 2330 2331 f2fs_stop_discard_thread(sbi); 2332 2333 /* 2334 * Recovery can cache discard commands, so in error path of 2335 * fill_super(), it needs to give a chance to handle them. 2336 */ 2337 f2fs_issue_discard_timeout(sbi); 2338 2339 kfree(dcc); 2340 SM_I(sbi)->dcc_info = NULL; 2341 } 2342 2343 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno) 2344 { 2345 struct sit_info *sit_i = SIT_I(sbi); 2346 2347 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) { 2348 sit_i->dirty_sentries++; 2349 return false; 2350 } 2351 2352 return true; 2353 } 2354 2355 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type, 2356 unsigned int segno, int modified) 2357 { 2358 struct seg_entry *se = get_seg_entry(sbi, segno); 2359 2360 se->type = type; 2361 if (modified) 2362 __mark_sit_entry_dirty(sbi, segno); 2363 } 2364 2365 static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi, 2366 block_t blkaddr) 2367 { 2368 unsigned int segno = GET_SEGNO(sbi, blkaddr); 2369 2370 if (segno == NULL_SEGNO) 2371 return 0; 2372 return get_seg_entry(sbi, segno)->mtime; 2373 } 2374 2375 static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr, 2376 unsigned long long old_mtime) 2377 { 2378 struct seg_entry *se; 2379 unsigned int segno = GET_SEGNO(sbi, blkaddr); 2380 unsigned long long ctime = get_mtime(sbi, false); 2381 unsigned long long mtime = old_mtime ? old_mtime : ctime; 2382 2383 if (segno == NULL_SEGNO) 2384 return; 2385 2386 se = get_seg_entry(sbi, segno); 2387 2388 if (!se->mtime) 2389 se->mtime = mtime; 2390 else 2391 se->mtime = div_u64(se->mtime * se->valid_blocks + mtime, 2392 se->valid_blocks + 1); 2393 2394 if (ctime > SIT_I(sbi)->max_mtime) 2395 SIT_I(sbi)->max_mtime = ctime; 2396 } 2397 2398 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) 2399 { 2400 struct seg_entry *se; 2401 unsigned int segno, offset; 2402 long int new_vblocks; 2403 bool exist; 2404 #ifdef CONFIG_F2FS_CHECK_FS 2405 bool mir_exist; 2406 #endif 2407 2408 segno = GET_SEGNO(sbi, blkaddr); 2409 if (segno == NULL_SEGNO) 2410 return; 2411 2412 se = get_seg_entry(sbi, segno); 2413 new_vblocks = se->valid_blocks + del; 2414 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); 2415 2416 f2fs_bug_on(sbi, (new_vblocks < 0 || 2417 (new_vblocks > f2fs_usable_blks_in_seg(sbi, segno)))); 2418 2419 se->valid_blocks = new_vblocks; 2420 2421 /* Update valid block bitmap */ 2422 if (del > 0) { 2423 exist = f2fs_test_and_set_bit(offset, se->cur_valid_map); 2424 #ifdef CONFIG_F2FS_CHECK_FS 2425 mir_exist = f2fs_test_and_set_bit(offset, 2426 se->cur_valid_map_mir); 2427 if (unlikely(exist != mir_exist)) { 2428 f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d", 2429 blkaddr, exist); 2430 f2fs_bug_on(sbi, 1); 2431 } 2432 #endif 2433 if (unlikely(exist)) { 2434 f2fs_err(sbi, "Bitmap was wrongly set, blk:%u", 2435 blkaddr); 2436 f2fs_bug_on(sbi, 1); 2437 se->valid_blocks--; 2438 del = 0; 2439 } 2440 2441 if (f2fs_block_unit_discard(sbi) && 2442 !f2fs_test_and_set_bit(offset, se->discard_map)) 2443 sbi->discard_blks--; 2444 2445 /* 2446 * SSR should never reuse block which is checkpointed 2447 * or newly invalidated. 2448 */ 2449 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) { 2450 if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) 2451 se->ckpt_valid_blocks++; 2452 } 2453 } else { 2454 exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map); 2455 #ifdef CONFIG_F2FS_CHECK_FS 2456 mir_exist = f2fs_test_and_clear_bit(offset, 2457 se->cur_valid_map_mir); 2458 if (unlikely(exist != mir_exist)) { 2459 f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d", 2460 blkaddr, exist); 2461 f2fs_bug_on(sbi, 1); 2462 } 2463 #endif 2464 if (unlikely(!exist)) { 2465 f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u", 2466 blkaddr); 2467 f2fs_bug_on(sbi, 1); 2468 se->valid_blocks++; 2469 del = 0; 2470 } else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 2471 /* 2472 * If checkpoints are off, we must not reuse data that 2473 * was used in the previous checkpoint. If it was used 2474 * before, we must track that to know how much space we 2475 * really have. 2476 */ 2477 if (f2fs_test_bit(offset, se->ckpt_valid_map)) { 2478 spin_lock(&sbi->stat_lock); 2479 sbi->unusable_block_count++; 2480 spin_unlock(&sbi->stat_lock); 2481 } 2482 } 2483 2484 if (f2fs_block_unit_discard(sbi) && 2485 f2fs_test_and_clear_bit(offset, se->discard_map)) 2486 sbi->discard_blks++; 2487 } 2488 if (!f2fs_test_bit(offset, se->ckpt_valid_map)) 2489 se->ckpt_valid_blocks += del; 2490 2491 __mark_sit_entry_dirty(sbi, segno); 2492 2493 /* update total number of valid blocks to be written in ckpt area */ 2494 SIT_I(sbi)->written_valid_blocks += del; 2495 2496 if (__is_large_section(sbi)) 2497 get_sec_entry(sbi, segno)->valid_blocks += del; 2498 } 2499 2500 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) 2501 { 2502 unsigned int segno = GET_SEGNO(sbi, addr); 2503 struct sit_info *sit_i = SIT_I(sbi); 2504 2505 f2fs_bug_on(sbi, addr == NULL_ADDR); 2506 if (addr == NEW_ADDR || addr == COMPRESS_ADDR) 2507 return; 2508 2509 f2fs_invalidate_internal_cache(sbi, addr); 2510 2511 /* add it into sit main buffer */ 2512 down_write(&sit_i->sentry_lock); 2513 2514 update_segment_mtime(sbi, addr, 0); 2515 update_sit_entry(sbi, addr, -1); 2516 2517 /* add it into dirty seglist */ 2518 locate_dirty_segment(sbi, segno); 2519 2520 up_write(&sit_i->sentry_lock); 2521 } 2522 2523 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr) 2524 { 2525 struct sit_info *sit_i = SIT_I(sbi); 2526 unsigned int segno, offset; 2527 struct seg_entry *se; 2528 bool is_cp = false; 2529 2530 if (!__is_valid_data_blkaddr(blkaddr)) 2531 return true; 2532 2533 down_read(&sit_i->sentry_lock); 2534 2535 segno = GET_SEGNO(sbi, blkaddr); 2536 se = get_seg_entry(sbi, segno); 2537 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); 2538 2539 if (f2fs_test_bit(offset, se->ckpt_valid_map)) 2540 is_cp = true; 2541 2542 up_read(&sit_i->sentry_lock); 2543 2544 return is_cp; 2545 } 2546 2547 static unsigned short f2fs_curseg_valid_blocks(struct f2fs_sb_info *sbi, int type) 2548 { 2549 struct curseg_info *curseg = CURSEG_I(sbi, type); 2550 2551 if (sbi->ckpt->alloc_type[type] == SSR) 2552 return BLKS_PER_SEG(sbi); 2553 return curseg->next_blkoff; 2554 } 2555 2556 /* 2557 * Calculate the number of current summary pages for writing 2558 */ 2559 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra) 2560 { 2561 int valid_sum_count = 0; 2562 int i, sum_in_page; 2563 2564 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 2565 if (sbi->ckpt->alloc_type[i] != SSR && for_ra) 2566 valid_sum_count += 2567 le16_to_cpu(F2FS_CKPT(sbi)->cur_data_blkoff[i]); 2568 else 2569 valid_sum_count += f2fs_curseg_valid_blocks(sbi, i); 2570 } 2571 2572 sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE - 2573 SUM_FOOTER_SIZE) / SUMMARY_SIZE; 2574 if (valid_sum_count <= sum_in_page) 2575 return 1; 2576 else if ((valid_sum_count - sum_in_page) <= 2577 (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE) 2578 return 2; 2579 return 3; 2580 } 2581 2582 /* 2583 * Caller should put this summary page 2584 */ 2585 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno) 2586 { 2587 if (unlikely(f2fs_cp_error(sbi))) 2588 return ERR_PTR(-EIO); 2589 return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno)); 2590 } 2591 2592 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, 2593 void *src, block_t blk_addr) 2594 { 2595 struct page *page = f2fs_grab_meta_page(sbi, blk_addr); 2596 2597 memcpy(page_address(page), src, PAGE_SIZE); 2598 set_page_dirty(page); 2599 f2fs_put_page(page, 1); 2600 } 2601 2602 static void write_sum_page(struct f2fs_sb_info *sbi, 2603 struct f2fs_summary_block *sum_blk, block_t blk_addr) 2604 { 2605 f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr); 2606 } 2607 2608 static void write_current_sum_page(struct f2fs_sb_info *sbi, 2609 int type, block_t blk_addr) 2610 { 2611 struct curseg_info *curseg = CURSEG_I(sbi, type); 2612 struct page *page = f2fs_grab_meta_page(sbi, blk_addr); 2613 struct f2fs_summary_block *src = curseg->sum_blk; 2614 struct f2fs_summary_block *dst; 2615 2616 dst = (struct f2fs_summary_block *)page_address(page); 2617 memset(dst, 0, PAGE_SIZE); 2618 2619 mutex_lock(&curseg->curseg_mutex); 2620 2621 down_read(&curseg->journal_rwsem); 2622 memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE); 2623 up_read(&curseg->journal_rwsem); 2624 2625 memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE); 2626 memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE); 2627 2628 mutex_unlock(&curseg->curseg_mutex); 2629 2630 set_page_dirty(page); 2631 f2fs_put_page(page, 1); 2632 } 2633 2634 static int is_next_segment_free(struct f2fs_sb_info *sbi, 2635 struct curseg_info *curseg, int type) 2636 { 2637 unsigned int segno = curseg->segno + 1; 2638 struct free_segmap_info *free_i = FREE_I(sbi); 2639 2640 if (segno < MAIN_SEGS(sbi) && segno % SEGS_PER_SEC(sbi)) 2641 return !test_bit(segno, free_i->free_segmap); 2642 return 0; 2643 } 2644 2645 /* 2646 * Find a new segment from the free segments bitmap to right order 2647 * This function should be returned with success, otherwise BUG 2648 */ 2649 static void get_new_segment(struct f2fs_sb_info *sbi, 2650 unsigned int *newseg, bool new_sec, bool pinning) 2651 { 2652 struct free_segmap_info *free_i = FREE_I(sbi); 2653 unsigned int segno, secno, zoneno; 2654 unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone; 2655 unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg); 2656 unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg); 2657 bool init = true; 2658 int i; 2659 int ret = 0; 2660 2661 spin_lock(&free_i->segmap_lock); 2662 2663 if (!new_sec && ((*newseg + 1) % SEGS_PER_SEC(sbi))) { 2664 segno = find_next_zero_bit(free_i->free_segmap, 2665 GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1); 2666 if (segno < GET_SEG_FROM_SEC(sbi, hint + 1)) 2667 goto got_it; 2668 } 2669 2670 /* 2671 * If we format f2fs on zoned storage, let's try to get pinned sections 2672 * from beginning of the storage, which should be a conventional one. 2673 */ 2674 if (f2fs_sb_has_blkzoned(sbi)) { 2675 segno = pinning ? 0 : max(first_zoned_segno(sbi), *newseg); 2676 hint = GET_SEC_FROM_SEG(sbi, segno); 2677 } 2678 2679 find_other_zone: 2680 secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint); 2681 if (secno >= MAIN_SECS(sbi)) { 2682 secno = find_first_zero_bit(free_i->free_secmap, 2683 MAIN_SECS(sbi)); 2684 if (secno >= MAIN_SECS(sbi)) { 2685 ret = -ENOSPC; 2686 goto out_unlock; 2687 } 2688 } 2689 segno = GET_SEG_FROM_SEC(sbi, secno); 2690 zoneno = GET_ZONE_FROM_SEC(sbi, secno); 2691 2692 /* give up on finding another zone */ 2693 if (!init) 2694 goto got_it; 2695 if (sbi->secs_per_zone == 1) 2696 goto got_it; 2697 if (zoneno == old_zoneno) 2698 goto got_it; 2699 for (i = 0; i < NR_CURSEG_TYPE; i++) 2700 if (CURSEG_I(sbi, i)->zone == zoneno) 2701 break; 2702 2703 if (i < NR_CURSEG_TYPE) { 2704 /* zone is in user, try another */ 2705 if (zoneno + 1 >= total_zones) 2706 hint = 0; 2707 else 2708 hint = (zoneno + 1) * sbi->secs_per_zone; 2709 init = false; 2710 goto find_other_zone; 2711 } 2712 got_it: 2713 /* set it as dirty segment in free segmap */ 2714 f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap)); 2715 __set_inuse(sbi, segno); 2716 *newseg = segno; 2717 out_unlock: 2718 spin_unlock(&free_i->segmap_lock); 2719 2720 if (ret) { 2721 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT); 2722 f2fs_bug_on(sbi, 1); 2723 } 2724 } 2725 2726 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified) 2727 { 2728 struct curseg_info *curseg = CURSEG_I(sbi, type); 2729 struct summary_footer *sum_footer; 2730 unsigned short seg_type = curseg->seg_type; 2731 2732 curseg->inited = true; 2733 curseg->segno = curseg->next_segno; 2734 curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno); 2735 curseg->next_blkoff = 0; 2736 curseg->next_segno = NULL_SEGNO; 2737 2738 sum_footer = &(curseg->sum_blk->footer); 2739 memset(sum_footer, 0, sizeof(struct summary_footer)); 2740 2741 sanity_check_seg_type(sbi, seg_type); 2742 2743 if (IS_DATASEG(seg_type)) 2744 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA); 2745 if (IS_NODESEG(seg_type)) 2746 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE); 2747 __set_sit_entry_type(sbi, seg_type, curseg->segno, modified); 2748 } 2749 2750 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type) 2751 { 2752 struct curseg_info *curseg = CURSEG_I(sbi, type); 2753 unsigned short seg_type = curseg->seg_type; 2754 2755 sanity_check_seg_type(sbi, seg_type); 2756 if (f2fs_need_rand_seg(sbi)) 2757 return get_random_u32_below(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi)); 2758 2759 if (__is_large_section(sbi)) 2760 return curseg->segno; 2761 2762 /* inmem log may not locate on any segment after mount */ 2763 if (!curseg->inited) 2764 return 0; 2765 2766 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 2767 return 0; 2768 2769 if (seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type)) 2770 return 0; 2771 2772 if (SIT_I(sbi)->last_victim[ALLOC_NEXT]) 2773 return SIT_I(sbi)->last_victim[ALLOC_NEXT]; 2774 2775 /* find segments from 0 to reuse freed segments */ 2776 if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE) 2777 return 0; 2778 2779 return curseg->segno; 2780 } 2781 2782 /* 2783 * Allocate a current working segment. 2784 * This function always allocates a free segment in LFS manner. 2785 */ 2786 static int new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec) 2787 { 2788 struct curseg_info *curseg = CURSEG_I(sbi, type); 2789 unsigned int segno = curseg->segno; 2790 bool pinning = type == CURSEG_COLD_DATA_PINNED; 2791 2792 if (curseg->inited) 2793 write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, segno)); 2794 2795 segno = __get_next_segno(sbi, type); 2796 get_new_segment(sbi, &segno, new_sec, pinning); 2797 if (new_sec && pinning && 2798 !f2fs_valid_pinned_area(sbi, START_BLOCK(sbi, segno))) { 2799 __set_free(sbi, segno); 2800 return -EAGAIN; 2801 } 2802 2803 curseg->next_segno = segno; 2804 reset_curseg(sbi, type, 1); 2805 curseg->alloc_type = LFS; 2806 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) 2807 curseg->fragment_remained_chunk = 2808 get_random_u32_inclusive(1, sbi->max_fragment_chunk); 2809 return 0; 2810 } 2811 2812 static int __next_free_blkoff(struct f2fs_sb_info *sbi, 2813 int segno, block_t start) 2814 { 2815 struct seg_entry *se = get_seg_entry(sbi, segno); 2816 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); 2817 unsigned long *target_map = SIT_I(sbi)->tmp_map; 2818 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map; 2819 unsigned long *cur_map = (unsigned long *)se->cur_valid_map; 2820 int i; 2821 2822 for (i = 0; i < entries; i++) 2823 target_map[i] = ckpt_map[i] | cur_map[i]; 2824 2825 return __find_rev_next_zero_bit(target_map, BLKS_PER_SEG(sbi), start); 2826 } 2827 2828 static int f2fs_find_next_ssr_block(struct f2fs_sb_info *sbi, 2829 struct curseg_info *seg) 2830 { 2831 return __next_free_blkoff(sbi, seg->segno, seg->next_blkoff + 1); 2832 } 2833 2834 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno) 2835 { 2836 return __next_free_blkoff(sbi, segno, 0) < BLKS_PER_SEG(sbi); 2837 } 2838 2839 /* 2840 * This function always allocates a used segment(from dirty seglist) by SSR 2841 * manner, so it should recover the existing segment information of valid blocks 2842 */ 2843 static void change_curseg(struct f2fs_sb_info *sbi, int type) 2844 { 2845 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 2846 struct curseg_info *curseg = CURSEG_I(sbi, type); 2847 unsigned int new_segno = curseg->next_segno; 2848 struct f2fs_summary_block *sum_node; 2849 struct page *sum_page; 2850 2851 if (curseg->inited) 2852 write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno)); 2853 2854 __set_test_and_inuse(sbi, new_segno); 2855 2856 mutex_lock(&dirty_i->seglist_lock); 2857 __remove_dirty_segment(sbi, new_segno, PRE); 2858 __remove_dirty_segment(sbi, new_segno, DIRTY); 2859 mutex_unlock(&dirty_i->seglist_lock); 2860 2861 reset_curseg(sbi, type, 1); 2862 curseg->alloc_type = SSR; 2863 curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0); 2864 2865 sum_page = f2fs_get_sum_page(sbi, new_segno); 2866 if (IS_ERR(sum_page)) { 2867 /* GC won't be able to use stale summary pages by cp_error */ 2868 memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE); 2869 return; 2870 } 2871 sum_node = (struct f2fs_summary_block *)page_address(sum_page); 2872 memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE); 2873 f2fs_put_page(sum_page, 1); 2874 } 2875 2876 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type, 2877 int alloc_mode, unsigned long long age); 2878 2879 static void get_atssr_segment(struct f2fs_sb_info *sbi, int type, 2880 int target_type, int alloc_mode, 2881 unsigned long long age) 2882 { 2883 struct curseg_info *curseg = CURSEG_I(sbi, type); 2884 2885 curseg->seg_type = target_type; 2886 2887 if (get_ssr_segment(sbi, type, alloc_mode, age)) { 2888 struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno); 2889 2890 curseg->seg_type = se->type; 2891 change_curseg(sbi, type); 2892 } else { 2893 /* allocate cold segment by default */ 2894 curseg->seg_type = CURSEG_COLD_DATA; 2895 new_curseg(sbi, type, true); 2896 } 2897 stat_inc_seg_type(sbi, curseg); 2898 } 2899 2900 static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi) 2901 { 2902 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC); 2903 2904 if (!sbi->am.atgc_enabled) 2905 return; 2906 2907 f2fs_down_read(&SM_I(sbi)->curseg_lock); 2908 2909 mutex_lock(&curseg->curseg_mutex); 2910 down_write(&SIT_I(sbi)->sentry_lock); 2911 2912 get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC, CURSEG_COLD_DATA, SSR, 0); 2913 2914 up_write(&SIT_I(sbi)->sentry_lock); 2915 mutex_unlock(&curseg->curseg_mutex); 2916 2917 f2fs_up_read(&SM_I(sbi)->curseg_lock); 2918 2919 } 2920 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi) 2921 { 2922 __f2fs_init_atgc_curseg(sbi); 2923 } 2924 2925 static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type) 2926 { 2927 struct curseg_info *curseg = CURSEG_I(sbi, type); 2928 2929 mutex_lock(&curseg->curseg_mutex); 2930 if (!curseg->inited) 2931 goto out; 2932 2933 if (get_valid_blocks(sbi, curseg->segno, false)) { 2934 write_sum_page(sbi, curseg->sum_blk, 2935 GET_SUM_BLOCK(sbi, curseg->segno)); 2936 } else { 2937 mutex_lock(&DIRTY_I(sbi)->seglist_lock); 2938 __set_test_and_free(sbi, curseg->segno, true); 2939 mutex_unlock(&DIRTY_I(sbi)->seglist_lock); 2940 } 2941 out: 2942 mutex_unlock(&curseg->curseg_mutex); 2943 } 2944 2945 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi) 2946 { 2947 __f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED); 2948 2949 if (sbi->am.atgc_enabled) 2950 __f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC); 2951 } 2952 2953 static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type) 2954 { 2955 struct curseg_info *curseg = CURSEG_I(sbi, type); 2956 2957 mutex_lock(&curseg->curseg_mutex); 2958 if (!curseg->inited) 2959 goto out; 2960 if (get_valid_blocks(sbi, curseg->segno, false)) 2961 goto out; 2962 2963 mutex_lock(&DIRTY_I(sbi)->seglist_lock); 2964 __set_test_and_inuse(sbi, curseg->segno); 2965 mutex_unlock(&DIRTY_I(sbi)->seglist_lock); 2966 out: 2967 mutex_unlock(&curseg->curseg_mutex); 2968 } 2969 2970 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi) 2971 { 2972 __f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED); 2973 2974 if (sbi->am.atgc_enabled) 2975 __f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC); 2976 } 2977 2978 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type, 2979 int alloc_mode, unsigned long long age) 2980 { 2981 struct curseg_info *curseg = CURSEG_I(sbi, type); 2982 unsigned segno = NULL_SEGNO; 2983 unsigned short seg_type = curseg->seg_type; 2984 int i, cnt; 2985 bool reversed = false; 2986 2987 sanity_check_seg_type(sbi, seg_type); 2988 2989 /* f2fs_need_SSR() already forces to do this */ 2990 if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) { 2991 curseg->next_segno = segno; 2992 return 1; 2993 } 2994 2995 /* For node segments, let's do SSR more intensively */ 2996 if (IS_NODESEG(seg_type)) { 2997 if (seg_type >= CURSEG_WARM_NODE) { 2998 reversed = true; 2999 i = CURSEG_COLD_NODE; 3000 } else { 3001 i = CURSEG_HOT_NODE; 3002 } 3003 cnt = NR_CURSEG_NODE_TYPE; 3004 } else { 3005 if (seg_type >= CURSEG_WARM_DATA) { 3006 reversed = true; 3007 i = CURSEG_COLD_DATA; 3008 } else { 3009 i = CURSEG_HOT_DATA; 3010 } 3011 cnt = NR_CURSEG_DATA_TYPE; 3012 } 3013 3014 for (; cnt-- > 0; reversed ? i-- : i++) { 3015 if (i == seg_type) 3016 continue; 3017 if (!f2fs_get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) { 3018 curseg->next_segno = segno; 3019 return 1; 3020 } 3021 } 3022 3023 /* find valid_blocks=0 in dirty list */ 3024 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 3025 segno = get_free_segment(sbi); 3026 if (segno != NULL_SEGNO) { 3027 curseg->next_segno = segno; 3028 return 1; 3029 } 3030 } 3031 return 0; 3032 } 3033 3034 static bool need_new_seg(struct f2fs_sb_info *sbi, int type) 3035 { 3036 struct curseg_info *curseg = CURSEG_I(sbi, type); 3037 3038 if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) && 3039 curseg->seg_type == CURSEG_WARM_NODE) 3040 return true; 3041 if (curseg->alloc_type == LFS && 3042 is_next_segment_free(sbi, curseg, type) && 3043 likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 3044 return true; 3045 if (!f2fs_need_SSR(sbi) || !get_ssr_segment(sbi, type, SSR, 0)) 3046 return true; 3047 return false; 3048 } 3049 3050 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, 3051 unsigned int start, unsigned int end) 3052 { 3053 struct curseg_info *curseg = CURSEG_I(sbi, type); 3054 unsigned int segno; 3055 3056 f2fs_down_read(&SM_I(sbi)->curseg_lock); 3057 mutex_lock(&curseg->curseg_mutex); 3058 down_write(&SIT_I(sbi)->sentry_lock); 3059 3060 segno = CURSEG_I(sbi, type)->segno; 3061 if (segno < start || segno > end) 3062 goto unlock; 3063 3064 if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0)) 3065 change_curseg(sbi, type); 3066 else 3067 new_curseg(sbi, type, true); 3068 3069 stat_inc_seg_type(sbi, curseg); 3070 3071 locate_dirty_segment(sbi, segno); 3072 unlock: 3073 up_write(&SIT_I(sbi)->sentry_lock); 3074 3075 if (segno != curseg->segno) 3076 f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u", 3077 type, segno, curseg->segno); 3078 3079 mutex_unlock(&curseg->curseg_mutex); 3080 f2fs_up_read(&SM_I(sbi)->curseg_lock); 3081 } 3082 3083 static int __allocate_new_segment(struct f2fs_sb_info *sbi, int type, 3084 bool new_sec, bool force) 3085 { 3086 struct curseg_info *curseg = CURSEG_I(sbi, type); 3087 unsigned int old_segno; 3088 3089 if (!force && curseg->inited && 3090 !curseg->next_blkoff && 3091 !get_valid_blocks(sbi, curseg->segno, new_sec) && 3092 !get_ckpt_valid_blocks(sbi, curseg->segno, new_sec)) 3093 return 0; 3094 3095 old_segno = curseg->segno; 3096 if (new_curseg(sbi, type, true)) 3097 return -EAGAIN; 3098 stat_inc_seg_type(sbi, curseg); 3099 locate_dirty_segment(sbi, old_segno); 3100 return 0; 3101 } 3102 3103 int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force) 3104 { 3105 int ret; 3106 3107 f2fs_down_read(&SM_I(sbi)->curseg_lock); 3108 down_write(&SIT_I(sbi)->sentry_lock); 3109 ret = __allocate_new_segment(sbi, type, true, force); 3110 up_write(&SIT_I(sbi)->sentry_lock); 3111 f2fs_up_read(&SM_I(sbi)->curseg_lock); 3112 3113 return ret; 3114 } 3115 3116 int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi) 3117 { 3118 int err; 3119 bool gc_required = true; 3120 3121 retry: 3122 f2fs_lock_op(sbi); 3123 err = f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false); 3124 f2fs_unlock_op(sbi); 3125 3126 if (f2fs_sb_has_blkzoned(sbi) && err && gc_required) { 3127 f2fs_down_write(&sbi->gc_lock); 3128 f2fs_gc_range(sbi, 0, GET_SEGNO(sbi, FDEV(0).end_blk), true, 1); 3129 f2fs_up_write(&sbi->gc_lock); 3130 3131 gc_required = false; 3132 goto retry; 3133 } 3134 3135 return err; 3136 } 3137 3138 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi) 3139 { 3140 int i; 3141 3142 f2fs_down_read(&SM_I(sbi)->curseg_lock); 3143 down_write(&SIT_I(sbi)->sentry_lock); 3144 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) 3145 __allocate_new_segment(sbi, i, false, false); 3146 up_write(&SIT_I(sbi)->sentry_lock); 3147 f2fs_up_read(&SM_I(sbi)->curseg_lock); 3148 } 3149 3150 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, 3151 struct cp_control *cpc) 3152 { 3153 __u64 trim_start = cpc->trim_start; 3154 bool has_candidate = false; 3155 3156 down_write(&SIT_I(sbi)->sentry_lock); 3157 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) { 3158 if (add_discard_addrs(sbi, cpc, true)) { 3159 has_candidate = true; 3160 break; 3161 } 3162 } 3163 up_write(&SIT_I(sbi)->sentry_lock); 3164 3165 cpc->trim_start = trim_start; 3166 return has_candidate; 3167 } 3168 3169 static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi, 3170 struct discard_policy *dpolicy, 3171 unsigned int start, unsigned int end) 3172 { 3173 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 3174 struct discard_cmd *prev_dc = NULL, *next_dc = NULL; 3175 struct rb_node **insert_p = NULL, *insert_parent = NULL; 3176 struct discard_cmd *dc; 3177 struct blk_plug plug; 3178 int issued; 3179 unsigned int trimmed = 0; 3180 3181 next: 3182 issued = 0; 3183 3184 mutex_lock(&dcc->cmd_lock); 3185 if (unlikely(dcc->rbtree_check)) 3186 f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi)); 3187 3188 dc = __lookup_discard_cmd_ret(&dcc->root, start, 3189 &prev_dc, &next_dc, &insert_p, &insert_parent); 3190 if (!dc) 3191 dc = next_dc; 3192 3193 blk_start_plug(&plug); 3194 3195 while (dc && dc->di.lstart <= end) { 3196 struct rb_node *node; 3197 int err = 0; 3198 3199 if (dc->di.len < dpolicy->granularity) 3200 goto skip; 3201 3202 if (dc->state != D_PREP) { 3203 list_move_tail(&dc->list, &dcc->fstrim_list); 3204 goto skip; 3205 } 3206 3207 err = __submit_discard_cmd(sbi, dpolicy, dc, &issued); 3208 3209 if (issued >= dpolicy->max_requests) { 3210 start = dc->di.lstart + dc->di.len; 3211 3212 if (err) 3213 __remove_discard_cmd(sbi, dc); 3214 3215 blk_finish_plug(&plug); 3216 mutex_unlock(&dcc->cmd_lock); 3217 trimmed += __wait_all_discard_cmd(sbi, NULL); 3218 f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT); 3219 goto next; 3220 } 3221 skip: 3222 node = rb_next(&dc->rb_node); 3223 if (err) 3224 __remove_discard_cmd(sbi, dc); 3225 dc = rb_entry_safe(node, struct discard_cmd, rb_node); 3226 3227 if (fatal_signal_pending(current)) 3228 break; 3229 } 3230 3231 blk_finish_plug(&plug); 3232 mutex_unlock(&dcc->cmd_lock); 3233 3234 return trimmed; 3235 } 3236 3237 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) 3238 { 3239 __u64 start = F2FS_BYTES_TO_BLK(range->start); 3240 __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1; 3241 unsigned int start_segno, end_segno; 3242 block_t start_block, end_block; 3243 struct cp_control cpc; 3244 struct discard_policy dpolicy; 3245 unsigned long long trimmed = 0; 3246 int err = 0; 3247 bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi); 3248 3249 if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize) 3250 return -EINVAL; 3251 3252 if (end < MAIN_BLKADDR(sbi)) 3253 goto out; 3254 3255 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) { 3256 f2fs_warn(sbi, "Found FS corruption, run fsck to fix."); 3257 return -EFSCORRUPTED; 3258 } 3259 3260 /* start/end segment number in main_area */ 3261 start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start); 3262 end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 : 3263 GET_SEGNO(sbi, end); 3264 if (need_align) { 3265 start_segno = rounddown(start_segno, SEGS_PER_SEC(sbi)); 3266 end_segno = roundup(end_segno + 1, SEGS_PER_SEC(sbi)) - 1; 3267 } 3268 3269 cpc.reason = CP_DISCARD; 3270 cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen)); 3271 cpc.trim_start = start_segno; 3272 cpc.trim_end = end_segno; 3273 3274 if (sbi->discard_blks == 0) 3275 goto out; 3276 3277 f2fs_down_write(&sbi->gc_lock); 3278 stat_inc_cp_call_count(sbi, TOTAL_CALL); 3279 err = f2fs_write_checkpoint(sbi, &cpc); 3280 f2fs_up_write(&sbi->gc_lock); 3281 if (err) 3282 goto out; 3283 3284 /* 3285 * We filed discard candidates, but actually we don't need to wait for 3286 * all of them, since they'll be issued in idle time along with runtime 3287 * discard option. User configuration looks like using runtime discard 3288 * or periodic fstrim instead of it. 3289 */ 3290 if (f2fs_realtime_discard_enable(sbi)) 3291 goto out; 3292 3293 start_block = START_BLOCK(sbi, start_segno); 3294 end_block = START_BLOCK(sbi, end_segno + 1); 3295 3296 __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen); 3297 trimmed = __issue_discard_cmd_range(sbi, &dpolicy, 3298 start_block, end_block); 3299 3300 trimmed += __wait_discard_cmd_range(sbi, &dpolicy, 3301 start_block, end_block); 3302 out: 3303 if (!err) 3304 range->len = F2FS_BLK_TO_BYTES(trimmed); 3305 return err; 3306 } 3307 3308 int f2fs_rw_hint_to_seg_type(enum rw_hint hint) 3309 { 3310 switch (hint) { 3311 case WRITE_LIFE_SHORT: 3312 return CURSEG_HOT_DATA; 3313 case WRITE_LIFE_EXTREME: 3314 return CURSEG_COLD_DATA; 3315 default: 3316 return CURSEG_WARM_DATA; 3317 } 3318 } 3319 3320 static int __get_segment_type_2(struct f2fs_io_info *fio) 3321 { 3322 if (fio->type == DATA) 3323 return CURSEG_HOT_DATA; 3324 else 3325 return CURSEG_HOT_NODE; 3326 } 3327 3328 static int __get_segment_type_4(struct f2fs_io_info *fio) 3329 { 3330 if (fio->type == DATA) { 3331 struct inode *inode = fio->page->mapping->host; 3332 3333 if (S_ISDIR(inode->i_mode)) 3334 return CURSEG_HOT_DATA; 3335 else 3336 return CURSEG_COLD_DATA; 3337 } else { 3338 if (IS_DNODE(fio->page) && is_cold_node(fio->page)) 3339 return CURSEG_WARM_NODE; 3340 else 3341 return CURSEG_COLD_NODE; 3342 } 3343 } 3344 3345 static int __get_age_segment_type(struct inode *inode, pgoff_t pgofs) 3346 { 3347 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3348 struct extent_info ei = {}; 3349 3350 if (f2fs_lookup_age_extent_cache(inode, pgofs, &ei)) { 3351 if (!ei.age) 3352 return NO_CHECK_TYPE; 3353 if (ei.age <= sbi->hot_data_age_threshold) 3354 return CURSEG_HOT_DATA; 3355 if (ei.age <= sbi->warm_data_age_threshold) 3356 return CURSEG_WARM_DATA; 3357 return CURSEG_COLD_DATA; 3358 } 3359 return NO_CHECK_TYPE; 3360 } 3361 3362 static int __get_segment_type_6(struct f2fs_io_info *fio) 3363 { 3364 if (fio->type == DATA) { 3365 struct inode *inode = fio->page->mapping->host; 3366 int type; 3367 3368 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE)) 3369 return CURSEG_COLD_DATA_PINNED; 3370 3371 if (page_private_gcing(fio->page)) { 3372 if (fio->sbi->am.atgc_enabled && 3373 (fio->io_type == FS_DATA_IO) && 3374 (fio->sbi->gc_mode != GC_URGENT_HIGH) && 3375 __is_valid_data_blkaddr(fio->old_blkaddr) && 3376 !is_inode_flag_set(inode, FI_OPU_WRITE)) 3377 return CURSEG_ALL_DATA_ATGC; 3378 else 3379 return CURSEG_COLD_DATA; 3380 } 3381 if (file_is_cold(inode) || f2fs_need_compress_data(inode)) 3382 return CURSEG_COLD_DATA; 3383 3384 type = __get_age_segment_type(inode, fio->page->index); 3385 if (type != NO_CHECK_TYPE) 3386 return type; 3387 3388 if (file_is_hot(inode) || 3389 is_inode_flag_set(inode, FI_HOT_DATA) || 3390 f2fs_is_cow_file(inode)) 3391 return CURSEG_HOT_DATA; 3392 return f2fs_rw_hint_to_seg_type(inode->i_write_hint); 3393 } else { 3394 if (IS_DNODE(fio->page)) 3395 return is_cold_node(fio->page) ? CURSEG_WARM_NODE : 3396 CURSEG_HOT_NODE; 3397 return CURSEG_COLD_NODE; 3398 } 3399 } 3400 3401 static int __get_segment_type(struct f2fs_io_info *fio) 3402 { 3403 int type = 0; 3404 3405 switch (F2FS_OPTION(fio->sbi).active_logs) { 3406 case 2: 3407 type = __get_segment_type_2(fio); 3408 break; 3409 case 4: 3410 type = __get_segment_type_4(fio); 3411 break; 3412 case 6: 3413 type = __get_segment_type_6(fio); 3414 break; 3415 default: 3416 f2fs_bug_on(fio->sbi, true); 3417 } 3418 3419 if (IS_HOT(type)) 3420 fio->temp = HOT; 3421 else if (IS_WARM(type)) 3422 fio->temp = WARM; 3423 else 3424 fio->temp = COLD; 3425 return type; 3426 } 3427 3428 static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi, 3429 struct curseg_info *seg) 3430 { 3431 /* To allocate block chunks in different sizes, use random number */ 3432 if (--seg->fragment_remained_chunk > 0) 3433 return; 3434 3435 seg->fragment_remained_chunk = 3436 get_random_u32_inclusive(1, sbi->max_fragment_chunk); 3437 seg->next_blkoff += 3438 get_random_u32_inclusive(1, sbi->max_fragment_hole); 3439 } 3440 3441 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, 3442 block_t old_blkaddr, block_t *new_blkaddr, 3443 struct f2fs_summary *sum, int type, 3444 struct f2fs_io_info *fio) 3445 { 3446 struct sit_info *sit_i = SIT_I(sbi); 3447 struct curseg_info *curseg = CURSEG_I(sbi, type); 3448 unsigned long long old_mtime; 3449 bool from_gc = (type == CURSEG_ALL_DATA_ATGC); 3450 struct seg_entry *se = NULL; 3451 bool segment_full = false; 3452 3453 f2fs_down_read(&SM_I(sbi)->curseg_lock); 3454 3455 mutex_lock(&curseg->curseg_mutex); 3456 down_write(&sit_i->sentry_lock); 3457 3458 if (from_gc) { 3459 f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO); 3460 se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr)); 3461 sanity_check_seg_type(sbi, se->type); 3462 f2fs_bug_on(sbi, IS_NODESEG(se->type)); 3463 } 3464 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); 3465 3466 f2fs_bug_on(sbi, curseg->next_blkoff >= BLKS_PER_SEG(sbi)); 3467 3468 f2fs_wait_discard_bio(sbi, *new_blkaddr); 3469 3470 curseg->sum_blk->entries[curseg->next_blkoff] = *sum; 3471 if (curseg->alloc_type == SSR) { 3472 curseg->next_blkoff = f2fs_find_next_ssr_block(sbi, curseg); 3473 } else { 3474 curseg->next_blkoff++; 3475 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) 3476 f2fs_randomize_chunk(sbi, curseg); 3477 } 3478 if (curseg->next_blkoff >= f2fs_usable_blks_in_seg(sbi, curseg->segno)) 3479 segment_full = true; 3480 stat_inc_block_count(sbi, curseg); 3481 3482 if (from_gc) { 3483 old_mtime = get_segment_mtime(sbi, old_blkaddr); 3484 } else { 3485 update_segment_mtime(sbi, old_blkaddr, 0); 3486 old_mtime = 0; 3487 } 3488 update_segment_mtime(sbi, *new_blkaddr, old_mtime); 3489 3490 /* 3491 * SIT information should be updated before segment allocation, 3492 * since SSR needs latest valid block information. 3493 */ 3494 update_sit_entry(sbi, *new_blkaddr, 1); 3495 update_sit_entry(sbi, old_blkaddr, -1); 3496 3497 /* 3498 * If the current segment is full, flush it out and replace it with a 3499 * new segment. 3500 */ 3501 if (segment_full) { 3502 if (type == CURSEG_COLD_DATA_PINNED && 3503 !((curseg->segno + 1) % sbi->segs_per_sec)) { 3504 write_sum_page(sbi, curseg->sum_blk, 3505 GET_SUM_BLOCK(sbi, curseg->segno)); 3506 goto skip_new_segment; 3507 } 3508 3509 if (from_gc) { 3510 get_atssr_segment(sbi, type, se->type, 3511 AT_SSR, se->mtime); 3512 } else { 3513 if (need_new_seg(sbi, type)) 3514 new_curseg(sbi, type, false); 3515 else 3516 change_curseg(sbi, type); 3517 stat_inc_seg_type(sbi, curseg); 3518 } 3519 } 3520 3521 skip_new_segment: 3522 /* 3523 * segment dirty status should be updated after segment allocation, 3524 * so we just need to update status only one time after previous 3525 * segment being closed. 3526 */ 3527 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); 3528 locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr)); 3529 3530 if (IS_DATASEG(curseg->seg_type)) 3531 atomic64_inc(&sbi->allocated_data_blocks); 3532 3533 up_write(&sit_i->sentry_lock); 3534 3535 if (page && IS_NODESEG(curseg->seg_type)) { 3536 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg)); 3537 3538 f2fs_inode_chksum_set(sbi, page); 3539 } 3540 3541 if (fio) { 3542 struct f2fs_bio_info *io; 3543 3544 INIT_LIST_HEAD(&fio->list); 3545 fio->in_list = 1; 3546 io = sbi->write_io[fio->type] + fio->temp; 3547 spin_lock(&io->io_lock); 3548 list_add_tail(&fio->list, &io->io_list); 3549 spin_unlock(&io->io_lock); 3550 } 3551 3552 mutex_unlock(&curseg->curseg_mutex); 3553 3554 f2fs_up_read(&SM_I(sbi)->curseg_lock); 3555 } 3556 3557 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino, 3558 block_t blkaddr, unsigned int blkcnt) 3559 { 3560 if (!f2fs_is_multi_device(sbi)) 3561 return; 3562 3563 while (1) { 3564 unsigned int devidx = f2fs_target_device_index(sbi, blkaddr); 3565 unsigned int blks = FDEV(devidx).end_blk - blkaddr + 1; 3566 3567 /* update device state for fsync */ 3568 f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO); 3569 3570 /* update device state for checkpoint */ 3571 if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) { 3572 spin_lock(&sbi->dev_lock); 3573 f2fs_set_bit(devidx, (char *)&sbi->dirty_device); 3574 spin_unlock(&sbi->dev_lock); 3575 } 3576 3577 if (blkcnt <= blks) 3578 break; 3579 blkcnt -= blks; 3580 blkaddr += blks; 3581 } 3582 } 3583 3584 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio) 3585 { 3586 int type = __get_segment_type(fio); 3587 bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA); 3588 3589 if (keep_order) 3590 f2fs_down_read(&fio->sbi->io_order_lock); 3591 3592 f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr, 3593 &fio->new_blkaddr, sum, type, fio); 3594 if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) 3595 f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr); 3596 3597 /* writeout dirty page into bdev */ 3598 f2fs_submit_page_write(fio); 3599 3600 f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1); 3601 3602 if (keep_order) 3603 f2fs_up_read(&fio->sbi->io_order_lock); 3604 } 3605 3606 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page, 3607 enum iostat_type io_type) 3608 { 3609 struct f2fs_io_info fio = { 3610 .sbi = sbi, 3611 .type = META, 3612 .temp = HOT, 3613 .op = REQ_OP_WRITE, 3614 .op_flags = REQ_SYNC | REQ_META | REQ_PRIO, 3615 .old_blkaddr = page->index, 3616 .new_blkaddr = page->index, 3617 .page = page, 3618 .encrypted_page = NULL, 3619 .in_list = 0, 3620 }; 3621 3622 if (unlikely(page->index >= MAIN_BLKADDR(sbi))) 3623 fio.op_flags &= ~REQ_META; 3624 3625 set_page_writeback(page); 3626 f2fs_submit_page_write(&fio); 3627 3628 stat_inc_meta_count(sbi, page->index); 3629 f2fs_update_iostat(sbi, NULL, io_type, F2FS_BLKSIZE); 3630 } 3631 3632 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio) 3633 { 3634 struct f2fs_summary sum; 3635 3636 set_summary(&sum, nid, 0, 0); 3637 do_write_page(&sum, fio); 3638 3639 f2fs_update_iostat(fio->sbi, NULL, fio->io_type, F2FS_BLKSIZE); 3640 } 3641 3642 void f2fs_outplace_write_data(struct dnode_of_data *dn, 3643 struct f2fs_io_info *fio) 3644 { 3645 struct f2fs_sb_info *sbi = fio->sbi; 3646 struct f2fs_summary sum; 3647 3648 f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR); 3649 if (fio->io_type == FS_DATA_IO || fio->io_type == FS_CP_DATA_IO) 3650 f2fs_update_age_extent_cache(dn); 3651 set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version); 3652 do_write_page(&sum, fio); 3653 f2fs_update_data_blkaddr(dn, fio->new_blkaddr); 3654 3655 f2fs_update_iostat(sbi, dn->inode, fio->io_type, F2FS_BLKSIZE); 3656 } 3657 3658 int f2fs_inplace_write_data(struct f2fs_io_info *fio) 3659 { 3660 int err; 3661 struct f2fs_sb_info *sbi = fio->sbi; 3662 unsigned int segno; 3663 3664 fio->new_blkaddr = fio->old_blkaddr; 3665 /* i/o temperature is needed for passing down write hints */ 3666 __get_segment_type(fio); 3667 3668 segno = GET_SEGNO(sbi, fio->new_blkaddr); 3669 3670 if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) { 3671 set_sbi_flag(sbi, SBI_NEED_FSCK); 3672 f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.", 3673 __func__, segno); 3674 err = -EFSCORRUPTED; 3675 f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE); 3676 goto drop_bio; 3677 } 3678 3679 if (f2fs_cp_error(sbi)) { 3680 err = -EIO; 3681 goto drop_bio; 3682 } 3683 3684 if (fio->meta_gc) 3685 f2fs_truncate_meta_inode_pages(sbi, fio->new_blkaddr, 1); 3686 3687 stat_inc_inplace_blocks(fio->sbi); 3688 3689 if (fio->bio && !IS_F2FS_IPU_NOCACHE(sbi)) 3690 err = f2fs_merge_page_bio(fio); 3691 else 3692 err = f2fs_submit_page_bio(fio); 3693 if (!err) { 3694 f2fs_update_device_state(fio->sbi, fio->ino, 3695 fio->new_blkaddr, 1); 3696 f2fs_update_iostat(fio->sbi, fio->page->mapping->host, 3697 fio->io_type, F2FS_BLKSIZE); 3698 } 3699 3700 return err; 3701 drop_bio: 3702 if (fio->bio && *(fio->bio)) { 3703 struct bio *bio = *(fio->bio); 3704 3705 bio->bi_status = BLK_STS_IOERR; 3706 bio_endio(bio); 3707 *(fio->bio) = NULL; 3708 } 3709 return err; 3710 } 3711 3712 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi, 3713 unsigned int segno) 3714 { 3715 int i; 3716 3717 for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) { 3718 if (CURSEG_I(sbi, i)->segno == segno) 3719 break; 3720 } 3721 return i; 3722 } 3723 3724 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, 3725 block_t old_blkaddr, block_t new_blkaddr, 3726 bool recover_curseg, bool recover_newaddr, 3727 bool from_gc) 3728 { 3729 struct sit_info *sit_i = SIT_I(sbi); 3730 struct curseg_info *curseg; 3731 unsigned int segno, old_cursegno; 3732 struct seg_entry *se; 3733 int type; 3734 unsigned short old_blkoff; 3735 unsigned char old_alloc_type; 3736 3737 segno = GET_SEGNO(sbi, new_blkaddr); 3738 se = get_seg_entry(sbi, segno); 3739 type = se->type; 3740 3741 f2fs_down_write(&SM_I(sbi)->curseg_lock); 3742 3743 if (!recover_curseg) { 3744 /* for recovery flow */ 3745 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) { 3746 if (old_blkaddr == NULL_ADDR) 3747 type = CURSEG_COLD_DATA; 3748 else 3749 type = CURSEG_WARM_DATA; 3750 } 3751 } else { 3752 if (IS_CURSEG(sbi, segno)) { 3753 /* se->type is volatile as SSR allocation */ 3754 type = __f2fs_get_curseg(sbi, segno); 3755 f2fs_bug_on(sbi, type == NO_CHECK_TYPE); 3756 } else { 3757 type = CURSEG_WARM_DATA; 3758 } 3759 } 3760 3761 curseg = CURSEG_I(sbi, type); 3762 f2fs_bug_on(sbi, !IS_DATASEG(curseg->seg_type)); 3763 3764 mutex_lock(&curseg->curseg_mutex); 3765 down_write(&sit_i->sentry_lock); 3766 3767 old_cursegno = curseg->segno; 3768 old_blkoff = curseg->next_blkoff; 3769 old_alloc_type = curseg->alloc_type; 3770 3771 /* change the current segment */ 3772 if (segno != curseg->segno) { 3773 curseg->next_segno = segno; 3774 change_curseg(sbi, type); 3775 } 3776 3777 curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr); 3778 curseg->sum_blk->entries[curseg->next_blkoff] = *sum; 3779 3780 if (!recover_curseg || recover_newaddr) { 3781 if (!from_gc) 3782 update_segment_mtime(sbi, new_blkaddr, 0); 3783 update_sit_entry(sbi, new_blkaddr, 1); 3784 } 3785 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) { 3786 f2fs_invalidate_internal_cache(sbi, old_blkaddr); 3787 if (!from_gc) 3788 update_segment_mtime(sbi, old_blkaddr, 0); 3789 update_sit_entry(sbi, old_blkaddr, -1); 3790 } 3791 3792 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); 3793 locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr)); 3794 3795 locate_dirty_segment(sbi, old_cursegno); 3796 3797 if (recover_curseg) { 3798 if (old_cursegno != curseg->segno) { 3799 curseg->next_segno = old_cursegno; 3800 change_curseg(sbi, type); 3801 } 3802 curseg->next_blkoff = old_blkoff; 3803 curseg->alloc_type = old_alloc_type; 3804 } 3805 3806 up_write(&sit_i->sentry_lock); 3807 mutex_unlock(&curseg->curseg_mutex); 3808 f2fs_up_write(&SM_I(sbi)->curseg_lock); 3809 } 3810 3811 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, 3812 block_t old_addr, block_t new_addr, 3813 unsigned char version, bool recover_curseg, 3814 bool recover_newaddr) 3815 { 3816 struct f2fs_summary sum; 3817 3818 set_summary(&sum, dn->nid, dn->ofs_in_node, version); 3819 3820 f2fs_do_replace_block(sbi, &sum, old_addr, new_addr, 3821 recover_curseg, recover_newaddr, false); 3822 3823 f2fs_update_data_blkaddr(dn, new_addr); 3824 } 3825 3826 void f2fs_wait_on_page_writeback(struct page *page, 3827 enum page_type type, bool ordered, bool locked) 3828 { 3829 if (PageWriteback(page)) { 3830 struct f2fs_sb_info *sbi = F2FS_P_SB(page); 3831 3832 /* submit cached LFS IO */ 3833 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type); 3834 /* submit cached IPU IO */ 3835 f2fs_submit_merged_ipu_write(sbi, NULL, page); 3836 if (ordered) { 3837 wait_on_page_writeback(page); 3838 f2fs_bug_on(sbi, locked && PageWriteback(page)); 3839 } else { 3840 wait_for_stable_page(page); 3841 } 3842 } 3843 } 3844 3845 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr) 3846 { 3847 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3848 struct page *cpage; 3849 3850 if (!f2fs_meta_inode_gc_required(inode)) 3851 return; 3852 3853 if (!__is_valid_data_blkaddr(blkaddr)) 3854 return; 3855 3856 cpage = find_lock_page(META_MAPPING(sbi), blkaddr); 3857 if (cpage) { 3858 f2fs_wait_on_page_writeback(cpage, DATA, true, true); 3859 f2fs_put_page(cpage, 1); 3860 } 3861 } 3862 3863 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr, 3864 block_t len) 3865 { 3866 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3867 block_t i; 3868 3869 if (!f2fs_meta_inode_gc_required(inode)) 3870 return; 3871 3872 for (i = 0; i < len; i++) 3873 f2fs_wait_on_block_writeback(inode, blkaddr + i); 3874 3875 f2fs_truncate_meta_inode_pages(sbi, blkaddr, len); 3876 } 3877 3878 static int read_compacted_summaries(struct f2fs_sb_info *sbi) 3879 { 3880 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 3881 struct curseg_info *seg_i; 3882 unsigned char *kaddr; 3883 struct page *page; 3884 block_t start; 3885 int i, j, offset; 3886 3887 start = start_sum_block(sbi); 3888 3889 page = f2fs_get_meta_page(sbi, start++); 3890 if (IS_ERR(page)) 3891 return PTR_ERR(page); 3892 kaddr = (unsigned char *)page_address(page); 3893 3894 /* Step 1: restore nat cache */ 3895 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); 3896 memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE); 3897 3898 /* Step 2: restore sit cache */ 3899 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); 3900 memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE); 3901 offset = 2 * SUM_JOURNAL_SIZE; 3902 3903 /* Step 3: restore summary entries */ 3904 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 3905 unsigned short blk_off; 3906 unsigned int segno; 3907 3908 seg_i = CURSEG_I(sbi, i); 3909 segno = le32_to_cpu(ckpt->cur_data_segno[i]); 3910 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]); 3911 seg_i->next_segno = segno; 3912 reset_curseg(sbi, i, 0); 3913 seg_i->alloc_type = ckpt->alloc_type[i]; 3914 seg_i->next_blkoff = blk_off; 3915 3916 if (seg_i->alloc_type == SSR) 3917 blk_off = BLKS_PER_SEG(sbi); 3918 3919 for (j = 0; j < blk_off; j++) { 3920 struct f2fs_summary *s; 3921 3922 s = (struct f2fs_summary *)(kaddr + offset); 3923 seg_i->sum_blk->entries[j] = *s; 3924 offset += SUMMARY_SIZE; 3925 if (offset + SUMMARY_SIZE <= PAGE_SIZE - 3926 SUM_FOOTER_SIZE) 3927 continue; 3928 3929 f2fs_put_page(page, 1); 3930 page = NULL; 3931 3932 page = f2fs_get_meta_page(sbi, start++); 3933 if (IS_ERR(page)) 3934 return PTR_ERR(page); 3935 kaddr = (unsigned char *)page_address(page); 3936 offset = 0; 3937 } 3938 } 3939 f2fs_put_page(page, 1); 3940 return 0; 3941 } 3942 3943 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) 3944 { 3945 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 3946 struct f2fs_summary_block *sum; 3947 struct curseg_info *curseg; 3948 struct page *new; 3949 unsigned short blk_off; 3950 unsigned int segno = 0; 3951 block_t blk_addr = 0; 3952 int err = 0; 3953 3954 /* get segment number and block addr */ 3955 if (IS_DATASEG(type)) { 3956 segno = le32_to_cpu(ckpt->cur_data_segno[type]); 3957 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type - 3958 CURSEG_HOT_DATA]); 3959 if (__exist_node_summaries(sbi)) 3960 blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type); 3961 else 3962 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type); 3963 } else { 3964 segno = le32_to_cpu(ckpt->cur_node_segno[type - 3965 CURSEG_HOT_NODE]); 3966 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type - 3967 CURSEG_HOT_NODE]); 3968 if (__exist_node_summaries(sbi)) 3969 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE, 3970 type - CURSEG_HOT_NODE); 3971 else 3972 blk_addr = GET_SUM_BLOCK(sbi, segno); 3973 } 3974 3975 new = f2fs_get_meta_page(sbi, blk_addr); 3976 if (IS_ERR(new)) 3977 return PTR_ERR(new); 3978 sum = (struct f2fs_summary_block *)page_address(new); 3979 3980 if (IS_NODESEG(type)) { 3981 if (__exist_node_summaries(sbi)) { 3982 struct f2fs_summary *ns = &sum->entries[0]; 3983 int i; 3984 3985 for (i = 0; i < BLKS_PER_SEG(sbi); i++, ns++) { 3986 ns->version = 0; 3987 ns->ofs_in_node = 0; 3988 } 3989 } else { 3990 err = f2fs_restore_node_summary(sbi, segno, sum); 3991 if (err) 3992 goto out; 3993 } 3994 } 3995 3996 /* set uncompleted segment to curseg */ 3997 curseg = CURSEG_I(sbi, type); 3998 mutex_lock(&curseg->curseg_mutex); 3999 4000 /* update journal info */ 4001 down_write(&curseg->journal_rwsem); 4002 memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE); 4003 up_write(&curseg->journal_rwsem); 4004 4005 memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE); 4006 memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE); 4007 curseg->next_segno = segno; 4008 reset_curseg(sbi, type, 0); 4009 curseg->alloc_type = ckpt->alloc_type[type]; 4010 curseg->next_blkoff = blk_off; 4011 mutex_unlock(&curseg->curseg_mutex); 4012 out: 4013 f2fs_put_page(new, 1); 4014 return err; 4015 } 4016 4017 static int restore_curseg_summaries(struct f2fs_sb_info *sbi) 4018 { 4019 struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal; 4020 struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal; 4021 int type = CURSEG_HOT_DATA; 4022 int err; 4023 4024 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) { 4025 int npages = f2fs_npages_for_summary_flush(sbi, true); 4026 4027 if (npages >= 2) 4028 f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages, 4029 META_CP, true); 4030 4031 /* restore for compacted data summary */ 4032 err = read_compacted_summaries(sbi); 4033 if (err) 4034 return err; 4035 type = CURSEG_HOT_NODE; 4036 } 4037 4038 if (__exist_node_summaries(sbi)) 4039 f2fs_ra_meta_pages(sbi, 4040 sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type), 4041 NR_CURSEG_PERSIST_TYPE - type, META_CP, true); 4042 4043 for (; type <= CURSEG_COLD_NODE; type++) { 4044 err = read_normal_summaries(sbi, type); 4045 if (err) 4046 return err; 4047 } 4048 4049 /* sanity check for summary blocks */ 4050 if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES || 4051 sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) { 4052 f2fs_err(sbi, "invalid journal entries nats %u sits %u", 4053 nats_in_cursum(nat_j), sits_in_cursum(sit_j)); 4054 return -EINVAL; 4055 } 4056 4057 return 0; 4058 } 4059 4060 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr) 4061 { 4062 struct page *page; 4063 unsigned char *kaddr; 4064 struct f2fs_summary *summary; 4065 struct curseg_info *seg_i; 4066 int written_size = 0; 4067 int i, j; 4068 4069 page = f2fs_grab_meta_page(sbi, blkaddr++); 4070 kaddr = (unsigned char *)page_address(page); 4071 memset(kaddr, 0, PAGE_SIZE); 4072 4073 /* Step 1: write nat cache */ 4074 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); 4075 memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE); 4076 written_size += SUM_JOURNAL_SIZE; 4077 4078 /* Step 2: write sit cache */ 4079 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); 4080 memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE); 4081 written_size += SUM_JOURNAL_SIZE; 4082 4083 /* Step 3: write summary entries */ 4084 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 4085 seg_i = CURSEG_I(sbi, i); 4086 for (j = 0; j < f2fs_curseg_valid_blocks(sbi, i); j++) { 4087 if (!page) { 4088 page = f2fs_grab_meta_page(sbi, blkaddr++); 4089 kaddr = (unsigned char *)page_address(page); 4090 memset(kaddr, 0, PAGE_SIZE); 4091 written_size = 0; 4092 } 4093 summary = (struct f2fs_summary *)(kaddr + written_size); 4094 *summary = seg_i->sum_blk->entries[j]; 4095 written_size += SUMMARY_SIZE; 4096 4097 if (written_size + SUMMARY_SIZE <= PAGE_SIZE - 4098 SUM_FOOTER_SIZE) 4099 continue; 4100 4101 set_page_dirty(page); 4102 f2fs_put_page(page, 1); 4103 page = NULL; 4104 } 4105 } 4106 if (page) { 4107 set_page_dirty(page); 4108 f2fs_put_page(page, 1); 4109 } 4110 } 4111 4112 static void write_normal_summaries(struct f2fs_sb_info *sbi, 4113 block_t blkaddr, int type) 4114 { 4115 int i, end; 4116 4117 if (IS_DATASEG(type)) 4118 end = type + NR_CURSEG_DATA_TYPE; 4119 else 4120 end = type + NR_CURSEG_NODE_TYPE; 4121 4122 for (i = type; i < end; i++) 4123 write_current_sum_page(sbi, i, blkaddr + (i - type)); 4124 } 4125 4126 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk) 4127 { 4128 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) 4129 write_compacted_summaries(sbi, start_blk); 4130 else 4131 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA); 4132 } 4133 4134 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk) 4135 { 4136 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE); 4137 } 4138 4139 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type, 4140 unsigned int val, int alloc) 4141 { 4142 int i; 4143 4144 if (type == NAT_JOURNAL) { 4145 for (i = 0; i < nats_in_cursum(journal); i++) { 4146 if (le32_to_cpu(nid_in_journal(journal, i)) == val) 4147 return i; 4148 } 4149 if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL)) 4150 return update_nats_in_cursum(journal, 1); 4151 } else if (type == SIT_JOURNAL) { 4152 for (i = 0; i < sits_in_cursum(journal); i++) 4153 if (le32_to_cpu(segno_in_journal(journal, i)) == val) 4154 return i; 4155 if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL)) 4156 return update_sits_in_cursum(journal, 1); 4157 } 4158 return -1; 4159 } 4160 4161 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi, 4162 unsigned int segno) 4163 { 4164 return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno)); 4165 } 4166 4167 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi, 4168 unsigned int start) 4169 { 4170 struct sit_info *sit_i = SIT_I(sbi); 4171 struct page *page; 4172 pgoff_t src_off, dst_off; 4173 4174 src_off = current_sit_addr(sbi, start); 4175 dst_off = next_sit_addr(sbi, src_off); 4176 4177 page = f2fs_grab_meta_page(sbi, dst_off); 4178 seg_info_to_sit_page(sbi, page, start); 4179 4180 set_page_dirty(page); 4181 set_to_next_sit(sit_i, start); 4182 4183 return page; 4184 } 4185 4186 static struct sit_entry_set *grab_sit_entry_set(void) 4187 { 4188 struct sit_entry_set *ses = 4189 f2fs_kmem_cache_alloc(sit_entry_set_slab, 4190 GFP_NOFS, true, NULL); 4191 4192 ses->entry_cnt = 0; 4193 INIT_LIST_HEAD(&ses->set_list); 4194 return ses; 4195 } 4196 4197 static void release_sit_entry_set(struct sit_entry_set *ses) 4198 { 4199 list_del(&ses->set_list); 4200 kmem_cache_free(sit_entry_set_slab, ses); 4201 } 4202 4203 static void adjust_sit_entry_set(struct sit_entry_set *ses, 4204 struct list_head *head) 4205 { 4206 struct sit_entry_set *next = ses; 4207 4208 if (list_is_last(&ses->set_list, head)) 4209 return; 4210 4211 list_for_each_entry_continue(next, head, set_list) 4212 if (ses->entry_cnt <= next->entry_cnt) { 4213 list_move_tail(&ses->set_list, &next->set_list); 4214 return; 4215 } 4216 4217 list_move_tail(&ses->set_list, head); 4218 } 4219 4220 static void add_sit_entry(unsigned int segno, struct list_head *head) 4221 { 4222 struct sit_entry_set *ses; 4223 unsigned int start_segno = START_SEGNO(segno); 4224 4225 list_for_each_entry(ses, head, set_list) { 4226 if (ses->start_segno == start_segno) { 4227 ses->entry_cnt++; 4228 adjust_sit_entry_set(ses, head); 4229 return; 4230 } 4231 } 4232 4233 ses = grab_sit_entry_set(); 4234 4235 ses->start_segno = start_segno; 4236 ses->entry_cnt++; 4237 list_add(&ses->set_list, head); 4238 } 4239 4240 static void add_sits_in_set(struct f2fs_sb_info *sbi) 4241 { 4242 struct f2fs_sm_info *sm_info = SM_I(sbi); 4243 struct list_head *set_list = &sm_info->sit_entry_set; 4244 unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap; 4245 unsigned int segno; 4246 4247 for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi)) 4248 add_sit_entry(segno, set_list); 4249 } 4250 4251 static void remove_sits_in_journal(struct f2fs_sb_info *sbi) 4252 { 4253 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 4254 struct f2fs_journal *journal = curseg->journal; 4255 int i; 4256 4257 down_write(&curseg->journal_rwsem); 4258 for (i = 0; i < sits_in_cursum(journal); i++) { 4259 unsigned int segno; 4260 bool dirtied; 4261 4262 segno = le32_to_cpu(segno_in_journal(journal, i)); 4263 dirtied = __mark_sit_entry_dirty(sbi, segno); 4264 4265 if (!dirtied) 4266 add_sit_entry(segno, &SM_I(sbi)->sit_entry_set); 4267 } 4268 update_sits_in_cursum(journal, -i); 4269 up_write(&curseg->journal_rwsem); 4270 } 4271 4272 /* 4273 * CP calls this function, which flushes SIT entries including sit_journal, 4274 * and moves prefree segs to free segs. 4275 */ 4276 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) 4277 { 4278 struct sit_info *sit_i = SIT_I(sbi); 4279 unsigned long *bitmap = sit_i->dirty_sentries_bitmap; 4280 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 4281 struct f2fs_journal *journal = curseg->journal; 4282 struct sit_entry_set *ses, *tmp; 4283 struct list_head *head = &SM_I(sbi)->sit_entry_set; 4284 bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS); 4285 struct seg_entry *se; 4286 4287 down_write(&sit_i->sentry_lock); 4288 4289 if (!sit_i->dirty_sentries) 4290 goto out; 4291 4292 /* 4293 * add and account sit entries of dirty bitmap in sit entry 4294 * set temporarily 4295 */ 4296 add_sits_in_set(sbi); 4297 4298 /* 4299 * if there are no enough space in journal to store dirty sit 4300 * entries, remove all entries from journal and add and account 4301 * them in sit entry set. 4302 */ 4303 if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) || 4304 !to_journal) 4305 remove_sits_in_journal(sbi); 4306 4307 /* 4308 * there are two steps to flush sit entries: 4309 * #1, flush sit entries to journal in current cold data summary block. 4310 * #2, flush sit entries to sit page. 4311 */ 4312 list_for_each_entry_safe(ses, tmp, head, set_list) { 4313 struct page *page = NULL; 4314 struct f2fs_sit_block *raw_sit = NULL; 4315 unsigned int start_segno = ses->start_segno; 4316 unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK, 4317 (unsigned long)MAIN_SEGS(sbi)); 4318 unsigned int segno = start_segno; 4319 4320 if (to_journal && 4321 !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL)) 4322 to_journal = false; 4323 4324 if (to_journal) { 4325 down_write(&curseg->journal_rwsem); 4326 } else { 4327 page = get_next_sit_page(sbi, start_segno); 4328 raw_sit = page_address(page); 4329 } 4330 4331 /* flush dirty sit entries in region of current sit set */ 4332 for_each_set_bit_from(segno, bitmap, end) { 4333 int offset, sit_offset; 4334 4335 se = get_seg_entry(sbi, segno); 4336 #ifdef CONFIG_F2FS_CHECK_FS 4337 if (memcmp(se->cur_valid_map, se->cur_valid_map_mir, 4338 SIT_VBLOCK_MAP_SIZE)) 4339 f2fs_bug_on(sbi, 1); 4340 #endif 4341 4342 /* add discard candidates */ 4343 if (!(cpc->reason & CP_DISCARD)) { 4344 cpc->trim_start = segno; 4345 add_discard_addrs(sbi, cpc, false); 4346 } 4347 4348 if (to_journal) { 4349 offset = f2fs_lookup_journal_in_cursum(journal, 4350 SIT_JOURNAL, segno, 1); 4351 f2fs_bug_on(sbi, offset < 0); 4352 segno_in_journal(journal, offset) = 4353 cpu_to_le32(segno); 4354 seg_info_to_raw_sit(se, 4355 &sit_in_journal(journal, offset)); 4356 check_block_count(sbi, segno, 4357 &sit_in_journal(journal, offset)); 4358 } else { 4359 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno); 4360 seg_info_to_raw_sit(se, 4361 &raw_sit->entries[sit_offset]); 4362 check_block_count(sbi, segno, 4363 &raw_sit->entries[sit_offset]); 4364 } 4365 4366 __clear_bit(segno, bitmap); 4367 sit_i->dirty_sentries--; 4368 ses->entry_cnt--; 4369 } 4370 4371 if (to_journal) 4372 up_write(&curseg->journal_rwsem); 4373 else 4374 f2fs_put_page(page, 1); 4375 4376 f2fs_bug_on(sbi, ses->entry_cnt); 4377 release_sit_entry_set(ses); 4378 } 4379 4380 f2fs_bug_on(sbi, !list_empty(head)); 4381 f2fs_bug_on(sbi, sit_i->dirty_sentries); 4382 out: 4383 if (cpc->reason & CP_DISCARD) { 4384 __u64 trim_start = cpc->trim_start; 4385 4386 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) 4387 add_discard_addrs(sbi, cpc, false); 4388 4389 cpc->trim_start = trim_start; 4390 } 4391 up_write(&sit_i->sentry_lock); 4392 4393 set_prefree_as_free_segments(sbi); 4394 } 4395 4396 static int build_sit_info(struct f2fs_sb_info *sbi) 4397 { 4398 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 4399 struct sit_info *sit_i; 4400 unsigned int sit_segs, start; 4401 char *src_bitmap, *bitmap; 4402 unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size; 4403 unsigned int discard_map = f2fs_block_unit_discard(sbi) ? 1 : 0; 4404 4405 /* allocate memory for SIT information */ 4406 sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL); 4407 if (!sit_i) 4408 return -ENOMEM; 4409 4410 SM_I(sbi)->sit_info = sit_i; 4411 4412 sit_i->sentries = 4413 f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry), 4414 MAIN_SEGS(sbi)), 4415 GFP_KERNEL); 4416 if (!sit_i->sentries) 4417 return -ENOMEM; 4418 4419 main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); 4420 sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size, 4421 GFP_KERNEL); 4422 if (!sit_i->dirty_sentries_bitmap) 4423 return -ENOMEM; 4424 4425 #ifdef CONFIG_F2FS_CHECK_FS 4426 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (3 + discard_map); 4427 #else 4428 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (2 + discard_map); 4429 #endif 4430 sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL); 4431 if (!sit_i->bitmap) 4432 return -ENOMEM; 4433 4434 bitmap = sit_i->bitmap; 4435 4436 for (start = 0; start < MAIN_SEGS(sbi); start++) { 4437 sit_i->sentries[start].cur_valid_map = bitmap; 4438 bitmap += SIT_VBLOCK_MAP_SIZE; 4439 4440 sit_i->sentries[start].ckpt_valid_map = bitmap; 4441 bitmap += SIT_VBLOCK_MAP_SIZE; 4442 4443 #ifdef CONFIG_F2FS_CHECK_FS 4444 sit_i->sentries[start].cur_valid_map_mir = bitmap; 4445 bitmap += SIT_VBLOCK_MAP_SIZE; 4446 #endif 4447 4448 if (discard_map) { 4449 sit_i->sentries[start].discard_map = bitmap; 4450 bitmap += SIT_VBLOCK_MAP_SIZE; 4451 } 4452 } 4453 4454 sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); 4455 if (!sit_i->tmp_map) 4456 return -ENOMEM; 4457 4458 if (__is_large_section(sbi)) { 4459 sit_i->sec_entries = 4460 f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry), 4461 MAIN_SECS(sbi)), 4462 GFP_KERNEL); 4463 if (!sit_i->sec_entries) 4464 return -ENOMEM; 4465 } 4466 4467 /* get information related with SIT */ 4468 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1; 4469 4470 /* setup SIT bitmap from ckeckpoint pack */ 4471 sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP); 4472 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP); 4473 4474 sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL); 4475 if (!sit_i->sit_bitmap) 4476 return -ENOMEM; 4477 4478 #ifdef CONFIG_F2FS_CHECK_FS 4479 sit_i->sit_bitmap_mir = kmemdup(src_bitmap, 4480 sit_bitmap_size, GFP_KERNEL); 4481 if (!sit_i->sit_bitmap_mir) 4482 return -ENOMEM; 4483 4484 sit_i->invalid_segmap = f2fs_kvzalloc(sbi, 4485 main_bitmap_size, GFP_KERNEL); 4486 if (!sit_i->invalid_segmap) 4487 return -ENOMEM; 4488 #endif 4489 4490 sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr); 4491 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg; 4492 sit_i->written_valid_blocks = 0; 4493 sit_i->bitmap_size = sit_bitmap_size; 4494 sit_i->dirty_sentries = 0; 4495 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK; 4496 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time); 4497 sit_i->mounted_time = ktime_get_boottime_seconds(); 4498 init_rwsem(&sit_i->sentry_lock); 4499 return 0; 4500 } 4501 4502 static int build_free_segmap(struct f2fs_sb_info *sbi) 4503 { 4504 struct free_segmap_info *free_i; 4505 unsigned int bitmap_size, sec_bitmap_size; 4506 4507 /* allocate memory for free segmap information */ 4508 free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL); 4509 if (!free_i) 4510 return -ENOMEM; 4511 4512 SM_I(sbi)->free_info = free_i; 4513 4514 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); 4515 free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL); 4516 if (!free_i->free_segmap) 4517 return -ENOMEM; 4518 4519 sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); 4520 free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL); 4521 if (!free_i->free_secmap) 4522 return -ENOMEM; 4523 4524 /* set all segments as dirty temporarily */ 4525 memset(free_i->free_segmap, 0xff, bitmap_size); 4526 memset(free_i->free_secmap, 0xff, sec_bitmap_size); 4527 4528 /* init free segmap information */ 4529 free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi)); 4530 free_i->free_segments = 0; 4531 free_i->free_sections = 0; 4532 spin_lock_init(&free_i->segmap_lock); 4533 return 0; 4534 } 4535 4536 static int build_curseg(struct f2fs_sb_info *sbi) 4537 { 4538 struct curseg_info *array; 4539 int i; 4540 4541 array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE, 4542 sizeof(*array)), GFP_KERNEL); 4543 if (!array) 4544 return -ENOMEM; 4545 4546 SM_I(sbi)->curseg_array = array; 4547 4548 for (i = 0; i < NO_CHECK_TYPE; i++) { 4549 mutex_init(&array[i].curseg_mutex); 4550 array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL); 4551 if (!array[i].sum_blk) 4552 return -ENOMEM; 4553 init_rwsem(&array[i].journal_rwsem); 4554 array[i].journal = f2fs_kzalloc(sbi, 4555 sizeof(struct f2fs_journal), GFP_KERNEL); 4556 if (!array[i].journal) 4557 return -ENOMEM; 4558 if (i < NR_PERSISTENT_LOG) 4559 array[i].seg_type = CURSEG_HOT_DATA + i; 4560 else if (i == CURSEG_COLD_DATA_PINNED) 4561 array[i].seg_type = CURSEG_COLD_DATA; 4562 else if (i == CURSEG_ALL_DATA_ATGC) 4563 array[i].seg_type = CURSEG_COLD_DATA; 4564 array[i].segno = NULL_SEGNO; 4565 array[i].next_blkoff = 0; 4566 array[i].inited = false; 4567 } 4568 return restore_curseg_summaries(sbi); 4569 } 4570 4571 static int build_sit_entries(struct f2fs_sb_info *sbi) 4572 { 4573 struct sit_info *sit_i = SIT_I(sbi); 4574 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 4575 struct f2fs_journal *journal = curseg->journal; 4576 struct seg_entry *se; 4577 struct f2fs_sit_entry sit; 4578 int sit_blk_cnt = SIT_BLK_CNT(sbi); 4579 unsigned int i, start, end; 4580 unsigned int readed, start_blk = 0; 4581 int err = 0; 4582 block_t sit_valid_blocks[2] = {0, 0}; 4583 4584 do { 4585 readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS, 4586 META_SIT, true); 4587 4588 start = start_blk * sit_i->sents_per_block; 4589 end = (start_blk + readed) * sit_i->sents_per_block; 4590 4591 for (; start < end && start < MAIN_SEGS(sbi); start++) { 4592 struct f2fs_sit_block *sit_blk; 4593 struct page *page; 4594 4595 se = &sit_i->sentries[start]; 4596 page = get_current_sit_page(sbi, start); 4597 if (IS_ERR(page)) 4598 return PTR_ERR(page); 4599 sit_blk = (struct f2fs_sit_block *)page_address(page); 4600 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)]; 4601 f2fs_put_page(page, 1); 4602 4603 err = check_block_count(sbi, start, &sit); 4604 if (err) 4605 return err; 4606 seg_info_from_raw_sit(se, &sit); 4607 4608 if (se->type >= NR_PERSISTENT_LOG) { 4609 f2fs_err(sbi, "Invalid segment type: %u, segno: %u", 4610 se->type, start); 4611 f2fs_handle_error(sbi, 4612 ERROR_INCONSISTENT_SUM_TYPE); 4613 return -EFSCORRUPTED; 4614 } 4615 4616 sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks; 4617 4618 if (!f2fs_block_unit_discard(sbi)) 4619 goto init_discard_map_done; 4620 4621 /* build discard map only one time */ 4622 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) { 4623 memset(se->discard_map, 0xff, 4624 SIT_VBLOCK_MAP_SIZE); 4625 goto init_discard_map_done; 4626 } 4627 memcpy(se->discard_map, se->cur_valid_map, 4628 SIT_VBLOCK_MAP_SIZE); 4629 sbi->discard_blks += BLKS_PER_SEG(sbi) - 4630 se->valid_blocks; 4631 init_discard_map_done: 4632 if (__is_large_section(sbi)) 4633 get_sec_entry(sbi, start)->valid_blocks += 4634 se->valid_blocks; 4635 } 4636 start_blk += readed; 4637 } while (start_blk < sit_blk_cnt); 4638 4639 down_read(&curseg->journal_rwsem); 4640 for (i = 0; i < sits_in_cursum(journal); i++) { 4641 unsigned int old_valid_blocks; 4642 4643 start = le32_to_cpu(segno_in_journal(journal, i)); 4644 if (start >= MAIN_SEGS(sbi)) { 4645 f2fs_err(sbi, "Wrong journal entry on segno %u", 4646 start); 4647 err = -EFSCORRUPTED; 4648 f2fs_handle_error(sbi, ERROR_CORRUPTED_JOURNAL); 4649 break; 4650 } 4651 4652 se = &sit_i->sentries[start]; 4653 sit = sit_in_journal(journal, i); 4654 4655 old_valid_blocks = se->valid_blocks; 4656 4657 sit_valid_blocks[SE_PAGETYPE(se)] -= old_valid_blocks; 4658 4659 err = check_block_count(sbi, start, &sit); 4660 if (err) 4661 break; 4662 seg_info_from_raw_sit(se, &sit); 4663 4664 if (se->type >= NR_PERSISTENT_LOG) { 4665 f2fs_err(sbi, "Invalid segment type: %u, segno: %u", 4666 se->type, start); 4667 err = -EFSCORRUPTED; 4668 f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE); 4669 break; 4670 } 4671 4672 sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks; 4673 4674 if (f2fs_block_unit_discard(sbi)) { 4675 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) { 4676 memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE); 4677 } else { 4678 memcpy(se->discard_map, se->cur_valid_map, 4679 SIT_VBLOCK_MAP_SIZE); 4680 sbi->discard_blks += old_valid_blocks; 4681 sbi->discard_blks -= se->valid_blocks; 4682 } 4683 } 4684 4685 if (__is_large_section(sbi)) { 4686 get_sec_entry(sbi, start)->valid_blocks += 4687 se->valid_blocks; 4688 get_sec_entry(sbi, start)->valid_blocks -= 4689 old_valid_blocks; 4690 } 4691 } 4692 up_read(&curseg->journal_rwsem); 4693 4694 if (err) 4695 return err; 4696 4697 if (sit_valid_blocks[NODE] != valid_node_count(sbi)) { 4698 f2fs_err(sbi, "SIT is corrupted node# %u vs %u", 4699 sit_valid_blocks[NODE], valid_node_count(sbi)); 4700 f2fs_handle_error(sbi, ERROR_INCONSISTENT_NODE_COUNT); 4701 return -EFSCORRUPTED; 4702 } 4703 4704 if (sit_valid_blocks[DATA] + sit_valid_blocks[NODE] > 4705 valid_user_blocks(sbi)) { 4706 f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u", 4707 sit_valid_blocks[DATA], sit_valid_blocks[NODE], 4708 valid_user_blocks(sbi)); 4709 f2fs_handle_error(sbi, ERROR_INCONSISTENT_BLOCK_COUNT); 4710 return -EFSCORRUPTED; 4711 } 4712 4713 return 0; 4714 } 4715 4716 static void init_free_segmap(struct f2fs_sb_info *sbi) 4717 { 4718 unsigned int start; 4719 int type; 4720 struct seg_entry *sentry; 4721 4722 for (start = 0; start < MAIN_SEGS(sbi); start++) { 4723 if (f2fs_usable_blks_in_seg(sbi, start) == 0) 4724 continue; 4725 sentry = get_seg_entry(sbi, start); 4726 if (!sentry->valid_blocks) 4727 __set_free(sbi, start); 4728 else 4729 SIT_I(sbi)->written_valid_blocks += 4730 sentry->valid_blocks; 4731 } 4732 4733 /* set use the current segments */ 4734 for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) { 4735 struct curseg_info *curseg_t = CURSEG_I(sbi, type); 4736 4737 __set_test_and_inuse(sbi, curseg_t->segno); 4738 } 4739 } 4740 4741 static void init_dirty_segmap(struct f2fs_sb_info *sbi) 4742 { 4743 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 4744 struct free_segmap_info *free_i = FREE_I(sbi); 4745 unsigned int segno = 0, offset = 0, secno; 4746 block_t valid_blocks, usable_blks_in_seg; 4747 4748 while (1) { 4749 /* find dirty segment based on free segmap */ 4750 segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset); 4751 if (segno >= MAIN_SEGS(sbi)) 4752 break; 4753 offset = segno + 1; 4754 valid_blocks = get_valid_blocks(sbi, segno, false); 4755 usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno); 4756 if (valid_blocks == usable_blks_in_seg || !valid_blocks) 4757 continue; 4758 if (valid_blocks > usable_blks_in_seg) { 4759 f2fs_bug_on(sbi, 1); 4760 continue; 4761 } 4762 mutex_lock(&dirty_i->seglist_lock); 4763 __locate_dirty_segment(sbi, segno, DIRTY); 4764 mutex_unlock(&dirty_i->seglist_lock); 4765 } 4766 4767 if (!__is_large_section(sbi)) 4768 return; 4769 4770 mutex_lock(&dirty_i->seglist_lock); 4771 for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) { 4772 valid_blocks = get_valid_blocks(sbi, segno, true); 4773 secno = GET_SEC_FROM_SEG(sbi, segno); 4774 4775 if (!valid_blocks || valid_blocks == CAP_BLKS_PER_SEC(sbi)) 4776 continue; 4777 if (IS_CURSEC(sbi, secno)) 4778 continue; 4779 set_bit(secno, dirty_i->dirty_secmap); 4780 } 4781 mutex_unlock(&dirty_i->seglist_lock); 4782 } 4783 4784 static int init_victim_secmap(struct f2fs_sb_info *sbi) 4785 { 4786 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 4787 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); 4788 4789 dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL); 4790 if (!dirty_i->victim_secmap) 4791 return -ENOMEM; 4792 4793 dirty_i->pinned_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL); 4794 if (!dirty_i->pinned_secmap) 4795 return -ENOMEM; 4796 4797 dirty_i->pinned_secmap_cnt = 0; 4798 dirty_i->enable_pin_section = true; 4799 return 0; 4800 } 4801 4802 static int build_dirty_segmap(struct f2fs_sb_info *sbi) 4803 { 4804 struct dirty_seglist_info *dirty_i; 4805 unsigned int bitmap_size, i; 4806 4807 /* allocate memory for dirty segments list information */ 4808 dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info), 4809 GFP_KERNEL); 4810 if (!dirty_i) 4811 return -ENOMEM; 4812 4813 SM_I(sbi)->dirty_info = dirty_i; 4814 mutex_init(&dirty_i->seglist_lock); 4815 4816 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); 4817 4818 for (i = 0; i < NR_DIRTY_TYPE; i++) { 4819 dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size, 4820 GFP_KERNEL); 4821 if (!dirty_i->dirty_segmap[i]) 4822 return -ENOMEM; 4823 } 4824 4825 if (__is_large_section(sbi)) { 4826 bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); 4827 dirty_i->dirty_secmap = f2fs_kvzalloc(sbi, 4828 bitmap_size, GFP_KERNEL); 4829 if (!dirty_i->dirty_secmap) 4830 return -ENOMEM; 4831 } 4832 4833 init_dirty_segmap(sbi); 4834 return init_victim_secmap(sbi); 4835 } 4836 4837 static int sanity_check_curseg(struct f2fs_sb_info *sbi) 4838 { 4839 int i; 4840 4841 /* 4842 * In LFS/SSR curseg, .next_blkoff should point to an unused blkaddr; 4843 * In LFS curseg, all blkaddr after .next_blkoff should be unused. 4844 */ 4845 for (i = 0; i < NR_PERSISTENT_LOG; i++) { 4846 struct curseg_info *curseg = CURSEG_I(sbi, i); 4847 struct seg_entry *se = get_seg_entry(sbi, curseg->segno); 4848 unsigned int blkofs = curseg->next_blkoff; 4849 4850 if (f2fs_sb_has_readonly(sbi) && 4851 i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE) 4852 continue; 4853 4854 sanity_check_seg_type(sbi, curseg->seg_type); 4855 4856 if (curseg->alloc_type != LFS && curseg->alloc_type != SSR) { 4857 f2fs_err(sbi, 4858 "Current segment has invalid alloc_type:%d", 4859 curseg->alloc_type); 4860 f2fs_handle_error(sbi, ERROR_INVALID_CURSEG); 4861 return -EFSCORRUPTED; 4862 } 4863 4864 if (f2fs_test_bit(blkofs, se->cur_valid_map)) 4865 goto out; 4866 4867 if (curseg->alloc_type == SSR) 4868 continue; 4869 4870 for (blkofs += 1; blkofs < BLKS_PER_SEG(sbi); blkofs++) { 4871 if (!f2fs_test_bit(blkofs, se->cur_valid_map)) 4872 continue; 4873 out: 4874 f2fs_err(sbi, 4875 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u", 4876 i, curseg->segno, curseg->alloc_type, 4877 curseg->next_blkoff, blkofs); 4878 f2fs_handle_error(sbi, ERROR_INVALID_CURSEG); 4879 return -EFSCORRUPTED; 4880 } 4881 } 4882 return 0; 4883 } 4884 4885 #ifdef CONFIG_BLK_DEV_ZONED 4886 4887 static int check_zone_write_pointer(struct f2fs_sb_info *sbi, 4888 struct f2fs_dev_info *fdev, 4889 struct blk_zone *zone) 4890 { 4891 unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno; 4892 block_t zone_block, wp_block, last_valid_block; 4893 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT; 4894 int i, s, b, ret; 4895 struct seg_entry *se; 4896 4897 if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ) 4898 return 0; 4899 4900 wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block); 4901 wp_segno = GET_SEGNO(sbi, wp_block); 4902 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno); 4903 zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block); 4904 zone_segno = GET_SEGNO(sbi, zone_block); 4905 zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno); 4906 4907 if (zone_segno >= MAIN_SEGS(sbi)) 4908 return 0; 4909 4910 /* 4911 * Skip check of zones cursegs point to, since 4912 * fix_curseg_write_pointer() checks them. 4913 */ 4914 for (i = 0; i < NO_CHECK_TYPE; i++) 4915 if (zone_secno == GET_SEC_FROM_SEG(sbi, 4916 CURSEG_I(sbi, i)->segno)) 4917 return 0; 4918 4919 /* 4920 * Get last valid block of the zone. 4921 */ 4922 last_valid_block = zone_block - 1; 4923 for (s = sbi->segs_per_sec - 1; s >= 0; s--) { 4924 segno = zone_segno + s; 4925 se = get_seg_entry(sbi, segno); 4926 for (b = sbi->blocks_per_seg - 1; b >= 0; b--) 4927 if (f2fs_test_bit(b, se->cur_valid_map)) { 4928 last_valid_block = START_BLOCK(sbi, segno) + b; 4929 break; 4930 } 4931 if (last_valid_block >= zone_block) 4932 break; 4933 } 4934 4935 /* 4936 * The write pointer matches with the valid blocks or 4937 * already points to the end of the zone. 4938 */ 4939 if ((last_valid_block + 1 == wp_block) || 4940 (zone->wp == zone->start + zone->len)) 4941 return 0; 4942 4943 if (last_valid_block + 1 == zone_block) { 4944 /* 4945 * If there is no valid block in the zone and if write pointer 4946 * is not at zone start, reset the write pointer. 4947 */ 4948 f2fs_notice(sbi, 4949 "Zone without valid block has non-zero write " 4950 "pointer. Reset the write pointer: wp[0x%x,0x%x]", 4951 wp_segno, wp_blkoff); 4952 ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block, 4953 zone->len >> log_sectors_per_block); 4954 if (ret) 4955 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)", 4956 fdev->path, ret); 4957 4958 return ret; 4959 } 4960 4961 /* 4962 * If there are valid blocks and the write pointer doesn't 4963 * match with them, we need to report the inconsistency and 4964 * fill the zone till the end to close the zone. This inconsistency 4965 * does not cause write error because the zone will not be selected 4966 * for write operation until it get discarded. 4967 */ 4968 f2fs_notice(sbi, "Valid blocks are not aligned with write pointer: " 4969 "valid block[0x%x,0x%x] wp[0x%x,0x%x]", 4970 GET_SEGNO(sbi, last_valid_block), 4971 GET_BLKOFF_FROM_SEG0(sbi, last_valid_block), 4972 wp_segno, wp_blkoff); 4973 4974 ret = blkdev_zone_mgmt(fdev->bdev, REQ_OP_ZONE_FINISH, 4975 zone->start, zone->len, GFP_NOFS); 4976 if (ret == -EOPNOTSUPP) { 4977 ret = blkdev_issue_zeroout(fdev->bdev, zone->wp, 4978 zone->len - (zone->wp - zone->start), 4979 GFP_NOFS, 0); 4980 if (ret) 4981 f2fs_err(sbi, "Fill up zone failed: %s (errno=%d)", 4982 fdev->path, ret); 4983 } else if (ret) { 4984 f2fs_err(sbi, "Finishing zone failed: %s (errno=%d)", 4985 fdev->path, ret); 4986 } 4987 4988 return ret; 4989 } 4990 4991 static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi, 4992 block_t zone_blkaddr) 4993 { 4994 int i; 4995 4996 for (i = 0; i < sbi->s_ndevs; i++) { 4997 if (!bdev_is_zoned(FDEV(i).bdev)) 4998 continue; 4999 if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr && 5000 zone_blkaddr <= FDEV(i).end_blk)) 5001 return &FDEV(i); 5002 } 5003 5004 return NULL; 5005 } 5006 5007 static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx, 5008 void *data) 5009 { 5010 memcpy(data, zone, sizeof(struct blk_zone)); 5011 return 0; 5012 } 5013 5014 static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type) 5015 { 5016 struct curseg_info *cs = CURSEG_I(sbi, type); 5017 struct f2fs_dev_info *zbd; 5018 struct blk_zone zone; 5019 unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off; 5020 block_t cs_zone_block, wp_block; 5021 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT; 5022 sector_t zone_sector; 5023 int err; 5024 5025 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno); 5026 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section)); 5027 5028 zbd = get_target_zoned_dev(sbi, cs_zone_block); 5029 if (!zbd) 5030 return 0; 5031 5032 /* report zone for the sector the curseg points to */ 5033 zone_sector = (sector_t)(cs_zone_block - zbd->start_blk) 5034 << log_sectors_per_block; 5035 err = blkdev_report_zones(zbd->bdev, zone_sector, 1, 5036 report_one_zone_cb, &zone); 5037 if (err != 1) { 5038 f2fs_err(sbi, "Report zone failed: %s errno=(%d)", 5039 zbd->path, err); 5040 return err; 5041 } 5042 5043 if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ) 5044 return 0; 5045 5046 wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block); 5047 wp_segno = GET_SEGNO(sbi, wp_block); 5048 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno); 5049 wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0); 5050 5051 if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff && 5052 wp_sector_off == 0) 5053 return 0; 5054 5055 f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: " 5056 "curseg[0x%x,0x%x] wp[0x%x,0x%x]", 5057 type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff); 5058 5059 f2fs_notice(sbi, "Assign new section to curseg[%d]: " 5060 "curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff); 5061 5062 f2fs_allocate_new_section(sbi, type, true); 5063 5064 /* check consistency of the zone curseg pointed to */ 5065 if (check_zone_write_pointer(sbi, zbd, &zone)) 5066 return -EIO; 5067 5068 /* check newly assigned zone */ 5069 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno); 5070 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section)); 5071 5072 zbd = get_target_zoned_dev(sbi, cs_zone_block); 5073 if (!zbd) 5074 return 0; 5075 5076 zone_sector = (sector_t)(cs_zone_block - zbd->start_blk) 5077 << log_sectors_per_block; 5078 err = blkdev_report_zones(zbd->bdev, zone_sector, 1, 5079 report_one_zone_cb, &zone); 5080 if (err != 1) { 5081 f2fs_err(sbi, "Report zone failed: %s errno=(%d)", 5082 zbd->path, err); 5083 return err; 5084 } 5085 5086 if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ) 5087 return 0; 5088 5089 if (zone.wp != zone.start) { 5090 f2fs_notice(sbi, 5091 "New zone for curseg[%d] is not yet discarded. " 5092 "Reset the zone: curseg[0x%x,0x%x]", 5093 type, cs->segno, cs->next_blkoff); 5094 err = __f2fs_issue_discard_zone(sbi, zbd->bdev, cs_zone_block, 5095 zone.len >> log_sectors_per_block); 5096 if (err) { 5097 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)", 5098 zbd->path, err); 5099 return err; 5100 } 5101 } 5102 5103 return 0; 5104 } 5105 5106 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi) 5107 { 5108 int i, ret; 5109 5110 for (i = 0; i < NR_PERSISTENT_LOG; i++) { 5111 ret = fix_curseg_write_pointer(sbi, i); 5112 if (ret) 5113 return ret; 5114 } 5115 5116 return 0; 5117 } 5118 5119 struct check_zone_write_pointer_args { 5120 struct f2fs_sb_info *sbi; 5121 struct f2fs_dev_info *fdev; 5122 }; 5123 5124 static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx, 5125 void *data) 5126 { 5127 struct check_zone_write_pointer_args *args; 5128 5129 args = (struct check_zone_write_pointer_args *)data; 5130 5131 return check_zone_write_pointer(args->sbi, args->fdev, zone); 5132 } 5133 5134 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi) 5135 { 5136 int i, ret; 5137 struct check_zone_write_pointer_args args; 5138 5139 for (i = 0; i < sbi->s_ndevs; i++) { 5140 if (!bdev_is_zoned(FDEV(i).bdev)) 5141 continue; 5142 5143 args.sbi = sbi; 5144 args.fdev = &FDEV(i); 5145 ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES, 5146 check_zone_write_pointer_cb, &args); 5147 if (ret < 0) 5148 return ret; 5149 } 5150 5151 return 0; 5152 } 5153 5154 /* 5155 * Return the number of usable blocks in a segment. The number of blocks 5156 * returned is always equal to the number of blocks in a segment for 5157 * segments fully contained within a sequential zone capacity or a 5158 * conventional zone. For segments partially contained in a sequential 5159 * zone capacity, the number of usable blocks up to the zone capacity 5160 * is returned. 0 is returned in all other cases. 5161 */ 5162 static inline unsigned int f2fs_usable_zone_blks_in_seg( 5163 struct f2fs_sb_info *sbi, unsigned int segno) 5164 { 5165 block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr; 5166 unsigned int secno; 5167 5168 if (!sbi->unusable_blocks_per_sec) 5169 return BLKS_PER_SEG(sbi); 5170 5171 secno = GET_SEC_FROM_SEG(sbi, segno); 5172 seg_start = START_BLOCK(sbi, segno); 5173 sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno)); 5174 sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi); 5175 5176 /* 5177 * If segment starts before zone capacity and spans beyond 5178 * zone capacity, then usable blocks are from seg start to 5179 * zone capacity. If the segment starts after the zone capacity, 5180 * then there are no usable blocks. 5181 */ 5182 if (seg_start >= sec_cap_blkaddr) 5183 return 0; 5184 if (seg_start + BLKS_PER_SEG(sbi) > sec_cap_blkaddr) 5185 return sec_cap_blkaddr - seg_start; 5186 5187 return BLKS_PER_SEG(sbi); 5188 } 5189 #else 5190 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi) 5191 { 5192 return 0; 5193 } 5194 5195 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi) 5196 { 5197 return 0; 5198 } 5199 5200 static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi, 5201 unsigned int segno) 5202 { 5203 return 0; 5204 } 5205 5206 #endif 5207 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi, 5208 unsigned int segno) 5209 { 5210 if (f2fs_sb_has_blkzoned(sbi)) 5211 return f2fs_usable_zone_blks_in_seg(sbi, segno); 5212 5213 return BLKS_PER_SEG(sbi); 5214 } 5215 5216 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi, 5217 unsigned int segno) 5218 { 5219 if (f2fs_sb_has_blkzoned(sbi)) 5220 return CAP_SEGS_PER_SEC(sbi); 5221 5222 return SEGS_PER_SEC(sbi); 5223 } 5224 5225 /* 5226 * Update min, max modified time for cost-benefit GC algorithm 5227 */ 5228 static void init_min_max_mtime(struct f2fs_sb_info *sbi) 5229 { 5230 struct sit_info *sit_i = SIT_I(sbi); 5231 unsigned int segno; 5232 5233 down_write(&sit_i->sentry_lock); 5234 5235 sit_i->min_mtime = ULLONG_MAX; 5236 5237 for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) { 5238 unsigned int i; 5239 unsigned long long mtime = 0; 5240 5241 for (i = 0; i < SEGS_PER_SEC(sbi); i++) 5242 mtime += get_seg_entry(sbi, segno + i)->mtime; 5243 5244 mtime = div_u64(mtime, SEGS_PER_SEC(sbi)); 5245 5246 if (sit_i->min_mtime > mtime) 5247 sit_i->min_mtime = mtime; 5248 } 5249 sit_i->max_mtime = get_mtime(sbi, false); 5250 sit_i->dirty_max_mtime = 0; 5251 up_write(&sit_i->sentry_lock); 5252 } 5253 5254 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi) 5255 { 5256 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 5257 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 5258 struct f2fs_sm_info *sm_info; 5259 int err; 5260 5261 sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL); 5262 if (!sm_info) 5263 return -ENOMEM; 5264 5265 /* init sm info */ 5266 sbi->sm_info = sm_info; 5267 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); 5268 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); 5269 sm_info->segment_count = le32_to_cpu(raw_super->segment_count); 5270 sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count); 5271 sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count); 5272 sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main); 5273 sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr); 5274 sm_info->rec_prefree_segments = sm_info->main_segments * 5275 DEF_RECLAIM_PREFREE_SEGMENTS / 100; 5276 if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS) 5277 sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS; 5278 5279 if (!f2fs_lfs_mode(sbi)) 5280 sm_info->ipu_policy = BIT(F2FS_IPU_FSYNC); 5281 sm_info->min_ipu_util = DEF_MIN_IPU_UTIL; 5282 sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS; 5283 sm_info->min_seq_blocks = BLKS_PER_SEG(sbi); 5284 sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS; 5285 sm_info->min_ssr_sections = reserved_sections(sbi); 5286 5287 INIT_LIST_HEAD(&sm_info->sit_entry_set); 5288 5289 init_f2fs_rwsem(&sm_info->curseg_lock); 5290 5291 err = f2fs_create_flush_cmd_control(sbi); 5292 if (err) 5293 return err; 5294 5295 err = create_discard_cmd_control(sbi); 5296 if (err) 5297 return err; 5298 5299 err = build_sit_info(sbi); 5300 if (err) 5301 return err; 5302 err = build_free_segmap(sbi); 5303 if (err) 5304 return err; 5305 err = build_curseg(sbi); 5306 if (err) 5307 return err; 5308 5309 /* reinit free segmap based on SIT */ 5310 err = build_sit_entries(sbi); 5311 if (err) 5312 return err; 5313 5314 init_free_segmap(sbi); 5315 err = build_dirty_segmap(sbi); 5316 if (err) 5317 return err; 5318 5319 err = sanity_check_curseg(sbi); 5320 if (err) 5321 return err; 5322 5323 init_min_max_mtime(sbi); 5324 return 0; 5325 } 5326 5327 static void discard_dirty_segmap(struct f2fs_sb_info *sbi, 5328 enum dirty_type dirty_type) 5329 { 5330 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 5331 5332 mutex_lock(&dirty_i->seglist_lock); 5333 kvfree(dirty_i->dirty_segmap[dirty_type]); 5334 dirty_i->nr_dirty[dirty_type] = 0; 5335 mutex_unlock(&dirty_i->seglist_lock); 5336 } 5337 5338 static void destroy_victim_secmap(struct f2fs_sb_info *sbi) 5339 { 5340 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 5341 5342 kvfree(dirty_i->pinned_secmap); 5343 kvfree(dirty_i->victim_secmap); 5344 } 5345 5346 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi) 5347 { 5348 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 5349 int i; 5350 5351 if (!dirty_i) 5352 return; 5353 5354 /* discard pre-free/dirty segments list */ 5355 for (i = 0; i < NR_DIRTY_TYPE; i++) 5356 discard_dirty_segmap(sbi, i); 5357 5358 if (__is_large_section(sbi)) { 5359 mutex_lock(&dirty_i->seglist_lock); 5360 kvfree(dirty_i->dirty_secmap); 5361 mutex_unlock(&dirty_i->seglist_lock); 5362 } 5363 5364 destroy_victim_secmap(sbi); 5365 SM_I(sbi)->dirty_info = NULL; 5366 kfree(dirty_i); 5367 } 5368 5369 static void destroy_curseg(struct f2fs_sb_info *sbi) 5370 { 5371 struct curseg_info *array = SM_I(sbi)->curseg_array; 5372 int i; 5373 5374 if (!array) 5375 return; 5376 SM_I(sbi)->curseg_array = NULL; 5377 for (i = 0; i < NR_CURSEG_TYPE; i++) { 5378 kfree(array[i].sum_blk); 5379 kfree(array[i].journal); 5380 } 5381 kfree(array); 5382 } 5383 5384 static void destroy_free_segmap(struct f2fs_sb_info *sbi) 5385 { 5386 struct free_segmap_info *free_i = SM_I(sbi)->free_info; 5387 5388 if (!free_i) 5389 return; 5390 SM_I(sbi)->free_info = NULL; 5391 kvfree(free_i->free_segmap); 5392 kvfree(free_i->free_secmap); 5393 kfree(free_i); 5394 } 5395 5396 static void destroy_sit_info(struct f2fs_sb_info *sbi) 5397 { 5398 struct sit_info *sit_i = SIT_I(sbi); 5399 5400 if (!sit_i) 5401 return; 5402 5403 if (sit_i->sentries) 5404 kvfree(sit_i->bitmap); 5405 kfree(sit_i->tmp_map); 5406 5407 kvfree(sit_i->sentries); 5408 kvfree(sit_i->sec_entries); 5409 kvfree(sit_i->dirty_sentries_bitmap); 5410 5411 SM_I(sbi)->sit_info = NULL; 5412 kvfree(sit_i->sit_bitmap); 5413 #ifdef CONFIG_F2FS_CHECK_FS 5414 kvfree(sit_i->sit_bitmap_mir); 5415 kvfree(sit_i->invalid_segmap); 5416 #endif 5417 kfree(sit_i); 5418 } 5419 5420 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi) 5421 { 5422 struct f2fs_sm_info *sm_info = SM_I(sbi); 5423 5424 if (!sm_info) 5425 return; 5426 f2fs_destroy_flush_cmd_control(sbi, true); 5427 destroy_discard_cmd_control(sbi); 5428 destroy_dirty_segmap(sbi); 5429 destroy_curseg(sbi); 5430 destroy_free_segmap(sbi); 5431 destroy_sit_info(sbi); 5432 sbi->sm_info = NULL; 5433 kfree(sm_info); 5434 } 5435 5436 int __init f2fs_create_segment_manager_caches(void) 5437 { 5438 discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry", 5439 sizeof(struct discard_entry)); 5440 if (!discard_entry_slab) 5441 goto fail; 5442 5443 discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd", 5444 sizeof(struct discard_cmd)); 5445 if (!discard_cmd_slab) 5446 goto destroy_discard_entry; 5447 5448 sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set", 5449 sizeof(struct sit_entry_set)); 5450 if (!sit_entry_set_slab) 5451 goto destroy_discard_cmd; 5452 5453 revoke_entry_slab = f2fs_kmem_cache_create("f2fs_revoke_entry", 5454 sizeof(struct revoke_entry)); 5455 if (!revoke_entry_slab) 5456 goto destroy_sit_entry_set; 5457 return 0; 5458 5459 destroy_sit_entry_set: 5460 kmem_cache_destroy(sit_entry_set_slab); 5461 destroy_discard_cmd: 5462 kmem_cache_destroy(discard_cmd_slab); 5463 destroy_discard_entry: 5464 kmem_cache_destroy(discard_entry_slab); 5465 fail: 5466 return -ENOMEM; 5467 } 5468 5469 void f2fs_destroy_segment_manager_caches(void) 5470 { 5471 kmem_cache_destroy(sit_entry_set_slab); 5472 kmem_cache_destroy(discard_cmd_slab); 5473 kmem_cache_destroy(discard_entry_slab); 5474 kmem_cache_destroy(revoke_entry_slab); 5475 } 5476