// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *revoke_entry_slab;

static unsigned long __reverse_ulong(unsigned char *str)
{
	unsigned long tmp = 0;
	int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
	shift = 56;
#endif
	while (shift >= 0) {
		tmp |= (unsigned long)str[idx++] << shift;
		shift -= BITS_PER_BYTE;
	}
	return tmp;
}

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff00000000UL) == 0)
		num += 32;
	else
		word >>= 32;
#endif
	if ((word & 0xffff0000) == 0)
		num += 16;
	else
		word >>= 16;

	if ((word & 0xff00) == 0)
		num += 8;
	else
		word >>= 8;

	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;

	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;

	if ((word & 0x2) == 0)
		num += 1;
	return num;
}
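
/*
 * Illustration: __reverse_ulong() loads the bitmap bytes with str[0] as
 * the most significant byte, so on a 64-bit machine
 *
 *   str[] = { 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }
 *       => 0x8000000000000001UL
 *
 * and bit 0 of an f2fs bitmap (set via f2fs_set_bit) ends up in the MSB
 * of the word that __reverse_ffs() scans.
 */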

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be an integral multiple of BITS_PER_LONG.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == 0)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		tmp &= ~0UL >> offset;
		if (size < BITS_PER_LONG)
			tmp &= (~0UL << (BITS_PER_LONG - size));
		if (tmp)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffs(tmp);
}

static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == ~0UL)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		if (offset)
			tmp |= ~0UL << (BITS_PER_LONG - offset);
		if (size < BITS_PER_LONG)
			tmp |= ~0UL >> size;
		if (tmp != ~0UL)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffz(tmp);
}

bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

	if (f2fs_lfs_mode(sbi))
		return false;
	if (sbi->gc_mode == GC_URGENT_HIGH)
		return true;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return true;

	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
}

void f2fs_abort_atomic_write(struct inode *inode, bool clean)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (!f2fs_is_atomic_file(inode))
		return;

	if (clean)
		truncate_inode_pages_final(inode->i_mapping);

	release_atomic_write_cnt(inode);
	clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
	clear_inode_flag(inode, FI_ATOMIC_REPLACE);
	clear_inode_flag(inode, FI_ATOMIC_FILE);
	if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
		clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
		f2fs_mark_inode_dirty_sync(inode, true);
	}
	stat_dec_atomic_inode(inode);

	F2FS_I(inode)->atomic_write_task = NULL;

	if (clean) {
		f2fs_i_size_write(inode, fi->original_i_size);
		fi->original_i_size = 0;
	}
	/* avoid stale dirty inode during eviction */
	sync_inode_metadata(inode, 0);
}
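
/*
 * Swap one block of an atomic-write file: in commit mode (@recover ==
 * false) the COW block @new_addr replaces the inode's block at @index and
 * the previous address is saved in *@old_addr for a possible revoke; in
 * revoke mode (@recover == true) the saved address is put back.
 */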

static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
			block_t new_addr, block_t *old_addr, bool recover)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct node_info ni;
	int err;

retry:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			/* -ENOMEM is transient here: back off and retry */
			f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
			goto retry;
		}
		return err;
	}

	err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	if (recover) {
		/* dn.data_blkaddr is always valid */
		if (!__is_valid_data_blkaddr(new_addr)) {
			if (new_addr == NULL_ADDR)
				dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
			f2fs_update_data_blkaddr(&dn, new_addr);
		} else {
			f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
				new_addr, ni.version, true, true);
		}
	} else {
		blkcnt_t count = 1;

		err = inc_valid_block_count(sbi, inode, &count, true);
		if (err) {
			f2fs_put_dnode(&dn);
			return err;
		}

		*old_addr = dn.data_blkaddr;
		f2fs_truncate_data_blocks_range(&dn, 1);
		dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count);

		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
					ni.version, true, false);
	}

	f2fs_put_dnode(&dn);

	trace_f2fs_replace_atomic_write_block(inode, F2FS_I(inode)->cow_inode,
			index, old_addr ? *old_addr : 0, new_addr, recover);
	return 0;
}

static void __complete_revoke_list(struct inode *inode, struct list_head *head,
					bool revoke)
{
	struct revoke_entry *cur, *tmp;
	pgoff_t start_index = 0;
	bool truncate = is_inode_flag_set(inode, FI_ATOMIC_REPLACE);

	list_for_each_entry_safe(cur, tmp, head, list) {
		if (revoke) {
			__replace_atomic_write_block(inode, cur->index,
					cur->old_addr, NULL, true);
		} else if (truncate) {
			f2fs_truncate_hole(inode, start_index, cur->index);
			start_index = cur->index + 1;
		}

		list_del(&cur->list);
		kmem_cache_free(revoke_entry_slab, cur);
	}

	if (!revoke && truncate)
		f2fs_do_truncate_blocks(inode, start_index * PAGE_SIZE, false);
}
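
/*
 * Walk the COW inode's data blocks and move each committed block into the
 * original inode, collecting a revoke entry per moved block so the whole
 * commit can be rolled back via __complete_revoke_list() on failure.
 */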

static int __f2fs_commit_atomic_write(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inode *cow_inode = fi->cow_inode;
	struct revoke_entry *new;
	struct list_head revoke_list;
	block_t blkaddr;
	struct dnode_of_data dn;
	pgoff_t len = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t off = 0, blen, index;
	int ret = 0, i;

	INIT_LIST_HEAD(&revoke_list);

	while (len) {
		blen = min_t(pgoff_t, ADDRS_PER_BLOCK(cow_inode), len);

		set_new_dnode(&dn, cow_inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
		if (ret && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			ret = 0;
			if (dn.max_level == 0)
				goto out;
			goto next;
		}

		blen = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, cow_inode),
				len);
		index = off;
		for (i = 0; i < blen; i++, dn.ofs_in_node++, index++) {
			blkaddr = f2fs_data_blkaddr(&dn);

			if (!__is_valid_data_blkaddr(blkaddr)) {
				continue;
			} else if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				ret = -EFSCORRUPTED;
				f2fs_handle_error(sbi,
						ERROR_INVALID_BLKADDR);
				goto out;
			}

			new = f2fs_kmem_cache_alloc(revoke_entry_slab, GFP_NOFS,
							true, NULL);

			ret = __replace_atomic_write_block(inode, index, blkaddr,
							&new->old_addr, false);
			if (ret) {
				f2fs_put_dnode(&dn);
				kmem_cache_free(revoke_entry_slab, new);
				goto out;
			}

			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			new->index = index;
			list_add_tail(&new->list, &revoke_list);
		}
		f2fs_put_dnode(&dn);
next:
		off += blen;
		len -= blen;
	}

out:
	if (ret) {
		sbi->revoked_atomic_block += fi->atomic_write_cnt;
	} else {
		sbi->committed_atomic_block += fi->atomic_write_cnt;
		set_inode_flag(inode, FI_ATOMIC_COMMITTED);
		if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
			clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
			f2fs_mark_inode_dirty_sync(inode, true);
		}
	}

	__complete_revoke_list(inode, &revoke_list, ret ? true : false);

	return ret;
}

int f2fs_commit_atomic_write(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (err)
		return err;

	f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
	f2fs_lock_op(sbi);

	err = __f2fs_commit_atomic_write(inode);

	f2fs_unlock_op(sbi);
	f2fs_up_write(&fi->i_gc_rwsem[WRITE]);

	return err;
}

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
	if (time_to_inject(sbi, FAULT_CHECKPOINT))
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_FAULT_INJECT);

	/* balance_fs_bg can be left pending */
	if (need && excess_cached_nats(sbi))
		f2fs_balance_fs_bg(sbi, false);

	if (!f2fs_is_checkpoint_ready(sbi))
		return;

	/*
	 * We should do GC, or end up with a checkpoint, if there are too many
	 * dirty dir/node pages without enough free segments.
	 */
	if (has_enough_free_secs(sbi, 0, 0))
		return;

	if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
				sbi->gc_thread->f2fs_gc_task) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
					TASK_UNINTERRUPTIBLE);
		wake_up(&sbi->gc_thread->gc_wait_queue_head);
		io_schedule();
		finish_wait(&sbi->gc_thread->fggc_wq, &wait);
	} else {
		struct f2fs_gc_control gc_control = {
			.victim_segno = NULL_SEGNO,
			.init_gc_type = BG_GC,
			.no_bg_gc = true,
			.should_migrate_blocks = false,
			.err_gc_skipped = false,
			.nr_free_secs = 1 };

		f2fs_down_write(&sbi->gc_lock);
		stat_inc_gc_call_count(sbi, FOREGROUND);
		f2fs_gc(sbi, &gc_control);
	}
}
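
/*
 * A rough sketch of the thresholds below, assuming 512-block (2MB)
 * segments, i.e. log_blocks_per_seg == 9: each dirty-page type may hold
 * up to factor * DEFAULT_DIRTY_THRESHOLD * 512 pages before a sync is
 * forced, and the sum of all types may reach 1.5x that amount.
 */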

static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
{
	int factor = f2fs_rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
	unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS);
	unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
	unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
	unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
	unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
	unsigned int threshold = (factor * DEFAULT_DIRTY_THRESHOLD) <<
				sbi->log_blocks_per_seg;
	unsigned int global_threshold = threshold * 3 / 2;

	if (dents >= threshold || qdata >= threshold ||
		nodes >= threshold || meta >= threshold ||
		imeta >= threshold)
		return true;
	return dents + qdata + nodes + meta + imeta > global_threshold;
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
{
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return;

	/* try to shrink extent cache when there is not enough memory */
	if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE))
		f2fs_shrink_read_extent_tree(sbi,
				READ_EXTENT_CACHE_SHRINK_NUMBER);

	/* try to shrink age extent cache when there is not enough memory */
	if (!f2fs_available_free_memory(sbi, AGE_EXTENT_CACHE))
		f2fs_shrink_age_extent_tree(sbi,
				AGE_EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries */
	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
		f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

	if (!f2fs_available_free_memory(sbi, FREE_NIDS))
		f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
	else
		f2fs_build_free_nids(sbi, false, false);

	if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) ||
		excess_prefree_segs(sbi) || !f2fs_space_for_roll_forward(sbi))
		goto do_sync;

	/* there is background inflight IO or foreground operation recently */
	if (is_inflight_io(sbi, REQ_TIME) ||
		(!f2fs_time_over(sbi, REQ_TIME) &&
			f2fs_rwsem_is_locked(&sbi->cp_rwsem)))
		return;

	/* exceed periodical checkpoint timeout threshold */
	if (f2fs_time_over(sbi, CP_TIME))
		goto do_sync;

	/* checkpoint is the only way to shrink partial cached entries */
	if (f2fs_available_free_memory(sbi, NAT_ENTRIES) &&
		f2fs_available_free_memory(sbi, INO_ENTRIES))
		return;

do_sync:
	if (test_opt(sbi, DATA_FLUSH) && from_bg) {
		struct blk_plug plug;

		mutex_lock(&sbi->flush_lock);

		blk_start_plug(&plug);
		f2fs_sync_dirty_inodes(sbi, FILE_INODE, false);
		blk_finish_plug(&plug);

		mutex_unlock(&sbi->flush_lock);
	}
	stat_inc_cp_call_count(sbi, BACKGROUND);
	f2fs_sync_fs(sbi->sb, 1);
}

static int __submit_flush_wait(struct f2fs_sb_info *sbi,
				struct block_device *bdev)
{
	int ret = blkdev_issue_flush(bdev);

	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
				test_opt(sbi, FLUSH_MERGE), ret);
	if (!ret)
		f2fs_update_iostat(sbi, NULL, FS_FLUSH_IO, 0);
	return ret;
}

static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
{
	int ret = 0;
	int i;

	if (!f2fs_is_multi_device(sbi))
		return __submit_flush_wait(sbi, sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
			continue;
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;
	}
	return ret;
}
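
/*
 * With FLUSH_MERGE, concurrent callers queue flush_cmds on a lock-free
 * llist; this thread drains the list, issues a single device flush on
 * behalf of everyone queued, and completes each waiter with that result.
 */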

static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&fcc->issue_list)) {
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);

		ret = submit_flush_wait(sbi, cmd->ino);
		atomic_inc(&fcc->issued_flush);

		llist_for_each_entry_safe(cmd, next,
					  fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		fcc->dispatch_list = NULL;
	}

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}

int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	struct flush_cmd cmd;
	int ret;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE)) {
		atomic_inc(&fcc->queued_flush);
		ret = submit_flush_wait(sbi, ino);
		atomic_dec(&fcc->queued_flush);
		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	if (atomic_inc_return(&fcc->queued_flush) == 1 ||
			f2fs_is_multi_device(sbi)) {
		ret = submit_flush_wait(sbi, ino);
		atomic_dec(&fcc->queued_flush);

		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	cmd.ino = ino;
	init_completion(&cmd.wait);

	llist_add(&cmd.llnode, &fcc->issue_list);

	/*
	 * update issue_list before we wake up issue_flush thread, this
	 * smp_mb() pairs with another barrier in ___wait_event(), see
	 * more details in comments of waitqueue_active().
	 */
	smp_mb();

	if (waitqueue_active(&fcc->flush_wait_queue))
		wake_up(&fcc->flush_wait_queue);

	if (fcc->f2fs_issue_flush) {
		wait_for_completion(&cmd.wait);
		atomic_dec(&fcc->queued_flush);
	} else {
		struct llist_node *list;

		list = llist_del_all(&fcc->issue_list);
		if (!list) {
			wait_for_completion(&cmd.wait);
			atomic_dec(&fcc->queued_flush);
		} else {
			struct flush_cmd *tmp, *next;

			ret = submit_flush_wait(sbi, ino);

			llist_for_each_entry_safe(tmp, next, list, llnode) {
				if (tmp == &cmd) {
					cmd.ret = ret;
					atomic_dec(&fcc->queued_flush);
					continue;
				}
				tmp->ret = ret;
				complete(&tmp->wait);
			}
		}
	}

	return cmd.ret;
}

int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;

	if (SM_I(sbi)->fcc_info) {
		fcc = SM_I(sbi)->fcc_info;
		if (fcc->f2fs_issue_flush)
			return 0;
		goto init_thread;
	}

	fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	atomic_set(&fcc->issued_flush, 0);
	atomic_set(&fcc->queued_flush, 0);
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->fcc_info = fcc;
	if (!test_opt(sbi, FLUSH_MERGE))
		return 0;

init_thread:
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		int err = PTR_ERR(fcc->f2fs_issue_flush);

		fcc->f2fs_issue_flush = NULL;
		return err;
	}

	return 0;
}
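
/*
 * Teardown note: clear fcc->f2fs_issue_flush before kthread_stop() so
 * that racing f2fs_issue_flush() callers see a dead thread and fall back
 * to issuing the flush themselves.
 */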

void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

	if (fcc && fcc->f2fs_issue_flush) {
		struct task_struct *flush_thread = fcc->f2fs_issue_flush;

		fcc->f2fs_issue_flush = NULL;
		kthread_stop(flush_thread);
	}
	if (free) {
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
	}
}

int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
{
	int ret = 0, i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	for (i = 1; i < sbi->s_ndevs; i++) {
		int count = DEFAULT_RETRY_IO_COUNT;

		if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
			continue;

		do {
			ret = __submit_flush_wait(sbi, FDEV(i).bdev);
			if (ret)
				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
		} while (ret && --count);

		if (ret) {
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_FLUSH_FAIL);
			break;
		}

		spin_lock(&sbi->dev_lock);
		f2fs_clear_bit(i, (char *)&sbi->dirty_device);
		spin_unlock(&sbi->dev_lock);
	}

	return ret;
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;

		if (__is_large_section(sbi)) {
			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
			block_t valid_blocks =
				get_valid_blocks(sbi, segno, true);

			f2fs_bug_on(sbi, unlikely(!valid_blocks ||
					valid_blocks == CAP_BLKS_PER_SEC(sbi)));

			if (!IS_CURSEC(sbi, secno))
				set_bit(secno, dirty_i->dirty_secmap);
		}
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	block_t valid_blocks;

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		valid_blocks = get_valid_blocks(sbi, segno, true);
		if (valid_blocks == 0) {
			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
						dirty_i->victim_secmap);
#ifdef CONFIG_F2FS_CHECK_FS
			clear_bit(segno, SIT_I(sbi)->invalid_segmap);
#endif
		}
		if (__is_large_section(sbi)) {
			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

			if (!valid_blocks ||
					valid_blocks == CAP_BLKS_PER_SEC(sbi)) {
				clear_bit(secno, dirty_i->dirty_secmap);
				return;
			}

			if (!IS_CURSEC(sbi, secno))
				set_bit(secno, dirty_i->dirty_secmap);
		}
	}
}
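
/*
 * Segment classification used below: no valid blocks -> PRE (prefree),
 * partially valid -> DIRTY, fully valid -> neither list.
 */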

/*
 * Errors such as -ENOMEM should not occur here, since adding a dirty
 * entry to the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks, ckpt_valid_blocks;
	unsigned int usable_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, false);
	ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);

	if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
		ckpt_valid_blocks == usable_blocks)) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < usable_blocks) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

/* This moves currently empty dirty segments to prefree, under seglist_lock */
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		if (get_valid_blocks(sbi, segno, false))
			continue;
		if (IS_CURSEG(sbi, segno))
			continue;
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
{
	int ovp_hole_segs =
		(overprovision_segments(sbi) - reserved_segments(sbi));
	block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	block_t holes[2] = {0, 0};	/* DATA and NODE */
	block_t unusable;
	struct seg_entry *se;
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		se = get_seg_entry(sbi, segno);
		if (IS_NODESEG(se->type))
			holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) -
							se->valid_blocks;
		else
			holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) -
							se->valid_blocks;
	}
	mutex_unlock(&dirty_i->seglist_lock);

	unusable = max(holes[DATA], holes[NODE]);
	if (unusable > ovp_holes)
		return unusable - ovp_holes;
	return 0;
}

int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
{
	int ovp_hole_segs =
		(overprovision_segments(sbi) - reserved_segments(sbi));

	if (unusable > F2FS_OPTION(sbi).unusable_cap)
		return -EAGAIN;
	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
		dirty_segments(sbi) > ovp_hole_segs)
		return -EAGAIN;
	return 0;
}

/* This is only used by SBI_CP_DISABLED */
static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = 0;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		if (get_valid_blocks(sbi, segno, false))
			continue;
		if (get_ckpt_valid_blocks(sbi, segno, false))
			continue;
		mutex_unlock(&dirty_i->seglist_lock);
		return segno;
	}
	mutex_unlock(&dirty_i->seglist_lock);
	return NULL_SEGNO;
}
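
/*
 * Discard command lifecycle (see __submit_discard_cmd() and the endio
 * handler below): D_PREP (queued on a pend_list) -> D_SUBMIT, or
 * D_PARTIAL while split bios are in flight -> D_DONE once the last bio
 * completes.
 */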

static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t lstart,
		block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc;

	f2fs_bug_on(sbi, !len);

	pend_list = &dcc->pend_list[plist_idx(len)];

	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS, true, NULL);
	INIT_LIST_HEAD(&dc->list);
	dc->bdev = bdev;
	dc->di.lstart = lstart;
	dc->di.start = start;
	dc->di.len = len;
	dc->ref = 0;
	dc->state = D_PREP;
	dc->queued = 0;
	dc->error = 0;
	init_completion(&dc->wait);
	list_add_tail(&dc->list, pend_list);
	spin_lock_init(&dc->lock);
	dc->bio_ref = 0;
	atomic_inc(&dcc->discard_cmd_cnt);
	dcc->undiscard_blks += len;

	return dc;
}

static bool f2fs_check_discard_tree(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node *cur = rb_first_cached(&dcc->root), *next;
	struct discard_cmd *cur_dc, *next_dc;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_dc = rb_entry(cur, struct discard_cmd, rb_node);
		next_dc = rb_entry(next, struct discard_cmd, rb_node);

		if (cur_dc->di.lstart + cur_dc->di.len > next_dc->di.lstart) {
			f2fs_info(sbi, "broken discard_rbtree, "
				"cur(%u, %u) next(%u, %u)",
				cur_dc->di.lstart, cur_dc->di.len,
				next_dc->di.lstart, next_dc->di.len);
			return false;
		}
		cur = next;
	}
#endif
	return true;
}

static struct discard_cmd *__lookup_discard_cmd(struct f2fs_sb_info *sbi,
						block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node *node = dcc->root.rb_root.rb_node;
	struct discard_cmd *dc;

	while (node) {
		dc = rb_entry(node, struct discard_cmd, rb_node);

		if (blkaddr < dc->di.lstart)
			node = node->rb_left;
		else if (blkaddr >= dc->di.lstart + dc->di.len)
			node = node->rb_right;
		else
			return dc;
	}
	return NULL;
}
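
/*
 * Extended lookup: besides the command covering @blkaddr (if any), also
 * report the neighbouring commands and an insertion point so the caller
 * can merge or insert without a second tree walk.
 */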

static struct discard_cmd *__lookup_discard_cmd_ret(struct rb_root_cached *root,
				block_t blkaddr,
				struct discard_cmd **prev_entry,
				struct discard_cmd **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent)
{
	struct rb_node **pnode = &root->rb_root.rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct discard_cmd *dc;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(&root->rb_root))
		return NULL;

	while (*pnode) {
		parent = *pnode;
		dc = rb_entry(*pnode, struct discard_cmd, rb_node);

		if (blkaddr < dc->di.lstart)
			pnode = &(*pnode)->rb_left;
		else if (blkaddr >= dc->di.lstart + dc->di.len)
			pnode = &(*pnode)->rb_right;
		else
			goto lookup_neighbors;
	}

	*insert_p = pnode;
	*insert_parent = parent;

	dc = rb_entry(parent, struct discard_cmd, rb_node);
	tmp_node = parent;
	if (parent && blkaddr > dc->di.lstart)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);

	tmp_node = parent;
	if (parent && blkaddr < dc->di.lstart)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
	return NULL;

lookup_neighbors:
	/* lookup prev node for merging backward later */
	tmp_node = rb_prev(&dc->rb_node);
	*prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);

	/* lookup next node for merging frontward later */
	tmp_node = rb_next(&dc->rb_node);
	*next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
	return dc;
}

static void __detach_discard_cmd(struct discard_cmd_control *dcc,
							struct discard_cmd *dc)
{
	if (dc->state == D_DONE)
		atomic_sub(dc->queued, &dcc->queued_discard);

	list_del(&dc->list);
	rb_erase_cached(&dc->rb_node, &dcc->root);
	dcc->undiscard_blks -= dc->di.len;

	kmem_cache_free(discard_cmd_slab, dc);

	atomic_dec(&dcc->discard_cmd_cnt);
}

static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	unsigned long flags;

	trace_f2fs_remove_discard(dc->bdev, dc->di.start, dc->di.len);

	spin_lock_irqsave(&dc->lock, flags);
	if (dc->bio_ref) {
		spin_unlock_irqrestore(&dc->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dc->lock, flags);

	f2fs_bug_on(sbi, dc->ref);

	if (dc->error == -EOPNOTSUPP)
		dc->error = 0;

	if (dc->error)
		f2fs_info_ratelimited(sbi,
			"Issue discard(%u, %u, %u) failed, ret: %d",
			dc->di.lstart, dc->di.start, dc->di.len, dc->error);
	__detach_discard_cmd(dcc, dc);
}

static void f2fs_submit_discard_endio(struct bio *bio)
{
	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
	unsigned long flags;

	spin_lock_irqsave(&dc->lock, flags);
	if (!dc->error)
		dc->error = blk_status_to_errno(bio->bi_status);
	dc->bio_ref--;
	if (!dc->bio_ref && dc->state == D_SUBMIT) {
		dc->state = D_DONE;
		complete_all(&dc->wait);
	}
	spin_unlock_irqrestore(&dc->lock, flags);
	bio_put(bio);
}

static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
				block_t start, block_t end)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct seg_entry *sentry;
	unsigned int segno;
	block_t blk = start;
	unsigned long offset, size, *map;

	while (blk < end) {
		segno = GET_SEGNO(sbi, blk);
		sentry = get_seg_entry(sbi, segno);
		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);

		if (end < START_BLOCK(sbi, segno + 1))
			size = GET_BLKOFF_FROM_SEG0(sbi, end);
		else
			size = BLKS_PER_SEG(sbi);
		map = (unsigned long *)(sentry->cur_valid_map);
		offset = __find_rev_next_bit(map, size, offset);
		f2fs_bug_on(sbi, offset != size);
		blk = START_BLOCK(sbi, segno + 1);
	}
#endif
}
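
/*
 * Policy knobs at a glance: DPOLICY_BG is io-aware, async and ordered;
 * DPOLICY_FORCE keeps the intervals but ignores idle state; DPOLICY_FSTRIM
 * is not io-aware; DPOLICY_UMOUNT additionally drops the granularity to
 * MIN_DISCARD_GRANULARITY and arms the umount timeout.
 */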

static void __init_discard_policy(struct f2fs_sb_info *sbi,
				struct discard_policy *dpolicy,
				int discard_type, unsigned int granularity)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	/* common policy */
	dpolicy->type = discard_type;
	dpolicy->sync = true;
	dpolicy->ordered = false;
	dpolicy->granularity = granularity;

	dpolicy->max_requests = dcc->max_discard_request;
	dpolicy->io_aware_gran = dcc->discard_io_aware_gran;
	dpolicy->timeout = false;

	if (discard_type == DPOLICY_BG) {
		dpolicy->min_interval = dcc->min_discard_issue_time;
		dpolicy->mid_interval = dcc->mid_discard_issue_time;
		dpolicy->max_interval = dcc->max_discard_issue_time;
		dpolicy->io_aware = true;
		dpolicy->sync = false;
		dpolicy->ordered = true;
		if (utilization(sbi) > dcc->discard_urgent_util) {
			dpolicy->granularity = MIN_DISCARD_GRANULARITY;
			if (atomic_read(&dcc->discard_cmd_cnt))
				dpolicy->max_interval =
					dcc->min_discard_issue_time;
		}
	} else if (discard_type == DPOLICY_FORCE) {
		dpolicy->min_interval = dcc->min_discard_issue_time;
		dpolicy->mid_interval = dcc->mid_discard_issue_time;
		dpolicy->max_interval = dcc->max_discard_issue_time;
		dpolicy->io_aware = false;
	} else if (discard_type == DPOLICY_FSTRIM) {
		dpolicy->io_aware = false;
	} else if (discard_type == DPOLICY_UMOUNT) {
		dpolicy->io_aware = false;
		/* we need to issue all to keep CP_TRIMMED_FLAG */
		dpolicy->granularity = MIN_DISCARD_GRANULARITY;
		dpolicy->timeout = true;
	}
}

static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len);

#ifdef CONFIG_BLK_DEV_ZONED
static void __submit_zone_reset_cmd(struct f2fs_sb_info *sbi,
				   struct discard_cmd *dc, blk_opf_t flag,
				   struct list_head *wait_list,
				   unsigned int *issued)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct block_device *bdev = dc->bdev;
	struct bio *bio = bio_alloc(bdev, 0, REQ_OP_ZONE_RESET | flag, GFP_NOFS);
	unsigned long flags;

	trace_f2fs_issue_reset_zone(bdev, dc->di.start);

	spin_lock_irqsave(&dc->lock, flags);
	dc->state = D_SUBMIT;
	dc->bio_ref++;
	spin_unlock_irqrestore(&dc->lock, flags);

	if (issued)
		(*issued)++;

	atomic_inc(&dcc->queued_discard);
	dc->queued++;
	list_move_tail(&dc->list, wait_list);

	/* sanity check on discard range */
	__check_sit_bitmap(sbi, dc->di.lstart, dc->di.lstart + dc->di.len);

	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(dc->di.start);
	bio->bi_private = dc;
	bio->bi_end_io = f2fs_submit_discard_endio;
	submit_bio(bio);

	atomic_inc(&dcc->issued_discard);
	f2fs_update_iostat(sbi, NULL, FS_ZONE_RESET_IO, dc->di.len * F2FS_BLKSIZE);
}
#endif
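
/*
 * A request larger than bdev_max_discard_sectors() is issued as several
 * bios below; the command sits in D_PARTIAL until its last chunk is
 * submitted, and any unissued tail is re-inserted into the discard tree.
 */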

/* this function is copied from blkdev_issue_discard() in block/blk-lib.c */
static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
				struct discard_policy *dpolicy,
				struct discard_cmd *dc, int *issued)
{
	struct block_device *bdev = dc->bdev;
	unsigned int max_discard_blocks =
			SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
					&(dcc->fstrim_list) : &(dcc->wait_list);
	blk_opf_t flag = dpolicy->sync ? REQ_SYNC : 0;
	block_t lstart, start, len, total_len;
	int err = 0;

	if (dc->state != D_PREP)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
		return 0;

#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) {
		int devi = f2fs_bdev_index(sbi, bdev);

		if (devi < 0)
			return -EINVAL;

		if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
			__submit_zone_reset_cmd(sbi, dc, flag,
						wait_list, issued);
			return 0;
		}
	}
#endif

	/*
	 * stop issuing discard for any of below cases:
	 * 1. device is a conventional zone, but it doesn't support discard.
	 * 2. device is a regular device, and it doesn't support discard
	 *    after a snapshot.
	 */
	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;

	trace_f2fs_issue_discard(bdev, dc->di.start, dc->di.len);

	lstart = dc->di.lstart;
	start = dc->di.start;
	len = dc->di.len;
	total_len = len;

	dc->di.len = 0;

	while (total_len && *issued < dpolicy->max_requests && !err) {
		struct bio *bio = NULL;
		unsigned long flags;
		bool last = true;

		if (len > max_discard_blocks) {
			len = max_discard_blocks;
			last = false;
		}

		(*issued)++;
		if (*issued == dpolicy->max_requests)
			last = true;

		dc->di.len += len;

		if (time_to_inject(sbi, FAULT_DISCARD)) {
			err = -EIO;
		} else {
			err = __blkdev_issue_discard(bdev,
					SECTOR_FROM_BLOCK(start),
					SECTOR_FROM_BLOCK(len),
					GFP_NOFS, &bio);
		}
		if (err) {
			spin_lock_irqsave(&dc->lock, flags);
			if (dc->state == D_PARTIAL)
				dc->state = D_SUBMIT;
			spin_unlock_irqrestore(&dc->lock, flags);

			break;
		}

		f2fs_bug_on(sbi, !bio);

		/*
		 * should keep before submission to avoid D_DONE
		 * right away
		 */
		spin_lock_irqsave(&dc->lock, flags);
		if (last)
			dc->state = D_SUBMIT;
		else
			dc->state = D_PARTIAL;
		dc->bio_ref++;
		spin_unlock_irqrestore(&dc->lock, flags);

		atomic_inc(&dcc->queued_discard);
		dc->queued++;
		list_move_tail(&dc->list, wait_list);

		/* sanity check on discard range */
		__check_sit_bitmap(sbi, lstart, lstart + len);

		bio->bi_private = dc;
		bio->bi_end_io = f2fs_submit_discard_endio;
		bio->bi_opf |= flag;
		submit_bio(bio);

		atomic_inc(&dcc->issued_discard);

		f2fs_update_iostat(sbi, NULL, FS_DISCARD_IO, len * F2FS_BLKSIZE);

		lstart += len;
		start += len;
		total_len -= len;
		len = total_len;
	}

	if (!err && len) {
		dcc->undiscard_blks -= len;
		__update_discard_tree_range(sbi, bdev, lstart, start, len);
	}
	return err;
}
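
/*
 * The functions below keep the rb-tree and pend_lists in sync: inserts
 * must not overlap an existing command (f2fs_bug_on), and
 * __update_discard_tree_range() grows an adjacent D_PREP command on the
 * same bdev instead of inserting, subject to max_discard_blocks.
 */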

static void __insert_discard_cmd(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node **p = &dcc->root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct discard_cmd *dc;
	bool leftmost = true;

	/* look up rb tree to find parent node */
	while (*p) {
		parent = *p;
		dc = rb_entry(parent, struct discard_cmd, rb_node);

		if (lstart < dc->di.lstart) {
			p = &(*p)->rb_left;
		} else if (lstart >= dc->di.lstart + dc->di.len) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);

	rb_link_node(&dc->rb_node, parent, p);
	rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);
}

static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
						struct discard_cmd *dc)
{
	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->di.len)]);
}

static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
				struct discard_cmd *dc, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_info di = dc->di;
	bool modified = false;

	if (dc->state == D_DONE || dc->di.len == 1) {
		__remove_discard_cmd(sbi, dc);
		return;
	}

	dcc->undiscard_blks -= di.len;

	if (blkaddr > di.lstart) {
		dc->di.len = blkaddr - dc->di.lstart;
		dcc->undiscard_blks += dc->di.len;
		__relocate_discard_cmd(dcc, dc);
		modified = true;
	}

	if (blkaddr < di.lstart + di.len - 1) {
		if (modified) {
			__insert_discard_cmd(sbi, dc->bdev, blkaddr + 1,
					di.start + blkaddr + 1 - di.lstart,
					di.lstart + di.len - 1 - blkaddr);
		} else {
			dc->di.lstart++;
			dc->di.len--;
			dc->di.start++;
			dcc->undiscard_blks += dc->di.len;
			__relocate_discard_cmd(dcc, dc);
		}
	}
}

static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct discard_cmd *dc;
	struct discard_info di = {0};
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int max_discard_blocks =
			SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
	block_t end = lstart + len;

	dc = __lookup_discard_cmd_ret(&dcc->root, lstart,
				&prev_dc, &next_dc, &insert_p, &insert_parent);
	if (dc)
		prev_dc = dc;

	if (!prev_dc) {
		di.lstart = lstart;
		di.len = next_dc ? next_dc->di.lstart - lstart : len;
		di.len = min(di.len, len);
		di.start = start;
	}

	while (1) {
		struct rb_node *node;
		bool merged = false;
		struct discard_cmd *tdc = NULL;

		if (prev_dc) {
			di.lstart = prev_dc->di.lstart + prev_dc->di.len;
			if (di.lstart < lstart)
				di.lstart = lstart;
			if (di.lstart >= end)
				break;

			if (!next_dc || next_dc->di.lstart > end)
				di.len = end - di.lstart;
			else
				di.len = next_dc->di.lstart - di.lstart;
			di.start = start + di.lstart - lstart;
		}

		if (!di.len)
			goto next;

		if (prev_dc && prev_dc->state == D_PREP &&
			prev_dc->bdev == bdev &&
			__is_discard_back_mergeable(&di, &prev_dc->di,
							max_discard_blocks)) {
			prev_dc->di.len += di.len;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, prev_dc);
			di = prev_dc->di;
			tdc = prev_dc;
			merged = true;
		}

		if (next_dc && next_dc->state == D_PREP &&
			next_dc->bdev == bdev &&
			__is_discard_front_mergeable(&di, &next_dc->di,
							max_discard_blocks)) {
			next_dc->di.lstart = di.lstart;
			next_dc->di.len += di.len;
			next_dc->di.start = di.start;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, next_dc);
			if (tdc)
				__remove_discard_cmd(sbi, tdc);
			merged = true;
		}

		if (!merged)
			__insert_discard_cmd(sbi, bdev,
						di.lstart, di.start, di.len);
next:
		prev_dc = next_dc;
		if (!prev_dc)
			break;

		node = rb_next(&prev_dc->rb_node);
		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}
}

#ifdef CONFIG_BLK_DEV_ZONED
static void __queue_zone_reset_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t lblkstart,
		block_t blklen)
{
	trace_f2fs_queue_reset_zone(bdev, blkstart);

	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
	__insert_discard_cmd(sbi, bdev, lblkstart, blkstart, blklen);
	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
}
#endif
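
/*
 * Note the two address spaces here: @blkstart is rebased to a per-device
 * physical block for multi-device setups, while the original @blkstart is
 * kept as @lblkstart, the filesystem-wide logical address used as the
 * rb-tree key.
 */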

static void __queue_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	block_t lblkstart = blkstart;

	if (!f2fs_bdev_support_discard(bdev))
		return;

	trace_f2fs_queue_discard(bdev, blkstart, blklen);

	if (f2fs_is_multi_device(sbi)) {
		int devi = f2fs_target_device_index(sbi, blkstart);

		blkstart -= FDEV(devi).start_blk;
	}
	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
}

static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
		struct discard_policy *dpolicy, int *issued)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	struct discard_cmd *dc;
	struct blk_plug plug;
	bool io_interrupted = false;

	mutex_lock(&dcc->cmd_lock);
	dc = __lookup_discard_cmd_ret(&dcc->root, dcc->next_pos,
				&prev_dc, &next_dc, &insert_p, &insert_parent);
	if (!dc)
		dc = next_dc;

	blk_start_plug(&plug);

	while (dc) {
		struct rb_node *node;
		int err = 0;

		if (dc->state != D_PREP)
			goto next;

		if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
			io_interrupted = true;
			break;
		}

		dcc->next_pos = dc->di.lstart + dc->di.len;
		err = __submit_discard_cmd(sbi, dpolicy, dc, issued);

		if (*issued >= dpolicy->max_requests)
			break;
next:
		node = rb_next(&dc->rb_node);
		if (err)
			__remove_discard_cmd(sbi, dc);
		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}

	blk_finish_plug(&plug);

	if (!dc)
		dcc->next_pos = 0;

	mutex_unlock(&dcc->cmd_lock);

	if (!(*issued) && io_interrupted)
		*issued = -1;
}

static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy);
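
/*
 * Issue order: pend_lists are scanned from the largest length class down,
 * so big ranges go out first; ordered policies instead walk the rb-tree
 * from dcc->next_pos via __issue_discard_cmd_orderly() above.
 */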

static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	struct blk_plug plug;
	int i, issued;
	bool io_interrupted = false;

	if (dpolicy->timeout)
		f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);

retry:
	issued = 0;
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		if (dpolicy->timeout &&
				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
			break;

		if (i + 1 < dpolicy->granularity)
			break;

		if (i + 1 < dcc->max_ordered_discard && dpolicy->ordered) {
			__issue_discard_cmd_orderly(sbi, dpolicy, &issued);
			return issued;
		}

		pend_list = &dcc->pend_list[i];

		mutex_lock(&dcc->cmd_lock);
		if (list_empty(pend_list))
			goto next;
		if (unlikely(dcc->rbtree_check))
			f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
		blk_start_plug(&plug);
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);

			if (dpolicy->timeout &&
					f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
				break;

			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
						!is_idle(sbi, DISCARD_TIME)) {
				io_interrupted = true;
				break;
			}

			__submit_discard_cmd(sbi, dpolicy, dc, &issued);

			if (issued >= dpolicy->max_requests)
				break;
		}
		blk_finish_plug(&plug);
next:
		mutex_unlock(&dcc->cmd_lock);

		if (issued >= dpolicy->max_requests || io_interrupted)
			break;
	}

	if (dpolicy->type == DPOLICY_UMOUNT && issued) {
		__wait_all_discard_cmd(sbi, dpolicy);
		goto retry;
	}

	if (!issued && io_interrupted)
		issued = -1;

	return issued;
}

static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	int i;
	bool dropped = false;

	mutex_lock(&dcc->cmd_lock);
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		pend_list = &dcc->pend_list[i];
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);
			__remove_discard_cmd(sbi, dc);
			dropped = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	return dropped;
}

void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
{
	__drop_discard_cmd(sbi);
}

static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	unsigned int len = 0;

	wait_for_completion_io(&dc->wait);
	mutex_lock(&dcc->cmd_lock);
	f2fs_bug_on(sbi, dc->state != D_DONE);
	dc->ref--;
	if (!dc->ref) {
		if (!dc->error)
			len = dc->di.len;
		__remove_discard_cmd(sbi, dc);
	}
	mutex_unlock(&dcc->cmd_lock);

	return len;
}

static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy,
						block_t start, block_t end)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
					&(dcc->fstrim_list) : &(dcc->wait_list);
	struct discard_cmd *dc = NULL, *iter, *tmp;
	unsigned int trimmed = 0;

next:
	dc = NULL;

	mutex_lock(&dcc->cmd_lock);
	list_for_each_entry_safe(iter, tmp, wait_list, list) {
		if (iter->di.lstart + iter->di.len <= start ||
					end <= iter->di.lstart)
			continue;
		if (iter->di.len < dpolicy->granularity)
			continue;
		if (iter->state == D_DONE && !iter->ref) {
			wait_for_completion_io(&iter->wait);
			if (!iter->error)
				trimmed += iter->di.len;
			__remove_discard_cmd(sbi, iter);
		} else {
			iter->ref++;
			dc = iter;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (dc) {
		trimmed += __wait_one_discard_bio(sbi, dc);
		goto next;
	}

	return trimmed;
}

static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy)
{
	struct discard_policy dp;
	unsigned int discard_blks;

	if (dpolicy)
		return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);

	/* wait all */
	__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, MIN_DISCARD_GRANULARITY);
	discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
	__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, MIN_DISCARD_GRANULARITY);
	discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);

	return discard_blks;
}

/* This should be covered by global mutex, &sit_i->sentry_lock */
static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;
	bool need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	dc = __lookup_discard_cmd(sbi, blkaddr);
#ifdef CONFIG_BLK_DEV_ZONED
	if (dc && f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(dc->bdev)) {
		int devi = f2fs_bdev_index(sbi, dc->bdev);

		if (devi < 0) {
			mutex_unlock(&dcc->cmd_lock);
			return;
		}

		if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
			/* force submit zone reset */
			if (dc->state == D_PREP)
				__submit_zone_reset_cmd(sbi, dc, REQ_SYNC,
							&dcc->wait_list, NULL);
			dc->ref++;
			mutex_unlock(&dcc->cmd_lock);
			/* wait zone reset */
			__wait_one_discard_bio(sbi, dc);
			return;
		}
	}
#endif
	if (dc) {
		if (dc->state == D_PREP) {
			__punch_discard_cmd(sbi, dc, blkaddr);
		} else {
			dc->ref++;
			need_wait = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait)
		__wait_one_discard_bio(sbi, dc);
}

void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	if (dcc && dcc->f2fs_issue_discard) {
		struct task_struct *discard_thread = dcc->f2fs_issue_discard;

		dcc->f2fs_issue_discard = NULL;
		kthread_stop(discard_thread);
	}
}

/**
 * f2fs_issue_discard_timeout() - Issue all discard cmd within UMOUNT_DISCARD_TIMEOUT
 * @sbi: the f2fs_sb_info data for discard cmd to issue
 *
 * When UMOUNT_DISCARD_TIMEOUT is exceeded, all remaining discard commands
 * will be dropped.
 *
 * Return: true if all discard commands were issued or none needed to be
 * issued; false otherwise.
 */
bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_policy dpolicy;
	bool dropped;

	if (!atomic_read(&dcc->discard_cmd_cnt))
		return true;

	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
					dcc->discard_granularity);
	__issue_discard_cmd(sbi, &dpolicy);
	dropped = __drop_discard_cmd(sbi);

	/* just to make sure there are no pending discard commands */
	__wait_all_discard_cmd(sbi, NULL);

	f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
	return !dropped;
}

static int issue_discard_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	wait_queue_head_t *q = &dcc->discard_wait_queue;
	struct discard_policy dpolicy;
	unsigned int wait_ms = dcc->min_discard_issue_time;
	int issued;

	set_freezable();

	do {
		wait_event_interruptible_timeout(*q,
				kthread_should_stop() || freezing(current) ||
				dcc->discard_wake,
				msecs_to_jiffies(wait_ms));

		if (sbi->gc_mode == GC_URGENT_HIGH ||
			!f2fs_available_free_memory(sbi, DISCARD_CACHE))
			__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE,
						MIN_DISCARD_GRANULARITY);
		else
			__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
						dcc->discard_granularity);

		if (dcc->discard_wake)
			dcc->discard_wake = false;

		/* clean up pending candidates before going to sleep */
		if (atomic_read(&dcc->queued_discard))
			__wait_all_discard_cmd(sbi, NULL);

		if (try_to_freeze())
			continue;
		if (f2fs_readonly(sbi->sb))
			continue;
		if (kthread_should_stop())
			return 0;
		if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
			!atomic_read(&dcc->discard_cmd_cnt)) {
			wait_ms = dpolicy.max_interval;
			continue;
		}

		sb_start_intwrite(sbi->sb);

		issued = __issue_discard_cmd(sbi, &dpolicy);
		if (issued > 0) {
			__wait_all_discard_cmd(sbi, &dpolicy);
			wait_ms = dpolicy.min_interval;
		} else if (issued == -1) {
			wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
			if (!wait_ms)
				wait_ms = dpolicy.mid_interval;
		} else {
			wait_ms = dpolicy.max_interval;
		}
		if (!atomic_read(&dcc->discard_cmd_cnt))
			wait_ms = dpolicy.max_interval;

		sb_end_intwrite(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	sector_t sector, nr_sects;
	block_t lblkstart = blkstart;
	int devi = 0;
	u64 remainder = 0;

	if (f2fs_is_multi_device(sbi)) {
		devi = f2fs_target_device_index(sbi, blkstart);
		if (blkstart < FDEV(devi).start_blk ||
		    blkstart > FDEV(devi).end_blk) {
			f2fs_err(sbi, "Invalid block %x", blkstart);
			return -EIO;
		}
		blkstart -= FDEV(devi).start_blk;
	}

	/* For sequential zones, reset the zone write pointer */
	if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
		sector = SECTOR_FROM_BLOCK(blkstart);
		nr_sects = SECTOR_FROM_BLOCK(blklen);
		div64_u64_rem(sector, bdev_zone_sectors(bdev), &remainder);

		if (remainder || nr_sects != bdev_zone_sectors(bdev)) {
			f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
				 devi, sbi->s_ndevs ? FDEV(devi).path : "",
				 blkstart, blklen);
			return -EIO;
		}

		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) {
			trace_f2fs_issue_reset_zone(bdev, blkstart);
			return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
						sector, nr_sects, GFP_NOFS);
		}

		__queue_zone_reset_cmd(sbi, bdev, blkstart, lblkstart, blklen);
		return 0;
	}

	/* For conventional zones, use regular discard if supported */
	__queue_discard_cmd(sbi, bdev, lblkstart, blklen);
	return 0;
}
#endif

static int __issue_discard_async(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
#endif
	__queue_discard_cmd(sbi, bdev, blkstart, blklen);
	return 0;
}

static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
				block_t blkstart, block_t blklen)
{
	sector_t start = blkstart, len = 0;
	struct block_device *bdev;
	struct seg_entry *se;
	unsigned int offset;
	block_t i;
	int err = 0;

	bdev = f2fs_target_device(sbi, blkstart, NULL);

	for (i = blkstart; i < blkstart + blklen; i++, len++) {
		if (i != start) {
			struct block_device *bdev2 =
				f2fs_target_device(sbi, i, NULL);

			if (bdev2 != bdev) {
				err = __issue_discard_async(sbi, bdev,
						start, len);
				if (err)
					return err;
				bdev = bdev2;
				start = i;
				len = 0;
			}
		}

		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
		offset = GET_BLKOFF_FROM_SEG0(sbi, i);

		if (f2fs_block_unit_discard(sbi) &&
				!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	}

	if (len)
		err = __issue_discard_async(sbi, bdev, start, len);
	return err;
}
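
/*
 * dmap below selects discard candidates: in the CP_DISCARD (force) case,
 * blocks invalid at the last checkpoint and not yet discarded
 * (~ckpt_map & ~discard_map); otherwise blocks that were valid at the
 * last checkpoint but have been freed since ((cur_map ^ ckpt_map) & ckpt_map).
 */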
2118 
2119 static void release_discard_addr(struct discard_entry *entry)
2120 {
2121 list_del(&entry->list);
2122 kmem_cache_free(discard_entry_slab, entry);
2123 }
2124 
2125 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
2126 {
2127 struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
2128 struct discard_entry *entry, *this;
2129 
2130 /* drop caches */
2131 list_for_each_entry_safe(entry, this, head, list)
2132 release_discard_addr(entry);
2133 }
2134 
2135 /*
2136 * Should call f2fs_clear_prefree_segments after checkpoint is done.
2137 */
2138 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
2139 {
2140 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2141 unsigned int segno;
2142 
2143 mutex_lock(&dirty_i->seglist_lock);
2144 for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
2145 __set_test_and_free(sbi, segno, false);
2146 mutex_unlock(&dirty_i->seglist_lock);
2147 }
2148 
2149 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
2150 struct cp_control *cpc)
2151 {
2152 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2153 struct list_head *head = &dcc->entry_list;
2154 struct discard_entry *entry, *this;
2155 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2156 unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
2157 unsigned int start = 0, end = -1;
2158 unsigned int secno, start_segno;
2159 bool force = (cpc->reason & CP_DISCARD);
2160 bool section_alignment = F2FS_OPTION(sbi).discard_unit ==
2161 DISCARD_UNIT_SECTION;
2162 
2163 if (f2fs_lfs_mode(sbi) && __is_large_section(sbi))
2164 section_alignment = true;
2165 
2166 mutex_lock(&dirty_i->seglist_lock);
2167 
2168 while (1) {
2169 int i;
2170 
2171 if (section_alignment && end != -1)
2172 end--;
2173 start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
2174 if (start >= MAIN_SEGS(sbi))
2175 break;
2176 end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
2177 start + 1);
2178 
2179 if (section_alignment) {
2180 start = rounddown(start, SEGS_PER_SEC(sbi));
2181 end = roundup(end, SEGS_PER_SEC(sbi));
2182 }
2183 
2184 for (i = start; i < end; i++) {
2185 if (test_and_clear_bit(i, prefree_map))
2186 dirty_i->nr_dirty[PRE]--;
2187 }
2188 
2189 if (!f2fs_realtime_discard_enable(sbi))
2190 continue;
2191 
2192 if (force && start >= cpc->trim_start &&
2193 (end - 1) <= cpc->trim_end)
2194 continue;
2195 
2196 /* Should cover 2MB zoned device for zone-based reset */
2197 if (!f2fs_sb_has_blkzoned(sbi) &&
2198 (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi))) {
2199 f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
2200 (end - start) <<
sbi->log_blocks_per_seg); 2201 continue; 2202 } 2203 next: 2204 secno = GET_SEC_FROM_SEG(sbi, start); 2205 start_segno = GET_SEG_FROM_SEC(sbi, secno); 2206 if (!IS_CURSEC(sbi, secno) && 2207 !get_valid_blocks(sbi, start, true)) 2208 f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno), 2209 BLKS_PER_SEC(sbi)); 2210 2211 start = start_segno + SEGS_PER_SEC(sbi); 2212 if (start < end) 2213 goto next; 2214 else 2215 end = start - 1; 2216 } 2217 mutex_unlock(&dirty_i->seglist_lock); 2218 2219 if (!f2fs_block_unit_discard(sbi)) 2220 goto wakeup; 2221 2222 /* send small discards */ 2223 list_for_each_entry_safe(entry, this, head, list) { 2224 unsigned int cur_pos = 0, next_pos, len, total_len = 0; 2225 bool is_valid = test_bit_le(0, entry->discard_map); 2226 2227 find_next: 2228 if (is_valid) { 2229 next_pos = find_next_zero_bit_le(entry->discard_map, 2230 BLKS_PER_SEG(sbi), cur_pos); 2231 len = next_pos - cur_pos; 2232 2233 if (f2fs_sb_has_blkzoned(sbi) || 2234 (force && len < cpc->trim_minlen)) 2235 goto skip; 2236 2237 f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos, 2238 len); 2239 total_len += len; 2240 } else { 2241 next_pos = find_next_bit_le(entry->discard_map, 2242 BLKS_PER_SEG(sbi), cur_pos); 2243 } 2244 skip: 2245 cur_pos = next_pos; 2246 is_valid = !is_valid; 2247 2248 if (cur_pos < BLKS_PER_SEG(sbi)) 2249 goto find_next; 2250 2251 release_discard_addr(entry); 2252 dcc->nr_discards -= total_len; 2253 } 2254 2255 wakeup: 2256 wake_up_discard_thread(sbi, false); 2257 } 2258 2259 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi) 2260 { 2261 dev_t dev = sbi->sb->s_bdev->bd_dev; 2262 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 2263 int err = 0; 2264 2265 if (!f2fs_realtime_discard_enable(sbi)) 2266 return 0; 2267 2268 dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi, 2269 "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev)); 2270 if (IS_ERR(dcc->f2fs_issue_discard)) { 2271 err = PTR_ERR(dcc->f2fs_issue_discard); 2272 dcc->f2fs_issue_discard = NULL; 2273 } 2274 2275 return err; 2276 } 2277 2278 static int create_discard_cmd_control(struct f2fs_sb_info *sbi) 2279 { 2280 struct discard_cmd_control *dcc; 2281 int err = 0, i; 2282 2283 if (SM_I(sbi)->dcc_info) { 2284 dcc = SM_I(sbi)->dcc_info; 2285 goto init_thread; 2286 } 2287 2288 dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL); 2289 if (!dcc) 2290 return -ENOMEM; 2291 2292 dcc->discard_io_aware_gran = MAX_PLIST_NUM; 2293 dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY; 2294 dcc->max_ordered_discard = DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY; 2295 if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT) 2296 dcc->discard_granularity = BLKS_PER_SEG(sbi); 2297 else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION) 2298 dcc->discard_granularity = BLKS_PER_SEC(sbi); 2299 2300 INIT_LIST_HEAD(&dcc->entry_list); 2301 for (i = 0; i < MAX_PLIST_NUM; i++) 2302 INIT_LIST_HEAD(&dcc->pend_list[i]); 2303 INIT_LIST_HEAD(&dcc->wait_list); 2304 INIT_LIST_HEAD(&dcc->fstrim_list); 2305 mutex_init(&dcc->cmd_lock); 2306 atomic_set(&dcc->issued_discard, 0); 2307 atomic_set(&dcc->queued_discard, 0); 2308 atomic_set(&dcc->discard_cmd_cnt, 0); 2309 dcc->nr_discards = 0; 2310 dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg; 2311 dcc->max_discard_request = DEF_MAX_DISCARD_REQUEST; 2312 dcc->min_discard_issue_time = DEF_MIN_DISCARD_ISSUE_TIME; 2313 dcc->mid_discard_issue_time = DEF_MID_DISCARD_ISSUE_TIME; 2314 dcc->max_discard_issue_time = DEF_MAX_DISCARD_ISSUE_TIME; 2315 
dcc->discard_urgent_util = DEF_DISCARD_URGENT_UTIL;
2316 dcc->undiscard_blks = 0;
2317 dcc->next_pos = 0;
2318 dcc->root = RB_ROOT_CACHED;
2319 dcc->rbtree_check = false;
2320 
2321 init_waitqueue_head(&dcc->discard_wait_queue);
2322 SM_I(sbi)->dcc_info = dcc;
2323 init_thread:
2324 err = f2fs_start_discard_thread(sbi);
2325 if (err) {
2326 kfree(dcc);
2327 SM_I(sbi)->dcc_info = NULL;
2328 }
2329 
2330 return err;
2331 }
2332 
2333 static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
2334 {
2335 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2336 
2337 if (!dcc)
2338 return;
2339 
2340 f2fs_stop_discard_thread(sbi);
2341 
2342 /*
2343 * Recovery can cache discard commands, so the error path of
2344 * fill_super() needs a chance to handle them.
2345 */
2346 f2fs_issue_discard_timeout(sbi);
2347 
2348 kfree(dcc);
2349 SM_I(sbi)->dcc_info = NULL;
2350 }
2351 
2352 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
2353 {
2354 struct sit_info *sit_i = SIT_I(sbi);
2355 
2356 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
2357 sit_i->dirty_sentries++;
2358 return false;
2359 }
2360 
2361 return true;
2362 }
2363 
2364 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
2365 unsigned int segno, int modified)
2366 {
2367 struct seg_entry *se = get_seg_entry(sbi, segno);
2368 
2369 se->type = type;
2370 if (modified)
2371 __mark_sit_entry_dirty(sbi, segno);
2372 }
2373 
2374 static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi,
2375 block_t blkaddr)
2376 {
2377 unsigned int segno = GET_SEGNO(sbi, blkaddr);
2378 
2379 if (segno == NULL_SEGNO)
2380 return 0;
2381 return get_seg_entry(sbi, segno)->mtime;
2382 }
2383 
2384 static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
2385 unsigned long long old_mtime)
2386 {
2387 struct seg_entry *se;
2388 unsigned int segno = GET_SEGNO(sbi, blkaddr);
2389 unsigned long long ctime = get_mtime(sbi, false);
2390 unsigned long long mtime = old_mtime ? old_mtime : ctime;
2391 
2392 if (segno == NULL_SEGNO)
2393 return;
2394 
2395 se = get_seg_entry(sbi, segno);
2396 
2397 if (!se->mtime)
2398 se->mtime = mtime;
2399 else
2400 se->mtime = div_u64(se->mtime * se->valid_blocks + mtime,
2401 se->valid_blocks + 1);
2402 
2403 if (ctime > SIT_I(sbi)->max_mtime)
2404 SIT_I(sbi)->max_mtime = ctime;
2405 }
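/*
 * Editor's note -- worked example, not upstream f2fs code: the update above
 * keeps se->mtime as a running mean over the blocks of the segment, updated
 * one block at a time:
 *
 *	new_avg = (old_avg * valid_blocks + mtime) / (valid_blocks + 1)
 *
 * e.g. a segment whose 3 valid blocks average mtime 100 and which receives
 * a block written at time 200 moves to (100 * 3 + 200) / 4 = 125.  This
 * per-segment age is later consulted when picking ATGC/SSR victims.
 */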
2406 
2407 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
2408 {
2409 struct seg_entry *se;
2410 unsigned int segno, offset;
2411 long int new_vblocks;
2412 bool exist;
2413 #ifdef CONFIG_F2FS_CHECK_FS
2414 bool mir_exist;
2415 #endif
2416 
2417 segno = GET_SEGNO(sbi, blkaddr);
2418 if (segno == NULL_SEGNO)
2419 return;
2420 
2421 se = get_seg_entry(sbi, segno);
2422 new_vblocks = se->valid_blocks + del;
2423 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2424 
2425 f2fs_bug_on(sbi, (new_vblocks < 0 ||
2426 (new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
2427 
2428 se->valid_blocks = new_vblocks;
2429 
2430 /* Update valid block bitmap */
2431 if (del > 0) {
2432 exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
2433 #ifdef CONFIG_F2FS_CHECK_FS
2434 mir_exist = f2fs_test_and_set_bit(offset,
2435 se->cur_valid_map_mir);
2436 if (unlikely(exist != mir_exist)) {
2437 f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
2438 blkaddr, exist);
2439 f2fs_bug_on(sbi, 1);
2440 }
2441 #endif
2442 if (unlikely(exist)) {
2443 f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
2444 blkaddr);
2445 f2fs_bug_on(sbi, 1);
2446 se->valid_blocks--;
2447 del = 0;
2448 }
2449 
2450 if (f2fs_block_unit_discard(sbi) &&
2451 !f2fs_test_and_set_bit(offset, se->discard_map))
2452 sbi->discard_blks--;
2453 
2454 /*
2455 * SSR should never reuse a block which is checkpointed
2456 * or newly invalidated.
2457 */
2458 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
2459 if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
2460 se->ckpt_valid_blocks++;
2461 }
2462 } else {
2463 exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
2464 #ifdef CONFIG_F2FS_CHECK_FS
2465 mir_exist = f2fs_test_and_clear_bit(offset,
2466 se->cur_valid_map_mir);
2467 if (unlikely(exist != mir_exist)) {
2468 f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
2469 blkaddr, exist);
2470 f2fs_bug_on(sbi, 1);
2471 }
2472 #endif
2473 if (unlikely(!exist)) {
2474 f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
2475 blkaddr);
2476 f2fs_bug_on(sbi, 1);
2477 se->valid_blocks++;
2478 del = 0;
2479 } else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2480 /*
2481 * If checkpoints are off, we must not reuse data that
2482 * was used in the previous checkpoint. If it was used
2483 * before, we must track that to know how much space we
2484 * really have.
2485 */ 2486 if (f2fs_test_bit(offset, se->ckpt_valid_map)) { 2487 spin_lock(&sbi->stat_lock); 2488 sbi->unusable_block_count++; 2489 spin_unlock(&sbi->stat_lock); 2490 } 2491 } 2492 2493 if (f2fs_block_unit_discard(sbi) && 2494 f2fs_test_and_clear_bit(offset, se->discard_map)) 2495 sbi->discard_blks++; 2496 } 2497 if (!f2fs_test_bit(offset, se->ckpt_valid_map)) 2498 se->ckpt_valid_blocks += del; 2499 2500 __mark_sit_entry_dirty(sbi, segno); 2501 2502 /* update total number of valid blocks to be written in ckpt area */ 2503 SIT_I(sbi)->written_valid_blocks += del; 2504 2505 if (__is_large_section(sbi)) 2506 get_sec_entry(sbi, segno)->valid_blocks += del; 2507 } 2508 2509 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) 2510 { 2511 unsigned int segno = GET_SEGNO(sbi, addr); 2512 struct sit_info *sit_i = SIT_I(sbi); 2513 2514 f2fs_bug_on(sbi, addr == NULL_ADDR); 2515 if (addr == NEW_ADDR || addr == COMPRESS_ADDR) 2516 return; 2517 2518 f2fs_invalidate_internal_cache(sbi, addr); 2519 2520 /* add it into sit main buffer */ 2521 down_write(&sit_i->sentry_lock); 2522 2523 update_segment_mtime(sbi, addr, 0); 2524 update_sit_entry(sbi, addr, -1); 2525 2526 /* add it into dirty seglist */ 2527 locate_dirty_segment(sbi, segno); 2528 2529 up_write(&sit_i->sentry_lock); 2530 } 2531 2532 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr) 2533 { 2534 struct sit_info *sit_i = SIT_I(sbi); 2535 unsigned int segno, offset; 2536 struct seg_entry *se; 2537 bool is_cp = false; 2538 2539 if (!__is_valid_data_blkaddr(blkaddr)) 2540 return true; 2541 2542 down_read(&sit_i->sentry_lock); 2543 2544 segno = GET_SEGNO(sbi, blkaddr); 2545 se = get_seg_entry(sbi, segno); 2546 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); 2547 2548 if (f2fs_test_bit(offset, se->ckpt_valid_map)) 2549 is_cp = true; 2550 2551 up_read(&sit_i->sentry_lock); 2552 2553 return is_cp; 2554 } 2555 2556 static unsigned short f2fs_curseg_valid_blocks(struct f2fs_sb_info *sbi, int type) 2557 { 2558 struct curseg_info *curseg = CURSEG_I(sbi, type); 2559 2560 if (sbi->ckpt->alloc_type[type] == SSR) 2561 return BLKS_PER_SEG(sbi); 2562 return curseg->next_blkoff; 2563 } 2564 2565 /* 2566 * Calculate the number of current summary pages for writing 2567 */ 2568 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra) 2569 { 2570 int valid_sum_count = 0; 2571 int i, sum_in_page; 2572 2573 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 2574 if (sbi->ckpt->alloc_type[i] != SSR && for_ra) 2575 valid_sum_count += 2576 le16_to_cpu(F2FS_CKPT(sbi)->cur_data_blkoff[i]); 2577 else 2578 valid_sum_count += f2fs_curseg_valid_blocks(sbi, i); 2579 } 2580 2581 sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE - 2582 SUM_FOOTER_SIZE) / SUMMARY_SIZE; 2583 if (valid_sum_count <= sum_in_page) 2584 return 1; 2585 else if ((valid_sum_count - sum_in_page) <= 2586 (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE) 2587 return 2; 2588 return 3; 2589 } 2590 2591 /* 2592 * Caller should put this summary page 2593 */ 2594 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno) 2595 { 2596 if (unlikely(f2fs_cp_error(sbi))) 2597 return ERR_PTR(-EIO); 2598 return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno)); 2599 } 2600 2601 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, 2602 void *src, block_t blk_addr) 2603 { 2604 struct page *page = f2fs_grab_meta_page(sbi, blk_addr); 2605 2606 memcpy(page_address(page), src, PAGE_SIZE); 2607 set_page_dirty(page); 2608 f2fs_put_page(page, 1); 2609 } 
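/*
 * Editor's note -- worked example, not upstream f2fs code: assuming 4 KiB
 * blocks, SUMMARY_SIZE = 7, SUM_FOOTER_SIZE = 5 and SUM_JOURNAL_SIZE = 253,
 * the first compacted summary page carries both journals plus
 *
 *	(4096 - 2 * 253 - 5) / 7 = 512
 *
 * summary entries, and each following page carries (4096 - 5) / 7 = 584.
 * The three data logs hold at most 3 * 512 = 1536 valid summaries, so
 * f2fs_npages_for_summary_flush() above never needs to return more than 3.
 */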
2610 
2611 static void write_sum_page(struct f2fs_sb_info *sbi,
2612 struct f2fs_summary_block *sum_blk, block_t blk_addr)
2613 {
2614 f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
2615 }
2616 
2617 static void write_current_sum_page(struct f2fs_sb_info *sbi,
2618 int type, block_t blk_addr)
2619 {
2620 struct curseg_info *curseg = CURSEG_I(sbi, type);
2621 struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2622 struct f2fs_summary_block *src = curseg->sum_blk;
2623 struct f2fs_summary_block *dst;
2624 
2625 dst = (struct f2fs_summary_block *)page_address(page);
2626 memset(dst, 0, PAGE_SIZE);
2627 
2628 mutex_lock(&curseg->curseg_mutex);
2629 
2630 down_read(&curseg->journal_rwsem);
2631 memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
2632 up_read(&curseg->journal_rwsem);
2633 
2634 memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
2635 memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
2636 
2637 mutex_unlock(&curseg->curseg_mutex);
2638 
2639 set_page_dirty(page);
2640 f2fs_put_page(page, 1);
2641 }
2642 
2643 static int is_next_segment_free(struct f2fs_sb_info *sbi,
2644 struct curseg_info *curseg, int type)
2645 {
2646 unsigned int segno = curseg->segno + 1;
2647 struct free_segmap_info *free_i = FREE_I(sbi);
2648 
2649 if (segno < MAIN_SEGS(sbi) && segno % SEGS_PER_SEC(sbi))
2650 return !test_bit(segno, free_i->free_segmap);
2651 return 0;
2652 }
2653 
2654 /*
2655 * Find a new segment from the free segments bitmap in the right allocation order.
2656 * This function must succeed, otherwise BUG.
2657 */
2658 static void get_new_segment(struct f2fs_sb_info *sbi,
2659 unsigned int *newseg, bool new_sec, bool pinning)
2660 {
2661 struct free_segmap_info *free_i = FREE_I(sbi);
2662 unsigned int segno, secno, zoneno;
2663 unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
2664 unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
2665 unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
2666 bool init = true;
2667 int i;
2668 int ret = 0;
2669 
2670 spin_lock(&free_i->segmap_lock);
2671 
2672 if (!new_sec && ((*newseg + 1) % SEGS_PER_SEC(sbi))) {
2673 segno = find_next_zero_bit(free_i->free_segmap,
2674 GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
2675 if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
2676 goto got_it;
2677 }
2678 
2679 /*
2680 * If we format f2fs on zoned storage, let's try to get pinned sections
2681 * from the beginning of the storage, which should be a conventional zone.
2682 */
2683 if (f2fs_sb_has_blkzoned(sbi)) {
2684 segno = pinning ? 0 : max(first_zoned_segno(sbi), *newseg);
2685 hint = GET_SEC_FROM_SEG(sbi, segno);
2686 }
2687 
2688 find_other_zone:
2689 secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2690 if (secno >= MAIN_SECS(sbi)) {
2691 secno = find_first_zero_bit(free_i->free_secmap,
2692 MAIN_SECS(sbi));
2693 if (secno >= MAIN_SECS(sbi)) {
2694 ret = -ENOSPC;
2695 goto out_unlock;
2696 }
2697 }
2698 segno = GET_SEG_FROM_SEC(sbi, secno);
2699 zoneno = GET_ZONE_FROM_SEC(sbi, secno);
2700 
2701 /* give up on finding another zone */
2702 if (!init)
2703 goto got_it;
2704 if (sbi->secs_per_zone == 1)
2705 goto got_it;
2706 if (zoneno == old_zoneno)
2707 goto got_it;
2708 for (i = 0; i < NR_CURSEG_TYPE; i++)
2709 if (CURSEG_I(sbi, i)->zone == zoneno)
2710 break;
2711 
2712 if (i < NR_CURSEG_TYPE) {
2713 /* zone is in use, try another */
2714 if (zoneno + 1 >= total_zones)
2715 hint = 0;
2716 else
2717 hint = (zoneno + 1) * sbi->secs_per_zone;
2718 init = false;
2719 goto find_other_zone;
2720 }
2721 got_it:
2722 /* set it as dirty segment in free segmap */
2723 f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
2724 __set_inuse(sbi, segno);
2725 *newseg = segno;
2726 out_unlock:
2727 spin_unlock(&free_i->segmap_lock);
2728 
2729 if (ret) {
2730 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT);
2731 f2fs_bug_on(sbi, 1);
2732 }
2733 }
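/*
 * Editor's note -- illustrative sketch, not upstream f2fs code: stripped of
 * the zone bookkeeping, the free-section search above is the usual
 * "scan from a hint, then wrap around" bitmap pattern:
 *
 *	secno = find_next_zero_bit(free_secmap, nr_secs, hint);
 *	if (secno >= nr_secs) {
 *		secno = find_first_zero_bit(free_secmap, nr_secs);
 *		if (secno >= nr_secs)
 *			return -ENOSPC;	// no free section left
 *	}
 *
 * The zone checks afterwards only decide whether to accept the hit or retry
 * with a hint in the next zone, and the init flag caps that at one retry.
 */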
2734 
2735 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2736 {
2737 struct curseg_info *curseg = CURSEG_I(sbi, type);
2738 struct summary_footer *sum_footer;
2739 unsigned short seg_type = curseg->seg_type;
2740 
2741 curseg->inited = true;
2742 curseg->segno = curseg->next_segno;
2743 curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
2744 curseg->next_blkoff = 0;
2745 curseg->next_segno = NULL_SEGNO;
2746 
2747 sum_footer = &(curseg->sum_blk->footer);
2748 memset(sum_footer, 0, sizeof(struct summary_footer));
2749 
2750 sanity_check_seg_type(sbi, seg_type);
2751 
2752 if (IS_DATASEG(seg_type))
2753 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
2754 if (IS_NODESEG(seg_type))
2755 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
2756 __set_sit_entry_type(sbi, seg_type, curseg->segno, modified);
2757 }
2758 
2759 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
2760 {
2761 struct curseg_info *curseg = CURSEG_I(sbi, type);
2762 unsigned short seg_type = curseg->seg_type;
2763 
2764 sanity_check_seg_type(sbi, seg_type);
2765 if (f2fs_need_rand_seg(sbi))
2766 return get_random_u32_below(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
2767 
2768 if (__is_large_section(sbi))
2769 return curseg->segno;
2770 
2771 /* the in-memory log may not be located on any segment after mount */
2772 if (!curseg->inited)
2773 return 0;
2774 
2775 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2776 return 0;
2777 
2778 if (seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type))
2779 return 0;
2780 
2781 if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2782 return SIT_I(sbi)->last_victim[ALLOC_NEXT];
2783 
2784 /* find segments from 0 to reuse freed segments */
2785 if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
2786 return 0;
2787 
2788 return curseg->segno;
2789 }
2790 
2791 /*
2792 * Allocate a current working segment.
2793 * This function always allocates a free segment in LFS manner.
2794 */
2795 static int new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2796 {
2797 struct curseg_info *curseg = CURSEG_I(sbi, type);
2798 unsigned int segno = curseg->segno;
2799 bool pinning = type == CURSEG_COLD_DATA_PINNED;
2800 
2801 if (curseg->inited)
2802 write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, segno));
2803 
2804 segno = __get_next_segno(sbi, type);
2805 get_new_segment(sbi, &segno, new_sec, pinning);
2806 if (new_sec && pinning &&
2807 !f2fs_valid_pinned_area(sbi, START_BLOCK(sbi, segno))) {
2808 __set_free(sbi, segno);
2809 return -EAGAIN;
2810 }
2811 
2812 curseg->next_segno = segno;
2813 reset_curseg(sbi, type, 1);
2814 curseg->alloc_type = LFS;
2815 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
2816 curseg->fragment_remained_chunk =
2817 get_random_u32_inclusive(1, sbi->max_fragment_chunk);
2818 return 0;
2819 }
2820 
2821 static int __next_free_blkoff(struct f2fs_sb_info *sbi,
2822 int segno, block_t start)
2823 {
2824 struct seg_entry *se = get_seg_entry(sbi, segno);
2825 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
2826 unsigned long *target_map = SIT_I(sbi)->tmp_map;
2827 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2828 unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2829 int i;
2830 
2831 for (i = 0; i < entries; i++)
2832 target_map[i] = ckpt_map[i] | cur_map[i];
2833 
2834 return __find_rev_next_zero_bit(target_map, BLKS_PER_SEG(sbi), start);
2835 }
2836 
2837 static int f2fs_find_next_ssr_block(struct f2fs_sb_info *sbi,
2838 struct curseg_info *seg)
2839 {
2840 return __next_free_blkoff(sbi, seg->segno, seg->next_blkoff + 1);
2841 }
2842 
2843 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
2844 {
2845 return __next_free_blkoff(sbi, segno, 0) < BLKS_PER_SEG(sbi);
2846 }
2847 
2848 /*
2849 * This function always allocates a used segment (from the dirty seglist) in SSR
2850 * manner, so it should recover the existing segment information of valid blocks.
2851 */
2852 static void change_curseg(struct f2fs_sb_info *sbi, int type)
2853 {
2854 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2855 struct curseg_info *curseg = CURSEG_I(sbi, type);
2856 unsigned int new_segno = curseg->next_segno;
2857 struct f2fs_summary_block *sum_node;
2858 struct page *sum_page;
2859 
2860 if (curseg->inited)
2861 write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno));
2862 
2863 __set_test_and_inuse(sbi, new_segno);
2864 
2865 mutex_lock(&dirty_i->seglist_lock);
2866 __remove_dirty_segment(sbi, new_segno, PRE);
2867 __remove_dirty_segment(sbi, new_segno, DIRTY);
2868 mutex_unlock(&dirty_i->seglist_lock);
2869 
2870 reset_curseg(sbi, type, 1);
2871 curseg->alloc_type = SSR;
2872 curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0);
2873 
2874 sum_page = f2fs_get_sum_page(sbi, new_segno);
2875 if (IS_ERR(sum_page)) {
2876 /* GC won't be able to use stale summary pages by cp_error */
2877 memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE);
2878 return;
2879 }
2880 sum_node = (struct f2fs_summary_block *)page_address(sum_page);
2881 memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
2882 f2fs_put_page(sum_page, 1);
2883 }
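/*
 * Editor's note -- worked example, not upstream f2fs code: SSR treats a
 * block as occupied if it is valid either now or in the last checkpoint,
 * which is why __next_free_blkoff() above scans the OR of the two bitmaps.
 * Ignoring f2fs's reversed in-byte bit order, for one 8-bit slice:
 *
 *	cur_map  = 0b10010001
 *	ckpt_map = 0b01010001
 *	target   = cur_map | ckpt_map = 0b11010001
 *
 * so the first reusable slot from offset 0 is block 1, the lowest clear bit,
 * and blocks valid only in the checkpoint stay untouched so that a crash
 * can still roll back to the checkpointed image.
 */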
2884 
2885 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2886 int alloc_mode, unsigned long long age);
2887 
2888 static void get_atssr_segment(struct f2fs_sb_info *sbi, int type,
2889 int target_type, int alloc_mode,
2890 unsigned long long age)
2891 {
2892 struct curseg_info *curseg = CURSEG_I(sbi, type);
2893 
2894 curseg->seg_type = target_type;
2895 
2896 if (get_ssr_segment(sbi, type, alloc_mode, age)) {
2897 struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);
2898 
2899 curseg->seg_type = se->type;
2900 change_curseg(sbi, type);
2901 } else {
2902 /* allocate cold segment by default */
2903 curseg->seg_type = CURSEG_COLD_DATA;
2904 new_curseg(sbi, type, true);
2905 }
2906 stat_inc_seg_type(sbi, curseg);
2907 }
2908 
2909 static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
2910 {
2911 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);
2912 
2913 if (!sbi->am.atgc_enabled)
2914 return;
2915 
2916 f2fs_down_read(&SM_I(sbi)->curseg_lock);
2917 
2918 mutex_lock(&curseg->curseg_mutex);
2919 down_write(&SIT_I(sbi)->sentry_lock);
2920 
2921 get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC, CURSEG_COLD_DATA, SSR, 0);
2922 
2923 up_write(&SIT_I(sbi)->sentry_lock);
2924 mutex_unlock(&curseg->curseg_mutex);
2925 
2926 f2fs_up_read(&SM_I(sbi)->curseg_lock);
2927 }
2928 
2929 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
2930 {
2931 __f2fs_init_atgc_curseg(sbi);
2932 }
2933 
2934 static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2935 {
2936 struct curseg_info *curseg = CURSEG_I(sbi, type);
2937 
2938 mutex_lock(&curseg->curseg_mutex);
2939 if (!curseg->inited)
2940 goto out;
2941 
2942 if (get_valid_blocks(sbi, curseg->segno, false)) {
2943 write_sum_page(sbi, curseg->sum_blk,
2944 GET_SUM_BLOCK(sbi, curseg->segno));
2945 } else {
2946 mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2947 __set_test_and_free(sbi, curseg->segno, true);
2948 mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2949 }
2950 out:
2951 mutex_unlock(&curseg->curseg_mutex);
2952 }
2953 
2954 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi)
2955 {
2956 __f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2957 
2958 if (sbi->am.atgc_enabled)
2959 __f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2960 }
2961 
2962 static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2963 {
2964 struct curseg_info *curseg = CURSEG_I(sbi, type);
2965 
2966 mutex_lock(&curseg->curseg_mutex);
2967 if (!curseg->inited)
2968 goto out;
2969 if (get_valid_blocks(sbi, curseg->segno, false))
2970 goto out;
2971 
2972 mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2973 __set_test_and_inuse(sbi, curseg->segno);
2974 mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2975 out:
2976 mutex_unlock(&curseg->curseg_mutex);
2977 }
2978 
2979 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi)
2980 {
2981 __f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2982 
2983 if (sbi->am.atgc_enabled)
2984 __f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2985 }
2986 
2987 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2988 int alloc_mode, unsigned long long age)
2989 {
2990 struct curseg_info *curseg = CURSEG_I(sbi, type);
2991 unsigned segno = NULL_SEGNO;
2992 unsigned short seg_type = curseg->seg_type;
2993 int i, cnt;
2994 bool reversed = false;
2995 
2996 sanity_check_seg_type(sbi, seg_type);
2997 
2998 /* f2fs_need_SSR() already forces to do this */
2999 if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
3000 curseg->next_segno = segno;
3001 return 1;
3002 }
3003 
3004 /* For node segments, let's do SSR more intensively */
3005 if (IS_NODESEG(seg_type)) {
3006 if (seg_type >= CURSEG_WARM_NODE) {
3007 reversed = true;
3008 i = CURSEG_COLD_NODE;
3009 } else {
3010 i = CURSEG_HOT_NODE;
3011 }
3012 cnt = NR_CURSEG_NODE_TYPE;
3013 } else {
3014 if (seg_type >= CURSEG_WARM_DATA) {
3015 reversed = true;
3016 i =
CURSEG_COLD_DATA; 3017 } else { 3018 i = CURSEG_HOT_DATA; 3019 } 3020 cnt = NR_CURSEG_DATA_TYPE; 3021 } 3022 3023 for (; cnt-- > 0; reversed ? i-- : i++) { 3024 if (i == seg_type) 3025 continue; 3026 if (!f2fs_get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) { 3027 curseg->next_segno = segno; 3028 return 1; 3029 } 3030 } 3031 3032 /* find valid_blocks=0 in dirty list */ 3033 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 3034 segno = get_free_segment(sbi); 3035 if (segno != NULL_SEGNO) { 3036 curseg->next_segno = segno; 3037 return 1; 3038 } 3039 } 3040 return 0; 3041 } 3042 3043 static bool need_new_seg(struct f2fs_sb_info *sbi, int type) 3044 { 3045 struct curseg_info *curseg = CURSEG_I(sbi, type); 3046 3047 if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) && 3048 curseg->seg_type == CURSEG_WARM_NODE) 3049 return true; 3050 if (curseg->alloc_type == LFS && 3051 is_next_segment_free(sbi, curseg, type) && 3052 likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 3053 return true; 3054 if (!f2fs_need_SSR(sbi) || !get_ssr_segment(sbi, type, SSR, 0)) 3055 return true; 3056 return false; 3057 } 3058 3059 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, 3060 unsigned int start, unsigned int end) 3061 { 3062 struct curseg_info *curseg = CURSEG_I(sbi, type); 3063 unsigned int segno; 3064 3065 f2fs_down_read(&SM_I(sbi)->curseg_lock); 3066 mutex_lock(&curseg->curseg_mutex); 3067 down_write(&SIT_I(sbi)->sentry_lock); 3068 3069 segno = CURSEG_I(sbi, type)->segno; 3070 if (segno < start || segno > end) 3071 goto unlock; 3072 3073 if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0)) 3074 change_curseg(sbi, type); 3075 else 3076 new_curseg(sbi, type, true); 3077 3078 stat_inc_seg_type(sbi, curseg); 3079 3080 locate_dirty_segment(sbi, segno); 3081 unlock: 3082 up_write(&SIT_I(sbi)->sentry_lock); 3083 3084 if (segno != curseg->segno) 3085 f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u", 3086 type, segno, curseg->segno); 3087 3088 mutex_unlock(&curseg->curseg_mutex); 3089 f2fs_up_read(&SM_I(sbi)->curseg_lock); 3090 } 3091 3092 static int __allocate_new_segment(struct f2fs_sb_info *sbi, int type, 3093 bool new_sec, bool force) 3094 { 3095 struct curseg_info *curseg = CURSEG_I(sbi, type); 3096 unsigned int old_segno; 3097 3098 if (!force && curseg->inited && 3099 !curseg->next_blkoff && 3100 !get_valid_blocks(sbi, curseg->segno, new_sec) && 3101 !get_ckpt_valid_blocks(sbi, curseg->segno, new_sec)) 3102 return 0; 3103 3104 old_segno = curseg->segno; 3105 if (new_curseg(sbi, type, true)) 3106 return -EAGAIN; 3107 stat_inc_seg_type(sbi, curseg); 3108 locate_dirty_segment(sbi, old_segno); 3109 return 0; 3110 } 3111 3112 int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force) 3113 { 3114 int ret; 3115 3116 f2fs_down_read(&SM_I(sbi)->curseg_lock); 3117 down_write(&SIT_I(sbi)->sentry_lock); 3118 ret = __allocate_new_segment(sbi, type, true, force); 3119 up_write(&SIT_I(sbi)->sentry_lock); 3120 f2fs_up_read(&SM_I(sbi)->curseg_lock); 3121 3122 return ret; 3123 } 3124 3125 int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi) 3126 { 3127 int err; 3128 bool gc_required = true; 3129 3130 retry: 3131 f2fs_lock_op(sbi); 3132 err = f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false); 3133 f2fs_unlock_op(sbi); 3134 3135 if (f2fs_sb_has_blkzoned(sbi) && err && gc_required) { 3136 f2fs_down_write(&sbi->gc_lock); 3137 f2fs_gc_range(sbi, 0, GET_SEGNO(sbi, FDEV(0).end_blk), true, 1); 3138 f2fs_up_write(&sbi->gc_lock); 3139 3140 
gc_required = false; 3141 goto retry; 3142 } 3143 3144 return err; 3145 } 3146 3147 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi) 3148 { 3149 int i; 3150 3151 f2fs_down_read(&SM_I(sbi)->curseg_lock); 3152 down_write(&SIT_I(sbi)->sentry_lock); 3153 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) 3154 __allocate_new_segment(sbi, i, false, false); 3155 up_write(&SIT_I(sbi)->sentry_lock); 3156 f2fs_up_read(&SM_I(sbi)->curseg_lock); 3157 } 3158 3159 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, 3160 struct cp_control *cpc) 3161 { 3162 __u64 trim_start = cpc->trim_start; 3163 bool has_candidate = false; 3164 3165 down_write(&SIT_I(sbi)->sentry_lock); 3166 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) { 3167 if (add_discard_addrs(sbi, cpc, true)) { 3168 has_candidate = true; 3169 break; 3170 } 3171 } 3172 up_write(&SIT_I(sbi)->sentry_lock); 3173 3174 cpc->trim_start = trim_start; 3175 return has_candidate; 3176 } 3177 3178 static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi, 3179 struct discard_policy *dpolicy, 3180 unsigned int start, unsigned int end) 3181 { 3182 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 3183 struct discard_cmd *prev_dc = NULL, *next_dc = NULL; 3184 struct rb_node **insert_p = NULL, *insert_parent = NULL; 3185 struct discard_cmd *dc; 3186 struct blk_plug plug; 3187 int issued; 3188 unsigned int trimmed = 0; 3189 3190 next: 3191 issued = 0; 3192 3193 mutex_lock(&dcc->cmd_lock); 3194 if (unlikely(dcc->rbtree_check)) 3195 f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi)); 3196 3197 dc = __lookup_discard_cmd_ret(&dcc->root, start, 3198 &prev_dc, &next_dc, &insert_p, &insert_parent); 3199 if (!dc) 3200 dc = next_dc; 3201 3202 blk_start_plug(&plug); 3203 3204 while (dc && dc->di.lstart <= end) { 3205 struct rb_node *node; 3206 int err = 0; 3207 3208 if (dc->di.len < dpolicy->granularity) 3209 goto skip; 3210 3211 if (dc->state != D_PREP) { 3212 list_move_tail(&dc->list, &dcc->fstrim_list); 3213 goto skip; 3214 } 3215 3216 err = __submit_discard_cmd(sbi, dpolicy, dc, &issued); 3217 3218 if (issued >= dpolicy->max_requests) { 3219 start = dc->di.lstart + dc->di.len; 3220 3221 if (err) 3222 __remove_discard_cmd(sbi, dc); 3223 3224 blk_finish_plug(&plug); 3225 mutex_unlock(&dcc->cmd_lock); 3226 trimmed += __wait_all_discard_cmd(sbi, NULL); 3227 f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT); 3228 goto next; 3229 } 3230 skip: 3231 node = rb_next(&dc->rb_node); 3232 if (err) 3233 __remove_discard_cmd(sbi, dc); 3234 dc = rb_entry_safe(node, struct discard_cmd, rb_node); 3235 3236 if (fatal_signal_pending(current)) 3237 break; 3238 } 3239 3240 blk_finish_plug(&plug); 3241 mutex_unlock(&dcc->cmd_lock); 3242 3243 return trimmed; 3244 } 3245 3246 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) 3247 { 3248 __u64 start = F2FS_BYTES_TO_BLK(range->start); 3249 __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1; 3250 unsigned int start_segno, end_segno; 3251 block_t start_block, end_block; 3252 struct cp_control cpc; 3253 struct discard_policy dpolicy; 3254 unsigned long long trimmed = 0; 3255 int err = 0; 3256 bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi); 3257 3258 if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize) 3259 return -EINVAL; 3260 3261 if (end < MAIN_BLKADDR(sbi)) 3262 goto out; 3263 3264 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) { 3265 f2fs_warn(sbi, "Found FS corruption, run fsck to fix."); 3266 return -EFSCORRUPTED; 3267 } 3268 3269 /* start/end segment number 
in main_area */
3270 start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
3271 end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
3272 GET_SEGNO(sbi, end);
3273 if (need_align) {
3274 start_segno = rounddown(start_segno, SEGS_PER_SEC(sbi));
3275 end_segno = roundup(end_segno + 1, SEGS_PER_SEC(sbi)) - 1;
3276 }
3277 
3278 cpc.reason = CP_DISCARD;
3279 cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
3280 cpc.trim_start = start_segno;
3281 cpc.trim_end = end_segno;
3282 
3283 if (sbi->discard_blks == 0)
3284 goto out;
3285 
3286 f2fs_down_write(&sbi->gc_lock);
3287 stat_inc_cp_call_count(sbi, TOTAL_CALL);
3288 err = f2fs_write_checkpoint(sbi, &cpc);
3289 f2fs_up_write(&sbi->gc_lock);
3290 if (err)
3291 goto out;
3292 
3293 /*
3294 * We have queued discard candidates, but we do not need to wait for them
3295 * all here: when the runtime discard option is enabled, they will be
3296 * issued during idle time anyway. A configuration using runtime discard
3297 * is expected to rely on that rather than on periodic fstrim.
3298 */
3299 if (f2fs_realtime_discard_enable(sbi))
3300 goto out;
3301 
3302 start_block = START_BLOCK(sbi, start_segno);
3303 end_block = START_BLOCK(sbi, end_segno + 1);
3304 
3305 __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
3306 trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
3307 start_block, end_block);
3308 
3309 trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
3310 start_block, end_block);
3311 out:
3312 if (!err)
3313 range->len = F2FS_BLK_TO_BYTES(trimmed);
3314 return err;
3315 }
3316 
3317 int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
3318 {
3319 switch (hint) {
3320 case WRITE_LIFE_SHORT:
3321 return CURSEG_HOT_DATA;
3322 case WRITE_LIFE_EXTREME:
3323 return CURSEG_COLD_DATA;
3324 default:
3325 return CURSEG_WARM_DATA;
3326 }
3327 }
3328 
3329 static int __get_segment_type_2(struct f2fs_io_info *fio)
3330 {
3331 if (fio->type == DATA)
3332 return CURSEG_HOT_DATA;
3333 else
3334 return CURSEG_HOT_NODE;
3335 }
3336 
3337 static int __get_segment_type_4(struct f2fs_io_info *fio)
3338 {
3339 if (fio->type == DATA) {
3340 struct inode *inode = fio->page->mapping->host;
3341 
3342 if (S_ISDIR(inode->i_mode))
3343 return CURSEG_HOT_DATA;
3344 else
3345 return CURSEG_COLD_DATA;
3346 } else {
3347 if (IS_DNODE(fio->page) && is_cold_node(fio->page))
3348 return CURSEG_WARM_NODE;
3349 else
3350 return CURSEG_COLD_NODE;
3351 }
3352 }
3353 
3354 static int __get_age_segment_type(struct inode *inode, pgoff_t pgofs)
3355 {
3356 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3357 struct extent_info ei = {};
3358 
3359 if (f2fs_lookup_age_extent_cache(inode, pgofs, &ei)) {
3360 if (!ei.age)
3361 return NO_CHECK_TYPE;
3362 if (ei.age <= sbi->hot_data_age_threshold)
3363 return CURSEG_HOT_DATA;
3364 if (ei.age <= sbi->warm_data_age_threshold)
3365 return CURSEG_WARM_DATA;
3366 return CURSEG_COLD_DATA;
3367 }
3368 return NO_CHECK_TYPE;
3369 }
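/*
 * Editor's note -- illustrative sketch, not upstream f2fs code: the age
 * lookup above buckets data by how recently the extent was written,
 * against two tunables (exposed in sysfs as hot_data_age_threshold and
 * warm_data_age_threshold):
 *
 *	age == 0              -> NO_CHECK_TYPE (no estimate in the cache)
 *	age <= hot_threshold  -> CURSEG_HOT_DATA  (recently rewritten)
 *	age <= warm_threshold -> CURSEG_WARM_DATA
 *	otherwise             -> CURSEG_COLD_DATA (long idle)
 *
 * When there is no estimate, the caller falls through to the file-level
 * temperature checks instead of trusting the cache.
 */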
3370 
3371 static int __get_segment_type_6(struct f2fs_io_info *fio)
3372 {
3373 if (fio->type == DATA) {
3374 struct inode *inode = fio->page->mapping->host;
3375 int type;
3376 
3377 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
3378 return CURSEG_COLD_DATA_PINNED;
3379 
3380 if (page_private_gcing(fio->page)) {
3381 if (fio->sbi->am.atgc_enabled &&
3382 (fio->io_type == FS_DATA_IO) &&
3383 (fio->sbi->gc_mode != GC_URGENT_HIGH) &&
3384 __is_valid_data_blkaddr(fio->old_blkaddr) &&
3385 !is_inode_flag_set(inode, FI_OPU_WRITE))
3386 return CURSEG_ALL_DATA_ATGC;
3387 else
3388 return CURSEG_COLD_DATA;
3389 }
3390 if (file_is_cold(inode) || f2fs_need_compress_data(inode))
3391 return CURSEG_COLD_DATA;
3392 
3393 type = __get_age_segment_type(inode, fio->page->index);
3394 if (type != NO_CHECK_TYPE)
3395 return type;
3396 
3397 if (file_is_hot(inode) ||
3398 is_inode_flag_set(inode, FI_HOT_DATA) ||
3399 f2fs_is_cow_file(inode))
3400 return CURSEG_HOT_DATA;
3401 return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
3402 } else {
3403 if (IS_DNODE(fio->page))
3404 return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
3405 CURSEG_HOT_NODE;
3406 return CURSEG_COLD_NODE;
3407 }
3408 }
3409 
3410 static int __get_segment_type(struct f2fs_io_info *fio)
3411 {
3412 int type = 0;
3413 
3414 switch (F2FS_OPTION(fio->sbi).active_logs) {
3415 case 2:
3416 type = __get_segment_type_2(fio);
3417 break;
3418 case 4:
3419 type = __get_segment_type_4(fio);
3420 break;
3421 case 6:
3422 type = __get_segment_type_6(fio);
3423 break;
3424 default:
3425 f2fs_bug_on(fio->sbi, true);
3426 }
3427 
3428 if (IS_HOT(type))
3429 fio->temp = HOT;
3430 else if (IS_WARM(type))
3431 fio->temp = WARM;
3432 else
3433 fio->temp = COLD;
3434 return type;
3435 }
3436 
3437 static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi,
3438 struct curseg_info *seg)
3439 {
3440 /* To allocate block chunks in different sizes, use random number */
3441 if (--seg->fragment_remained_chunk > 0)
3442 return;
3443 
3444 seg->fragment_remained_chunk =
3445 get_random_u32_inclusive(1, sbi->max_fragment_chunk);
3446 seg->next_blkoff +=
3447 get_random_u32_inclusive(1, sbi->max_fragment_hole);
3448 }
3449 
3450 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3451 block_t old_blkaddr, block_t *new_blkaddr,
3452 struct f2fs_summary *sum, int type,
3453 struct f2fs_io_info *fio)
3454 {
3455 struct sit_info *sit_i = SIT_I(sbi);
3456 struct curseg_info *curseg = CURSEG_I(sbi, type);
3457 unsigned long long old_mtime;
3458 bool from_gc = (type == CURSEG_ALL_DATA_ATGC);
3459 struct seg_entry *se = NULL;
3460 bool segment_full = false;
3461 
3462 f2fs_down_read(&SM_I(sbi)->curseg_lock);
3463 
3464 mutex_lock(&curseg->curseg_mutex);
3465 down_write(&sit_i->sentry_lock);
3466 
3467 if (from_gc) {
3468 f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO);
3469 se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
3470 sanity_check_seg_type(sbi, se->type);
3471 f2fs_bug_on(sbi, IS_NODESEG(se->type));
3472 }
3473 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3474 
3475 f2fs_bug_on(sbi, curseg->next_blkoff >= BLKS_PER_SEG(sbi));
3476 
3477 f2fs_wait_discard_bio(sbi, *new_blkaddr);
3478 
3479 curseg->sum_blk->entries[curseg->next_blkoff] = *sum;
3480 if (curseg->alloc_type == SSR) {
3481 curseg->next_blkoff = f2fs_find_next_ssr_block(sbi, curseg);
3482 } else {
3483 curseg->next_blkoff++;
3484 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
3485 f2fs_randomize_chunk(sbi, curseg);
3486 }
3487 if (curseg->next_blkoff >= f2fs_usable_blks_in_seg(sbi, curseg->segno))
3488 segment_full = true;
3489 stat_inc_block_count(sbi, curseg);
3490 
3491 if (from_gc) {
3492 old_mtime = get_segment_mtime(sbi, old_blkaddr);
3493 } else {
3494 update_segment_mtime(sbi, old_blkaddr, 0);
3495 old_mtime = 0;
3496 }
3497 update_segment_mtime(sbi, *new_blkaddr, old_mtime);
3498 
3499 /*
3500 * SIT information should be updated before segment allocation,
3501 * since SSR needs the latest valid block information.
3502 */
3503 update_sit_entry(sbi, *new_blkaddr, 1);
3504 update_sit_entry(sbi, old_blkaddr, -1);
3505 
3506 /*
3507 * If the current segment is full, flush it out and replace it with a
3508 * new segment.
3509 */
3510 if (segment_full) {
3511 if (type == CURSEG_COLD_DATA_PINNED &&
3512 !((curseg->segno + 1) % sbi->segs_per_sec)) {
3513 write_sum_page(sbi, curseg->sum_blk,
3514 GET_SUM_BLOCK(sbi, curseg->segno));
3515 goto skip_new_segment;
3516 }
3517 
3518 if (from_gc) {
3519 get_atssr_segment(sbi, type, se->type,
3520 AT_SSR, se->mtime);
3521 } else {
3522 if (need_new_seg(sbi, type))
3523 new_curseg(sbi, type, false);
3524 else
3525 change_curseg(sbi, type);
3526 stat_inc_seg_type(sbi, curseg);
3527 }
3528 }
3529 
3530 skip_new_segment:
3531 /*
3532 * Segment dirty status should be updated after segment allocation,
3533 * so we just need to update the status once, after the previous
3534 * segment has been closed.
3535 */
3536 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3537 locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
3538 
3539 if (IS_DATASEG(curseg->seg_type))
3540 atomic64_inc(&sbi->allocated_data_blocks);
3541 
3542 up_write(&sit_i->sentry_lock);
3543 
3544 if (page && IS_NODESEG(curseg->seg_type)) {
3545 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
3546 
3547 f2fs_inode_chksum_set(sbi, page);
3548 }
3549 
3550 if (fio) {
3551 struct f2fs_bio_info *io;
3552 
3553 INIT_LIST_HEAD(&fio->list);
3554 fio->in_list = 1;
3555 io = sbi->write_io[fio->type] + fio->temp;
3556 spin_lock(&io->io_lock);
3557 list_add_tail(&fio->list, &io->io_list);
3558 spin_unlock(&io->io_lock);
3559 }
3560 
3561 mutex_unlock(&curseg->curseg_mutex);
3562 
3563 f2fs_up_read(&SM_I(sbi)->curseg_lock);
3564 }
3565 
3566 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
3567 block_t blkaddr, unsigned int blkcnt)
3568 {
3569 if (!f2fs_is_multi_device(sbi))
3570 return;
3571 
3572 while (1) {
3573 unsigned int devidx = f2fs_target_device_index(sbi, blkaddr);
3574 unsigned int blks = FDEV(devidx).end_blk - blkaddr + 1;
3575 
3576 /* update device state for fsync */
3577 f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO);
3578 
3579 /* update device state for checkpoint */
3580 if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
3581 spin_lock(&sbi->dev_lock);
3582 f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
3583 spin_unlock(&sbi->dev_lock);
3584 }
3585 
3586 if (blkcnt <= blks)
3587 break;
3588 blkcnt -= blks;
3589 blkaddr += blks;
3590 }
3591 }
3592 
3593 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
3594 {
3595 int type = __get_segment_type(fio);
3596 bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
3597 
3598 if (keep_order)
3599 f2fs_down_read(&fio->sbi->io_order_lock);
3600 
3601 f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
3602 &fio->new_blkaddr, sum, type, fio);
3603 if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
3604 f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr);
3605 
3606 /* writeout dirty page into bdev */
3607 f2fs_submit_page_write(fio);
3608 
3609 f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1);
3610 
3611 if (keep_order)
3612 f2fs_up_read(&fio->sbi->io_order_lock);
3613 }
3614 
3615 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3616 enum iostat_type io_type)
3617 {
3618 struct f2fs_io_info fio = {
3619 .sbi = sbi,
3620 .type = META,
3621 .temp = HOT,
3622 .op = REQ_OP_WRITE,
3623 .op_flags = REQ_SYNC | REQ_META |
REQ_PRIO, 3624 .old_blkaddr = page->index, 3625 .new_blkaddr = page->index, 3626 .page = page, 3627 .encrypted_page = NULL, 3628 .in_list = 0, 3629 }; 3630 3631 if (unlikely(page->index >= MAIN_BLKADDR(sbi))) 3632 fio.op_flags &= ~REQ_META; 3633 3634 set_page_writeback(page); 3635 f2fs_submit_page_write(&fio); 3636 3637 stat_inc_meta_count(sbi, page->index); 3638 f2fs_update_iostat(sbi, NULL, io_type, F2FS_BLKSIZE); 3639 } 3640 3641 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio) 3642 { 3643 struct f2fs_summary sum; 3644 3645 set_summary(&sum, nid, 0, 0); 3646 do_write_page(&sum, fio); 3647 3648 f2fs_update_iostat(fio->sbi, NULL, fio->io_type, F2FS_BLKSIZE); 3649 } 3650 3651 void f2fs_outplace_write_data(struct dnode_of_data *dn, 3652 struct f2fs_io_info *fio) 3653 { 3654 struct f2fs_sb_info *sbi = fio->sbi; 3655 struct f2fs_summary sum; 3656 3657 f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR); 3658 if (fio->io_type == FS_DATA_IO || fio->io_type == FS_CP_DATA_IO) 3659 f2fs_update_age_extent_cache(dn); 3660 set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version); 3661 do_write_page(&sum, fio); 3662 f2fs_update_data_blkaddr(dn, fio->new_blkaddr); 3663 3664 f2fs_update_iostat(sbi, dn->inode, fio->io_type, F2FS_BLKSIZE); 3665 } 3666 3667 int f2fs_inplace_write_data(struct f2fs_io_info *fio) 3668 { 3669 int err; 3670 struct f2fs_sb_info *sbi = fio->sbi; 3671 unsigned int segno; 3672 3673 fio->new_blkaddr = fio->old_blkaddr; 3674 /* i/o temperature is needed for passing down write hints */ 3675 __get_segment_type(fio); 3676 3677 segno = GET_SEGNO(sbi, fio->new_blkaddr); 3678 3679 if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) { 3680 set_sbi_flag(sbi, SBI_NEED_FSCK); 3681 f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.", 3682 __func__, segno); 3683 err = -EFSCORRUPTED; 3684 f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE); 3685 goto drop_bio; 3686 } 3687 3688 if (f2fs_cp_error(sbi)) { 3689 err = -EIO; 3690 goto drop_bio; 3691 } 3692 3693 if (fio->meta_gc) 3694 f2fs_truncate_meta_inode_pages(sbi, fio->new_blkaddr, 1); 3695 3696 stat_inc_inplace_blocks(fio->sbi); 3697 3698 if (fio->bio && !IS_F2FS_IPU_NOCACHE(sbi)) 3699 err = f2fs_merge_page_bio(fio); 3700 else 3701 err = f2fs_submit_page_bio(fio); 3702 if (!err) { 3703 f2fs_update_device_state(fio->sbi, fio->ino, 3704 fio->new_blkaddr, 1); 3705 f2fs_update_iostat(fio->sbi, fio->page->mapping->host, 3706 fio->io_type, F2FS_BLKSIZE); 3707 } 3708 3709 return err; 3710 drop_bio: 3711 if (fio->bio && *(fio->bio)) { 3712 struct bio *bio = *(fio->bio); 3713 3714 bio->bi_status = BLK_STS_IOERR; 3715 bio_endio(bio); 3716 *(fio->bio) = NULL; 3717 } 3718 return err; 3719 } 3720 3721 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi, 3722 unsigned int segno) 3723 { 3724 int i; 3725 3726 for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) { 3727 if (CURSEG_I(sbi, i)->segno == segno) 3728 break; 3729 } 3730 return i; 3731 } 3732 3733 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, 3734 block_t old_blkaddr, block_t new_blkaddr, 3735 bool recover_curseg, bool recover_newaddr, 3736 bool from_gc) 3737 { 3738 struct sit_info *sit_i = SIT_I(sbi); 3739 struct curseg_info *curseg; 3740 unsigned int segno, old_cursegno; 3741 struct seg_entry *se; 3742 int type; 3743 unsigned short old_blkoff; 3744 unsigned char old_alloc_type; 3745 3746 segno = GET_SEGNO(sbi, new_blkaddr); 3747 se = get_seg_entry(sbi, segno); 3748 type = se->type; 3749 3750 
f2fs_down_write(&SM_I(sbi)->curseg_lock); 3751 3752 if (!recover_curseg) { 3753 /* for recovery flow */ 3754 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) { 3755 if (old_blkaddr == NULL_ADDR) 3756 type = CURSEG_COLD_DATA; 3757 else 3758 type = CURSEG_WARM_DATA; 3759 } 3760 } else { 3761 if (IS_CURSEG(sbi, segno)) { 3762 /* se->type is volatile as SSR allocation */ 3763 type = __f2fs_get_curseg(sbi, segno); 3764 f2fs_bug_on(sbi, type == NO_CHECK_TYPE); 3765 } else { 3766 type = CURSEG_WARM_DATA; 3767 } 3768 } 3769 3770 curseg = CURSEG_I(sbi, type); 3771 f2fs_bug_on(sbi, !IS_DATASEG(curseg->seg_type)); 3772 3773 mutex_lock(&curseg->curseg_mutex); 3774 down_write(&sit_i->sentry_lock); 3775 3776 old_cursegno = curseg->segno; 3777 old_blkoff = curseg->next_blkoff; 3778 old_alloc_type = curseg->alloc_type; 3779 3780 /* change the current segment */ 3781 if (segno != curseg->segno) { 3782 curseg->next_segno = segno; 3783 change_curseg(sbi, type); 3784 } 3785 3786 curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr); 3787 curseg->sum_blk->entries[curseg->next_blkoff] = *sum; 3788 3789 if (!recover_curseg || recover_newaddr) { 3790 if (!from_gc) 3791 update_segment_mtime(sbi, new_blkaddr, 0); 3792 update_sit_entry(sbi, new_blkaddr, 1); 3793 } 3794 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) { 3795 f2fs_invalidate_internal_cache(sbi, old_blkaddr); 3796 if (!from_gc) 3797 update_segment_mtime(sbi, old_blkaddr, 0); 3798 update_sit_entry(sbi, old_blkaddr, -1); 3799 } 3800 3801 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); 3802 locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr)); 3803 3804 locate_dirty_segment(sbi, old_cursegno); 3805 3806 if (recover_curseg) { 3807 if (old_cursegno != curseg->segno) { 3808 curseg->next_segno = old_cursegno; 3809 change_curseg(sbi, type); 3810 } 3811 curseg->next_blkoff = old_blkoff; 3812 curseg->alloc_type = old_alloc_type; 3813 } 3814 3815 up_write(&sit_i->sentry_lock); 3816 mutex_unlock(&curseg->curseg_mutex); 3817 f2fs_up_write(&SM_I(sbi)->curseg_lock); 3818 } 3819 3820 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, 3821 block_t old_addr, block_t new_addr, 3822 unsigned char version, bool recover_curseg, 3823 bool recover_newaddr) 3824 { 3825 struct f2fs_summary sum; 3826 3827 set_summary(&sum, dn->nid, dn->ofs_in_node, version); 3828 3829 f2fs_do_replace_block(sbi, &sum, old_addr, new_addr, 3830 recover_curseg, recover_newaddr, false); 3831 3832 f2fs_update_data_blkaddr(dn, new_addr); 3833 } 3834 3835 void f2fs_wait_on_page_writeback(struct page *page, 3836 enum page_type type, bool ordered, bool locked) 3837 { 3838 if (PageWriteback(page)) { 3839 struct f2fs_sb_info *sbi = F2FS_P_SB(page); 3840 3841 /* submit cached LFS IO */ 3842 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type); 3843 /* submit cached IPU IO */ 3844 f2fs_submit_merged_ipu_write(sbi, NULL, page); 3845 if (ordered) { 3846 wait_on_page_writeback(page); 3847 f2fs_bug_on(sbi, locked && PageWriteback(page)); 3848 } else { 3849 wait_for_stable_page(page); 3850 } 3851 } 3852 } 3853 3854 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr) 3855 { 3856 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3857 struct page *cpage; 3858 3859 if (!f2fs_meta_inode_gc_required(inode)) 3860 return; 3861 3862 if (!__is_valid_data_blkaddr(blkaddr)) 3863 return; 3864 3865 cpage = find_lock_page(META_MAPPING(sbi), blkaddr); 3866 if (cpage) { 3867 f2fs_wait_on_page_writeback(cpage, DATA, true, true); 3868 f2fs_put_page(cpage, 1); 3869 } 3870 
} 3871 3872 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr, 3873 block_t len) 3874 { 3875 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3876 block_t i; 3877 3878 if (!f2fs_meta_inode_gc_required(inode)) 3879 return; 3880 3881 for (i = 0; i < len; i++) 3882 f2fs_wait_on_block_writeback(inode, blkaddr + i); 3883 3884 f2fs_truncate_meta_inode_pages(sbi, blkaddr, len); 3885 } 3886 3887 static int read_compacted_summaries(struct f2fs_sb_info *sbi) 3888 { 3889 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 3890 struct curseg_info *seg_i; 3891 unsigned char *kaddr; 3892 struct page *page; 3893 block_t start; 3894 int i, j, offset; 3895 3896 start = start_sum_block(sbi); 3897 3898 page = f2fs_get_meta_page(sbi, start++); 3899 if (IS_ERR(page)) 3900 return PTR_ERR(page); 3901 kaddr = (unsigned char *)page_address(page); 3902 3903 /* Step 1: restore nat cache */ 3904 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); 3905 memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE); 3906 3907 /* Step 2: restore sit cache */ 3908 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); 3909 memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE); 3910 offset = 2 * SUM_JOURNAL_SIZE; 3911 3912 /* Step 3: restore summary entries */ 3913 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 3914 unsigned short blk_off; 3915 unsigned int segno; 3916 3917 seg_i = CURSEG_I(sbi, i); 3918 segno = le32_to_cpu(ckpt->cur_data_segno[i]); 3919 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]); 3920 seg_i->next_segno = segno; 3921 reset_curseg(sbi, i, 0); 3922 seg_i->alloc_type = ckpt->alloc_type[i]; 3923 seg_i->next_blkoff = blk_off; 3924 3925 if (seg_i->alloc_type == SSR) 3926 blk_off = BLKS_PER_SEG(sbi); 3927 3928 for (j = 0; j < blk_off; j++) { 3929 struct f2fs_summary *s; 3930 3931 s = (struct f2fs_summary *)(kaddr + offset); 3932 seg_i->sum_blk->entries[j] = *s; 3933 offset += SUMMARY_SIZE; 3934 if (offset + SUMMARY_SIZE <= PAGE_SIZE - 3935 SUM_FOOTER_SIZE) 3936 continue; 3937 3938 f2fs_put_page(page, 1); 3939 page = NULL; 3940 3941 page = f2fs_get_meta_page(sbi, start++); 3942 if (IS_ERR(page)) 3943 return PTR_ERR(page); 3944 kaddr = (unsigned char *)page_address(page); 3945 offset = 0; 3946 } 3947 } 3948 f2fs_put_page(page, 1); 3949 return 0; 3950 } 3951 3952 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) 3953 { 3954 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 3955 struct f2fs_summary_block *sum; 3956 struct curseg_info *curseg; 3957 struct page *new; 3958 unsigned short blk_off; 3959 unsigned int segno = 0; 3960 block_t blk_addr = 0; 3961 int err = 0; 3962 3963 /* get segment number and block addr */ 3964 if (IS_DATASEG(type)) { 3965 segno = le32_to_cpu(ckpt->cur_data_segno[type]); 3966 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type - 3967 CURSEG_HOT_DATA]); 3968 if (__exist_node_summaries(sbi)) 3969 blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type); 3970 else 3971 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type); 3972 } else { 3973 segno = le32_to_cpu(ckpt->cur_node_segno[type - 3974 CURSEG_HOT_NODE]); 3975 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type - 3976 CURSEG_HOT_NODE]); 3977 if (__exist_node_summaries(sbi)) 3978 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE, 3979 type - CURSEG_HOT_NODE); 3980 else 3981 blk_addr = GET_SUM_BLOCK(sbi, segno); 3982 } 3983 3984 new = f2fs_get_meta_page(sbi, blk_addr); 3985 if (IS_ERR(new)) 3986 return PTR_ERR(new); 3987 sum = (struct f2fs_summary_block *)page_address(new); 3988 3989 if (IS_NODESEG(type)) { 3990 if 
(__exist_node_summaries(sbi)) { 3991 struct f2fs_summary *ns = &sum->entries[0]; 3992 int i; 3993 3994 for (i = 0; i < BLKS_PER_SEG(sbi); i++, ns++) { 3995 ns->version = 0; 3996 ns->ofs_in_node = 0; 3997 } 3998 } else { 3999 err = f2fs_restore_node_summary(sbi, segno, sum); 4000 if (err) 4001 goto out; 4002 } 4003 } 4004 4005 /* set uncompleted segment to curseg */ 4006 curseg = CURSEG_I(sbi, type); 4007 mutex_lock(&curseg->curseg_mutex); 4008 4009 /* update journal info */ 4010 down_write(&curseg->journal_rwsem); 4011 memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE); 4012 up_write(&curseg->journal_rwsem); 4013 4014 memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE); 4015 memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE); 4016 curseg->next_segno = segno; 4017 reset_curseg(sbi, type, 0); 4018 curseg->alloc_type = ckpt->alloc_type[type]; 4019 curseg->next_blkoff = blk_off; 4020 mutex_unlock(&curseg->curseg_mutex); 4021 out: 4022 f2fs_put_page(new, 1); 4023 return err; 4024 } 4025 4026 static int restore_curseg_summaries(struct f2fs_sb_info *sbi) 4027 { 4028 struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal; 4029 struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal; 4030 int type = CURSEG_HOT_DATA; 4031 int err; 4032 4033 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) { 4034 int npages = f2fs_npages_for_summary_flush(sbi, true); 4035 4036 if (npages >= 2) 4037 f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages, 4038 META_CP, true); 4039 4040 /* restore for compacted data summary */ 4041 err = read_compacted_summaries(sbi); 4042 if (err) 4043 return err; 4044 type = CURSEG_HOT_NODE; 4045 } 4046 4047 if (__exist_node_summaries(sbi)) 4048 f2fs_ra_meta_pages(sbi, 4049 sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type), 4050 NR_CURSEG_PERSIST_TYPE - type, META_CP, true); 4051 4052 for (; type <= CURSEG_COLD_NODE; type++) { 4053 err = read_normal_summaries(sbi, type); 4054 if (err) 4055 return err; 4056 } 4057 4058 /* sanity check for summary blocks */ 4059 if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES || 4060 sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) { 4061 f2fs_err(sbi, "invalid journal entries nats %u sits %u", 4062 nats_in_cursum(nat_j), sits_in_cursum(sit_j)); 4063 return -EINVAL; 4064 } 4065 4066 return 0; 4067 } 4068 4069 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr) 4070 { 4071 struct page *page; 4072 unsigned char *kaddr; 4073 struct f2fs_summary *summary; 4074 struct curseg_info *seg_i; 4075 int written_size = 0; 4076 int i, j; 4077 4078 page = f2fs_grab_meta_page(sbi, blkaddr++); 4079 kaddr = (unsigned char *)page_address(page); 4080 memset(kaddr, 0, PAGE_SIZE); 4081 4082 /* Step 1: write nat cache */ 4083 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); 4084 memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE); 4085 written_size += SUM_JOURNAL_SIZE; 4086 4087 /* Step 2: write sit cache */ 4088 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); 4089 memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE); 4090 written_size += SUM_JOURNAL_SIZE; 4091 4092 /* Step 3: write summary entries */ 4093 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 4094 seg_i = CURSEG_I(sbi, i); 4095 for (j = 0; j < f2fs_curseg_valid_blocks(sbi, i); j++) { 4096 if (!page) { 4097 page = f2fs_grab_meta_page(sbi, blkaddr++); 4098 kaddr = (unsigned char *)page_address(page); 4099 memset(kaddr, 0, PAGE_SIZE); 4100 written_size = 0; 4101 } 4102 summary = (struct f2fs_summary *)(kaddr + written_size); 4103 *summary = 

static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
	struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
	int type = CURSEG_HOT_DATA;
	int err;

	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
		int npages = f2fs_npages_for_summary_flush(sbi, true);

		if (npages >= 2)
			f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
							META_CP, true);

		/* restore for compacted data summary */
		err = read_compacted_summaries(sbi);
		if (err)
			return err;
		type = CURSEG_HOT_NODE;
	}

	if (__exist_node_summaries(sbi))
		f2fs_ra_meta_pages(sbi,
				sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type),
				NR_CURSEG_PERSIST_TYPE - type, META_CP, true);

	for (; type <= CURSEG_COLD_NODE; type++) {
		err = read_normal_summaries(sbi, type);
		if (err)
			return err;
	}

	/* sanity check for summary blocks */
	if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
			sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) {
		f2fs_err(sbi, "invalid journal entries nats %u sits %u",
			 nats_in_cursum(nat_j), sits_in_cursum(sit_j));
		return -EINVAL;
	}

	return 0;
}

static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct page *page;
	unsigned char *kaddr;
	struct f2fs_summary *summary;
	struct curseg_info *seg_i;
	int written_size = 0;
	int i, j;

	page = f2fs_grab_meta_page(sbi, blkaddr++);
	kaddr = (unsigned char *)page_address(page);
	memset(kaddr, 0, PAGE_SIZE);

	/* Step 1: write nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 2: write sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 3: write summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		seg_i = CURSEG_I(sbi, i);
		for (j = 0; j < f2fs_curseg_valid_blocks(sbi, i); j++) {
			if (!page) {
				page = f2fs_grab_meta_page(sbi, blkaddr++);
				kaddr = (unsigned char *)page_address(page);
				memset(kaddr, 0, PAGE_SIZE);
				written_size = 0;
			}
			summary = (struct f2fs_summary *)(kaddr + written_size);
			*summary = seg_i->sum_blk->entries[j];
			written_size += SUMMARY_SIZE;

			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
							SUM_FOOTER_SIZE)
				continue;

			set_page_dirty(page);
			f2fs_put_page(page, 1);
			page = NULL;
		}
	}
	if (page) {
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}
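
/*
 * Note (illustrative, editor's addition): in the non-compacted layout each
 * curseg owns a whole summary block, so the blocks simply sit at consecutive
 * addresses (blkaddr + 0 for hot, + 1 for warm, + 2 for cold) and
 * write_current_sum_page() flushes each one independently.
 */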

static void write_normal_summaries(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	int i, end;

	if (IS_DATASEG(type))
		end = type + NR_CURSEG_DATA_TYPE;
	else
		end = type + NR_CURSEG_NODE_TYPE;

	for (i = type; i < end; i++)
		write_current_sum_page(sbi, i, blkaddr + (i - type));
}

void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
		write_compacted_summaries(sbi, start_blk);
	else
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
}

void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}

int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
					unsigned int val, int alloc)
{
	int i;

	if (type == NAT_JOURNAL) {
		for (i = 0; i < nats_in_cursum(journal); i++) {
			if (le32_to_cpu(nid_in_journal(journal, i)) == val)
				return i;
		}
		if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
			return update_nats_in_cursum(journal, 1);
	} else if (type == SIT_JOURNAL) {
		for (i = 0; i < sits_in_cursum(journal); i++)
			if (le32_to_cpu(segno_in_journal(journal, i)) == val)
				return i;
		if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
			return update_sits_in_cursum(journal, 1);
	}
	return -1;
}

static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
}

static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
					unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct page *page;
	pgoff_t src_off, dst_off;

	src_off = current_sit_addr(sbi, start);
	dst_off = next_sit_addr(sbi, src_off);

	page = f2fs_grab_meta_page(sbi, dst_off);
	seg_info_to_sit_page(sbi, page, start);

	set_page_dirty(page);
	set_to_next_sit(sit_i, start);

	return page;
}

static struct sit_entry_set *grab_sit_entry_set(void)
{
	struct sit_entry_set *ses =
			f2fs_kmem_cache_alloc(sit_entry_set_slab,
						GFP_NOFS, true, NULL);

	ses->entry_cnt = 0;
	INIT_LIST_HEAD(&ses->set_list);
	return ses;
}

static void release_sit_entry_set(struct sit_entry_set *ses)
{
	list_del(&ses->set_list);
	kmem_cache_free(sit_entry_set_slab, ses);
}

static void adjust_sit_entry_set(struct sit_entry_set *ses,
						struct list_head *head)
{
	struct sit_entry_set *next = ses;

	if (list_is_last(&ses->set_list, head))
		return;

	list_for_each_entry_continue(next, head, set_list)
		if (ses->entry_cnt <= next->entry_cnt) {
			list_move_tail(&ses->set_list, &next->set_list);
			return;
		}

	list_move_tail(&ses->set_list, head);
}

static void add_sit_entry(unsigned int segno, struct list_head *head)
{
	struct sit_entry_set *ses;
	unsigned int start_segno = START_SEGNO(segno);

	list_for_each_entry(ses, head, set_list) {
		if (ses->start_segno == start_segno) {
			ses->entry_cnt++;
			adjust_sit_entry_set(ses, head);
			return;
		}
	}

	ses = grab_sit_entry_set();

	ses->start_segno = start_segno;
	ses->entry_cnt++;
	list_add(&ses->set_list, head);
}
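
/*
 * Editor's note: adjust_sit_entry_set() keeps the set list sorted by
 * ascending entry_cnt. f2fs_flush_sit_entries() walks the list in that
 * order, so the smallest sets are tried against the SIT journal first and
 * the limited journal space covers as many whole sets as possible before
 * the flush falls back to rewriting SIT blocks.
 */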

static void add_sits_in_set(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	struct list_head *set_list = &sm_info->sit_entry_set;
	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
	unsigned int segno;

	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
		add_sit_entry(segno, set_list);
}

static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_write(&curseg->journal_rwsem);
	for (i = 0; i < sits_in_cursum(journal); i++) {
		unsigned int segno;
		bool dirtied;

		segno = le32_to_cpu(segno_in_journal(journal, i));
		dirtied = __mark_sit_entry_dirty(sbi, segno);

		if (!dirtied)
			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
	}
	update_sits_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
}

/*
 * CP calls this function, which flushes SIT entries including sit_journal,
 * and moves prefree segs to free segs.
 */
void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct sit_entry_set *ses, *tmp;
	struct list_head *head = &SM_I(sbi)->sit_entry_set;
	bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
	struct seg_entry *se;

	down_write(&sit_i->sentry_lock);

	if (!sit_i->dirty_sentries)
		goto out;

	/*
	 * add and account sit entries of dirty bitmap in sit entry
	 * set temporarily
	 */
	add_sits_in_set(sbi);

	/*
	 * if there is not enough space in the journal to store the dirty
	 * sit entries, remove all entries from the journal and add and
	 * account them in the sit entry set.
	 */
	if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) ||
								!to_journal)
		remove_sits_in_journal(sbi);

	/*
	 * there are two steps to flush sit entries:
	 * #1, flush sit entries to journal in current cold data summary block.
	 * #2, flush sit entries to sit page.
	 */
	list_for_each_entry_safe(ses, tmp, head, set_list) {
		struct page *page = NULL;
		struct f2fs_sit_block *raw_sit = NULL;
		unsigned int start_segno = ses->start_segno;
		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
						(unsigned long)MAIN_SEGS(sbi));
		unsigned int segno = start_segno;

		if (to_journal &&
			!__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
			to_journal = false;

		if (to_journal) {
			down_write(&curseg->journal_rwsem);
		} else {
			page = get_next_sit_page(sbi, start_segno);
			raw_sit = page_address(page);
		}

		/* flush dirty sit entries in region of current sit set */
		for_each_set_bit_from(segno, bitmap, end) {
			int offset, sit_offset;

			se = get_seg_entry(sbi, segno);
#ifdef CONFIG_F2FS_CHECK_FS
			if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
						SIT_VBLOCK_MAP_SIZE))
				f2fs_bug_on(sbi, 1);
#endif

			/* add discard candidates */
			if (!(cpc->reason & CP_DISCARD)) {
				cpc->trim_start = segno;
				add_discard_addrs(sbi, cpc, false);
			}

			if (to_journal) {
				offset = f2fs_lookup_journal_in_cursum(journal,
							SIT_JOURNAL, segno, 1);
				f2fs_bug_on(sbi, offset < 0);
				segno_in_journal(journal, offset) =
							cpu_to_le32(segno);
				seg_info_to_raw_sit(se,
					&sit_in_journal(journal, offset));
				check_block_count(sbi, segno,
					&sit_in_journal(journal, offset));
			} else {
				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
				seg_info_to_raw_sit(se,
						&raw_sit->entries[sit_offset]);
				check_block_count(sbi, segno,
						&raw_sit->entries[sit_offset]);
			}

			__clear_bit(segno, bitmap);
			sit_i->dirty_sentries--;
			ses->entry_cnt--;
		}

		if (to_journal)
			up_write(&curseg->journal_rwsem);
		else
			f2fs_put_page(page, 1);

		f2fs_bug_on(sbi, ses->entry_cnt);
		release_sit_entry_set(ses);
	}

	f2fs_bug_on(sbi, !list_empty(head));
	f2fs_bug_on(sbi, sit_i->dirty_sentries);
out:
	if (cpc->reason & CP_DISCARD) {
		__u64 trim_start = cpc->trim_start;

		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
			add_discard_addrs(sbi, cpc, false);

		cpc->trim_start = trim_start;
	}
	up_write(&sit_i->sentry_lock);

	set_prefree_as_free_segments(sbi);
}
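
/*
 * Editor's note: build_sit_info() carves one large allocation into the
 * per-segment bitmaps. Each segment gets SIT_VBLOCK_MAP_SIZE bytes for
 * cur_valid_map and ckpt_valid_map, plus an optional discard_map (block
 * unit discard) and a cur_valid_map_mir mirror under CONFIG_F2FS_CHECK_FS,
 * which is where the "(2 + discard_map)" / "(3 + discard_map)" factors
 * below come from.
 */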

static int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs, start;
	char *src_bitmap, *bitmap;
	unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size;
	unsigned int discard_map = f2fs_block_unit_discard(sbi) ? 1 : 0;

	/* allocate memory for SIT information */
	sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
	if (!sit_i)
		return -ENOMEM;

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries =
		f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
					      MAIN_SEGS(sbi)),
			      GFP_KERNEL);
	if (!sit_i->sentries)
		return -ENOMEM;

	main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
								GFP_KERNEL);
	if (!sit_i->dirty_sentries_bitmap)
		return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (3 + discard_map);
#else
	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (2 + discard_map);
#endif
	sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
	if (!sit_i->bitmap)
		return -ENOMEM;

	bitmap = sit_i->bitmap;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map = bitmap;
		bitmap += SIT_VBLOCK_MAP_SIZE;

		sit_i->sentries[start].ckpt_valid_map = bitmap;
		bitmap += SIT_VBLOCK_MAP_SIZE;

#ifdef CONFIG_F2FS_CHECK_FS
		sit_i->sentries[start].cur_valid_map_mir = bitmap;
		bitmap += SIT_VBLOCK_MAP_SIZE;
#endif

		if (discard_map) {
			sit_i->sentries[start].discard_map = bitmap;
			bitmap += SIT_VBLOCK_MAP_SIZE;
		}
	}

	sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
	if (!sit_i->tmp_map)
		return -ENOMEM;

	if (__is_large_section(sbi)) {
		sit_i->sec_entries =
			f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
						      MAIN_SECS(sbi)),
				      GFP_KERNEL);
		if (!sit_i->sec_entries)
			return -ENOMEM;
	}

	/* get information related with SIT */
	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

	/* setup SIT bitmap from checkpoint pack */
	sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL);
	if (!sit_i->sit_bitmap)
		return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
	sit_i->sit_bitmap_mir = kmemdup(src_bitmap,
					sit_bitmap_size, GFP_KERNEL);
	if (!sit_i->sit_bitmap_mir)
		return -ENOMEM;

	sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
			main_bitmap_size, GFP_KERNEL);
	if (!sit_i->invalid_segmap)
		return -ENOMEM;
#endif

	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = 0;
	sit_i->bitmap_size = sit_bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
	sit_i->mounted_time = ktime_get_boottime_seconds();
	init_rwsem(&sit_i->sentry_lock);
	return 0;
}

static int build_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i;
	unsigned int bitmap_size, sec_bitmap_size;

	/* allocate memory for free segmap information */
	free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
	if (!free_i)
		return -ENOMEM;

	SM_I(sbi)->free_info = free_i;

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
	if (!free_i->free_segmap)
		return -ENOMEM;

	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
	free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
	if (!free_i->free_secmap)
		return -ENOMEM;

	/* set all segments as dirty temporarily */
	memset(free_i->free_segmap, 0xff, bitmap_size);
	memset(free_i->free_secmap, 0xff, sec_bitmap_size);

	/* init free segmap information */
	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
	free_i->free_segments = 0;
	free_i->free_sections = 0;
	spin_lock_init(&free_i->segmap_lock);
	return 0;
}

static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array;
	int i;

	array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE,
					sizeof(*array)), GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NO_CHECK_TYPE; i++) {
		mutex_init(&array[i].curseg_mutex);
		array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
		if (!array[i].sum_blk)
			return -ENOMEM;
		init_rwsem(&array[i].journal_rwsem);
		array[i].journal = f2fs_kzalloc(sbi,
				sizeof(struct f2fs_journal), GFP_KERNEL);
		if (!array[i].journal)
			return -ENOMEM;
		if (i < NR_PERSISTENT_LOG)
			array[i].seg_type = CURSEG_HOT_DATA + i;
		else if (i == CURSEG_COLD_DATA_PINNED)
			array[i].seg_type = CURSEG_COLD_DATA;
		else if (i == CURSEG_ALL_DATA_ATGC)
			array[i].seg_type = CURSEG_COLD_DATA;
		array[i].segno = NULL_SEGNO;
		array[i].next_blkoff = 0;
		array[i].inited = false;
	}
	return restore_curseg_summaries(sbi);
}
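
/*
 * Editor's note: SIT state is loaded in two passes. The loop below reads
 * the on-disk SIT blocks for every segment; afterwards the entries cached
 * in the SIT journal of the cold data summary are replayed on top, since
 * the journal holds updates that were checkpointed but never flushed back
 * to their SIT blocks.
 */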

static int build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct seg_entry *se;
	struct f2fs_sit_entry sit;
	int sit_blk_cnt = SIT_BLK_CNT(sbi);
	unsigned int i, start, end;
	unsigned int readed, start_blk = 0;
	int err = 0;
	block_t sit_valid_blocks[2] = {0, 0};

	do {
		readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
							META_SIT, true);

		start = start_blk * sit_i->sents_per_block;
		end = (start_blk + readed) * sit_i->sents_per_block;

		for (; start < end && start < MAIN_SEGS(sbi); start++) {
			struct f2fs_sit_block *sit_blk;
			struct page *page;

			se = &sit_i->sentries[start];
			page = get_current_sit_page(sbi, start);
			if (IS_ERR(page))
				return PTR_ERR(page);
			sit_blk = (struct f2fs_sit_block *)page_address(page);
			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
			f2fs_put_page(page, 1);

			err = check_block_count(sbi, start, &sit);
			if (err)
				return err;
			seg_info_from_raw_sit(se, &sit);

			if (se->type >= NR_PERSISTENT_LOG) {
				f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
							se->type, start);
				f2fs_handle_error(sbi,
						ERROR_INCONSISTENT_SUM_TYPE);
				return -EFSCORRUPTED;
			}

			sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;

			if (!f2fs_block_unit_discard(sbi))
				goto init_discard_map_done;

			/* build discard map only one time */
			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
				memset(se->discard_map, 0xff,
						SIT_VBLOCK_MAP_SIZE);
				goto init_discard_map_done;
			}
			memcpy(se->discard_map, se->cur_valid_map,
						SIT_VBLOCK_MAP_SIZE);
			sbi->discard_blks += BLKS_PER_SEG(sbi) -
						se->valid_blocks;
init_discard_map_done:
			if (__is_large_section(sbi))
				get_sec_entry(sbi, start)->valid_blocks +=
							se->valid_blocks;
		}
		start_blk += readed;
	} while (start_blk < sit_blk_cnt);

	down_read(&curseg->journal_rwsem);
	for (i = 0; i < sits_in_cursum(journal); i++) {
		unsigned int old_valid_blocks;

		start = le32_to_cpu(segno_in_journal(journal, i));
		if (start >= MAIN_SEGS(sbi)) {
			f2fs_err(sbi, "Wrong journal entry on segno %u",
				 start);
			err = -EFSCORRUPTED;
			f2fs_handle_error(sbi, ERROR_CORRUPTED_JOURNAL);
			break;
		}

		se = &sit_i->sentries[start];
		sit = sit_in_journal(journal, i);

		old_valid_blocks = se->valid_blocks;

		sit_valid_blocks[SE_PAGETYPE(se)] -= old_valid_blocks;

		err = check_block_count(sbi, start, &sit);
		if (err)
			break;
		seg_info_from_raw_sit(se, &sit);

		if (se->type >= NR_PERSISTENT_LOG) {
			f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
							se->type, start);
			err = -EFSCORRUPTED;
			f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
			break;
		}

		sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;

		if (f2fs_block_unit_discard(sbi)) {
			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
				memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
			} else {
				memcpy(se->discard_map, se->cur_valid_map,
							SIT_VBLOCK_MAP_SIZE);
				sbi->discard_blks += old_valid_blocks;
				sbi->discard_blks -= se->valid_blocks;
			}
		}

		if (__is_large_section(sbi)) {
			get_sec_entry(sbi, start)->valid_blocks +=
							se->valid_blocks;
			get_sec_entry(sbi, start)->valid_blocks -=
							old_valid_blocks;
		}
	}
	up_read(&curseg->journal_rwsem);

	if (err)
		return err;

	if (sit_valid_blocks[NODE] != valid_node_count(sbi)) {
		f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
			 sit_valid_blocks[NODE], valid_node_count(sbi));
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NODE_COUNT);
		return -EFSCORRUPTED;
	}

	if (sit_valid_blocks[DATA] + sit_valid_blocks[NODE] >
				valid_user_blocks(sbi)) {
		f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u",
			 sit_valid_blocks[DATA], sit_valid_blocks[NODE],
			 valid_user_blocks(sbi));
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_BLOCK_COUNT);
		return -EFSCORRUPTED;
	}

	return 0;
}
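
/*
 * Editor's note: build_free_segmap() pessimistically marked every segment
 * as in use; the pass below frees the segments whose SIT entries show zero
 * valid blocks and then re-marks the segments the cursegs occupy.
 */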

static void init_free_segmap(struct f2fs_sb_info *sbi)
{
	unsigned int start;
	int type;
	struct seg_entry *sentry;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		if (f2fs_usable_blks_in_seg(sbi, start) == 0)
			continue;
		sentry = get_seg_entry(sbi, start);
		if (!sentry->valid_blocks)
			__set_free(sbi, start);
		else
			SIT_I(sbi)->written_valid_blocks +=
						sentry->valid_blocks;
	}

	/* mark the current segments as in-use */
	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
		struct curseg_info *curseg_t = CURSEG_I(sbi, type);

		__set_test_and_inuse(sbi, curseg_t->segno);
	}
}

static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno = 0, offset = 0, secno;
	block_t valid_blocks, usable_blks_in_seg;

	while (1) {
		/* find dirty segment based on free segmap */
		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
		if (segno >= MAIN_SEGS(sbi))
			break;
		offset = segno + 1;
		valid_blocks = get_valid_blocks(sbi, segno, false);
		usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
		if (valid_blocks == usable_blks_in_seg || !valid_blocks)
			continue;
		if (valid_blocks > usable_blks_in_seg) {
			f2fs_bug_on(sbi, 1);
			continue;
		}
		mutex_lock(&dirty_i->seglist_lock);
		__locate_dirty_segment(sbi, segno, DIRTY);
		mutex_unlock(&dirty_i->seglist_lock);
	}

	if (!__is_large_section(sbi))
		return;

	mutex_lock(&dirty_i->seglist_lock);
	for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
		valid_blocks = get_valid_blocks(sbi, segno, true);
		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (!valid_blocks || valid_blocks == CAP_BLKS_PER_SEC(sbi))
			continue;
		if (IS_CURSEC(sbi, secno))
			continue;
		set_bit(secno, dirty_i->dirty_secmap);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
	if (!dirty_i->victim_secmap)
		return -ENOMEM;

	dirty_i->pinned_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
	if (!dirty_i->pinned_secmap)
		return -ENOMEM;

	dirty_i->pinned_secmap_cnt = 0;
	dirty_i->enable_pin_section = true;
	return 0;
}

static int build_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i;
	unsigned int bitmap_size, i;

	/* allocate memory for dirty segments list information */
	dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
								GFP_KERNEL);
	if (!dirty_i)
		return -ENOMEM;

	SM_I(sbi)->dirty_info = dirty_i;
	mutex_init(&dirty_i->seglist_lock);

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));

	for (i = 0; i < NR_DIRTY_TYPE; i++) {
		dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
								GFP_KERNEL);
		if (!dirty_i->dirty_segmap[i])
			return -ENOMEM;
	}

	if (__is_large_section(sbi)) {
		bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
		dirty_i->dirty_secmap = f2fs_kvzalloc(sbi,
						bitmap_size, GFP_KERNEL);
		if (!dirty_i->dirty_secmap)
			return -ENOMEM;
	}

	init_dirty_segmap(sbi);
	return init_victim_secmap(sbi);
}
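
/*
 * Illustration (editor's note): for an LFS curseg with next_blkoff == 3,
 * a cur_valid_map of 1110 0000 ... passes the check below, while any set
 * bit at offset >= 3 means the "next free" slot is not actually free and
 * the image is rejected as corrupted.
 */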

static int sanity_check_curseg(struct f2fs_sb_info *sbi)
{
	int i;

	/*
	 * In LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
	 * In LFS curseg, all blkaddr after .next_blkoff should be unused.
	 */
	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
		unsigned int blkofs = curseg->next_blkoff;

		if (f2fs_sb_has_readonly(sbi) &&
			i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE)
			continue;

		sanity_check_seg_type(sbi, curseg->seg_type);

		if (curseg->alloc_type != LFS && curseg->alloc_type != SSR) {
			f2fs_err(sbi,
				 "Current segment has invalid alloc_type:%d",
				 curseg->alloc_type);
			f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
			return -EFSCORRUPTED;
		}

		if (f2fs_test_bit(blkofs, se->cur_valid_map))
			goto out;

		if (curseg->alloc_type == SSR)
			continue;

		for (blkofs += 1; blkofs < BLKS_PER_SEG(sbi); blkofs++) {
			if (!f2fs_test_bit(blkofs, se->cur_valid_map))
				continue;
out:
			f2fs_err(sbi,
				 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
				 i, curseg->segno, curseg->alloc_type,
				 curseg->next_blkoff, blkofs);
			f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
			return -EFSCORRUPTED;
		}
	}
	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
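
/*
 * Editor's note: on a sequential-write-required zone, three outcomes are
 * possible below. If the write pointer sits right after the last valid
 * block (or at the zone end), the zone is consistent. If the zone holds
 * no valid blocks but the write pointer moved, the zone is reset. If
 * valid blocks exist but do not line up with the write pointer, the zone
 * is finished (or zeroed out) so it cannot be written until discarded.
 */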
Reset the write pointer: wp[0x%x,0x%x]", 4960 wp_segno, wp_blkoff); 4961 ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block, 4962 zone->len >> log_sectors_per_block); 4963 if (ret) 4964 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)", 4965 fdev->path, ret); 4966 4967 return ret; 4968 } 4969 4970 /* 4971 * If there are valid blocks and the write pointer doesn't 4972 * match with them, we need to report the inconsistency and 4973 * fill the zone till the end to close the zone. This inconsistency 4974 * does not cause write error because the zone will not be selected 4975 * for write operation until it get discarded. 4976 */ 4977 f2fs_notice(sbi, "Valid blocks are not aligned with write pointer: " 4978 "valid block[0x%x,0x%x] wp[0x%x,0x%x]", 4979 GET_SEGNO(sbi, last_valid_block), 4980 GET_BLKOFF_FROM_SEG0(sbi, last_valid_block), 4981 wp_segno, wp_blkoff); 4982 4983 ret = blkdev_zone_mgmt(fdev->bdev, REQ_OP_ZONE_FINISH, 4984 zone->start, zone->len, GFP_NOFS); 4985 if (ret == -EOPNOTSUPP) { 4986 ret = blkdev_issue_zeroout(fdev->bdev, zone->wp, 4987 zone->len - (zone->wp - zone->start), 4988 GFP_NOFS, 0); 4989 if (ret) 4990 f2fs_err(sbi, "Fill up zone failed: %s (errno=%d)", 4991 fdev->path, ret); 4992 } else if (ret) { 4993 f2fs_err(sbi, "Finishing zone failed: %s (errno=%d)", 4994 fdev->path, ret); 4995 } 4996 4997 return ret; 4998 } 4999 5000 static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi, 5001 block_t zone_blkaddr) 5002 { 5003 int i; 5004 5005 for (i = 0; i < sbi->s_ndevs; i++) { 5006 if (!bdev_is_zoned(FDEV(i).bdev)) 5007 continue; 5008 if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr && 5009 zone_blkaddr <= FDEV(i).end_blk)) 5010 return &FDEV(i); 5011 } 5012 5013 return NULL; 5014 } 5015 5016 static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx, 5017 void *data) 5018 { 5019 memcpy(data, zone, sizeof(struct blk_zone)); 5020 return 0; 5021 } 5022 5023 static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type) 5024 { 5025 struct curseg_info *cs = CURSEG_I(sbi, type); 5026 struct f2fs_dev_info *zbd; 5027 struct blk_zone zone; 5028 unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off; 5029 block_t cs_zone_block, wp_block; 5030 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT; 5031 sector_t zone_sector; 5032 int err; 5033 5034 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno); 5035 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section)); 5036 5037 zbd = get_target_zoned_dev(sbi, cs_zone_block); 5038 if (!zbd) 5039 return 0; 5040 5041 /* report zone for the sector the curseg points to */ 5042 zone_sector = (sector_t)(cs_zone_block - zbd->start_blk) 5043 << log_sectors_per_block; 5044 err = blkdev_report_zones(zbd->bdev, zone_sector, 1, 5045 report_one_zone_cb, &zone); 5046 if (err != 1) { 5047 f2fs_err(sbi, "Report zone failed: %s errno=(%d)", 5048 zbd->path, err); 5049 return err; 5050 } 5051 5052 if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ) 5053 return 0; 5054 5055 wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block); 5056 wp_segno = GET_SEGNO(sbi, wp_block); 5057 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno); 5058 wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0); 5059 5060 if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff && 5061 wp_sector_off == 0) 5062 return 0; 5063 5064 f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: " 5065 "curseg[0x%x,0x%x] wp[0x%x,0x%x]", 5066 type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff); 5067 

static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *cs = CURSEG_I(sbi, type);
	struct f2fs_dev_info *zbd;
	struct blk_zone zone;
	unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
	block_t cs_zone_block, wp_block;
	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
	sector_t zone_sector;
	int err;

	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));

	zbd = get_target_zoned_dev(sbi, cs_zone_block);
	if (!zbd)
		return 0;

	/* report zone for the sector the curseg points to */
	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
						<< log_sectors_per_block;
	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
				  report_one_zone_cb, &zone);
	if (err != 1) {
		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
			 zbd->path, err);
		return err;
	}

	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
		return 0;

	wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
	wp_segno = GET_SEGNO(sbi, wp_block);
	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
	wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);

	if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
		wp_sector_off == 0)
		return 0;

	f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
		    "curseg[0x%x,0x%x] wp[0x%x,0x%x]",
		    type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff);

	f2fs_notice(sbi, "Assign new section to curseg[%d]: "
		    "curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff);

	f2fs_allocate_new_section(sbi, type, true);

	/* check consistency of the zone curseg pointed to */
	if (check_zone_write_pointer(sbi, zbd, &zone))
		return -EIO;

	/* check newly assigned zone */
	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));

	zbd = get_target_zoned_dev(sbi, cs_zone_block);
	if (!zbd)
		return 0;

	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
						<< log_sectors_per_block;
	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
				  report_one_zone_cb, &zone);
	if (err != 1) {
		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
			 zbd->path, err);
		return err;
	}

	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
		return 0;

	if (zone.wp != zone.start) {
		f2fs_notice(sbi,
			    "New zone for curseg[%d] is not yet discarded. "
			    "Reset the zone: curseg[0x%x,0x%x]",
			    type, cs->segno, cs->next_blkoff);
		err = __f2fs_issue_discard_zone(sbi, zbd->bdev, cs_zone_block,
					zone.len >> log_sectors_per_block);
		if (err) {
			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
				 zbd->path, err);
			return err;
		}
	}

	return 0;
}

int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
{
	int i, ret;

	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
		ret = fix_curseg_write_pointer(sbi, i);
		if (ret)
			return ret;
	}

	return 0;
}

struct check_zone_write_pointer_args {
	struct f2fs_sb_info *sbi;
	struct f2fs_dev_info *fdev;
};

static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
				       void *data)
{
	struct check_zone_write_pointer_args *args;

	args = (struct check_zone_write_pointer_args *)data;

	return check_zone_write_pointer(args->sbi, args->fdev, zone);
}

int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
{
	int i, ret;
	struct check_zone_write_pointer_args args;

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (!bdev_is_zoned(FDEV(i).bdev))
			continue;

		args.sbi = sbi;
		args.fdev = &FDEV(i);
		ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES,
					  check_zone_write_pointer_cb, &args);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * Return the number of usable blocks in a segment. The number of blocks
 * returned is always equal to the number of blocks in a segment for
 * segments fully contained within a sequential zone capacity or a
 * conventional zone. For segments partially contained in a sequential
 * zone capacity, the number of usable blocks up to the zone capacity
 * is returned. 0 is returned in all other cases.
 */
static inline unsigned int f2fs_usable_zone_blks_in_seg(
			struct f2fs_sb_info *sbi, unsigned int segno)
{
	block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
	unsigned int secno;

	if (!sbi->unusable_blocks_per_sec)
		return BLKS_PER_SEG(sbi);

	secno = GET_SEC_FROM_SEG(sbi, segno);
	seg_start = START_BLOCK(sbi, segno);
	sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
	sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi);

	/*
	 * If the segment starts before the zone capacity and spans beyond
	 * the zone capacity, then the usable blocks are from the seg start
	 * to the zone capacity. If the segment starts after the zone
	 * capacity, then there are no usable blocks.
	 */
	if (seg_start >= sec_cap_blkaddr)
		return 0;
	if (seg_start + BLKS_PER_SEG(sbi) > sec_cap_blkaddr)
		return sec_cap_blkaddr - seg_start;

	return BLKS_PER_SEG(sbi);
}
#else
int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
{
	return 0;
}

int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
{
	return 0;
}

static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi,
							unsigned int segno)
{
	return 0;
}

#endif
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	if (f2fs_sb_has_blkzoned(sbi))
		return f2fs_usable_zone_blks_in_seg(sbi, segno);

	return BLKS_PER_SEG(sbi);
}

unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	if (f2fs_sb_has_blkzoned(sbi))
		return CAP_SEGS_PER_SEC(sbi);

	return SEGS_PER_SEC(sbi);
}
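
/*
 * Worked example (editor's note, hypothetical geometry): take a section
 * of 4 segments whose zone capacity covers 2.5 segments. Segments 0 and 1
 * lie fully below the capacity and report a full BLKS_PER_SEG(); segment 2
 * straddles the capacity boundary and reports half a segment; segment 3
 * starts past the capacity and reports 0.
 */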

/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	down_write(&sit_i->sentry_lock);

	sit_i->min_mtime = ULLONG_MAX;

	for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
		unsigned int i;
		unsigned long long mtime = 0;

		for (i = 0; i < SEGS_PER_SEC(sbi); i++)
			mtime += get_seg_entry(sbi, segno + i)->mtime;

		mtime = div_u64(mtime, SEGS_PER_SEC(sbi));

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi, false);
	sit_i->dirty_max_mtime = 0;
	up_write(&sit_i->sentry_lock);
}

int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	sm_info->rec_prefree_segments = sm_info->main_segments *
					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;

	if (!f2fs_lfs_mode(sbi))
		sm_info->ipu_policy = BIT(F2FS_IPU_FSYNC);
	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
	sm_info->min_seq_blocks = BLKS_PER_SEG(sbi);
	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
	sm_info->min_ssr_sections = reserved_sections(sbi);

	INIT_LIST_HEAD(&sm_info->sit_entry_set);

	init_f2fs_rwsem(&sm_info->curseg_lock);

	err = f2fs_create_flush_cmd_control(sbi);
	if (err)
		return err;

	err = create_discard_cmd_control(sbi);
	if (err)
		return err;

	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* reinit free segmap based on SIT */
	err = build_sit_entries(sbi);
	if (err)
		return err;

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	err = sanity_check_curseg(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}
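
/*
 * Editor's note: the destroy_* helpers below undo f2fs_build_segment_manager()
 * in roughly reverse order of construction (dirty segmap, cursegs, free
 * segmap, SIT info), and each one tolerates a NULL pointer so teardown is
 * safe after a partially failed build.
 */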

static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
					enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kvfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}

static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	kvfree(dirty_i->pinned_secmap);
	kvfree(dirty_i->victim_secmap);
}

static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	if (__is_large_section(sbi)) {
		mutex_lock(&dirty_i->seglist_lock);
		kvfree(dirty_i->dirty_secmap);
		mutex_unlock(&dirty_i->seglist_lock);
	}

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}

static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
	SM_I(sbi)->curseg_array = NULL;
	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		kfree(array[i].sum_blk);
		kfree(array[i].journal);
	}
	kfree(array);
}

static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = SM_I(sbi)->free_info;

	if (!free_i)
		return;
	SM_I(sbi)->free_info = NULL;
	kvfree(free_i->free_segmap);
	kvfree(free_i->free_secmap);
	kfree(free_i);
}

static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);

	if (!sit_i)
		return;

	if (sit_i->sentries)
		kvfree(sit_i->bitmap);
	kfree(sit_i->tmp_map);

	kvfree(sit_i->sentries);
	kvfree(sit_i->sec_entries);
	kvfree(sit_i->dirty_sentries_bitmap);

	SM_I(sbi)->sit_info = NULL;
	kvfree(sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	kvfree(sit_i->sit_bitmap_mir);
	kvfree(sit_i->invalid_segmap);
#endif
	kfree(sit_i);
}

void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);

	if (!sm_info)
		return;
	f2fs_destroy_flush_cmd_control(sbi, true);
	destroy_discard_cmd_control(sbi);
	destroy_dirty_segmap(sbi);
	destroy_curseg(sbi);
	destroy_free_segmap(sbi);
	destroy_sit_info(sbi);
	sbi->sm_info = NULL;
	kfree(sm_info);
}

int __init f2fs_create_segment_manager_caches(void)
{
	discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry",
			sizeof(struct discard_entry));
	if (!discard_entry_slab)
		goto fail;

	discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd",
			sizeof(struct discard_cmd));
	if (!discard_cmd_slab)
		goto destroy_discard_entry;

	sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set",
			sizeof(struct sit_entry_set));
	if (!sit_entry_set_slab)
		goto destroy_discard_cmd;

	revoke_entry_slab = f2fs_kmem_cache_create("f2fs_revoke_entry",
			sizeof(struct revoke_entry));
	if (!revoke_entry_slab)
		goto destroy_sit_entry_set;
	return 0;

destroy_sit_entry_set:
	kmem_cache_destroy(sit_entry_set_slab);
destroy_discard_cmd:
	kmem_cache_destroy(discard_cmd_slab);
destroy_discard_entry:
	kmem_cache_destroy(discard_entry_slab);
fail:
	return -ENOMEM;
}

void f2fs_destroy_segment_manager_caches(void)
{
	kmem_cache_destroy(sit_entry_set_slab);
	kmem_cache_destroy(discard_cmd_slab);
	kmem_cache_destroy(discard_entry_slab);
	kmem_cache_destroy(revoke_entry_slab);
}