// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *revoke_entry_slab;

static unsigned long __reverse_ulong(unsigned char *str)
{
	unsigned long tmp = 0;
	int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
	shift = 56;
#endif
	while (shift >= 0) {
		tmp |= (unsigned long)str[idx++] << shift;
		shift -= BITS_PER_BYTE;
	}
	return tmp;
}

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff00000000UL) == 0)
		num += 32;
	else
		word >>= 32;
#endif
	if ((word & 0xffff0000) == 0)
		num += 16;
	else
		word >>= 16;

	if ((word & 0xff00) == 0)
		num += 8;
	else
		word >>= 8;

	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;

	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;

	if ((word & 0x2) == 0)
		num += 1;
	return num;
}
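
/*
 * Illustration (an added note, assuming a 64-bit kernel): for the byte
 * stream {0x80, 0x00, 0x00, ...}, __reverse_ulong() returns
 * 0x8000000000000000UL, i.e. bit 0 of the f2fs bitmap becomes the MSB
 * of the word, so the MSB-first scan in __reverse_ffs() finds it at
 * position 0.
 */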

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be a multiple of BITS_PER_LONG, i.e. whole unsigned longs.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == 0)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		tmp &= ~0UL >> offset;
		if (size < BITS_PER_LONG)
			tmp &= (~0UL << (BITS_PER_LONG - size));
		if (tmp)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffs(tmp);
}

static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == ~0UL)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		if (offset)
			tmp |= ~0UL << (BITS_PER_LONG - offset);
		if (size < BITS_PER_LONG)
			tmp |= ~0UL >> size;
		if (tmp != ~0UL)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffz(tmp);
}

bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

	if (f2fs_lfs_mode(sbi))
		return false;
	if (sbi->gc_mode == GC_URGENT_HIGH)
		return true;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return true;

	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
}

void f2fs_abort_atomic_write(struct inode *inode, bool clean)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (!f2fs_is_atomic_file(inode))
		return;

	if (clean)
		truncate_inode_pages_final(inode->i_mapping);

	release_atomic_write_cnt(inode);
	clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
	clear_inode_flag(inode, FI_ATOMIC_REPLACE);
	clear_inode_flag(inode, FI_ATOMIC_FILE);
	if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
		clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
		f2fs_mark_inode_dirty_sync(inode, true);
	}
	stat_dec_atomic_inode(inode);

	F2FS_I(inode)->atomic_write_task = NULL;

	if (clean) {
		f2fs_i_size_write(inode, fi->original_i_size);
		fi->original_i_size = 0;
	}
	/* avoid stale dirty inode during eviction */
	sync_inode_metadata(inode, 0);
}

static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
			block_t new_addr, block_t *old_addr, bool recover)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct node_info ni;
	int err;

retry:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
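	/*
	 * Added note: -ENOMEM from the node lookup is treated as transient;
	 * back off briefly and retry, since an atomic replace must not fail
	 * just because memory is momentarily tight.
	 */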
	if (err) {
		if (err == -ENOMEM) {
			f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
			goto retry;
		}
		return err;
	}

	err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	if (recover) {
		/* dn.data_blkaddr is always valid */
		if (!__is_valid_data_blkaddr(new_addr)) {
			if (new_addr == NULL_ADDR)
				dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
			f2fs_update_data_blkaddr(&dn, new_addr);
		} else {
			f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
				new_addr, ni.version, true, true);
		}
	} else {
		blkcnt_t count = 1;

		err = inc_valid_block_count(sbi, inode, &count, true);
		if (err) {
			f2fs_put_dnode(&dn);
			return err;
		}

		*old_addr = dn.data_blkaddr;
		f2fs_truncate_data_blocks_range(&dn, 1);
		dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count);

		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
				ni.version, true, false);
	}

	f2fs_put_dnode(&dn);

	trace_f2fs_replace_atomic_write_block(inode, F2FS_I(inode)->cow_inode,
			index, old_addr ? *old_addr : 0, new_addr, recover);
	return 0;
}

static void __complete_revoke_list(struct inode *inode, struct list_head *head,
					bool revoke)
{
	struct revoke_entry *cur, *tmp;
	pgoff_t start_index = 0;
	bool truncate = is_inode_flag_set(inode, FI_ATOMIC_REPLACE);

	list_for_each_entry_safe(cur, tmp, head, list) {
		if (revoke) {
			__replace_atomic_write_block(inode, cur->index,
						cur->old_addr, NULL, true);
		} else if (truncate) {
			f2fs_truncate_hole(inode, start_index, cur->index);
			start_index = cur->index + 1;
		}

		list_del(&cur->list);
		kmem_cache_free(revoke_entry_slab, cur);
	}

	if (!revoke && truncate)
		f2fs_do_truncate_blocks(inode, start_index * PAGE_SIZE, false);
}

static int __f2fs_commit_atomic_write(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inode *cow_inode = fi->cow_inode;
	struct revoke_entry *new;
	struct list_head revoke_list;
	block_t blkaddr;
	struct dnode_of_data dn;
	pgoff_t len = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t off = 0, blen, index;
	int ret = 0, i;

	INIT_LIST_HEAD(&revoke_list);

	while (len) {
		blen = min_t(pgoff_t, ADDRS_PER_BLOCK(cow_inode), len);

		set_new_dnode(&dn, cow_inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
		if (ret && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			ret = 0;
			if (dn.max_level == 0)
				goto out;
			goto next;
		}

		blen = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, cow_inode),
				len);
		index = off;
		for (i = 0; i < blen; i++, dn.ofs_in_node++, index++) {
			blkaddr = f2fs_data_blkaddr(&dn);

			if (!__is_valid_data_blkaddr(blkaddr)) {
				continue;
			} else if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				ret = -EFSCORRUPTED;
				f2fs_handle_error(sbi,
						ERROR_INVALID_BLKADDR);
				goto out;
			}

			new = f2fs_kmem_cache_alloc(revoke_entry_slab, GFP_NOFS,
							true, NULL);

			ret = __replace_atomic_write_block(inode, index, blkaddr,
							&new->old_addr, false);
			if (ret) {
				f2fs_put_dnode(&dn);
				kmem_cache_free(revoke_entry_slab, new);
				goto out;
			}

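			/*
			 * Added note: the block now belongs to the original
			 * inode, so detach it from the COW inode's dnode to
			 * avoid a double reference.
			 */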
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			new->index = index;
			list_add_tail(&new->list, &revoke_list);
		}
		f2fs_put_dnode(&dn);
next:
		off += blen;
		len -= blen;
	}

out:
	if (ret) {
		sbi->revoked_atomic_block += fi->atomic_write_cnt;
	} else {
		sbi->committed_atomic_block += fi->atomic_write_cnt;
		set_inode_flag(inode, FI_ATOMIC_COMMITTED);
		if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
			clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
			f2fs_mark_inode_dirty_sync(inode, true);
		}
	}

	__complete_revoke_list(inode, &revoke_list, ret ? true : false);

	return ret;
}

int f2fs_commit_atomic_write(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (err)
		return err;

	f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
	f2fs_lock_op(sbi);

	err = __f2fs_commit_atomic_write(inode);

	f2fs_unlock_op(sbi);
	f2fs_up_write(&fi->i_gc_rwsem[WRITE]);

	return err;
}

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
	if (time_to_inject(sbi, FAULT_CHECKPOINT))
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_FAULT_INJECT);

	/* f2fs_balance_fs_bg() may already be pending */
	if (need && excess_cached_nats(sbi))
		f2fs_balance_fs_bg(sbi, false);

	if (!f2fs_is_checkpoint_ready(sbi))
		return;

	/*
	 * If there are too many dirty dir/node pages without enough free
	 * segments, we should do GC, which may end up with a checkpoint.
	 */
	if (has_enough_free_secs(sbi, 0, 0))
		return;

	if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
				sbi->gc_thread->f2fs_gc_task) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
					TASK_UNINTERRUPTIBLE);
		wake_up(&sbi->gc_thread->gc_wait_queue_head);
		io_schedule();
		finish_wait(&sbi->gc_thread->fggc_wq, &wait);
	} else {
		struct f2fs_gc_control gc_control = {
			.victim_segno = NULL_SEGNO,
			.init_gc_type = BG_GC,
			.no_bg_gc = true,
			.should_migrate_blocks = false,
			.err_gc_skipped = false,
			.nr_free_secs = 1 };

		f2fs_down_write(&sbi->gc_lock);
		stat_inc_gc_call_count(sbi, FOREGROUND);
		f2fs_gc(sbi, &gc_control);
	}
}

static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
{
	int factor = f2fs_rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
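	/*
	 * Added note: while cp_rwsem is held (e.g. a checkpoint is in
	 * flight), the larger factor tolerates more dirty pages before
	 * another sync is forced.
	 */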
	unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS);
	unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
	unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
	unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
	unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
	unsigned int threshold = (factor * DEFAULT_DIRTY_THRESHOLD) <<
				sbi->log_blocks_per_seg;
	unsigned int global_threshold = threshold * 3 / 2;

	if (dents >= threshold || qdata >= threshold ||
		nodes >= threshold || meta >= threshold ||
		imeta >= threshold)
		return true;
	return dents + qdata + nodes + meta + imeta > global_threshold;
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
{
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return;

	/* try to shrink the read extent cache when there is not enough memory */
	if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE))
		f2fs_shrink_read_extent_tree(sbi,
				READ_EXTENT_CACHE_SHRINK_NUMBER);

	/* try to shrink the age extent cache when there is not enough memory */
	if (!f2fs_available_free_memory(sbi, AGE_EXTENT_CACHE))
		f2fs_shrink_age_extent_tree(sbi,
				AGE_EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries */
	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
		f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

	if (!f2fs_available_free_memory(sbi, FREE_NIDS))
		f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
	else
		f2fs_build_free_nids(sbi, false, false);

	if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) ||
		excess_prefree_segs(sbi) || !f2fs_space_for_roll_forward(sbi))
		goto do_sync;

	/* there is background inflight IO or foreground operation recently */
	if (is_inflight_io(sbi, REQ_TIME) ||
		(!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem)))
		return;

	/* the periodic checkpoint timeout has been exceeded */
	if (f2fs_time_over(sbi, CP_TIME))
		goto do_sync;

	/* checkpoint is the only way to shrink partial cached entries */
	if (f2fs_available_free_memory(sbi, NAT_ENTRIES) &&
		f2fs_available_free_memory(sbi, INO_ENTRIES))
		return;

do_sync:
	if (test_opt(sbi, DATA_FLUSH) && from_bg) {
		struct blk_plug plug;

		mutex_lock(&sbi->flush_lock);

		blk_start_plug(&plug);
		f2fs_sync_dirty_inodes(sbi, FILE_INODE, false);
		blk_finish_plug(&plug);

		mutex_unlock(&sbi->flush_lock);
	}
	stat_inc_cp_call_count(sbi, BACKGROUND);
	f2fs_sync_fs(sbi->sb, 1);
}

static int __submit_flush_wait(struct f2fs_sb_info *sbi,
				struct block_device *bdev)
{
	int ret = blkdev_issue_flush(bdev);

	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
				test_opt(sbi, FLUSH_MERGE), ret);
	if (!ret)
		f2fs_update_iostat(sbi, NULL, FS_FLUSH_IO, 0);
	return ret;
}

static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
{
	int ret = 0;
	int i;

	if (!f2fs_is_multi_device(sbi))
		return __submit_flush_wait(sbi, sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
			continue;
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;
	}
	return ret;
}

static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
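	/*
	 * Added note: all flush requests queued on issue_list since the
	 * last pass are drained below and completed with a single device
	 * flush, which is the point of FLUSH_MERGE.
	 */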
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&fcc->issue_list)) {
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);

		ret = submit_flush_wait(sbi, cmd->ino);
		atomic_inc(&fcc->issued_flush);

		llist_for_each_entry_safe(cmd, next,
					  fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		fcc->dispatch_list = NULL;
	}

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}

int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	struct flush_cmd cmd;
	int ret;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE)) {
		atomic_inc(&fcc->queued_flush);
		ret = submit_flush_wait(sbi, ino);
		atomic_dec(&fcc->queued_flush);
		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	if (atomic_inc_return(&fcc->queued_flush) == 1 ||
			f2fs_is_multi_device(sbi)) {
		ret = submit_flush_wait(sbi, ino);
		atomic_dec(&fcc->queued_flush);

		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	cmd.ino = ino;
	init_completion(&cmd.wait);

	llist_add(&cmd.llnode, &fcc->issue_list);

	/*
	 * Update issue_list before we wake up the issue_flush thread; this
	 * smp_mb() pairs with another barrier in ___wait_event(). See the
	 * comments on waitqueue_active() for details.
	 */
	smp_mb();

	if (waitqueue_active(&fcc->flush_wait_queue))
		wake_up(&fcc->flush_wait_queue);

	if (fcc->f2fs_issue_flush) {
		wait_for_completion(&cmd.wait);
		atomic_dec(&fcc->queued_flush);
	} else {
		struct llist_node *list;

		list = llist_del_all(&fcc->issue_list);
		if (!list) {
			wait_for_completion(&cmd.wait);
			atomic_dec(&fcc->queued_flush);
		} else {
			struct flush_cmd *tmp, *next;

			ret = submit_flush_wait(sbi, ino);

			llist_for_each_entry_safe(tmp, next, list, llnode) {
				if (tmp == &cmd) {
					cmd.ret = ret;
					atomic_dec(&fcc->queued_flush);
					continue;
				}
				tmp->ret = ret;
				complete(&tmp->wait);
			}
		}
	}

	return cmd.ret;
}

int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;

	if (SM_I(sbi)->fcc_info) {
		fcc = SM_I(sbi)->fcc_info;
		if (fcc->f2fs_issue_flush)
			return 0;
		goto init_thread;
	}

	fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	atomic_set(&fcc->issued_flush, 0);
	atomic_set(&fcc->queued_flush, 0);
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->fcc_info = fcc;
	if (!test_opt(sbi, FLUSH_MERGE))
		return 0;

init_thread:
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		int err = PTR_ERR(fcc->f2fs_issue_flush);

		fcc->f2fs_issue_flush = NULL;
		return err;
	}

	return 0;
}

void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

	if (fcc && fcc->f2fs_issue_flush) {
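		/*
		 * Added note: clear f2fs_issue_flush first so concurrent
		 * f2fs_issue_flush() callers fall back to issuing the
		 * flush themselves instead of queueing for a dead thread.
		 */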
		struct task_struct *flush_thread = fcc->f2fs_issue_flush;

		fcc->f2fs_issue_flush = NULL;
		kthread_stop(flush_thread);
	}
	if (free) {
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
	}
}

int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
{
	int ret = 0, i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	for (i = 1; i < sbi->s_ndevs; i++) {
		int count = DEFAULT_RETRY_IO_COUNT;

		if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
			continue;

		do {
			ret = __submit_flush_wait(sbi, FDEV(i).bdev);
			if (ret)
				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
		} while (ret && --count);

		if (ret) {
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_FLUSH_FAIL);
			break;
		}

		spin_lock(&sbi->dev_lock);
		f2fs_clear_bit(i, (char *)&sbi->dirty_device);
		spin_unlock(&sbi->dev_lock);
	}

	return ret;
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;

		if (__is_large_section(sbi)) {
			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
			block_t valid_blocks =
				get_valid_blocks(sbi, segno, true);

			f2fs_bug_on(sbi, unlikely(!valid_blocks ||
					valid_blocks == CAP_BLKS_PER_SEC(sbi)));

			if (!IS_CURSEC(sbi, secno))
				set_bit(secno, dirty_i->dirty_secmap);
		}
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	block_t valid_blocks;

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		valid_blocks = get_valid_blocks(sbi, segno, true);
		if (valid_blocks == 0) {
			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
						dirty_i->victim_secmap);
#ifdef CONFIG_F2FS_CHECK_FS
			clear_bit(segno, SIT_I(sbi)->invalid_segmap);
#endif
		}
		if (__is_large_section(sbi)) {
			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

			if (!valid_blocks ||
					valid_blocks == CAP_BLKS_PER_SEC(sbi)) {
				clear_bit(secno, dirty_i->dirty_secmap);
				return;
			}

			if (!IS_CURSEC(sbi, secno))
				set_bit(secno, dirty_i->dirty_secmap);
		}
	}
}

/*
 * Errors such as -ENOMEM should not occur here: adding a dirty entry to
 * the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be
 * added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks, ckpt_valid_blocks;
	unsigned int usable_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, false);
	ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);

	if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
		ckpt_valid_blocks == usable_blocks)) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < usable_blocks) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

/* This moves currently empty dirty segments to prefree; it takes seglist_lock itself. */
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		if (get_valid_blocks(sbi, segno, false))
			continue;
		if (IS_CURSEG(sbi, segno))
			continue;
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
{
	int ovp_hole_segs =
		(overprovision_segments(sbi) - reserved_segments(sbi));
	block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	block_t holes[2] = {0, 0};	/* DATA and NODE */
	block_t unusable;
	struct seg_entry *se;
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		se = get_seg_entry(sbi, segno);
		if (IS_NODESEG(se->type))
			holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) -
							se->valid_blocks;
		else
			holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) -
							se->valid_blocks;
	}
	mutex_unlock(&dirty_i->seglist_lock);

	unusable = max(holes[DATA], holes[NODE]);
	if (unusable > ovp_holes)
		return unusable - ovp_holes;
	return 0;
}

int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
{
	int ovp_hole_segs =
		(overprovision_segments(sbi) - reserved_segments(sbi));

	if (unusable > F2FS_OPTION(sbi).unusable_cap)
		return -EAGAIN;
	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
		dirty_segments(sbi) > ovp_hole_segs)
		return -EAGAIN;
	return 0;
}

/* This is only used by SBI_CP_DISABLED */
static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = 0;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		if (get_valid_blocks(sbi, segno, false))
			continue;
		if (get_ckpt_valid_blocks(sbi, segno, false))
			continue;
		mutex_unlock(&dirty_i->seglist_lock);
		return segno;
	}
	mutex_unlock(&dirty_i->seglist_lock);
	return NULL_SEGNO;
}
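
/*
 * Added note: a new command starts in D_PREP on the pending list whose
 * bucket is chosen by plist_idx(len); callers hold dcc->cmd_lock.
 */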
static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t lstart,
		block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc;

	f2fs_bug_on(sbi, !len);

	pend_list = &dcc->pend_list[plist_idx(len)];

	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS, true, NULL);
	INIT_LIST_HEAD(&dc->list);
	dc->bdev = bdev;
	dc->di.lstart = lstart;
	dc->di.start = start;
	dc->di.len = len;
	dc->ref = 0;
	dc->state = D_PREP;
	dc->queued = 0;
	dc->error = 0;
	init_completion(&dc->wait);
	list_add_tail(&dc->list, pend_list);
	spin_lock_init(&dc->lock);
	dc->bio_ref = 0;
	atomic_inc(&dcc->discard_cmd_cnt);
	dcc->undiscard_blks += len;

	return dc;
}

static bool f2fs_check_discard_tree(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node *cur = rb_first_cached(&dcc->root), *next;
	struct discard_cmd *cur_dc, *next_dc;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_dc = rb_entry(cur, struct discard_cmd, rb_node);
		next_dc = rb_entry(next, struct discard_cmd, rb_node);

		if (cur_dc->di.lstart + cur_dc->di.len > next_dc->di.lstart) {
			f2fs_info(sbi, "broken discard_rbtree, "
				"cur(%u, %u) next(%u, %u)",
				cur_dc->di.lstart, cur_dc->di.len,
				next_dc->di.lstart, next_dc->di.len);
			return false;
		}
		cur = next;
	}
#endif
	return true;
}

static struct discard_cmd *__lookup_discard_cmd(struct f2fs_sb_info *sbi,
						block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node *node = dcc->root.rb_root.rb_node;
	struct discard_cmd *dc;

	while (node) {
		dc = rb_entry(node, struct discard_cmd, rb_node);

		if (blkaddr < dc->di.lstart)
			node = node->rb_left;
		else if (blkaddr >= dc->di.lstart + dc->di.len)
			node = node->rb_right;
		else
			return dc;
	}
	return NULL;
}

static struct discard_cmd *__lookup_discard_cmd_ret(struct rb_root_cached *root,
				block_t blkaddr,
				struct discard_cmd **prev_entry,
				struct discard_cmd **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent)
{
	struct rb_node **pnode = &root->rb_root.rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct discard_cmd *dc;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(&root->rb_root))
		return NULL;

	while (*pnode) {
		parent = *pnode;
		dc = rb_entry(*pnode, struct discard_cmd, rb_node);

		if (blkaddr < dc->di.lstart)
			pnode = &(*pnode)->rb_left;
		else if (blkaddr >= dc->di.lstart + dc->di.len)
			pnode = &(*pnode)->rb_right;
		else
			goto lookup_neighbors;
	}

	*insert_p = pnode;
	*insert_parent = parent;

	dc = rb_entry(parent, struct discard_cmd, rb_node);
	tmp_node = parent;
	if (parent && blkaddr > dc->di.lstart)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);

	tmp_node = parent;
	if (parent && blkaddr < dc->di.lstart)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
	return NULL;

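	/*
	 * Added note: @blkaddr falls inside an existing command, so also
	 * return both neighbors to let the caller merge in either
	 * direction.
	 */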
lookup_neighbors:
	/* lookup prev node for merging backward later */
	tmp_node = rb_prev(&dc->rb_node);
	*prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);

	/* lookup next node for merging frontward later */
	tmp_node = rb_next(&dc->rb_node);
	*next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
	return dc;
}

static void __detach_discard_cmd(struct discard_cmd_control *dcc,
							struct discard_cmd *dc)
{
	if (dc->state == D_DONE)
		atomic_sub(dc->queued, &dcc->queued_discard);

	list_del(&dc->list);
	rb_erase_cached(&dc->rb_node, &dcc->root);
	dcc->undiscard_blks -= dc->di.len;

	kmem_cache_free(discard_cmd_slab, dc);

	atomic_dec(&dcc->discard_cmd_cnt);
}

static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	unsigned long flags;

	trace_f2fs_remove_discard(dc->bdev, dc->di.start, dc->di.len);

	spin_lock_irqsave(&dc->lock, flags);
	if (dc->bio_ref) {
		spin_unlock_irqrestore(&dc->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dc->lock, flags);

	f2fs_bug_on(sbi, dc->ref);

	if (dc->error == -EOPNOTSUPP)
		dc->error = 0;

	if (dc->error)
		f2fs_info_ratelimited(sbi,
			"Issue discard(%u, %u, %u) failed, ret: %d",
			dc->di.lstart, dc->di.start, dc->di.len, dc->error);
	__detach_discard_cmd(dcc, dc);
}

static void f2fs_submit_discard_endio(struct bio *bio)
{
	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
	unsigned long flags;

	spin_lock_irqsave(&dc->lock, flags);
	if (!dc->error)
		dc->error = blk_status_to_errno(bio->bi_status);
	dc->bio_ref--;
	if (!dc->bio_ref && dc->state == D_SUBMIT) {
		dc->state = D_DONE;
		complete_all(&dc->wait);
	}
	spin_unlock_irqrestore(&dc->lock, flags);
	bio_put(bio);
}

static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
				block_t start, block_t end)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct seg_entry *sentry;
	unsigned int segno;
	block_t blk = start;
	unsigned long offset, size, *map;

	while (blk < end) {
		segno = GET_SEGNO(sbi, blk);
		sentry = get_seg_entry(sbi, segno);
		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);

		if (end < START_BLOCK(sbi, segno + 1))
			size = GET_BLKOFF_FROM_SEG0(sbi, end);
		else
			size = BLKS_PER_SEG(sbi);
		map = (unsigned long *)(sentry->cur_valid_map);
		offset = __find_rev_next_bit(map, size, offset);
		f2fs_bug_on(sbi, offset != size);
		blk = START_BLOCK(sbi, segno + 1);
	}
#endif
}

static void __init_discard_policy(struct f2fs_sb_info *sbi,
				struct discard_policy *dpolicy,
				int discard_type, unsigned int granularity)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	/* common policy */
	dpolicy->type = discard_type;
	dpolicy->sync = true;
	dpolicy->ordered = false;
	dpolicy->granularity = granularity;

	dpolicy->max_requests = dcc->max_discard_request;
	dpolicy->io_aware_gran = dcc->discard_io_aware_gran;
	dpolicy->timeout = false;

	if (discard_type == DPOLICY_BG) {
		dpolicy->min_interval = dcc->min_discard_issue_time;
		dpolicy->mid_interval = dcc->mid_discard_issue_time;
		dpolicy->max_interval = dcc->max_discard_issue_time;
		dpolicy->io_aware = true;
		dpolicy->sync = false;
		dpolicy->ordered = true;
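		/*
		 * Added note: once utilization passes discard_urgent_util,
		 * background discard stops being polite: it drops to the
		 * smallest granularity and, if work is pending, shortens
		 * its longest sleep interval.
		 */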
		if (utilization(sbi) > dcc->discard_urgent_util) {
			dpolicy->granularity = MIN_DISCARD_GRANULARITY;
			if (atomic_read(&dcc->discard_cmd_cnt))
				dpolicy->max_interval =
					dcc->min_discard_issue_time;
		}
	} else if (discard_type == DPOLICY_FORCE) {
		dpolicy->min_interval = dcc->min_discard_issue_time;
		dpolicy->mid_interval = dcc->mid_discard_issue_time;
		dpolicy->max_interval = dcc->max_discard_issue_time;
		dpolicy->io_aware = false;
	} else if (discard_type == DPOLICY_FSTRIM) {
		dpolicy->io_aware = false;
	} else if (discard_type == DPOLICY_UMOUNT) {
		dpolicy->io_aware = false;
		/* we need to issue all to keep CP_TRIMMED_FLAG */
		dpolicy->granularity = MIN_DISCARD_GRANULARITY;
		dpolicy->timeout = true;
	}
}

static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len);

#ifdef CONFIG_BLK_DEV_ZONED
static void __submit_zone_reset_cmd(struct f2fs_sb_info *sbi,
				   struct discard_cmd *dc, blk_opf_t flag,
				   struct list_head *wait_list,
				   unsigned int *issued)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct block_device *bdev = dc->bdev;
	struct bio *bio = bio_alloc(bdev, 0, REQ_OP_ZONE_RESET | flag, GFP_NOFS);
	unsigned long flags;

	trace_f2fs_issue_reset_zone(bdev, dc->di.start);

	spin_lock_irqsave(&dc->lock, flags);
	dc->state = D_SUBMIT;
	dc->bio_ref++;
	spin_unlock_irqrestore(&dc->lock, flags);

	if (issued)
		(*issued)++;

	atomic_inc(&dcc->queued_discard);
	dc->queued++;
	list_move_tail(&dc->list, wait_list);

	/* sanity check on discard range */
	__check_sit_bitmap(sbi, dc->di.lstart, dc->di.lstart + dc->di.len);

	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(dc->di.start);
	bio->bi_private = dc;
	bio->bi_end_io = f2fs_submit_discard_endio;
	submit_bio(bio);

	atomic_inc(&dcc->issued_discard);
	f2fs_update_iostat(sbi, NULL, FS_ZONE_RESET_IO, dc->di.len * F2FS_BLKSIZE);
}
#endif

/* this function is copied from blkdev_issue_discard() in block/blk-lib.c */
static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
				struct discard_policy *dpolicy,
				struct discard_cmd *dc, int *issued)
{
	struct block_device *bdev = dc->bdev;
	unsigned int max_discard_blocks =
			SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
					&(dcc->fstrim_list) : &(dcc->wait_list);
	blk_opf_t flag = dpolicy->sync ? REQ_SYNC : 0;
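	/*
	 * Added note: one command may exceed the device's discard limit;
	 * the loop below splits it into bios of at most max_discard_blocks
	 * and re-inserts whatever could not be issued this round.
	 */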
	block_t lstart, start, len, total_len;
	int err = 0;

	if (dc->state != D_PREP)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
		return 0;

#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) {
		int devi = f2fs_bdev_index(sbi, bdev);

		if (devi < 0)
			return -EINVAL;

		if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
			__submit_zone_reset_cmd(sbi, dc, flag,
						wait_list, issued);
			return 0;
		}
	}
#endif

	trace_f2fs_issue_discard(bdev, dc->di.start, dc->di.len);

	lstart = dc->di.lstart;
	start = dc->di.start;
	len = dc->di.len;
	total_len = len;

	dc->di.len = 0;

	while (total_len && *issued < dpolicy->max_requests && !err) {
		struct bio *bio = NULL;
		unsigned long flags;
		bool last = true;

		if (len > max_discard_blocks) {
			len = max_discard_blocks;
			last = false;
		}

		(*issued)++;
		if (*issued == dpolicy->max_requests)
			last = true;

		dc->di.len += len;

		if (time_to_inject(sbi, FAULT_DISCARD)) {
			err = -EIO;
		} else {
			err = __blkdev_issue_discard(bdev,
					SECTOR_FROM_BLOCK(start),
					SECTOR_FROM_BLOCK(len),
					GFP_NOFS, &bio);
		}
		if (err) {
			spin_lock_irqsave(&dc->lock, flags);
			if (dc->state == D_PARTIAL)
				dc->state = D_SUBMIT;
			spin_unlock_irqrestore(&dc->lock, flags);

			break;
		}

		f2fs_bug_on(sbi, !bio);

		/*
		 * The state must be set before submission, so that the endio
		 * cannot flip it to D_DONE right away.
		 */
		spin_lock_irqsave(&dc->lock, flags);
		if (last)
			dc->state = D_SUBMIT;
		else
			dc->state = D_PARTIAL;
		dc->bio_ref++;
		spin_unlock_irqrestore(&dc->lock, flags);

		atomic_inc(&dcc->queued_discard);
		dc->queued++;
		list_move_tail(&dc->list, wait_list);

		/* sanity check on discard range */
		__check_sit_bitmap(sbi, lstart, lstart + len);

		bio->bi_private = dc;
		bio->bi_end_io = f2fs_submit_discard_endio;
		bio->bi_opf |= flag;
		submit_bio(bio);

		atomic_inc(&dcc->issued_discard);

		f2fs_update_iostat(sbi, NULL, FS_DISCARD_IO, len * F2FS_BLKSIZE);

		lstart += len;
		start += len;
		total_len -= len;
		len = total_len;
	}

	if (!err && len) {
		dcc->undiscard_blks -= len;
		__update_discard_tree_range(sbi, bdev, lstart, start, len);
	}
	return err;
}

static void __insert_discard_cmd(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node **p = &dcc->root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct discard_cmd *dc;
	bool leftmost = true;

	/* look up rb tree to find parent node */
	while (*p) {
		parent = *p;
		dc = rb_entry(parent, struct discard_cmd, rb_node);

		if (lstart < dc->di.lstart) {
			p = &(*p)->rb_left;
		} else if (lstart >= dc->di.lstart + dc->di.len) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);

	rb_link_node(&dc->rb_node, parent, p);
	rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);
}

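/*
 * Added note: when a command's length changes, it must move to the
 * pending-list bucket that matches the new length.
 */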
static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
						struct discard_cmd *dc)
{
	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->di.len)]);
}

static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
				struct discard_cmd *dc, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_info di = dc->di;
	bool modified = false;

	if (dc->state == D_DONE || dc->di.len == 1) {
		__remove_discard_cmd(sbi, dc);
		return;
	}

	dcc->undiscard_blks -= di.len;

	if (blkaddr > di.lstart) {
		dc->di.len = blkaddr - dc->di.lstart;
		dcc->undiscard_blks += dc->di.len;
		__relocate_discard_cmd(dcc, dc);
		modified = true;
	}

	if (blkaddr < di.lstart + di.len - 1) {
		if (modified) {
			__insert_discard_cmd(sbi, dc->bdev, blkaddr + 1,
					di.start + blkaddr + 1 - di.lstart,
					di.lstart + di.len - 1 - blkaddr);
		} else {
			dc->di.lstart++;
			dc->di.len--;
			dc->di.start++;
			dcc->undiscard_blks += dc->di.len;
			__relocate_discard_cmd(dcc, dc);
		}
	}
}

static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct discard_cmd *dc;
	struct discard_info di = {0};
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int max_discard_blocks =
			SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
	block_t end = lstart + len;

	dc = __lookup_discard_cmd_ret(&dcc->root, lstart,
				&prev_dc, &next_dc, &insert_p, &insert_parent);
	if (dc)
		prev_dc = dc;

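	/*
	 * Added note: nothing in the tree covers or precedes @lstart, so
	 * seed @di with the leading part of the range, clamped at the
	 * next command (if any).
	 */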
	if (!prev_dc) {
		di.lstart = lstart;
		di.len = next_dc ? next_dc->di.lstart - lstart : len;
		di.len = min(di.len, len);
		di.start = start;
	}

	while (1) {
		struct rb_node *node;
		bool merged = false;
		struct discard_cmd *tdc = NULL;

		if (prev_dc) {
			di.lstart = prev_dc->di.lstart + prev_dc->di.len;
			if (di.lstart < lstart)
				di.lstart = lstart;
			if (di.lstart >= end)
				break;

			if (!next_dc || next_dc->di.lstart > end)
				di.len = end - di.lstart;
			else
				di.len = next_dc->di.lstart - di.lstart;
			di.start = start + di.lstart - lstart;
		}

		if (!di.len)
			goto next;

		if (prev_dc && prev_dc->state == D_PREP &&
			prev_dc->bdev == bdev &&
			__is_discard_back_mergeable(&di, &prev_dc->di,
							max_discard_blocks)) {
			prev_dc->di.len += di.len;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, prev_dc);
			di = prev_dc->di;
			tdc = prev_dc;
			merged = true;
		}

		if (next_dc && next_dc->state == D_PREP &&
			next_dc->bdev == bdev &&
			__is_discard_front_mergeable(&di, &next_dc->di,
							max_discard_blocks)) {
			next_dc->di.lstart = di.lstart;
			next_dc->di.len += di.len;
			next_dc->di.start = di.start;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, next_dc);
			if (tdc)
				__remove_discard_cmd(sbi, tdc);
			merged = true;
		}

		if (!merged)
			__insert_discard_cmd(sbi, bdev,
						di.lstart, di.start, di.len);
next:
		prev_dc = next_dc;
		if (!prev_dc)
			break;

		node = rb_next(&prev_dc->rb_node);
		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}
}

#ifdef CONFIG_BLK_DEV_ZONED
static void __queue_zone_reset_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t lblkstart,
		block_t blklen)
{
	trace_f2fs_queue_reset_zone(bdev, blkstart);

	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
	__insert_discard_cmd(sbi, bdev, lblkstart, blkstart, blklen);
	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
}
#endif

static void __queue_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	block_t lblkstart = blkstart;

	if (!f2fs_bdev_support_discard(bdev))
		return;

	trace_f2fs_queue_discard(bdev, blkstart, blklen);

	if (f2fs_is_multi_device(sbi)) {
		int devi = f2fs_target_device_index(sbi, blkstart);

		blkstart -= FDEV(devi).start_blk;
	}
	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
}

static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
		struct discard_policy *dpolicy, int *issued)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	struct discard_cmd *dc;
	struct blk_plug plug;
	bool io_interrupted = false;

	mutex_lock(&dcc->cmd_lock);
	dc = __lookup_discard_cmd_ret(&dcc->root, dcc->next_pos,
				&prev_dc, &next_dc, &insert_p, &insert_parent);
	if (!dc)
		dc = next_dc;

	blk_start_plug(&plug);

	while (dc) {
		struct rb_node *node;
		int err = 0;

		if (dc->state != D_PREP)
			goto next;

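		/*
		 * Added note: io_aware policies yield to foreground I/O;
		 * the caller maps the "nothing issued but interrupted"
		 * case to -1 so the discard thread backs off.
		 */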
		if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
			io_interrupted = true;
			break;
		}

		dcc->next_pos = dc->di.lstart + dc->di.len;
		err = __submit_discard_cmd(sbi, dpolicy, dc, issued);

		if (*issued >= dpolicy->max_requests)
			break;
next:
		node = rb_next(&dc->rb_node);
		if (err)
			__remove_discard_cmd(sbi, dc);
		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}

	blk_finish_plug(&plug);

	if (!dc)
		dcc->next_pos = 0;

	mutex_unlock(&dcc->cmd_lock);

	if (!(*issued) && io_interrupted)
		*issued = -1;
}

static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy);

static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	struct blk_plug plug;
	int i, issued;
	bool io_interrupted = false;

	if (dpolicy->timeout)
		f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);

retry:
	issued = 0;
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		if (dpolicy->timeout &&
				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
			break;

		if (i + 1 < dpolicy->granularity)
			break;

		if (i + 1 < dcc->max_ordered_discard && dpolicy->ordered) {
			__issue_discard_cmd_orderly(sbi, dpolicy, &issued);
			return issued;
		}

		pend_list = &dcc->pend_list[i];

		mutex_lock(&dcc->cmd_lock);
		if (list_empty(pend_list))
			goto next;
		if (unlikely(dcc->rbtree_check))
			f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
		blk_start_plug(&plug);
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);

			if (dpolicy->timeout &&
				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
				break;

			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
						!is_idle(sbi, DISCARD_TIME)) {
				io_interrupted = true;
				break;
			}

			__submit_discard_cmd(sbi, dpolicy, dc, &issued);

			if (issued >= dpolicy->max_requests)
				break;
		}
		blk_finish_plug(&plug);
next:
		mutex_unlock(&dcc->cmd_lock);

		if (issued >= dpolicy->max_requests || io_interrupted)
			break;
	}

	if (dpolicy->type == DPOLICY_UMOUNT && issued) {
		__wait_all_discard_cmd(sbi, dpolicy);
		goto retry;
	}

	if (!issued && io_interrupted)
		issued = -1;

	return issued;
}

static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	int i;
	bool dropped = false;

	mutex_lock(&dcc->cmd_lock);
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		pend_list = &dcc->pend_list[i];
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);
			__remove_discard_cmd(sbi, dc);
			dropped = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	return dropped;
}

void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
{
	__drop_discard_cmd(sbi);
}

static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	unsigned int len = 0;

	wait_for_completion_io(&dc->wait);
	mutex_lock(&dcc->cmd_lock);
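	/*
	 * Added note: the completion has fired, so the command must be
	 * D_DONE here; drop our reference and free it once the last
	 * waiter is gone.
	 */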
	f2fs_bug_on(sbi, dc->state != D_DONE);
	dc->ref--;
	if (!dc->ref) {
		if (!dc->error)
			len = dc->di.len;
		__remove_discard_cmd(sbi, dc);
	}
	mutex_unlock(&dcc->cmd_lock);

	return len;
}

static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy,
						block_t start, block_t end)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
					&(dcc->fstrim_list) : &(dcc->wait_list);
	struct discard_cmd *dc = NULL, *iter, *tmp;
	unsigned int trimmed = 0;

next:
	dc = NULL;

	mutex_lock(&dcc->cmd_lock);
	list_for_each_entry_safe(iter, tmp, wait_list, list) {
		if (iter->di.lstart + iter->di.len <= start ||
					end <= iter->di.lstart)
			continue;
		if (iter->di.len < dpolicy->granularity)
			continue;
		if (iter->state == D_DONE && !iter->ref) {
			wait_for_completion_io(&iter->wait);
			if (!iter->error)
				trimmed += iter->di.len;
			__remove_discard_cmd(sbi, iter);
		} else {
			iter->ref++;
			dc = iter;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (dc) {
		trimmed += __wait_one_discard_bio(sbi, dc);
		goto next;
	}

	return trimmed;
}

static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy)
{
	struct discard_policy dp;
	unsigned int discard_blks;

	if (dpolicy)
		return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);

	/* wait all */
	__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, MIN_DISCARD_GRANULARITY);
	discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
	__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, MIN_DISCARD_GRANULARITY);
	discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);

	return discard_blks;
}

/* This should be covered by global mutex, &sit_i->sentry_lock */
static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;
	bool need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	dc = __lookup_discard_cmd(sbi, blkaddr);
#ifdef CONFIG_BLK_DEV_ZONED
	if (dc && f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(dc->bdev)) {
		int devi = f2fs_bdev_index(sbi, dc->bdev);

		if (devi < 0) {
			mutex_unlock(&dcc->cmd_lock);
			return;
		}

		if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
			/* force submit zone reset */
			if (dc->state == D_PREP)
				__submit_zone_reset_cmd(sbi, dc, REQ_SYNC,
							&dcc->wait_list, NULL);
			dc->ref++;
			mutex_unlock(&dcc->cmd_lock);
			/* wait for zone reset */
			__wait_one_discard_bio(sbi, dc);
			return;
		}
	}
#endif
	if (dc) {
		if (dc->state == D_PREP) {
			__punch_discard_cmd(sbi, dc, blkaddr);
		} else {
			dc->ref++;
			need_wait = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait)
		__wait_one_discard_bio(sbi, dc);
}

void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	if (dcc && dcc->f2fs_issue_discard) {
		struct task_struct *discard_thread = dcc->f2fs_issue_discard;

		dcc->f2fs_issue_discard = NULL;
		kthread_stop(discard_thread);
	}
}

/**
 * f2fs_issue_discard_timeout() - Issue all discard cmd within UMOUNT_DISCARD_TIMEOUT
 * @sbi: the f2fs_sb_info data for discard cmd to issue
 *
 * When UMOUNT_DISCARD_TIMEOUT is exceeded, all remaining discard commands
 * will be dropped.
 *
 * Return: true if all discard commands were issued or none needed issuing,
 * otherwise false.
 */
bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_policy dpolicy;
	bool dropped;

	if (!atomic_read(&dcc->discard_cmd_cnt))
		return true;

	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
					dcc->discard_granularity);
	__issue_discard_cmd(sbi, &dpolicy);
	dropped = __drop_discard_cmd(sbi);

	/* just to make sure there are no pending discard commands */
	__wait_all_discard_cmd(sbi, NULL);

	f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
	return !dropped;
}

static int issue_discard_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	wait_queue_head_t *q = &dcc->discard_wait_queue;
	struct discard_policy dpolicy;
	unsigned int wait_ms = dcc->min_discard_issue_time;
	int issued;

	set_freezable();

	do {
		wait_event_interruptible_timeout(*q,
				kthread_should_stop() || freezing(current) ||
				dcc->discard_wake,
				msecs_to_jiffies(wait_ms));

		if (sbi->gc_mode == GC_URGENT_HIGH ||
			!f2fs_available_free_memory(sbi, DISCARD_CACHE))
			__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE,
						MIN_DISCARD_GRANULARITY);
		else
			__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
						dcc->discard_granularity);

		if (dcc->discard_wake)
			dcc->discard_wake = false;

		/* clean up pending candidates before going to sleep */
		if (atomic_read(&dcc->queued_discard))
			__wait_all_discard_cmd(sbi, NULL);

		if (try_to_freeze())
			continue;
		if (f2fs_readonly(sbi->sb))
			continue;
		if (kthread_should_stop())
			return 0;
		if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
			!atomic_read(&dcc->discard_cmd_cnt)) {
			wait_ms = dpolicy.max_interval;
			continue;
		}

		sb_start_intwrite(sbi->sb);

		issued = __issue_discard_cmd(sbi, &dpolicy);
		if (issued > 0) {
			__wait_all_discard_cmd(sbi, &dpolicy);
			wait_ms = dpolicy.min_interval;
		} else if (issued == -1) {
			wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
			if (!wait_ms)
				wait_ms = dpolicy.mid_interval;
		} else {
			wait_ms = dpolicy.max_interval;
		}
		if (!atomic_read(&dcc->discard_cmd_cnt))
			wait_ms = dpolicy.max_interval;

		sb_end_intwrite(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	sector_t sector, nr_sects;
	block_t lblkstart = blkstart;
	int devi = 0;
	u64 remainder = 0;

	if (f2fs_is_multi_device(sbi)) {
		devi = f2fs_target_device_index(sbi, blkstart);
		if (blkstart < FDEV(devi).start_blk ||
		    blkstart > FDEV(devi).end_blk) {
			f2fs_err(sbi, "Invalid block %x", blkstart);
			return -EIO;
		}
		blkstart -= FDEV(devi).start_blk;
	}

	/* For sequential zones, reset the zone write pointer */
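	/*
	 * Added note: a reset must span exactly one zone from its start;
	 * unaligned or partial requests are rejected below.
	 */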
	if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
		sector = SECTOR_FROM_BLOCK(blkstart);
		nr_sects = SECTOR_FROM_BLOCK(blklen);
		div64_u64_rem(sector, bdev_zone_sectors(bdev), &remainder);

		if (remainder || nr_sects != bdev_zone_sectors(bdev)) {
			f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
				 devi, sbi->s_ndevs ? FDEV(devi).path : "",
				 blkstart, blklen);
			return -EIO;
		}

		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) {
			trace_f2fs_issue_reset_zone(bdev, blkstart);
			return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
						sector, nr_sects, GFP_NOFS);
		}

		__queue_zone_reset_cmd(sbi, bdev, blkstart, lblkstart, blklen);
		return 0;
	}

	/* For conventional zones, use regular discard if supported */
	__queue_discard_cmd(sbi, bdev, lblkstart, blklen);
	return 0;
}
#endif

static int __issue_discard_async(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
#endif
	__queue_discard_cmd(sbi, bdev, blkstart, blklen);
	return 0;
}

static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
				block_t blkstart, block_t blklen)
{
	sector_t start = blkstart, len = 0;
	struct block_device *bdev;
	struct seg_entry *se;
	unsigned int offset;
	block_t i;
	int err = 0;

	bdev = f2fs_target_device(sbi, blkstart, NULL);

	for (i = blkstart; i < blkstart + blklen; i++, len++) {
		if (i != start) {
			struct block_device *bdev2 =
				f2fs_target_device(sbi, i, NULL);

			if (bdev2 != bdev) {
				err = __issue_discard_async(sbi, bdev,
						start, len);
				if (err)
					return err;
				bdev = bdev2;
				start = i;
				len = 0;
			}
		}

		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
		offset = GET_BLKOFF_FROM_SEG0(sbi, i);

		if (f2fs_block_unit_discard(sbi) &&
				!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	}

	if (len)
		err = __issue_discard_async(sbi, bdev, start, len);
	return err;
}

static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
							bool check_only)
{
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *discard_map = (unsigned long *)se->discard_map;
	unsigned long *dmap = SIT_I(sbi)->tmp_map;
	unsigned int start = 0, end = -1;
	bool force = (cpc->reason & CP_DISCARD);
	struct discard_entry *de = NULL;
	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
	int i;

	if (se->valid_blocks == BLKS_PER_SEG(sbi) ||
	    !f2fs_hw_support_discard(sbi) ||
	    !f2fs_block_unit_discard(sbi))
		return false;

	if (!force) {
		if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
			SM_I(sbi)->dcc_info->nr_discards >=
				SM_I(sbi)->dcc_info->max_discards)
			return false;
	}

	/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
	for (i = 0; i < entries; i++)
		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

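	/*
	 * Added note: under CP_DISCARD (force), dmap selects blocks that
	 * were invalid at the last checkpoint and not discarded yet;
	 * otherwise it selects blocks valid at the last checkpoint that
	 * have been freed since.
	 */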
~ckpt_map[i] & ~discard_map[i] : 2078 (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i]; 2079 2080 while (force || SM_I(sbi)->dcc_info->nr_discards <= 2081 SM_I(sbi)->dcc_info->max_discards) { 2082 start = __find_rev_next_bit(dmap, BLKS_PER_SEG(sbi), end + 1); 2083 if (start >= BLKS_PER_SEG(sbi)) 2084 break; 2085 2086 end = __find_rev_next_zero_bit(dmap, 2087 BLKS_PER_SEG(sbi), start + 1); 2088 if (force && start && end != BLKS_PER_SEG(sbi) && 2089 (end - start) < cpc->trim_minlen) 2090 continue; 2091 2092 if (check_only) 2093 return true; 2094 2095 if (!de) { 2096 de = f2fs_kmem_cache_alloc(discard_entry_slab, 2097 GFP_F2FS_ZERO, true, NULL); 2098 de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start); 2099 list_add_tail(&de->list, head); 2100 } 2101 2102 for (i = start; i < end; i++) 2103 __set_bit_le(i, (void *)de->discard_map); 2104 2105 SM_I(sbi)->dcc_info->nr_discards += end - start; 2106 } 2107 return false; 2108 } 2109 2110 static void release_discard_addr(struct discard_entry *entry) 2111 { 2112 list_del(&entry->list); 2113 kmem_cache_free(discard_entry_slab, entry); 2114 } 2115 2116 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi) 2117 { 2118 struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list); 2119 struct discard_entry *entry, *this; 2120 2121 /* drop caches */ 2122 list_for_each_entry_safe(entry, this, head, list) 2123 release_discard_addr(entry); 2124 } 2125 2126 /* 2127 * Should call f2fs_clear_prefree_segments after checkpoint is done. 2128 */ 2129 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi) 2130 { 2131 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 2132 unsigned int segno; 2133 2134 mutex_lock(&dirty_i->seglist_lock); 2135 for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi)) 2136 __set_test_and_free(sbi, segno, false); 2137 mutex_unlock(&dirty_i->seglist_lock); 2138 } 2139 2140 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi, 2141 struct cp_control *cpc) 2142 { 2143 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 2144 struct list_head *head = &dcc->entry_list; 2145 struct discard_entry *entry, *this; 2146 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 2147 unsigned long *prefree_map = dirty_i->dirty_segmap[PRE]; 2148 unsigned int start = 0, end = -1; 2149 unsigned int secno, start_segno; 2150 bool force = (cpc->reason & CP_DISCARD); 2151 bool section_alignment = F2FS_OPTION(sbi).discard_unit == 2152 DISCARD_UNIT_SECTION; 2153 2154 if (f2fs_lfs_mode(sbi) && __is_large_section(sbi)) 2155 section_alignment = true; 2156 2157 mutex_lock(&dirty_i->seglist_lock); 2158 2159 while (1) { 2160 int i; 2161 2162 if (section_alignment && end != -1) 2163 end--; 2164 start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1); 2165 if (start >= MAIN_SEGS(sbi)) 2166 break; 2167 end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi), 2168 start + 1); 2169 2170 if (section_alignment) { 2171 start = rounddown(start, SEGS_PER_SEC(sbi)); 2172 end = roundup(end, SEGS_PER_SEC(sbi)); 2173 } 2174 2175 for (i = start; i < end; i++) { 2176 if (test_and_clear_bit(i, prefree_map)) 2177 dirty_i->nr_dirty[PRE]--; 2178 } 2179 2180 if (!f2fs_realtime_discard_enable(sbi)) 2181 continue; 2182 2183 if (force && start >= cpc->trim_start && 2184 (end - 1) <= cpc->trim_end) 2185 continue; 2186 2187 /* Should cover 2MB zoned device for zone-based reset */ 2188 if (!f2fs_sb_has_blkzoned(sbi) && 2189 (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi))) { 2190 f2fs_issue_discard(sbi, START_BLOCK(sbi, start), 2191 (end - start) << 
sbi->log_blocks_per_seg); 2192 continue; 2193 } 2194 next: 2195 secno = GET_SEC_FROM_SEG(sbi, start); 2196 start_segno = GET_SEG_FROM_SEC(sbi, secno); 2197 if (!IS_CURSEC(sbi, secno) && 2198 !get_valid_blocks(sbi, start, true)) 2199 f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno), 2200 BLKS_PER_SEC(sbi)); 2201 2202 start = start_segno + SEGS_PER_SEC(sbi); 2203 if (start < end) 2204 goto next; 2205 else 2206 end = start - 1; 2207 } 2208 mutex_unlock(&dirty_i->seglist_lock); 2209 2210 if (!f2fs_block_unit_discard(sbi)) 2211 goto wakeup; 2212 2213 /* send small discards */ 2214 list_for_each_entry_safe(entry, this, head, list) { 2215 unsigned int cur_pos = 0, next_pos, len, total_len = 0; 2216 bool is_valid = test_bit_le(0, entry->discard_map); 2217 2218 find_next: 2219 if (is_valid) { 2220 next_pos = find_next_zero_bit_le(entry->discard_map, 2221 BLKS_PER_SEG(sbi), cur_pos); 2222 len = next_pos - cur_pos; 2223 2224 if (f2fs_sb_has_blkzoned(sbi) || 2225 (force && len < cpc->trim_minlen)) 2226 goto skip; 2227 2228 f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos, 2229 len); 2230 total_len += len; 2231 } else { 2232 next_pos = find_next_bit_le(entry->discard_map, 2233 BLKS_PER_SEG(sbi), cur_pos); 2234 } 2235 skip: 2236 cur_pos = next_pos; 2237 is_valid = !is_valid; 2238 2239 if (cur_pos < BLKS_PER_SEG(sbi)) 2240 goto find_next; 2241 2242 release_discard_addr(entry); 2243 dcc->nr_discards -= total_len; 2244 } 2245 2246 wakeup: 2247 wake_up_discard_thread(sbi, false); 2248 } 2249 2250 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi) 2251 { 2252 dev_t dev = sbi->sb->s_bdev->bd_dev; 2253 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 2254 int err = 0; 2255 2256 if (!f2fs_realtime_discard_enable(sbi)) 2257 return 0; 2258 2259 dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi, 2260 "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev)); 2261 if (IS_ERR(dcc->f2fs_issue_discard)) { 2262 err = PTR_ERR(dcc->f2fs_issue_discard); 2263 dcc->f2fs_issue_discard = NULL; 2264 } 2265 2266 return err; 2267 } 2268 2269 static int create_discard_cmd_control(struct f2fs_sb_info *sbi) 2270 { 2271 struct discard_cmd_control *dcc; 2272 int err = 0, i; 2273 2274 if (SM_I(sbi)->dcc_info) { 2275 dcc = SM_I(sbi)->dcc_info; 2276 goto init_thread; 2277 } 2278 2279 dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL); 2280 if (!dcc) 2281 return -ENOMEM; 2282 2283 dcc->discard_io_aware_gran = MAX_PLIST_NUM; 2284 dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY; 2285 dcc->max_ordered_discard = DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY; 2286 if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT) 2287 dcc->discard_granularity = BLKS_PER_SEG(sbi); 2288 else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION) 2289 dcc->discard_granularity = BLKS_PER_SEC(sbi); 2290 2291 INIT_LIST_HEAD(&dcc->entry_list); 2292 for (i = 0; i < MAX_PLIST_NUM; i++) 2293 INIT_LIST_HEAD(&dcc->pend_list[i]); 2294 INIT_LIST_HEAD(&dcc->wait_list); 2295 INIT_LIST_HEAD(&dcc->fstrim_list); 2296 mutex_init(&dcc->cmd_lock); 2297 atomic_set(&dcc->issued_discard, 0); 2298 atomic_set(&dcc->queued_discard, 0); 2299 atomic_set(&dcc->discard_cmd_cnt, 0); 2300 dcc->nr_discards = 0; 2301 dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg; 2302 dcc->max_discard_request = DEF_MAX_DISCARD_REQUEST; 2303 dcc->min_discard_issue_time = DEF_MIN_DISCARD_ISSUE_TIME; 2304 dcc->mid_discard_issue_time = DEF_MID_DISCARD_ISSUE_TIME; 2305 dcc->max_discard_issue_time = DEF_MAX_DISCARD_ISSUE_TIME; 2306 
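	/*
	 * Note (a reading aid, not upstream text): these three issue times
	 * appear to drive the discard thread's adaptive sleep interval --
	 * min after commands were issued, mid when issuing was interrupted
	 * by I/O, and max when there is nothing to do; see
	 * issue_discard_thread() above.
	 */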
dcc->discard_urgent_util = DEF_DISCARD_URGENT_UTIL; 2307 dcc->undiscard_blks = 0; 2308 dcc->next_pos = 0; 2309 dcc->root = RB_ROOT_CACHED; 2310 dcc->rbtree_check = false; 2311 2312 init_waitqueue_head(&dcc->discard_wait_queue); 2313 SM_I(sbi)->dcc_info = dcc; 2314 init_thread: 2315 err = f2fs_start_discard_thread(sbi); 2316 if (err) { 2317 kfree(dcc); 2318 SM_I(sbi)->dcc_info = NULL; 2319 } 2320 2321 return err; 2322 } 2323 2324 static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi) 2325 { 2326 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 2327 2328 if (!dcc) 2329 return; 2330 2331 f2fs_stop_discard_thread(sbi); 2332 2333 /* 2334 * Recovery can cache discard commands, so in error path of 2335 * fill_super(), it needs to give a chance to handle them. 2336 */ 2337 f2fs_issue_discard_timeout(sbi); 2338 2339 kfree(dcc); 2340 SM_I(sbi)->dcc_info = NULL; 2341 } 2342 2343 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno) 2344 { 2345 struct sit_info *sit_i = SIT_I(sbi); 2346 2347 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) { 2348 sit_i->dirty_sentries++; 2349 return false; 2350 } 2351 2352 return true; 2353 } 2354 2355 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type, 2356 unsigned int segno, int modified) 2357 { 2358 struct seg_entry *se = get_seg_entry(sbi, segno); 2359 2360 se->type = type; 2361 if (modified) 2362 __mark_sit_entry_dirty(sbi, segno); 2363 } 2364 2365 static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi, 2366 block_t blkaddr) 2367 { 2368 unsigned int segno = GET_SEGNO(sbi, blkaddr); 2369 2370 if (segno == NULL_SEGNO) 2371 return 0; 2372 return get_seg_entry(sbi, segno)->mtime; 2373 } 2374 2375 static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr, 2376 unsigned long long old_mtime) 2377 { 2378 struct seg_entry *se; 2379 unsigned int segno = GET_SEGNO(sbi, blkaddr); 2380 unsigned long long ctime = get_mtime(sbi, false); 2381 unsigned long long mtime = old_mtime ? 
old_mtime : ctime; 2382 2383 if (segno == NULL_SEGNO) 2384 return; 2385 2386 se = get_seg_entry(sbi, segno); 2387 2388 if (!se->mtime) 2389 se->mtime = mtime; 2390 else 2391 se->mtime = div_u64(se->mtime * se->valid_blocks + mtime, 2392 se->valid_blocks + 1); 2393 2394 if (ctime > SIT_I(sbi)->max_mtime) 2395 SIT_I(sbi)->max_mtime = ctime; 2396 } 2397 2398 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) 2399 { 2400 struct seg_entry *se; 2401 unsigned int segno, offset; 2402 long int new_vblocks; 2403 bool exist; 2404 #ifdef CONFIG_F2FS_CHECK_FS 2405 bool mir_exist; 2406 #endif 2407 2408 segno = GET_SEGNO(sbi, blkaddr); 2409 if (segno == NULL_SEGNO) 2410 return; 2411 2412 se = get_seg_entry(sbi, segno); 2413 new_vblocks = se->valid_blocks + del; 2414 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); 2415 2416 f2fs_bug_on(sbi, (new_vblocks < 0 || 2417 (new_vblocks > f2fs_usable_blks_in_seg(sbi, segno)))); 2418 2419 se->valid_blocks = new_vblocks; 2420 2421 /* Update valid block bitmap */ 2422 if (del > 0) { 2423 exist = f2fs_test_and_set_bit(offset, se->cur_valid_map); 2424 #ifdef CONFIG_F2FS_CHECK_FS 2425 mir_exist = f2fs_test_and_set_bit(offset, 2426 se->cur_valid_map_mir); 2427 if (unlikely(exist != mir_exist)) { 2428 f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d", 2429 blkaddr, exist); 2430 f2fs_bug_on(sbi, 1); 2431 } 2432 #endif 2433 if (unlikely(exist)) { 2434 f2fs_err(sbi, "Bitmap was wrongly set, blk:%u", 2435 blkaddr); 2436 f2fs_bug_on(sbi, 1); 2437 se->valid_blocks--; 2438 del = 0; 2439 } 2440 2441 if (f2fs_block_unit_discard(sbi) && 2442 !f2fs_test_and_set_bit(offset, se->discard_map)) 2443 sbi->discard_blks--; 2444 2445 /* 2446 * SSR should never reuse block which is checkpointed 2447 * or newly invalidated. 2448 */ 2449 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) { 2450 if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) 2451 se->ckpt_valid_blocks++; 2452 } 2453 } else { 2454 exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map); 2455 #ifdef CONFIG_F2FS_CHECK_FS 2456 mir_exist = f2fs_test_and_clear_bit(offset, 2457 se->cur_valid_map_mir); 2458 if (unlikely(exist != mir_exist)) { 2459 f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d", 2460 blkaddr, exist); 2461 f2fs_bug_on(sbi, 1); 2462 } 2463 #endif 2464 if (unlikely(!exist)) { 2465 f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u", 2466 blkaddr); 2467 f2fs_bug_on(sbi, 1); 2468 se->valid_blocks++; 2469 del = 0; 2470 } else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 2471 /* 2472 * If checkpoints are off, we must not reuse data that 2473 * was used in the previous checkpoint. If it was used 2474 * before, we must track that to know how much space we 2475 * really have. 
2476 */ 2477 if (f2fs_test_bit(offset, se->ckpt_valid_map)) { 2478 spin_lock(&sbi->stat_lock); 2479 sbi->unusable_block_count++; 2480 spin_unlock(&sbi->stat_lock); 2481 } 2482 } 2483 2484 if (f2fs_block_unit_discard(sbi) && 2485 f2fs_test_and_clear_bit(offset, se->discard_map)) 2486 sbi->discard_blks++; 2487 } 2488 if (!f2fs_test_bit(offset, se->ckpt_valid_map)) 2489 se->ckpt_valid_blocks += del; 2490 2491 __mark_sit_entry_dirty(sbi, segno); 2492 2493 /* update total number of valid blocks to be written in ckpt area */ 2494 SIT_I(sbi)->written_valid_blocks += del; 2495 2496 if (__is_large_section(sbi)) 2497 get_sec_entry(sbi, segno)->valid_blocks += del; 2498 } 2499 2500 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) 2501 { 2502 unsigned int segno = GET_SEGNO(sbi, addr); 2503 struct sit_info *sit_i = SIT_I(sbi); 2504 2505 f2fs_bug_on(sbi, addr == NULL_ADDR); 2506 if (addr == NEW_ADDR || addr == COMPRESS_ADDR) 2507 return; 2508 2509 f2fs_invalidate_internal_cache(sbi, addr); 2510 2511 /* add it into sit main buffer */ 2512 down_write(&sit_i->sentry_lock); 2513 2514 update_segment_mtime(sbi, addr, 0); 2515 update_sit_entry(sbi, addr, -1); 2516 2517 /* add it into dirty seglist */ 2518 locate_dirty_segment(sbi, segno); 2519 2520 up_write(&sit_i->sentry_lock); 2521 } 2522 2523 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr) 2524 { 2525 struct sit_info *sit_i = SIT_I(sbi); 2526 unsigned int segno, offset; 2527 struct seg_entry *se; 2528 bool is_cp = false; 2529 2530 if (!__is_valid_data_blkaddr(blkaddr)) 2531 return true; 2532 2533 down_read(&sit_i->sentry_lock); 2534 2535 segno = GET_SEGNO(sbi, blkaddr); 2536 se = get_seg_entry(sbi, segno); 2537 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); 2538 2539 if (f2fs_test_bit(offset, se->ckpt_valid_map)) 2540 is_cp = true; 2541 2542 up_read(&sit_i->sentry_lock); 2543 2544 return is_cp; 2545 } 2546 2547 static unsigned short f2fs_curseg_valid_blocks(struct f2fs_sb_info *sbi, int type) 2548 { 2549 struct curseg_info *curseg = CURSEG_I(sbi, type); 2550 2551 if (sbi->ckpt->alloc_type[type] == SSR) 2552 return BLKS_PER_SEG(sbi); 2553 return curseg->next_blkoff; 2554 } 2555 2556 /* 2557 * Calculate the number of current summary pages for writing 2558 */ 2559 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra) 2560 { 2561 int valid_sum_count = 0; 2562 int i, sum_in_page; 2563 2564 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 2565 if (sbi->ckpt->alloc_type[i] != SSR && for_ra) 2566 valid_sum_count += 2567 le16_to_cpu(F2FS_CKPT(sbi)->cur_data_blkoff[i]); 2568 else 2569 valid_sum_count += f2fs_curseg_valid_blocks(sbi, i); 2570 } 2571 2572 sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE - 2573 SUM_FOOTER_SIZE) / SUMMARY_SIZE; 2574 if (valid_sum_count <= sum_in_page) 2575 return 1; 2576 else if ((valid_sum_count - sum_in_page) <= 2577 (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE) 2578 return 2; 2579 return 3; 2580 } 2581 2582 /* 2583 * Caller should put this summary page 2584 */ 2585 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno) 2586 { 2587 if (unlikely(f2fs_cp_error(sbi))) 2588 return ERR_PTR(-EIO); 2589 return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno)); 2590 } 2591 2592 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, 2593 void *src, block_t blk_addr) 2594 { 2595 struct page *page = f2fs_grab_meta_page(sbi, blk_addr); 2596 2597 memcpy(page_address(page), src, PAGE_SIZE); 2598 set_page_dirty(page); 2599 f2fs_put_page(page, 1); 2600 } 
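
/*
 * Illustrative usage sketch for the summary-page helpers above: how a
 * caller might read one summary entry. The helper name is hypothetical
 * and the block is disabled; error handling mirrors in-tree callers.
 */
#if 0
static int example_read_summary(struct f2fs_sb_info *sbi,
				unsigned int segno, unsigned int blkoff,
				struct f2fs_summary *out)
{
	struct page *sum_page = f2fs_get_sum_page(sbi, segno);
	struct f2fs_summary_block *sum_blk;

	if (IS_ERR(sum_page))
		return PTR_ERR(sum_page);

	sum_blk = (struct f2fs_summary_block *)page_address(sum_page);
	*out = sum_blk->entries[blkoff];

	/* caller should put this summary page (it is locked and referenced) */
	f2fs_put_page(sum_page, 1);
	return 0;
}
#endif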
2601 
2602 static void write_sum_page(struct f2fs_sb_info *sbi,
2603 			struct f2fs_summary_block *sum_blk, block_t blk_addr)
2604 {
2605 	f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
2606 }
2607 
2608 static void write_current_sum_page(struct f2fs_sb_info *sbi,
2609 						int type, block_t blk_addr)
2610 {
2611 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2612 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2613 	struct f2fs_summary_block *src = curseg->sum_blk;
2614 	struct f2fs_summary_block *dst;
2615 
2616 	dst = (struct f2fs_summary_block *)page_address(page);
2617 	memset(dst, 0, PAGE_SIZE);
2618 
2619 	mutex_lock(&curseg->curseg_mutex);
2620 
2621 	down_read(&curseg->journal_rwsem);
2622 	memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
2623 	up_read(&curseg->journal_rwsem);
2624 
2625 	memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
2626 	memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
2627 
2628 	mutex_unlock(&curseg->curseg_mutex);
2629 
2630 	set_page_dirty(page);
2631 	f2fs_put_page(page, 1);
2632 }
2633 
2634 static int is_next_segment_free(struct f2fs_sb_info *sbi,
2635 				struct curseg_info *curseg, int type)
2636 {
2637 	unsigned int segno = curseg->segno + 1;
2638 	struct free_segmap_info *free_i = FREE_I(sbi);
2639 
2640 	if (segno < MAIN_SEGS(sbi) && segno % SEGS_PER_SEC(sbi))
2641 		return !test_bit(segno, free_i->free_segmap);
2642 	return 0;
2643 }
2644 
2645 /*
2646  * Find a new segment in the free segment bitmap, in the right order.
2647  * This function must always succeed; otherwise it is a BUG.
2648  */
2649 static void get_new_segment(struct f2fs_sb_info *sbi,
2650 			unsigned int *newseg, bool new_sec, bool pinning)
2651 {
2652 	struct free_segmap_info *free_i = FREE_I(sbi);
2653 	unsigned int segno, secno, zoneno;
2654 	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
2655 	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
2656 	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
2657 	bool init = true;
2658 	int i;
2659 	int ret = 0;
2660 
2661 	spin_lock(&free_i->segmap_lock);
2662 
2663 	if (!new_sec && ((*newseg + 1) % SEGS_PER_SEC(sbi))) {
2664 		segno = find_next_zero_bit(free_i->free_segmap,
2665 			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
2666 		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
2667 			goto got_it;
2668 	}
2669 
2670 	/*
2671 	 * If f2fs is formatted on zoned storage, try to get pinned sections
2672 	 * from the beginning of the device, which should be conventional zones.
2673 	 */
2674 	if (f2fs_sb_has_blkzoned(sbi)) {
2675 		segno = pinning ?
0 : max(first_zoned_segno(sbi), *newseg);
2676 		hint = GET_SEC_FROM_SEG(sbi, segno);
2677 	}
2678 
2679 find_other_zone:
2680 	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2681 	if (secno >= MAIN_SECS(sbi)) {
2682 		secno = find_first_zero_bit(free_i->free_secmap,
2683 							MAIN_SECS(sbi));
2684 		if (secno >= MAIN_SECS(sbi)) {
2685 			ret = -ENOSPC;
2686 			goto out_unlock;
2687 		}
2688 	}
2689 	segno = GET_SEG_FROM_SEC(sbi, secno);
2690 	zoneno = GET_ZONE_FROM_SEC(sbi, secno);
2691 
2692 	/* give up on finding another zone */
2693 	if (!init)
2694 		goto got_it;
2695 	if (sbi->secs_per_zone == 1)
2696 		goto got_it;
2697 	if (zoneno == old_zoneno)
2698 		goto got_it;
2699 	for (i = 0; i < NR_CURSEG_TYPE; i++)
2700 		if (CURSEG_I(sbi, i)->zone == zoneno)
2701 			break;
2702 
2703 	if (i < NR_CURSEG_TYPE) {
2704 		/* zone is in use, try another */
2705 		if (zoneno + 1 >= total_zones)
2706 			hint = 0;
2707 		else
2708 			hint = (zoneno + 1) * sbi->secs_per_zone;
2709 		init = false;
2710 		goto find_other_zone;
2711 	}
2712 got_it:
2713 	/* mark it as in use in the free segmap */
2714 	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
2715 	__set_inuse(sbi, segno);
2716 	*newseg = segno;
2717 out_unlock:
2718 	spin_unlock(&free_i->segmap_lock);
2719 
2720 	if (ret) {
2721 		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT);
2722 		f2fs_bug_on(sbi, 1);
2723 	}
2724 }
2725 
2726 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2727 {
2728 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2729 	struct summary_footer *sum_footer;
2730 	unsigned short seg_type = curseg->seg_type;
2731 
2732 	curseg->inited = true;
2733 	curseg->segno = curseg->next_segno;
2734 	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
2735 	curseg->next_blkoff = 0;
2736 	curseg->next_segno = NULL_SEGNO;
2737 
2738 	sum_footer = &(curseg->sum_blk->footer);
2739 	memset(sum_footer, 0, sizeof(struct summary_footer));
2740 
2741 	sanity_check_seg_type(sbi, seg_type);
2742 
2743 	if (IS_DATASEG(seg_type))
2744 		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
2745 	if (IS_NODESEG(seg_type))
2746 		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
2747 	__set_sit_entry_type(sbi, seg_type, curseg->segno, modified);
2748 }
2749 
2750 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
2751 {
2752 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2753 	unsigned short seg_type = curseg->seg_type;
2754 
2755 	sanity_check_seg_type(sbi, seg_type);
2756 	if (f2fs_need_rand_seg(sbi))
2757 		return get_random_u32_below(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
2758 
2759 	if (__is_large_section(sbi))
2760 		return curseg->segno;
2761 
2762 	/* the in-memory log may not be located on any segment after mount */
2763 	if (!curseg->inited)
2764 		return 0;
2765 
2766 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2767 		return 0;
2768 
2769 	if (seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type))
2770 		return 0;
2771 
2772 	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2773 		return SIT_I(sbi)->last_victim[ALLOC_NEXT];
2774 
2775 	/* find segments from 0 to reuse freed segments */
2776 	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
2777 		return 0;
2778 
2779 	return curseg->segno;
2780 }
2781 
2782 /*
2783  * Allocate a current working segment.
2784  * This function always allocates a free segment in an LFS manner.
2785  */
2786 static int new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2787 {
2788 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2789 	unsigned int segno = curseg->segno;
2790 	bool pinning = type == CURSEG_COLD_DATA_PINNED;
2791 
2792 	if (curseg->inited)
2793 		write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, segno));
2794 
2795 	segno = __get_next_segno(sbi, type);
2796 	get_new_segment(sbi, &segno, new_sec, pinning);
2797 	if (new_sec && pinning &&
2798 		!f2fs_valid_pinned_area(sbi, START_BLOCK(sbi, segno))) {
2799 		__set_free(sbi, segno);
2800 		return -EAGAIN;
2801 	}
2802 
2803 	curseg->next_segno = segno;
2804 	reset_curseg(sbi, type, 1);
2805 	curseg->alloc_type = LFS;
2806 	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
2807 		curseg->fragment_remained_chunk =
2808 			get_random_u32_inclusive(1, sbi->max_fragment_chunk);
2809 	return 0;
2810 }
2811 
2812 static int __next_free_blkoff(struct f2fs_sb_info *sbi,
2813 					int segno, block_t start)
2814 {
2815 	struct seg_entry *se = get_seg_entry(sbi, segno);
2816 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
2817 	unsigned long *target_map = SIT_I(sbi)->tmp_map;
2818 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2819 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2820 	int i;
2821 
2822 	for (i = 0; i < entries; i++)
2823 		target_map[i] = ckpt_map[i] | cur_map[i];
2824 
2825 	return __find_rev_next_zero_bit(target_map, BLKS_PER_SEG(sbi), start);
2826 }
2827 
2828 static int f2fs_find_next_ssr_block(struct f2fs_sb_info *sbi,
2829 		struct curseg_info *seg)
2830 {
2831 	return __next_free_blkoff(sbi, seg->segno, seg->next_blkoff + 1);
2832 }
2833 
2834 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
2835 {
2836 	return __next_free_blkoff(sbi, segno, 0) < BLKS_PER_SEG(sbi);
2837 }
2838 
2839 /*
2840  * This function always allocates a used segment (from the dirty seglist) in
2841  * an SSR manner, so it needs to recover the existing segment information of valid blocks.
2842  */
2843 static void change_curseg(struct f2fs_sb_info *sbi, int type)
2844 {
2845 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2846 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2847 	unsigned int new_segno = curseg->next_segno;
2848 	struct f2fs_summary_block *sum_node;
2849 	struct page *sum_page;
2850 
2851 	write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno));
2852 
2853 	__set_test_and_inuse(sbi, new_segno);
2854 
2855 	mutex_lock(&dirty_i->seglist_lock);
2856 	__remove_dirty_segment(sbi, new_segno, PRE);
2857 	__remove_dirty_segment(sbi, new_segno, DIRTY);
2858 	mutex_unlock(&dirty_i->seglist_lock);
2859 
2860 	reset_curseg(sbi, type, 1);
2861 	curseg->alloc_type = SSR;
2862 	curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0);
2863 
2864 	sum_page = f2fs_get_sum_page(sbi, new_segno);
2865 	if (IS_ERR(sum_page)) {
2866 		/* GC won't be able to use stale summary pages after cp_error */
2867 		memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE);
2868 		return;
2869 	}
2870 	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
2871 	memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
2872 	f2fs_put_page(sum_page, 1);
2873 }
2874 
2875 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2876 				int alloc_mode, unsigned long long age);
2877 
2878 static void get_atssr_segment(struct f2fs_sb_info *sbi, int type,
2879 					int target_type, int alloc_mode,
2880 					unsigned long long age)
2881 {
2882 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2883 
2884 	curseg->seg_type = target_type;
2885 
2886 	if
(get_ssr_segment(sbi, type, alloc_mode, age)) { 2887 struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno); 2888 2889 curseg->seg_type = se->type; 2890 change_curseg(sbi, type); 2891 } else { 2892 /* allocate cold segment by default */ 2893 curseg->seg_type = CURSEG_COLD_DATA; 2894 new_curseg(sbi, type, true); 2895 } 2896 stat_inc_seg_type(sbi, curseg); 2897 } 2898 2899 static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi) 2900 { 2901 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC); 2902 2903 if (!sbi->am.atgc_enabled) 2904 return; 2905 2906 f2fs_down_read(&SM_I(sbi)->curseg_lock); 2907 2908 mutex_lock(&curseg->curseg_mutex); 2909 down_write(&SIT_I(sbi)->sentry_lock); 2910 2911 get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC, CURSEG_COLD_DATA, SSR, 0); 2912 2913 up_write(&SIT_I(sbi)->sentry_lock); 2914 mutex_unlock(&curseg->curseg_mutex); 2915 2916 f2fs_up_read(&SM_I(sbi)->curseg_lock); 2917 2918 } 2919 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi) 2920 { 2921 __f2fs_init_atgc_curseg(sbi); 2922 } 2923 2924 static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type) 2925 { 2926 struct curseg_info *curseg = CURSEG_I(sbi, type); 2927 2928 mutex_lock(&curseg->curseg_mutex); 2929 if (!curseg->inited) 2930 goto out; 2931 2932 if (get_valid_blocks(sbi, curseg->segno, false)) { 2933 write_sum_page(sbi, curseg->sum_blk, 2934 GET_SUM_BLOCK(sbi, curseg->segno)); 2935 } else { 2936 mutex_lock(&DIRTY_I(sbi)->seglist_lock); 2937 __set_test_and_free(sbi, curseg->segno, true); 2938 mutex_unlock(&DIRTY_I(sbi)->seglist_lock); 2939 } 2940 out: 2941 mutex_unlock(&curseg->curseg_mutex); 2942 } 2943 2944 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi) 2945 { 2946 __f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED); 2947 2948 if (sbi->am.atgc_enabled) 2949 __f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC); 2950 } 2951 2952 static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type) 2953 { 2954 struct curseg_info *curseg = CURSEG_I(sbi, type); 2955 2956 mutex_lock(&curseg->curseg_mutex); 2957 if (!curseg->inited) 2958 goto out; 2959 if (get_valid_blocks(sbi, curseg->segno, false)) 2960 goto out; 2961 2962 mutex_lock(&DIRTY_I(sbi)->seglist_lock); 2963 __set_test_and_inuse(sbi, curseg->segno); 2964 mutex_unlock(&DIRTY_I(sbi)->seglist_lock); 2965 out: 2966 mutex_unlock(&curseg->curseg_mutex); 2967 } 2968 2969 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi) 2970 { 2971 __f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED); 2972 2973 if (sbi->am.atgc_enabled) 2974 __f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC); 2975 } 2976 2977 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type, 2978 int alloc_mode, unsigned long long age) 2979 { 2980 struct curseg_info *curseg = CURSEG_I(sbi, type); 2981 unsigned segno = NULL_SEGNO; 2982 unsigned short seg_type = curseg->seg_type; 2983 int i, cnt; 2984 bool reversed = false; 2985 2986 sanity_check_seg_type(sbi, seg_type); 2987 2988 /* f2fs_need_SSR() already forces to do this */ 2989 if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) { 2990 curseg->next_segno = segno; 2991 return 1; 2992 } 2993 2994 /* For node segments, let's do SSR more intensively */ 2995 if (IS_NODESEG(seg_type)) { 2996 if (seg_type >= CURSEG_WARM_NODE) { 2997 reversed = true; 2998 i = CURSEG_COLD_NODE; 2999 } else { 3000 i = CURSEG_HOT_NODE; 3001 } 3002 cnt = NR_CURSEG_NODE_TYPE; 3003 } else { 3004 if (seg_type >= CURSEG_WARM_DATA) { 3005 reversed = true; 3006 i = CURSEG_COLD_DATA; 3007 
} else { 3008 i = CURSEG_HOT_DATA; 3009 } 3010 cnt = NR_CURSEG_DATA_TYPE; 3011 } 3012 3013 for (; cnt-- > 0; reversed ? i-- : i++) { 3014 if (i == seg_type) 3015 continue; 3016 if (!f2fs_get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) { 3017 curseg->next_segno = segno; 3018 return 1; 3019 } 3020 } 3021 3022 /* find valid_blocks=0 in dirty list */ 3023 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 3024 segno = get_free_segment(sbi); 3025 if (segno != NULL_SEGNO) { 3026 curseg->next_segno = segno; 3027 return 1; 3028 } 3029 } 3030 return 0; 3031 } 3032 3033 static bool need_new_seg(struct f2fs_sb_info *sbi, int type) 3034 { 3035 struct curseg_info *curseg = CURSEG_I(sbi, type); 3036 3037 if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) && 3038 curseg->seg_type == CURSEG_WARM_NODE) 3039 return true; 3040 if (curseg->alloc_type == LFS && 3041 is_next_segment_free(sbi, curseg, type) && 3042 likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 3043 return true; 3044 if (!f2fs_need_SSR(sbi) || !get_ssr_segment(sbi, type, SSR, 0)) 3045 return true; 3046 return false; 3047 } 3048 3049 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, 3050 unsigned int start, unsigned int end) 3051 { 3052 struct curseg_info *curseg = CURSEG_I(sbi, type); 3053 unsigned int segno; 3054 3055 f2fs_down_read(&SM_I(sbi)->curseg_lock); 3056 mutex_lock(&curseg->curseg_mutex); 3057 down_write(&SIT_I(sbi)->sentry_lock); 3058 3059 segno = CURSEG_I(sbi, type)->segno; 3060 if (segno < start || segno > end) 3061 goto unlock; 3062 3063 if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0)) 3064 change_curseg(sbi, type); 3065 else 3066 new_curseg(sbi, type, true); 3067 3068 stat_inc_seg_type(sbi, curseg); 3069 3070 locate_dirty_segment(sbi, segno); 3071 unlock: 3072 up_write(&SIT_I(sbi)->sentry_lock); 3073 3074 if (segno != curseg->segno) 3075 f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u", 3076 type, segno, curseg->segno); 3077 3078 mutex_unlock(&curseg->curseg_mutex); 3079 f2fs_up_read(&SM_I(sbi)->curseg_lock); 3080 } 3081 3082 static int __allocate_new_segment(struct f2fs_sb_info *sbi, int type, 3083 bool new_sec, bool force) 3084 { 3085 struct curseg_info *curseg = CURSEG_I(sbi, type); 3086 unsigned int old_segno; 3087 3088 if (!force && curseg->inited && 3089 !curseg->next_blkoff && 3090 !get_valid_blocks(sbi, curseg->segno, new_sec) && 3091 !get_ckpt_valid_blocks(sbi, curseg->segno, new_sec)) 3092 return 0; 3093 3094 old_segno = curseg->segno; 3095 if (new_curseg(sbi, type, true)) 3096 return -EAGAIN; 3097 stat_inc_seg_type(sbi, curseg); 3098 locate_dirty_segment(sbi, old_segno); 3099 return 0; 3100 } 3101 3102 int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force) 3103 { 3104 int ret; 3105 3106 f2fs_down_read(&SM_I(sbi)->curseg_lock); 3107 down_write(&SIT_I(sbi)->sentry_lock); 3108 ret = __allocate_new_segment(sbi, type, true, force); 3109 up_write(&SIT_I(sbi)->sentry_lock); 3110 f2fs_up_read(&SM_I(sbi)->curseg_lock); 3111 3112 return ret; 3113 } 3114 3115 int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi) 3116 { 3117 int err; 3118 bool gc_required = true; 3119 3120 retry: 3121 f2fs_lock_op(sbi); 3122 err = f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false); 3123 f2fs_unlock_op(sbi); 3124 3125 if (f2fs_sb_has_blkzoned(sbi) && err && gc_required) { 3126 f2fs_down_write(&sbi->gc_lock); 3127 f2fs_gc_range(sbi, 0, GET_SEGNO(sbi, FDEV(0).end_blk), true, 1); 3128 f2fs_up_write(&sbi->gc_lock); 3129 3130 gc_required = false; 3131 goto 
retry; 3132 } 3133 3134 return err; 3135 } 3136 3137 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi) 3138 { 3139 int i; 3140 3141 f2fs_down_read(&SM_I(sbi)->curseg_lock); 3142 down_write(&SIT_I(sbi)->sentry_lock); 3143 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) 3144 __allocate_new_segment(sbi, i, false, false); 3145 up_write(&SIT_I(sbi)->sentry_lock); 3146 f2fs_up_read(&SM_I(sbi)->curseg_lock); 3147 } 3148 3149 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, 3150 struct cp_control *cpc) 3151 { 3152 __u64 trim_start = cpc->trim_start; 3153 bool has_candidate = false; 3154 3155 down_write(&SIT_I(sbi)->sentry_lock); 3156 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) { 3157 if (add_discard_addrs(sbi, cpc, true)) { 3158 has_candidate = true; 3159 break; 3160 } 3161 } 3162 up_write(&SIT_I(sbi)->sentry_lock); 3163 3164 cpc->trim_start = trim_start; 3165 return has_candidate; 3166 } 3167 3168 static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi, 3169 struct discard_policy *dpolicy, 3170 unsigned int start, unsigned int end) 3171 { 3172 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 3173 struct discard_cmd *prev_dc = NULL, *next_dc = NULL; 3174 struct rb_node **insert_p = NULL, *insert_parent = NULL; 3175 struct discard_cmd *dc; 3176 struct blk_plug plug; 3177 int issued; 3178 unsigned int trimmed = 0; 3179 3180 next: 3181 issued = 0; 3182 3183 mutex_lock(&dcc->cmd_lock); 3184 if (unlikely(dcc->rbtree_check)) 3185 f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi)); 3186 3187 dc = __lookup_discard_cmd_ret(&dcc->root, start, 3188 &prev_dc, &next_dc, &insert_p, &insert_parent); 3189 if (!dc) 3190 dc = next_dc; 3191 3192 blk_start_plug(&plug); 3193 3194 while (dc && dc->di.lstart <= end) { 3195 struct rb_node *node; 3196 int err = 0; 3197 3198 if (dc->di.len < dpolicy->granularity) 3199 goto skip; 3200 3201 if (dc->state != D_PREP) { 3202 list_move_tail(&dc->list, &dcc->fstrim_list); 3203 goto skip; 3204 } 3205 3206 err = __submit_discard_cmd(sbi, dpolicy, dc, &issued); 3207 3208 if (issued >= dpolicy->max_requests) { 3209 start = dc->di.lstart + dc->di.len; 3210 3211 if (err) 3212 __remove_discard_cmd(sbi, dc); 3213 3214 blk_finish_plug(&plug); 3215 mutex_unlock(&dcc->cmd_lock); 3216 trimmed += __wait_all_discard_cmd(sbi, NULL); 3217 f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT); 3218 goto next; 3219 } 3220 skip: 3221 node = rb_next(&dc->rb_node); 3222 if (err) 3223 __remove_discard_cmd(sbi, dc); 3224 dc = rb_entry_safe(node, struct discard_cmd, rb_node); 3225 3226 if (fatal_signal_pending(current)) 3227 break; 3228 } 3229 3230 blk_finish_plug(&plug); 3231 mutex_unlock(&dcc->cmd_lock); 3232 3233 return trimmed; 3234 } 3235 3236 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) 3237 { 3238 __u64 start = F2FS_BYTES_TO_BLK(range->start); 3239 __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1; 3240 unsigned int start_segno, end_segno; 3241 block_t start_block, end_block; 3242 struct cp_control cpc; 3243 struct discard_policy dpolicy; 3244 unsigned long long trimmed = 0; 3245 int err = 0; 3246 bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi); 3247 3248 if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize) 3249 return -EINVAL; 3250 3251 if (end < MAIN_BLKADDR(sbi)) 3252 goto out; 3253 3254 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) { 3255 f2fs_warn(sbi, "Found FS corruption, run fsck to fix."); 3256 return -EFSCORRUPTED; 3257 } 3258 3259 /* start/end segment number in main_area */ 3260 
start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
3261 	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
3262 						GET_SEGNO(sbi, end);
3263 	if (need_align) {
3264 		start_segno = rounddown(start_segno, SEGS_PER_SEC(sbi));
3265 		end_segno = roundup(end_segno + 1, SEGS_PER_SEC(sbi)) - 1;
3266 	}
3267 
3268 	cpc.reason = CP_DISCARD;
3269 	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
3270 	cpc.trim_start = start_segno;
3271 	cpc.trim_end = end_segno;
3272 
3273 	if (sbi->discard_blks == 0)
3274 		goto out;
3275 
3276 	f2fs_down_write(&sbi->gc_lock);
3277 	stat_inc_cp_call_count(sbi, TOTAL_CALL);
3278 	err = f2fs_write_checkpoint(sbi, &cpc);
3279 	f2fs_up_write(&sbi->gc_lock);
3280 	if (err)
3281 		goto out;
3282 
3283 	/*
3284 	 * We filed discard candidates, but we don't actually need to wait
3285 	 * for all of them, since they'll be issued at idle time by the
3286 	 * runtime discard option. The user configuration suggests runtime
3287 	 * discard or periodic fstrim is relied on instead of waiting here.
3288 	 */
3289 	if (f2fs_realtime_discard_enable(sbi))
3290 		goto out;
3291 
3292 	start_block = START_BLOCK(sbi, start_segno);
3293 	end_block = START_BLOCK(sbi, end_segno + 1);
3294 
3295 	__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
3296 	trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
3297 					start_block, end_block);
3298 
3299 	trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
3300 					start_block, end_block);
3301 out:
3302 	if (!err)
3303 		range->len = F2FS_BLK_TO_BYTES(trimmed);
3304 	return err;
3305 }
3306 
3307 int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
3308 {
3309 	switch (hint) {
3310 	case WRITE_LIFE_SHORT:
3311 		return CURSEG_HOT_DATA;
3312 	case WRITE_LIFE_EXTREME:
3313 		return CURSEG_COLD_DATA;
3314 	default:
3315 		return CURSEG_WARM_DATA;
3316 	}
3317 }
3318 
3319 static int __get_segment_type_2(struct f2fs_io_info *fio)
3320 {
3321 	if (fio->type == DATA)
3322 		return CURSEG_HOT_DATA;
3323 	else
3324 		return CURSEG_HOT_NODE;
3325 }
3326 
3327 static int __get_segment_type_4(struct f2fs_io_info *fio)
3328 {
3329 	if (fio->type == DATA) {
3330 		struct inode *inode = fio->page->mapping->host;
3331 
3332 		if (S_ISDIR(inode->i_mode))
3333 			return CURSEG_HOT_DATA;
3334 		else
3335 			return CURSEG_COLD_DATA;
3336 	} else {
3337 		if (IS_DNODE(fio->page) && is_cold_node(fio->page))
3338 			return CURSEG_WARM_NODE;
3339 		else
3340 			return CURSEG_COLD_NODE;
3341 	}
3342 }
3343 
3344 static int __get_age_segment_type(struct inode *inode, pgoff_t pgofs)
3345 {
3346 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3347 	struct extent_info ei = {};
3348 
3349 	if (f2fs_lookup_age_extent_cache(inode, pgofs, &ei)) {
3350 		if (!ei.age)
3351 			return NO_CHECK_TYPE;
3352 		if (ei.age <= sbi->hot_data_age_threshold)
3353 			return CURSEG_HOT_DATA;
3354 		if (ei.age <= sbi->warm_data_age_threshold)
3355 			return CURSEG_WARM_DATA;
3356 		return CURSEG_COLD_DATA;
3357 	}
3358 	return NO_CHECK_TYPE;
3359 }
3360 
3361 static int __get_segment_type_6(struct f2fs_io_info *fio)
3362 {
3363 	if (fio->type == DATA) {
3364 		struct inode *inode = fio->page->mapping->host;
3365 		int type;
3366 
3367 		if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
3368 			return CURSEG_COLD_DATA_PINNED;
3369 
3370 		if (page_private_gcing(fio->page)) {
3371 			if (fio->sbi->am.atgc_enabled &&
3372 				(fio->io_type == FS_DATA_IO) &&
3373 				(fio->sbi->gc_mode != GC_URGENT_HIGH) &&
3374 				__is_valid_data_blkaddr(fio->old_blkaddr) &&
3375 				!is_inode_flag_set(inode, FI_OPU_WRITE))
3376 				return CURSEG_ALL_DATA_ATGC;
3377 			else
3378 				return CURSEG_COLD_DATA;
3379 		}
3380 		if
(file_is_cold(inode) || f2fs_need_compress_data(inode)) 3381 return CURSEG_COLD_DATA; 3382 3383 type = __get_age_segment_type(inode, fio->page->index); 3384 if (type != NO_CHECK_TYPE) 3385 return type; 3386 3387 if (file_is_hot(inode) || 3388 is_inode_flag_set(inode, FI_HOT_DATA) || 3389 f2fs_is_cow_file(inode)) 3390 return CURSEG_HOT_DATA; 3391 return f2fs_rw_hint_to_seg_type(inode->i_write_hint); 3392 } else { 3393 if (IS_DNODE(fio->page)) 3394 return is_cold_node(fio->page) ? CURSEG_WARM_NODE : 3395 CURSEG_HOT_NODE; 3396 return CURSEG_COLD_NODE; 3397 } 3398 } 3399 3400 static int __get_segment_type(struct f2fs_io_info *fio) 3401 { 3402 int type = 0; 3403 3404 switch (F2FS_OPTION(fio->sbi).active_logs) { 3405 case 2: 3406 type = __get_segment_type_2(fio); 3407 break; 3408 case 4: 3409 type = __get_segment_type_4(fio); 3410 break; 3411 case 6: 3412 type = __get_segment_type_6(fio); 3413 break; 3414 default: 3415 f2fs_bug_on(fio->sbi, true); 3416 } 3417 3418 if (IS_HOT(type)) 3419 fio->temp = HOT; 3420 else if (IS_WARM(type)) 3421 fio->temp = WARM; 3422 else 3423 fio->temp = COLD; 3424 return type; 3425 } 3426 3427 static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi, 3428 struct curseg_info *seg) 3429 { 3430 /* To allocate block chunks in different sizes, use random number */ 3431 if (--seg->fragment_remained_chunk > 0) 3432 return; 3433 3434 seg->fragment_remained_chunk = 3435 get_random_u32_inclusive(1, sbi->max_fragment_chunk); 3436 seg->next_blkoff += 3437 get_random_u32_inclusive(1, sbi->max_fragment_hole); 3438 } 3439 3440 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, 3441 block_t old_blkaddr, block_t *new_blkaddr, 3442 struct f2fs_summary *sum, int type, 3443 struct f2fs_io_info *fio) 3444 { 3445 struct sit_info *sit_i = SIT_I(sbi); 3446 struct curseg_info *curseg = CURSEG_I(sbi, type); 3447 unsigned long long old_mtime; 3448 bool from_gc = (type == CURSEG_ALL_DATA_ATGC); 3449 struct seg_entry *se = NULL; 3450 bool segment_full = false; 3451 3452 f2fs_down_read(&SM_I(sbi)->curseg_lock); 3453 3454 mutex_lock(&curseg->curseg_mutex); 3455 down_write(&sit_i->sentry_lock); 3456 3457 if (from_gc) { 3458 f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO); 3459 se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr)); 3460 sanity_check_seg_type(sbi, se->type); 3461 f2fs_bug_on(sbi, IS_NODESEG(se->type)); 3462 } 3463 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); 3464 3465 f2fs_bug_on(sbi, curseg->next_blkoff >= BLKS_PER_SEG(sbi)); 3466 3467 f2fs_wait_discard_bio(sbi, *new_blkaddr); 3468 3469 curseg->sum_blk->entries[curseg->next_blkoff] = *sum; 3470 if (curseg->alloc_type == SSR) { 3471 curseg->next_blkoff = f2fs_find_next_ssr_block(sbi, curseg); 3472 } else { 3473 curseg->next_blkoff++; 3474 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) 3475 f2fs_randomize_chunk(sbi, curseg); 3476 } 3477 if (curseg->next_blkoff >= f2fs_usable_blks_in_seg(sbi, curseg->segno)) 3478 segment_full = true; 3479 stat_inc_block_count(sbi, curseg); 3480 3481 if (from_gc) { 3482 old_mtime = get_segment_mtime(sbi, old_blkaddr); 3483 } else { 3484 update_segment_mtime(sbi, old_blkaddr, 0); 3485 old_mtime = 0; 3486 } 3487 update_segment_mtime(sbi, *new_blkaddr, old_mtime); 3488 3489 /* 3490 * SIT information should be updated before segment allocation, 3491 * since SSR needs latest valid block information. 
3492 	 */
3493 	update_sit_entry(sbi, *new_blkaddr, 1);
3494 	update_sit_entry(sbi, old_blkaddr, -1);
3495 
3496 	/*
3497 	 * If the current segment is full, flush it out and replace it with a
3498 	 * new segment.
3499 	 */
3500 	if (segment_full) {
3501 		if (type == CURSEG_COLD_DATA_PINNED &&
3502 		    !((curseg->segno + 1) % sbi->segs_per_sec)) {
3503 			write_sum_page(sbi, curseg->sum_blk,
3504 					GET_SUM_BLOCK(sbi, curseg->segno));
3505 			goto skip_new_segment;
3506 		}
3507 
3508 		if (from_gc) {
3509 			get_atssr_segment(sbi, type, se->type,
3510 						AT_SSR, se->mtime);
3511 		} else {
3512 			if (need_new_seg(sbi, type))
3513 				new_curseg(sbi, type, false);
3514 			else
3515 				change_curseg(sbi, type);
3516 			stat_inc_seg_type(sbi, curseg);
3517 		}
3518 	}
3519 
3520 skip_new_segment:
3521 	/*
3522 	 * The segment's dirty status should be updated after segment
3523 	 * allocation, so we only need to update the status once, after
3524 	 * the previous segment has been closed.
3525 	 */
3526 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3527 	locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
3528 
3529 	if (IS_DATASEG(curseg->seg_type))
3530 		atomic64_inc(&sbi->allocated_data_blocks);
3531 
3532 	up_write(&sit_i->sentry_lock);
3533 
3534 	if (page && IS_NODESEG(curseg->seg_type)) {
3535 		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
3536 
3537 		f2fs_inode_chksum_set(sbi, page);
3538 	}
3539 
3540 	if (fio) {
3541 		struct f2fs_bio_info *io;
3542 
3543 		INIT_LIST_HEAD(&fio->list);
3544 		fio->in_list = 1;
3545 		io = sbi->write_io[fio->type] + fio->temp;
3546 		spin_lock(&io->io_lock);
3547 		list_add_tail(&fio->list, &io->io_list);
3548 		spin_unlock(&io->io_lock);
3549 	}
3550 
3551 	mutex_unlock(&curseg->curseg_mutex);
3552 
3553 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3554 }
3555 
3556 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
3557 				block_t blkaddr, unsigned int blkcnt)
3558 {
3559 	if (!f2fs_is_multi_device(sbi))
3560 		return;
3561 
3562 	while (1) {
3563 		unsigned int devidx = f2fs_target_device_index(sbi, blkaddr);
3564 		unsigned int blks = FDEV(devidx).end_blk - blkaddr + 1;
3565 
3566 		/* update device state for fsync */
3567 		f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO);
3568 
3569 		/* update device state for checkpoint */
3570 		if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
3571 			spin_lock(&sbi->dev_lock);
3572 			f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
3573 			spin_unlock(&sbi->dev_lock);
3574 		}
3575 
3576 		if (blkcnt <= blks)
3577 			break;
3578 		blkcnt -= blks;
3579 		blkaddr += blks;
3580 	}
3581 }
3582 
3583 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
3584 {
3585 	int type = __get_segment_type(fio);
3586 	bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
3587 
3588 	if (keep_order)
3589 		f2fs_down_read(&fio->sbi->io_order_lock);
3590 
3591 	f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
3592 			&fio->new_blkaddr, sum, type, fio);
3593 	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
3594 		f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr);
3595 
3596 	/* write out the dirty page to the bdev */
3597 	f2fs_submit_page_write(fio);
3598 
3599 	f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1);
3600 
3601 	if (keep_order)
3602 		f2fs_up_read(&fio->sbi->io_order_lock);
3603 }
3604 
3605 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3606 					enum iostat_type io_type)
3607 {
3608 	struct f2fs_io_info fio = {
3609 		.sbi = sbi,
3610 		.type = META,
3611 		.temp = HOT,
3612 		.op = REQ_OP_WRITE,
3613 		.op_flags = REQ_SYNC | REQ_META |
REQ_PRIO, 3614 .old_blkaddr = page->index, 3615 .new_blkaddr = page->index, 3616 .page = page, 3617 .encrypted_page = NULL, 3618 .in_list = 0, 3619 }; 3620 3621 if (unlikely(page->index >= MAIN_BLKADDR(sbi))) 3622 fio.op_flags &= ~REQ_META; 3623 3624 set_page_writeback(page); 3625 f2fs_submit_page_write(&fio); 3626 3627 stat_inc_meta_count(sbi, page->index); 3628 f2fs_update_iostat(sbi, NULL, io_type, F2FS_BLKSIZE); 3629 } 3630 3631 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio) 3632 { 3633 struct f2fs_summary sum; 3634 3635 set_summary(&sum, nid, 0, 0); 3636 do_write_page(&sum, fio); 3637 3638 f2fs_update_iostat(fio->sbi, NULL, fio->io_type, F2FS_BLKSIZE); 3639 } 3640 3641 void f2fs_outplace_write_data(struct dnode_of_data *dn, 3642 struct f2fs_io_info *fio) 3643 { 3644 struct f2fs_sb_info *sbi = fio->sbi; 3645 struct f2fs_summary sum; 3646 3647 f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR); 3648 if (fio->io_type == FS_DATA_IO || fio->io_type == FS_CP_DATA_IO) 3649 f2fs_update_age_extent_cache(dn); 3650 set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version); 3651 do_write_page(&sum, fio); 3652 f2fs_update_data_blkaddr(dn, fio->new_blkaddr); 3653 3654 f2fs_update_iostat(sbi, dn->inode, fio->io_type, F2FS_BLKSIZE); 3655 } 3656 3657 int f2fs_inplace_write_data(struct f2fs_io_info *fio) 3658 { 3659 int err; 3660 struct f2fs_sb_info *sbi = fio->sbi; 3661 unsigned int segno; 3662 3663 fio->new_blkaddr = fio->old_blkaddr; 3664 /* i/o temperature is needed for passing down write hints */ 3665 __get_segment_type(fio); 3666 3667 segno = GET_SEGNO(sbi, fio->new_blkaddr); 3668 3669 if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) { 3670 set_sbi_flag(sbi, SBI_NEED_FSCK); 3671 f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.", 3672 __func__, segno); 3673 err = -EFSCORRUPTED; 3674 f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE); 3675 goto drop_bio; 3676 } 3677 3678 if (f2fs_cp_error(sbi)) { 3679 err = -EIO; 3680 goto drop_bio; 3681 } 3682 3683 if (fio->meta_gc) 3684 f2fs_truncate_meta_inode_pages(sbi, fio->new_blkaddr, 1); 3685 3686 stat_inc_inplace_blocks(fio->sbi); 3687 3688 if (fio->bio && !IS_F2FS_IPU_NOCACHE(sbi)) 3689 err = f2fs_merge_page_bio(fio); 3690 else 3691 err = f2fs_submit_page_bio(fio); 3692 if (!err) { 3693 f2fs_update_device_state(fio->sbi, fio->ino, 3694 fio->new_blkaddr, 1); 3695 f2fs_update_iostat(fio->sbi, fio->page->mapping->host, 3696 fio->io_type, F2FS_BLKSIZE); 3697 } 3698 3699 return err; 3700 drop_bio: 3701 if (fio->bio && *(fio->bio)) { 3702 struct bio *bio = *(fio->bio); 3703 3704 bio->bi_status = BLK_STS_IOERR; 3705 bio_endio(bio); 3706 *(fio->bio) = NULL; 3707 } 3708 return err; 3709 } 3710 3711 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi, 3712 unsigned int segno) 3713 { 3714 int i; 3715 3716 for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) { 3717 if (CURSEG_I(sbi, i)->segno == segno) 3718 break; 3719 } 3720 return i; 3721 } 3722 3723 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, 3724 block_t old_blkaddr, block_t new_blkaddr, 3725 bool recover_curseg, bool recover_newaddr, 3726 bool from_gc) 3727 { 3728 struct sit_info *sit_i = SIT_I(sbi); 3729 struct curseg_info *curseg; 3730 unsigned int segno, old_cursegno; 3731 struct seg_entry *se; 3732 int type; 3733 unsigned short old_blkoff; 3734 unsigned char old_alloc_type; 3735 3736 segno = GET_SEGNO(sbi, new_blkaddr); 3737 se = get_seg_entry(sbi, segno); 3738 type = se->type; 3739 3740 
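	/*
	 * Reading aid (not upstream text): the lock ordering below mirrors
	 * f2fs_allocate_data_block() -- curseg_lock, then curseg_mutex, then
	 * sentry_lock -- except that curseg_lock is taken for write here,
	 * since the current segment itself may be switched.
	 */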
f2fs_down_write(&SM_I(sbi)->curseg_lock); 3741 3742 if (!recover_curseg) { 3743 /* for recovery flow */ 3744 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) { 3745 if (old_blkaddr == NULL_ADDR) 3746 type = CURSEG_COLD_DATA; 3747 else 3748 type = CURSEG_WARM_DATA; 3749 } 3750 } else { 3751 if (IS_CURSEG(sbi, segno)) { 3752 /* se->type is volatile as SSR allocation */ 3753 type = __f2fs_get_curseg(sbi, segno); 3754 f2fs_bug_on(sbi, type == NO_CHECK_TYPE); 3755 } else { 3756 type = CURSEG_WARM_DATA; 3757 } 3758 } 3759 3760 f2fs_bug_on(sbi, !IS_DATASEG(type)); 3761 curseg = CURSEG_I(sbi, type); 3762 3763 mutex_lock(&curseg->curseg_mutex); 3764 down_write(&sit_i->sentry_lock); 3765 3766 old_cursegno = curseg->segno; 3767 old_blkoff = curseg->next_blkoff; 3768 old_alloc_type = curseg->alloc_type; 3769 3770 /* change the current segment */ 3771 if (segno != curseg->segno) { 3772 curseg->next_segno = segno; 3773 change_curseg(sbi, type); 3774 } 3775 3776 curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr); 3777 curseg->sum_blk->entries[curseg->next_blkoff] = *sum; 3778 3779 if (!recover_curseg || recover_newaddr) { 3780 if (!from_gc) 3781 update_segment_mtime(sbi, new_blkaddr, 0); 3782 update_sit_entry(sbi, new_blkaddr, 1); 3783 } 3784 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) { 3785 f2fs_invalidate_internal_cache(sbi, old_blkaddr); 3786 if (!from_gc) 3787 update_segment_mtime(sbi, old_blkaddr, 0); 3788 update_sit_entry(sbi, old_blkaddr, -1); 3789 } 3790 3791 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); 3792 locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr)); 3793 3794 locate_dirty_segment(sbi, old_cursegno); 3795 3796 if (recover_curseg) { 3797 if (old_cursegno != curseg->segno) { 3798 curseg->next_segno = old_cursegno; 3799 change_curseg(sbi, type); 3800 } 3801 curseg->next_blkoff = old_blkoff; 3802 curseg->alloc_type = old_alloc_type; 3803 } 3804 3805 up_write(&sit_i->sentry_lock); 3806 mutex_unlock(&curseg->curseg_mutex); 3807 f2fs_up_write(&SM_I(sbi)->curseg_lock); 3808 } 3809 3810 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, 3811 block_t old_addr, block_t new_addr, 3812 unsigned char version, bool recover_curseg, 3813 bool recover_newaddr) 3814 { 3815 struct f2fs_summary sum; 3816 3817 set_summary(&sum, dn->nid, dn->ofs_in_node, version); 3818 3819 f2fs_do_replace_block(sbi, &sum, old_addr, new_addr, 3820 recover_curseg, recover_newaddr, false); 3821 3822 f2fs_update_data_blkaddr(dn, new_addr); 3823 } 3824 3825 void f2fs_wait_on_page_writeback(struct page *page, 3826 enum page_type type, bool ordered, bool locked) 3827 { 3828 if (PageWriteback(page)) { 3829 struct f2fs_sb_info *sbi = F2FS_P_SB(page); 3830 3831 /* submit cached LFS IO */ 3832 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type); 3833 /* submit cached IPU IO */ 3834 f2fs_submit_merged_ipu_write(sbi, NULL, page); 3835 if (ordered) { 3836 wait_on_page_writeback(page); 3837 f2fs_bug_on(sbi, locked && PageWriteback(page)); 3838 } else { 3839 wait_for_stable_page(page); 3840 } 3841 } 3842 } 3843 3844 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr) 3845 { 3846 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3847 struct page *cpage; 3848 3849 if (!f2fs_meta_inode_gc_required(inode)) 3850 return; 3851 3852 if (!__is_valid_data_blkaddr(blkaddr)) 3853 return; 3854 3855 cpage = find_lock_page(META_MAPPING(sbi), blkaddr); 3856 if (cpage) { 3857 f2fs_wait_on_page_writeback(cpage, DATA, true, true); 3858 f2fs_put_page(cpage, 1); 3859 } 3860 } 3861 3862 
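
/*
 * A minimal usage sketch (hypothetical caller): paths that keep encrypted
 * or compressed data cached in the meta inode are expected to wait for any
 * in-flight write of a block before reading it back, e.g.:
 *
 *	f2fs_wait_on_block_writeback(inode, blkaddr);
 *	(then read blkaddr)
 *
 * The range helper below applies this per block and then drops the cached
 * meta-inode pages for that range.
 */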
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr, 3863 block_t len) 3864 { 3865 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3866 block_t i; 3867 3868 if (!f2fs_meta_inode_gc_required(inode)) 3869 return; 3870 3871 for (i = 0; i < len; i++) 3872 f2fs_wait_on_block_writeback(inode, blkaddr + i); 3873 3874 f2fs_truncate_meta_inode_pages(sbi, blkaddr, len); 3875 } 3876 3877 static int read_compacted_summaries(struct f2fs_sb_info *sbi) 3878 { 3879 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 3880 struct curseg_info *seg_i; 3881 unsigned char *kaddr; 3882 struct page *page; 3883 block_t start; 3884 int i, j, offset; 3885 3886 start = start_sum_block(sbi); 3887 3888 page = f2fs_get_meta_page(sbi, start++); 3889 if (IS_ERR(page)) 3890 return PTR_ERR(page); 3891 kaddr = (unsigned char *)page_address(page); 3892 3893 /* Step 1: restore nat cache */ 3894 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); 3895 memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE); 3896 3897 /* Step 2: restore sit cache */ 3898 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); 3899 memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE); 3900 offset = 2 * SUM_JOURNAL_SIZE; 3901 3902 /* Step 3: restore summary entries */ 3903 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 3904 unsigned short blk_off; 3905 unsigned int segno; 3906 3907 seg_i = CURSEG_I(sbi, i); 3908 segno = le32_to_cpu(ckpt->cur_data_segno[i]); 3909 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]); 3910 seg_i->next_segno = segno; 3911 reset_curseg(sbi, i, 0); 3912 seg_i->alloc_type = ckpt->alloc_type[i]; 3913 seg_i->next_blkoff = blk_off; 3914 3915 if (seg_i->alloc_type == SSR) 3916 blk_off = BLKS_PER_SEG(sbi); 3917 3918 for (j = 0; j < blk_off; j++) { 3919 struct f2fs_summary *s; 3920 3921 s = (struct f2fs_summary *)(kaddr + offset); 3922 seg_i->sum_blk->entries[j] = *s; 3923 offset += SUMMARY_SIZE; 3924 if (offset + SUMMARY_SIZE <= PAGE_SIZE - 3925 SUM_FOOTER_SIZE) 3926 continue; 3927 3928 f2fs_put_page(page, 1); 3929 page = NULL; 3930 3931 page = f2fs_get_meta_page(sbi, start++); 3932 if (IS_ERR(page)) 3933 return PTR_ERR(page); 3934 kaddr = (unsigned char *)page_address(page); 3935 offset = 0; 3936 } 3937 } 3938 f2fs_put_page(page, 1); 3939 return 0; 3940 } 3941 3942 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) 3943 { 3944 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 3945 struct f2fs_summary_block *sum; 3946 struct curseg_info *curseg; 3947 struct page *new; 3948 unsigned short blk_off; 3949 unsigned int segno = 0; 3950 block_t blk_addr = 0; 3951 int err = 0; 3952 3953 /* get segment number and block addr */ 3954 if (IS_DATASEG(type)) { 3955 segno = le32_to_cpu(ckpt->cur_data_segno[type]); 3956 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type - 3957 CURSEG_HOT_DATA]); 3958 if (__exist_node_summaries(sbi)) 3959 blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type); 3960 else 3961 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type); 3962 } else { 3963 segno = le32_to_cpu(ckpt->cur_node_segno[type - 3964 CURSEG_HOT_NODE]); 3965 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type - 3966 CURSEG_HOT_NODE]); 3967 if (__exist_node_summaries(sbi)) 3968 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE, 3969 type - CURSEG_HOT_NODE); 3970 else 3971 blk_addr = GET_SUM_BLOCK(sbi, segno); 3972 } 3973 3974 new = f2fs_get_meta_page(sbi, blk_addr); 3975 if (IS_ERR(new)) 3976 return PTR_ERR(new); 3977 sum = (struct f2fs_summary_block *)page_address(new); 3978 3979 if (IS_NODESEG(type)) { 3980 if 
(__exist_node_summaries(sbi)) { 3981 struct f2fs_summary *ns = &sum->entries[0]; 3982 int i; 3983 3984 for (i = 0; i < BLKS_PER_SEG(sbi); i++, ns++) { 3985 ns->version = 0; 3986 ns->ofs_in_node = 0; 3987 } 3988 } else { 3989 err = f2fs_restore_node_summary(sbi, segno, sum); 3990 if (err) 3991 goto out; 3992 } 3993 } 3994 3995 /* set uncompleted segment to curseg */ 3996 curseg = CURSEG_I(sbi, type); 3997 mutex_lock(&curseg->curseg_mutex); 3998 3999 /* update journal info */ 4000 down_write(&curseg->journal_rwsem); 4001 memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE); 4002 up_write(&curseg->journal_rwsem); 4003 4004 memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE); 4005 memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE); 4006 curseg->next_segno = segno; 4007 reset_curseg(sbi, type, 0); 4008 curseg->alloc_type = ckpt->alloc_type[type]; 4009 curseg->next_blkoff = blk_off; 4010 mutex_unlock(&curseg->curseg_mutex); 4011 out: 4012 f2fs_put_page(new, 1); 4013 return err; 4014 } 4015 4016 static int restore_curseg_summaries(struct f2fs_sb_info *sbi) 4017 { 4018 struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal; 4019 struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal; 4020 int type = CURSEG_HOT_DATA; 4021 int err; 4022 4023 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) { 4024 int npages = f2fs_npages_for_summary_flush(sbi, true); 4025 4026 if (npages >= 2) 4027 f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages, 4028 META_CP, true); 4029 4030 /* restore for compacted data summary */ 4031 err = read_compacted_summaries(sbi); 4032 if (err) 4033 return err; 4034 type = CURSEG_HOT_NODE; 4035 } 4036 4037 if (__exist_node_summaries(sbi)) 4038 f2fs_ra_meta_pages(sbi, 4039 sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type), 4040 NR_CURSEG_PERSIST_TYPE - type, META_CP, true); 4041 4042 for (; type <= CURSEG_COLD_NODE; type++) { 4043 err = read_normal_summaries(sbi, type); 4044 if (err) 4045 return err; 4046 } 4047 4048 /* sanity check for summary blocks */ 4049 if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES || 4050 sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) { 4051 f2fs_err(sbi, "invalid journal entries nats %u sits %u", 4052 nats_in_cursum(nat_j), sits_in_cursum(sit_j)); 4053 return -EINVAL; 4054 } 4055 4056 return 0; 4057 } 4058 4059 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr) 4060 { 4061 struct page *page; 4062 unsigned char *kaddr; 4063 struct f2fs_summary *summary; 4064 struct curseg_info *seg_i; 4065 int written_size = 0; 4066 int i, j; 4067 4068 page = f2fs_grab_meta_page(sbi, blkaddr++); 4069 kaddr = (unsigned char *)page_address(page); 4070 memset(kaddr, 0, PAGE_SIZE); 4071 4072 /* Step 1: write nat cache */ 4073 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); 4074 memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE); 4075 written_size += SUM_JOURNAL_SIZE; 4076 4077 /* Step 2: write sit cache */ 4078 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); 4079 memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE); 4080 written_size += SUM_JOURNAL_SIZE; 4081 4082 /* Step 3: write summary entries */ 4083 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 4084 seg_i = CURSEG_I(sbi, i); 4085 for (j = 0; j < f2fs_curseg_valid_blocks(sbi, i); j++) { 4086 if (!page) { 4087 page = f2fs_grab_meta_page(sbi, blkaddr++); 4088 kaddr = (unsigned char *)page_address(page); 4089 memset(kaddr, 0, PAGE_SIZE); 4090 written_size = 0; 4091 } 4092 summary = (struct f2fs_summary *)(kaddr + written_size); 4093 *summary = 
static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct page *page;
	unsigned char *kaddr;
	struct f2fs_summary *summary;
	struct curseg_info *seg_i;
	int written_size = 0;
	int i, j;

	page = f2fs_grab_meta_page(sbi, blkaddr++);
	kaddr = (unsigned char *)page_address(page);
	memset(kaddr, 0, PAGE_SIZE);

	/* Step 1: write nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 2: write sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 3: write summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		seg_i = CURSEG_I(sbi, i);
		for (j = 0; j < f2fs_curseg_valid_blocks(sbi, i); j++) {
			if (!page) {
				page = f2fs_grab_meta_page(sbi, blkaddr++);
				kaddr = (unsigned char *)page_address(page);
				memset(kaddr, 0, PAGE_SIZE);
				written_size = 0;
			}
			summary = (struct f2fs_summary *)(kaddr + written_size);
			*summary = seg_i->sum_blk->entries[j];
			written_size += SUMMARY_SIZE;

			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
							SUM_FOOTER_SIZE)
				continue;

			set_page_dirty(page);
			f2fs_put_page(page, 1);
			page = NULL;
		}
	}
	if (page) {
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

static void write_normal_summaries(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	int i, end;

	if (IS_DATASEG(type))
		end = type + NR_CURSEG_DATA_TYPE;
	else
		end = type + NR_CURSEG_NODE_TYPE;

	for (i = type; i < end; i++)
		write_current_sum_page(sbi, i, blkaddr + (i - type));
}

void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
		write_compacted_summaries(sbi, start_blk);
	else
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
}

void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}

int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
					unsigned int val, int alloc)
{
	int i;

	if (type == NAT_JOURNAL) {
		for (i = 0; i < nats_in_cursum(journal); i++) {
			if (le32_to_cpu(nid_in_journal(journal, i)) == val)
				return i;
		}
		if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
			return update_nats_in_cursum(journal, 1);
	} else if (type == SIT_JOURNAL) {
		for (i = 0; i < sits_in_cursum(journal); i++)
			if (le32_to_cpu(segno_in_journal(journal, i)) == val)
				return i;
		if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
			return update_sits_in_cursum(journal, 1);
	}
	return -1;
}

static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
}

static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
					unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct page *page;
	pgoff_t src_off, dst_off;

	src_off = current_sit_addr(sbi, start);
	dst_off = next_sit_addr(sbi, src_off);

	page = f2fs_grab_meta_page(sbi, dst_off);
	seg_info_to_sit_page(sbi, page, start);

	set_page_dirty(page);
	set_to_next_sit(sit_i, start);

	return page;
}

static struct sit_entry_set *grab_sit_entry_set(void)
{
	struct sit_entry_set *ses =
			f2fs_kmem_cache_alloc(sit_entry_set_slab,
						GFP_NOFS, true, NULL);

	ses->entry_cnt = 0;
	INIT_LIST_HEAD(&ses->set_list);
	return ses;
}

static void release_sit_entry_set(struct sit_entry_set *ses)
{
	list_del(&ses->set_list);
	kmem_cache_free(sit_entry_set_slab, ses);
}

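/*
 * The sit_entry_set list is kept sorted by entry_cnt in ascending
 * order: adjust_sit_entry_set() re-inserts a set after its count
 * grows. At flush time the journal is then filled with the smallest
 * sets first, which should let as many sets as possible avoid a SIT
 * page write.
 */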
static void adjust_sit_entry_set(struct sit_entry_set *ses,
						struct list_head *head)
{
	struct sit_entry_set *next = ses;

	if (list_is_last(&ses->set_list, head))
		return;

	list_for_each_entry_continue(next, head, set_list)
		if (ses->entry_cnt <= next->entry_cnt) {
			list_move_tail(&ses->set_list, &next->set_list);
			return;
		}

	list_move_tail(&ses->set_list, head);
}

static void add_sit_entry(unsigned int segno, struct list_head *head)
{
	struct sit_entry_set *ses;
	unsigned int start_segno = START_SEGNO(segno);

	list_for_each_entry(ses, head, set_list) {
		if (ses->start_segno == start_segno) {
			ses->entry_cnt++;
			adjust_sit_entry_set(ses, head);
			return;
		}
	}

	ses = grab_sit_entry_set();

	ses->start_segno = start_segno;
	ses->entry_cnt++;
	list_add(&ses->set_list, head);
}

static void add_sits_in_set(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	struct list_head *set_list = &sm_info->sit_entry_set;
	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
	unsigned int segno;

	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
		add_sit_entry(segno, set_list);
}

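/*
 * Drain every SIT entry out of the curseg journal: each segno is
 * re-marked dirty (or re-added to the entry set) so that
 * f2fs_flush_sit_entries() writes it through a SIT page instead.
 * This is used when the journal cannot hold all dirty entries, or
 * when journaling is bypassed entirely (e.g. during resize).
 */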
static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_write(&curseg->journal_rwsem);
	for (i = 0; i < sits_in_cursum(journal); i++) {
		unsigned int segno;
		bool dirtied;

		segno = le32_to_cpu(segno_in_journal(journal, i));
		dirtied = __mark_sit_entry_dirty(sbi, segno);

		if (!dirtied)
			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
	}
	update_sits_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
}

/*
 * CP calls this function, which flushes SIT entries including sit_journal,
 * and moves prefree segs to free segs.
 */
void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct sit_entry_set *ses, *tmp;
	struct list_head *head = &SM_I(sbi)->sit_entry_set;
	bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
	struct seg_entry *se;

	down_write(&sit_i->sentry_lock);

	if (!sit_i->dirty_sentries)
		goto out;

	/*
	 * add and account sit entries of the dirty bitmap in the sit
	 * entry set temporarily
	 */
	add_sits_in_set(sbi);

	/*
	 * if there is not enough space in the journal to store dirty sit
	 * entries, remove all entries from the journal and add and account
	 * them in the sit entry set.
	 */
	if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) ||
								!to_journal)
		remove_sits_in_journal(sbi);

	/*
	 * there are two steps to flush sit entries:
	 * #1, flush sit entries to journal in current cold data summary block.
	 * #2, flush sit entries to sit page.
	 */
	list_for_each_entry_safe(ses, tmp, head, set_list) {
		struct page *page = NULL;
		struct f2fs_sit_block *raw_sit = NULL;
		unsigned int start_segno = ses->start_segno;
		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
						(unsigned long)MAIN_SEGS(sbi));
		unsigned int segno = start_segno;

		if (to_journal &&
			!__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
			to_journal = false;

		if (to_journal) {
			down_write(&curseg->journal_rwsem);
		} else {
			page = get_next_sit_page(sbi, start_segno);
			raw_sit = page_address(page);
		}

		/* flush dirty sit entries in region of current sit set */
		for_each_set_bit_from(segno, bitmap, end) {
			int offset, sit_offset;

			se = get_seg_entry(sbi, segno);
#ifdef CONFIG_F2FS_CHECK_FS
			if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
						SIT_VBLOCK_MAP_SIZE))
				f2fs_bug_on(sbi, 1);
#endif

			/* add discard candidates */
			if (!(cpc->reason & CP_DISCARD)) {
				cpc->trim_start = segno;
				add_discard_addrs(sbi, cpc, false);
			}

			if (to_journal) {
				offset = f2fs_lookup_journal_in_cursum(journal,
							SIT_JOURNAL, segno, 1);
				f2fs_bug_on(sbi, offset < 0);
				segno_in_journal(journal, offset) =
							cpu_to_le32(segno);
				seg_info_to_raw_sit(se,
					&sit_in_journal(journal, offset));
				check_block_count(sbi, segno,
					&sit_in_journal(journal, offset));
			} else {
				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
				seg_info_to_raw_sit(se,
						&raw_sit->entries[sit_offset]);
				check_block_count(sbi, segno,
						&raw_sit->entries[sit_offset]);
			}

			__clear_bit(segno, bitmap);
			sit_i->dirty_sentries--;
			ses->entry_cnt--;
		}

		if (to_journal)
			up_write(&curseg->journal_rwsem);
		else
			f2fs_put_page(page, 1);

		f2fs_bug_on(sbi, ses->entry_cnt);
		release_sit_entry_set(ses);
	}

	f2fs_bug_on(sbi, !list_empty(head));
	f2fs_bug_on(sbi, sit_i->dirty_sentries);
out:
	if (cpc->reason & CP_DISCARD) {
		__u64 trim_start = cpc->trim_start;

		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
			add_discard_addrs(sbi, cpc, false);

		cpc->trim_start = trim_start;
	}
	up_write(&sit_i->sentry_lock);

	set_prefree_as_free_segments(sbi);
}

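/*
 * Note that the per-segment bitmaps (cur_valid_map, ckpt_valid_map,
 * the CONFIG_F2FS_CHECK_FS mirror and, optionally, discard_map) are
 * carved out of a single sit_i->bitmap allocation below,
 * SIT_VBLOCK_MAP_SIZE bytes apiece, instead of being allocated one by
 * one per segment.
 */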
static int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs, start;
	char *src_bitmap, *bitmap;
	unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size;
	unsigned int discard_map = f2fs_block_unit_discard(sbi) ? 1 : 0;

	/* allocate memory for SIT information */
	sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
	if (!sit_i)
		return -ENOMEM;

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries =
		f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
					      MAIN_SEGS(sbi)),
			      GFP_KERNEL);
	if (!sit_i->sentries)
		return -ENOMEM;

	main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
								GFP_KERNEL);
	if (!sit_i->dirty_sentries_bitmap)
		return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (3 + discard_map);
#else
	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (2 + discard_map);
#endif
	sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
	if (!sit_i->bitmap)
		return -ENOMEM;

	bitmap = sit_i->bitmap;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map = bitmap;
		bitmap += SIT_VBLOCK_MAP_SIZE;

		sit_i->sentries[start].ckpt_valid_map = bitmap;
		bitmap += SIT_VBLOCK_MAP_SIZE;

#ifdef CONFIG_F2FS_CHECK_FS
		sit_i->sentries[start].cur_valid_map_mir = bitmap;
		bitmap += SIT_VBLOCK_MAP_SIZE;
#endif

		if (discard_map) {
			sit_i->sentries[start].discard_map = bitmap;
			bitmap += SIT_VBLOCK_MAP_SIZE;
		}
	}

	sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
	if (!sit_i->tmp_map)
		return -ENOMEM;

	if (__is_large_section(sbi)) {
		sit_i->sec_entries =
			f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
						      MAIN_SECS(sbi)),
				      GFP_KERNEL);
		if (!sit_i->sec_entries)
			return -ENOMEM;
	}

	/* get information related to SIT */
	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

	/* setup SIT bitmap from checkpoint pack */
	sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL);
	if (!sit_i->sit_bitmap)
		return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
	sit_i->sit_bitmap_mir = kmemdup(src_bitmap,
					sit_bitmap_size, GFP_KERNEL);
	if (!sit_i->sit_bitmap_mir)
		return -ENOMEM;

	sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
					main_bitmap_size, GFP_KERNEL);
	if (!sit_i->invalid_segmap)
		return -ENOMEM;
#endif

	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = 0;
	sit_i->bitmap_size = sit_bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
	sit_i->mounted_time = ktime_get_boottime_seconds();
	init_rwsem(&sit_i->sentry_lock);
	return 0;
}

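/*
 * free_segmap and free_secmap start out with every bit set, i.e.
 * "everything in use"; init_free_segmap() clears bits later based on
 * the SIT, so only segments proven empty end up reported as free.
 */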
static int build_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i;
	unsigned int bitmap_size, sec_bitmap_size;

	/* allocate memory for free segmap information */
	free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
	if (!free_i)
		return -ENOMEM;

	SM_I(sbi)->free_info = free_i;

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
	if (!free_i->free_segmap)
		return -ENOMEM;

	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
	free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
	if (!free_i->free_secmap)
		return -ENOMEM;

	/* set all segments as dirty temporarily */
	memset(free_i->free_segmap, 0xff, bitmap_size);
	memset(free_i->free_secmap, 0xff, sec_bitmap_size);

	/* init free segmap information */
	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
	free_i->free_segments = 0;
	free_i->free_sections = 0;
	spin_lock_init(&free_i->segmap_lock);
	return 0;
}

static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array;
	int i;

	array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE,
					sizeof(*array)), GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NO_CHECK_TYPE; i++) {
		mutex_init(&array[i].curseg_mutex);
		array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
		if (!array[i].sum_blk)
			return -ENOMEM;
		init_rwsem(&array[i].journal_rwsem);
		array[i].journal = f2fs_kzalloc(sbi,
				sizeof(struct f2fs_journal), GFP_KERNEL);
		if (!array[i].journal)
			return -ENOMEM;
		if (i < NR_PERSISTENT_LOG)
			array[i].seg_type = CURSEG_HOT_DATA + i;
		else if (i == CURSEG_COLD_DATA_PINNED)
			array[i].seg_type = CURSEG_COLD_DATA;
		else if (i == CURSEG_ALL_DATA_ATGC)
			array[i].seg_type = CURSEG_COLD_DATA;
		array[i].segno = NULL_SEGNO;
		array[i].next_blkoff = 0;
		array[i].inited = false;
	}
	return restore_curseg_summaries(sbi);
}

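/*
 * SIT state is rebuilt from two sources, in order: the on-disk SIT
 * blocks (read ahead in BIO_MAX_VECS-sized batches), then the SIT
 * journal kept in the cold data curseg, whose entries are newer and
 * override whatever the SIT blocks said.
 */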
static int build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct seg_entry *se;
	struct f2fs_sit_entry sit;
	int sit_blk_cnt = SIT_BLK_CNT(sbi);
	unsigned int i, start, end;
	unsigned int readed, start_blk = 0;
	int err = 0;
	block_t sit_valid_blocks[2] = {0, 0};

	do {
		readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
							META_SIT, true);

		start = start_blk * sit_i->sents_per_block;
		end = (start_blk + readed) * sit_i->sents_per_block;

		for (; start < end && start < MAIN_SEGS(sbi); start++) {
			struct f2fs_sit_block *sit_blk;
			struct page *page;

			se = &sit_i->sentries[start];
			page = get_current_sit_page(sbi, start);
			if (IS_ERR(page))
				return PTR_ERR(page);
			sit_blk = (struct f2fs_sit_block *)page_address(page);
			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
			f2fs_put_page(page, 1);

			err = check_block_count(sbi, start, &sit);
			if (err)
				return err;
			seg_info_from_raw_sit(se, &sit);

			if (se->type >= NR_PERSISTENT_LOG) {
				f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
							se->type, start);
				f2fs_handle_error(sbi,
						ERROR_INCONSISTENT_SUM_TYPE);
				return -EFSCORRUPTED;
			}

			sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;

			if (!f2fs_block_unit_discard(sbi))
				goto init_discard_map_done;

			/* build discard map only one time */
			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
				memset(se->discard_map, 0xff,
						SIT_VBLOCK_MAP_SIZE);
				goto init_discard_map_done;
			}
			memcpy(se->discard_map, se->cur_valid_map,
						SIT_VBLOCK_MAP_SIZE);
			sbi->discard_blks += BLKS_PER_SEG(sbi) -
						se->valid_blocks;
init_discard_map_done:
			if (__is_large_section(sbi))
				get_sec_entry(sbi, start)->valid_blocks +=
							se->valid_blocks;
		}
		start_blk += readed;
	} while (start_blk < sit_blk_cnt);

	down_read(&curseg->journal_rwsem);
	for (i = 0; i < sits_in_cursum(journal); i++) {
		unsigned int old_valid_blocks;

		start = le32_to_cpu(segno_in_journal(journal, i));
		if (start >= MAIN_SEGS(sbi)) {
			f2fs_err(sbi, "Wrong journal entry on segno %u",
					start);
			err = -EFSCORRUPTED;
			f2fs_handle_error(sbi, ERROR_CORRUPTED_JOURNAL);
			break;
		}

		se = &sit_i->sentries[start];
		sit = sit_in_journal(journal, i);

		old_valid_blocks = se->valid_blocks;

		sit_valid_blocks[SE_PAGETYPE(se)] -= old_valid_blocks;

		err = check_block_count(sbi, start, &sit);
		if (err)
			break;
		seg_info_from_raw_sit(se, &sit);

		if (se->type >= NR_PERSISTENT_LOG) {
			f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
							se->type, start);
			err = -EFSCORRUPTED;
			f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
			break;
		}

		sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;

		if (f2fs_block_unit_discard(sbi)) {
			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
				memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
			} else {
				memcpy(se->discard_map, se->cur_valid_map,
							SIT_VBLOCK_MAP_SIZE);
				sbi->discard_blks += old_valid_blocks;
				sbi->discard_blks -= se->valid_blocks;
			}
		}

		if (__is_large_section(sbi)) {
			get_sec_entry(sbi, start)->valid_blocks +=
							se->valid_blocks;
			get_sec_entry(sbi, start)->valid_blocks -=
							old_valid_blocks;
		}
	}
	up_read(&curseg->journal_rwsem);

	if (err)
		return err;

	if (sit_valid_blocks[NODE] != valid_node_count(sbi)) {
		f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
			 sit_valid_blocks[NODE], valid_node_count(sbi));
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NODE_COUNT);
		return -EFSCORRUPTED;
	}

	if (sit_valid_blocks[DATA] + sit_valid_blocks[NODE] >
				valid_user_blocks(sbi)) {
		f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u",
			 sit_valid_blocks[DATA], sit_valid_blocks[NODE],
			 valid_user_blocks(sbi));
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_BLOCK_COUNT);
		return -EFSCORRUPTED;
	}

	return 0;
}

static void init_free_segmap(struct f2fs_sb_info *sbi)
{
	unsigned int start;
	int type;
	struct seg_entry *sentry;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		if (f2fs_usable_blks_in_seg(sbi, start) == 0)
			continue;
		sentry = get_seg_entry(sbi, start);
		if (!sentry->valid_blocks)
			__set_free(sbi, start);
		else
			SIT_I(sbi)->written_valid_blocks +=
						sentry->valid_blocks;
	}

	/* set the current segments as in use */
	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
		struct curseg_info *curseg_t = CURSEG_I(sbi, type);

		__set_test_and_inuse(sbi, curseg_t->segno);
	}
}

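/*
 * A segment is considered dirty only when it is partially valid:
 * fully valid and completely empty segments are skipped below, and a
 * valid count above the usable count is treated as a bug.
 */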
static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno = 0, offset = 0, secno;
	block_t valid_blocks, usable_blks_in_seg;

	while (1) {
		/* find dirty segment based on free segmap */
		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
		if (segno >= MAIN_SEGS(sbi))
			break;
		offset = segno + 1;
		valid_blocks = get_valid_blocks(sbi, segno, false);
		usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
		if (valid_blocks == usable_blks_in_seg || !valid_blocks)
			continue;
		if (valid_blocks > usable_blks_in_seg) {
			f2fs_bug_on(sbi, 1);
			continue;
		}
		mutex_lock(&dirty_i->seglist_lock);
		__locate_dirty_segment(sbi, segno, DIRTY);
		mutex_unlock(&dirty_i->seglist_lock);
	}

	if (!__is_large_section(sbi))
		return;

	mutex_lock(&dirty_i->seglist_lock);
	for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
		valid_blocks = get_valid_blocks(sbi, segno, true);
		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (!valid_blocks || valid_blocks == CAP_BLKS_PER_SEC(sbi))
			continue;
		if (IS_CURSEC(sbi, secno))
			continue;
		set_bit(secno, dirty_i->dirty_secmap);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
	if (!dirty_i->victim_secmap)
		return -ENOMEM;

	dirty_i->pinned_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
	if (!dirty_i->pinned_secmap)
		return -ENOMEM;

	dirty_i->pinned_secmap_cnt = 0;
	dirty_i->enable_pin_section = true;
	return 0;
}

static int build_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i;
	unsigned int bitmap_size, i;

	/* allocate memory for dirty segments list information */
	dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
								GFP_KERNEL);
	if (!dirty_i)
		return -ENOMEM;

	SM_I(sbi)->dirty_info = dirty_i;
	mutex_init(&dirty_i->seglist_lock);

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));

	for (i = 0; i < NR_DIRTY_TYPE; i++) {
		dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
								GFP_KERNEL);
		if (!dirty_i->dirty_segmap[i])
			return -ENOMEM;
	}

	if (__is_large_section(sbi)) {
		bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
		dirty_i->dirty_secmap = f2fs_kvzalloc(sbi,
						bitmap_size, GFP_KERNEL);
		if (!dirty_i->dirty_secmap)
			return -ENOMEM;
	}

	init_dirty_segmap(sbi);
	return init_victim_secmap(sbi);
}

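/*
 * Example of the consistency rule below: with next_blkoff == 5,
 * block 5 must be unused in cur_valid_map; for an LFS curseg, blocks
 * 6..BLKS_PER_SEG()-1 must be unused as well, since LFS allocation
 * only ever appends.
 */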
static int sanity_check_curseg(struct f2fs_sb_info *sbi)
{
	int i;

	/*
	 * In LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
	 * In LFS curseg, all blkaddr after .next_blkoff should be unused.
	 */
	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
		unsigned int blkofs = curseg->next_blkoff;

		if (f2fs_sb_has_readonly(sbi) &&
			i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE)
			continue;

		sanity_check_seg_type(sbi, curseg->seg_type);

		if (curseg->alloc_type != LFS && curseg->alloc_type != SSR) {
			f2fs_err(sbi,
				 "Current segment has invalid alloc_type:%d",
				 curseg->alloc_type);
			f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
			return -EFSCORRUPTED;
		}

		if (f2fs_test_bit(blkofs, se->cur_valid_map))
			goto out;

		if (curseg->alloc_type == SSR)
			continue;

		for (blkofs += 1; blkofs < BLKS_PER_SEG(sbi); blkofs++) {
			if (!f2fs_test_bit(blkofs, se->cur_valid_map))
				continue;
out:
			f2fs_err(sbi,
				 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
				 i, curseg->segno, curseg->alloc_type,
				 curseg->next_blkoff, blkofs);
			f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
			return -EFSCORRUPTED;
		}
	}
	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED

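/*
 * Three outcomes are possible for each sequential zone below:
 *  - the write pointer agrees with the last valid block (or the zone
 *    is already full): nothing to do;
 *  - the zone holds no valid blocks but has a non-zero write pointer:
 *    reset the zone via a zone discard;
 *  - valid blocks and the write pointer disagree: finish (or zero out)
 *    the zone so it is not written again until it gets discarded.
 */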
static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
				    struct f2fs_dev_info *fdev,
				    struct blk_zone *zone)
{
	unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno;
	block_t zone_block, wp_block, last_valid_block;
	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
	int i, s, b, ret;
	struct seg_entry *se;

	if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
		return 0;

	wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block);
	wp_segno = GET_SEGNO(sbi, wp_block);
	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
	zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
	zone_segno = GET_SEGNO(sbi, zone_block);
	zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);

	if (zone_segno >= MAIN_SEGS(sbi))
		return 0;

	/*
	 * Skip check of zones cursegs point to, since
	 * fix_curseg_write_pointer() checks them.
	 */
	for (i = 0; i < NO_CHECK_TYPE; i++)
		if (zone_secno == GET_SEC_FROM_SEG(sbi,
						   CURSEG_I(sbi, i)->segno))
			return 0;

	/*
	 * Get last valid block of the zone.
	 */
	last_valid_block = zone_block - 1;
	for (s = sbi->segs_per_sec - 1; s >= 0; s--) {
		segno = zone_segno + s;
		se = get_seg_entry(sbi, segno);
		for (b = sbi->blocks_per_seg - 1; b >= 0; b--)
			if (f2fs_test_bit(b, se->cur_valid_map)) {
				last_valid_block = START_BLOCK(sbi, segno) + b;
				break;
			}
		if (last_valid_block >= zone_block)
			break;
	}

	/*
	 * The write pointer matches with the valid blocks or
	 * already points to the end of the zone.
	 */
	if ((last_valid_block + 1 == wp_block) ||
			(zone->wp == zone->start + zone->len))
		return 0;

	if (last_valid_block + 1 == zone_block) {
		/*
		 * If there is no valid block in the zone and if write pointer
		 * is not at zone start, reset the write pointer.
		 */
		f2fs_notice(sbi,
			    "Zone without valid block has non-zero write "
			    "pointer. Reset the write pointer: wp[0x%x,0x%x]",
			    wp_segno, wp_blkoff);
		ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
					zone->len >> log_sectors_per_block);
		if (ret)
			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
				 fdev->path, ret);

		return ret;
	}

	/*
	 * If there are valid blocks and the write pointer doesn't
	 * match with them, we need to report the inconsistency and
	 * fill the zone till the end to close the zone. This inconsistency
	 * does not cause write error because the zone will not be selected
	 * for write operation until it gets discarded.
	 */
	f2fs_notice(sbi, "Valid blocks are not aligned with write pointer: "
		    "valid block[0x%x,0x%x] wp[0x%x,0x%x]",
		    GET_SEGNO(sbi, last_valid_block),
		    GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
		    wp_segno, wp_blkoff);

	ret = blkdev_zone_mgmt(fdev->bdev, REQ_OP_ZONE_FINISH,
				zone->start, zone->len, GFP_NOFS);
	if (ret == -EOPNOTSUPP) {
		ret = blkdev_issue_zeroout(fdev->bdev, zone->wp,
					zone->len - (zone->wp - zone->start),
					GFP_NOFS, 0);
		if (ret)
			f2fs_err(sbi, "Fill up zone failed: %s (errno=%d)",
				 fdev->path, ret);
	} else if (ret) {
		f2fs_err(sbi, "Finishing zone failed: %s (errno=%d)",
			 fdev->path, ret);
	}

	return ret;
}

static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
						  block_t zone_blkaddr)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (!bdev_is_zoned(FDEV(i).bdev))
			continue;
		if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
				zone_blkaddr <= FDEV(i).end_blk))
			return &FDEV(i);
	}

	return NULL;
}

static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
			      void *data)
{
	memcpy(data, zone, sizeof(struct blk_zone));
	return 0;
}

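/*
 * Recovery strategy when a curseg does not sit exactly on its zone's
 * write pointer: abandon the old section, allocate a fresh one, then
 * verify both the old zone and the newly assigned one.
 */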
static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *cs = CURSEG_I(sbi, type);
	struct f2fs_dev_info *zbd;
	struct blk_zone zone;
	unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
	block_t cs_zone_block, wp_block;
	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
	sector_t zone_sector;
	int err;

	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));

	zbd = get_target_zoned_dev(sbi, cs_zone_block);
	if (!zbd)
		return 0;

	/* report zone for the sector the curseg points to */
	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
						<< log_sectors_per_block;
	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
				  report_one_zone_cb, &zone);
	if (err != 1) {
		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
			 zbd->path, err);
		return err;
	}

	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
		return 0;

	wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
	wp_segno = GET_SEGNO(sbi, wp_block);
	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
	wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);

	if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
		wp_sector_off == 0)
		return 0;

	f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
		    "curseg[0x%x,0x%x] wp[0x%x,0x%x]",
		    type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff);

	f2fs_notice(sbi, "Assign new section to curseg[%d]: "
		    "curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff);

	f2fs_allocate_new_section(sbi, type, true);

	/* check consistency of the zone the curseg pointed to */
	if (check_zone_write_pointer(sbi, zbd, &zone))
		return -EIO;

	/* check newly assigned zone */
	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));

	zbd = get_target_zoned_dev(sbi, cs_zone_block);
	if (!zbd)
		return 0;

	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
						<< log_sectors_per_block;
	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
				  report_one_zone_cb, &zone);
	if (err != 1) {
		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
			 zbd->path, err);
		return err;
	}

	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
		return 0;

	if (zone.wp != zone.start) {
		f2fs_notice(sbi,
			    "New zone for curseg[%d] is not yet discarded. "
			    "Reset the zone: curseg[0x%x,0x%x]",
			    type, cs->segno, cs->next_blkoff);
		err = __f2fs_issue_discard_zone(sbi, zbd->bdev, cs_zone_block,
					zone.len >> log_sectors_per_block);
		if (err) {
			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
				 zbd->path, err);
			return err;
		}
	}

	return 0;
}

int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
{
	int i, ret;

	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
		ret = fix_curseg_write_pointer(sbi, i);
		if (ret)
			return ret;
	}

	return 0;
}

struct check_zone_write_pointer_args {
	struct f2fs_sb_info *sbi;
	struct f2fs_dev_info *fdev;
};

static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
				       void *data)
{
	struct check_zone_write_pointer_args *args;

	args = (struct check_zone_write_pointer_args *)data;

	return check_zone_write_pointer(args->sbi, args->fdev, zone);
}

int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
{
	int i, ret;
	struct check_zone_write_pointer_args args;

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (!bdev_is_zoned(FDEV(i).bdev))
			continue;

		args.sbi = sbi;
		args.fdev = &FDEV(i);
		ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES,
					  check_zone_write_pointer_cb, &args);
		if (ret < 0)
			return ret;
	}

	return 0;
}

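/*
 * Illustration with made-up numbers: if a section's zone capacity ends
 * eight blocks into a segment's address range, that segment reports
 * eight usable blocks (sec_cap_blkaddr - seg_start), a segment that
 * starts at or past sec_cap_blkaddr reports zero, and every earlier
 * segment reports a full BLKS_PER_SEG(sbi).
 */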
/*
 * Return the number of usable blocks in a segment. The number of blocks
 * returned is always equal to the number of blocks in a segment for
 * segments fully contained within a sequential zone capacity or a
 * conventional zone. For segments partially contained in a sequential
 * zone capacity, the number of usable blocks up to the zone capacity
 * is returned. 0 is returned in all other cases.
 */
static inline unsigned int f2fs_usable_zone_blks_in_seg(
			struct f2fs_sb_info *sbi, unsigned int segno)
{
	block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
	unsigned int secno;

	if (!sbi->unusable_blocks_per_sec)
		return BLKS_PER_SEG(sbi);

	secno = GET_SEC_FROM_SEG(sbi, segno);
	seg_start = START_BLOCK(sbi, segno);
	sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
	sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi);

	/*
	 * If segment starts before zone capacity and spans beyond
	 * zone capacity, then usable blocks are from seg start to
	 * zone capacity. If the segment starts after the zone capacity,
	 * then there are no usable blocks.
	 */
	if (seg_start >= sec_cap_blkaddr)
		return 0;
	if (seg_start + BLKS_PER_SEG(sbi) > sec_cap_blkaddr)
		return sec_cap_blkaddr - seg_start;

	return BLKS_PER_SEG(sbi);
}
#else
int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
{
	return 0;
}

int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
{
	return 0;
}

static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi,
							unsigned int segno)
{
	return 0;
}

#endif
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	if (f2fs_sb_has_blkzoned(sbi))
		return f2fs_usable_zone_blks_in_seg(sbi, segno);

	return BLKS_PER_SEG(sbi);
}

unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	if (f2fs_sb_has_blkzoned(sbi))
		return CAP_SEGS_PER_SEC(sbi);

	return SEGS_PER_SEC(sbi);
}

/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	down_write(&sit_i->sentry_lock);

	sit_i->min_mtime = ULLONG_MAX;

	for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
		unsigned int i;
		unsigned long long mtime = 0;

		for (i = 0; i < SEGS_PER_SEC(sbi); i++)
			mtime += get_seg_entry(sbi, segno + i)->mtime;

		mtime = div_u64(mtime, SEGS_PER_SEC(sbi));

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi, false);
	sit_i->dirty_max_mtime = 0;
	up_write(&sit_i->sentry_lock);
}

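/*
 * The build order below matters: cursegs must be restored before
 * build_sit_entries() can read the SIT journal they hold, the free
 * segmap only becomes meaningful once init_free_segmap() has replayed
 * the SIT, and sanity_check_curseg() runs last, when SIT and curseg
 * state can finally be cross-checked.
 */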
int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	sm_info->rec_prefree_segments = sm_info->main_segments *
					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;

	if (!f2fs_lfs_mode(sbi))
		sm_info->ipu_policy = BIT(F2FS_IPU_FSYNC);
	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
	sm_info->min_seq_blocks = BLKS_PER_SEG(sbi);
	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
	sm_info->min_ssr_sections = reserved_sections(sbi);

	INIT_LIST_HEAD(&sm_info->sit_entry_set);

	init_f2fs_rwsem(&sm_info->curseg_lock);

	err = f2fs_create_flush_cmd_control(sbi);
	if (err)
		return err;

	err = create_discard_cmd_control(sbi);
	if (err)
		return err;

	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* reinit free segmap based on SIT */
	err = build_sit_entries(sbi);
	if (err)
		return err;

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	err = sanity_check_curseg(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}

static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kvfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}

static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	kvfree(dirty_i->pinned_secmap);
	kvfree(dirty_i->victim_secmap);
}

static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	if (__is_large_section(sbi)) {
		mutex_lock(&dirty_i->seglist_lock);
		kvfree(dirty_i->dirty_secmap);
		mutex_unlock(&dirty_i->seglist_lock);
	}

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}

static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
	SM_I(sbi)->curseg_array = NULL;
	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		kfree(array[i].sum_blk);
		kfree(array[i].journal);
	}
	kfree(array);
}

static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = SM_I(sbi)->free_info;

	if (!free_i)
		return;
	SM_I(sbi)->free_info = NULL;
	kvfree(free_i->free_segmap);
	kvfree(free_i->free_secmap);
	kfree(free_i);
}

static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);

	if (!sit_i)
		return;

	if (sit_i->sentries)
		kvfree(sit_i->bitmap);
	kfree(sit_i->tmp_map);

	kvfree(sit_i->sentries);
	kvfree(sit_i->sec_entries);
	kvfree(sit_i->dirty_sentries_bitmap);

	SM_I(sbi)->sit_info = NULL;
	kvfree(sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	kvfree(sit_i->sit_bitmap_mir);
	kvfree(sit_i->invalid_segmap);
#endif
	kfree(sit_i);
}

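/*
 * Teardown runs in roughly the reverse order of
 * f2fs_build_segment_manager(): the flush and discard controls go
 * first, then the dirty, curseg, free and SIT structures.
 */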
void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);

	if (!sm_info)
		return;
	f2fs_destroy_flush_cmd_control(sbi, true);
	destroy_discard_cmd_control(sbi);
	destroy_dirty_segmap(sbi);
	destroy_curseg(sbi);
	destroy_free_segmap(sbi);
	destroy_sit_info(sbi);
	sbi->sm_info = NULL;
	kfree(sm_info);
}

int __init f2fs_create_segment_manager_caches(void)
{
	discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry",
			sizeof(struct discard_entry));
	if (!discard_entry_slab)
		goto fail;

	discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd",
			sizeof(struct discard_cmd));
	if (!discard_cmd_slab)
		goto destroy_discard_entry;

	sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set",
			sizeof(struct sit_entry_set));
	if (!sit_entry_set_slab)
		goto destroy_discard_cmd;

	revoke_entry_slab = f2fs_kmem_cache_create("f2fs_revoke_entry",
			sizeof(struct revoke_entry));
	if (!revoke_entry_slab)
		goto destroy_sit_entry_set;
	return 0;

destroy_sit_entry_set:
	kmem_cache_destroy(sit_entry_set_slab);
destroy_discard_cmd:
	kmem_cache_destroy(discard_cmd_slab);
destroy_discard_entry:
	kmem_cache_destroy(discard_entry_slab);
fail:
	return -ENOMEM;
}

void f2fs_destroy_segment_manager_caches(void)
{
	kmem_cache_destroy(sit_entry_set_slab);
	kmem_cache_destroy(discard_cmd_slab);
	kmem_cache_destroy(discard_entry_slab);
	kmem_cache_destroy(revoke_entry_slab);
}