1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * fs/f2fs/segment.c 4 * 5 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 6 * http://www.samsung.com/ 7 */ 8 #include <linux/fs.h> 9 #include <linux/f2fs_fs.h> 10 #include <linux/bio.h> 11 #include <linux/blkdev.h> 12 #include <linux/sched/mm.h> 13 #include <linux/prefetch.h> 14 #include <linux/kthread.h> 15 #include <linux/swap.h> 16 #include <linux/timer.h> 17 #include <linux/freezer.h> 18 #include <linux/sched/signal.h> 19 #include <linux/random.h> 20 21 #include "f2fs.h" 22 #include "segment.h" 23 #include "node.h" 24 #include "gc.h" 25 #include "iostat.h" 26 #include <trace/events/f2fs.h> 27 28 #define __reverse_ffz(x) __reverse_ffs(~(x)) 29 30 static struct kmem_cache *discard_entry_slab; 31 static struct kmem_cache *discard_cmd_slab; 32 static struct kmem_cache *sit_entry_set_slab; 33 static struct kmem_cache *revoke_entry_slab; 34 35 static unsigned long __reverse_ulong(unsigned char *str) 36 { 37 unsigned long tmp = 0; 38 int shift = 24, idx = 0; 39 40 #if BITS_PER_LONG == 64 41 shift = 56; 42 #endif 43 while (shift >= 0) { 44 tmp |= (unsigned long)str[idx++] << shift; 45 shift -= BITS_PER_BYTE; 46 } 47 return tmp; 48 } 49 50 /* 51 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since 52 * MSB and LSB are reversed in a byte by f2fs_set_bit. 53 */ 54 static inline unsigned long __reverse_ffs(unsigned long word) 55 { 56 int num = 0; 57 58 #if BITS_PER_LONG == 64 59 if ((word & 0xffffffff00000000UL) == 0) 60 num += 32; 61 else 62 word >>= 32; 63 #endif 64 if ((word & 0xffff0000) == 0) 65 num += 16; 66 else 67 word >>= 16; 68 69 if ((word & 0xff00) == 0) 70 num += 8; 71 else 72 word >>= 8; 73 74 if ((word & 0xf0) == 0) 75 num += 4; 76 else 77 word >>= 4; 78 79 if ((word & 0xc) == 0) 80 num += 2; 81 else 82 word >>= 2; 83 84 if ((word & 0x2) == 0) 85 num += 1; 86 return num; 87 } 88 89 /* 90 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because 91 * f2fs_set_bit makes MSB and LSB reversed in a byte. 92 * @size must be integral times of unsigned long. 93 * Example: 94 * MSB <--> LSB 95 * f2fs_set_bit(0, bitmap) => 1000 0000 96 * f2fs_set_bit(7, bitmap) => 0000 0001 97 */ 98 static unsigned long __find_rev_next_bit(const unsigned long *addr, 99 unsigned long size, unsigned long offset) 100 { 101 const unsigned long *p = addr + BIT_WORD(offset); 102 unsigned long result = size; 103 unsigned long tmp; 104 105 if (offset >= size) 106 return size; 107 108 size -= (offset & ~(BITS_PER_LONG - 1)); 109 offset %= BITS_PER_LONG; 110 111 while (1) { 112 if (*p == 0) 113 goto pass; 114 115 tmp = __reverse_ulong((unsigned char *)p); 116 117 tmp &= ~0UL >> offset; 118 if (size < BITS_PER_LONG) 119 tmp &= (~0UL << (BITS_PER_LONG - size)); 120 if (tmp) 121 goto found; 122 pass: 123 if (size <= BITS_PER_LONG) 124 break; 125 size -= BITS_PER_LONG; 126 offset = 0; 127 p++; 128 } 129 return result; 130 found: 131 return result - size + __reverse_ffs(tmp); 132 } 133 134 static unsigned long __find_rev_next_zero_bit(const unsigned long *addr, 135 unsigned long size, unsigned long offset) 136 { 137 const unsigned long *p = addr + BIT_WORD(offset); 138 unsigned long result = size; 139 unsigned long tmp; 140 141 if (offset >= size) 142 return size; 143 144 size -= (offset & ~(BITS_PER_LONG - 1)); 145 offset %= BITS_PER_LONG; 146 147 while (1) { 148 if (*p == ~0UL) 149 goto pass; 150 151 tmp = __reverse_ulong((unsigned char *)p); 152 153 if (offset) 154 tmp |= ~0UL << (BITS_PER_LONG - offset); 155 if (size < BITS_PER_LONG) 156 tmp |= ~0UL >> size; 157 if (tmp != ~0UL) 158 goto found; 159 pass: 160 if (size <= BITS_PER_LONG) 161 break; 162 size -= BITS_PER_LONG; 163 offset = 0; 164 p++; 165 } 166 return result; 167 found: 168 return result - size + __reverse_ffz(tmp); 169 } 170 171 bool f2fs_need_SSR(struct f2fs_sb_info *sbi) 172 { 173 int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES); 174 int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS); 175 int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA); 176 177 if (f2fs_lfs_mode(sbi)) 178 return false; 179 if (sbi->gc_mode == GC_URGENT_HIGH) 180 return true; 181 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 182 return true; 183 184 return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs + 185 SM_I(sbi)->min_ssr_sections + reserved_sections(sbi)); 186 } 187 188 void f2fs_abort_atomic_write(struct inode *inode, bool clean) 189 { 190 struct f2fs_inode_info *fi = F2FS_I(inode); 191 192 if (!f2fs_is_atomic_file(inode)) 193 return; 194 195 if (clean) 196 truncate_inode_pages_final(inode->i_mapping); 197 198 release_atomic_write_cnt(inode); 199 clear_inode_flag(inode, FI_ATOMIC_COMMITTED); 200 clear_inode_flag(inode, FI_ATOMIC_REPLACE); 201 clear_inode_flag(inode, FI_ATOMIC_FILE); 202 stat_dec_atomic_inode(inode); 203 204 F2FS_I(inode)->atomic_write_task = NULL; 205 206 if (clean) { 207 f2fs_i_size_write(inode, fi->original_i_size); 208 fi->original_i_size = 0; 209 } 210 /* avoid stale dirty inode during eviction */ 211 sync_inode_metadata(inode, 0); 212 } 213 214 static int __replace_atomic_write_block(struct inode *inode, pgoff_t index, 215 block_t new_addr, block_t *old_addr, bool recover) 216 { 217 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 218 struct dnode_of_data dn; 219 struct node_info ni; 220 int err; 221 222 retry: 223 set_new_dnode(&dn, inode, NULL, NULL, 0); 224 err = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE); 225 if (err) { 226 if (err == -ENOMEM) { 227 f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT); 228 goto retry; 229 } 230 return err; 231 } 232 233 err = f2fs_get_node_info(sbi, dn.nid, &ni, false); 234 if (err) { 235 f2fs_put_dnode(&dn); 236 return err; 237 } 238 239 if (recover) { 240 /* dn.data_blkaddr is always valid */ 241 if (!__is_valid_data_blkaddr(new_addr)) { 242 if (new_addr == NULL_ADDR) 243 dec_valid_block_count(sbi, inode, 1); 244 f2fs_invalidate_blocks(sbi, dn.data_blkaddr); 245 f2fs_update_data_blkaddr(&dn, new_addr); 246 } else { 247 f2fs_replace_block(sbi, &dn, dn.data_blkaddr, 248 new_addr, ni.version, true, true); 249 } 250 } else { 251 blkcnt_t count = 1; 252 253 err = inc_valid_block_count(sbi, inode, &count, true); 254 if (err) { 255 f2fs_put_dnode(&dn); 256 return err; 257 } 258 259 *old_addr = dn.data_blkaddr; 260 f2fs_truncate_data_blocks_range(&dn, 1); 261 dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count); 262 263 f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr, 264 ni.version, true, false); 265 } 266 267 f2fs_put_dnode(&dn); 268 269 trace_f2fs_replace_atomic_write_block(inode, F2FS_I(inode)->cow_inode, 270 index, old_addr ? *old_addr : 0, new_addr, recover); 271 return 0; 272 } 273 274 static void __complete_revoke_list(struct inode *inode, struct list_head *head, 275 bool revoke) 276 { 277 struct revoke_entry *cur, *tmp; 278 pgoff_t start_index = 0; 279 bool truncate = is_inode_flag_set(inode, FI_ATOMIC_REPLACE); 280 281 list_for_each_entry_safe(cur, tmp, head, list) { 282 if (revoke) { 283 __replace_atomic_write_block(inode, cur->index, 284 cur->old_addr, NULL, true); 285 } else if (truncate) { 286 f2fs_truncate_hole(inode, start_index, cur->index); 287 start_index = cur->index + 1; 288 } 289 290 list_del(&cur->list); 291 kmem_cache_free(revoke_entry_slab, cur); 292 } 293 294 if (!revoke && truncate) 295 f2fs_do_truncate_blocks(inode, start_index * PAGE_SIZE, false); 296 } 297 298 static int __f2fs_commit_atomic_write(struct inode *inode) 299 { 300 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 301 struct f2fs_inode_info *fi = F2FS_I(inode); 302 struct inode *cow_inode = fi->cow_inode; 303 struct revoke_entry *new; 304 struct list_head revoke_list; 305 block_t blkaddr; 306 struct dnode_of_data dn; 307 pgoff_t len = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 308 pgoff_t off = 0, blen, index; 309 int ret = 0, i; 310 311 INIT_LIST_HEAD(&revoke_list); 312 313 while (len) { 314 blen = min_t(pgoff_t, ADDRS_PER_BLOCK(cow_inode), len); 315 316 set_new_dnode(&dn, cow_inode, NULL, NULL, 0); 317 ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA); 318 if (ret && ret != -ENOENT) { 319 goto out; 320 } else if (ret == -ENOENT) { 321 ret = 0; 322 if (dn.max_level == 0) 323 goto out; 324 goto next; 325 } 326 327 blen = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, cow_inode), 328 len); 329 index = off; 330 for (i = 0; i < blen; i++, dn.ofs_in_node++, index++) { 331 blkaddr = f2fs_data_blkaddr(&dn); 332 333 if (!__is_valid_data_blkaddr(blkaddr)) { 334 continue; 335 } else if (!f2fs_is_valid_blkaddr(sbi, blkaddr, 336 DATA_GENERIC_ENHANCE)) { 337 f2fs_put_dnode(&dn); 338 ret = -EFSCORRUPTED; 339 f2fs_handle_error(sbi, 340 ERROR_INVALID_BLKADDR); 341 goto out; 342 } 343 344 new = f2fs_kmem_cache_alloc(revoke_entry_slab, GFP_NOFS, 345 true, NULL); 346 347 ret = __replace_atomic_write_block(inode, index, blkaddr, 348 &new->old_addr, false); 349 if (ret) { 350 f2fs_put_dnode(&dn); 351 kmem_cache_free(revoke_entry_slab, new); 352 goto out; 353 } 354 355 f2fs_update_data_blkaddr(&dn, NULL_ADDR); 356 new->index = index; 357 list_add_tail(&new->list, &revoke_list); 358 } 359 f2fs_put_dnode(&dn); 360 next: 361 off += blen; 362 len -= blen; 363 } 364 365 out: 366 if (ret) { 367 sbi->revoked_atomic_block += fi->atomic_write_cnt; 368 } else { 369 sbi->committed_atomic_block += fi->atomic_write_cnt; 370 set_inode_flag(inode, FI_ATOMIC_COMMITTED); 371 } 372 373 __complete_revoke_list(inode, &revoke_list, ret ? true : false); 374 375 return ret; 376 } 377 378 int f2fs_commit_atomic_write(struct inode *inode) 379 { 380 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 381 struct f2fs_inode_info *fi = F2FS_I(inode); 382 int err; 383 384 err = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); 385 if (err) 386 return err; 387 388 f2fs_down_write(&fi->i_gc_rwsem[WRITE]); 389 f2fs_lock_op(sbi); 390 391 err = __f2fs_commit_atomic_write(inode); 392 393 f2fs_unlock_op(sbi); 394 f2fs_up_write(&fi->i_gc_rwsem[WRITE]); 395 396 return err; 397 } 398 399 /* 400 * This function balances dirty node and dentry pages. 401 * In addition, it controls garbage collection. 402 */ 403 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need) 404 { 405 if (time_to_inject(sbi, FAULT_CHECKPOINT)) 406 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_FAULT_INJECT); 407 408 /* balance_fs_bg is able to be pending */ 409 if (need && excess_cached_nats(sbi)) 410 f2fs_balance_fs_bg(sbi, false); 411 412 if (!f2fs_is_checkpoint_ready(sbi)) 413 return; 414 415 /* 416 * We should do GC or end up with checkpoint, if there are so many dirty 417 * dir/node pages without enough free segments. 418 */ 419 if (has_enough_free_secs(sbi, 0, 0)) 420 return; 421 422 if (test_opt(sbi, GC_MERGE) && sbi->gc_thread && 423 sbi->gc_thread->f2fs_gc_task) { 424 DEFINE_WAIT(wait); 425 426 prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait, 427 TASK_UNINTERRUPTIBLE); 428 wake_up(&sbi->gc_thread->gc_wait_queue_head); 429 io_schedule(); 430 finish_wait(&sbi->gc_thread->fggc_wq, &wait); 431 } else { 432 struct f2fs_gc_control gc_control = { 433 .victim_segno = NULL_SEGNO, 434 .init_gc_type = BG_GC, 435 .no_bg_gc = true, 436 .should_migrate_blocks = false, 437 .err_gc_skipped = false, 438 .nr_free_secs = 1 }; 439 f2fs_down_write(&sbi->gc_lock); 440 stat_inc_gc_call_count(sbi, FOREGROUND); 441 f2fs_gc(sbi, &gc_control); 442 } 443 } 444 445 static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi) 446 { 447 int factor = f2fs_rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2; 448 unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS); 449 unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA); 450 unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES); 451 unsigned int meta = get_pages(sbi, F2FS_DIRTY_META); 452 unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA); 453 unsigned int threshold = (factor * DEFAULT_DIRTY_THRESHOLD) << 454 sbi->log_blocks_per_seg; 455 unsigned int global_threshold = threshold * 3 / 2; 456 457 if (dents >= threshold || qdata >= threshold || 458 nodes >= threshold || meta >= threshold || 459 imeta >= threshold) 460 return true; 461 return dents + qdata + nodes + meta + imeta > global_threshold; 462 } 463 464 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg) 465 { 466 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) 467 return; 468 469 /* try to shrink extent cache when there is no enough memory */ 470 if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE)) 471 f2fs_shrink_read_extent_tree(sbi, 472 READ_EXTENT_CACHE_SHRINK_NUMBER); 473 474 /* try to shrink age extent cache when there is no enough memory */ 475 if (!f2fs_available_free_memory(sbi, AGE_EXTENT_CACHE)) 476 f2fs_shrink_age_extent_tree(sbi, 477 AGE_EXTENT_CACHE_SHRINK_NUMBER); 478 479 /* check the # of cached NAT entries */ 480 if (!f2fs_available_free_memory(sbi, NAT_ENTRIES)) 481 f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK); 482 483 if (!f2fs_available_free_memory(sbi, FREE_NIDS)) 484 f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS); 485 else 486 f2fs_build_free_nids(sbi, false, false); 487 488 if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) || 489 excess_prefree_segs(sbi) || !f2fs_space_for_roll_forward(sbi)) 490 goto do_sync; 491 492 /* there is background inflight IO or foreground operation recently */ 493 if (is_inflight_io(sbi, REQ_TIME) || 494 (!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem))) 495 return; 496 497 /* exceed periodical checkpoint timeout threshold */ 498 if (f2fs_time_over(sbi, CP_TIME)) 499 goto do_sync; 500 501 /* checkpoint is the only way to shrink partial cached entries */ 502 if (f2fs_available_free_memory(sbi, NAT_ENTRIES) && 503 f2fs_available_free_memory(sbi, INO_ENTRIES)) 504 return; 505 506 do_sync: 507 if (test_opt(sbi, DATA_FLUSH) && from_bg) { 508 struct blk_plug plug; 509 510 mutex_lock(&sbi->flush_lock); 511 512 blk_start_plug(&plug); 513 f2fs_sync_dirty_inodes(sbi, FILE_INODE, false); 514 blk_finish_plug(&plug); 515 516 mutex_unlock(&sbi->flush_lock); 517 } 518 stat_inc_cp_call_count(sbi, BACKGROUND); 519 f2fs_sync_fs(sbi->sb, 1); 520 } 521 522 static int __submit_flush_wait(struct f2fs_sb_info *sbi, 523 struct block_device *bdev) 524 { 525 int ret = blkdev_issue_flush(bdev); 526 527 trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER), 528 test_opt(sbi, FLUSH_MERGE), ret); 529 if (!ret) 530 f2fs_update_iostat(sbi, NULL, FS_FLUSH_IO, 0); 531 return ret; 532 } 533 534 static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino) 535 { 536 int ret = 0; 537 int i; 538 539 if (!f2fs_is_multi_device(sbi)) 540 return __submit_flush_wait(sbi, sbi->sb->s_bdev); 541 542 for (i = 0; i < sbi->s_ndevs; i++) { 543 if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO)) 544 continue; 545 ret = __submit_flush_wait(sbi, FDEV(i).bdev); 546 if (ret) 547 break; 548 } 549 return ret; 550 } 551 552 static int issue_flush_thread(void *data) 553 { 554 struct f2fs_sb_info *sbi = data; 555 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; 556 wait_queue_head_t *q = &fcc->flush_wait_queue; 557 repeat: 558 if (kthread_should_stop()) 559 return 0; 560 561 if (!llist_empty(&fcc->issue_list)) { 562 struct flush_cmd *cmd, *next; 563 int ret; 564 565 fcc->dispatch_list = llist_del_all(&fcc->issue_list); 566 fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list); 567 568 cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode); 569 570 ret = submit_flush_wait(sbi, cmd->ino); 571 atomic_inc(&fcc->issued_flush); 572 573 llist_for_each_entry_safe(cmd, next, 574 fcc->dispatch_list, llnode) { 575 cmd->ret = ret; 576 complete(&cmd->wait); 577 } 578 fcc->dispatch_list = NULL; 579 } 580 581 wait_event_interruptible(*q, 582 kthread_should_stop() || !llist_empty(&fcc->issue_list)); 583 goto repeat; 584 } 585 586 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino) 587 { 588 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; 589 struct flush_cmd cmd; 590 int ret; 591 592 if (test_opt(sbi, NOBARRIER)) 593 return 0; 594 595 if (!test_opt(sbi, FLUSH_MERGE)) { 596 atomic_inc(&fcc->queued_flush); 597 ret = submit_flush_wait(sbi, ino); 598 atomic_dec(&fcc->queued_flush); 599 atomic_inc(&fcc->issued_flush); 600 return ret; 601 } 602 603 if (atomic_inc_return(&fcc->queued_flush) == 1 || 604 f2fs_is_multi_device(sbi)) { 605 ret = submit_flush_wait(sbi, ino); 606 atomic_dec(&fcc->queued_flush); 607 608 atomic_inc(&fcc->issued_flush); 609 return ret; 610 } 611 612 cmd.ino = ino; 613 init_completion(&cmd.wait); 614 615 llist_add(&cmd.llnode, &fcc->issue_list); 616 617 /* 618 * update issue_list before we wake up issue_flush thread, this 619 * smp_mb() pairs with another barrier in ___wait_event(), see 620 * more details in comments of waitqueue_active(). 621 */ 622 smp_mb(); 623 624 if (waitqueue_active(&fcc->flush_wait_queue)) 625 wake_up(&fcc->flush_wait_queue); 626 627 if (fcc->f2fs_issue_flush) { 628 wait_for_completion(&cmd.wait); 629 atomic_dec(&fcc->queued_flush); 630 } else { 631 struct llist_node *list; 632 633 list = llist_del_all(&fcc->issue_list); 634 if (!list) { 635 wait_for_completion(&cmd.wait); 636 atomic_dec(&fcc->queued_flush); 637 } else { 638 struct flush_cmd *tmp, *next; 639 640 ret = submit_flush_wait(sbi, ino); 641 642 llist_for_each_entry_safe(tmp, next, list, llnode) { 643 if (tmp == &cmd) { 644 cmd.ret = ret; 645 atomic_dec(&fcc->queued_flush); 646 continue; 647 } 648 tmp->ret = ret; 649 complete(&tmp->wait); 650 } 651 } 652 } 653 654 return cmd.ret; 655 } 656 657 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi) 658 { 659 dev_t dev = sbi->sb->s_bdev->bd_dev; 660 struct flush_cmd_control *fcc; 661 662 if (SM_I(sbi)->fcc_info) { 663 fcc = SM_I(sbi)->fcc_info; 664 if (fcc->f2fs_issue_flush) 665 return 0; 666 goto init_thread; 667 } 668 669 fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL); 670 if (!fcc) 671 return -ENOMEM; 672 atomic_set(&fcc->issued_flush, 0); 673 atomic_set(&fcc->queued_flush, 0); 674 init_waitqueue_head(&fcc->flush_wait_queue); 675 init_llist_head(&fcc->issue_list); 676 SM_I(sbi)->fcc_info = fcc; 677 if (!test_opt(sbi, FLUSH_MERGE)) 678 return 0; 679 680 init_thread: 681 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi, 682 "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev)); 683 if (IS_ERR(fcc->f2fs_issue_flush)) { 684 int err = PTR_ERR(fcc->f2fs_issue_flush); 685 686 fcc->f2fs_issue_flush = NULL; 687 return err; 688 } 689 690 return 0; 691 } 692 693 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free) 694 { 695 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; 696 697 if (fcc && fcc->f2fs_issue_flush) { 698 struct task_struct *flush_thread = fcc->f2fs_issue_flush; 699 700 fcc->f2fs_issue_flush = NULL; 701 kthread_stop(flush_thread); 702 } 703 if (free) { 704 kfree(fcc); 705 SM_I(sbi)->fcc_info = NULL; 706 } 707 } 708 709 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi) 710 { 711 int ret = 0, i; 712 713 if (!f2fs_is_multi_device(sbi)) 714 return 0; 715 716 if (test_opt(sbi, NOBARRIER)) 717 return 0; 718 719 for (i = 1; i < sbi->s_ndevs; i++) { 720 int count = DEFAULT_RETRY_IO_COUNT; 721 722 if (!f2fs_test_bit(i, (char *)&sbi->dirty_device)) 723 continue; 724 725 do { 726 ret = __submit_flush_wait(sbi, FDEV(i).bdev); 727 if (ret) 728 f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT); 729 } while (ret && --count); 730 731 if (ret) { 732 f2fs_stop_checkpoint(sbi, false, 733 STOP_CP_REASON_FLUSH_FAIL); 734 break; 735 } 736 737 spin_lock(&sbi->dev_lock); 738 f2fs_clear_bit(i, (char *)&sbi->dirty_device); 739 spin_unlock(&sbi->dev_lock); 740 } 741 742 return ret; 743 } 744 745 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, 746 enum dirty_type dirty_type) 747 { 748 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 749 750 /* need not be added */ 751 if (IS_CURSEG(sbi, segno)) 752 return; 753 754 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type])) 755 dirty_i->nr_dirty[dirty_type]++; 756 757 if (dirty_type == DIRTY) { 758 struct seg_entry *sentry = get_seg_entry(sbi, segno); 759 enum dirty_type t = sentry->type; 760 761 if (unlikely(t >= DIRTY)) { 762 f2fs_bug_on(sbi, 1); 763 return; 764 } 765 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t])) 766 dirty_i->nr_dirty[t]++; 767 768 if (__is_large_section(sbi)) { 769 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); 770 block_t valid_blocks = 771 get_valid_blocks(sbi, segno, true); 772 773 f2fs_bug_on(sbi, unlikely(!valid_blocks || 774 valid_blocks == CAP_BLKS_PER_SEC(sbi))); 775 776 if (!IS_CURSEC(sbi, secno)) 777 set_bit(secno, dirty_i->dirty_secmap); 778 } 779 } 780 } 781 782 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, 783 enum dirty_type dirty_type) 784 { 785 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 786 block_t valid_blocks; 787 788 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type])) 789 dirty_i->nr_dirty[dirty_type]--; 790 791 if (dirty_type == DIRTY) { 792 struct seg_entry *sentry = get_seg_entry(sbi, segno); 793 enum dirty_type t = sentry->type; 794 795 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t])) 796 dirty_i->nr_dirty[t]--; 797 798 valid_blocks = get_valid_blocks(sbi, segno, true); 799 if (valid_blocks == 0) { 800 clear_bit(GET_SEC_FROM_SEG(sbi, segno), 801 dirty_i->victim_secmap); 802 #ifdef CONFIG_F2FS_CHECK_FS 803 clear_bit(segno, SIT_I(sbi)->invalid_segmap); 804 #endif 805 } 806 if (__is_large_section(sbi)) { 807 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); 808 809 if (!valid_blocks || 810 valid_blocks == CAP_BLKS_PER_SEC(sbi)) { 811 clear_bit(secno, dirty_i->dirty_secmap); 812 return; 813 } 814 815 if (!IS_CURSEC(sbi, secno)) 816 set_bit(secno, dirty_i->dirty_secmap); 817 } 818 } 819 } 820 821 /* 822 * Should not occur error such as -ENOMEM. 823 * Adding dirty entry into seglist is not critical operation. 824 * If a given segment is one of current working segments, it won't be added. 825 */ 826 static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) 827 { 828 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 829 unsigned short valid_blocks, ckpt_valid_blocks; 830 unsigned int usable_blocks; 831 832 if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno)) 833 return; 834 835 usable_blocks = f2fs_usable_blks_in_seg(sbi, segno); 836 mutex_lock(&dirty_i->seglist_lock); 837 838 valid_blocks = get_valid_blocks(sbi, segno, false); 839 ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false); 840 841 if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) || 842 ckpt_valid_blocks == usable_blocks)) { 843 __locate_dirty_segment(sbi, segno, PRE); 844 __remove_dirty_segment(sbi, segno, DIRTY); 845 } else if (valid_blocks < usable_blocks) { 846 __locate_dirty_segment(sbi, segno, DIRTY); 847 } else { 848 /* Recovery routine with SSR needs this */ 849 __remove_dirty_segment(sbi, segno, DIRTY); 850 } 851 852 mutex_unlock(&dirty_i->seglist_lock); 853 } 854 855 /* This moves currently empty dirty blocks to prefree. Must hold seglist_lock */ 856 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi) 857 { 858 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 859 unsigned int segno; 860 861 mutex_lock(&dirty_i->seglist_lock); 862 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) { 863 if (get_valid_blocks(sbi, segno, false)) 864 continue; 865 if (IS_CURSEG(sbi, segno)) 866 continue; 867 __locate_dirty_segment(sbi, segno, PRE); 868 __remove_dirty_segment(sbi, segno, DIRTY); 869 } 870 mutex_unlock(&dirty_i->seglist_lock); 871 } 872 873 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi) 874 { 875 int ovp_hole_segs = 876 (overprovision_segments(sbi) - reserved_segments(sbi)); 877 block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg; 878 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 879 block_t holes[2] = {0, 0}; /* DATA and NODE */ 880 block_t unusable; 881 struct seg_entry *se; 882 unsigned int segno; 883 884 mutex_lock(&dirty_i->seglist_lock); 885 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) { 886 se = get_seg_entry(sbi, segno); 887 if (IS_NODESEG(se->type)) 888 holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) - 889 se->valid_blocks; 890 else 891 holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) - 892 se->valid_blocks; 893 } 894 mutex_unlock(&dirty_i->seglist_lock); 895 896 unusable = max(holes[DATA], holes[NODE]); 897 if (unusable > ovp_holes) 898 return unusable - ovp_holes; 899 return 0; 900 } 901 902 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable) 903 { 904 int ovp_hole_segs = 905 (overprovision_segments(sbi) - reserved_segments(sbi)); 906 if (unusable > F2FS_OPTION(sbi).unusable_cap) 907 return -EAGAIN; 908 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) && 909 dirty_segments(sbi) > ovp_hole_segs) 910 return -EAGAIN; 911 return 0; 912 } 913 914 /* This is only used by SBI_CP_DISABLED */ 915 static unsigned int get_free_segment(struct f2fs_sb_info *sbi) 916 { 917 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 918 unsigned int segno = 0; 919 920 mutex_lock(&dirty_i->seglist_lock); 921 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) { 922 if (get_valid_blocks(sbi, segno, false)) 923 continue; 924 if (get_ckpt_valid_blocks(sbi, segno, false)) 925 continue; 926 mutex_unlock(&dirty_i->seglist_lock); 927 return segno; 928 } 929 mutex_unlock(&dirty_i->seglist_lock); 930 return NULL_SEGNO; 931 } 932 933 static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi, 934 struct block_device *bdev, block_t lstart, 935 block_t start, block_t len) 936 { 937 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 938 struct list_head *pend_list; 939 struct discard_cmd *dc; 940 941 f2fs_bug_on(sbi, !len); 942 943 pend_list = &dcc->pend_list[plist_idx(len)]; 944 945 dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS, true, NULL); 946 INIT_LIST_HEAD(&dc->list); 947 dc->bdev = bdev; 948 dc->di.lstart = lstart; 949 dc->di.start = start; 950 dc->di.len = len; 951 dc->ref = 0; 952 dc->state = D_PREP; 953 dc->queued = 0; 954 dc->error = 0; 955 init_completion(&dc->wait); 956 list_add_tail(&dc->list, pend_list); 957 spin_lock_init(&dc->lock); 958 dc->bio_ref = 0; 959 atomic_inc(&dcc->discard_cmd_cnt); 960 dcc->undiscard_blks += len; 961 962 return dc; 963 } 964 965 static bool f2fs_check_discard_tree(struct f2fs_sb_info *sbi) 966 { 967 #ifdef CONFIG_F2FS_CHECK_FS 968 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 969 struct rb_node *cur = rb_first_cached(&dcc->root), *next; 970 struct discard_cmd *cur_dc, *next_dc; 971 972 while (cur) { 973 next = rb_next(cur); 974 if (!next) 975 return true; 976 977 cur_dc = rb_entry(cur, struct discard_cmd, rb_node); 978 next_dc = rb_entry(next, struct discard_cmd, rb_node); 979 980 if (cur_dc->di.lstart + cur_dc->di.len > next_dc->di.lstart) { 981 f2fs_info(sbi, "broken discard_rbtree, " 982 "cur(%u, %u) next(%u, %u)", 983 cur_dc->di.lstart, cur_dc->di.len, 984 next_dc->di.lstart, next_dc->di.len); 985 return false; 986 } 987 cur = next; 988 } 989 #endif 990 return true; 991 } 992 993 static struct discard_cmd *__lookup_discard_cmd(struct f2fs_sb_info *sbi, 994 block_t blkaddr) 995 { 996 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 997 struct rb_node *node = dcc->root.rb_root.rb_node; 998 struct discard_cmd *dc; 999 1000 while (node) { 1001 dc = rb_entry(node, struct discard_cmd, rb_node); 1002 1003 if (blkaddr < dc->di.lstart) 1004 node = node->rb_left; 1005 else if (blkaddr >= dc->di.lstart + dc->di.len) 1006 node = node->rb_right; 1007 else 1008 return dc; 1009 } 1010 return NULL; 1011 } 1012 1013 static struct discard_cmd *__lookup_discard_cmd_ret(struct rb_root_cached *root, 1014 block_t blkaddr, 1015 struct discard_cmd **prev_entry, 1016 struct discard_cmd **next_entry, 1017 struct rb_node ***insert_p, 1018 struct rb_node **insert_parent) 1019 { 1020 struct rb_node **pnode = &root->rb_root.rb_node; 1021 struct rb_node *parent = NULL, *tmp_node; 1022 struct discard_cmd *dc; 1023 1024 *insert_p = NULL; 1025 *insert_parent = NULL; 1026 *prev_entry = NULL; 1027 *next_entry = NULL; 1028 1029 if (RB_EMPTY_ROOT(&root->rb_root)) 1030 return NULL; 1031 1032 while (*pnode) { 1033 parent = *pnode; 1034 dc = rb_entry(*pnode, struct discard_cmd, rb_node); 1035 1036 if (blkaddr < dc->di.lstart) 1037 pnode = &(*pnode)->rb_left; 1038 else if (blkaddr >= dc->di.lstart + dc->di.len) 1039 pnode = &(*pnode)->rb_right; 1040 else 1041 goto lookup_neighbors; 1042 } 1043 1044 *insert_p = pnode; 1045 *insert_parent = parent; 1046 1047 dc = rb_entry(parent, struct discard_cmd, rb_node); 1048 tmp_node = parent; 1049 if (parent && blkaddr > dc->di.lstart) 1050 tmp_node = rb_next(parent); 1051 *next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node); 1052 1053 tmp_node = parent; 1054 if (parent && blkaddr < dc->di.lstart) 1055 tmp_node = rb_prev(parent); 1056 *prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node); 1057 return NULL; 1058 1059 lookup_neighbors: 1060 /* lookup prev node for merging backward later */ 1061 tmp_node = rb_prev(&dc->rb_node); 1062 *prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node); 1063 1064 /* lookup next node for merging frontward later */ 1065 tmp_node = rb_next(&dc->rb_node); 1066 *next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node); 1067 return dc; 1068 } 1069 1070 static void __detach_discard_cmd(struct discard_cmd_control *dcc, 1071 struct discard_cmd *dc) 1072 { 1073 if (dc->state == D_DONE) 1074 atomic_sub(dc->queued, &dcc->queued_discard); 1075 1076 list_del(&dc->list); 1077 rb_erase_cached(&dc->rb_node, &dcc->root); 1078 dcc->undiscard_blks -= dc->di.len; 1079 1080 kmem_cache_free(discard_cmd_slab, dc); 1081 1082 atomic_dec(&dcc->discard_cmd_cnt); 1083 } 1084 1085 static void __remove_discard_cmd(struct f2fs_sb_info *sbi, 1086 struct discard_cmd *dc) 1087 { 1088 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1089 unsigned long flags; 1090 1091 trace_f2fs_remove_discard(dc->bdev, dc->di.start, dc->di.len); 1092 1093 spin_lock_irqsave(&dc->lock, flags); 1094 if (dc->bio_ref) { 1095 spin_unlock_irqrestore(&dc->lock, flags); 1096 return; 1097 } 1098 spin_unlock_irqrestore(&dc->lock, flags); 1099 1100 f2fs_bug_on(sbi, dc->ref); 1101 1102 if (dc->error == -EOPNOTSUPP) 1103 dc->error = 0; 1104 1105 if (dc->error) 1106 printk_ratelimited( 1107 "%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d", 1108 KERN_INFO, sbi->sb->s_id, 1109 dc->di.lstart, dc->di.start, dc->di.len, dc->error); 1110 __detach_discard_cmd(dcc, dc); 1111 } 1112 1113 static void f2fs_submit_discard_endio(struct bio *bio) 1114 { 1115 struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private; 1116 unsigned long flags; 1117 1118 spin_lock_irqsave(&dc->lock, flags); 1119 if (!dc->error) 1120 dc->error = blk_status_to_errno(bio->bi_status); 1121 dc->bio_ref--; 1122 if (!dc->bio_ref && dc->state == D_SUBMIT) { 1123 dc->state = D_DONE; 1124 complete_all(&dc->wait); 1125 } 1126 spin_unlock_irqrestore(&dc->lock, flags); 1127 bio_put(bio); 1128 } 1129 1130 static void __check_sit_bitmap(struct f2fs_sb_info *sbi, 1131 block_t start, block_t end) 1132 { 1133 #ifdef CONFIG_F2FS_CHECK_FS 1134 struct seg_entry *sentry; 1135 unsigned int segno; 1136 block_t blk = start; 1137 unsigned long offset, size, *map; 1138 1139 while (blk < end) { 1140 segno = GET_SEGNO(sbi, blk); 1141 sentry = get_seg_entry(sbi, segno); 1142 offset = GET_BLKOFF_FROM_SEG0(sbi, blk); 1143 1144 if (end < START_BLOCK(sbi, segno + 1)) 1145 size = GET_BLKOFF_FROM_SEG0(sbi, end); 1146 else 1147 size = BLKS_PER_SEG(sbi); 1148 map = (unsigned long *)(sentry->cur_valid_map); 1149 offset = __find_rev_next_bit(map, size, offset); 1150 f2fs_bug_on(sbi, offset != size); 1151 blk = START_BLOCK(sbi, segno + 1); 1152 } 1153 #endif 1154 } 1155 1156 static void __init_discard_policy(struct f2fs_sb_info *sbi, 1157 struct discard_policy *dpolicy, 1158 int discard_type, unsigned int granularity) 1159 { 1160 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1161 1162 /* common policy */ 1163 dpolicy->type = discard_type; 1164 dpolicy->sync = true; 1165 dpolicy->ordered = false; 1166 dpolicy->granularity = granularity; 1167 1168 dpolicy->max_requests = dcc->max_discard_request; 1169 dpolicy->io_aware_gran = dcc->discard_io_aware_gran; 1170 dpolicy->timeout = false; 1171 1172 if (discard_type == DPOLICY_BG) { 1173 dpolicy->min_interval = dcc->min_discard_issue_time; 1174 dpolicy->mid_interval = dcc->mid_discard_issue_time; 1175 dpolicy->max_interval = dcc->max_discard_issue_time; 1176 dpolicy->io_aware = true; 1177 dpolicy->sync = false; 1178 dpolicy->ordered = true; 1179 if (utilization(sbi) > dcc->discard_urgent_util) { 1180 dpolicy->granularity = MIN_DISCARD_GRANULARITY; 1181 if (atomic_read(&dcc->discard_cmd_cnt)) 1182 dpolicy->max_interval = 1183 dcc->min_discard_issue_time; 1184 } 1185 } else if (discard_type == DPOLICY_FORCE) { 1186 dpolicy->min_interval = dcc->min_discard_issue_time; 1187 dpolicy->mid_interval = dcc->mid_discard_issue_time; 1188 dpolicy->max_interval = dcc->max_discard_issue_time; 1189 dpolicy->io_aware = false; 1190 } else if (discard_type == DPOLICY_FSTRIM) { 1191 dpolicy->io_aware = false; 1192 } else if (discard_type == DPOLICY_UMOUNT) { 1193 dpolicy->io_aware = false; 1194 /* we need to issue all to keep CP_TRIMMED_FLAG */ 1195 dpolicy->granularity = MIN_DISCARD_GRANULARITY; 1196 dpolicy->timeout = true; 1197 } 1198 } 1199 1200 static void __update_discard_tree_range(struct f2fs_sb_info *sbi, 1201 struct block_device *bdev, block_t lstart, 1202 block_t start, block_t len); 1203 1204 #ifdef CONFIG_BLK_DEV_ZONED 1205 static void __submit_zone_reset_cmd(struct f2fs_sb_info *sbi, 1206 struct discard_cmd *dc, blk_opf_t flag, 1207 struct list_head *wait_list, 1208 unsigned int *issued) 1209 { 1210 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1211 struct block_device *bdev = dc->bdev; 1212 struct bio *bio = bio_alloc(bdev, 0, REQ_OP_ZONE_RESET | flag, GFP_NOFS); 1213 unsigned long flags; 1214 1215 trace_f2fs_issue_reset_zone(bdev, dc->di.start); 1216 1217 spin_lock_irqsave(&dc->lock, flags); 1218 dc->state = D_SUBMIT; 1219 dc->bio_ref++; 1220 spin_unlock_irqrestore(&dc->lock, flags); 1221 1222 if (issued) 1223 (*issued)++; 1224 1225 atomic_inc(&dcc->queued_discard); 1226 dc->queued++; 1227 list_move_tail(&dc->list, wait_list); 1228 1229 /* sanity check on discard range */ 1230 __check_sit_bitmap(sbi, dc->di.lstart, dc->di.lstart + dc->di.len); 1231 1232 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(dc->di.start); 1233 bio->bi_private = dc; 1234 bio->bi_end_io = f2fs_submit_discard_endio; 1235 submit_bio(bio); 1236 1237 atomic_inc(&dcc->issued_discard); 1238 f2fs_update_iostat(sbi, NULL, FS_ZONE_RESET_IO, dc->di.len * F2FS_BLKSIZE); 1239 } 1240 #endif 1241 1242 /* this function is copied from blkdev_issue_discard from block/blk-lib.c */ 1243 static int __submit_discard_cmd(struct f2fs_sb_info *sbi, 1244 struct discard_policy *dpolicy, 1245 struct discard_cmd *dc, int *issued) 1246 { 1247 struct block_device *bdev = dc->bdev; 1248 unsigned int max_discard_blocks = 1249 SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev)); 1250 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1251 struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ? 1252 &(dcc->fstrim_list) : &(dcc->wait_list); 1253 blk_opf_t flag = dpolicy->sync ? REQ_SYNC : 0; 1254 block_t lstart, start, len, total_len; 1255 int err = 0; 1256 1257 if (dc->state != D_PREP) 1258 return 0; 1259 1260 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) 1261 return 0; 1262 1263 #ifdef CONFIG_BLK_DEV_ZONED 1264 if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) { 1265 int devi = f2fs_bdev_index(sbi, bdev); 1266 1267 if (devi < 0) 1268 return -EINVAL; 1269 1270 if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) { 1271 __submit_zone_reset_cmd(sbi, dc, flag, 1272 wait_list, issued); 1273 return 0; 1274 } 1275 } 1276 #endif 1277 1278 trace_f2fs_issue_discard(bdev, dc->di.start, dc->di.len); 1279 1280 lstart = dc->di.lstart; 1281 start = dc->di.start; 1282 len = dc->di.len; 1283 total_len = len; 1284 1285 dc->di.len = 0; 1286 1287 while (total_len && *issued < dpolicy->max_requests && !err) { 1288 struct bio *bio = NULL; 1289 unsigned long flags; 1290 bool last = true; 1291 1292 if (len > max_discard_blocks) { 1293 len = max_discard_blocks; 1294 last = false; 1295 } 1296 1297 (*issued)++; 1298 if (*issued == dpolicy->max_requests) 1299 last = true; 1300 1301 dc->di.len += len; 1302 1303 if (time_to_inject(sbi, FAULT_DISCARD)) { 1304 err = -EIO; 1305 } else { 1306 err = __blkdev_issue_discard(bdev, 1307 SECTOR_FROM_BLOCK(start), 1308 SECTOR_FROM_BLOCK(len), 1309 GFP_NOFS, &bio); 1310 } 1311 if (err) { 1312 spin_lock_irqsave(&dc->lock, flags); 1313 if (dc->state == D_PARTIAL) 1314 dc->state = D_SUBMIT; 1315 spin_unlock_irqrestore(&dc->lock, flags); 1316 1317 break; 1318 } 1319 1320 f2fs_bug_on(sbi, !bio); 1321 1322 /* 1323 * should keep before submission to avoid D_DONE 1324 * right away 1325 */ 1326 spin_lock_irqsave(&dc->lock, flags); 1327 if (last) 1328 dc->state = D_SUBMIT; 1329 else 1330 dc->state = D_PARTIAL; 1331 dc->bio_ref++; 1332 spin_unlock_irqrestore(&dc->lock, flags); 1333 1334 atomic_inc(&dcc->queued_discard); 1335 dc->queued++; 1336 list_move_tail(&dc->list, wait_list); 1337 1338 /* sanity check on discard range */ 1339 __check_sit_bitmap(sbi, lstart, lstart + len); 1340 1341 bio->bi_private = dc; 1342 bio->bi_end_io = f2fs_submit_discard_endio; 1343 bio->bi_opf |= flag; 1344 submit_bio(bio); 1345 1346 atomic_inc(&dcc->issued_discard); 1347 1348 f2fs_update_iostat(sbi, NULL, FS_DISCARD_IO, len * F2FS_BLKSIZE); 1349 1350 lstart += len; 1351 start += len; 1352 total_len -= len; 1353 len = total_len; 1354 } 1355 1356 if (!err && len) { 1357 dcc->undiscard_blks -= len; 1358 __update_discard_tree_range(sbi, bdev, lstart, start, len); 1359 } 1360 return err; 1361 } 1362 1363 static void __insert_discard_cmd(struct f2fs_sb_info *sbi, 1364 struct block_device *bdev, block_t lstart, 1365 block_t start, block_t len) 1366 { 1367 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1368 struct rb_node **p = &dcc->root.rb_root.rb_node; 1369 struct rb_node *parent = NULL; 1370 struct discard_cmd *dc; 1371 bool leftmost = true; 1372 1373 /* look up rb tree to find parent node */ 1374 while (*p) { 1375 parent = *p; 1376 dc = rb_entry(parent, struct discard_cmd, rb_node); 1377 1378 if (lstart < dc->di.lstart) { 1379 p = &(*p)->rb_left; 1380 } else if (lstart >= dc->di.lstart + dc->di.len) { 1381 p = &(*p)->rb_right; 1382 leftmost = false; 1383 } else { 1384 f2fs_bug_on(sbi, 1); 1385 } 1386 } 1387 1388 dc = __create_discard_cmd(sbi, bdev, lstart, start, len); 1389 1390 rb_link_node(&dc->rb_node, parent, p); 1391 rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost); 1392 } 1393 1394 static void __relocate_discard_cmd(struct discard_cmd_control *dcc, 1395 struct discard_cmd *dc) 1396 { 1397 list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->di.len)]); 1398 } 1399 1400 static void __punch_discard_cmd(struct f2fs_sb_info *sbi, 1401 struct discard_cmd *dc, block_t blkaddr) 1402 { 1403 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1404 struct discard_info di = dc->di; 1405 bool modified = false; 1406 1407 if (dc->state == D_DONE || dc->di.len == 1) { 1408 __remove_discard_cmd(sbi, dc); 1409 return; 1410 } 1411 1412 dcc->undiscard_blks -= di.len; 1413 1414 if (blkaddr > di.lstart) { 1415 dc->di.len = blkaddr - dc->di.lstart; 1416 dcc->undiscard_blks += dc->di.len; 1417 __relocate_discard_cmd(dcc, dc); 1418 modified = true; 1419 } 1420 1421 if (blkaddr < di.lstart + di.len - 1) { 1422 if (modified) { 1423 __insert_discard_cmd(sbi, dc->bdev, blkaddr + 1, 1424 di.start + blkaddr + 1 - di.lstart, 1425 di.lstart + di.len - 1 - blkaddr); 1426 } else { 1427 dc->di.lstart++; 1428 dc->di.len--; 1429 dc->di.start++; 1430 dcc->undiscard_blks += dc->di.len; 1431 __relocate_discard_cmd(dcc, dc); 1432 } 1433 } 1434 } 1435 1436 static void __update_discard_tree_range(struct f2fs_sb_info *sbi, 1437 struct block_device *bdev, block_t lstart, 1438 block_t start, block_t len) 1439 { 1440 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1441 struct discard_cmd *prev_dc = NULL, *next_dc = NULL; 1442 struct discard_cmd *dc; 1443 struct discard_info di = {0}; 1444 struct rb_node **insert_p = NULL, *insert_parent = NULL; 1445 unsigned int max_discard_blocks = 1446 SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev)); 1447 block_t end = lstart + len; 1448 1449 dc = __lookup_discard_cmd_ret(&dcc->root, lstart, 1450 &prev_dc, &next_dc, &insert_p, &insert_parent); 1451 if (dc) 1452 prev_dc = dc; 1453 1454 if (!prev_dc) { 1455 di.lstart = lstart; 1456 di.len = next_dc ? next_dc->di.lstart - lstart : len; 1457 di.len = min(di.len, len); 1458 di.start = start; 1459 } 1460 1461 while (1) { 1462 struct rb_node *node; 1463 bool merged = false; 1464 struct discard_cmd *tdc = NULL; 1465 1466 if (prev_dc) { 1467 di.lstart = prev_dc->di.lstart + prev_dc->di.len; 1468 if (di.lstart < lstart) 1469 di.lstart = lstart; 1470 if (di.lstart >= end) 1471 break; 1472 1473 if (!next_dc || next_dc->di.lstart > end) 1474 di.len = end - di.lstart; 1475 else 1476 di.len = next_dc->di.lstart - di.lstart; 1477 di.start = start + di.lstart - lstart; 1478 } 1479 1480 if (!di.len) 1481 goto next; 1482 1483 if (prev_dc && prev_dc->state == D_PREP && 1484 prev_dc->bdev == bdev && 1485 __is_discard_back_mergeable(&di, &prev_dc->di, 1486 max_discard_blocks)) { 1487 prev_dc->di.len += di.len; 1488 dcc->undiscard_blks += di.len; 1489 __relocate_discard_cmd(dcc, prev_dc); 1490 di = prev_dc->di; 1491 tdc = prev_dc; 1492 merged = true; 1493 } 1494 1495 if (next_dc && next_dc->state == D_PREP && 1496 next_dc->bdev == bdev && 1497 __is_discard_front_mergeable(&di, &next_dc->di, 1498 max_discard_blocks)) { 1499 next_dc->di.lstart = di.lstart; 1500 next_dc->di.len += di.len; 1501 next_dc->di.start = di.start; 1502 dcc->undiscard_blks += di.len; 1503 __relocate_discard_cmd(dcc, next_dc); 1504 if (tdc) 1505 __remove_discard_cmd(sbi, tdc); 1506 merged = true; 1507 } 1508 1509 if (!merged) 1510 __insert_discard_cmd(sbi, bdev, 1511 di.lstart, di.start, di.len); 1512 next: 1513 prev_dc = next_dc; 1514 if (!prev_dc) 1515 break; 1516 1517 node = rb_next(&prev_dc->rb_node); 1518 next_dc = rb_entry_safe(node, struct discard_cmd, rb_node); 1519 } 1520 } 1521 1522 #ifdef CONFIG_BLK_DEV_ZONED 1523 static void __queue_zone_reset_cmd(struct f2fs_sb_info *sbi, 1524 struct block_device *bdev, block_t blkstart, block_t lblkstart, 1525 block_t blklen) 1526 { 1527 trace_f2fs_queue_reset_zone(bdev, blkstart); 1528 1529 mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock); 1530 __insert_discard_cmd(sbi, bdev, lblkstart, blkstart, blklen); 1531 mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock); 1532 } 1533 #endif 1534 1535 static void __queue_discard_cmd(struct f2fs_sb_info *sbi, 1536 struct block_device *bdev, block_t blkstart, block_t blklen) 1537 { 1538 block_t lblkstart = blkstart; 1539 1540 if (!f2fs_bdev_support_discard(bdev)) 1541 return; 1542 1543 trace_f2fs_queue_discard(bdev, blkstart, blklen); 1544 1545 if (f2fs_is_multi_device(sbi)) { 1546 int devi = f2fs_target_device_index(sbi, blkstart); 1547 1548 blkstart -= FDEV(devi).start_blk; 1549 } 1550 mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock); 1551 __update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen); 1552 mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock); 1553 } 1554 1555 static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi, 1556 struct discard_policy *dpolicy, int *issued) 1557 { 1558 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1559 struct discard_cmd *prev_dc = NULL, *next_dc = NULL; 1560 struct rb_node **insert_p = NULL, *insert_parent = NULL; 1561 struct discard_cmd *dc; 1562 struct blk_plug plug; 1563 bool io_interrupted = false; 1564 1565 mutex_lock(&dcc->cmd_lock); 1566 dc = __lookup_discard_cmd_ret(&dcc->root, dcc->next_pos, 1567 &prev_dc, &next_dc, &insert_p, &insert_parent); 1568 if (!dc) 1569 dc = next_dc; 1570 1571 blk_start_plug(&plug); 1572 1573 while (dc) { 1574 struct rb_node *node; 1575 int err = 0; 1576 1577 if (dc->state != D_PREP) 1578 goto next; 1579 1580 if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) { 1581 io_interrupted = true; 1582 break; 1583 } 1584 1585 dcc->next_pos = dc->di.lstart + dc->di.len; 1586 err = __submit_discard_cmd(sbi, dpolicy, dc, issued); 1587 1588 if (*issued >= dpolicy->max_requests) 1589 break; 1590 next: 1591 node = rb_next(&dc->rb_node); 1592 if (err) 1593 __remove_discard_cmd(sbi, dc); 1594 dc = rb_entry_safe(node, struct discard_cmd, rb_node); 1595 } 1596 1597 blk_finish_plug(&plug); 1598 1599 if (!dc) 1600 dcc->next_pos = 0; 1601 1602 mutex_unlock(&dcc->cmd_lock); 1603 1604 if (!(*issued) && io_interrupted) 1605 *issued = -1; 1606 } 1607 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi, 1608 struct discard_policy *dpolicy); 1609 1610 static int __issue_discard_cmd(struct f2fs_sb_info *sbi, 1611 struct discard_policy *dpolicy) 1612 { 1613 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1614 struct list_head *pend_list; 1615 struct discard_cmd *dc, *tmp; 1616 struct blk_plug plug; 1617 int i, issued; 1618 bool io_interrupted = false; 1619 1620 if (dpolicy->timeout) 1621 f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT); 1622 1623 retry: 1624 issued = 0; 1625 for (i = MAX_PLIST_NUM - 1; i >= 0; i--) { 1626 if (dpolicy->timeout && 1627 f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT)) 1628 break; 1629 1630 if (i + 1 < dpolicy->granularity) 1631 break; 1632 1633 if (i + 1 < dcc->max_ordered_discard && dpolicy->ordered) { 1634 __issue_discard_cmd_orderly(sbi, dpolicy, &issued); 1635 return issued; 1636 } 1637 1638 pend_list = &dcc->pend_list[i]; 1639 1640 mutex_lock(&dcc->cmd_lock); 1641 if (list_empty(pend_list)) 1642 goto next; 1643 if (unlikely(dcc->rbtree_check)) 1644 f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi)); 1645 blk_start_plug(&plug); 1646 list_for_each_entry_safe(dc, tmp, pend_list, list) { 1647 f2fs_bug_on(sbi, dc->state != D_PREP); 1648 1649 if (dpolicy->timeout && 1650 f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT)) 1651 break; 1652 1653 if (dpolicy->io_aware && i < dpolicy->io_aware_gran && 1654 !is_idle(sbi, DISCARD_TIME)) { 1655 io_interrupted = true; 1656 break; 1657 } 1658 1659 __submit_discard_cmd(sbi, dpolicy, dc, &issued); 1660 1661 if (issued >= dpolicy->max_requests) 1662 break; 1663 } 1664 blk_finish_plug(&plug); 1665 next: 1666 mutex_unlock(&dcc->cmd_lock); 1667 1668 if (issued >= dpolicy->max_requests || io_interrupted) 1669 break; 1670 } 1671 1672 if (dpolicy->type == DPOLICY_UMOUNT && issued) { 1673 __wait_all_discard_cmd(sbi, dpolicy); 1674 goto retry; 1675 } 1676 1677 if (!issued && io_interrupted) 1678 issued = -1; 1679 1680 return issued; 1681 } 1682 1683 static bool __drop_discard_cmd(struct f2fs_sb_info *sbi) 1684 { 1685 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1686 struct list_head *pend_list; 1687 struct discard_cmd *dc, *tmp; 1688 int i; 1689 bool dropped = false; 1690 1691 mutex_lock(&dcc->cmd_lock); 1692 for (i = MAX_PLIST_NUM - 1; i >= 0; i--) { 1693 pend_list = &dcc->pend_list[i]; 1694 list_for_each_entry_safe(dc, tmp, pend_list, list) { 1695 f2fs_bug_on(sbi, dc->state != D_PREP); 1696 __remove_discard_cmd(sbi, dc); 1697 dropped = true; 1698 } 1699 } 1700 mutex_unlock(&dcc->cmd_lock); 1701 1702 return dropped; 1703 } 1704 1705 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi) 1706 { 1707 __drop_discard_cmd(sbi); 1708 } 1709 1710 static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi, 1711 struct discard_cmd *dc) 1712 { 1713 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1714 unsigned int len = 0; 1715 1716 wait_for_completion_io(&dc->wait); 1717 mutex_lock(&dcc->cmd_lock); 1718 f2fs_bug_on(sbi, dc->state != D_DONE); 1719 dc->ref--; 1720 if (!dc->ref) { 1721 if (!dc->error) 1722 len = dc->di.len; 1723 __remove_discard_cmd(sbi, dc); 1724 } 1725 mutex_unlock(&dcc->cmd_lock); 1726 1727 return len; 1728 } 1729 1730 static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi, 1731 struct discard_policy *dpolicy, 1732 block_t start, block_t end) 1733 { 1734 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1735 struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ? 1736 &(dcc->fstrim_list) : &(dcc->wait_list); 1737 struct discard_cmd *dc = NULL, *iter, *tmp; 1738 unsigned int trimmed = 0; 1739 1740 next: 1741 dc = NULL; 1742 1743 mutex_lock(&dcc->cmd_lock); 1744 list_for_each_entry_safe(iter, tmp, wait_list, list) { 1745 if (iter->di.lstart + iter->di.len <= start || 1746 end <= iter->di.lstart) 1747 continue; 1748 if (iter->di.len < dpolicy->granularity) 1749 continue; 1750 if (iter->state == D_DONE && !iter->ref) { 1751 wait_for_completion_io(&iter->wait); 1752 if (!iter->error) 1753 trimmed += iter->di.len; 1754 __remove_discard_cmd(sbi, iter); 1755 } else { 1756 iter->ref++; 1757 dc = iter; 1758 break; 1759 } 1760 } 1761 mutex_unlock(&dcc->cmd_lock); 1762 1763 if (dc) { 1764 trimmed += __wait_one_discard_bio(sbi, dc); 1765 goto next; 1766 } 1767 1768 return trimmed; 1769 } 1770 1771 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi, 1772 struct discard_policy *dpolicy) 1773 { 1774 struct discard_policy dp; 1775 unsigned int discard_blks; 1776 1777 if (dpolicy) 1778 return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX); 1779 1780 /* wait all */ 1781 __init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, MIN_DISCARD_GRANULARITY); 1782 discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX); 1783 __init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, MIN_DISCARD_GRANULARITY); 1784 discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX); 1785 1786 return discard_blks; 1787 } 1788 1789 /* This should be covered by global mutex, &sit_i->sentry_lock */ 1790 static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr) 1791 { 1792 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1793 struct discard_cmd *dc; 1794 bool need_wait = false; 1795 1796 mutex_lock(&dcc->cmd_lock); 1797 dc = __lookup_discard_cmd(sbi, blkaddr); 1798 #ifdef CONFIG_BLK_DEV_ZONED 1799 if (dc && f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(dc->bdev)) { 1800 int devi = f2fs_bdev_index(sbi, dc->bdev); 1801 1802 if (devi < 0) { 1803 mutex_unlock(&dcc->cmd_lock); 1804 return; 1805 } 1806 1807 if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) { 1808 /* force submit zone reset */ 1809 if (dc->state == D_PREP) 1810 __submit_zone_reset_cmd(sbi, dc, REQ_SYNC, 1811 &dcc->wait_list, NULL); 1812 dc->ref++; 1813 mutex_unlock(&dcc->cmd_lock); 1814 /* wait zone reset */ 1815 __wait_one_discard_bio(sbi, dc); 1816 return; 1817 } 1818 } 1819 #endif 1820 if (dc) { 1821 if (dc->state == D_PREP) { 1822 __punch_discard_cmd(sbi, dc, blkaddr); 1823 } else { 1824 dc->ref++; 1825 need_wait = true; 1826 } 1827 } 1828 mutex_unlock(&dcc->cmd_lock); 1829 1830 if (need_wait) 1831 __wait_one_discard_bio(sbi, dc); 1832 } 1833 1834 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi) 1835 { 1836 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1837 1838 if (dcc && dcc->f2fs_issue_discard) { 1839 struct task_struct *discard_thread = dcc->f2fs_issue_discard; 1840 1841 dcc->f2fs_issue_discard = NULL; 1842 kthread_stop(discard_thread); 1843 } 1844 } 1845 1846 /** 1847 * f2fs_issue_discard_timeout() - Issue all discard cmd within UMOUNT_DISCARD_TIMEOUT 1848 * @sbi: the f2fs_sb_info data for discard cmd to issue 1849 * 1850 * When UMOUNT_DISCARD_TIMEOUT is exceeded, all remaining discard commands will be dropped 1851 * 1852 * Return true if issued all discard cmd or no discard cmd need issue, otherwise return false. 1853 */ 1854 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi) 1855 { 1856 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1857 struct discard_policy dpolicy; 1858 bool dropped; 1859 1860 if (!atomic_read(&dcc->discard_cmd_cnt)) 1861 return true; 1862 1863 __init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT, 1864 dcc->discard_granularity); 1865 __issue_discard_cmd(sbi, &dpolicy); 1866 dropped = __drop_discard_cmd(sbi); 1867 1868 /* just to make sure there is no pending discard commands */ 1869 __wait_all_discard_cmd(sbi, NULL); 1870 1871 f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt)); 1872 return !dropped; 1873 } 1874 1875 static int issue_discard_thread(void *data) 1876 { 1877 struct f2fs_sb_info *sbi = data; 1878 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1879 wait_queue_head_t *q = &dcc->discard_wait_queue; 1880 struct discard_policy dpolicy; 1881 unsigned int wait_ms = dcc->min_discard_issue_time; 1882 int issued; 1883 1884 set_freezable(); 1885 1886 do { 1887 wait_event_interruptible_timeout(*q, 1888 kthread_should_stop() || freezing(current) || 1889 dcc->discard_wake, 1890 msecs_to_jiffies(wait_ms)); 1891 1892 if (sbi->gc_mode == GC_URGENT_HIGH || 1893 !f2fs_available_free_memory(sbi, DISCARD_CACHE)) 1894 __init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1895 MIN_DISCARD_GRANULARITY); 1896 else 1897 __init_discard_policy(sbi, &dpolicy, DPOLICY_BG, 1898 dcc->discard_granularity); 1899 1900 if (dcc->discard_wake) 1901 dcc->discard_wake = false; 1902 1903 /* clean up pending candidates before going to sleep */ 1904 if (atomic_read(&dcc->queued_discard)) 1905 __wait_all_discard_cmd(sbi, NULL); 1906 1907 if (try_to_freeze()) 1908 continue; 1909 if (f2fs_readonly(sbi->sb)) 1910 continue; 1911 if (kthread_should_stop()) 1912 return 0; 1913 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) || 1914 !atomic_read(&dcc->discard_cmd_cnt)) { 1915 wait_ms = dpolicy.max_interval; 1916 continue; 1917 } 1918 1919 sb_start_intwrite(sbi->sb); 1920 1921 issued = __issue_discard_cmd(sbi, &dpolicy); 1922 if (issued > 0) { 1923 __wait_all_discard_cmd(sbi, &dpolicy); 1924 wait_ms = dpolicy.min_interval; 1925 } else if (issued == -1) { 1926 wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME); 1927 if (!wait_ms) 1928 wait_ms = dpolicy.mid_interval; 1929 } else { 1930 wait_ms = dpolicy.max_interval; 1931 } 1932 if (!atomic_read(&dcc->discard_cmd_cnt)) 1933 wait_ms = dpolicy.max_interval; 1934 1935 sb_end_intwrite(sbi->sb); 1936 1937 } while (!kthread_should_stop()); 1938 return 0; 1939 } 1940 1941 #ifdef CONFIG_BLK_DEV_ZONED 1942 static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi, 1943 struct block_device *bdev, block_t blkstart, block_t blklen) 1944 { 1945 sector_t sector, nr_sects; 1946 block_t lblkstart = blkstart; 1947 int devi = 0; 1948 u64 remainder = 0; 1949 1950 if (f2fs_is_multi_device(sbi)) { 1951 devi = f2fs_target_device_index(sbi, blkstart); 1952 if (blkstart < FDEV(devi).start_blk || 1953 blkstart > FDEV(devi).end_blk) { 1954 f2fs_err(sbi, "Invalid block %x", blkstart); 1955 return -EIO; 1956 } 1957 blkstart -= FDEV(devi).start_blk; 1958 } 1959 1960 /* For sequential zones, reset the zone write pointer */ 1961 if (f2fs_blkz_is_seq(sbi, devi, blkstart)) { 1962 sector = SECTOR_FROM_BLOCK(blkstart); 1963 nr_sects = SECTOR_FROM_BLOCK(blklen); 1964 div64_u64_rem(sector, bdev_zone_sectors(bdev), &remainder); 1965 1966 if (remainder || nr_sects != bdev_zone_sectors(bdev)) { 1967 f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)", 1968 devi, sbi->s_ndevs ? FDEV(devi).path : "", 1969 blkstart, blklen); 1970 return -EIO; 1971 } 1972 1973 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) { 1974 trace_f2fs_issue_reset_zone(bdev, blkstart); 1975 return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, 1976 sector, nr_sects, GFP_NOFS); 1977 } 1978 1979 __queue_zone_reset_cmd(sbi, bdev, blkstart, lblkstart, blklen); 1980 return 0; 1981 } 1982 1983 /* For conventional zones, use regular discard if supported */ 1984 __queue_discard_cmd(sbi, bdev, lblkstart, blklen); 1985 return 0; 1986 } 1987 #endif 1988 1989 static int __issue_discard_async(struct f2fs_sb_info *sbi, 1990 struct block_device *bdev, block_t blkstart, block_t blklen) 1991 { 1992 #ifdef CONFIG_BLK_DEV_ZONED 1993 if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) 1994 return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen); 1995 #endif 1996 __queue_discard_cmd(sbi, bdev, blkstart, blklen); 1997 return 0; 1998 } 1999 2000 static int f2fs_issue_discard(struct f2fs_sb_info *sbi, 2001 block_t blkstart, block_t blklen) 2002 { 2003 sector_t start = blkstart, len = 0; 2004 struct block_device *bdev; 2005 struct seg_entry *se; 2006 unsigned int offset; 2007 block_t i; 2008 int err = 0; 2009 2010 bdev = f2fs_target_device(sbi, blkstart, NULL); 2011 2012 for (i = blkstart; i < blkstart + blklen; i++, len++) { 2013 if (i != start) { 2014 struct block_device *bdev2 = 2015 f2fs_target_device(sbi, i, NULL); 2016 2017 if (bdev2 != bdev) { 2018 err = __issue_discard_async(sbi, bdev, 2019 start, len); 2020 if (err) 2021 return err; 2022 bdev = bdev2; 2023 start = i; 2024 len = 0; 2025 } 2026 } 2027 2028 se = get_seg_entry(sbi, GET_SEGNO(sbi, i)); 2029 offset = GET_BLKOFF_FROM_SEG0(sbi, i); 2030 2031 if (f2fs_block_unit_discard(sbi) && 2032 !f2fs_test_and_set_bit(offset, se->discard_map)) 2033 sbi->discard_blks--; 2034 } 2035 2036 if (len) 2037 err = __issue_discard_async(sbi, bdev, start, len); 2038 return err; 2039 } 2040 2041 static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc, 2042 bool check_only) 2043 { 2044 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); 2045 struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start); 2046 unsigned long *cur_map = (unsigned long *)se->cur_valid_map; 2047 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map; 2048 unsigned long *discard_map = (unsigned long *)se->discard_map; 2049 unsigned long *dmap = SIT_I(sbi)->tmp_map; 2050 unsigned int start = 0, end = -1; 2051 bool force = (cpc->reason & CP_DISCARD); 2052 struct discard_entry *de = NULL; 2053 struct list_head *head = &SM_I(sbi)->dcc_info->entry_list; 2054 int i; 2055 2056 if (se->valid_blocks == BLKS_PER_SEG(sbi) || 2057 !f2fs_hw_support_discard(sbi) || 2058 !f2fs_block_unit_discard(sbi)) 2059 return false; 2060 2061 if (!force) { 2062 if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks || 2063 SM_I(sbi)->dcc_info->nr_discards >= 2064 SM_I(sbi)->dcc_info->max_discards) 2065 return false; 2066 } 2067 2068 /* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */ 2069 for (i = 0; i < entries; i++) 2070 dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] : 2071 (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i]; 2072 2073 while (force || SM_I(sbi)->dcc_info->nr_discards <= 2074 SM_I(sbi)->dcc_info->max_discards) { 2075 start = __find_rev_next_bit(dmap, BLKS_PER_SEG(sbi), end + 1); 2076 if (start >= BLKS_PER_SEG(sbi)) 2077 break; 2078 2079 end = __find_rev_next_zero_bit(dmap, 2080 BLKS_PER_SEG(sbi), start + 1); 2081 if (force && start && end != BLKS_PER_SEG(sbi) && 2082 (end - start) < cpc->trim_minlen) 2083 continue; 2084 2085 if (check_only) 2086 return true; 2087 2088 if (!de) { 2089 de = f2fs_kmem_cache_alloc(discard_entry_slab, 2090 GFP_F2FS_ZERO, true, NULL); 2091 de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start); 2092 list_add_tail(&de->list, head); 2093 } 2094 2095 for (i = start; i < end; i++) 2096 __set_bit_le(i, (void *)de->discard_map); 2097 2098 SM_I(sbi)->dcc_info->nr_discards += end - start; 2099 } 2100 return false; 2101 } 2102 2103 static void release_discard_addr(struct discard_entry *entry) 2104 { 2105 list_del(&entry->list); 2106 kmem_cache_free(discard_entry_slab, entry); 2107 } 2108 2109 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi) 2110 { 2111 struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list); 2112 struct discard_entry *entry, *this; 2113 2114 /* drop caches */ 2115 list_for_each_entry_safe(entry, this, head, list) 2116 release_discard_addr(entry); 2117 } 2118 2119 /* 2120 * Should call f2fs_clear_prefree_segments after checkpoint is done. 2121 */ 2122 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi) 2123 { 2124 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 2125 unsigned int segno; 2126 2127 mutex_lock(&dirty_i->seglist_lock); 2128 for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi)) 2129 __set_test_and_free(sbi, segno, false); 2130 mutex_unlock(&dirty_i->seglist_lock); 2131 } 2132 2133 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi, 2134 struct cp_control *cpc) 2135 { 2136 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 2137 struct list_head *head = &dcc->entry_list; 2138 struct discard_entry *entry, *this; 2139 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 2140 unsigned long *prefree_map = dirty_i->dirty_segmap[PRE]; 2141 unsigned int start = 0, end = -1; 2142 unsigned int secno, start_segno; 2143 bool force = (cpc->reason & CP_DISCARD); 2144 bool section_alignment = F2FS_OPTION(sbi).discard_unit == 2145 DISCARD_UNIT_SECTION; 2146 2147 if (f2fs_lfs_mode(sbi) && __is_large_section(sbi)) 2148 section_alignment = true; 2149 2150 mutex_lock(&dirty_i->seglist_lock); 2151 2152 while (1) { 2153 int i; 2154 2155 if (section_alignment && end != -1) 2156 end--; 2157 start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1); 2158 if (start >= MAIN_SEGS(sbi)) 2159 break; 2160 end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi), 2161 start + 1); 2162 2163 if (section_alignment) { 2164 start = rounddown(start, SEGS_PER_SEC(sbi)); 2165 end = roundup(end, SEGS_PER_SEC(sbi)); 2166 } 2167 2168 for (i = start; i < end; i++) { 2169 if (test_and_clear_bit(i, prefree_map)) 2170 dirty_i->nr_dirty[PRE]--; 2171 } 2172 2173 if (!f2fs_realtime_discard_enable(sbi)) 2174 continue; 2175 2176 if (force && start >= cpc->trim_start && 2177 (end - 1) <= cpc->trim_end) 2178 continue; 2179 2180 /* Should cover 2MB zoned device for zone-based reset */ 2181 if (!f2fs_sb_has_blkzoned(sbi) && 2182 (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi))) { 2183 f2fs_issue_discard(sbi, START_BLOCK(sbi, start), 2184 (end - start) << sbi->log_blocks_per_seg); 2185 continue; 2186 } 2187 next: 2188 secno = GET_SEC_FROM_SEG(sbi, start); 2189 start_segno = GET_SEG_FROM_SEC(sbi, secno); 2190 if (!IS_CURSEC(sbi, secno) && 2191 !get_valid_blocks(sbi, start, true)) 2192 f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno), 2193 BLKS_PER_SEC(sbi)); 2194 2195 start = start_segno + SEGS_PER_SEC(sbi); 2196 if (start < end) 2197 goto next; 2198 else 2199 end = start - 1; 2200 } 2201 mutex_unlock(&dirty_i->seglist_lock); 2202 2203 if (!f2fs_block_unit_discard(sbi)) 2204 goto wakeup; 2205 2206 /* send small discards */ 2207 list_for_each_entry_safe(entry, this, head, list) { 2208 unsigned int cur_pos = 0, next_pos, len, total_len = 0; 2209 bool is_valid = test_bit_le(0, entry->discard_map); 2210 2211 find_next: 2212 if (is_valid) { 2213 next_pos = find_next_zero_bit_le(entry->discard_map, 2214 BLKS_PER_SEG(sbi), cur_pos); 2215 len = next_pos - cur_pos; 2216 2217 if (f2fs_sb_has_blkzoned(sbi) || 2218 (force && len < cpc->trim_minlen)) 2219 goto skip; 2220 2221 f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos, 2222 len); 2223 total_len += len; 2224 } else { 2225 next_pos = find_next_bit_le(entry->discard_map, 2226 BLKS_PER_SEG(sbi), cur_pos); 2227 } 2228 skip: 2229 cur_pos = next_pos; 2230 is_valid = !is_valid; 2231 2232 if (cur_pos < BLKS_PER_SEG(sbi)) 2233 goto find_next; 2234 2235 release_discard_addr(entry); 2236 dcc->nr_discards -= total_len; 2237 } 2238 2239 wakeup: 2240 wake_up_discard_thread(sbi, false); 2241 } 2242 2243 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi) 2244 { 2245 dev_t dev = sbi->sb->s_bdev->bd_dev; 2246 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 2247 int err = 0; 2248 2249 if (!f2fs_realtime_discard_enable(sbi)) 2250 return 0; 2251 2252 dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi, 2253 "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev)); 2254 if (IS_ERR(dcc->f2fs_issue_discard)) { 2255 err = PTR_ERR(dcc->f2fs_issue_discard); 2256 dcc->f2fs_issue_discard = NULL; 2257 } 2258 2259 return err; 2260 } 2261 2262 static int create_discard_cmd_control(struct f2fs_sb_info *sbi) 2263 { 2264 struct discard_cmd_control *dcc; 2265 int err = 0, i; 2266 2267 if (SM_I(sbi)->dcc_info) { 2268 dcc = SM_I(sbi)->dcc_info; 2269 goto init_thread; 2270 } 2271 2272 dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL); 2273 if (!dcc) 2274 return -ENOMEM; 2275 2276 dcc->discard_io_aware_gran = MAX_PLIST_NUM; 2277 dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY; 2278 dcc->max_ordered_discard = DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY; 2279 if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT) 2280 dcc->discard_granularity = BLKS_PER_SEG(sbi); 2281 else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION) 2282 dcc->discard_granularity = BLKS_PER_SEC(sbi); 2283 2284 INIT_LIST_HEAD(&dcc->entry_list); 2285 for (i = 0; i < MAX_PLIST_NUM; i++) 2286 INIT_LIST_HEAD(&dcc->pend_list[i]); 2287 INIT_LIST_HEAD(&dcc->wait_list); 2288 INIT_LIST_HEAD(&dcc->fstrim_list); 2289 mutex_init(&dcc->cmd_lock); 2290 atomic_set(&dcc->issued_discard, 0); 2291 atomic_set(&dcc->queued_discard, 0); 2292 atomic_set(&dcc->discard_cmd_cnt, 0); 2293 dcc->nr_discards = 0; 2294 dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg; 2295 dcc->max_discard_request = DEF_MAX_DISCARD_REQUEST; 2296 dcc->min_discard_issue_time = DEF_MIN_DISCARD_ISSUE_TIME; 2297 dcc->mid_discard_issue_time = DEF_MID_DISCARD_ISSUE_TIME; 2298 dcc->max_discard_issue_time = DEF_MAX_DISCARD_ISSUE_TIME; 2299 dcc->discard_urgent_util = DEF_DISCARD_URGENT_UTIL; 2300 dcc->undiscard_blks = 0; 2301 dcc->next_pos = 0; 2302 dcc->root = RB_ROOT_CACHED; 2303 dcc->rbtree_check = false; 2304 2305 init_waitqueue_head(&dcc->discard_wait_queue); 2306 SM_I(sbi)->dcc_info = dcc; 2307 init_thread: 2308 err = f2fs_start_discard_thread(sbi); 2309 if (err) { 2310 kfree(dcc); 2311 SM_I(sbi)->dcc_info = NULL; 2312 } 2313 2314 return err; 2315 } 2316 2317 static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi) 2318 { 2319 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 2320 2321 if (!dcc) 2322 return; 2323 2324 f2fs_stop_discard_thread(sbi); 2325 2326 /* 2327 * Recovery can cache discard commands, so in error path of 2328 * fill_super(), it needs to give a chance to handle them. 2329 */ 2330 f2fs_issue_discard_timeout(sbi); 2331 2332 kfree(dcc); 2333 SM_I(sbi)->dcc_info = NULL; 2334 } 2335 2336 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno) 2337 { 2338 struct sit_info *sit_i = SIT_I(sbi); 2339 2340 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) { 2341 sit_i->dirty_sentries++; 2342 return false; 2343 } 2344 2345 return true; 2346 } 2347 2348 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type, 2349 unsigned int segno, int modified) 2350 { 2351 struct seg_entry *se = get_seg_entry(sbi, segno); 2352 2353 se->type = type; 2354 if (modified) 2355 __mark_sit_entry_dirty(sbi, segno); 2356 } 2357 2358 static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi, 2359 block_t blkaddr) 2360 { 2361 unsigned int segno = GET_SEGNO(sbi, blkaddr); 2362 2363 if (segno == NULL_SEGNO) 2364 return 0; 2365 return get_seg_entry(sbi, segno)->mtime; 2366 } 2367 2368 static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr, 2369 unsigned long long old_mtime) 2370 { 2371 struct seg_entry *se; 2372 unsigned int segno = GET_SEGNO(sbi, blkaddr); 2373 unsigned long long ctime = get_mtime(sbi, false); 2374 unsigned long long mtime = old_mtime ? old_mtime : ctime; 2375 2376 if (segno == NULL_SEGNO) 2377 return; 2378 2379 se = get_seg_entry(sbi, segno); 2380 2381 if (!se->mtime) 2382 se->mtime = mtime; 2383 else 2384 se->mtime = div_u64(se->mtime * se->valid_blocks + mtime, 2385 se->valid_blocks + 1); 2386 2387 if (ctime > SIT_I(sbi)->max_mtime) 2388 SIT_I(sbi)->max_mtime = ctime; 2389 } 2390 2391 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) 2392 { 2393 struct seg_entry *se; 2394 unsigned int segno, offset; 2395 long int new_vblocks; 2396 bool exist; 2397 #ifdef CONFIG_F2FS_CHECK_FS 2398 bool mir_exist; 2399 #endif 2400 2401 segno = GET_SEGNO(sbi, blkaddr); 2402 2403 se = get_seg_entry(sbi, segno); 2404 new_vblocks = se->valid_blocks + del; 2405 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); 2406 2407 f2fs_bug_on(sbi, (new_vblocks < 0 || 2408 (new_vblocks > f2fs_usable_blks_in_seg(sbi, segno)))); 2409 2410 se->valid_blocks = new_vblocks; 2411 2412 /* Update valid block bitmap */ 2413 if (del > 0) { 2414 exist = f2fs_test_and_set_bit(offset, se->cur_valid_map); 2415 #ifdef CONFIG_F2FS_CHECK_FS 2416 mir_exist = f2fs_test_and_set_bit(offset, 2417 se->cur_valid_map_mir); 2418 if (unlikely(exist != mir_exist)) { 2419 f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d", 2420 blkaddr, exist); 2421 f2fs_bug_on(sbi, 1); 2422 } 2423 #endif 2424 if (unlikely(exist)) { 2425 f2fs_err(sbi, "Bitmap was wrongly set, blk:%u", 2426 blkaddr); 2427 f2fs_bug_on(sbi, 1); 2428 se->valid_blocks--; 2429 del = 0; 2430 } 2431 2432 if (f2fs_block_unit_discard(sbi) && 2433 !f2fs_test_and_set_bit(offset, se->discard_map)) 2434 sbi->discard_blks--; 2435 2436 /* 2437 * SSR should never reuse block which is checkpointed 2438 * or newly invalidated. 2439 */ 2440 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) { 2441 if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) 2442 se->ckpt_valid_blocks++; 2443 } 2444 } else { 2445 exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map); 2446 #ifdef CONFIG_F2FS_CHECK_FS 2447 mir_exist = f2fs_test_and_clear_bit(offset, 2448 se->cur_valid_map_mir); 2449 if (unlikely(exist != mir_exist)) { 2450 f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d", 2451 blkaddr, exist); 2452 f2fs_bug_on(sbi, 1); 2453 } 2454 #endif 2455 if (unlikely(!exist)) { 2456 f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u", 2457 blkaddr); 2458 f2fs_bug_on(sbi, 1); 2459 se->valid_blocks++; 2460 del = 0; 2461 } else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 2462 /* 2463 * If checkpoints are off, we must not reuse data that 2464 * was used in the previous checkpoint. If it was used 2465 * before, we must track that to know how much space we 2466 * really have. 2467 */ 2468 if (f2fs_test_bit(offset, se->ckpt_valid_map)) { 2469 spin_lock(&sbi->stat_lock); 2470 sbi->unusable_block_count++; 2471 spin_unlock(&sbi->stat_lock); 2472 } 2473 } 2474 2475 if (f2fs_block_unit_discard(sbi) && 2476 f2fs_test_and_clear_bit(offset, se->discard_map)) 2477 sbi->discard_blks++; 2478 } 2479 if (!f2fs_test_bit(offset, se->ckpt_valid_map)) 2480 se->ckpt_valid_blocks += del; 2481 2482 __mark_sit_entry_dirty(sbi, segno); 2483 2484 /* update total number of valid blocks to be written in ckpt area */ 2485 SIT_I(sbi)->written_valid_blocks += del; 2486 2487 if (__is_large_section(sbi)) 2488 get_sec_entry(sbi, segno)->valid_blocks += del; 2489 } 2490 2491 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) 2492 { 2493 unsigned int segno = GET_SEGNO(sbi, addr); 2494 struct sit_info *sit_i = SIT_I(sbi); 2495 2496 f2fs_bug_on(sbi, addr == NULL_ADDR); 2497 if (addr == NEW_ADDR || addr == COMPRESS_ADDR) 2498 return; 2499 2500 f2fs_invalidate_internal_cache(sbi, addr); 2501 2502 /* add it into sit main buffer */ 2503 down_write(&sit_i->sentry_lock); 2504 2505 update_segment_mtime(sbi, addr, 0); 2506 update_sit_entry(sbi, addr, -1); 2507 2508 /* add it into dirty seglist */ 2509 locate_dirty_segment(sbi, segno); 2510 2511 up_write(&sit_i->sentry_lock); 2512 } 2513 2514 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr) 2515 { 2516 struct sit_info *sit_i = SIT_I(sbi); 2517 unsigned int segno, offset; 2518 struct seg_entry *se; 2519 bool is_cp = false; 2520 2521 if (!__is_valid_data_blkaddr(blkaddr)) 2522 return true; 2523 2524 down_read(&sit_i->sentry_lock); 2525 2526 segno = GET_SEGNO(sbi, blkaddr); 2527 se = get_seg_entry(sbi, segno); 2528 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); 2529 2530 if (f2fs_test_bit(offset, se->ckpt_valid_map)) 2531 is_cp = true; 2532 2533 up_read(&sit_i->sentry_lock); 2534 2535 return is_cp; 2536 } 2537 2538 static unsigned short f2fs_curseg_valid_blocks(struct f2fs_sb_info *sbi, int type) 2539 { 2540 struct curseg_info *curseg = CURSEG_I(sbi, type); 2541 2542 if (sbi->ckpt->alloc_type[type] == SSR) 2543 return BLKS_PER_SEG(sbi); 2544 return curseg->next_blkoff; 2545 } 2546 2547 /* 2548 * Calculate the number of current summary pages for writing 2549 */ 2550 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra) 2551 { 2552 int valid_sum_count = 0; 2553 int i, sum_in_page; 2554 2555 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 2556 if (sbi->ckpt->alloc_type[i] != SSR && for_ra) 2557 valid_sum_count += 2558 le16_to_cpu(F2FS_CKPT(sbi)->cur_data_blkoff[i]); 2559 else 2560 valid_sum_count += f2fs_curseg_valid_blocks(sbi, i); 2561 } 2562 2563 sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE - 2564 SUM_FOOTER_SIZE) / SUMMARY_SIZE; 2565 if (valid_sum_count <= sum_in_page) 2566 return 1; 2567 else if ((valid_sum_count - sum_in_page) <= 2568 (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE) 2569 return 2; 2570 return 3; 2571 } 2572 2573 /* 2574 * Caller should put this summary page 2575 */ 2576 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno) 2577 { 2578 if (unlikely(f2fs_cp_error(sbi))) 2579 return ERR_PTR(-EIO); 2580 return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno)); 2581 } 2582 2583 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, 2584 void *src, block_t blk_addr) 2585 { 2586 struct page *page = f2fs_grab_meta_page(sbi, blk_addr); 2587 2588 memcpy(page_address(page), src, PAGE_SIZE); 2589 set_page_dirty(page); 2590 f2fs_put_page(page, 1); 2591 } 2592 2593 static void write_sum_page(struct f2fs_sb_info *sbi, 2594 struct f2fs_summary_block *sum_blk, block_t blk_addr) 2595 { 2596 f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr); 2597 } 2598 2599 static void write_current_sum_page(struct f2fs_sb_info *sbi, 2600 int type, block_t blk_addr) 2601 { 2602 struct curseg_info *curseg = CURSEG_I(sbi, type); 2603 struct page *page = f2fs_grab_meta_page(sbi, blk_addr); 2604 struct f2fs_summary_block *src = curseg->sum_blk; 2605 struct f2fs_summary_block *dst; 2606 2607 dst = (struct f2fs_summary_block *)page_address(page); 2608 memset(dst, 0, PAGE_SIZE); 2609 2610 mutex_lock(&curseg->curseg_mutex); 2611 2612 down_read(&curseg->journal_rwsem); 2613 memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE); 2614 up_read(&curseg->journal_rwsem); 2615 2616 memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE); 2617 memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE); 2618 2619 mutex_unlock(&curseg->curseg_mutex); 2620 2621 set_page_dirty(page); 2622 f2fs_put_page(page, 1); 2623 } 2624 2625 static int is_next_segment_free(struct f2fs_sb_info *sbi, 2626 struct curseg_info *curseg, int type) 2627 { 2628 unsigned int segno = curseg->segno + 1; 2629 struct free_segmap_info *free_i = FREE_I(sbi); 2630 2631 if (segno < MAIN_SEGS(sbi) && segno % SEGS_PER_SEC(sbi)) 2632 return !test_bit(segno, free_i->free_segmap); 2633 return 0; 2634 } 2635 2636 /* 2637 * Find a new segment from the free segments bitmap to right order 2638 * This function should be returned with success, otherwise BUG 2639 */ 2640 static void get_new_segment(struct f2fs_sb_info *sbi, 2641 unsigned int *newseg, bool new_sec, bool pinning) 2642 { 2643 struct free_segmap_info *free_i = FREE_I(sbi); 2644 unsigned int segno, secno, zoneno; 2645 unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone; 2646 unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg); 2647 unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg); 2648 bool init = true; 2649 int i; 2650 2651 spin_lock(&free_i->segmap_lock); 2652 2653 if (!new_sec && ((*newseg + 1) % SEGS_PER_SEC(sbi))) { 2654 segno = find_next_zero_bit(free_i->free_segmap, 2655 GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1); 2656 if (segno < GET_SEG_FROM_SEC(sbi, hint + 1)) 2657 goto got_it; 2658 } 2659 2660 /* 2661 * If we format f2fs on zoned storage, let's try to get pinned sections 2662 * from beginning of the storage, which should be a conventional one. 2663 */ 2664 if (f2fs_sb_has_blkzoned(sbi)) { 2665 segno = pinning ? 0 : max(first_zoned_segno(sbi), *newseg); 2666 hint = GET_SEC_FROM_SEG(sbi, segno); 2667 } 2668 2669 find_other_zone: 2670 secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint); 2671 if (secno >= MAIN_SECS(sbi)) { 2672 secno = find_first_zero_bit(free_i->free_secmap, 2673 MAIN_SECS(sbi)); 2674 f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi)); 2675 } 2676 segno = GET_SEG_FROM_SEC(sbi, secno); 2677 zoneno = GET_ZONE_FROM_SEC(sbi, secno); 2678 2679 /* give up on finding another zone */ 2680 if (!init) 2681 goto got_it; 2682 if (sbi->secs_per_zone == 1) 2683 goto got_it; 2684 if (zoneno == old_zoneno) 2685 goto got_it; 2686 for (i = 0; i < NR_CURSEG_TYPE; i++) 2687 if (CURSEG_I(sbi, i)->zone == zoneno) 2688 break; 2689 2690 if (i < NR_CURSEG_TYPE) { 2691 /* zone is in user, try another */ 2692 if (zoneno + 1 >= total_zones) 2693 hint = 0; 2694 else 2695 hint = (zoneno + 1) * sbi->secs_per_zone; 2696 init = false; 2697 goto find_other_zone; 2698 } 2699 got_it: 2700 /* set it as dirty segment in free segmap */ 2701 f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap)); 2702 __set_inuse(sbi, segno); 2703 *newseg = segno; 2704 spin_unlock(&free_i->segmap_lock); 2705 } 2706 2707 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified) 2708 { 2709 struct curseg_info *curseg = CURSEG_I(sbi, type); 2710 struct summary_footer *sum_footer; 2711 unsigned short seg_type = curseg->seg_type; 2712 2713 curseg->inited = true; 2714 curseg->segno = curseg->next_segno; 2715 curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno); 2716 curseg->next_blkoff = 0; 2717 curseg->next_segno = NULL_SEGNO; 2718 2719 sum_footer = &(curseg->sum_blk->footer); 2720 memset(sum_footer, 0, sizeof(struct summary_footer)); 2721 2722 sanity_check_seg_type(sbi, seg_type); 2723 2724 if (IS_DATASEG(seg_type)) 2725 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA); 2726 if (IS_NODESEG(seg_type)) 2727 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE); 2728 __set_sit_entry_type(sbi, seg_type, curseg->segno, modified); 2729 } 2730 2731 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type) 2732 { 2733 struct curseg_info *curseg = CURSEG_I(sbi, type); 2734 unsigned short seg_type = curseg->seg_type; 2735 2736 sanity_check_seg_type(sbi, seg_type); 2737 if (f2fs_need_rand_seg(sbi)) 2738 return get_random_u32_below(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi)); 2739 2740 if (__is_large_section(sbi)) 2741 return curseg->segno; 2742 2743 /* inmem log may not locate on any segment after mount */ 2744 if (!curseg->inited) 2745 return 0; 2746 2747 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 2748 return 0; 2749 2750 if (seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type)) 2751 return 0; 2752 2753 if (SIT_I(sbi)->last_victim[ALLOC_NEXT]) 2754 return SIT_I(sbi)->last_victim[ALLOC_NEXT]; 2755 2756 /* find segments from 0 to reuse freed segments */ 2757 if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE) 2758 return 0; 2759 2760 return curseg->segno; 2761 } 2762 2763 /* 2764 * Allocate a current working segment. 2765 * This function always allocates a free segment in LFS manner. 2766 */ 2767 static int new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec) 2768 { 2769 struct curseg_info *curseg = CURSEG_I(sbi, type); 2770 unsigned int segno = curseg->segno; 2771 bool pinning = type == CURSEG_COLD_DATA_PINNED; 2772 2773 if (curseg->inited) 2774 write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, segno)); 2775 2776 segno = __get_next_segno(sbi, type); 2777 get_new_segment(sbi, &segno, new_sec, pinning); 2778 if (new_sec && pinning && 2779 !f2fs_valid_pinned_area(sbi, START_BLOCK(sbi, segno))) { 2780 __set_free(sbi, segno); 2781 return -EAGAIN; 2782 } 2783 2784 curseg->next_segno = segno; 2785 reset_curseg(sbi, type, 1); 2786 curseg->alloc_type = LFS; 2787 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) 2788 curseg->fragment_remained_chunk = 2789 get_random_u32_inclusive(1, sbi->max_fragment_chunk); 2790 return 0; 2791 } 2792 2793 static int __next_free_blkoff(struct f2fs_sb_info *sbi, 2794 int segno, block_t start) 2795 { 2796 struct seg_entry *se = get_seg_entry(sbi, segno); 2797 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); 2798 unsigned long *target_map = SIT_I(sbi)->tmp_map; 2799 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map; 2800 unsigned long *cur_map = (unsigned long *)se->cur_valid_map; 2801 int i; 2802 2803 for (i = 0; i < entries; i++) 2804 target_map[i] = ckpt_map[i] | cur_map[i]; 2805 2806 return __find_rev_next_zero_bit(target_map, BLKS_PER_SEG(sbi), start); 2807 } 2808 2809 static int f2fs_find_next_ssr_block(struct f2fs_sb_info *sbi, 2810 struct curseg_info *seg) 2811 { 2812 return __next_free_blkoff(sbi, seg->segno, seg->next_blkoff + 1); 2813 } 2814 2815 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno) 2816 { 2817 return __next_free_blkoff(sbi, segno, 0) < BLKS_PER_SEG(sbi); 2818 } 2819 2820 /* 2821 * This function always allocates a used segment(from dirty seglist) by SSR 2822 * manner, so it should recover the existing segment information of valid blocks 2823 */ 2824 static void change_curseg(struct f2fs_sb_info *sbi, int type) 2825 { 2826 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 2827 struct curseg_info *curseg = CURSEG_I(sbi, type); 2828 unsigned int new_segno = curseg->next_segno; 2829 struct f2fs_summary_block *sum_node; 2830 struct page *sum_page; 2831 2832 write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno)); 2833 2834 __set_test_and_inuse(sbi, new_segno); 2835 2836 mutex_lock(&dirty_i->seglist_lock); 2837 __remove_dirty_segment(sbi, new_segno, PRE); 2838 __remove_dirty_segment(sbi, new_segno, DIRTY); 2839 mutex_unlock(&dirty_i->seglist_lock); 2840 2841 reset_curseg(sbi, type, 1); 2842 curseg->alloc_type = SSR; 2843 curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0); 2844 2845 sum_page = f2fs_get_sum_page(sbi, new_segno); 2846 if (IS_ERR(sum_page)) { 2847 /* GC won't be able to use stale summary pages by cp_error */ 2848 memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE); 2849 return; 2850 } 2851 sum_node = (struct f2fs_summary_block *)page_address(sum_page); 2852 memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE); 2853 f2fs_put_page(sum_page, 1); 2854 } 2855 2856 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type, 2857 int alloc_mode, unsigned long long age); 2858 2859 static void get_atssr_segment(struct f2fs_sb_info *sbi, int type, 2860 int target_type, int alloc_mode, 2861 unsigned long long age) 2862 { 2863 struct curseg_info *curseg = CURSEG_I(sbi, type); 2864 2865 curseg->seg_type = target_type; 2866 2867 if (get_ssr_segment(sbi, type, alloc_mode, age)) { 2868 struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno); 2869 2870 curseg->seg_type = se->type; 2871 change_curseg(sbi, type); 2872 } else { 2873 /* allocate cold segment by default */ 2874 curseg->seg_type = CURSEG_COLD_DATA; 2875 new_curseg(sbi, type, true); 2876 } 2877 stat_inc_seg_type(sbi, curseg); 2878 } 2879 2880 static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi) 2881 { 2882 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC); 2883 2884 if (!sbi->am.atgc_enabled) 2885 return; 2886 2887 f2fs_down_read(&SM_I(sbi)->curseg_lock); 2888 2889 mutex_lock(&curseg->curseg_mutex); 2890 down_write(&SIT_I(sbi)->sentry_lock); 2891 2892 get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC, CURSEG_COLD_DATA, SSR, 0); 2893 2894 up_write(&SIT_I(sbi)->sentry_lock); 2895 mutex_unlock(&curseg->curseg_mutex); 2896 2897 f2fs_up_read(&SM_I(sbi)->curseg_lock); 2898 2899 } 2900 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi) 2901 { 2902 __f2fs_init_atgc_curseg(sbi); 2903 } 2904 2905 static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type) 2906 { 2907 struct curseg_info *curseg = CURSEG_I(sbi, type); 2908 2909 mutex_lock(&curseg->curseg_mutex); 2910 if (!curseg->inited) 2911 goto out; 2912 2913 if (get_valid_blocks(sbi, curseg->segno, false)) { 2914 write_sum_page(sbi, curseg->sum_blk, 2915 GET_SUM_BLOCK(sbi, curseg->segno)); 2916 } else { 2917 mutex_lock(&DIRTY_I(sbi)->seglist_lock); 2918 __set_test_and_free(sbi, curseg->segno, true); 2919 mutex_unlock(&DIRTY_I(sbi)->seglist_lock); 2920 } 2921 out: 2922 mutex_unlock(&curseg->curseg_mutex); 2923 } 2924 2925 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi) 2926 { 2927 __f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED); 2928 2929 if (sbi->am.atgc_enabled) 2930 __f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC); 2931 } 2932 2933 static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type) 2934 { 2935 struct curseg_info *curseg = CURSEG_I(sbi, type); 2936 2937 mutex_lock(&curseg->curseg_mutex); 2938 if (!curseg->inited) 2939 goto out; 2940 if (get_valid_blocks(sbi, curseg->segno, false)) 2941 goto out; 2942 2943 mutex_lock(&DIRTY_I(sbi)->seglist_lock); 2944 __set_test_and_inuse(sbi, curseg->segno); 2945 mutex_unlock(&DIRTY_I(sbi)->seglist_lock); 2946 out: 2947 mutex_unlock(&curseg->curseg_mutex); 2948 } 2949 2950 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi) 2951 { 2952 __f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED); 2953 2954 if (sbi->am.atgc_enabled) 2955 __f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC); 2956 } 2957 2958 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type, 2959 int alloc_mode, unsigned long long age) 2960 { 2961 struct curseg_info *curseg = CURSEG_I(sbi, type); 2962 unsigned segno = NULL_SEGNO; 2963 unsigned short seg_type = curseg->seg_type; 2964 int i, cnt; 2965 bool reversed = false; 2966 2967 sanity_check_seg_type(sbi, seg_type); 2968 2969 /* f2fs_need_SSR() already forces to do this */ 2970 if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) { 2971 curseg->next_segno = segno; 2972 return 1; 2973 } 2974 2975 /* For node segments, let's do SSR more intensively */ 2976 if (IS_NODESEG(seg_type)) { 2977 if (seg_type >= CURSEG_WARM_NODE) { 2978 reversed = true; 2979 i = CURSEG_COLD_NODE; 2980 } else { 2981 i = CURSEG_HOT_NODE; 2982 } 2983 cnt = NR_CURSEG_NODE_TYPE; 2984 } else { 2985 if (seg_type >= CURSEG_WARM_DATA) { 2986 reversed = true; 2987 i = CURSEG_COLD_DATA; 2988 } else { 2989 i = CURSEG_HOT_DATA; 2990 } 2991 cnt = NR_CURSEG_DATA_TYPE; 2992 } 2993 2994 for (; cnt-- > 0; reversed ? i-- : i++) { 2995 if (i == seg_type) 2996 continue; 2997 if (!f2fs_get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) { 2998 curseg->next_segno = segno; 2999 return 1; 3000 } 3001 } 3002 3003 /* find valid_blocks=0 in dirty list */ 3004 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 3005 segno = get_free_segment(sbi); 3006 if (segno != NULL_SEGNO) { 3007 curseg->next_segno = segno; 3008 return 1; 3009 } 3010 } 3011 return 0; 3012 } 3013 3014 static bool need_new_seg(struct f2fs_sb_info *sbi, int type) 3015 { 3016 struct curseg_info *curseg = CURSEG_I(sbi, type); 3017 3018 if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) && 3019 curseg->seg_type == CURSEG_WARM_NODE) 3020 return true; 3021 if (curseg->alloc_type == LFS && 3022 is_next_segment_free(sbi, curseg, type) && 3023 likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 3024 return true; 3025 if (!f2fs_need_SSR(sbi) || !get_ssr_segment(sbi, type, SSR, 0)) 3026 return true; 3027 return false; 3028 } 3029 3030 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, 3031 unsigned int start, unsigned int end) 3032 { 3033 struct curseg_info *curseg = CURSEG_I(sbi, type); 3034 unsigned int segno; 3035 3036 f2fs_down_read(&SM_I(sbi)->curseg_lock); 3037 mutex_lock(&curseg->curseg_mutex); 3038 down_write(&SIT_I(sbi)->sentry_lock); 3039 3040 segno = CURSEG_I(sbi, type)->segno; 3041 if (segno < start || segno > end) 3042 goto unlock; 3043 3044 if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0)) 3045 change_curseg(sbi, type); 3046 else 3047 new_curseg(sbi, type, true); 3048 3049 stat_inc_seg_type(sbi, curseg); 3050 3051 locate_dirty_segment(sbi, segno); 3052 unlock: 3053 up_write(&SIT_I(sbi)->sentry_lock); 3054 3055 if (segno != curseg->segno) 3056 f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u", 3057 type, segno, curseg->segno); 3058 3059 mutex_unlock(&curseg->curseg_mutex); 3060 f2fs_up_read(&SM_I(sbi)->curseg_lock); 3061 } 3062 3063 static int __allocate_new_segment(struct f2fs_sb_info *sbi, int type, 3064 bool new_sec, bool force) 3065 { 3066 struct curseg_info *curseg = CURSEG_I(sbi, type); 3067 unsigned int old_segno; 3068 3069 if (!force && curseg->inited && 3070 !curseg->next_blkoff && 3071 !get_valid_blocks(sbi, curseg->segno, new_sec) && 3072 !get_ckpt_valid_blocks(sbi, curseg->segno, new_sec)) 3073 return 0; 3074 3075 old_segno = curseg->segno; 3076 if (new_curseg(sbi, type, true)) 3077 return -EAGAIN; 3078 stat_inc_seg_type(sbi, curseg); 3079 locate_dirty_segment(sbi, old_segno); 3080 return 0; 3081 } 3082 3083 int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force) 3084 { 3085 int ret; 3086 3087 f2fs_down_read(&SM_I(sbi)->curseg_lock); 3088 down_write(&SIT_I(sbi)->sentry_lock); 3089 ret = __allocate_new_segment(sbi, type, true, force); 3090 up_write(&SIT_I(sbi)->sentry_lock); 3091 f2fs_up_read(&SM_I(sbi)->curseg_lock); 3092 3093 return ret; 3094 } 3095 3096 int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi) 3097 { 3098 int err; 3099 bool gc_required = true; 3100 3101 retry: 3102 f2fs_lock_op(sbi); 3103 err = f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false); 3104 f2fs_unlock_op(sbi); 3105 3106 if (f2fs_sb_has_blkzoned(sbi) && err && gc_required) { 3107 f2fs_down_write(&sbi->gc_lock); 3108 f2fs_gc_range(sbi, 0, GET_SEGNO(sbi, FDEV(0).end_blk), true, 1); 3109 f2fs_up_write(&sbi->gc_lock); 3110 3111 gc_required = false; 3112 goto retry; 3113 } 3114 3115 return err; 3116 } 3117 3118 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi) 3119 { 3120 int i; 3121 3122 f2fs_down_read(&SM_I(sbi)->curseg_lock); 3123 down_write(&SIT_I(sbi)->sentry_lock); 3124 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) 3125 __allocate_new_segment(sbi, i, false, false); 3126 up_write(&SIT_I(sbi)->sentry_lock); 3127 f2fs_up_read(&SM_I(sbi)->curseg_lock); 3128 } 3129 3130 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, 3131 struct cp_control *cpc) 3132 { 3133 __u64 trim_start = cpc->trim_start; 3134 bool has_candidate = false; 3135 3136 down_write(&SIT_I(sbi)->sentry_lock); 3137 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) { 3138 if (add_discard_addrs(sbi, cpc, true)) { 3139 has_candidate = true; 3140 break; 3141 } 3142 } 3143 up_write(&SIT_I(sbi)->sentry_lock); 3144 3145 cpc->trim_start = trim_start; 3146 return has_candidate; 3147 } 3148 3149 static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi, 3150 struct discard_policy *dpolicy, 3151 unsigned int start, unsigned int end) 3152 { 3153 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 3154 struct discard_cmd *prev_dc = NULL, *next_dc = NULL; 3155 struct rb_node **insert_p = NULL, *insert_parent = NULL; 3156 struct discard_cmd *dc; 3157 struct blk_plug plug; 3158 int issued; 3159 unsigned int trimmed = 0; 3160 3161 next: 3162 issued = 0; 3163 3164 mutex_lock(&dcc->cmd_lock); 3165 if (unlikely(dcc->rbtree_check)) 3166 f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi)); 3167 3168 dc = __lookup_discard_cmd_ret(&dcc->root, start, 3169 &prev_dc, &next_dc, &insert_p, &insert_parent); 3170 if (!dc) 3171 dc = next_dc; 3172 3173 blk_start_plug(&plug); 3174 3175 while (dc && dc->di.lstart <= end) { 3176 struct rb_node *node; 3177 int err = 0; 3178 3179 if (dc->di.len < dpolicy->granularity) 3180 goto skip; 3181 3182 if (dc->state != D_PREP) { 3183 list_move_tail(&dc->list, &dcc->fstrim_list); 3184 goto skip; 3185 } 3186 3187 err = __submit_discard_cmd(sbi, dpolicy, dc, &issued); 3188 3189 if (issued >= dpolicy->max_requests) { 3190 start = dc->di.lstart + dc->di.len; 3191 3192 if (err) 3193 __remove_discard_cmd(sbi, dc); 3194 3195 blk_finish_plug(&plug); 3196 mutex_unlock(&dcc->cmd_lock); 3197 trimmed += __wait_all_discard_cmd(sbi, NULL); 3198 f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT); 3199 goto next; 3200 } 3201 skip: 3202 node = rb_next(&dc->rb_node); 3203 if (err) 3204 __remove_discard_cmd(sbi, dc); 3205 dc = rb_entry_safe(node, struct discard_cmd, rb_node); 3206 3207 if (fatal_signal_pending(current)) 3208 break; 3209 } 3210 3211 blk_finish_plug(&plug); 3212 mutex_unlock(&dcc->cmd_lock); 3213 3214 return trimmed; 3215 } 3216 3217 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) 3218 { 3219 __u64 start = F2FS_BYTES_TO_BLK(range->start); 3220 __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1; 3221 unsigned int start_segno, end_segno; 3222 block_t start_block, end_block; 3223 struct cp_control cpc; 3224 struct discard_policy dpolicy; 3225 unsigned long long trimmed = 0; 3226 int err = 0; 3227 bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi); 3228 3229 if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize) 3230 return -EINVAL; 3231 3232 if (end < MAIN_BLKADDR(sbi)) 3233 goto out; 3234 3235 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) { 3236 f2fs_warn(sbi, "Found FS corruption, run fsck to fix."); 3237 return -EFSCORRUPTED; 3238 } 3239 3240 /* start/end segment number in main_area */ 3241 start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start); 3242 end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 : 3243 GET_SEGNO(sbi, end); 3244 if (need_align) { 3245 start_segno = rounddown(start_segno, SEGS_PER_SEC(sbi)); 3246 end_segno = roundup(end_segno + 1, SEGS_PER_SEC(sbi)) - 1; 3247 } 3248 3249 cpc.reason = CP_DISCARD; 3250 cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen)); 3251 cpc.trim_start = start_segno; 3252 cpc.trim_end = end_segno; 3253 3254 if (sbi->discard_blks == 0) 3255 goto out; 3256 3257 f2fs_down_write(&sbi->gc_lock); 3258 stat_inc_cp_call_count(sbi, TOTAL_CALL); 3259 err = f2fs_write_checkpoint(sbi, &cpc); 3260 f2fs_up_write(&sbi->gc_lock); 3261 if (err) 3262 goto out; 3263 3264 /* 3265 * We filed discard candidates, but actually we don't need to wait for 3266 * all of them, since they'll be issued in idle time along with runtime 3267 * discard option. User configuration looks like using runtime discard 3268 * or periodic fstrim instead of it. 3269 */ 3270 if (f2fs_realtime_discard_enable(sbi)) 3271 goto out; 3272 3273 start_block = START_BLOCK(sbi, start_segno); 3274 end_block = START_BLOCK(sbi, end_segno + 1); 3275 3276 __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen); 3277 trimmed = __issue_discard_cmd_range(sbi, &dpolicy, 3278 start_block, end_block); 3279 3280 trimmed += __wait_discard_cmd_range(sbi, &dpolicy, 3281 start_block, end_block); 3282 out: 3283 if (!err) 3284 range->len = F2FS_BLK_TO_BYTES(trimmed); 3285 return err; 3286 } 3287 3288 int f2fs_rw_hint_to_seg_type(enum rw_hint hint) 3289 { 3290 switch (hint) { 3291 case WRITE_LIFE_SHORT: 3292 return CURSEG_HOT_DATA; 3293 case WRITE_LIFE_EXTREME: 3294 return CURSEG_COLD_DATA; 3295 default: 3296 return CURSEG_WARM_DATA; 3297 } 3298 } 3299 3300 static int __get_segment_type_2(struct f2fs_io_info *fio) 3301 { 3302 if (fio->type == DATA) 3303 return CURSEG_HOT_DATA; 3304 else 3305 return CURSEG_HOT_NODE; 3306 } 3307 3308 static int __get_segment_type_4(struct f2fs_io_info *fio) 3309 { 3310 if (fio->type == DATA) { 3311 struct inode *inode = fio->page->mapping->host; 3312 3313 if (S_ISDIR(inode->i_mode)) 3314 return CURSEG_HOT_DATA; 3315 else 3316 return CURSEG_COLD_DATA; 3317 } else { 3318 if (IS_DNODE(fio->page) && is_cold_node(fio->page)) 3319 return CURSEG_WARM_NODE; 3320 else 3321 return CURSEG_COLD_NODE; 3322 } 3323 } 3324 3325 static int __get_age_segment_type(struct inode *inode, pgoff_t pgofs) 3326 { 3327 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3328 struct extent_info ei = {}; 3329 3330 if (f2fs_lookup_age_extent_cache(inode, pgofs, &ei)) { 3331 if (!ei.age) 3332 return NO_CHECK_TYPE; 3333 if (ei.age <= sbi->hot_data_age_threshold) 3334 return CURSEG_HOT_DATA; 3335 if (ei.age <= sbi->warm_data_age_threshold) 3336 return CURSEG_WARM_DATA; 3337 return CURSEG_COLD_DATA; 3338 } 3339 return NO_CHECK_TYPE; 3340 } 3341 3342 static int __get_segment_type_6(struct f2fs_io_info *fio) 3343 { 3344 if (fio->type == DATA) { 3345 struct inode *inode = fio->page->mapping->host; 3346 int type; 3347 3348 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE)) 3349 return CURSEG_COLD_DATA_PINNED; 3350 3351 if (page_private_gcing(fio->page)) { 3352 if (fio->sbi->am.atgc_enabled && 3353 (fio->io_type == FS_DATA_IO) && 3354 (fio->sbi->gc_mode != GC_URGENT_HIGH)) 3355 return CURSEG_ALL_DATA_ATGC; 3356 else 3357 return CURSEG_COLD_DATA; 3358 } 3359 if (file_is_cold(inode) || f2fs_need_compress_data(inode)) 3360 return CURSEG_COLD_DATA; 3361 3362 type = __get_age_segment_type(inode, fio->page->index); 3363 if (type != NO_CHECK_TYPE) 3364 return type; 3365 3366 if (file_is_hot(inode) || 3367 is_inode_flag_set(inode, FI_HOT_DATA) || 3368 f2fs_is_cow_file(inode)) 3369 return CURSEG_HOT_DATA; 3370 return f2fs_rw_hint_to_seg_type(inode->i_write_hint); 3371 } else { 3372 if (IS_DNODE(fio->page)) 3373 return is_cold_node(fio->page) ? CURSEG_WARM_NODE : 3374 CURSEG_HOT_NODE; 3375 return CURSEG_COLD_NODE; 3376 } 3377 } 3378 3379 static int __get_segment_type(struct f2fs_io_info *fio) 3380 { 3381 int type = 0; 3382 3383 switch (F2FS_OPTION(fio->sbi).active_logs) { 3384 case 2: 3385 type = __get_segment_type_2(fio); 3386 break; 3387 case 4: 3388 type = __get_segment_type_4(fio); 3389 break; 3390 case 6: 3391 type = __get_segment_type_6(fio); 3392 break; 3393 default: 3394 f2fs_bug_on(fio->sbi, true); 3395 } 3396 3397 if (IS_HOT(type)) 3398 fio->temp = HOT; 3399 else if (IS_WARM(type)) 3400 fio->temp = WARM; 3401 else 3402 fio->temp = COLD; 3403 return type; 3404 } 3405 3406 static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi, 3407 struct curseg_info *seg) 3408 { 3409 /* To allocate block chunks in different sizes, use random number */ 3410 if (--seg->fragment_remained_chunk > 0) 3411 return; 3412 3413 seg->fragment_remained_chunk = 3414 get_random_u32_inclusive(1, sbi->max_fragment_chunk); 3415 seg->next_blkoff += 3416 get_random_u32_inclusive(1, sbi->max_fragment_hole); 3417 } 3418 3419 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, 3420 block_t old_blkaddr, block_t *new_blkaddr, 3421 struct f2fs_summary *sum, int type, 3422 struct f2fs_io_info *fio) 3423 { 3424 struct sit_info *sit_i = SIT_I(sbi); 3425 struct curseg_info *curseg = CURSEG_I(sbi, type); 3426 unsigned long long old_mtime; 3427 bool from_gc = (type == CURSEG_ALL_DATA_ATGC); 3428 struct seg_entry *se = NULL; 3429 bool segment_full = false; 3430 3431 f2fs_down_read(&SM_I(sbi)->curseg_lock); 3432 3433 mutex_lock(&curseg->curseg_mutex); 3434 down_write(&sit_i->sentry_lock); 3435 3436 if (from_gc) { 3437 f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO); 3438 se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr)); 3439 sanity_check_seg_type(sbi, se->type); 3440 f2fs_bug_on(sbi, IS_NODESEG(se->type)); 3441 } 3442 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); 3443 3444 f2fs_bug_on(sbi, curseg->next_blkoff >= BLKS_PER_SEG(sbi)); 3445 3446 f2fs_wait_discard_bio(sbi, *new_blkaddr); 3447 3448 curseg->sum_blk->entries[curseg->next_blkoff] = *sum; 3449 if (curseg->alloc_type == SSR) { 3450 curseg->next_blkoff = f2fs_find_next_ssr_block(sbi, curseg); 3451 } else { 3452 curseg->next_blkoff++; 3453 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) 3454 f2fs_randomize_chunk(sbi, curseg); 3455 } 3456 if (curseg->next_blkoff >= f2fs_usable_blks_in_seg(sbi, curseg->segno)) 3457 segment_full = true; 3458 stat_inc_block_count(sbi, curseg); 3459 3460 if (from_gc) { 3461 old_mtime = get_segment_mtime(sbi, old_blkaddr); 3462 } else { 3463 update_segment_mtime(sbi, old_blkaddr, 0); 3464 old_mtime = 0; 3465 } 3466 update_segment_mtime(sbi, *new_blkaddr, old_mtime); 3467 3468 /* 3469 * SIT information should be updated before segment allocation, 3470 * since SSR needs latest valid block information. 3471 */ 3472 update_sit_entry(sbi, *new_blkaddr, 1); 3473 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) 3474 update_sit_entry(sbi, old_blkaddr, -1); 3475 3476 /* 3477 * If the current segment is full, flush it out and replace it with a 3478 * new segment. 3479 */ 3480 if (segment_full) { 3481 if (type == CURSEG_COLD_DATA_PINNED && 3482 !((curseg->segno + 1) % sbi->segs_per_sec)) 3483 goto skip_new_segment; 3484 3485 if (from_gc) { 3486 get_atssr_segment(sbi, type, se->type, 3487 AT_SSR, se->mtime); 3488 } else { 3489 if (need_new_seg(sbi, type)) 3490 new_curseg(sbi, type, false); 3491 else 3492 change_curseg(sbi, type); 3493 stat_inc_seg_type(sbi, curseg); 3494 } 3495 } 3496 3497 skip_new_segment: 3498 /* 3499 * segment dirty status should be updated after segment allocation, 3500 * so we just need to update status only one time after previous 3501 * segment being closed. 3502 */ 3503 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); 3504 locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr)); 3505 3506 if (IS_DATASEG(curseg->seg_type)) 3507 atomic64_inc(&sbi->allocated_data_blocks); 3508 3509 up_write(&sit_i->sentry_lock); 3510 3511 if (page && IS_NODESEG(curseg->seg_type)) { 3512 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg)); 3513 3514 f2fs_inode_chksum_set(sbi, page); 3515 } 3516 3517 if (fio) { 3518 struct f2fs_bio_info *io; 3519 3520 INIT_LIST_HEAD(&fio->list); 3521 fio->in_list = 1; 3522 io = sbi->write_io[fio->type] + fio->temp; 3523 spin_lock(&io->io_lock); 3524 list_add_tail(&fio->list, &io->io_list); 3525 spin_unlock(&io->io_lock); 3526 } 3527 3528 mutex_unlock(&curseg->curseg_mutex); 3529 3530 f2fs_up_read(&SM_I(sbi)->curseg_lock); 3531 } 3532 3533 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino, 3534 block_t blkaddr, unsigned int blkcnt) 3535 { 3536 if (!f2fs_is_multi_device(sbi)) 3537 return; 3538 3539 while (1) { 3540 unsigned int devidx = f2fs_target_device_index(sbi, blkaddr); 3541 unsigned int blks = FDEV(devidx).end_blk - blkaddr + 1; 3542 3543 /* update device state for fsync */ 3544 f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO); 3545 3546 /* update device state for checkpoint */ 3547 if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) { 3548 spin_lock(&sbi->dev_lock); 3549 f2fs_set_bit(devidx, (char *)&sbi->dirty_device); 3550 spin_unlock(&sbi->dev_lock); 3551 } 3552 3553 if (blkcnt <= blks) 3554 break; 3555 blkcnt -= blks; 3556 blkaddr += blks; 3557 } 3558 } 3559 3560 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio) 3561 { 3562 int type = __get_segment_type(fio); 3563 bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA); 3564 3565 if (keep_order) 3566 f2fs_down_read(&fio->sbi->io_order_lock); 3567 3568 f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr, 3569 &fio->new_blkaddr, sum, type, fio); 3570 if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) 3571 f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr); 3572 3573 /* writeout dirty page into bdev */ 3574 f2fs_submit_page_write(fio); 3575 3576 f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1); 3577 3578 if (keep_order) 3579 f2fs_up_read(&fio->sbi->io_order_lock); 3580 } 3581 3582 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page, 3583 enum iostat_type io_type) 3584 { 3585 struct f2fs_io_info fio = { 3586 .sbi = sbi, 3587 .type = META, 3588 .temp = HOT, 3589 .op = REQ_OP_WRITE, 3590 .op_flags = REQ_SYNC | REQ_META | REQ_PRIO, 3591 .old_blkaddr = page->index, 3592 .new_blkaddr = page->index, 3593 .page = page, 3594 .encrypted_page = NULL, 3595 .in_list = 0, 3596 }; 3597 3598 if (unlikely(page->index >= MAIN_BLKADDR(sbi))) 3599 fio.op_flags &= ~REQ_META; 3600 3601 set_page_writeback(page); 3602 f2fs_submit_page_write(&fio); 3603 3604 stat_inc_meta_count(sbi, page->index); 3605 f2fs_update_iostat(sbi, NULL, io_type, F2FS_BLKSIZE); 3606 } 3607 3608 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio) 3609 { 3610 struct f2fs_summary sum; 3611 3612 set_summary(&sum, nid, 0, 0); 3613 do_write_page(&sum, fio); 3614 3615 f2fs_update_iostat(fio->sbi, NULL, fio->io_type, F2FS_BLKSIZE); 3616 } 3617 3618 void f2fs_outplace_write_data(struct dnode_of_data *dn, 3619 struct f2fs_io_info *fio) 3620 { 3621 struct f2fs_sb_info *sbi = fio->sbi; 3622 struct f2fs_summary sum; 3623 3624 f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR); 3625 if (fio->io_type == FS_DATA_IO || fio->io_type == FS_CP_DATA_IO) 3626 f2fs_update_age_extent_cache(dn); 3627 set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version); 3628 do_write_page(&sum, fio); 3629 f2fs_update_data_blkaddr(dn, fio->new_blkaddr); 3630 3631 f2fs_update_iostat(sbi, dn->inode, fio->io_type, F2FS_BLKSIZE); 3632 } 3633 3634 int f2fs_inplace_write_data(struct f2fs_io_info *fio) 3635 { 3636 int err; 3637 struct f2fs_sb_info *sbi = fio->sbi; 3638 unsigned int segno; 3639 3640 fio->new_blkaddr = fio->old_blkaddr; 3641 /* i/o temperature is needed for passing down write hints */ 3642 __get_segment_type(fio); 3643 3644 segno = GET_SEGNO(sbi, fio->new_blkaddr); 3645 3646 if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) { 3647 set_sbi_flag(sbi, SBI_NEED_FSCK); 3648 f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.", 3649 __func__, segno); 3650 err = -EFSCORRUPTED; 3651 f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE); 3652 goto drop_bio; 3653 } 3654 3655 if (f2fs_cp_error(sbi)) { 3656 err = -EIO; 3657 goto drop_bio; 3658 } 3659 3660 if (fio->post_read) 3661 f2fs_truncate_meta_inode_pages(sbi, fio->new_blkaddr, 1); 3662 3663 stat_inc_inplace_blocks(fio->sbi); 3664 3665 if (fio->bio && !IS_F2FS_IPU_NOCACHE(sbi)) 3666 err = f2fs_merge_page_bio(fio); 3667 else 3668 err = f2fs_submit_page_bio(fio); 3669 if (!err) { 3670 f2fs_update_device_state(fio->sbi, fio->ino, 3671 fio->new_blkaddr, 1); 3672 f2fs_update_iostat(fio->sbi, fio->page->mapping->host, 3673 fio->io_type, F2FS_BLKSIZE); 3674 } 3675 3676 return err; 3677 drop_bio: 3678 if (fio->bio && *(fio->bio)) { 3679 struct bio *bio = *(fio->bio); 3680 3681 bio->bi_status = BLK_STS_IOERR; 3682 bio_endio(bio); 3683 *(fio->bio) = NULL; 3684 } 3685 return err; 3686 } 3687 3688 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi, 3689 unsigned int segno) 3690 { 3691 int i; 3692 3693 for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) { 3694 if (CURSEG_I(sbi, i)->segno == segno) 3695 break; 3696 } 3697 return i; 3698 } 3699 3700 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, 3701 block_t old_blkaddr, block_t new_blkaddr, 3702 bool recover_curseg, bool recover_newaddr, 3703 bool from_gc) 3704 { 3705 struct sit_info *sit_i = SIT_I(sbi); 3706 struct curseg_info *curseg; 3707 unsigned int segno, old_cursegno; 3708 struct seg_entry *se; 3709 int type; 3710 unsigned short old_blkoff; 3711 unsigned char old_alloc_type; 3712 3713 segno = GET_SEGNO(sbi, new_blkaddr); 3714 se = get_seg_entry(sbi, segno); 3715 type = se->type; 3716 3717 f2fs_down_write(&SM_I(sbi)->curseg_lock); 3718 3719 if (!recover_curseg) { 3720 /* for recovery flow */ 3721 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) { 3722 if (old_blkaddr == NULL_ADDR) 3723 type = CURSEG_COLD_DATA; 3724 else 3725 type = CURSEG_WARM_DATA; 3726 } 3727 } else { 3728 if (IS_CURSEG(sbi, segno)) { 3729 /* se->type is volatile as SSR allocation */ 3730 type = __f2fs_get_curseg(sbi, segno); 3731 f2fs_bug_on(sbi, type == NO_CHECK_TYPE); 3732 } else { 3733 type = CURSEG_WARM_DATA; 3734 } 3735 } 3736 3737 f2fs_bug_on(sbi, !IS_DATASEG(type)); 3738 curseg = CURSEG_I(sbi, type); 3739 3740 mutex_lock(&curseg->curseg_mutex); 3741 down_write(&sit_i->sentry_lock); 3742 3743 old_cursegno = curseg->segno; 3744 old_blkoff = curseg->next_blkoff; 3745 old_alloc_type = curseg->alloc_type; 3746 3747 /* change the current segment */ 3748 if (segno != curseg->segno) { 3749 curseg->next_segno = segno; 3750 change_curseg(sbi, type); 3751 } 3752 3753 curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr); 3754 curseg->sum_blk->entries[curseg->next_blkoff] = *sum; 3755 3756 if (!recover_curseg || recover_newaddr) { 3757 if (!from_gc) 3758 update_segment_mtime(sbi, new_blkaddr, 0); 3759 update_sit_entry(sbi, new_blkaddr, 1); 3760 } 3761 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) { 3762 f2fs_invalidate_internal_cache(sbi, old_blkaddr); 3763 if (!from_gc) 3764 update_segment_mtime(sbi, old_blkaddr, 0); 3765 update_sit_entry(sbi, old_blkaddr, -1); 3766 } 3767 3768 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); 3769 locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr)); 3770 3771 locate_dirty_segment(sbi, old_cursegno); 3772 3773 if (recover_curseg) { 3774 if (old_cursegno != curseg->segno) { 3775 curseg->next_segno = old_cursegno; 3776 change_curseg(sbi, type); 3777 } 3778 curseg->next_blkoff = old_blkoff; 3779 curseg->alloc_type = old_alloc_type; 3780 } 3781 3782 up_write(&sit_i->sentry_lock); 3783 mutex_unlock(&curseg->curseg_mutex); 3784 f2fs_up_write(&SM_I(sbi)->curseg_lock); 3785 } 3786 3787 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, 3788 block_t old_addr, block_t new_addr, 3789 unsigned char version, bool recover_curseg, 3790 bool recover_newaddr) 3791 { 3792 struct f2fs_summary sum; 3793 3794 set_summary(&sum, dn->nid, dn->ofs_in_node, version); 3795 3796 f2fs_do_replace_block(sbi, &sum, old_addr, new_addr, 3797 recover_curseg, recover_newaddr, false); 3798 3799 f2fs_update_data_blkaddr(dn, new_addr); 3800 } 3801 3802 void f2fs_wait_on_page_writeback(struct page *page, 3803 enum page_type type, bool ordered, bool locked) 3804 { 3805 if (PageWriteback(page)) { 3806 struct f2fs_sb_info *sbi = F2FS_P_SB(page); 3807 3808 /* submit cached LFS IO */ 3809 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type); 3810 /* submit cached IPU IO */ 3811 f2fs_submit_merged_ipu_write(sbi, NULL, page); 3812 if (ordered) { 3813 wait_on_page_writeback(page); 3814 f2fs_bug_on(sbi, locked && PageWriteback(page)); 3815 } else { 3816 wait_for_stable_page(page); 3817 } 3818 } 3819 } 3820 3821 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr) 3822 { 3823 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3824 struct page *cpage; 3825 3826 if (!f2fs_post_read_required(inode)) 3827 return; 3828 3829 if (!__is_valid_data_blkaddr(blkaddr)) 3830 return; 3831 3832 cpage = find_lock_page(META_MAPPING(sbi), blkaddr); 3833 if (cpage) { 3834 f2fs_wait_on_page_writeback(cpage, DATA, true, true); 3835 f2fs_put_page(cpage, 1); 3836 } 3837 } 3838 3839 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr, 3840 block_t len) 3841 { 3842 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3843 block_t i; 3844 3845 if (!f2fs_post_read_required(inode)) 3846 return; 3847 3848 for (i = 0; i < len; i++) 3849 f2fs_wait_on_block_writeback(inode, blkaddr + i); 3850 3851 f2fs_truncate_meta_inode_pages(sbi, blkaddr, len); 3852 } 3853 3854 static int read_compacted_summaries(struct f2fs_sb_info *sbi) 3855 { 3856 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 3857 struct curseg_info *seg_i; 3858 unsigned char *kaddr; 3859 struct page *page; 3860 block_t start; 3861 int i, j, offset; 3862 3863 start = start_sum_block(sbi); 3864 3865 page = f2fs_get_meta_page(sbi, start++); 3866 if (IS_ERR(page)) 3867 return PTR_ERR(page); 3868 kaddr = (unsigned char *)page_address(page); 3869 3870 /* Step 1: restore nat cache */ 3871 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); 3872 memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE); 3873 3874 /* Step 2: restore sit cache */ 3875 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); 3876 memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE); 3877 offset = 2 * SUM_JOURNAL_SIZE; 3878 3879 /* Step 3: restore summary entries */ 3880 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 3881 unsigned short blk_off; 3882 unsigned int segno; 3883 3884 seg_i = CURSEG_I(sbi, i); 3885 segno = le32_to_cpu(ckpt->cur_data_segno[i]); 3886 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]); 3887 seg_i->next_segno = segno; 3888 reset_curseg(sbi, i, 0); 3889 seg_i->alloc_type = ckpt->alloc_type[i]; 3890 seg_i->next_blkoff = blk_off; 3891 3892 if (seg_i->alloc_type == SSR) 3893 blk_off = BLKS_PER_SEG(sbi); 3894 3895 for (j = 0; j < blk_off; j++) { 3896 struct f2fs_summary *s; 3897 3898 s = (struct f2fs_summary *)(kaddr + offset); 3899 seg_i->sum_blk->entries[j] = *s; 3900 offset += SUMMARY_SIZE; 3901 if (offset + SUMMARY_SIZE <= PAGE_SIZE - 3902 SUM_FOOTER_SIZE) 3903 continue; 3904 3905 f2fs_put_page(page, 1); 3906 page = NULL; 3907 3908 page = f2fs_get_meta_page(sbi, start++); 3909 if (IS_ERR(page)) 3910 return PTR_ERR(page); 3911 kaddr = (unsigned char *)page_address(page); 3912 offset = 0; 3913 } 3914 } 3915 f2fs_put_page(page, 1); 3916 return 0; 3917 } 3918 3919 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) 3920 { 3921 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 3922 struct f2fs_summary_block *sum; 3923 struct curseg_info *curseg; 3924 struct page *new; 3925 unsigned short blk_off; 3926 unsigned int segno = 0; 3927 block_t blk_addr = 0; 3928 int err = 0; 3929 3930 /* get segment number and block addr */ 3931 if (IS_DATASEG(type)) { 3932 segno = le32_to_cpu(ckpt->cur_data_segno[type]); 3933 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type - 3934 CURSEG_HOT_DATA]); 3935 if (__exist_node_summaries(sbi)) 3936 blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type); 3937 else 3938 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type); 3939 } else { 3940 segno = le32_to_cpu(ckpt->cur_node_segno[type - 3941 CURSEG_HOT_NODE]); 3942 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type - 3943 CURSEG_HOT_NODE]); 3944 if (__exist_node_summaries(sbi)) 3945 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE, 3946 type - CURSEG_HOT_NODE); 3947 else 3948 blk_addr = GET_SUM_BLOCK(sbi, segno); 3949 } 3950 3951 new = f2fs_get_meta_page(sbi, blk_addr); 3952 if (IS_ERR(new)) 3953 return PTR_ERR(new); 3954 sum = (struct f2fs_summary_block *)page_address(new); 3955 3956 if (IS_NODESEG(type)) { 3957 if (__exist_node_summaries(sbi)) { 3958 struct f2fs_summary *ns = &sum->entries[0]; 3959 int i; 3960 3961 for (i = 0; i < BLKS_PER_SEG(sbi); i++, ns++) { 3962 ns->version = 0; 3963 ns->ofs_in_node = 0; 3964 } 3965 } else { 3966 err = f2fs_restore_node_summary(sbi, segno, sum); 3967 if (err) 3968 goto out; 3969 } 3970 } 3971 3972 /* set uncompleted segment to curseg */ 3973 curseg = CURSEG_I(sbi, type); 3974 mutex_lock(&curseg->curseg_mutex); 3975 3976 /* update journal info */ 3977 down_write(&curseg->journal_rwsem); 3978 memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE); 3979 up_write(&curseg->journal_rwsem); 3980 3981 memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE); 3982 memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE); 3983 curseg->next_segno = segno; 3984 reset_curseg(sbi, type, 0); 3985 curseg->alloc_type = ckpt->alloc_type[type]; 3986 curseg->next_blkoff = blk_off; 3987 mutex_unlock(&curseg->curseg_mutex); 3988 out: 3989 f2fs_put_page(new, 1); 3990 return err; 3991 } 3992 3993 static int restore_curseg_summaries(struct f2fs_sb_info *sbi) 3994 { 3995 struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal; 3996 struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal; 3997 int type = CURSEG_HOT_DATA; 3998 int err; 3999 4000 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) { 4001 int npages = f2fs_npages_for_summary_flush(sbi, true); 4002 4003 if (npages >= 2) 4004 f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages, 4005 META_CP, true); 4006 4007 /* restore for compacted data summary */ 4008 err = read_compacted_summaries(sbi); 4009 if (err) 4010 return err; 4011 type = CURSEG_HOT_NODE; 4012 } 4013 4014 if (__exist_node_summaries(sbi)) 4015 f2fs_ra_meta_pages(sbi, 4016 sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type), 4017 NR_CURSEG_PERSIST_TYPE - type, META_CP, true); 4018 4019 for (; type <= CURSEG_COLD_NODE; type++) { 4020 err = read_normal_summaries(sbi, type); 4021 if (err) 4022 return err; 4023 } 4024 4025 /* sanity check for summary blocks */ 4026 if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES || 4027 sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) { 4028 f2fs_err(sbi, "invalid journal entries nats %u sits %u", 4029 nats_in_cursum(nat_j), sits_in_cursum(sit_j)); 4030 return -EINVAL; 4031 } 4032 4033 return 0; 4034 } 4035 4036 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr) 4037 { 4038 struct page *page; 4039 unsigned char *kaddr; 4040 struct f2fs_summary *summary; 4041 struct curseg_info *seg_i; 4042 int written_size = 0; 4043 int i, j; 4044 4045 page = f2fs_grab_meta_page(sbi, blkaddr++); 4046 kaddr = (unsigned char *)page_address(page); 4047 memset(kaddr, 0, PAGE_SIZE); 4048 4049 /* Step 1: write nat cache */ 4050 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); 4051 memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE); 4052 written_size += SUM_JOURNAL_SIZE; 4053 4054 /* Step 2: write sit cache */ 4055 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); 4056 memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE); 4057 written_size += SUM_JOURNAL_SIZE; 4058 4059 /* Step 3: write summary entries */ 4060 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 4061 seg_i = CURSEG_I(sbi, i); 4062 for (j = 0; j < f2fs_curseg_valid_blocks(sbi, i); j++) { 4063 if (!page) { 4064 page = f2fs_grab_meta_page(sbi, blkaddr++); 4065 kaddr = (unsigned char *)page_address(page); 4066 memset(kaddr, 0, PAGE_SIZE); 4067 written_size = 0; 4068 } 4069 summary = (struct f2fs_summary *)(kaddr + written_size); 4070 *summary = seg_i->sum_blk->entries[j]; 4071 written_size += SUMMARY_SIZE; 4072 4073 if (written_size + SUMMARY_SIZE <= PAGE_SIZE - 4074 SUM_FOOTER_SIZE) 4075 continue; 4076 4077 set_page_dirty(page); 4078 f2fs_put_page(page, 1); 4079 page = NULL; 4080 } 4081 } 4082 if (page) { 4083 set_page_dirty(page); 4084 f2fs_put_page(page, 1); 4085 } 4086 } 4087 4088 static void write_normal_summaries(struct f2fs_sb_info *sbi, 4089 block_t blkaddr, int type) 4090 { 4091 int i, end; 4092 4093 if (IS_DATASEG(type)) 4094 end = type + NR_CURSEG_DATA_TYPE; 4095 else 4096 end = type + NR_CURSEG_NODE_TYPE; 4097 4098 for (i = type; i < end; i++) 4099 write_current_sum_page(sbi, i, blkaddr + (i - type)); 4100 } 4101 4102 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk) 4103 { 4104 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) 4105 write_compacted_summaries(sbi, start_blk); 4106 else 4107 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA); 4108 } 4109 4110 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk) 4111 { 4112 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE); 4113 } 4114 4115 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type, 4116 unsigned int val, int alloc) 4117 { 4118 int i; 4119 4120 if (type == NAT_JOURNAL) { 4121 for (i = 0; i < nats_in_cursum(journal); i++) { 4122 if (le32_to_cpu(nid_in_journal(journal, i)) == val) 4123 return i; 4124 } 4125 if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL)) 4126 return update_nats_in_cursum(journal, 1); 4127 } else if (type == SIT_JOURNAL) { 4128 for (i = 0; i < sits_in_cursum(journal); i++) 4129 if (le32_to_cpu(segno_in_journal(journal, i)) == val) 4130 return i; 4131 if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL)) 4132 return update_sits_in_cursum(journal, 1); 4133 } 4134 return -1; 4135 } 4136 4137 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi, 4138 unsigned int segno) 4139 { 4140 return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno)); 4141 } 4142 4143 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi, 4144 unsigned int start) 4145 { 4146 struct sit_info *sit_i = SIT_I(sbi); 4147 struct page *page; 4148 pgoff_t src_off, dst_off; 4149 4150 src_off = current_sit_addr(sbi, start); 4151 dst_off = next_sit_addr(sbi, src_off); 4152 4153 page = f2fs_grab_meta_page(sbi, dst_off); 4154 seg_info_to_sit_page(sbi, page, start); 4155 4156 set_page_dirty(page); 4157 set_to_next_sit(sit_i, start); 4158 4159 return page; 4160 } 4161 4162 static struct sit_entry_set *grab_sit_entry_set(void) 4163 { 4164 struct sit_entry_set *ses = 4165 f2fs_kmem_cache_alloc(sit_entry_set_slab, 4166 GFP_NOFS, true, NULL); 4167 4168 ses->entry_cnt = 0; 4169 INIT_LIST_HEAD(&ses->set_list); 4170 return ses; 4171 } 4172 4173 static void release_sit_entry_set(struct sit_entry_set *ses) 4174 { 4175 list_del(&ses->set_list); 4176 kmem_cache_free(sit_entry_set_slab, ses); 4177 } 4178 4179 static void adjust_sit_entry_set(struct sit_entry_set *ses, 4180 struct list_head *head) 4181 { 4182 struct sit_entry_set *next = ses; 4183 4184 if (list_is_last(&ses->set_list, head)) 4185 return; 4186 4187 list_for_each_entry_continue(next, head, set_list) 4188 if (ses->entry_cnt <= next->entry_cnt) { 4189 list_move_tail(&ses->set_list, &next->set_list); 4190 return; 4191 } 4192 4193 list_move_tail(&ses->set_list, head); 4194 } 4195 4196 static void add_sit_entry(unsigned int segno, struct list_head *head) 4197 { 4198 struct sit_entry_set *ses; 4199 unsigned int start_segno = START_SEGNO(segno); 4200 4201 list_for_each_entry(ses, head, set_list) { 4202 if (ses->start_segno == start_segno) { 4203 ses->entry_cnt++; 4204 adjust_sit_entry_set(ses, head); 4205 return; 4206 } 4207 } 4208 4209 ses = grab_sit_entry_set(); 4210 4211 ses->start_segno = start_segno; 4212 ses->entry_cnt++; 4213 list_add(&ses->set_list, head); 4214 } 4215 4216 static void add_sits_in_set(struct f2fs_sb_info *sbi) 4217 { 4218 struct f2fs_sm_info *sm_info = SM_I(sbi); 4219 struct list_head *set_list = &sm_info->sit_entry_set; 4220 unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap; 4221 unsigned int segno; 4222 4223 for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi)) 4224 add_sit_entry(segno, set_list); 4225 } 4226 4227 static void remove_sits_in_journal(struct f2fs_sb_info *sbi) 4228 { 4229 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 4230 struct f2fs_journal *journal = curseg->journal; 4231 int i; 4232 4233 down_write(&curseg->journal_rwsem); 4234 for (i = 0; i < sits_in_cursum(journal); i++) { 4235 unsigned int segno; 4236 bool dirtied; 4237 4238 segno = le32_to_cpu(segno_in_journal(journal, i)); 4239 dirtied = __mark_sit_entry_dirty(sbi, segno); 4240 4241 if (!dirtied) 4242 add_sit_entry(segno, &SM_I(sbi)->sit_entry_set); 4243 } 4244 update_sits_in_cursum(journal, -i); 4245 up_write(&curseg->journal_rwsem); 4246 } 4247 4248 /* 4249 * CP calls this function, which flushes SIT entries including sit_journal, 4250 * and moves prefree segs to free segs. 4251 */ 4252 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) 4253 { 4254 struct sit_info *sit_i = SIT_I(sbi); 4255 unsigned long *bitmap = sit_i->dirty_sentries_bitmap; 4256 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 4257 struct f2fs_journal *journal = curseg->journal; 4258 struct sit_entry_set *ses, *tmp; 4259 struct list_head *head = &SM_I(sbi)->sit_entry_set; 4260 bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS); 4261 struct seg_entry *se; 4262 4263 down_write(&sit_i->sentry_lock); 4264 4265 if (!sit_i->dirty_sentries) 4266 goto out; 4267 4268 /* 4269 * add and account sit entries of dirty bitmap in sit entry 4270 * set temporarily 4271 */ 4272 add_sits_in_set(sbi); 4273 4274 /* 4275 * if there are no enough space in journal to store dirty sit 4276 * entries, remove all entries from journal and add and account 4277 * them in sit entry set. 4278 */ 4279 if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) || 4280 !to_journal) 4281 remove_sits_in_journal(sbi); 4282 4283 /* 4284 * there are two steps to flush sit entries: 4285 * #1, flush sit entries to journal in current cold data summary block. 4286 * #2, flush sit entries to sit page. 4287 */ 4288 list_for_each_entry_safe(ses, tmp, head, set_list) { 4289 struct page *page = NULL; 4290 struct f2fs_sit_block *raw_sit = NULL; 4291 unsigned int start_segno = ses->start_segno; 4292 unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK, 4293 (unsigned long)MAIN_SEGS(sbi)); 4294 unsigned int segno = start_segno; 4295 4296 if (to_journal && 4297 !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL)) 4298 to_journal = false; 4299 4300 if (to_journal) { 4301 down_write(&curseg->journal_rwsem); 4302 } else { 4303 page = get_next_sit_page(sbi, start_segno); 4304 raw_sit = page_address(page); 4305 } 4306 4307 /* flush dirty sit entries in region of current sit set */ 4308 for_each_set_bit_from(segno, bitmap, end) { 4309 int offset, sit_offset; 4310 4311 se = get_seg_entry(sbi, segno); 4312 #ifdef CONFIG_F2FS_CHECK_FS 4313 if (memcmp(se->cur_valid_map, se->cur_valid_map_mir, 4314 SIT_VBLOCK_MAP_SIZE)) 4315 f2fs_bug_on(sbi, 1); 4316 #endif 4317 4318 /* add discard candidates */ 4319 if (!(cpc->reason & CP_DISCARD)) { 4320 cpc->trim_start = segno; 4321 add_discard_addrs(sbi, cpc, false); 4322 } 4323 4324 if (to_journal) { 4325 offset = f2fs_lookup_journal_in_cursum(journal, 4326 SIT_JOURNAL, segno, 1); 4327 f2fs_bug_on(sbi, offset < 0); 4328 segno_in_journal(journal, offset) = 4329 cpu_to_le32(segno); 4330 seg_info_to_raw_sit(se, 4331 &sit_in_journal(journal, offset)); 4332 check_block_count(sbi, segno, 4333 &sit_in_journal(journal, offset)); 4334 } else { 4335 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno); 4336 seg_info_to_raw_sit(se, 4337 &raw_sit->entries[sit_offset]); 4338 check_block_count(sbi, segno, 4339 &raw_sit->entries[sit_offset]); 4340 } 4341 4342 __clear_bit(segno, bitmap); 4343 sit_i->dirty_sentries--; 4344 ses->entry_cnt--; 4345 } 4346 4347 if (to_journal) 4348 up_write(&curseg->journal_rwsem); 4349 else 4350 f2fs_put_page(page, 1); 4351 4352 f2fs_bug_on(sbi, ses->entry_cnt); 4353 release_sit_entry_set(ses); 4354 } 4355 4356 f2fs_bug_on(sbi, !list_empty(head)); 4357 f2fs_bug_on(sbi, sit_i->dirty_sentries); 4358 out: 4359 if (cpc->reason & CP_DISCARD) { 4360 __u64 trim_start = cpc->trim_start; 4361 4362 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) 4363 add_discard_addrs(sbi, cpc, false); 4364 4365 cpc->trim_start = trim_start; 4366 } 4367 up_write(&sit_i->sentry_lock); 4368 4369 set_prefree_as_free_segments(sbi); 4370 } 4371 4372 static int build_sit_info(struct f2fs_sb_info *sbi) 4373 { 4374 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 4375 struct sit_info *sit_i; 4376 unsigned int sit_segs, start; 4377 char *src_bitmap, *bitmap; 4378 unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size; 4379 unsigned int discard_map = f2fs_block_unit_discard(sbi) ? 1 : 0; 4380 4381 /* allocate memory for SIT information */ 4382 sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL); 4383 if (!sit_i) 4384 return -ENOMEM; 4385 4386 SM_I(sbi)->sit_info = sit_i; 4387 4388 sit_i->sentries = 4389 f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry), 4390 MAIN_SEGS(sbi)), 4391 GFP_KERNEL); 4392 if (!sit_i->sentries) 4393 return -ENOMEM; 4394 4395 main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); 4396 sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size, 4397 GFP_KERNEL); 4398 if (!sit_i->dirty_sentries_bitmap) 4399 return -ENOMEM; 4400 4401 #ifdef CONFIG_F2FS_CHECK_FS 4402 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (3 + discard_map); 4403 #else 4404 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (2 + discard_map); 4405 #endif 4406 sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL); 4407 if (!sit_i->bitmap) 4408 return -ENOMEM; 4409 4410 bitmap = sit_i->bitmap; 4411 4412 for (start = 0; start < MAIN_SEGS(sbi); start++) { 4413 sit_i->sentries[start].cur_valid_map = bitmap; 4414 bitmap += SIT_VBLOCK_MAP_SIZE; 4415 4416 sit_i->sentries[start].ckpt_valid_map = bitmap; 4417 bitmap += SIT_VBLOCK_MAP_SIZE; 4418 4419 #ifdef CONFIG_F2FS_CHECK_FS 4420 sit_i->sentries[start].cur_valid_map_mir = bitmap; 4421 bitmap += SIT_VBLOCK_MAP_SIZE; 4422 #endif 4423 4424 if (discard_map) { 4425 sit_i->sentries[start].discard_map = bitmap; 4426 bitmap += SIT_VBLOCK_MAP_SIZE; 4427 } 4428 } 4429 4430 sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); 4431 if (!sit_i->tmp_map) 4432 return -ENOMEM; 4433 4434 if (__is_large_section(sbi)) { 4435 sit_i->sec_entries = 4436 f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry), 4437 MAIN_SECS(sbi)), 4438 GFP_KERNEL); 4439 if (!sit_i->sec_entries) 4440 return -ENOMEM; 4441 } 4442 4443 /* get information related with SIT */ 4444 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1; 4445 4446 /* setup SIT bitmap from ckeckpoint pack */ 4447 sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP); 4448 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP); 4449 4450 sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL); 4451 if (!sit_i->sit_bitmap) 4452 return -ENOMEM; 4453 4454 #ifdef CONFIG_F2FS_CHECK_FS 4455 sit_i->sit_bitmap_mir = kmemdup(src_bitmap, 4456 sit_bitmap_size, GFP_KERNEL); 4457 if (!sit_i->sit_bitmap_mir) 4458 return -ENOMEM; 4459 4460 sit_i->invalid_segmap = f2fs_kvzalloc(sbi, 4461 main_bitmap_size, GFP_KERNEL); 4462 if (!sit_i->invalid_segmap) 4463 return -ENOMEM; 4464 #endif 4465 4466 sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr); 4467 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg; 4468 sit_i->written_valid_blocks = 0; 4469 sit_i->bitmap_size = sit_bitmap_size; 4470 sit_i->dirty_sentries = 0; 4471 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK; 4472 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time); 4473 sit_i->mounted_time = ktime_get_boottime_seconds(); 4474 init_rwsem(&sit_i->sentry_lock); 4475 return 0; 4476 } 4477 4478 static int build_free_segmap(struct f2fs_sb_info *sbi) 4479 { 4480 struct free_segmap_info *free_i; 4481 unsigned int bitmap_size, sec_bitmap_size; 4482 4483 /* allocate memory for free segmap information */ 4484 free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL); 4485 if (!free_i) 4486 return -ENOMEM; 4487 4488 SM_I(sbi)->free_info = free_i; 4489 4490 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); 4491 free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL); 4492 if (!free_i->free_segmap) 4493 return -ENOMEM; 4494 4495 sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); 4496 free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL); 4497 if (!free_i->free_secmap) 4498 return -ENOMEM; 4499 4500 /* set all segments as dirty temporarily */ 4501 memset(free_i->free_segmap, 0xff, bitmap_size); 4502 memset(free_i->free_secmap, 0xff, sec_bitmap_size); 4503 4504 /* init free segmap information */ 4505 free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi)); 4506 free_i->free_segments = 0; 4507 free_i->free_sections = 0; 4508 spin_lock_init(&free_i->segmap_lock); 4509 return 0; 4510 } 4511 4512 static int build_curseg(struct f2fs_sb_info *sbi) 4513 { 4514 struct curseg_info *array; 4515 int i; 4516 4517 array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE, 4518 sizeof(*array)), GFP_KERNEL); 4519 if (!array) 4520 return -ENOMEM; 4521 4522 SM_I(sbi)->curseg_array = array; 4523 4524 for (i = 0; i < NO_CHECK_TYPE; i++) { 4525 mutex_init(&array[i].curseg_mutex); 4526 array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL); 4527 if (!array[i].sum_blk) 4528 return -ENOMEM; 4529 init_rwsem(&array[i].journal_rwsem); 4530 array[i].journal = f2fs_kzalloc(sbi, 4531 sizeof(struct f2fs_journal), GFP_KERNEL); 4532 if (!array[i].journal) 4533 return -ENOMEM; 4534 if (i < NR_PERSISTENT_LOG) 4535 array[i].seg_type = CURSEG_HOT_DATA + i; 4536 else if (i == CURSEG_COLD_DATA_PINNED) 4537 array[i].seg_type = CURSEG_COLD_DATA; 4538 else if (i == CURSEG_ALL_DATA_ATGC) 4539 array[i].seg_type = CURSEG_COLD_DATA; 4540 array[i].segno = NULL_SEGNO; 4541 array[i].next_blkoff = 0; 4542 array[i].inited = false; 4543 } 4544 return restore_curseg_summaries(sbi); 4545 } 4546 4547 static int build_sit_entries(struct f2fs_sb_info *sbi) 4548 { 4549 struct sit_info *sit_i = SIT_I(sbi); 4550 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 4551 struct f2fs_journal *journal = curseg->journal; 4552 struct seg_entry *se; 4553 struct f2fs_sit_entry sit; 4554 int sit_blk_cnt = SIT_BLK_CNT(sbi); 4555 unsigned int i, start, end; 4556 unsigned int readed, start_blk = 0; 4557 int err = 0; 4558 block_t sit_valid_blocks[2] = {0, 0}; 4559 4560 do { 4561 readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS, 4562 META_SIT, true); 4563 4564 start = start_blk * sit_i->sents_per_block; 4565 end = (start_blk + readed) * sit_i->sents_per_block; 4566 4567 for (; start < end && start < MAIN_SEGS(sbi); start++) { 4568 struct f2fs_sit_block *sit_blk; 4569 struct page *page; 4570 4571 se = &sit_i->sentries[start]; 4572 page = get_current_sit_page(sbi, start); 4573 if (IS_ERR(page)) 4574 return PTR_ERR(page); 4575 sit_blk = (struct f2fs_sit_block *)page_address(page); 4576 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)]; 4577 f2fs_put_page(page, 1); 4578 4579 err = check_block_count(sbi, start, &sit); 4580 if (err) 4581 return err; 4582 seg_info_from_raw_sit(se, &sit); 4583 4584 if (se->type >= NR_PERSISTENT_LOG) { 4585 f2fs_err(sbi, "Invalid segment type: %u, segno: %u", 4586 se->type, start); 4587 f2fs_handle_error(sbi, 4588 ERROR_INCONSISTENT_SUM_TYPE); 4589 return -EFSCORRUPTED; 4590 } 4591 4592 sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks; 4593 4594 if (!f2fs_block_unit_discard(sbi)) 4595 goto init_discard_map_done; 4596 4597 /* build discard map only one time */ 4598 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) { 4599 memset(se->discard_map, 0xff, 4600 SIT_VBLOCK_MAP_SIZE); 4601 goto init_discard_map_done; 4602 } 4603 memcpy(se->discard_map, se->cur_valid_map, 4604 SIT_VBLOCK_MAP_SIZE); 4605 sbi->discard_blks += BLKS_PER_SEG(sbi) - 4606 se->valid_blocks; 4607 init_discard_map_done: 4608 if (__is_large_section(sbi)) 4609 get_sec_entry(sbi, start)->valid_blocks += 4610 se->valid_blocks; 4611 } 4612 start_blk += readed; 4613 } while (start_blk < sit_blk_cnt); 4614 4615 down_read(&curseg->journal_rwsem); 4616 for (i = 0; i < sits_in_cursum(journal); i++) { 4617 unsigned int old_valid_blocks; 4618 4619 start = le32_to_cpu(segno_in_journal(journal, i)); 4620 if (start >= MAIN_SEGS(sbi)) { 4621 f2fs_err(sbi, "Wrong journal entry on segno %u", 4622 start); 4623 err = -EFSCORRUPTED; 4624 f2fs_handle_error(sbi, ERROR_CORRUPTED_JOURNAL); 4625 break; 4626 } 4627 4628 se = &sit_i->sentries[start]; 4629 sit = sit_in_journal(journal, i); 4630 4631 old_valid_blocks = se->valid_blocks; 4632 4633 sit_valid_blocks[SE_PAGETYPE(se)] -= old_valid_blocks; 4634 4635 err = check_block_count(sbi, start, &sit); 4636 if (err) 4637 break; 4638 seg_info_from_raw_sit(se, &sit); 4639 4640 if (se->type >= NR_PERSISTENT_LOG) { 4641 f2fs_err(sbi, "Invalid segment type: %u, segno: %u", 4642 se->type, start); 4643 err = -EFSCORRUPTED; 4644 f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE); 4645 break; 4646 } 4647 4648 sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks; 4649 4650 if (f2fs_block_unit_discard(sbi)) { 4651 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) { 4652 memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE); 4653 } else { 4654 memcpy(se->discard_map, se->cur_valid_map, 4655 SIT_VBLOCK_MAP_SIZE); 4656 sbi->discard_blks += old_valid_blocks; 4657 sbi->discard_blks -= se->valid_blocks; 4658 } 4659 } 4660 4661 if (__is_large_section(sbi)) { 4662 get_sec_entry(sbi, start)->valid_blocks += 4663 se->valid_blocks; 4664 get_sec_entry(sbi, start)->valid_blocks -= 4665 old_valid_blocks; 4666 } 4667 } 4668 up_read(&curseg->journal_rwsem); 4669 4670 if (err) 4671 return err; 4672 4673 if (sit_valid_blocks[NODE] != valid_node_count(sbi)) { 4674 f2fs_err(sbi, "SIT is corrupted node# %u vs %u", 4675 sit_valid_blocks[NODE], valid_node_count(sbi)); 4676 f2fs_handle_error(sbi, ERROR_INCONSISTENT_NODE_COUNT); 4677 return -EFSCORRUPTED; 4678 } 4679 4680 if (sit_valid_blocks[DATA] + sit_valid_blocks[NODE] > 4681 valid_user_blocks(sbi)) { 4682 f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u", 4683 sit_valid_blocks[DATA], sit_valid_blocks[NODE], 4684 valid_user_blocks(sbi)); 4685 f2fs_handle_error(sbi, ERROR_INCONSISTENT_BLOCK_COUNT); 4686 return -EFSCORRUPTED; 4687 } 4688 4689 return 0; 4690 } 4691 4692 static void init_free_segmap(struct f2fs_sb_info *sbi) 4693 { 4694 unsigned int start; 4695 int type; 4696 struct seg_entry *sentry; 4697 4698 for (start = 0; start < MAIN_SEGS(sbi); start++) { 4699 if (f2fs_usable_blks_in_seg(sbi, start) == 0) 4700 continue; 4701 sentry = get_seg_entry(sbi, start); 4702 if (!sentry->valid_blocks) 4703 __set_free(sbi, start); 4704 else 4705 SIT_I(sbi)->written_valid_blocks += 4706 sentry->valid_blocks; 4707 } 4708 4709 /* set use the current segments */ 4710 for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) { 4711 struct curseg_info *curseg_t = CURSEG_I(sbi, type); 4712 4713 __set_test_and_inuse(sbi, curseg_t->segno); 4714 } 4715 } 4716 4717 static void init_dirty_segmap(struct f2fs_sb_info *sbi) 4718 { 4719 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 4720 struct free_segmap_info *free_i = FREE_I(sbi); 4721 unsigned int segno = 0, offset = 0, secno; 4722 block_t valid_blocks, usable_blks_in_seg; 4723 4724 while (1) { 4725 /* find dirty segment based on free segmap */ 4726 segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset); 4727 if (segno >= MAIN_SEGS(sbi)) 4728 break; 4729 offset = segno + 1; 4730 valid_blocks = get_valid_blocks(sbi, segno, false); 4731 usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno); 4732 if (valid_blocks == usable_blks_in_seg || !valid_blocks) 4733 continue; 4734 if (valid_blocks > usable_blks_in_seg) { 4735 f2fs_bug_on(sbi, 1); 4736 continue; 4737 } 4738 mutex_lock(&dirty_i->seglist_lock); 4739 __locate_dirty_segment(sbi, segno, DIRTY); 4740 mutex_unlock(&dirty_i->seglist_lock); 4741 } 4742 4743 if (!__is_large_section(sbi)) 4744 return; 4745 4746 mutex_lock(&dirty_i->seglist_lock); 4747 for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) { 4748 valid_blocks = get_valid_blocks(sbi, segno, true); 4749 secno = GET_SEC_FROM_SEG(sbi, segno); 4750 4751 if (!valid_blocks || valid_blocks == CAP_BLKS_PER_SEC(sbi)) 4752 continue; 4753 if (IS_CURSEC(sbi, secno)) 4754 continue; 4755 set_bit(secno, dirty_i->dirty_secmap); 4756 } 4757 mutex_unlock(&dirty_i->seglist_lock); 4758 } 4759 4760 static int init_victim_secmap(struct f2fs_sb_info *sbi) 4761 { 4762 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 4763 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); 4764 4765 dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL); 4766 if (!dirty_i->victim_secmap) 4767 return -ENOMEM; 4768 4769 dirty_i->pinned_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL); 4770 if (!dirty_i->pinned_secmap) 4771 return -ENOMEM; 4772 4773 dirty_i->pinned_secmap_cnt = 0; 4774 dirty_i->enable_pin_section = true; 4775 return 0; 4776 } 4777 4778 static int build_dirty_segmap(struct f2fs_sb_info *sbi) 4779 { 4780 struct dirty_seglist_info *dirty_i; 4781 unsigned int bitmap_size, i; 4782 4783 /* allocate memory for dirty segments list information */ 4784 dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info), 4785 GFP_KERNEL); 4786 if (!dirty_i) 4787 return -ENOMEM; 4788 4789 SM_I(sbi)->dirty_info = dirty_i; 4790 mutex_init(&dirty_i->seglist_lock); 4791 4792 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); 4793 4794 for (i = 0; i < NR_DIRTY_TYPE; i++) { 4795 dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size, 4796 GFP_KERNEL); 4797 if (!dirty_i->dirty_segmap[i]) 4798 return -ENOMEM; 4799 } 4800 4801 if (__is_large_section(sbi)) { 4802 bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); 4803 dirty_i->dirty_secmap = f2fs_kvzalloc(sbi, 4804 bitmap_size, GFP_KERNEL); 4805 if (!dirty_i->dirty_secmap) 4806 return -ENOMEM; 4807 } 4808 4809 init_dirty_segmap(sbi); 4810 return init_victim_secmap(sbi); 4811 } 4812 4813 static int sanity_check_curseg(struct f2fs_sb_info *sbi) 4814 { 4815 int i; 4816 4817 /* 4818 * In LFS/SSR curseg, .next_blkoff should point to an unused blkaddr; 4819 * In LFS curseg, all blkaddr after .next_blkoff should be unused. 4820 */ 4821 for (i = 0; i < NR_PERSISTENT_LOG; i++) { 4822 struct curseg_info *curseg = CURSEG_I(sbi, i); 4823 struct seg_entry *se = get_seg_entry(sbi, curseg->segno); 4824 unsigned int blkofs = curseg->next_blkoff; 4825 4826 if (f2fs_sb_has_readonly(sbi) && 4827 i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE) 4828 continue; 4829 4830 sanity_check_seg_type(sbi, curseg->seg_type); 4831 4832 if (curseg->alloc_type != LFS && curseg->alloc_type != SSR) { 4833 f2fs_err(sbi, 4834 "Current segment has invalid alloc_type:%d", 4835 curseg->alloc_type); 4836 f2fs_handle_error(sbi, ERROR_INVALID_CURSEG); 4837 return -EFSCORRUPTED; 4838 } 4839 4840 if (f2fs_test_bit(blkofs, se->cur_valid_map)) 4841 goto out; 4842 4843 if (curseg->alloc_type == SSR) 4844 continue; 4845 4846 for (blkofs += 1; blkofs < BLKS_PER_SEG(sbi); blkofs++) { 4847 if (!f2fs_test_bit(blkofs, se->cur_valid_map)) 4848 continue; 4849 out: 4850 f2fs_err(sbi, 4851 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u", 4852 i, curseg->segno, curseg->alloc_type, 4853 curseg->next_blkoff, blkofs); 4854 f2fs_handle_error(sbi, ERROR_INVALID_CURSEG); 4855 return -EFSCORRUPTED; 4856 } 4857 } 4858 return 0; 4859 } 4860 4861 #ifdef CONFIG_BLK_DEV_ZONED 4862 4863 static int check_zone_write_pointer(struct f2fs_sb_info *sbi, 4864 struct f2fs_dev_info *fdev, 4865 struct blk_zone *zone) 4866 { 4867 unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno; 4868 block_t zone_block, wp_block, last_valid_block; 4869 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT; 4870 int i, s, b, ret; 4871 struct seg_entry *se; 4872 4873 if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ) 4874 return 0; 4875 4876 wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block); 4877 wp_segno = GET_SEGNO(sbi, wp_block); 4878 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno); 4879 zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block); 4880 zone_segno = GET_SEGNO(sbi, zone_block); 4881 zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno); 4882 4883 if (zone_segno >= MAIN_SEGS(sbi)) 4884 return 0; 4885 4886 /* 4887 * Skip check of zones cursegs point to, since 4888 * fix_curseg_write_pointer() checks them. 4889 */ 4890 for (i = 0; i < NO_CHECK_TYPE; i++) 4891 if (zone_secno == GET_SEC_FROM_SEG(sbi, 4892 CURSEG_I(sbi, i)->segno)) 4893 return 0; 4894 4895 /* 4896 * Get last valid block of the zone. 4897 */ 4898 last_valid_block = zone_block - 1; 4899 for (s = sbi->segs_per_sec - 1; s >= 0; s--) { 4900 segno = zone_segno + s; 4901 se = get_seg_entry(sbi, segno); 4902 for (b = sbi->blocks_per_seg - 1; b >= 0; b--) 4903 if (f2fs_test_bit(b, se->cur_valid_map)) { 4904 last_valid_block = START_BLOCK(sbi, segno) + b; 4905 break; 4906 } 4907 if (last_valid_block >= zone_block) 4908 break; 4909 } 4910 4911 /* 4912 * The write pointer matches with the valid blocks or 4913 * already points to the end of the zone. 4914 */ 4915 if ((last_valid_block + 1 == wp_block) || 4916 (zone->wp == zone->start + zone->len)) 4917 return 0; 4918 4919 if (last_valid_block + 1 == zone_block) { 4920 /* 4921 * If there is no valid block in the zone and if write pointer 4922 * is not at zone start, reset the write pointer. 4923 */ 4924 f2fs_notice(sbi, 4925 "Zone without valid block has non-zero write " 4926 "pointer. Reset the write pointer: wp[0x%x,0x%x]", 4927 wp_segno, wp_blkoff); 4928 ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block, 4929 zone->len >> log_sectors_per_block); 4930 if (ret) 4931 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)", 4932 fdev->path, ret); 4933 4934 return ret; 4935 } 4936 4937 /* 4938 * If there are valid blocks and the write pointer doesn't 4939 * match with them, we need to report the inconsistency and 4940 * fill the zone till the end to close the zone. This inconsistency 4941 * does not cause write error because the zone will not be selected 4942 * for write operation until it get discarded. 4943 */ 4944 f2fs_notice(sbi, "Valid blocks are not aligned with write pointer: " 4945 "valid block[0x%x,0x%x] wp[0x%x,0x%x]", 4946 GET_SEGNO(sbi, last_valid_block), 4947 GET_BLKOFF_FROM_SEG0(sbi, last_valid_block), 4948 wp_segno, wp_blkoff); 4949 4950 ret = blkdev_zone_mgmt(fdev->bdev, REQ_OP_ZONE_FINISH, 4951 zone->start, zone->len, GFP_NOFS); 4952 if (ret == -EOPNOTSUPP) { 4953 ret = blkdev_issue_zeroout(fdev->bdev, zone->wp, 4954 zone->len - (zone->wp - zone->start), 4955 GFP_NOFS, 0); 4956 if (ret) 4957 f2fs_err(sbi, "Fill up zone failed: %s (errno=%d)", 4958 fdev->path, ret); 4959 } else if (ret) { 4960 f2fs_err(sbi, "Finishing zone failed: %s (errno=%d)", 4961 fdev->path, ret); 4962 } 4963 4964 return ret; 4965 } 4966 4967 static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi, 4968 block_t zone_blkaddr) 4969 { 4970 int i; 4971 4972 for (i = 0; i < sbi->s_ndevs; i++) { 4973 if (!bdev_is_zoned(FDEV(i).bdev)) 4974 continue; 4975 if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr && 4976 zone_blkaddr <= FDEV(i).end_blk)) 4977 return &FDEV(i); 4978 } 4979 4980 return NULL; 4981 } 4982 4983 static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx, 4984 void *data) 4985 { 4986 memcpy(data, zone, sizeof(struct blk_zone)); 4987 return 0; 4988 } 4989 4990 static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type) 4991 { 4992 struct curseg_info *cs = CURSEG_I(sbi, type); 4993 struct f2fs_dev_info *zbd; 4994 struct blk_zone zone; 4995 unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off; 4996 block_t cs_zone_block, wp_block; 4997 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT; 4998 sector_t zone_sector; 4999 int err; 5000 5001 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno); 5002 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section)); 5003 5004 zbd = get_target_zoned_dev(sbi, cs_zone_block); 5005 if (!zbd) 5006 return 0; 5007 5008 /* report zone for the sector the curseg points to */ 5009 zone_sector = (sector_t)(cs_zone_block - zbd->start_blk) 5010 << log_sectors_per_block; 5011 err = blkdev_report_zones(zbd->bdev, zone_sector, 1, 5012 report_one_zone_cb, &zone); 5013 if (err != 1) { 5014 f2fs_err(sbi, "Report zone failed: %s errno=(%d)", 5015 zbd->path, err); 5016 return err; 5017 } 5018 5019 if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ) 5020 return 0; 5021 5022 wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block); 5023 wp_segno = GET_SEGNO(sbi, wp_block); 5024 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno); 5025 wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0); 5026 5027 if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff && 5028 wp_sector_off == 0) 5029 return 0; 5030 5031 f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: " 5032 "curseg[0x%x,0x%x] wp[0x%x,0x%x]", 5033 type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff); 5034 5035 f2fs_notice(sbi, "Assign new section to curseg[%d]: " 5036 "curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff); 5037 5038 f2fs_allocate_new_section(sbi, type, true); 5039 5040 /* check consistency of the zone curseg pointed to */ 5041 if (check_zone_write_pointer(sbi, zbd, &zone)) 5042 return -EIO; 5043 5044 /* check newly assigned zone */ 5045 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno); 5046 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section)); 5047 5048 zbd = get_target_zoned_dev(sbi, cs_zone_block); 5049 if (!zbd) 5050 return 0; 5051 5052 zone_sector = (sector_t)(cs_zone_block - zbd->start_blk) 5053 << log_sectors_per_block; 5054 err = blkdev_report_zones(zbd->bdev, zone_sector, 1, 5055 report_one_zone_cb, &zone); 5056 if (err != 1) { 5057 f2fs_err(sbi, "Report zone failed: %s errno=(%d)", 5058 zbd->path, err); 5059 return err; 5060 } 5061 5062 if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ) 5063 return 0; 5064 5065 if (zone.wp != zone.start) { 5066 f2fs_notice(sbi, 5067 "New zone for curseg[%d] is not yet discarded. " 5068 "Reset the zone: curseg[0x%x,0x%x]", 5069 type, cs->segno, cs->next_blkoff); 5070 err = __f2fs_issue_discard_zone(sbi, zbd->bdev, cs_zone_block, 5071 zone.len >> log_sectors_per_block); 5072 if (err) { 5073 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)", 5074 zbd->path, err); 5075 return err; 5076 } 5077 } 5078 5079 return 0; 5080 } 5081 5082 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi) 5083 { 5084 int i, ret; 5085 5086 for (i = 0; i < NR_PERSISTENT_LOG; i++) { 5087 ret = fix_curseg_write_pointer(sbi, i); 5088 if (ret) 5089 return ret; 5090 } 5091 5092 return 0; 5093 } 5094 5095 struct check_zone_write_pointer_args { 5096 struct f2fs_sb_info *sbi; 5097 struct f2fs_dev_info *fdev; 5098 }; 5099 5100 static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx, 5101 void *data) 5102 { 5103 struct check_zone_write_pointer_args *args; 5104 5105 args = (struct check_zone_write_pointer_args *)data; 5106 5107 return check_zone_write_pointer(args->sbi, args->fdev, zone); 5108 } 5109 5110 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi) 5111 { 5112 int i, ret; 5113 struct check_zone_write_pointer_args args; 5114 5115 for (i = 0; i < sbi->s_ndevs; i++) { 5116 if (!bdev_is_zoned(FDEV(i).bdev)) 5117 continue; 5118 5119 args.sbi = sbi; 5120 args.fdev = &FDEV(i); 5121 ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES, 5122 check_zone_write_pointer_cb, &args); 5123 if (ret < 0) 5124 return ret; 5125 } 5126 5127 return 0; 5128 } 5129 5130 /* 5131 * Return the number of usable blocks in a segment. The number of blocks 5132 * returned is always equal to the number of blocks in a segment for 5133 * segments fully contained within a sequential zone capacity or a 5134 * conventional zone. For segments partially contained in a sequential 5135 * zone capacity, the number of usable blocks up to the zone capacity 5136 * is returned. 0 is returned in all other cases. 5137 */ 5138 static inline unsigned int f2fs_usable_zone_blks_in_seg( 5139 struct f2fs_sb_info *sbi, unsigned int segno) 5140 { 5141 block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr; 5142 unsigned int secno; 5143 5144 if (!sbi->unusable_blocks_per_sec) 5145 return BLKS_PER_SEG(sbi); 5146 5147 secno = GET_SEC_FROM_SEG(sbi, segno); 5148 seg_start = START_BLOCK(sbi, segno); 5149 sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno)); 5150 sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi); 5151 5152 /* 5153 * If segment starts before zone capacity and spans beyond 5154 * zone capacity, then usable blocks are from seg start to 5155 * zone capacity. If the segment starts after the zone capacity, 5156 * then there are no usable blocks. 5157 */ 5158 if (seg_start >= sec_cap_blkaddr) 5159 return 0; 5160 if (seg_start + BLKS_PER_SEG(sbi) > sec_cap_blkaddr) 5161 return sec_cap_blkaddr - seg_start; 5162 5163 return BLKS_PER_SEG(sbi); 5164 } 5165 #else 5166 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi) 5167 { 5168 return 0; 5169 } 5170 5171 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi) 5172 { 5173 return 0; 5174 } 5175 5176 static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi, 5177 unsigned int segno) 5178 { 5179 return 0; 5180 } 5181 5182 #endif 5183 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi, 5184 unsigned int segno) 5185 { 5186 if (f2fs_sb_has_blkzoned(sbi)) 5187 return f2fs_usable_zone_blks_in_seg(sbi, segno); 5188 5189 return BLKS_PER_SEG(sbi); 5190 } 5191 5192 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi, 5193 unsigned int segno) 5194 { 5195 if (f2fs_sb_has_blkzoned(sbi)) 5196 return CAP_SEGS_PER_SEC(sbi); 5197 5198 return SEGS_PER_SEC(sbi); 5199 } 5200 5201 /* 5202 * Update min, max modified time for cost-benefit GC algorithm 5203 */ 5204 static void init_min_max_mtime(struct f2fs_sb_info *sbi) 5205 { 5206 struct sit_info *sit_i = SIT_I(sbi); 5207 unsigned int segno; 5208 5209 down_write(&sit_i->sentry_lock); 5210 5211 sit_i->min_mtime = ULLONG_MAX; 5212 5213 for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) { 5214 unsigned int i; 5215 unsigned long long mtime = 0; 5216 5217 for (i = 0; i < SEGS_PER_SEC(sbi); i++) 5218 mtime += get_seg_entry(sbi, segno + i)->mtime; 5219 5220 mtime = div_u64(mtime, SEGS_PER_SEC(sbi)); 5221 5222 if (sit_i->min_mtime > mtime) 5223 sit_i->min_mtime = mtime; 5224 } 5225 sit_i->max_mtime = get_mtime(sbi, false); 5226 sit_i->dirty_max_mtime = 0; 5227 up_write(&sit_i->sentry_lock); 5228 } 5229 5230 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi) 5231 { 5232 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 5233 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 5234 struct f2fs_sm_info *sm_info; 5235 int err; 5236 5237 sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL); 5238 if (!sm_info) 5239 return -ENOMEM; 5240 5241 /* init sm info */ 5242 sbi->sm_info = sm_info; 5243 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); 5244 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); 5245 sm_info->segment_count = le32_to_cpu(raw_super->segment_count); 5246 sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count); 5247 sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count); 5248 sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main); 5249 sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr); 5250 sm_info->rec_prefree_segments = sm_info->main_segments * 5251 DEF_RECLAIM_PREFREE_SEGMENTS / 100; 5252 if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS) 5253 sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS; 5254 5255 if (!f2fs_lfs_mode(sbi)) 5256 sm_info->ipu_policy = BIT(F2FS_IPU_FSYNC); 5257 sm_info->min_ipu_util = DEF_MIN_IPU_UTIL; 5258 sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS; 5259 sm_info->min_seq_blocks = BLKS_PER_SEG(sbi); 5260 sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS; 5261 sm_info->min_ssr_sections = reserved_sections(sbi); 5262 5263 INIT_LIST_HEAD(&sm_info->sit_entry_set); 5264 5265 init_f2fs_rwsem(&sm_info->curseg_lock); 5266 5267 err = f2fs_create_flush_cmd_control(sbi); 5268 if (err) 5269 return err; 5270 5271 err = create_discard_cmd_control(sbi); 5272 if (err) 5273 return err; 5274 5275 err = build_sit_info(sbi); 5276 if (err) 5277 return err; 5278 err = build_free_segmap(sbi); 5279 if (err) 5280 return err; 5281 err = build_curseg(sbi); 5282 if (err) 5283 return err; 5284 5285 /* reinit free segmap based on SIT */ 5286 err = build_sit_entries(sbi); 5287 if (err) 5288 return err; 5289 5290 init_free_segmap(sbi); 5291 err = build_dirty_segmap(sbi); 5292 if (err) 5293 return err; 5294 5295 err = sanity_check_curseg(sbi); 5296 if (err) 5297 return err; 5298 5299 init_min_max_mtime(sbi); 5300 return 0; 5301 } 5302 5303 static void discard_dirty_segmap(struct f2fs_sb_info *sbi, 5304 enum dirty_type dirty_type) 5305 { 5306 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 5307 5308 mutex_lock(&dirty_i->seglist_lock); 5309 kvfree(dirty_i->dirty_segmap[dirty_type]); 5310 dirty_i->nr_dirty[dirty_type] = 0; 5311 mutex_unlock(&dirty_i->seglist_lock); 5312 } 5313 5314 static void destroy_victim_secmap(struct f2fs_sb_info *sbi) 5315 { 5316 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 5317 5318 kvfree(dirty_i->pinned_secmap); 5319 kvfree(dirty_i->victim_secmap); 5320 } 5321 5322 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi) 5323 { 5324 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 5325 int i; 5326 5327 if (!dirty_i) 5328 return; 5329 5330 /* discard pre-free/dirty segments list */ 5331 for (i = 0; i < NR_DIRTY_TYPE; i++) 5332 discard_dirty_segmap(sbi, i); 5333 5334 if (__is_large_section(sbi)) { 5335 mutex_lock(&dirty_i->seglist_lock); 5336 kvfree(dirty_i->dirty_secmap); 5337 mutex_unlock(&dirty_i->seglist_lock); 5338 } 5339 5340 destroy_victim_secmap(sbi); 5341 SM_I(sbi)->dirty_info = NULL; 5342 kfree(dirty_i); 5343 } 5344 5345 static void destroy_curseg(struct f2fs_sb_info *sbi) 5346 { 5347 struct curseg_info *array = SM_I(sbi)->curseg_array; 5348 int i; 5349 5350 if (!array) 5351 return; 5352 SM_I(sbi)->curseg_array = NULL; 5353 for (i = 0; i < NR_CURSEG_TYPE; i++) { 5354 kfree(array[i].sum_blk); 5355 kfree(array[i].journal); 5356 } 5357 kfree(array); 5358 } 5359 5360 static void destroy_free_segmap(struct f2fs_sb_info *sbi) 5361 { 5362 struct free_segmap_info *free_i = SM_I(sbi)->free_info; 5363 5364 if (!free_i) 5365 return; 5366 SM_I(sbi)->free_info = NULL; 5367 kvfree(free_i->free_segmap); 5368 kvfree(free_i->free_secmap); 5369 kfree(free_i); 5370 } 5371 5372 static void destroy_sit_info(struct f2fs_sb_info *sbi) 5373 { 5374 struct sit_info *sit_i = SIT_I(sbi); 5375 5376 if (!sit_i) 5377 return; 5378 5379 if (sit_i->sentries) 5380 kvfree(sit_i->bitmap); 5381 kfree(sit_i->tmp_map); 5382 5383 kvfree(sit_i->sentries); 5384 kvfree(sit_i->sec_entries); 5385 kvfree(sit_i->dirty_sentries_bitmap); 5386 5387 SM_I(sbi)->sit_info = NULL; 5388 kvfree(sit_i->sit_bitmap); 5389 #ifdef CONFIG_F2FS_CHECK_FS 5390 kvfree(sit_i->sit_bitmap_mir); 5391 kvfree(sit_i->invalid_segmap); 5392 #endif 5393 kfree(sit_i); 5394 } 5395 5396 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi) 5397 { 5398 struct f2fs_sm_info *sm_info = SM_I(sbi); 5399 5400 if (!sm_info) 5401 return; 5402 f2fs_destroy_flush_cmd_control(sbi, true); 5403 destroy_discard_cmd_control(sbi); 5404 destroy_dirty_segmap(sbi); 5405 destroy_curseg(sbi); 5406 destroy_free_segmap(sbi); 5407 destroy_sit_info(sbi); 5408 sbi->sm_info = NULL; 5409 kfree(sm_info); 5410 } 5411 5412 int __init f2fs_create_segment_manager_caches(void) 5413 { 5414 discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry", 5415 sizeof(struct discard_entry)); 5416 if (!discard_entry_slab) 5417 goto fail; 5418 5419 discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd", 5420 sizeof(struct discard_cmd)); 5421 if (!discard_cmd_slab) 5422 goto destroy_discard_entry; 5423 5424 sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set", 5425 sizeof(struct sit_entry_set)); 5426 if (!sit_entry_set_slab) 5427 goto destroy_discard_cmd; 5428 5429 revoke_entry_slab = f2fs_kmem_cache_create("f2fs_revoke_entry", 5430 sizeof(struct revoke_entry)); 5431 if (!revoke_entry_slab) 5432 goto destroy_sit_entry_set; 5433 return 0; 5434 5435 destroy_sit_entry_set: 5436 kmem_cache_destroy(sit_entry_set_slab); 5437 destroy_discard_cmd: 5438 kmem_cache_destroy(discard_cmd_slab); 5439 destroy_discard_entry: 5440 kmem_cache_destroy(discard_entry_slab); 5441 fail: 5442 return -ENOMEM; 5443 } 5444 5445 void f2fs_destroy_segment_manager_caches(void) 5446 { 5447 kmem_cache_destroy(sit_entry_set_slab); 5448 kmem_cache_destroy(discard_cmd_slab); 5449 kmem_cache_destroy(discard_entry_slab); 5450 kmem_cache_destroy(revoke_entry_slab); 5451 } 5452