--- segment.h (8df22a4d6f5b81c9c1703579d4907b57002689ed)
+++ segment.h (88b88a66797159949cec32eaab12b4968f6fae2d)
 /*
  * fs/f2fs/segment.h
  *
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
[... 31 unchanged lines hidden ...]
                        sbi->segs_per_sec) ||   \
         (secno == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno /              \
                        sbi->segs_per_sec) ||   \
         (secno == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /             \
                        sbi->segs_per_sec) ||   \
         (secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /             \
                        sbi->segs_per_sec))     \
-#define START_BLOCK(sbi, segno)                                        \
-       (SM_I(sbi)->seg0_blkaddr +                                      \
+#define MAIN_BLKADDR(sbi)      (SM_I(sbi)->main_blkaddr)
+#define SEG0_BLKADDR(sbi)      (SM_I(sbi)->seg0_blkaddr)
+
+#define MAIN_SEGS(sbi) (SM_I(sbi)->main_segments)
+#define MAIN_SECS(sbi) (sbi->total_sections)
+
+#define TOTAL_SEGS(sbi)        (SM_I(sbi)->segment_count)
+#define TOTAL_BLKS(sbi)        (TOTAL_SEGS(sbi) << sbi->log_blocks_per_seg)
+
+#define MAX_BLKADDR(sbi)       (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
+#define SEGMENT_SIZE(sbi)      (1ULL << (sbi->log_blocksize +  \
+                                       sbi->log_blocks_per_seg))
+
+#define START_BLOCK(sbi, segno)        (SEG0_BLKADDR(sbi) +    \
        (GET_R2L_SEGNO(FREE_I(sbi), segno) << sbi->log_blocks_per_seg))
+
 
 #define NEXT_FREE_BLKADDR(sbi, curseg)                                 \
        (START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff)
 
-#define MAIN_BASE_BLOCK(sbi)   (SM_I(sbi)->main_blkaddr)
-
-#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)    \
-       ((blk_addr) - SM_I(sbi)->seg0_blkaddr)
+#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)    ((blk_addr) - SEG0_BLKADDR(sbi))
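Reviewer note (not part of the patch): the new helpers above pull the on-disk layout arithmetic that was previously open-coded into one place. As a rough worked example, assuming the usual 4KB-block, 512-blocks-per-segment geometry (log_blocksize = 12, log_blocks_per_seg = 9):

    SEGMENT_SIZE(sbi) == 1ULL << (12 + 9)                 /* 2MB per segment */
    TOTAL_BLKS(sbi)   == TOTAL_SEGS(sbi) << 9             /* blocks after seg0 */
    MAX_BLKADDR(sbi)  == SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi)

So a valid block address must satisfy SEG0_BLKADDR(sbi) <= blk_addr < MAX_BLKADDR(sbi), which is exactly the bound the reworked verify_block_addr() later in this diff checks against.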
 #define GET_SEGNO_FROM_SEG0(sbi, blk_addr)     \
        (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> sbi->log_blocks_per_seg)
 #define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)    \
        (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (sbi->blocks_per_seg - 1))
 
 #define GET_SEGNO(sbi, blk_addr)       \
        (((blk_addr == NULL_ADDR) || (blk_addr == NEW_ADDR)) ?  \
        NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
[... 6 unchanged lines hidden ...]
 #define GET_SUM_BLOCK(sbi, segno)                              \
        ((sbi->sm_info->ssa_blkaddr) + segno)
 
 #define GET_SUM_TYPE(footer) ((footer)->entry_type)
 #define SET_SUM_TYPE(footer, type) ((footer)->entry_type = type)
 
 #define SIT_ENTRY_OFFSET(sit_i, segno)                                 \
        (segno % sit_i->sents_per_block)
-#define SIT_BLOCK_OFFSET(sit_i, segno)                                 \
+#define SIT_BLOCK_OFFSET(segno)                                        \
        (segno / SIT_ENTRY_PER_BLOCK)
-#define START_SEGNO(sit_i, segno)              \
-       (SIT_BLOCK_OFFSET(sit_i, segno) * SIT_ENTRY_PER_BLOCK)
+#define START_SEGNO(segno)             \
+       (SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
 #define SIT_BLK_CNT(sbi)                       \
-       ((TOTAL_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)
+       ((MAIN_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)
 #define f2fs_bitmap_size(nr)                   \
        (BITS_TO_LONGS(nr) * sizeof(unsigned long))
-#define TOTAL_SEGS(sbi)        (SM_I(sbi)->main_segments)
-#define TOTAL_SECS(sbi)        (sbi->total_sections)
 
-#define SECTOR_FROM_BLOCK(sbi, blk_addr)                               \
-       (((sector_t)blk_addr) << (sbi)->log_sectors_per_block)
-#define SECTOR_TO_BLOCK(sbi, sectors)                                  \
-       (sectors >> (sbi)->log_sectors_per_block)
-#define MAX_BIO_BLOCKS(max_hw_blocks)                                  \
-       (min((int)max_hw_blocks, BIO_MAX_PAGES))
+#define SECTOR_FROM_BLOCK(blk_addr)                                    \
+       (((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
+#define SECTOR_TO_BLOCK(sectors)                                       \
+       (sectors >> F2FS_LOG_SECTORS_PER_BLOCK)
+#define MAX_BIO_BLOCKS(sbi)                                            \
+       ((int)min((int)max_hw_blocks(sbi), BIO_MAX_PAGES))
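Reviewer note: SECTOR_FROM_BLOCK()/SECTOR_TO_BLOCK() lose the sbi argument because the shift is now the compile-time constant F2FS_LOG_SECTORS_PER_BLOCK rather than a per-superblock field. A minimal sketch of the conversion, assuming the usual 512-byte sector and 4KB block (a shift of 3):

    sector_t s = SECTOR_FROM_BLOCK(blk_addr);   /* blk_addr << 3 */
    block_t  b = SECTOR_TO_BLOCK(sectors);      /* sectors  >> 3 */

MAX_BIO_BLOCKS() likewise now takes sbi and calls max_hw_blocks(sbi) itself, so callers such as nr_pages_to_skip() and nr_pages_to_write() below no longer pass the queue limit in explicitly.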
 
 /*
  * indicate a block allocation direction: RIGHT and LEFT.
  * RIGHT means allocating new sections towards the end of volume.
  * LEFT means the opposite direction.
  */
 enum {
        ALLOC_RIGHT = 0,
[... 57 unchanged lines hidden ...]
 struct sec_entry {
        unsigned int valid_blocks;      /* # of valid blocks in a section */
 };
 
 struct segment_allocation {
        void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
 };
 
+struct inmem_pages {
+       struct list_head list;
+       struct page *page;
+};
+
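Reviewer note: struct inmem_pages looks like a simple list node for pages that must be kept in memory, presumably for the atomic-write path this series also touches (see the f2fs_is_atomic_file() check added to need_inplace_update() below). A hypothetical usage sketch, not taken from the patch, using only the generic kernel list API:

    /* hypothetical: remember a dirty page on some per-inode list head */
    struct inmem_pages *new = kmalloc(sizeof(*new), GFP_NOFS);
    if (new) {
            new->page = page;
            INIT_LIST_HEAD(&new->list);
            list_add_tail(&new->list, &inmem_list);  /* inmem_list: hypothetical list_head */
    }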
 struct sit_info {
        const struct segment_allocation *s_ops;
 
        block_t sit_base_addr;          /* start block address of SIT area */
        block_t sit_blocks;             /* # of blocks used by SIT area */
        block_t written_valid_blocks;   /* # of valid blocks in main area */
        char *sit_bitmap;               /* SIT bitmap pointer */
        unsigned int bitmap_size;       /* SIT bitmap size */
[... 54 unchanged lines hidden ...]
        struct f2fs_summary_block *sum_blk;     /* cached summary block */
        unsigned char alloc_type;               /* current allocation type */
        unsigned int segno;                     /* current segment number */
        unsigned short next_blkoff;             /* next block offset to write */
        unsigned int zone;                      /* current zone number */
        unsigned int next_segno;                /* preallocated segment */
 };
 
+struct sit_entry_set {
+       struct list_head set_list;      /* link with all sit sets */
+       unsigned int start_segno;       /* start segno of sits in set */
+       unsigned int entry_cnt;         /* the # of sit entries in set */
+};
+
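Reviewer note: struct sit_entry_set appears to batch dirty SIT entries by the SIT block that holds them, so a flush can write each SIT block once rather than per entry. Tying it to the macros above (my reading, not stated in the patch), for a dirty segment segno one would expect:

    set->start_segno == START_SEGNO(segno);   /* == SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK */

with entry_cnt counting how many of the SIT_ENTRY_PER_BLOCK slots in that block are dirty.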
 /*
  * inline functions
  */
 static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
 {
        return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
 }
 
[... 63 unchanged lines hidden ...]
        unsigned int secno = segno / sbi->segs_per_sec;
        unsigned int start_segno = secno * sbi->segs_per_sec;
        unsigned int next;
 
        write_lock(&free_i->segmap_lock);
        clear_bit(segno, free_i->free_segmap);
        free_i->free_segments++;
 
-       next = find_next_bit(free_i->free_segmap, TOTAL_SEGS(sbi), start_segno);
+       next = find_next_bit(free_i->free_segmap, MAIN_SEGS(sbi), start_segno);
        if (next >= start_segno + sbi->segs_per_sec) {
                clear_bit(secno, free_i->free_secmap);
                free_i->free_sections++;
        }
        write_unlock(&free_i->segmap_lock);
 }
 
 static inline void __set_inuse(struct f2fs_sb_info *sbi,
[... 97 unchanged lines hidden ...]
 
 static inline int reserved_sections(struct f2fs_sb_info *sbi)
 {
        return ((unsigned int) reserved_segments(sbi)) / sbi->segs_per_sec;
 }
 
 static inline bool need_SSR(struct f2fs_sb_info *sbi)
 {
-       return (prefree_segments(sbi) / sbi->segs_per_sec)
-                       + free_sections(sbi) < overprovision_sections(sbi);
+       int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
+       int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
+       return free_sections(sbi) <= (node_secs + 2 * dent_secs +
+                                               reserved_sections(sbi) + 1);
 }
 
 static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
 {
        int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
        int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
 
        if (unlikely(sbi->por_doing))
[... 18 unchanged lines hidden ...]
  * Sometimes f2fs may be better to drop out-of-place update policy.
  * And, users can control the policy through sysfs entries.
  * There are five policies with triggering conditions as follows.
  * F2FS_IPU_FORCE - all the time,
  * F2FS_IPU_SSR - if SSR mode is activated,
  * F2FS_IPU_UTIL - if FS utilization is over threashold,
  * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
  *                     threashold,
+ * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
+ *                  storages. IPU will be triggered only if the # of dirty
+ *                  pages over min_fsync_blocks.
  * F2FS_IPUT_DISABLE - disable IPU. (=default option)
  */
 #define DEF_MIN_IPU_UTIL       70
+#define DEF_MIN_FSYNC_BLOCKS   8
 
 enum {
        F2FS_IPU_FORCE,
        F2FS_IPU_SSR,
        F2FS_IPU_UTIL,
        F2FS_IPU_SSR_UTIL,
-       F2FS_IPU_DISABLE,
+       F2FS_IPU_FSYNC,
 };
 
 static inline bool need_inplace_update(struct inode *inode)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       unsigned int policy = SM_I(sbi)->ipu_policy;
 
        /* IPU can be done only for the user data */
-       if (S_ISDIR(inode->i_mode))
+       if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
                return false;
 
-       /* this is only set during fdatasync */
-       if (is_inode_flag_set(F2FS_I(inode), FI_NEED_IPU))
+       if (policy & (0x1 << F2FS_IPU_FORCE))
                return true;
+       if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
+               return true;
+       if (policy & (0x1 << F2FS_IPU_UTIL) &&
+                       utilization(sbi) > SM_I(sbi)->min_ipu_util)
+               return true;
+       if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
+                       utilization(sbi) > SM_I(sbi)->min_ipu_util)
+               return true;
 
-       switch (SM_I(sbi)->ipu_policy) {
-       case F2FS_IPU_FORCE:
+       /* this is only set during fdatasync */
+       if (policy & (0x1 << F2FS_IPU_FSYNC) &&
+                       is_inode_flag_set(F2FS_I(inode), FI_NEED_IPU))
                return true;
-       case F2FS_IPU_SSR:
-               if (need_SSR(sbi))
-                       return true;
-               break;
-       case F2FS_IPU_UTIL:
-               if (utilization(sbi) > SM_I(sbi)->min_ipu_util)
-                       return true;
-               break;
-       case F2FS_IPU_SSR_UTIL:
-               if (need_SSR(sbi) && utilization(sbi) > SM_I(sbi)->min_ipu_util)
-                       return true;
-               break;
-       case F2FS_IPU_DISABLE:
-               break;
-       }
+
        return false;
 }
 
 static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
                int type)
 {
        struct curseg_info *curseg = CURSEG_I(sbi, type);
        return curseg->segno;
[... 10 unchanged lines hidden ...]
 {
        struct curseg_info *curseg = CURSEG_I(sbi, type);
        return curseg->next_blkoff;
 }
 
 #ifdef CONFIG_F2FS_CHECK_FS
 static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
 {
-       unsigned int end_segno = SM_I(sbi)->segment_count - 1;
-       BUG_ON(segno > end_segno);
+       BUG_ON(segno > TOTAL_SEGS(sbi) - 1);
 }
 
 static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
 {
-       struct f2fs_sm_info *sm_info = SM_I(sbi);
-       block_t total_blks = sm_info->segment_count << sbi->log_blocks_per_seg;
-       block_t start_addr = sm_info->seg0_blkaddr;
-       block_t end_addr = start_addr + total_blks - 1;
-       BUG_ON(blk_addr < start_addr);
-       BUG_ON(blk_addr > end_addr);
+       BUG_ON(blk_addr < SEG0_BLKADDR(sbi));
+       BUG_ON(blk_addr >= MAX_BLKADDR(sbi));
 }
 
 /*
  * Summary block is always treated as an invalid block
  */
 static inline void check_block_count(struct f2fs_sb_info *sbi,
                int segno, struct f2fs_sit_entry *raw_sit)
 {
-       struct f2fs_sm_info *sm_info = SM_I(sbi);
-       unsigned int end_segno = sm_info->segment_count - 1;
        bool is_valid  = test_bit_le(0, raw_sit->valid_map) ? true : false;
        int valid_blocks = 0;
        int cur_pos = 0, next_pos;
 
        /* check segment usage */
        BUG_ON(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg);
 
        /* check boundary of a given segment number */
-       BUG_ON(segno > end_segno);
+       BUG_ON(segno > TOTAL_SEGS(sbi) - 1);
 
        /* check bitmap with valid block count */
        do {
                if (is_valid) {
                        next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
                                        sbi->blocks_per_seg,
                                        cur_pos);
                        valid_blocks += next_pos - cur_pos;
                } else
                        next_pos = find_next_bit_le(&raw_sit->valid_map,
                                        sbi->blocks_per_seg,
                                        cur_pos);
                cur_pos = next_pos;
                is_valid = !is_valid;
        } while (cur_pos < sbi->blocks_per_seg);
        BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
 }
 #else
-#define check_seg_range(sbi, segno)
-#define verify_block_addr(sbi, blk_addr)
-#define check_block_count(sbi, segno, raw_sit)
+static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
+{
+       if (segno > TOTAL_SEGS(sbi) - 1)
+               sbi->need_fsck = true;
+}
+
+static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
+{
+       if (blk_addr < SEG0_BLKADDR(sbi) || blk_addr >= MAX_BLKADDR(sbi))
+               sbi->need_fsck = true;
+}
+
+/*
+ * Summary block is always treated as an invalid block
+ */
+static inline void check_block_count(struct f2fs_sb_info *sbi,
+               int segno, struct f2fs_sit_entry *raw_sit)
+{
+       /* check segment usage */
+       if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
+               sbi->need_fsck = true;
+
+       /* check boundary of a given segment number */
+       if (segno > TOTAL_SEGS(sbi) - 1)
+               sbi->need_fsck = true;
+}
 #endif
 
 static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
                                                unsigned int start)
 {
        struct sit_info *sit_i = SIT_I(sbi);
-       unsigned int offset = SIT_BLOCK_OFFSET(sit_i, start);
+       unsigned int offset = SIT_BLOCK_OFFSET(start);
        block_t blk_addr = sit_i->sit_base_addr + offset;
 
        check_seg_range(sbi, start);
 
        /* calculate sit block address */
        if (f2fs_test_bit(offset, sit_i->sit_bitmap))
                blk_addr += sit_i->sit_blocks;
 
[... 10 unchanged lines hidden ...]
        else
                block_addr -= sit_i->sit_blocks;
 
        return block_addr + sit_i->sit_base_addr;
 }
 
 static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
 {
-       unsigned int block_off = SIT_BLOCK_OFFSET(sit_i, start);
+       unsigned int block_off = SIT_BLOCK_OFFSET(start);
 
        if (f2fs_test_bit(block_off, sit_i->sit_bitmap))
                f2fs_clear_bit(block_off, sit_i->sit_bitmap);
        else
                f2fs_set_bit(block_off, sit_i->sit_bitmap);
 }
 
 static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi)
[... 30 unchanged lines hidden ...]
                return true;
        return false;
 }
 
 static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
 {
        struct block_device *bdev = sbi->sb->s_bdev;
        struct request_queue *q = bdev_get_queue(bdev);
-       return SECTOR_TO_BLOCK(sbi, queue_max_sectors(q));
+       return SECTOR_TO_BLOCK(queue_max_sectors(q));
 }
 
 /*
  * It is very important to gather dirty pages and write at once, so that we can
  * submit a big bio without interfering other data writes.
  * By default, 512 pages for directory data,
  * 512 pages (2MB) * 3 for three types of nodes, and
  * max_bio_blocks for meta are set.
  */
 static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
 {
        if (type == DATA)
                return sbi->blocks_per_seg;
        else if (type == NODE)
                return 3 * sbi->blocks_per_seg;
        else if (type == META)
-               return MAX_BIO_BLOCKS(max_hw_blocks(sbi));
+               return MAX_BIO_BLOCKS(sbi);
        else
                return 0;
 }
 
 /*
  * When writing pages, it'd better align nr_to_write for segment size.
  */
 static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
[... 6 unchanged lines hidden ...]
 
        nr_to_write = wbc->nr_to_write;
 
        if (type == DATA)
                desired = 4096;
        else if (type == NODE)
                desired = 3 * max_hw_blocks(sbi);
        else
-               desired = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
+               desired = MAX_BIO_BLOCKS(sbi);
 
        wbc->nr_to_write = desired;
        return desired - nr_to_write;
 }
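Reviewer note on the need_inplace_update() rework: ipu_policy changes from a single enum value dispatched by a switch into a bitmask, so several triggers can be armed at once, and the old F2FS_IPU_DISABLE case simply becomes "no bits set". A minimal sketch of combining policies (field names taken from the code above; how the mask is exposed to userspace is outside this diff):

    /* e.g. allow IPU when SSR is active, and for fdatasync-marked pages */
    SM_I(sbi)->ipu_policy  = (0x1 << F2FS_IPU_SSR) | (0x1 << F2FS_IPU_FSYNC);
    SM_I(sbi)->min_ipu_util = DEF_MIN_IPU_UTIL;   /* consulted by the *_UTIL policies */

Per the new comment, F2FS_IPU_FSYNC is meant to fire only when an fsync covers more than min_fsync_blocks dirty pages (default DEF_MIN_FSYNC_BLOCKS = 8), which is why the FI_NEED_IPU flag is described as being set only during fdatasync.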