/*
 * fs/f2fs/segment.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* constant macro */
#define NULL_SEGNO			((unsigned int)(~0))
#define NULL_SECNO			((unsigned int)(~0))

#define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */

/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno)	(segno - free_i->start_segno)
#define GET_R2L_SEGNO(free_i, segno)	(segno + free_i->start_segno)

#define IS_DATASEG(t)	(t <= CURSEG_COLD_DATA)
#define IS_NODESEG(t)	(t >= CURSEG_HOT_NODE)

#define IS_CURSEG(sbi, seg)						\
	((seg == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||		\
	 (seg == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||		\
	 (seg == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||		\
	 (seg == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||		\
	 (seg == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||		\
	 (seg == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))

#define IS_CURSEC(sbi, secno)						\
	((secno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
	  sbi->segs_per_sec))

#define MAIN_BLKADDR(sbi)	(SM_I(sbi)->main_blkaddr)
#define SEG0_BLKADDR(sbi)	(SM_I(sbi)->seg0_blkaddr)

#define MAIN_SEGS(sbi)	(SM_I(sbi)->main_segments)
#define MAIN_SECS(sbi)	(sbi->total_sections)

#define TOTAL_SEGS(sbi)	(SM_I(sbi)->segment_count)
#define TOTAL_BLKS(sbi)	(TOTAL_SEGS(sbi) << sbi->log_blocks_per_seg)

#define MAX_BLKADDR(sbi)	(SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
#define SEGMENT_SIZE(sbi)	(1ULL << (sbi->log_blocksize +		\
					sbi->log_blocks_per_seg))

#define START_BLOCK(sbi, segno)	(SEG0_BLKADDR(sbi) +			\
	 (GET_R2L_SEGNO(FREE_I(sbi), segno) << sbi->log_blocks_per_seg))

#define NEXT_FREE_BLKADDR(sbi, curseg)					\
	(START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff)

#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)	((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> sbi->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (sbi->blocks_per_seg - 1))

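/*
 * Example (illustrative, numbers assumed): with the default 2MB segment,
 * log_blocks_per_seg is 9, i.e. 512 blocks of 4KB.  A block address that is
 * 1536 blocks past seg0_blkaddr then lies in segment #3 counted from
 * segment 0 (1536 >> 9) at block offset 0 (1536 & 511).  GET_SEGNO() below
 * additionally rebases that segment number onto the main area via
 * GET_L2R_SEGNO().
 */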
#define GET_SEGNO(sbi, blk_addr)					\
	(((blk_addr == NULL_ADDR) || (blk_addr == NEW_ADDR)) ?		\
	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),				\
		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define GET_SECNO(sbi, segno)						\
	((segno) / sbi->segs_per_sec)
#define GET_ZONENO_FROM_SEGNO(sbi, segno)				\
	((segno / sbi->segs_per_sec) / sbi->secs_per_zone)

#define GET_SUM_BLOCK(sbi, segno)					\
	((sbi->sm_info->ssa_blkaddr) + segno)

#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = type)

#define SIT_ENTRY_OFFSET(sit_i, segno)					\
	(segno % sit_i->sents_per_block)
#define SIT_BLOCK_OFFSET(segno)						\
	(segno / SIT_ENTRY_PER_BLOCK)
#define START_SEGNO(segno)						\
	(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi)						\
	((MAIN_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)
#define f2fs_bitmap_size(nr)						\
	(BITS_TO_LONGS(nr) * sizeof(unsigned long))

#define SECTOR_FROM_BLOCK(blk_addr)					\
	(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors)					\
	(sectors >> F2FS_LOG_SECTORS_PER_BLOCK)
#define MAX_BIO_BLOCKS(sbi)						\
	((int)min((int)max_hw_blocks(sbi), BIO_MAX_PAGES))

/*
 * indicate a block allocation direction: RIGHT and LEFT.
 * RIGHT means allocating new sections towards the end of the volume.
 * LEFT means the opposite direction.
 */
enum {
	ALLOC_RIGHT = 0,
	ALLOC_LEFT
};

/*
 * In the victim_sel_policy->alloc_mode, there are two block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
 */
enum {
	LFS = 0,
	SSR
};

/*
 * In the victim_sel_policy->gc_mode, there are two gc, aka cleaning, modes.
 * GC_CB is based on the cost-benefit algorithm.
 * GC_GREEDY is based on the greedy algorithm.
 */
enum {
	GC_CB = 0,
	GC_GREEDY
};

/*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
 * FORCE_FG_GC means an on-demand cleaning job issued from the background
 * thread.
 */
enum {
	BG_GC = 0,
	FG_GC,
	FORCE_FG_GC,
};

/* for a function parameter to select a victim segment */
struct victim_sel_policy {
	int alloc_mode;			/* LFS or SSR */
	int gc_mode;			/* GC_CB or GC_GREEDY */
	unsigned long *dirty_segmap;	/* dirty segment bitmap */
	unsigned int max_search;	/* maximum # of segments to search */
	unsigned int offset;		/* last scanned bitmap offset */
	unsigned int ofs_unit;		/* bitmap search unit */
	unsigned int min_cost;		/* minimum cost */
	unsigned int min_segno;		/* segment # having min. cost */
};

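/*
 * Note: the first four fields of struct seg_entry below are bit-fields
 * sized 6 + 10 + 10 + 6 = 32 bits, so the segment type and both valid-block
 * counters share one 32-bit word; 10 bits suffice because a 2MB segment
 * holds at most 512 blocks.
 */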
struct seg_entry {
	unsigned int type:6;		/* segment type like CURSEG_XXX_TYPE */
	unsigned int valid_blocks:10;	/* # of valid blocks */
	unsigned int ckpt_valid_blocks:10;	/* # of valid blocks last cp */
	unsigned int padding:6;		/* padding */
	unsigned char *cur_valid_map;	/* validity bitmap of blocks */
	/*
	 * # of valid blocks and the validity bitmap stored in the last
	 * checkpoint pack. This information is used by the SSR mode.
	 */
	unsigned char *ckpt_valid_map;	/* validity bitmap of blocks last cp */
	unsigned char *discard_map;
	unsigned long long mtime;	/* modification time of the segment */
};

struct sec_entry {
	unsigned int valid_blocks;	/* # of valid blocks in a section */
};

struct segment_allocation {
	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
};

/*
 * this value is set in page as a private data which indicates that
 * the page is atomically written, and it is in inmem_pages list.
 */
#define ATOMIC_WRITTEN_PAGE		((unsigned long)-1)

#define IS_ATOMIC_WRITTEN_PAGE(page)			\
		(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)

struct inmem_pages {
	struct list_head list;
	struct page *page;
	block_t old_addr;		/* for revoking when commit fails */
};

struct sit_info {
	const struct segment_allocation *s_ops;

	block_t sit_base_addr;		/* start block address of SIT area */
	block_t sit_blocks;		/* # of blocks used by SIT area */
	block_t written_valid_blocks;	/* # of valid blocks in main area */
	char *sit_bitmap;		/* SIT bitmap pointer */
	unsigned int bitmap_size;	/* SIT bitmap size */

	unsigned long *tmp_map;			/* bitmap for temporary use */
	unsigned long *dirty_sentries_bitmap;	/* bitmap for dirty sentries */
	unsigned int dirty_sentries;		/* # of dirty sentries */
	unsigned int sents_per_block;		/* # of SIT entries per block */
	struct mutex sentry_lock;		/* to protect SIT cache */
	struct seg_entry *sentries;		/* SIT segment-level cache */
	struct sec_entry *sec_entries;		/* SIT section-level cache */

	/* for cost-benefit algorithm in cleaning procedure */
	unsigned long long elapsed_time;	/* elapsed time after mount */
	unsigned long long mounted_time;	/* mount time */
	unsigned long long min_mtime;		/* min. modification time */
	unsigned long long max_mtime;		/* max. modification time */
};

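/*
 * Note: the per-segment mtime, together with the min_mtime/max_mtime bounds
 * above, feeds the GC_CB victim policy.  The cost-benefit idea is roughly
 * benefit/cost = age * (1 - u) / (1 + u) for a section of utilization u;
 * see gc.c for the exact weighting f2fs applies.
 */
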
struct free_segmap_info {
	unsigned int start_segno;	/* start segment number logically */
	unsigned int free_segments;	/* # of free segments */
	unsigned int free_sections;	/* # of free sections */
	spinlock_t segmap_lock;		/* free segmap lock */
	unsigned long *free_segmap;	/* free segment bitmap */
	unsigned long *free_secmap;	/* free section bitmap */
};

/* Notice: The order of dirty type is the same as CURSEG_XXX in f2fs.h */
enum dirty_type {
	DIRTY_HOT_DATA,		/* dirty segments assigned as hot data logs */
	DIRTY_WARM_DATA,	/* dirty segments assigned as warm data logs */
	DIRTY_COLD_DATA,	/* dirty segments assigned as cold data logs */
	DIRTY_HOT_NODE,		/* dirty segments assigned as hot node logs */
	DIRTY_WARM_NODE,	/* dirty segments assigned as warm node logs */
	DIRTY_COLD_NODE,	/* dirty segments assigned as cold node logs */
	DIRTY,			/* to count # of dirty segments */
	PRE,			/* to count # of entirely obsolete segments */
	NR_DIRTY_TYPE
};

struct dirty_seglist_info {
	const struct victim_selection *v_ops;	/* victim selection operation */
	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
	struct mutex seglist_lock;		/* lock for segment bitmaps */
	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
	unsigned long *victim_secmap;		/* background GC victims */
};

/* victim selection function for cleaning and SSR */
struct victim_selection {
	int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
							int, int, char);
};

/* for active log information */
struct curseg_info {
	struct mutex curseg_mutex;		/* lock for consistency */
	struct f2fs_summary_block *sum_blk;	/* cached summary block */
	struct rw_semaphore journal_rwsem;	/* protect journal area */
	struct f2fs_journal *journal;		/* cached journal info */
	unsigned char alloc_type;		/* current allocation type */
	unsigned int segno;			/* current segment number */
	unsigned short next_blkoff;		/* next block offset to write */
	unsigned int zone;			/* current zone number */
	unsigned int next_segno;		/* preallocated segment */
};

struct sit_entry_set {
	struct list_head set_list;	/* link with all sit sets */
	unsigned int start_segno;	/* start segno of sits in set */
	unsigned int entry_cnt;		/* the # of sit entries in set */
};

/*
 * inline functions
 */
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}

static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sentries[segno];
}

static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sec_entries[GET_SECNO(sbi, segno)];
}

static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, int section)
{
	/*
	 * In order to get # of valid blocks in a section instantly from many
	 * segments, f2fs manages two counting structures separately.
	 */
	if (section > 1)
		return get_sec_entry(sbi, segno)->valid_blocks;
	else
		return get_seg_entry(sbi, segno)->valid_blocks;
}

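/*
 * In the on-disk SIT entry, the 16-bit vblocks field packs the segment type
 * into its upper bits and the valid block count into its lower
 * SIT_VBLOCKS_SHIFT bits; GET_SIT_VBLOCKS()/GET_SIT_TYPE() in f2fs.h and the
 * two helpers below convert between that packed form and struct seg_entry.
 */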
static inline void seg_info_from_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	se->valid_blocks = GET_SIT_VBLOCKS(rs);
	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->type = GET_SIT_TYPE(rs);
	se->mtime = le64_to_cpu(rs->mtime);
}

static inline void seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
					se->valid_blocks;
	rs->vblocks = cpu_to_le16(raw_vblocks);
	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->ckpt_valid_blocks = se->valid_blocks;
	rs->mtime = cpu_to_le64(se->mtime);
}

static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
		unsigned int max, unsigned int segno)
{
	unsigned int ret;
	spin_lock(&free_i->segmap_lock);
	ret = find_next_bit(free_i->free_segmap, max, segno);
	spin_unlock(&free_i->segmap_lock);
	return ret;
}

static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	unsigned int start_segno = secno * sbi->segs_per_sec;
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	clear_bit(segno, free_i->free_segmap);
	free_i->free_segments++;

	next = find_next_bit(free_i->free_segmap,
			start_segno + sbi->segs_per_sec, start_segno);
	if (next >= start_segno + sbi->segs_per_sec) {
		clear_bit(secno, free_i->free_secmap);
		free_i->free_sections++;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	set_bit(segno, free_i->free_segmap);
	free_i->free_segments--;
	if (!test_and_set_bit(secno, free_i->free_secmap))
		free_i->free_sections--;
}

static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	unsigned int start_segno = secno * sbi->segs_per_sec;
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	if (test_and_clear_bit(segno, free_i->free_segmap)) {
		free_i->free_segments++;

		next = find_next_bit(free_i->free_segmap,
				start_segno + sbi->segs_per_sec, start_segno);
		if (next >= start_segno + sbi->segs_per_sec) {
			if (test_and_clear_bit(secno, free_i->free_secmap))
				free_i->free_sections++;
		}
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	spin_lock(&free_i->segmap_lock);
	if (!test_and_set_bit(segno, free_i->free_segmap)) {
		free_i->free_segments--;
		if (!test_and_set_bit(secno, free_i->free_secmap))
			free_i->free_sections--;
	}
	spin_unlock(&free_i->segmap_lock);
}

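/*
 * Note: in free_segmap/free_secmap a set bit means "in use".  The helpers
 * above keep the section bitmap consistent with the segment bitmap: a
 * section is marked free again only once find_next_bit() sees no in-use
 * segment left inside it.
 */
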
static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
		void *dst_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}

static inline block_t written_block_count(struct f2fs_sb_info *sbi)
{
	return SIT_I(sbi)->written_valid_blocks;
}

static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_segments;
}

static inline int reserved_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->reserved_segments;
}

static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_sections;
}

static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[PRE];
}

static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
}

static inline int overprovision_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->ovp_segments;
}

static inline int overprovision_sections(struct f2fs_sb_info *sbi)
{
	return ((unsigned int) overprovision_segments(sbi)) / sbi->segs_per_sec;
}

static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
	return ((unsigned int) reserved_segments(sbi)) / sbi->segs_per_sec;
}

static inline bool need_SSR(struct f2fs_sb_info *sbi)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	return free_sections(sbi) <= (node_secs + 2 * dent_secs +
						reserved_sections(sbi) + 1);
}

static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return false;

	return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
						reserved_sections(sbi));
}

static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{
	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
}

static inline int utilization(struct f2fs_sb_info *sbi)
{
	return div_u64((u64)valid_user_blocks(sbi) * 100,
					sbi->user_block_count);
}

/*
 * Sometimes it is better for f2fs to drop its out-of-place update policy and
 * write data in place instead.  Users can control the policy through sysfs
 * entries.  There are five policies with the following triggering conditions.
 * F2FS_IPU_FORCE - all the time,
 * F2FS_IPU_SSR - if SSR mode is activated,
 * F2FS_IPU_UTIL - if FS utilization is over threshold,
 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 *                     threshold,
 * F2FS_IPU_FSYNC - activated in the fsync path only for high performance
 *                  flash storages. IPU will be triggered only if the # of
 *                  dirty pages is over min_fsync_blocks.
 * F2FS_IPU_DISABLE - disable IPU. (=default option)
 */
#define DEF_MIN_IPU_UTIL	70
#define DEF_MIN_FSYNC_BLOCKS	8

enum {
	F2FS_IPU_FORCE,
	F2FS_IPU_SSR,
	F2FS_IPU_UTIL,
	F2FS_IPU_SSR_UTIL,
	F2FS_IPU_FSYNC,
};

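/*
 * Note: ipu_policy is interpreted as a bitmask of the enum values above (see
 * need_inplace_update() below).  For example, a policy value of 0x04
 * (1 << F2FS_IPU_UTIL) enables in-place updates once utilization() exceeds
 * min_ipu_util; the value is typically tuned via the per-device f2fs sysfs
 * entry (see Documentation/filesystems/f2fs.txt).
 */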
static inline bool need_inplace_update(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int policy = SM_I(sbi)->ipu_policy;

	/* IPU can be done only for the user data */
	if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
		return false;

	if (policy & (0x1 << F2FS_IPU_FORCE))
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
		return true;
	if (policy & (0x1 << F2FS_IPU_UTIL) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;

	/* this is only set during fdatasync */
	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
			is_inode_flag_set(F2FS_I(inode), FI_NEED_IPU))
		return true;

	return false;
}

static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->segno;
}

static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->alloc_type;
}

static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->next_blkoff;
}

static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
}

static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
{
	f2fs_bug_on(sbi, blk_addr < SEG0_BLKADDR(sbi)
					|| blk_addr >= MAX_BLKADDR(sbi));
}

/*
 * Summary block is always treated as an invalid block
 */
static inline void check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{
#ifdef CONFIG_F2FS_CHECK_FS
	bool is_valid  = test_bit_le(0, raw_sit->valid_map) ? true : false;
	int valid_blocks = 0;
	int cur_pos = 0, next_pos;

	/* check bitmap with valid block count */
	do {
		if (is_valid) {
			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
			valid_blocks += next_pos - cur_pos;
		} else
			next_pos = find_next_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
		cur_pos = next_pos;
		is_valid = !is_valid;
	} while (cur_pos < sbi->blocks_per_seg);
	BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
#endif
	/* check segment usage, and check boundary of a given segment number */
	f2fs_bug_on(sbi, GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
					|| segno > TOTAL_SEGS(sbi) - 1);
}

static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(start);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, start);

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return blk_addr;
}

static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	block_addr -= sit_i->sit_base_addr;
	if (block_addr < sit_i->sit_blocks)
		block_addr += sit_i->sit_blocks;
	else
		block_addr -= sit_i->sit_blocks;

	return block_addr + sit_i->sit_base_addr;
}

static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{
	unsigned int block_off = SIT_BLOCK_OFFSET(start);

	f2fs_change_bit(block_off, sit_i->sit_bitmap);
}

static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return sit_i->elapsed_time + CURRENT_TIME_SEC.tv_sec -
						sit_i->mounted_time;
}

static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
			unsigned int ofs_in_node, unsigned char version)
{
	sum->nid = cpu_to_le32(nid);
	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
	sum->version = version;
}

static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
				- (base + 1) + type;
}

static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{
	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
		return true;
	return false;
}

static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	return SECTOR_TO_BLOCK(queue_max_sectors(q));
}

/*
 * It is very important to gather dirty pages and write them at once, so that
 * we can submit a big bio without interfering with other data writes.
 * By default, 512 pages for directory data,
 * 512 pages (2MB) * 3 for three types of nodes, and
 * max_bio_blocks for meta are set.
 */
static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
{
	if (sbi->sb->s_bdi->wb.dirty_exceeded)
		return 0;

	if (type == DATA)
		return sbi->blocks_per_seg;
	else if (type == NODE)
		return 3 * sbi->blocks_per_seg;
	else if (type == META)
		return MAX_BIO_BLOCKS(sbi);
	else
		return 0;
}

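/*
 * Note: assuming the default 2MB segment (512 blocks of 4KB), the thresholds
 * above mean the caller skips writeback until roughly 512 dirty pages (2MB)
 * have accumulated for DATA and 1536 pages (6MB) for NODE, unless the bdi
 * already reports dirty_exceeded.
 */
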
/*
 * When writing pages, it is better to align nr_to_write to the segment size.
 */
static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
					struct writeback_control *wbc)
{
	long nr_to_write, desired;

	if (wbc->sync_mode != WB_SYNC_NONE)
		return 0;

	nr_to_write = wbc->nr_to_write;

	if (type == DATA)
		desired = 4096;
	else if (type == NODE)
		desired = 3 * max_hw_blocks(sbi);
	else
		desired = MAX_BIO_BLOCKS(sbi);

	wbc->nr_to_write = desired;
	return desired - nr_to_write;
}