/*
 * fs/f2fs/segment.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* constant macro */
#define NULL_SEGNO			((unsigned int)(~0))
#define NULL_SECNO			((unsigned int)(~0))

#define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */

/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno)	(segno - free_i->start_segno)
#define GET_R2L_SEGNO(free_i, segno)	(segno + free_i->start_segno)

#define IS_DATASEG(t)	(t <= CURSEG_COLD_DATA)
#define IS_NODESEG(t)	(t >= CURSEG_HOT_NODE)

#define IS_CURSEG(sbi, seg)						\
	((seg == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||		\
	 (seg == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||		\
	 (seg == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||		\
	 (seg == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||		\
	 (seg == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||		\
	 (seg == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))

#define IS_CURSEC(sbi, secno)						\
	((secno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
	  sbi->segs_per_sec))

#define MAIN_BLKADDR(sbi)	(SM_I(sbi)->main_blkaddr)
#define SEG0_BLKADDR(sbi)	(SM_I(sbi)->seg0_blkaddr)

#define MAIN_SEGS(sbi)	(SM_I(sbi)->main_segments)
#define MAIN_SECS(sbi)	(sbi->total_sections)

#define TOTAL_SEGS(sbi)	(SM_I(sbi)->segment_count)
#define TOTAL_BLKS(sbi)	(TOTAL_SEGS(sbi) << sbi->log_blocks_per_seg)

#define MAX_BLKADDR(sbi)	(SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
#define SEGMENT_SIZE(sbi)	(1ULL << (sbi->log_blocksize +		\
					sbi->log_blocks_per_seg))

#define START_BLOCK(sbi, segno)	(SEG0_BLKADDR(sbi) +			\
	 (GET_R2L_SEGNO(FREE_I(sbi), segno) << sbi->log_blocks_per_seg))

#define NEXT_FREE_BLKADDR(sbi, curseg)					\
	(START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff)

#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)	((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> sbi->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (sbi->blocks_per_seg - 1))

#define GET_SEGNO(sbi, blk_addr)					\
	(((blk_addr == NULL_ADDR) || (blk_addr == NEW_ADDR)) ?		\
	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),				\
		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define GET_SECNO(sbi, segno)						\
	((segno) / sbi->segs_per_sec)
#define GET_ZONENO_FROM_SEGNO(sbi, segno)				\
	((segno / sbi->segs_per_sec) / sbi->secs_per_zone)
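
/*
 * Editor's example (illustrative values, not from this file): with
 * seg0_blkaddr == 512 and 512 blocks per segment (log_blocks_per_seg == 9),
 * blk_addr 1536 resolves as
 *	GET_SEGOFF_FROM_SEG0 = 1536 - 512       = 1024
 *	GET_SEGNO_FROM_SEG0  = 1024 >> 9        = 2
 *	GET_BLKOFF_FROM_SEG0 = 1024 & (512 - 1) = 0
 * i.e. the first block of the third segment counted from segment #0.
 */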

#define GET_SUM_BLOCK(sbi, segno)					\
	((sbi->sm_info->ssa_blkaddr) + segno)

#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = type)

#define SIT_ENTRY_OFFSET(sit_i, segno)					\
	(segno % sit_i->sents_per_block)
#define SIT_BLOCK_OFFSET(segno)						\
	(segno / SIT_ENTRY_PER_BLOCK)
#define START_SEGNO(segno)						\
	(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi)						\
	((MAIN_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)
#define f2fs_bitmap_size(nr)						\
	(BITS_TO_LONGS(nr) * sizeof(unsigned long))
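
/*
 * Editor's example (illustrative, assuming 4KB blocks, for which
 * SIT_ENTRY_PER_BLOCK works out to 55): segment #120 is found in SIT block
 * 120 / 55 = 2 at in-block offset 120 % 55 = 10, and START_SEGNO(120) = 110,
 * so that SIT block covers segments 110..164.
 */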

#define SECTOR_FROM_BLOCK(blk_addr)					\
	(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors)					\
	(sectors >> F2FS_LOG_SECTORS_PER_BLOCK)
#define MAX_BIO_BLOCKS(sbi)						\
	((int)min((int)max_hw_blocks(sbi), BIO_MAX_PAGES))

/*
 * indicate a block allocation direction: RIGHT and LEFT.
 * RIGHT means allocating new sections towards the end of the volume.
 * LEFT means the opposite direction.
 */
enum {
	ALLOC_RIGHT = 0,
	ALLOC_LEFT
};

/*
 * In the victim_sel_policy->alloc_mode, there are two block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
 */
enum {
	LFS = 0,
	SSR
};

/*
 * In the victim_sel_policy->gc_mode, there are two GC, aka cleaning, modes.
 * GC_CB is based on the cost-benefit algorithm.
 * GC_GREEDY is based on the greedy algorithm.
 */
enum {
	GC_CB = 0,
	GC_GREEDY
};
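
/*
 * Editor's note (behaviour lives in gc.c): GC_GREEDY picks the candidate
 * with the fewest valid blocks, minimizing copy cost, while GC_CB also
 * weighs segment age, preferring old, mostly-invalid sections; its cost is
 * roughly proportional to (1 - u) * age / (1 + u) for utilization u.
 */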

/*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
 */
enum {
	BG_GC = 0,
	FG_GC
};

/* for a function parameter to select a victim segment */
struct victim_sel_policy {
	int alloc_mode;			/* LFS or SSR */
	int gc_mode;			/* GC_CB or GC_GREEDY */
	unsigned long *dirty_segmap;	/* dirty segment bitmap */
	unsigned int max_search;	/* maximum # of segments to search */
	unsigned int offset;		/* last scanned bitmap offset */
	unsigned int ofs_unit;		/* bitmap search unit */
	unsigned int min_cost;		/* minimum cost */
	unsigned int min_segno;		/* segment # having min. cost */
};

struct seg_entry {
	unsigned short valid_blocks;	/* # of valid blocks */
	unsigned char *cur_valid_map;	/* validity bitmap of blocks */
	/*
	 * # of valid blocks and the validity bitmap stored in the last
	 * checkpoint pack. This information is used by the SSR mode.
	 */
	unsigned short ckpt_valid_blocks;
	unsigned char *ckpt_valid_map;
	unsigned char *discard_map;	/* segment discard bitmap */
	unsigned char type;		/* segment type like CURSEG_XXX_TYPE */
	unsigned long long mtime;	/* modification time of the segment */
};

struct sec_entry {
	unsigned int valid_blocks;	/* # of valid blocks in a section */
};

struct segment_allocation {
	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
};

struct inmem_pages {
	struct list_head list;
	struct page *page;
};

struct sit_info {
	const struct segment_allocation *s_ops;

	block_t sit_base_addr;		/* start block address of SIT area */
	block_t sit_blocks;		/* # of blocks used by SIT area */
	block_t written_valid_blocks;	/* # of valid blocks in main area */
	char *sit_bitmap;		/* SIT bitmap pointer */
	unsigned int bitmap_size;	/* SIT bitmap size */

	unsigned long *tmp_map;			/* bitmap for temporal use */
	unsigned long *dirty_sentries_bitmap;	/* bitmap for dirty sentries */
	unsigned int dirty_sentries;		/* # of dirty sentries */
	unsigned int sents_per_block;		/* # of SIT entries per block */
	struct mutex sentry_lock;		/* to protect SIT cache */
	struct seg_entry *sentries;		/* SIT segment-level cache */
	struct sec_entry *sec_entries;		/* SIT section-level cache */

	/* for cost-benefit algorithm in cleaning procedure */
	unsigned long long elapsed_time;	/* elapsed time after mount */
	unsigned long long mounted_time;	/* mount time */
	unsigned long long min_mtime;		/* min. modification time */
	unsigned long long max_mtime;		/* max. modification time */
};

struct free_segmap_info {
	unsigned int start_segno;	/* start segment number logically */
	unsigned int free_segments;	/* # of free segments */
	unsigned int free_sections;	/* # of free sections */
	spinlock_t segmap_lock;		/* free segmap lock */
	unsigned long *free_segmap;	/* free segment bitmap */
	unsigned long *free_secmap;	/* free section bitmap */
};

/* Notice: The order of dirty type is same with CURSEG_XXX in f2fs.h */
enum dirty_type {
	DIRTY_HOT_DATA,		/* dirty segments assigned as hot data logs */
	DIRTY_WARM_DATA,	/* dirty segments assigned as warm data logs */
	DIRTY_COLD_DATA,	/* dirty segments assigned as cold data logs */
	DIRTY_HOT_NODE,		/* dirty segments assigned as hot node logs */
	DIRTY_WARM_NODE,	/* dirty segments assigned as warm node logs */
	DIRTY_COLD_NODE,	/* dirty segments assigned as cold node logs */
	DIRTY,			/* to count # of dirty segments */
	PRE,			/* to count # of entirely obsolete segments */
	NR_DIRTY_TYPE
};

struct dirty_seglist_info {
	const struct victim_selection *v_ops;	/* victim selection operation */
	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
	struct mutex seglist_lock;		/* lock for segment bitmaps */
	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
	unsigned long *victim_secmap;		/* background GC victims */
};

/* victim selection function for cleaning and SSR */
struct victim_selection {
	int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
							int, int, char);
};

/* for active log information */
struct curseg_info {
	struct mutex curseg_mutex;		/* lock for consistency */
	struct f2fs_summary_block *sum_blk;	/* cached summary block */
	unsigned char alloc_type;		/* current allocation type */
	unsigned int segno;			/* current segment number */
	unsigned short next_blkoff;		/* next block offset to write */
	unsigned int zone;			/* current zone number */
	unsigned int next_segno;		/* preallocated segment */
};

struct sit_entry_set {
	struct list_head set_list;	/* link with all sit sets */
	unsigned int start_segno;	/* start segno of sits in set */
	unsigned int entry_cnt;		/* the # of sit entries in set */
};

/*
 * inline functions
 */
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}

static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sentries[segno];
}

static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sec_entries[GET_SECNO(sbi, segno)];
}

static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, int section)
{
	/*
	 * In order to get # of valid blocks in a section instantly from many
	 * segments, f2fs manages two counting structures separately.
	 */
	if (section > 1)
		return get_sec_entry(sbi, segno)->valid_blocks;
	else
		return get_seg_entry(sbi, segno)->valid_blocks;
}

static inline void seg_info_from_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	se->valid_blocks = GET_SIT_VBLOCKS(rs);
	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->type = GET_SIT_TYPE(rs);
	se->mtime = le64_to_cpu(rs->mtime);
}

static inline void seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
					se->valid_blocks;
	rs->vblocks = cpu_to_le16(raw_vblocks);
	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->ckpt_valid_blocks = se->valid_blocks;
	rs->mtime = cpu_to_le64(se->mtime);
}
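
/*
 * Editor's example (illustrative): the raw vblocks field packs the segment
 * type above SIT_VBLOCKS_SHIFT and the valid block count below it. With
 * SIT_VBLOCKS_SHIFT == 10, a warm data segment (type CURSEG_WARM_DATA == 1)
 * holding 300 valid blocks is encoded as (1 << 10) | 300 == 1324.
 */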

static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
		unsigned int max, unsigned int segno)
{
	unsigned int ret;
	spin_lock(&free_i->segmap_lock);
	ret = find_next_bit(free_i->free_segmap, max, segno);
	spin_unlock(&free_i->segmap_lock);
	return ret;
}

static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	unsigned int start_segno = secno * sbi->segs_per_sec;
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	clear_bit(segno, free_i->free_segmap);
	free_i->free_segments++;

	next = find_next_bit(free_i->free_segmap,
			start_segno + sbi->segs_per_sec, start_segno);
	if (next >= start_segno + sbi->segs_per_sec) {
		clear_bit(secno, free_i->free_secmap);
		free_i->free_sections++;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	set_bit(segno, free_i->free_segmap);
	free_i->free_segments--;
	if (!test_and_set_bit(secno, free_i->free_secmap))
		free_i->free_sections--;
}

static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	unsigned int start_segno = secno * sbi->segs_per_sec;
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	if (test_and_clear_bit(segno, free_i->free_segmap)) {
		free_i->free_segments++;

		next = find_next_bit(free_i->free_segmap,
				start_segno + sbi->segs_per_sec, start_segno);
		if (next >= start_segno + sbi->segs_per_sec) {
			if (test_and_clear_bit(secno, free_i->free_secmap))
				free_i->free_sections++;
		}
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	spin_lock(&free_i->segmap_lock);
	if (!test_and_set_bit(segno, free_i->free_segmap)) {
		free_i->free_segments--;
		if (!test_and_set_bit(secno, free_i->free_secmap))
			free_i->free_sections--;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
		void *dst_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}

static inline block_t written_block_count(struct f2fs_sb_info *sbi)
{
	return SIT_I(sbi)->written_valid_blocks;
}

static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_segments;
}

static inline int reserved_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->reserved_segments;
}

static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_sections;
}

static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[PRE];
}

static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
}

static inline int overprovision_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->ovp_segments;
}

static inline int overprovision_sections(struct f2fs_sb_info *sbi)
{
	return ((unsigned int) overprovision_segments(sbi)) / sbi->segs_per_sec;
}

static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
	return ((unsigned int) reserved_segments(sbi)) / sbi->segs_per_sec;
}

static inline bool need_SSR(struct f2fs_sb_info *sbi)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	return free_sections(sbi) <= (node_secs + 2 * dent_secs +
						reserved_sections(sbi) + 1);
}

static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return false;

	return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
						reserved_sections(sbi));
}
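
/*
 * Editor's example (illustrative numbers): with 100 free sections, 2 reserved
 * sections, and dirty node/dentry pages amounting to 10 and 20 sections,
 * has_not_enough_free_secs(sbi, 0) tests 100 <= 10 + 2 * 20 + 2 = 52, so
 * space is still considered sufficient. Dentry sections are weighted doubly,
 * presumably to leave headroom for the node updates their flush triggers.
 */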

static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{
	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
}

static inline int utilization(struct f2fs_sb_info *sbi)
{
	return div_u64((u64)valid_user_blocks(sbi) * 100,
					sbi->user_block_count);
}

/*
 * Sometimes it is better for f2fs to drop its out-of-place update policy,
 * and users can control the policy through sysfs entries.
 * There are five policies with triggering conditions as follows.
 * F2FS_IPU_FORCE - all the time,
 * F2FS_IPU_SSR - if SSR mode is activated,
 * F2FS_IPU_UTIL - if FS utilization is over threshold,
 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 *                     threshold,
 * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
 *                  storages. IPU will be triggered only if the # of dirty
 *                  pages exceeds min_fsync_blocks.
 * F2FS_IPU_DISABLE - disable IPU. (=default option)
 */
#define DEF_MIN_IPU_UTIL	70
#define DEF_MIN_FSYNC_BLOCKS	8

enum {
	F2FS_IPU_FORCE,
	F2FS_IPU_SSR,
	F2FS_IPU_UTIL,
	F2FS_IPU_SSR_UTIL,
	F2FS_IPU_FSYNC,
};
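
/*
 * Editor's note (illustrative usage): ipu_policy is interpreted as a bitmask
 * of the values above and is typically set through sysfs, e.g.
 *	echo 4 > /sys/fs/f2fs/<dev>/ipu_policy
 * sets bit 2 (F2FS_IPU_UTIL), enabling in-place updates once utilization
 * exceeds min_ipu_util; echo 0 disables IPU entirely.
 */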

static inline bool need_inplace_update(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int policy = SM_I(sbi)->ipu_policy;

	/* IPU can be done only for the user data */
	if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
		return false;

	if (policy & (0x1 << F2FS_IPU_FORCE))
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
		return true;
	if (policy & (0x1 << F2FS_IPU_UTIL) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;

	/* this is only set during fdatasync */
	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
			is_inode_flag_set(F2FS_I(inode), FI_NEED_IPU))
		return true;

	return false;
}

static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->segno;
}

static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->alloc_type;
}

static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->next_blkoff;
}

#ifdef CONFIG_F2FS_CHECK_FS
static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	BUG_ON(segno > TOTAL_SEGS(sbi) - 1);
}

static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
{
	BUG_ON(blk_addr < SEG0_BLKADDR(sbi));
	BUG_ON(blk_addr >= MAX_BLKADDR(sbi));
}

/*
 * Summary block is always treated as an invalid block
 */
static inline void check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{
	bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
	int valid_blocks = 0;
	int cur_pos = 0, next_pos;

	/* check segment usage */
	BUG_ON(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg);

	/* check boundary of a given segment number */
	BUG_ON(segno > TOTAL_SEGS(sbi) - 1);

	/* check bitmap with valid block count */
	do {
		if (is_valid) {
			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
			valid_blocks += next_pos - cur_pos;
		} else
			next_pos = find_next_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
		cur_pos = next_pos;
		is_valid = !is_valid;
	} while (cur_pos < sbi->blocks_per_seg);
	BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
}
#else
static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	if (segno > TOTAL_SEGS(sbi) - 1)
		set_sbi_flag(sbi, SBI_NEED_FSCK);
}

static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
{
	if (blk_addr < SEG0_BLKADDR(sbi) || blk_addr >= MAX_BLKADDR(sbi))
		set_sbi_flag(sbi, SBI_NEED_FSCK);
}

/*
 * Summary block is always treated as an invalid block
 */
static inline void check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{
	/* check segment usage */
	if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
		set_sbi_flag(sbi, SBI_NEED_FSCK);

	/* check boundary of a given segment number */
	if (segno > TOTAL_SEGS(sbi) - 1)
		set_sbi_flag(sbi, SBI_NEED_FSCK);
}
#endif

static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(start);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, start);

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return blk_addr;
}

static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	block_addr -= sit_i->sit_base_addr;
	if (block_addr < sit_i->sit_blocks)
		block_addr += sit_i->sit_blocks;
	else
		block_addr -= sit_i->sit_blocks;

	return block_addr + sit_i->sit_base_addr;
}
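
/*
 * Editor's note: f2fs keeps two on-disk copies of every SIT block and flips
 * between them at checkpoint time; sit_bitmap records which copy is live.
 * current_sit_addr() above resolves a segment to the live copy, and
 * next_sit_addr() to its shadow, which is where the next checkpoint writes.
 */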

static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{
	unsigned int block_off = SIT_BLOCK_OFFSET(start);

	f2fs_change_bit(block_off, sit_i->sit_bitmap);
}

static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return sit_i->elapsed_time + CURRENT_TIME_SEC.tv_sec -
						sit_i->mounted_time;
}

static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
			unsigned int ofs_in_node, unsigned char version)
{
	sum->nid = cpu_to_le32(nid);
	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
	sum->version = version;
}

static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
				- (base + 1) + type;
}

static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{
	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
		return true;
	return false;
}

static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	return SECTOR_TO_BLOCK(queue_max_sectors(q));
}

/*
 * It is very important to gather dirty pages and write them at once, so that
 * we can submit a big bio without interfering with other data writes.
 * By default, 512 pages for directory data,
 * 512 pages (2MB) * 3 for three types of nodes, and
 * max_bio_blocks for meta are set.
 */
static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
{
	if (sbi->sb->s_bdi->wb.dirty_exceeded)
		return 0;

	if (type == DATA)
		return sbi->blocks_per_seg;
	else if (type == NODE)
		return 3 * sbi->blocks_per_seg;
	else if (type == META)
		return MAX_BIO_BLOCKS(sbi);
	else
		return 0;
}

/*
 * When writing pages, it is better to align nr_to_write to the segment size.
 */
static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
					struct writeback_control *wbc)
{
	long nr_to_write, desired;

	if (wbc->sync_mode != WB_SYNC_NONE)
		return 0;

	nr_to_write = wbc->nr_to_write;

	if (type == DATA)
		desired = 4096;
	else if (type == NODE)
		desired = 3 * max_hw_blocks(sbi);
	else
		desired = MAX_BIO_BLOCKS(sbi);

	wbc->nr_to_write = desired;
	return desired - nr_to_write;
}
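
/*
 * Editor's note (illustrative usage): writepages callers are expected to call
 * nr_pages_to_write() before issuing writeback and then subtract the returned
 * difference from wbc->nr_to_write afterwards, so the writeback core's
 * accounting still reflects what was actually requested.
 */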