/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/segment.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* constant macro */
#define NULL_SEGNO			((unsigned int)(~0))
#define NULL_SECNO			((unsigned int)(~0))

#define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */
#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS	4096	/* 8GB in maximum */

#define F2FS_MIN_SEGMENTS	9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
#define F2FS_MIN_META_SEGMENTS	8 /* SB + 2 (CP + SIT + NAT) + SSA */

/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno)	((segno) - (free_i)->start_segno)
#define GET_R2L_SEGNO(free_i, segno)	((segno) + (free_i)->start_segno)

#define IS_DATASEG(t)	((t) <= CURSEG_COLD_DATA)
#define IS_NODESEG(t)	((t) >= CURSEG_HOT_NODE && (t) <= CURSEG_COLD_NODE)
#define SE_PAGETYPE(se)	((IS_NODESEG((se)->type) ? NODE : DATA))

static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
						unsigned short seg_type)
{
	f2fs_bug_on(sbi, seg_type >= NR_PERSISTENT_LOG);
}

#define IS_HOT(t)	((t) == CURSEG_HOT_NODE || (t) == CURSEG_HOT_DATA)
#define IS_WARM(t)	((t) == CURSEG_WARM_NODE || (t) == CURSEG_WARM_DATA)
#define IS_COLD(t)	((t) == CURSEG_COLD_NODE || (t) == CURSEG_COLD_DATA)

#define IS_CURSEG(sbi, seg)						\
	(((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno) ||	\
	 ((seg) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno))

#define IS_CURSEC(sbi, secno)						\
	(((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /		\
	  SEGS_PER_SEC(sbi)) ||						\
	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno /		\
	  SEGS_PER_SEC(sbi)) ||						\
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno /		\
	  SEGS_PER_SEC(sbi)) ||						\
	 ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno /		\
	  SEGS_PER_SEC(sbi)) ||						\
	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /		\
	  SEGS_PER_SEC(sbi)) ||						\
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
	  SEGS_PER_SEC(sbi)) ||						\
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno /	\
	  SEGS_PER_SEC(sbi)) ||						\
	 ((secno) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno /	\
	  SEGS_PER_SEC(sbi)))

#define MAIN_BLKADDR(sbi)						\
	(SM_I(sbi) ? SM_I(sbi)->main_blkaddr :				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
#define SEG0_BLKADDR(sbi)						\
	(SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr :				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))

#define MAIN_SEGS(sbi)	(SM_I(sbi)->main_segments)
#define MAIN_SECS(sbi)	((sbi)->total_sections)
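/*
 * Worked example for the segno mapping macros above (a sketch; the
 * concrete numbers are assumptions, not on-disk constants): if the main
 * area starts at logical segment #100 (free_i->start_segno == 100), then
 *
 *	GET_R2L_SEGNO(free_i, 5)   == 105	(relative -> logical)
 *	GET_L2R_SEGNO(free_i, 105) == 5		(logical -> relative)
 *
 * The main-area bitmaps managed below are indexed by the relative (R)
 * numbering.
 */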
#define TOTAL_SEGS(sbi)							\
	(SM_I(sbi) ? SM_I(sbi)->segment_count :				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
#define TOTAL_BLKS(sbi)	(TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg)

#define MAX_BLKADDR(sbi)	(SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
#define SEGMENT_SIZE(sbi)	(1ULL << ((sbi)->log_blocksize +	\
					(sbi)->log_blocks_per_seg))

#define START_BLOCK(sbi, segno)	(SEG0_BLKADDR(sbi) +			\
	 (GET_R2L_SEGNO(FREE_I(sbi), segno) << (sbi)->log_blocks_per_seg))

#define NEXT_FREE_BLKADDR(sbi, curseg)					\
	(START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff)

#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)	((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (BLKS_PER_SEG(sbi) - 1))

#define GET_SEGNO(sbi, blk_addr)					\
	((!__is_valid_data_blkaddr(blk_addr)) ?				\
	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),				\
		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define CAP_BLKS_PER_SEC(sbi)						\
	(SEGS_PER_SEC(sbi) * BLKS_PER_SEG(sbi) -			\
	 (sbi)->unusable_blocks_per_sec)
#define CAP_SEGS_PER_SEC(sbi)						\
	(SEGS_PER_SEC(sbi) - ((sbi)->unusable_blocks_per_sec >>		\
	(sbi)->log_blocks_per_seg))
#define GET_SEC_FROM_SEG(sbi, segno)					\
	(((segno) == -1) ? -1 : (segno) / SEGS_PER_SEC(sbi))
#define GET_SEG_FROM_SEC(sbi, secno)					\
	((secno) * SEGS_PER_SEC(sbi))
#define GET_ZONE_FROM_SEC(sbi, secno)					\
	(((secno) == -1) ? -1 : (secno) / (sbi)->secs_per_zone)
#define GET_ZONE_FROM_SEG(sbi, segno)					\
	GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))

#define GET_SUM_BLOCK(sbi, segno)					\
	((sbi)->sm_info->ssa_blkaddr + (segno))

#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type))

#define SIT_ENTRY_OFFSET(sit_i, segno)					\
	((segno) % (sit_i)->sents_per_block)
#define SIT_BLOCK_OFFSET(segno)						\
	((segno) / SIT_ENTRY_PER_BLOCK)
#define	START_SEGNO(segno)						\
	(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi)						\
	DIV_ROUND_UP(MAIN_SEGS(sbi), SIT_ENTRY_PER_BLOCK)
#define f2fs_bitmap_size(nr)						\
	(BITS_TO_LONGS(nr) * sizeof(unsigned long))

#define SECTOR_FROM_BLOCK(blk_addr)					\
	(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors)					\
	((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)
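/*
 * Worked example for the SIT lookup macros above (a sketch; it assumes
 * 4KB blocks, where SIT_ENTRY_PER_BLOCK is 55): for segno 123,
 *
 *	SIT_BLOCK_OFFSET(123)	== 123 / 55 == 2	(3rd SIT block)
 *	START_SEGNO(123)	== 2 * 55  == 110	(1st segno in it)
 *
 * i.e. the raw SIT entry for segment #123 lives in SIT block #2, at
 * in-block offset 123 - 110 == 13.
 */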
/*
 * indicate a block allocation direction: RIGHT and LEFT.
 * RIGHT means allocating new sections towards the end of the volume.
 * LEFT means the opposite direction.
 */
enum {
	ALLOC_RIGHT = 0,
	ALLOC_LEFT
};

/*
 * In the victim_sel_policy->alloc_mode, there are three block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
 * AT_SSR (Age Threshold based Slack Space Recycle) merges fragments into a
 * fragmented segment which has a similar aging degree.
 */
enum {
	LFS = 0,
	SSR,
	AT_SSR,
};

/*
 * In the victim_sel_policy->gc_mode, there are three gc, aka cleaning, modes.
 * GC_CB is based on the cost-benefit algorithm.
 * GC_GREEDY is based on the greedy algorithm.
 * GC_AT is based on the age-threshold algorithm.
 */
enum {
	GC_CB = 0,
	GC_GREEDY,
	GC_AT,
	ALLOC_NEXT,
	FLUSH_DEVICE,
	MAX_GC_POLICY,
};

/*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
 */
enum {
	BG_GC = 0,
	FG_GC,
};

/* for a function parameter to select a victim segment */
struct victim_sel_policy {
	int alloc_mode;			/* LFS or SSR */
	int gc_mode;			/* GC_CB or GC_GREEDY */
	unsigned long *dirty_bitmap;	/* dirty segment/section bitmap */
	unsigned int max_search;	/*
					 * maximum # of segments/sections
					 * to search
					 */
	unsigned int offset;		/* last scanned bitmap offset */
	unsigned int ofs_unit;		/* bitmap search unit */
	unsigned int min_cost;		/* minimum cost */
	unsigned long long oldest_age;	/* oldest age of segments having the same min cost */
	unsigned int min_segno;		/* segment # having min. cost */
	unsigned long long age;		/* mtime of GCed section */
	unsigned long long age_threshold; /* age threshold */
};

struct seg_entry {
	unsigned int type:6;		/* segment type like CURSEG_XXX_TYPE */
	unsigned int valid_blocks:10;	/* # of valid blocks */
	unsigned int ckpt_valid_blocks:10;	/* # of valid blocks last cp */
	unsigned int padding:6;		/* padding */
	unsigned char *cur_valid_map;	/* validity bitmap of blocks */
#ifdef CONFIG_F2FS_CHECK_FS
	unsigned char *cur_valid_map_mir;	/* mirror of current valid bitmap */
#endif
	/*
	 * # of valid blocks and the validity bitmap stored in the last
	 * checkpoint pack. This information is used by the SSR mode.
	 */
	unsigned char *ckpt_valid_map;	/* validity bitmap of blocks last cp */
	unsigned char *discard_map;
	unsigned long long mtime;	/* modification time of the segment */
};
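/*
 * Layout note (a sketch of the arithmetic, not an on-disk rule): the four
 * bitfields above pack into one 32-bit word (6 + 10 + 10 + 6 == 32). The
 * 10-bit counters mirror the on-disk SIT encoding, which also keeps the
 * valid-block count in the low bits of vblocks (see
 * __seg_info_to_raw_sit() below), so a segment may hold at most 1023
 * counted blocks; the common 2MB segment uses 512 of them.
 */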
struct sec_entry {
	unsigned int valid_blocks;	/* # of valid blocks in a section */
};

#define MAX_SKIP_GC_COUNT			16

struct revoke_entry {
	struct list_head list;
	block_t old_addr;		/* for revoking when fail to commit */
	pgoff_t index;
};

struct sit_info {
	block_t sit_base_addr;		/* start block address of SIT area */
	block_t sit_blocks;		/* # of blocks used by SIT area */
	block_t written_valid_blocks;	/* # of valid blocks in main area */
	char *bitmap;			/* all bitmaps pointer */
	char *sit_bitmap;		/* SIT bitmap pointer */
#ifdef CONFIG_F2FS_CHECK_FS
	char *sit_bitmap_mir;		/* SIT bitmap mirror */

	/* bitmap of segments to be ignored by GC in case of errors */
	unsigned long *invalid_segmap;
#endif
	unsigned int bitmap_size;	/* SIT bitmap size */

	unsigned long *tmp_map;		/* bitmap for temporary use */
	unsigned long *dirty_sentries_bitmap;	/* bitmap for dirty sentries */
	unsigned int dirty_sentries;	/* # of dirty sentries */
	unsigned int sents_per_block;	/* # of SIT entries per block */
	struct rw_semaphore sentry_lock;	/* to protect SIT cache */
	struct seg_entry *sentries;	/* SIT segment-level cache */
	struct sec_entry *sec_entries;	/* SIT section-level cache */

	/* for cost-benefit algorithm in cleaning procedure */
	unsigned long long elapsed_time;	/* elapsed time after mount */
	unsigned long long mounted_time;	/* mount time */
	unsigned long long min_mtime;		/* min. modification time */
	unsigned long long max_mtime;		/* max. modification time */
	unsigned long long dirty_min_mtime;	/* rerange candidates in GC_AT */
	unsigned long long dirty_max_mtime;	/* rerange candidates in GC_AT */

	unsigned int last_victim[MAX_GC_POLICY]; /* last victim segment # */
};

struct free_segmap_info {
	unsigned int start_segno;	/* start segment number logically */
	unsigned int free_segments;	/* # of free segments */
	unsigned int free_sections;	/* # of free sections */
	spinlock_t segmap_lock;		/* free segmap lock */
	unsigned long *free_segmap;	/* free segment bitmap */
	unsigned long *free_secmap;	/* free section bitmap */
};

/* Notice: The order of dirty type is the same as CURSEG_XXX in f2fs.h */
enum dirty_type {
	DIRTY_HOT_DATA,		/* dirty segments assigned as hot data logs */
	DIRTY_WARM_DATA,	/* dirty segments assigned as warm data logs */
	DIRTY_COLD_DATA,	/* dirty segments assigned as cold data logs */
	DIRTY_HOT_NODE,		/* dirty segments assigned as hot node logs */
	DIRTY_WARM_NODE,	/* dirty segments assigned as warm node logs */
	DIRTY_COLD_NODE,	/* dirty segments assigned as cold node logs */
	DIRTY,			/* to count # of dirty segments */
	PRE,			/* to count # of entirely obsolete segments */
	NR_DIRTY_TYPE
};

struct dirty_seglist_info {
	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
	unsigned long *dirty_secmap;
	struct mutex seglist_lock;		/* lock for segment bitmaps */
	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
	unsigned long *victim_secmap;		/* background GC victims */
	unsigned long *pinned_secmap;		/* pinned victims from foreground GC */
	unsigned int pinned_secmap_cnt;		/* count of victims which have pinned data */
	bool enable_pin_section;		/* enable pinning section */
};

/* for active log information */
struct curseg_info {
	struct mutex curseg_mutex;		/* lock for consistency */
	struct f2fs_summary_block *sum_blk;	/* cached summary block */
	struct rw_semaphore journal_rwsem;	/* protect journal area */
	struct f2fs_journal *journal;		/* cached journal info */
	unsigned char alloc_type;		/* current allocation type */
	unsigned short seg_type;		/* segment type like CURSEG_XXX_TYPE */
	unsigned int segno;			/* current segment number */
	unsigned short next_blkoff;		/* next block offset to write */
	unsigned int zone;			/* current zone number */
	unsigned int next_segno;		/* preallocated segment */
	int fragment_remained_chunk;		/* remaining blocks in a chunk for block fragmentation mode */
	bool inited;				/* indicate whether the in-memory log is initialized */
};

struct sit_entry_set {
	struct list_head set_list;	/* link with all sit sets */
	unsigned int start_segno;	/* start segno of sits in set */
	unsigned int entry_cnt;		/* the # of sit entries in set */
};
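/*
 * Worked example for curseg_info (a sketch; the numbers are assumed):
 * with 512 blocks per segment (log_blocks_per_seg == 9), a curseg at
 * segno 105 with next_blkoff 37 places its next write at
 *
 *	NEXT_FREE_BLKADDR(sbi, curseg)
 *		== START_BLOCK(sbi, 105) + 37
 *		== SEG0_BLKADDR(sbi) + (GET_R2L_SEGNO(free_i, 105) << 9) + 37
 *
 * next_blkoff advances on every allocation, and the log moves to a new
 * segment once it reaches the end of the current one.
 */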
/*
 * inline functions
 */
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}

static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sentries[segno];
}

static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sec_entries[GET_SEC_FROM_SEG(sbi, segno)];
}

static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, bool use_section)
{
	/*
	 * In order to get # of valid blocks in a section instantly from many
	 * segments, f2fs manages two counting structures separately.
	 */
	if (use_section && __is_large_section(sbi))
		return get_sec_entry(sbi, segno)->valid_blocks;
	else
		return get_seg_entry(sbi, segno)->valid_blocks;
}

static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, bool use_section)
{
	if (use_section && __is_large_section(sbi)) {
		/*
		 * Start from the first segment of the section: START_SEGNO()
		 * is SIT-block aligned, not section aligned, so it must not
		 * be used here.
		 */
		unsigned int start_segno = GET_SEG_FROM_SEC(sbi,
					GET_SEC_FROM_SEG(sbi, segno));
		unsigned int blocks = 0;
		int i;

		for (i = 0; i < SEGS_PER_SEC(sbi); i++, start_segno++) {
			struct seg_entry *se = get_seg_entry(sbi, start_segno);

			blocks += se->ckpt_valid_blocks;
		}
		return blocks;
	}
	return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
}

static inline void seg_info_from_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	se->valid_blocks = GET_SIT_VBLOCKS(rs);
	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
#ifdef CONFIG_F2FS_CHECK_FS
	memcpy(se->cur_valid_map_mir, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
#endif
	se->type = GET_SIT_TYPE(rs);
	se->mtime = le64_to_cpu(rs->mtime);
}

static inline void __seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
					se->valid_blocks;
	rs->vblocks = cpu_to_le16(raw_vblocks);
	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
	rs->mtime = cpu_to_le64(se->mtime);
}

static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
				struct page *page, unsigned int start)
{
	struct f2fs_sit_block *raw_sit;
	struct seg_entry *se;
	struct f2fs_sit_entry *rs;
	unsigned int end = min(start + SIT_ENTRY_PER_BLOCK,
					(unsigned long)MAIN_SEGS(sbi));
	int i;

	raw_sit = (struct f2fs_sit_block *)page_address(page);
	memset(raw_sit, 0, PAGE_SIZE);
	for (i = 0; i < end - start; i++) {
		rs = &raw_sit->entries[i];
		se = get_seg_entry(sbi, start + i);
		__seg_info_to_raw_sit(se, rs);
	}
}

static inline void seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	__seg_info_to_raw_sit(se, rs);

	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->ckpt_valid_blocks = se->valid_blocks;
}
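/*
 * Worked example for the raw SIT encoding above (a sketch; it assumes
 * SIT_VBLOCKS_SHIFT == 10 as in f2fs_fs.h): a warm-data segment
 * (se->type == CURSEG_WARM_DATA == 1) with 300 valid blocks is encoded as
 *
 *	raw_vblocks == (1 << 10) | 300 == 0x052c
 *
 * and decoded back with GET_SIT_TYPE()/GET_SIT_VBLOCKS(), which split the
 * top 6 and bottom 10 bits respectively.
 */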
static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
		unsigned int max, unsigned int segno)
{
	unsigned int ret;
	spin_lock(&free_i->segmap_lock);
	ret = find_next_bit(free_i->free_segmap, max, segno);
	spin_unlock(&free_i->segmap_lock);
	return ret;
}

static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int next;
	unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);

	spin_lock(&free_i->segmap_lock);
	clear_bit(segno, free_i->free_segmap);
	free_i->free_segments++;

	next = find_next_bit(free_i->free_segmap,
			start_segno + SEGS_PER_SEC(sbi), start_segno);
	if (next >= start_segno + usable_segs) {
		clear_bit(secno, free_i->free_secmap);
		free_i->free_sections++;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	set_bit(segno, free_i->free_segmap);
	free_i->free_segments--;
	if (!test_and_set_bit(secno, free_i->free_secmap))
		free_i->free_sections--;
}

static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
		unsigned int segno, bool inmem)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int next;
	unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);

	spin_lock(&free_i->segmap_lock);
	if (test_and_clear_bit(segno, free_i->free_segmap)) {
		free_i->free_segments++;

		if (!inmem && IS_CURSEC(sbi, secno))
			goto skip_free;
		next = find_next_bit(free_i->free_segmap,
				start_segno + SEGS_PER_SEC(sbi), start_segno);
		if (next >= start_segno + usable_segs) {
			if (test_and_clear_bit(secno, free_i->free_secmap))
				free_i->free_sections++;
		}
	}
skip_free:
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	spin_lock(&free_i->segmap_lock);
	if (!test_and_set_bit(segno, free_i->free_segmap)) {
		free_i->free_segments--;
		if (!test_and_set_bit(secno, free_i->free_secmap))
			free_i->free_sections--;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
		void *dst_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);

#ifdef CONFIG_F2FS_CHECK_FS
	if (memcmp(sit_i->sit_bitmap, sit_i->sit_bitmap_mir,
						sit_i->bitmap_size))
		f2fs_bug_on(sbi, 1);
#endif
	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}

static inline block_t written_block_count(struct f2fs_sb_info *sbi)
{
	return SIT_I(sbi)->written_valid_blocks;
}

static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_segments;
}

static inline unsigned int reserved_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->reserved_segments +
			SM_I(sbi)->additional_reserved_segments;
}

static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_sections;
}

static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[PRE];
}

static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
}

static inline int overprovision_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->ovp_segments;
}

static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
	return GET_SEC_FROM_SEG(sbi, reserved_segments(sbi));
}
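/*
 * Bitmap convention used by the helpers above (restating the code, with
 * an assumed two-segment section for illustration): a set bit in
 * free_segmap/free_secmap means "in use", a clear bit means "free". A
 * section is counted free only when every usable segment in it is free,
 * e.g. freeing segment #7 of section #3 (segments #6-#7) clears bit 3 of
 * free_secmap only if bit 6 of free_segmap is already clear.
 */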
static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
			unsigned int node_blocks, unsigned int dent_blocks)
{
	unsigned int segno, left_blocks;
	int i;

	/* check current node sections in the worst case. */
	for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
		segno = CURSEG_I(sbi, i)->segno;
		left_blocks = CAP_BLKS_PER_SEC(sbi) -
				get_ckpt_valid_blocks(sbi, segno, true);
		if (node_blocks > left_blocks)
			return false;
	}

	/* check current data section for dentry blocks. */
	segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
	left_blocks = CAP_BLKS_PER_SEC(sbi) -
			get_ckpt_valid_blocks(sbi, segno, true);
	if (dent_blocks > left_blocks)
		return false;
	return true;
}

/*
 * calculate needed sections for dirty node/dentry
 * and call has_curseg_enough_space
 */
static inline void __get_secs_required(struct f2fs_sb_info *sbi,
		unsigned int *lower_p, unsigned int *upper_p, bool *curseg_p)
{
	unsigned int total_node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
					get_pages(sbi, F2FS_DIRTY_DENTS) +
					get_pages(sbi, F2FS_DIRTY_IMETA);
	unsigned int total_dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
	unsigned int node_secs = total_node_blocks / CAP_BLKS_PER_SEC(sbi);
	unsigned int dent_secs = total_dent_blocks / CAP_BLKS_PER_SEC(sbi);
	unsigned int node_blocks = total_node_blocks % CAP_BLKS_PER_SEC(sbi);
	unsigned int dent_blocks = total_dent_blocks % CAP_BLKS_PER_SEC(sbi);

	if (lower_p)
		*lower_p = node_secs + dent_secs;
	if (upper_p)
		*upper_p = node_secs + dent_secs +
			(node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0);
	if (curseg_p)
		*curseg_p = has_curseg_enough_space(sbi,
				node_blocks, dent_blocks);
}

static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
					int freed, int needed)
{
	unsigned int free_secs, lower_secs, upper_secs;
	bool curseg_space;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return false;

	__get_secs_required(sbi, &lower_secs, &upper_secs, &curseg_space);

	free_secs = free_sections(sbi) + freed;
	lower_secs += needed + reserved_sections(sbi);
	upper_secs += needed + reserved_sections(sbi);

	if (free_secs > upper_secs)
		return false;
	if (free_secs <= lower_secs)
		return true;
	return !curseg_space;
}

static inline bool has_enough_free_secs(struct f2fs_sb_info *sbi,
					int freed, int needed)
{
	return !has_not_enough_free_secs(sbi, freed, needed);
}

static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
{
	if (likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return true;
	if (likely(has_enough_free_secs(sbi, 0, 0)))
		return true;
	return false;
}

static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{
	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
}

static inline int utilization(struct f2fs_sb_info *sbi)
{
	return div_u64((u64)valid_user_blocks(sbi) * 100,
					sbi->user_block_count);
}
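/*
 * Worked example for __get_secs_required() (a sketch; it assumes
 * CAP_BLKS_PER_SEC() == 512): with 1300 dirty node pages, 100 dirty
 * dentry pages and no dirty imeta,
 *
 *	total_node_blocks == 1300 + 100 == 1400, total_dent_blocks == 100
 *	node_secs == 2, node_blocks == 376; dent_secs == 0, dent_blocks == 100
 *	lower == 2, upper == 2 + 1 + 1 == 4
 *
 * so has_not_enough_free_secs() reports "not enough" below 2 + reserved
 * free sections, "enough" above 4 + reserved, and defers to
 * has_curseg_enough_space() in between.
 */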
/*
 * Sometimes it is better for f2fs to drop its out-of-place update policy.
 * Users can control the policy through sysfs entries.
 * The policies and their triggering conditions are as follows.
 * F2FS_IPU_FORCE - all the time,
 * F2FS_IPU_SSR - if SSR mode is activated,
 * F2FS_IPU_UTIL - if FS utilization is over threshold,
 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 *                     threshold,
 * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
 *                  storages. IPU will be triggered only if the # of dirty
 *                  pages exceeds min_fsync_blocks. (=default option)
 * F2FS_IPU_ASYNC - do IPU for asynchronous write requests.
 * F2FS_IPU_NOCACHE - disable IPU bio cache.
 * F2FS_IPU_HONOR_OPU_WRITE - use OPU write prior to IPU write if inode has
 *                            FI_OPU_WRITE flag.
 * F2FS_IPU_DISABLE - disable IPU. (=default option in LFS mode)
 */
#define DEF_MIN_IPU_UTIL	70
#define DEF_MIN_FSYNC_BLOCKS	8
#define DEF_MIN_HOT_BLOCKS	16

#define SMALL_VOLUME_SEGMENTS	(16 * 512)	/* 16GB */

#define F2FS_IPU_DISABLE	0

/* Modification on enum should be synchronized with ipu_mode_names array */
enum {
	F2FS_IPU_FORCE,
	F2FS_IPU_SSR,
	F2FS_IPU_UTIL,
	F2FS_IPU_SSR_UTIL,
	F2FS_IPU_FSYNC,
	F2FS_IPU_ASYNC,
	F2FS_IPU_NOCACHE,
	F2FS_IPU_HONOR_OPU_WRITE,
	F2FS_IPU_MAX,
};

static inline bool IS_F2FS_IPU_DISABLE(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->ipu_policy == F2FS_IPU_DISABLE;
}

#define F2FS_IPU_POLICY(name)					\
static inline bool IS_##name(struct f2fs_sb_info *sbi)		\
{								\
	return SM_I(sbi)->ipu_policy & BIT(name);		\
}

F2FS_IPU_POLICY(F2FS_IPU_FORCE);
F2FS_IPU_POLICY(F2FS_IPU_SSR);
F2FS_IPU_POLICY(F2FS_IPU_UTIL);
F2FS_IPU_POLICY(F2FS_IPU_SSR_UTIL);
F2FS_IPU_POLICY(F2FS_IPU_FSYNC);
F2FS_IPU_POLICY(F2FS_IPU_ASYNC);
F2FS_IPU_POLICY(F2FS_IPU_NOCACHE);
F2FS_IPU_POLICY(F2FS_IPU_HONOR_OPU_WRITE);
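/*
 * Usage sketch (values assumed for illustration): ipu_policy is a bitmask
 * over the enum above, with 0 (F2FS_IPU_DISABLE, no bits set) meaning IPU
 * is off. E.g. after
 *
 *	SM_I(sbi)->ipu_policy = BIT(F2FS_IPU_SSR) | BIT(F2FS_IPU_FSYNC);
 *
 * IS_F2FS_IPU_SSR(sbi) and IS_F2FS_IPU_FSYNC(sbi) return true, while
 * IS_F2FS_IPU_DISABLE(sbi) and the other IS_F2FS_IPU_*() helpers return
 * false.
 */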
static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->segno;
}

static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->alloc_type;
}

static inline bool valid_main_segno(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	return segno <= (MAIN_SEGS(sbi) - 1);
}

static inline void verify_fio_blkaddr(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;

	if (__is_valid_data_blkaddr(fio->old_blkaddr))
		verify_blkaddr(sbi, fio->old_blkaddr, __is_meta_io(fio) ?
					META_GENERIC : DATA_GENERIC);
	verify_blkaddr(sbi, fio->new_blkaddr, __is_meta_io(fio) ?
					META_GENERIC : DATA_GENERIC_ENHANCE);
}

/*
 * Summary block is always treated as an invalid block
 */
static inline int check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{
	bool is_valid = test_bit_le(0, raw_sit->valid_map);
	int valid_blocks = 0;
	int cur_pos = 0, next_pos;
	unsigned int usable_blks_per_seg = f2fs_usable_blks_in_seg(sbi, segno);

	/* check bitmap with valid block count */
	do {
		if (is_valid) {
			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
					usable_blks_per_seg,
					cur_pos);
			valid_blocks += next_pos - cur_pos;
		} else
			next_pos = find_next_bit_le(&raw_sit->valid_map,
					usable_blks_per_seg,
					cur_pos);
		cur_pos = next_pos;
		is_valid = !is_valid;
	} while (cur_pos < usable_blks_per_seg);

	if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
		f2fs_err(sbi, "Mismatch valid blocks %d vs. %d",
			 GET_SIT_VBLOCKS(raw_sit), valid_blocks);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_SIT);
		return -EFSCORRUPTED;
	}

	if (usable_blks_per_seg < BLKS_PER_SEG(sbi))
		f2fs_bug_on(sbi, find_next_bit_le(&raw_sit->valid_map,
				BLKS_PER_SEG(sbi),
				usable_blks_per_seg) != BLKS_PER_SEG(sbi));

	/* check segment usage, and check boundary of a given segment number */
	if (unlikely(GET_SIT_VBLOCKS(raw_sit) > usable_blks_per_seg
			|| !valid_main_segno(sbi, segno))) {
		f2fs_err(sbi, "Wrong valid blocks %d or segno %u",
			 GET_SIT_VBLOCKS(raw_sit), segno);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_SIT);
		return -EFSCORRUPTED;
	}
	return 0;
}

static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(start);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	f2fs_bug_on(sbi, !valid_main_segno(sbi, start));

#ifdef CONFIG_F2FS_CHECK_FS
	if (f2fs_test_bit(offset, sit_i->sit_bitmap) !=
			f2fs_test_bit(offset, sit_i->sit_bitmap_mir))
		f2fs_bug_on(sbi, 1);
#endif

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return blk_addr;
}

static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	block_addr -= sit_i->sit_base_addr;
	if (block_addr < sit_i->sit_blocks)
		block_addr += sit_i->sit_blocks;
	else
		block_addr -= sit_i->sit_blocks;

	return block_addr + sit_i->sit_base_addr;
}

static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{
	unsigned int block_off = SIT_BLOCK_OFFSET(start);

	f2fs_change_bit(block_off, sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_change_bit(block_off, sit_i->sit_bitmap_mir);
#endif
}

static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi,
						bool base_time)
{
	struct sit_info *sit_i = SIT_I(sbi);
	time64_t diff, now = ktime_get_boottime_seconds();

	if (now >= sit_i->mounted_time)
		return sit_i->elapsed_time + now - sit_i->mounted_time;

	/* system time is set to the past */
	if (!base_time) {
		diff = sit_i->mounted_time - now;
		if (sit_i->elapsed_time >= diff)
			return sit_i->elapsed_time - diff;
		return 0;
	}
	return sit_i->elapsed_time;
}

static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
			unsigned int ofs_in_node, unsigned char version)
{
	sum->nid = cpu_to_le32(nid);
	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
	sum->version = version;
}
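/*
 * Sketch of the two-copy SIT scheme implemented above (addresses are
 * assumed for illustration): each SIT block has a shadow copy sit_blocks
 * away, and sit_bitmap records which copy is live. With sit_base_addr ==
 * 1024 and sit_blocks == 64, SIT block #2 is read from block 1026 when
 * its bitmap bit is clear and from block 1090 when it is set;
 * set_to_next_sit() flips the bit so the next checkpoint writes the other
 * copy.
 */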
static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
				- (base + 1) + type;
}

static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{
	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
		return true;
	return false;
}

/*
 * It is very important to gather dirty pages and write them at once, so
 * that we can submit a big bio without interfering with other data writes.
 * By default, 512 pages for directory data,
 * 512 pages (2MB) * 8 for nodes, and
 * 256 pages * 8 for meta are set.
 */
static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
{
	if (sbi->sb->s_bdi->wb.dirty_exceeded)
		return 0;

	if (type == DATA)
		return BLKS_PER_SEG(sbi);
	else if (type == NODE)
		return 8 * BLKS_PER_SEG(sbi);
	else if (type == META)
		return 8 * BIO_MAX_VECS;
	else
		return 0;
}

/*
 * When writing pages, it is better to align nr_to_write to the segment size.
 */
static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
					struct writeback_control *wbc)
{
	long nr_to_write, desired;

	if (wbc->sync_mode != WB_SYNC_NONE)
		return 0;

	nr_to_write = wbc->nr_to_write;
	desired = BIO_MAX_VECS;
	if (type == NODE)
		desired <<= 1;

	wbc->nr_to_write = desired;
	return desired - nr_to_write;
}

static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	bool wakeup = false;
	int i;

	if (force)
		goto wake_up;

	mutex_lock(&dcc->cmd_lock);
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		if (i + 1 < dcc->discard_granularity)
			break;
		if (!list_empty(&dcc->pend_list[i])) {
			wakeup = true;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);
	if (!wakeup || !is_idle(sbi, DISCARD_TIME))
		return;
wake_up:
	dcc->discard_wake = true;
	wake_up_interruptible_all(&dcc->discard_wait_queue);
}
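/*
 * Wakeup sketch for wake_up_discard_thread() (assuming, per the loop
 * above, that pend_list[i] holds pending discards of i + 1 blocks): with
 * discard_granularity == 4, the scan walks pend_list[MAX_PLIST_NUM - 1]
 * down to pend_list[3] and wakes the discard thread only if one of those
 * lists is non-empty and the fs is idle; shorter pending discards never
 * trigger a wakeup by themselves.
 */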