/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>

/*
 * For mount options
 */
#define F2FS_MOUNT_BG_GC		0x00000001
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040

#define clear_opt(sbi, option)	(sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(sbi->mount_opt.opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(sbi->mount_opt.opt & F2FS_MOUNT_##option)

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))

typedef u64 block_t;
typedef u32 nid_t;

struct f2fs_mount_info {
	unsigned int	opt;
};

static inline __u32 f2fs_crc32(void *buff, size_t len)
{
	return crc32_le(F2FS_SUPER_MAGIC, buff, len);
}

static inline bool f2fs_crc_valid(__u32 blk_crc, void *buff, size_t buff_size)
{
	return f2fs_crc32(buff, buff_size) == blk_crc;
}

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

/* for the list of orphan inodes */
struct orphan_inode_entry {
	struct list_head list;	/* list head */
	nid_t ino;		/* inode number */
};

/* for the list of directory inodes */
struct dir_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last inode */
};

#define nats_in_cursum(sum)		(le16_to_cpu(sum->n_nats))
#define sits_in_cursum(sum)		(le16_to_cpu(sum->n_sits))

#define nat_in_journal(sum, i)		(sum->nat_j.entries[i].ne)
#define nid_in_journal(sum, i)		(sum->nat_j.entries[i].nid)
#define sit_in_journal(sum, i)		(sum->sit_j.entries[i].se)
#define segno_in_journal(sum, i)	(sum->sit_j.entries[i].segno)

static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i)
{
	int before = nats_in_cursum(rs);
	rs->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
{
	int before = sits_in_cursum(rs);
	rs->n_sits = cpu_to_le16(before + i);
	return before;
}
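
/*
 * Illustrative sketch (editor's example, not part of f2fs itself): the
 * helpers above reserve slots in the NAT/SIT journal kept inside the
 * current summary block.  Appending one NAT entry looks roughly like
 * this; bounds checking against the journal capacity is assumed to be
 * done by the caller, as the lookup code in segment.c does.
 */
static inline void example_append_nat_journal(struct f2fs_summary_block *sum,
					nid_t nid, struct f2fs_nat_entry *ne)
{
	int i = update_nats_in_cursum(sum, 1);	/* reserve one slot */

	nid_in_journal(sum, i) = cpu_to_le32(nid);
	nat_in_journal(sum, i) = *ne;
}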

/*
 * For INODE and NODE manager
 */
#define XATTR_NODE_OFFSET	(-1)	/*
					 * store xattrs to one node block per
					 * file keeping -1 as its node offset to
					 * distinguish from index node blocks.
					 */
#define RDONLY_NODE		1	/*
					 * specify a read-only mode when getting
					 * a node block. 0 is read-write mode.
					 * used by get_dnode_of_data().
					 */
#define F2FS_LINK_MAX		32000	/* maximum link count per file */

/* for in-memory extent cache entry */
struct extent_info {
	rwlock_t ext_lock;	/* rwlock for consistency */
	unsigned int fofs;	/* start offset in a file */
	u32 blk_addr;		/* start block address of the extent */
	unsigned int len;	/* length of the extent */
};

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned int i_current_depth;	/* use only in directory structure */
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* used internally by f2fs */
	unsigned long flags;		/* use to pass per-file flags */
	unsigned long long data_version;/* latest version of data for fsync */
	atomic_t dirty_dents;		/* # of dirty dentry pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	struct extent_info ext;		/* in-memory extent cache entry */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent i_ext)
{
	write_lock(&ext->ext_lock);
	ext->fofs = le32_to_cpu(i_ext.fofs);
	ext->blk_addr = le32_to_cpu(i_ext.blk_addr);
	ext->len = le32_to_cpu(i_ext.len);
	write_unlock(&ext->ext_lock);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	read_lock(&ext->ext_lock);
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk_addr = cpu_to_le32(ext->blk_addr);
	i_ext->len = cpu_to_le32(ext->len);
	read_unlock(&ext->ext_lock);
}
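
/*
 * Illustrative sketch (editor's example): probing the per-inode extent
 * cache for a file offset.  The real lookup lives in data.c; this
 * condensed form only shows how the fields of struct extent_info relate
 * to each other under ext_lock.
 */
static inline bool example_lookup_extent(struct f2fs_inode_info *fi,
					pgoff_t index, block_t *blkaddr)
{
	struct extent_info *ext = &fi->ext;
	bool hit = false;

	read_lock(&ext->ext_lock);
	if (ext->len && index >= ext->fofs && index < ext->fofs + ext->len) {
		*blkaddr = ext->blk_addr + (index - ext->fofs);
		hit = true;
	}
	read_unlock(&ext->ext_lock);
	return hit;
}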

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t init_scan_nid;		/* the first nid to be scanned */
	nid_t next_scan_nid;		/* the next nid to be scanned */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	rwlock_t nat_tree_lock;		/* protect the nat entry cache */
	unsigned int nat_cnt;		/* the # of cached nat entries */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	struct list_head dirty_nat_entries; /* cached nat entry list (dirty) */

	/* free node ids management */
	struct list_head free_nid_list;	/* a list for free nids */
	spinlock_t free_nid_list_lock;	/* protect free nid list */
	unsigned int fcnt;		/* the number of free node ids */
	struct mutex build_lock;	/* lock for building free nids */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of the function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	block_t data_blkaddr;		/* block address of the data block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
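
/*
 * Illustrative sketch (editor's example): how callers typically prepare
 * a dnode_of_data before asking node.c to walk the node tree.  Passing
 * NULL pages and nid 0 lets the lookup start from scratch; RDONLY_NODE
 * requests a read-only walk, as noted above.
 */
static inline void example_prepare_dnode(struct dnode_of_data *dn,
					struct inode *inode)
{
	set_new_dnode(dn, inode, NULL, NULL, 0);
	/*
	 * Callers then typically do
	 *	err = get_dnode_of_data(dn, index, RDONLY_NODE);
	 * and release the pages with f2fs_put_dnode(dn) when done.
	 */
}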

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NO_CHECK_TYPE
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct list_head wblist_head;	/* list of under-writeback pages */
	spinlock_t wblist_lock;		/* lock for checkpoint */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */
};
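
/*
 * Illustrative sketch (editor's example): the six active logs are laid
 * out data-first, so a single range check tells node logs from data
 * logs, and the temperature is just the offset within each group.
 */
static inline bool example_curseg_is_node(int type)
{
	return type >= CURSEG_HOT_NODE && type <= CURSEG_COLD_NODE;
}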

/*
 * For directory operation
 */
#define NODE_DIR1_BLOCK		(ADDRS_PER_INODE + 1)
#define NODE_DIR2_BLOCK		(ADDRS_PER_INODE + 2)
#define NODE_IND1_BLOCK		(ADDRS_PER_INODE + 3)
#define NODE_IND2_BLOCK		(ADDRS_PER_INODE + 4)
#define NODE_DIND_BLOCK		(ADDRS_PER_INODE + 5)

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
enum count_type {
	F2FS_WRITEBACK,
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	NR_COUNT_TYPE,
};

/*
 * FS_LOCK nesting subclasses for the lock validator:
 *
 * The locking order between these classes is
 * RENAME -> DENTRY_OPS -> DATA_WRITE -> DATA_NEW
 * -> DATA_TRUNC -> NODE_WRITE -> NODE_NEW -> NODE_TRUNC
 */
enum lock_type {
	RENAME,		/* for renaming operations */
	DENTRY_OPS,	/* for directory operations */
	DATA_WRITE,	/* for data write */
	DATA_NEW,	/* for data allocation */
	DATA_TRUNC,	/* for data truncate */
	NODE_NEW,	/* for node allocation */
	NODE_TRUNC,	/* for node truncate */
	NODE_WRITE,	/* for node write */
	NR_LOCK_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates in async mode.
 * NODE			Node pages. It operates in async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written,
 *			waiting for the bios' completion.
 *			Can only be used with META.
 */
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
};

struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct buffer_head *raw_super_buf;	/* buffer head of raw sb */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	int s_dirty;				/* dirty flag for checkpoint */

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */
	struct bio *bio[NR_PAGE_TYPE];		/* bios to merge */
	sector_t last_block_in_bio[NR_PAGE_TYPE];	/* last block number */
	struct rw_semaphore bio_sem;		/* IO semaphore */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	struct inode *meta_inode;		/* cache meta blocks */
	struct mutex cp_mutex;			/* for checkpoint procedure */
	struct mutex fs_lock[NR_LOCK_TYPE];	/* for blocking FS operations */
	struct mutex write_inode;		/* mutex for write inode */
	struct mutex writepages;		/* mutex for writepages() */
	int por_doing;				/* whether recovery is in progress */

	/* for orphan inode management */
	struct list_head orphan_inode_list;	/* orphan inode list */
	struct mutex orphan_inode_mutex;	/* for orphan inode list */
	unsigned int n_orphans;			/* # of orphan inodes */

	/* for directory inode management */
	struct list_head dir_inode_list;	/* dir inode list */
	spinlock_t dir_inode_lock;		/* for dir inode list lock */
	unsigned int n_dirty_dirs;		/* # of dir inodes */

	/* basic file system units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number */
	unsigned int node_ino_num;		/* node inode number */
	unsigned int meta_ino_num;		/* meta inode number */
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	unsigned int total_valid_inode_count;	/* valid inode count */
	int active_logs;			/* # of active logs */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t alloc_valid_block_count;	/* # of allocated blocks */
	block_t last_valid_block_count;		/* for recovery */
	u32 s_next_generation;			/* for NFS support */
	atomic_t nr_pages[NR_COUNT_TYPE];	/* # of pages, see count_type */

	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct mutex gc_mutex;			/* mutex for GC */
	struct f2fs_gc_kthread *gc_thread;	/* GC thread */

	/*
	 * for stat information.
	 * one is for the LFS mode, and the other is for the SSR mode.
	 */
	struct f2fs_stat_info *stat_info;	/* FS status information */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	unsigned int last_victim[2];		/* last victim segment # */
	int total_hit_ext, read_hit_ext;	/* extent cache hit ratio */
	int bg_gc;				/* background gc calls */
	spinlock_t stat_lock;			/* lock for stat operations */
};

/*
 * Inline functions
 */
static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
	return container_of(inode, struct f2fs_inode_info, vfs_inode);
}

static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_super_block *)(sbi->raw_super);
}

static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_checkpoint *)(sbi->ckpt);
}

static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_nm_info *)(sbi->nm_info);
}

static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_sm_info *)(sbi->sm_info);
}

static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
{
	return (struct sit_info *)(SM_I(sbi)->sit_info);
}

static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
{
	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
}

static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
{
	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
}

static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi)
{
	sbi->s_dirty = 1;
}

static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi)
{
	sbi->s_dirty = 0;
}

static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	return ckpt_flags & f;
}

static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags |= f;
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags &= (~f);
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void mutex_lock_op(struct f2fs_sb_info *sbi, enum lock_type t)
{
	mutex_lock_nested(&sbi->fs_lock[t], t);
}

static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, enum lock_type t)
{
	mutex_unlock(&sbi->fs_lock[t]);
}
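
/*
 * Illustrative sketch (editor's example): nested use of the fs_lock
 * classes.  The lock_type value doubles as the lockdep subclass, so
 * acquisitions that follow the order documented above (for instance
 * DATA_NEW before DATA_TRUNC) validate cleanly.
 */
static inline void example_nested_fs_lock(struct f2fs_sb_info *sbi)
{
	mutex_lock_op(sbi, DATA_NEW);
	mutex_lock_op(sbi, DATA_TRUNC);

	/* ... blocked FS operation goes here ... */

	mutex_unlock_op(sbi, DATA_TRUNC);
	mutex_unlock_op(sbi, DATA_NEW);
}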

/*
 * Check whether the given nid is within node id range.
 */
static inline void check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	BUG_ON((nid >= NM_I(sbi)->max_nid));
}

#define F2FS_DEFAULT_ALLOCATED_BLOCKS	1

/*
 * Check whether the inode has blocks or not
 */
static inline int F2FS_HAS_BLOCKS(struct inode *inode)
{
	if (F2FS_I(inode)->i_xattr_nid)
		return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1);
	else
		return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS);
}

static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
				 struct inode *inode, blkcnt_t count)
{
	block_t	valid_block_count;

	spin_lock(&sbi->stat_lock);
	valid_block_count =
		sbi->total_valid_block_count + (block_t)count;
	if (valid_block_count > sbi->user_block_count) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}
	inode->i_blocks += count;
	sbi->total_valid_block_count = valid_block_count;
	sbi->alloc_valid_block_count += (block_t)count;
	spin_unlock(&sbi->stat_lock);
	return true;
}

static inline int dec_valid_block_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						blkcnt_t count)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(sbi->total_valid_block_count < (block_t) count);
	BUG_ON(inode->i_blocks < count);
	inode->i_blocks -= count;
	sbi->total_valid_block_count -= (block_t)count;
	spin_unlock(&sbi->stat_lock);
	return 0;
}

static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_inc(&sbi->nr_pages[count_type]);
	F2FS_SET_SB_DIRT(sbi);
}

static inline void inode_inc_dirty_dents(struct inode *inode)
{
	atomic_inc(&F2FS_I(inode)->dirty_dents);
}

static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_dec(&sbi->nr_pages[count_type]);
}

static inline void inode_dec_dirty_dents(struct inode *inode)
{
	atomic_dec(&F2FS_I(inode)->dirty_dents);
}

static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
{
	return atomic_read(&sbi->nr_pages[count_type]);
}

static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
{
	block_t ret;
	spin_lock(&sbi->stat_lock);
	ret = sbi->total_valid_block_count;
	spin_unlock(&sbi->stat_lock);
	return ret;
}
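
/*
 * Illustrative sketch (editor's example): the usual reservation pattern
 * around the block counters above.  Space is claimed before a block
 * address is handed out, and released again if the allocation fails
 * further down (compare reserve_new_block() in data.c).
 */
static inline int example_reserve_one_block(struct f2fs_sb_info *sbi,
					struct inode *inode)
{
	if (!inc_valid_block_count(sbi, inode, 1))
		return -ENOSPC;

	/* on a later failure the caller rolls the reservation back: */
	/* dec_valid_block_count(sbi, inode, 1); */
	return 0;
}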

static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	/* return NAT or SIT bitmap */
	if (flag == NAT_BITMAP)
		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
	else if (flag == SIT_BITMAP)
		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);

	return 0;
}

static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	int offset = (flag == NAT_BITMAP) ?
			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
	return &ckpt->sit_nat_version_bitmap + offset;
}

static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr;
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_version = le64_to_cpu(ckpt->checkpoint_ver);

	start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	/*
	 * An odd-numbered checkpoint should be at cp segment 0,
	 * and an even-numbered one at cp segment 1.
	 */
	if (!(ckpt_version & 1))
		start_addr += sbi->blocks_per_seg;

	return start_addr;
}

static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}
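
/*
 * Illustrative sketch (editor's example): the two checkpoint packs
 * simply alternate between cp segment 0 and cp segment 1, so the pack
 * that will be written next starts one segment away from the current
 * one within the two-segment checkpoint area.
 */
static inline block_t example_next_cp_addr(struct f2fs_sb_info *sbi)
{
	block_t cp_blkaddr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
	block_t cur = __start_cp_addr(sbi);

	return (cur == cp_blkaddr) ? cur + sbi->blocks_per_seg : cp_blkaddr;
}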

static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						unsigned int count)
{
	block_t	valid_block_count;
	unsigned int valid_node_count;

	spin_lock(&sbi->stat_lock);

	valid_block_count = sbi->total_valid_block_count + (block_t)count;
	sbi->alloc_valid_block_count += (block_t)count;
	valid_node_count = sbi->total_valid_node_count + count;

	if (valid_block_count > sbi->user_block_count) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}

	if (valid_node_count > sbi->total_node_count) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}

	if (inode)
		inode->i_blocks += count;
	sbi->total_valid_node_count = valid_node_count;
	sbi->total_valid_block_count = valid_block_count;
	spin_unlock(&sbi->stat_lock);

	return true;
}

static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						unsigned int count)
{
	spin_lock(&sbi->stat_lock);

	BUG_ON(sbi->total_valid_block_count < count);
	BUG_ON(sbi->total_valid_node_count < count);
	BUG_ON(inode->i_blocks < count);

	inode->i_blocks -= count;
	sbi->total_valid_node_count -= count;
	sbi->total_valid_block_count -= (block_t)count;

	spin_unlock(&sbi->stat_lock);
}

static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
{
	unsigned int ret;
	spin_lock(&sbi->stat_lock);
	ret = sbi->total_valid_node_count;
	spin_unlock(&sbi->stat_lock);
	return ret;
}

static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(sbi->total_valid_inode_count == sbi->total_node_count);
	sbi->total_valid_inode_count++;
	spin_unlock(&sbi->stat_lock);
}

static inline int dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(!sbi->total_valid_inode_count);
	sbi->total_valid_inode_count--;
	spin_unlock(&sbi->stat_lock);
	return 0;
}

static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi)
{
	unsigned int ret;
	spin_lock(&sbi->stat_lock);
	ret = sbi->total_valid_inode_count;
	spin_unlock(&sbi->stat_lock);
	return ret;
}

static inline void f2fs_put_page(struct page *page, int unlock)
{
	if (!page || IS_ERR(page))
		return;

	if (unlock) {
		BUG_ON(!PageLocked(page));
		unlock_page(page);
	}
	page_cache_release(page);
}

static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
	if (dn->node_page)
		f2fs_put_page(dn->node_page, 1);
	if (dn->inode_page && dn->node_page != dn->inode_page)
		f2fs_put_page(dn->inode_page, 0);
	dn->node_page = NULL;
	dn->inode_page = NULL;
}

static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
					size_t size, void (*ctor)(void *))
{
	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, ctor);
}

#define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)

static inline bool IS_INODE(struct page *page)
{
	struct f2fs_node *p = (struct f2fs_node *)page_address(page);
	return RAW_IS_INODE(p);
}

static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}

static inline block_t datablock_addr(struct page *node_page,
		unsigned int offset)
{
	struct f2fs_node *raw_node;
	__le32 *addr_array;
	raw_node = (struct f2fs_node *)page_address(node_page);
	addr_array = blkaddr_in_node(raw_node);
	return le32_to_cpu(addr_array[offset]);
}

static inline int f2fs_test_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	return mask & *addr;
}

static inline int f2fs_set_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr |= mask;
	return ret;
}

static inline int f2fs_clear_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr &= ~mask;
	return ret;
}
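
/*
 * Illustrative sketch (editor's example): the helpers above number bits
 * from the most significant bit of each byte, matching the on-disk
 * bitmap layout, so setting bit 0 flips 0x80 in the first byte.
 */
static inline void example_bitmap_usage(void)
{
	char map[2] = {0, 0};

	f2fs_set_bit(0, map);		/* map[0] becomes 0x80 */
	f2fs_set_bit(9, map);		/* map[1] becomes 0x40 */

	if (f2fs_test_bit(9, map))
		f2fs_clear_bit(9, map);	/* map[1] back to 0x00 */
}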

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_NEED_CP,		/* need to do checkpoint during fsync */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
};

static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	set_bit(flag, &fi->flags);
}

static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag)
{
	return test_bit(flag, &fi->flags);
}

static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	clear_bit(flag, &fi->flags);
}

static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode)
{
	fi->i_acl_mode = mode;
	set_inode_flag(fi, FI_ACL_MODE);
}

static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	if (is_inode_flag_set(fi, FI_ACL_MODE)) {
		clear_inode_flag(fi, FI_ACL_MODE);
		return 1;
	}
	return 0;
}
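
/*
 * Illustrative sketch (editor's example): how i_acl_mode is meant to be
 * used.  A mode change is stashed with set_acl_inode() and committed to
 * the VFS inode only if FI_ACL_MODE is still set afterwards, loosely
 * mirroring how the setattr and ACL paths use these flags.
 */
static inline void example_commit_acl_mode(struct f2fs_inode_info *fi,
					umode_t mode)
{
	set_acl_inode(fi, mode);

	if (cond_clear_inode_flag(fi, FI_ACL_MODE))
		fi->vfs_inode.i_mode = fi->i_acl_mode;
}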

/*
 * file.c
 */
int f2fs_sync_file(struct file *, loff_t, loff_t, int);
void truncate_data_blocks(struct dnode_of_data *);
void f2fs_truncate(struct inode *);
int f2fs_setattr(struct dentry *, struct iattr *);
int truncate_hole(struct inode *, pgoff_t, pgoff_t);
long f2fs_ioctl(struct file *, unsigned int, unsigned long);

/*
 * inode.c
 */
void f2fs_set_inode_flags(struct inode *);
struct inode *f2fs_iget_nowait(struct super_block *, unsigned long);
struct inode *f2fs_iget(struct super_block *, unsigned long);
void update_inode(struct inode *, struct page *);
int f2fs_write_inode(struct inode *, struct writeback_control *);
void f2fs_evict_inode(struct inode *);

/*
 * namei.c
 */
struct dentry *f2fs_get_parent(struct dentry *child);

/*
 * dir.c
 */
struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *,
							struct page **);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
				struct page *, struct inode *);
void init_dent_inode(struct dentry *, struct page *);
int f2fs_add_link(struct dentry *, struct inode *);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
int f2fs_make_empty(struct inode *, struct inode *);
bool f2fs_empty_dir(struct inode *);

/*
 * super.c
 */
int f2fs_sync_fs(struct super_block *, int);
extern __printf(3, 4)
void f2fs_msg(struct super_block *, const char *, const char *, ...);

/*
 * hash.c
 */
f2fs_hash_t f2fs_dentry_hash(const char *, size_t);

/*
 * node.c
 */
struct dnode_of_data;
struct node_info;

int is_checkpointed_node(struct f2fs_sb_info *, nid_t);
void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
int truncate_inode_blocks(struct inode *, pgoff_t);
int remove_inode_page(struct inode *);
int new_inode_page(struct inode *, struct dentry *);
struct page *new_node_page(struct dnode_of_data *, unsigned int);
void ra_node_page(struct f2fs_sb_info *, nid_t);
struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_node_page_ra(struct page *, int);
void sync_inode_page(struct dnode_of_data *);
int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
bool alloc_nid(struct f2fs_sb_info *, nid_t *);
void alloc_nid_done(struct f2fs_sb_info *, nid_t);
void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
void recover_node_page(struct f2fs_sb_info *, struct page *,
		struct f2fs_summary *, struct node_info *, block_t);
int recover_inode_page(struct f2fs_sb_info *, struct page *);
int restore_node_summary(struct f2fs_sb_info *, unsigned int,
			struct f2fs_summary_block *);
void flush_nat_entries(struct f2fs_sb_info *);
int build_node_manager(struct f2fs_sb_info *);
void destroy_node_manager(struct f2fs_sb_info *);
int __init create_node_manager_caches(void);
void destroy_node_manager_caches(void);

/*
 * segment.c
 */
void f2fs_balance_fs(struct f2fs_sb_info *);
void invalidate_blocks(struct f2fs_sb_info *, block_t);
void locate_dirty_segment(struct f2fs_sb_info *, unsigned int);
void clear_prefree_segments(struct f2fs_sb_info *);
int npages_for_summary_flush(struct f2fs_sb_info *);
void allocate_new_segments(struct f2fs_sb_info *);
struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
struct bio *f2fs_bio_alloc(struct block_device *, int);
void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool sync);
int write_meta_page(struct f2fs_sb_info *, struct page *,
					struct writeback_control *);
void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int,
					block_t, block_t *);
void write_data_page(struct inode *, struct page *, struct dnode_of_data*,
					block_t, block_t *);
void rewrite_data_page(struct f2fs_sb_info *, struct page *, block_t);
void recover_data_page(struct f2fs_sb_info *, struct page *,
				struct f2fs_summary *, block_t, block_t);
void rewrite_node_page(struct f2fs_sb_info *, struct page *,
				struct f2fs_summary *, block_t, block_t);
void write_data_summaries(struct f2fs_sb_info *, block_t);
void write_node_summaries(struct f2fs_sb_info *, block_t);
int lookup_journal_in_cursum(struct f2fs_summary_block *,
					int, unsigned int, int);
void flush_sit_entries(struct f2fs_sb_info *);
int build_segment_manager(struct f2fs_sb_info *);
void reset_victim_segmap(struct f2fs_sb_info *);
void destroy_segment_manager(struct f2fs_sb_info *);

/*
 * checkpoint.c
 */
struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
int check_orphan_space(struct f2fs_sb_info *);
void add_orphan_inode(struct f2fs_sb_info *, nid_t);
void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
int recover_orphan_inodes(struct f2fs_sb_info *);
int get_valid_checkpoint(struct f2fs_sb_info *);
void set_dirty_dir_page(struct inode *, struct page *);
void remove_dirty_dir_inode(struct inode *);
void sync_dirty_dir_inodes(struct f2fs_sb_info *);
void block_operations(struct f2fs_sb_info *);
void write_checkpoint(struct f2fs_sb_info *, bool, bool);
void init_orphan_info(struct f2fs_sb_info *);
int __init create_checkpoint_caches(void);
void destroy_checkpoint_caches(void);

/*
 * data.c
 */
int reserve_new_block(struct dnode_of_data *);
void update_extent_cache(block_t, struct dnode_of_data *);
struct page *find_data_page(struct inode *, pgoff_t);
struct page *get_lock_data_page(struct inode *, pgoff_t);
struct page *get_new_data_page(struct inode *, pgoff_t, bool);
int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int);
int do_write_data_page(struct page *);

/*
 * gc.c
 */
int start_gc_thread(struct f2fs_sb_info *);
void stop_gc_thread(struct f2fs_sb_info *);
block_t start_bidx_of_node(unsigned int);
int f2fs_gc(struct f2fs_sb_info *);
void build_gc_manager(struct f2fs_sb_info *);
int __init create_gc_caches(void);
void destroy_gc_caches(void);

/*
 * recovery.c
 */
void recover_fsync_data(struct f2fs_sb_info *);
bool space_for_roll_forward(struct f2fs_sb_info *);

/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	struct mutex stat_lock;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	int hit_ext, total_ext;
	int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
	int nats, sits, fnids;
	int total_count, utilization;
	int bg_gc;
	unsigned int valid_count, valid_node_count, valid_inode_count;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages;
	int prefree_count, call_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int tot_blks, data_blks, node_blks;
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];

	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned base_mem, cache_mem;
};

#define stat_inc_call_count(si)	((si)->call_count++)

#define stat_inc_seg_count(sbi, type)					\
	do {								\
		struct f2fs_stat_info *si = sbi->stat_info;		\
		(si)->tot_segs++;					\
		if (type == SUM_TYPE_DATA)				\
			si->data_segs++;				\
		else							\
			si->node_segs++;				\
	} while (0)

#define stat_inc_tot_blk_count(si, blks)				\
	(si->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks)				\
	do {								\
		struct f2fs_stat_info *si = sbi->stat_info;		\
		stat_inc_tot_blk_count(si, blks);			\
		si->data_blks += (blks);				\
	} while (0)

#define stat_inc_node_blk_count(sbi, blks)				\
	do {								\
		struct f2fs_stat_info *si = sbi->stat_info;		\
		stat_inc_tot_blk_count(si, blks);			\
		si->node_blks += (blks);				\
	} while (0)
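
/*
 * Illustrative sketch (editor's example): how a segment move is expected
 * to feed these counters, roughly following what the cleaning path in
 * gc.c does.  SUM_TYPE_DATA comes from the on-disk summary definitions.
 */
static inline void example_account_moved_segment(struct f2fs_sb_info *sbi,
					int blks)
{
	stat_inc_seg_count(sbi, SUM_TYPE_DATA);
	stat_inc_data_blk_count(sbi, blks);
	stat_inc_call_count(sbi->stat_info);
}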

int f2fs_build_stats(struct f2fs_sb_info *);
void f2fs_destroy_stats(struct f2fs_sb_info *);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
#else
#define stat_inc_call_count(si)
#define stat_inc_seg_count(si, type)
#define stat_inc_tot_blk_count(si, blks)
#define stat_inc_data_blk_count(si, blks)
#define stat_inc_node_blk_count(sbi, blks)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
#endif

extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
#endif