/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>

/*
 * For mount options
 */
#define F2FS_MOUNT_BG_GC		0x00000001
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040

#define clear_opt(sbi, option)	(sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(sbi->mount_opt.opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(sbi->mount_opt.opt & F2FS_MOUNT_##option)

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))

typedef u64 block_t;
typedef u32 nid_t;

struct f2fs_mount_info {
	unsigned int	opt;
};

static inline __u32 f2fs_crc32(void *buff, size_t len)
{
	return crc32_le(F2FS_SUPER_MAGIC, buff, len);
}

static inline bool f2fs_crc_valid(__u32 blk_crc, void *buff, size_t buff_size)
{
	return f2fs_crc32(buff, buff_size) == blk_crc;
}

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

/* for the list of orphan inodes */
struct orphan_inode_entry {
	struct list_head list;	/* list head */
	nid_t ino;		/* inode number */
};

/* for the list of directory inodes */
struct dir_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last inode */
};

#define nats_in_cursum(sum)		(le16_to_cpu(sum->n_nats))
#define sits_in_cursum(sum)		(le16_to_cpu(sum->n_sits))

#define nat_in_journal(sum, i)		(sum->nat_j.entries[i].ne)
#define nid_in_journal(sum, i)		(sum->nat_j.entries[i].nid)
#define sit_in_journal(sum, i)		(sum->sit_j.entries[i].se)
#define segno_in_journal(sum, i)	(sum->sit_j.entries[i].segno)

static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i)
{
	int before = nats_in_cursum(rs);
	rs->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
{
	int before = sits_in_cursum(rs);
	rs->n_sits = cpu_to_le16(before + i);
	return before;
}
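/*
 * Illustrative sketch (not code from this header): the journal accessors
 * above are typically used together, e.g. to append one NAT entry to the
 * summary block's in-place journal. "sum", "nid" and "raw_ne" are assumed
 * locals for the example.
 *
 *	int i = update_nats_in_cursum(sum, 1);
 *
 *	nid_in_journal(sum, i) = cpu_to_le32(nid);
 *	nat_in_journal(sum, i) = raw_ne;
 */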
/*
 * ioctl commands
 */
#define F2FS_IOC_GETFLAGS	FS_IOC_GETFLAGS
#define F2FS_IOC_SETFLAGS	FS_IOC_SETFLAGS

#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
 * ioctl commands in 32 bit emulation
 */
#define F2FS_IOC32_GETFLAGS	FS_IOC32_GETFLAGS
#define F2FS_IOC32_SETFLAGS	FS_IOC32_SETFLAGS
#endif

/*
 * For INODE and NODE manager
 */
#define XATTR_NODE_OFFSET	(-1)	/*
					 * store xattrs to one node block per
					 * file keeping -1 as its node offset to
					 * distinguish from index node blocks.
					 */
#define RDONLY_NODE		1	/*
					 * specify a read-only mode when getting
					 * a node block. 0 is read-write mode.
					 * used by get_dnode_of_data().
					 */
#define F2FS_LINK_MAX		32000	/* maximum link count per file */

/* for in-memory extent cache entry */
struct extent_info {
	rwlock_t ext_lock;	/* rwlock for consistency */
	unsigned int fofs;	/* start offset in a file */
	u32 blk_addr;		/* start block address of the extent */
	unsigned int len;	/* length of the extent */
};

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* used to give file attribute hints */
	unsigned int i_current_depth;	/* used only in directory structure */
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* fields below are used internally by f2fs */
	unsigned long flags;		/* used to pass per-file flags */
	unsigned long long data_version;/* latest version of data for fsync */
	atomic_t dirty_dents;		/* # of dirty dentry pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	struct extent_info ext;		/* in-memory extent cache entry */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent i_ext)
{
	write_lock(&ext->ext_lock);
	ext->fofs = le32_to_cpu(i_ext.fofs);
	ext->blk_addr = le32_to_cpu(i_ext.blk_addr);
	ext->len = le32_to_cpu(i_ext.len);
	write_unlock(&ext->ext_lock);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	read_lock(&ext->ext_lock);
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk_addr = cpu_to_le32(ext->blk_addr);
	i_ext->len = cpu_to_le32(ext->len);
	read_unlock(&ext->ext_lock);
}
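/*
 * Illustrative sketch (assumed helper, not part of this header): looking up
 * a file offset in the per-inode extent cache under ext_lock.
 *
 *	static bool lookup_extent_cache(struct f2fs_inode_info *fi,
 *					pgoff_t index, block_t *blkaddr)
 *	{
 *		struct extent_info *ext = &fi->ext;
 *		bool hit = false;
 *
 *		read_lock(&ext->ext_lock);
 *		if (ext->len && index >= ext->fofs &&
 *				index < ext->fofs + ext->len) {
 *			*blkaddr = ext->blk_addr + (index - ext->fofs);
 *			hit = true;
 *		}
 *		read_unlock(&ext->ext_lock);
 *		return hit;
 *	}
 */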
struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t init_scan_nid;		/* the first nid to be scanned */
	nid_t next_scan_nid;		/* the next nid to be scanned */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	rwlock_t nat_tree_lock;		/* protect the nat entry cache */
	unsigned int nat_cnt;		/* the # of cached nat entries */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	struct list_head dirty_nat_entries; /* cached nat entry list (dirty) */

	/* free node ids management */
	struct list_head free_nid_list;	/* a list for free nids */
	spinlock_t free_nid_list_lock;	/* protect free nid list */
	unsigned int fcnt;		/* the number of free node ids */
	struct mutex build_lock;	/* lock for building free nids */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	block_t	data_blkaddr;		/* block address of the data block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
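/*
 * Illustrative sketch (assumed caller): the usual lifecycle of a
 * dnode_of_data is to initialize it, resolve it with get_dnode_of_data()
 * (declared below), and release it with f2fs_put_dnode(). "index" is an
 * assumed pgoff_t.
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, RDONLY_NODE);
 *	if (err)
 *		return err;
 *	blkaddr = dn.data_blkaddr;
 *	f2fs_put_dnode(&dn);
 */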
/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NO_CHECK_TYPE
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct list_head wblist_head;	/* list of under-writeback pages */
	spinlock_t wblist_lock;		/* lock for checkpoint */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */
};

/*
 * For directory operation
 */
#define NODE_DIR1_BLOCK		(ADDRS_PER_INODE + 1)
#define NODE_DIR2_BLOCK		(ADDRS_PER_INODE + 2)
#define NODE_IND1_BLOCK		(ADDRS_PER_INODE + 3)
#define NODE_IND2_BLOCK		(ADDRS_PER_INODE + 4)
#define NODE_DIND_BLOCK		(ADDRS_PER_INODE + 5)

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as pages under
 * writeback, dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
enum count_type {
	F2FS_WRITEBACK,
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	NR_COUNT_TYPE,
};

/*
 * FS_LOCK nesting subclasses for the lock validator:
 *
 * The locking order between these classes is
 * RENAME -> DENTRY_OPS -> DATA_WRITE -> DATA_NEW
 * -> DATA_TRUNC -> NODE_WRITE -> NODE_NEW -> NODE_TRUNC
 */
enum lock_type {
	RENAME,		/* for renaming operations */
	DENTRY_OPS,	/* for directory operations */
	DATA_WRITE,	/* for data write */
	DATA_NEW,	/* for data allocation */
	DATA_TRUNC,	/* for data truncate */
	NODE_NEW,	/* for node allocation */
	NODE_TRUNC,	/* for node truncate */
	NODE_WRITE,	/* for node write */
	NR_LOCK_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			while waiting for the bio's completion.
 *			It can only be used with META.
 */
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
};
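/*
 * Illustrative sketch (assumed caller, roughly what a checkpoint writer
 * might do): write back dirty META pages, then push the merged bio out
 * with META_FLUSH and wait for its completion. Both helpers are declared
 * later in this header.
 *
 *	sync_meta_pages(sbi, META, LONG_MAX);
 *	f2fs_submit_bio(sbi, META_FLUSH, true);
 */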
struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct buffer_head *raw_super_buf;	/* buffer head of raw sb */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	int s_dirty;				/* dirty flag for checkpoint */

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */
	struct bio *bio[NR_PAGE_TYPE];		/* bios to merge */
	sector_t last_block_in_bio[NR_PAGE_TYPE];	/* last block number */
	struct rw_semaphore bio_sem;		/* IO semaphore */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	struct inode *meta_inode;		/* cache meta blocks */
	struct mutex cp_mutex;			/* for checkpoint procedure */
	struct mutex fs_lock[NR_LOCK_TYPE];	/* for blocking FS operations */
	struct mutex write_inode;		/* mutex for write inode */
	struct mutex writepages;		/* mutex for writepages() */
	int por_doing;				/* whether recovery is in progress */

	/* for orphan inode management */
	struct list_head orphan_inode_list;	/* orphan inode list */
	struct mutex orphan_inode_mutex;	/* for orphan inode list */
	unsigned int n_orphans;			/* # of orphan inodes */

	/* for directory inode management */
	struct list_head dir_inode_list;	/* dir inode list */
	spinlock_t dir_inode_lock;		/* for dir inode list lock */
	unsigned int n_dirty_dirs;		/* # of dir inodes */

	/* basic file system units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number */
	unsigned int node_ino_num;		/* node inode number */
	unsigned int meta_ino_num;		/* meta inode number */
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	unsigned int total_valid_inode_count;	/* valid inode count */
	int active_logs;			/* # of active logs */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t alloc_valid_block_count;	/* # of allocated blocks */
	block_t last_valid_block_count;		/* for recovery */
	u32 s_next_generation;			/* for NFS support */
	atomic_t nr_pages[NR_COUNT_TYPE];	/* # of pages, see count_type */

	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct mutex gc_mutex;			/* mutex for GC */
	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */

	/*
	 * for stat information.
	 * one is for the LFS mode, and the other is for the SSR mode.
	 */
	struct f2fs_stat_info *stat_info;	/* FS status information */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	unsigned int last_victim[2];		/* last victim segment # */
	int total_hit_ext, read_hit_ext;	/* extent cache hit ratio */
	int bg_gc;				/* background gc calls */
	spinlock_t stat_lock;			/* lock for stat operations */
};

/*
 * Inline functions
 */
static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
	return container_of(inode, struct f2fs_inode_info, vfs_inode);
}

static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_super_block *)(sbi->raw_super);
}

static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_checkpoint *)(sbi->ckpt);
}

static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_nm_info *)(sbi->nm_info);
}

static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_sm_info *)(sbi->sm_info);
}

static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
{
	return (struct sit_info *)(SM_I(sbi)->sit_info);
}

static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
{
	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
}

static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
{
	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
}

static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi)
{
	sbi->s_dirty = 1;
}

static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi)
{
	sbi->s_dirty = 0;
}

static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	return ckpt_flags & f;
}

static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags |= f;
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags &= (~f);
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}
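/*
 * Illustrative sketch: the CP_* flag values (e.g. CP_UMOUNT_FLAG) come from
 * include/linux/f2fs_fs.h. A checkpoint writer might record a clean umount as
 *
 *	if (is_umount)
 *		set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
 *	else
 *		clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
 *
 * and a later mount can test it with is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG).
 */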
static inline void mutex_lock_op(struct f2fs_sb_info *sbi, enum lock_type t)
{
	mutex_lock_nested(&sbi->fs_lock[t], t);
}

static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, enum lock_type t)
{
	mutex_unlock(&sbi->fs_lock[t]);
}

/*
 * Check whether the given nid is within node id range.
 */
static inline void check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	BUG_ON((nid >= NM_I(sbi)->max_nid));
}

#define F2FS_DEFAULT_ALLOCATED_BLOCKS 1

/*
 * Check whether the inode has blocks or not
 */
static inline int F2FS_HAS_BLOCKS(struct inode *inode)
{
	if (F2FS_I(inode)->i_xattr_nid)
		return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1);
	else
		return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS);
}

static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
				 struct inode *inode, blkcnt_t count)
{
	block_t	valid_block_count;

	spin_lock(&sbi->stat_lock);
	valid_block_count =
		sbi->total_valid_block_count + (block_t)count;
	if (valid_block_count > sbi->user_block_count) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}
	inode->i_blocks += count;
	sbi->total_valid_block_count = valid_block_count;
	sbi->alloc_valid_block_count += (block_t)count;
	spin_unlock(&sbi->stat_lock);
	return true;
}

static inline int dec_valid_block_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						blkcnt_t count)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(sbi->total_valid_block_count < (block_t) count);
	BUG_ON(inode->i_blocks < count);
	inode->i_blocks -= count;
	sbi->total_valid_block_count -= (block_t)count;
	spin_unlock(&sbi->stat_lock);
	return 0;
}

static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_inc(&sbi->nr_pages[count_type]);
	F2FS_SET_SB_DIRT(sbi);
}

static inline void inode_inc_dirty_dents(struct inode *inode)
{
	atomic_inc(&F2FS_I(inode)->dirty_dents);
}

static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_dec(&sbi->nr_pages[count_type]);
}

static inline void inode_dec_dirty_dents(struct inode *inode)
{
	atomic_dec(&F2FS_I(inode)->dirty_dents);
}

static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
{
	return atomic_read(&sbi->nr_pages[count_type]);
}

static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
	unsigned int pages_per_sec = sbi->segs_per_sec *
					(1 << sbi->log_blocks_per_seg);
	return ((get_pages(sbi, block_type) + pages_per_sec - 1)
			>> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
}

static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
{
	block_t ret;
	spin_lock(&sbi->stat_lock);
	ret = sbi->total_valid_block_count;
	spin_unlock(&sbi->stat_lock);
	return ret;
}
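/*
 * Illustrative sketch (assumed caller): a new block reservation is
 * quota-checked with inc_valid_block_count() and must be released with
 * dec_valid_block_count() if the reservation is later abandoned.
 *
 *	if (!inc_valid_block_count(sbi, dn->inode, 1))
 *		return -ENOSPC;
 *	...
 *	dec_valid_block_count(sbi, dn->inode, 1);	(error/abort path)
 */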
static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	/* return NAT or SIT bitmap */
	if (flag == NAT_BITMAP)
		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
	else if (flag == SIT_BITMAP)
		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);

	return 0;
}

static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	int offset = (flag == NAT_BITMAP) ?
			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
	return &ckpt->sit_nat_version_bitmap + offset;
}

static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr;
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_version = le64_to_cpu(ckpt->checkpoint_ver);

	start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	/*
	 * odd numbered checkpoints should be at cp segment 0
	 * and even numbered checkpoints must be at cp segment 1
	 */
	if (!(ckpt_version & 1))
		start_addr += sbi->blocks_per_seg;

	return start_addr;
}

static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						unsigned int count)
{
	block_t	valid_block_count;
	unsigned int valid_node_count;

	spin_lock(&sbi->stat_lock);

	valid_block_count = sbi->total_valid_block_count + (block_t)count;
	sbi->alloc_valid_block_count += (block_t)count;
	valid_node_count = sbi->total_valid_node_count + count;

	if (valid_block_count > sbi->user_block_count) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}

	if (valid_node_count > sbi->total_node_count) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}

	if (inode)
		inode->i_blocks += count;
	sbi->total_valid_node_count = valid_node_count;
	sbi->total_valid_block_count = valid_block_count;
	spin_unlock(&sbi->stat_lock);

	return true;
}

static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						unsigned int count)
{
	spin_lock(&sbi->stat_lock);

	BUG_ON(sbi->total_valid_block_count < count);
	BUG_ON(sbi->total_valid_node_count < count);
	BUG_ON(inode->i_blocks < count);

	inode->i_blocks -= count;
	sbi->total_valid_node_count -= count;
	sbi->total_valid_block_count -= (block_t)count;

	spin_unlock(&sbi->stat_lock);
}

static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
{
	unsigned int ret;
	spin_lock(&sbi->stat_lock);
	ret = sbi->total_valid_node_count;
	spin_unlock(&sbi->stat_lock);
	return ret;
}

static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(sbi->total_valid_inode_count == sbi->total_node_count);
	sbi->total_valid_inode_count++;
	spin_unlock(&sbi->stat_lock);
}

static inline int dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(!sbi->total_valid_inode_count);
	sbi->total_valid_inode_count--;
	spin_unlock(&sbi->stat_lock);
	return 0;
}

static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi)
{
	unsigned int ret;
	spin_lock(&sbi->stat_lock);
	ret = sbi->total_valid_inode_count;
	spin_unlock(&sbi->stat_lock);
	return ret;
}

static inline void f2fs_put_page(struct page *page, int unlock)
{
	if (!page || IS_ERR(page))
		return;

	if (unlock) {
		BUG_ON(!PageLocked(page));
		unlock_page(page);
	}
	page_cache_release(page);
}
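/*
 * Illustrative sketch (assumed caller): pages returned locked by helpers
 * such as get_node_page() (declared below) are released with
 * f2fs_put_page(page, 1), which unlocks the page before dropping its
 * reference; pass 0 when the page is not locked.
 *
 *	struct page *page = get_node_page(sbi, nid);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...
 *	f2fs_put_page(page, 1);
 */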
static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
	if (dn->node_page)
		f2fs_put_page(dn->node_page, 1);
	if (dn->inode_page && dn->node_page != dn->inode_page)
		f2fs_put_page(dn->inode_page, 0);
	dn->node_page = NULL;
	dn->inode_page = NULL;
}

static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
					size_t size, void (*ctor)(void *))
{
	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, ctor);
}

#define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)

static inline bool IS_INODE(struct page *page)
{
	struct f2fs_node *p = (struct f2fs_node *)page_address(page);
	return RAW_IS_INODE(p);
}

static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}

static inline block_t datablock_addr(struct page *node_page,
		unsigned int offset)
{
	struct f2fs_node *raw_node;
	__le32 *addr_array;
	raw_node = (struct f2fs_node *)page_address(node_page);
	addr_array = blkaddr_in_node(raw_node);
	return le32_to_cpu(addr_array[offset]);
}

static inline int f2fs_test_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	return mask & *addr;
}

static inline int f2fs_set_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr |= mask;
	return ret;
}

static inline int f2fs_clear_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr &= ~mask;
	return ret;
}

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_NEED_CP,		/* need to do checkpoint during fsync */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
};

static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	set_bit(flag, &fi->flags);
}

static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag)
{
	return test_bit(flag, &fi->flags);
}

static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	clear_bit(flag, &fi->flags);
}

static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode)
{
	fi->i_acl_mode = mode;
	set_inode_flag(fi, FI_ACL_MODE);
}

/* note: only FI_ACL_MODE is cleared conditionally; the flag argument is unused */
static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	if (is_inode_flag_set(fi, FI_ACL_MODE)) {
		clear_inode_flag(fi, FI_ACL_MODE);
		return 1;
	}
	return 0;
}
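/*
 * Illustrative sketch (assumed caller, e.g. a setattr path): the ACL-adjusted
 * mode is parked in i_acl_mode until the on-disk inode is updated, then
 * applied and the flag cleared.
 *
 *	set_acl_inode(fi, inode->i_mode);
 *	...
 *	if (is_inode_flag_set(fi, FI_ACL_MODE)) {
 *		inode->i_mode = fi->i_acl_mode;
 *		clear_inode_flag(fi, FI_ACL_MODE);
 *	}
 */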
/*
 * file.c
 */
int f2fs_sync_file(struct file *, loff_t, loff_t, int);
void truncate_data_blocks(struct dnode_of_data *);
void f2fs_truncate(struct inode *);
int f2fs_setattr(struct dentry *, struct iattr *);
int truncate_hole(struct inode *, pgoff_t, pgoff_t);
long f2fs_ioctl(struct file *, unsigned int, unsigned long);
long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);

/*
 * inode.c
 */
void f2fs_set_inode_flags(struct inode *);
struct inode *f2fs_iget(struct super_block *, unsigned long);
void update_inode(struct inode *, struct page *);
int f2fs_write_inode(struct inode *, struct writeback_control *);
void f2fs_evict_inode(struct inode *);

/*
 * namei.c
 */
struct dentry *f2fs_get_parent(struct dentry *child);

/*
 * dir.c
 */
struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *,
							struct page **);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
				struct page *, struct inode *);
void init_dent_inode(const struct qstr *, struct page *);
int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
int f2fs_make_empty(struct inode *, struct inode *);
bool f2fs_empty_dir(struct inode *);

static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
	return __f2fs_add_link(dentry->d_parent->d_inode, &dentry->d_name,
				inode);
}

/*
 * super.c
 */
int f2fs_sync_fs(struct super_block *, int);
extern __printf(3, 4)
void f2fs_msg(struct super_block *, const char *, const char *, ...);

/*
 * hash.c
 */
f2fs_hash_t f2fs_dentry_hash(const char *, size_t);

/*
 * node.c
 */
struct dnode_of_data;
struct node_info;

int is_checkpointed_node(struct f2fs_sb_info *, nid_t);
void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
int truncate_inode_blocks(struct inode *, pgoff_t);
int remove_inode_page(struct inode *);
int new_inode_page(struct inode *, const struct qstr *);
struct page *new_node_page(struct dnode_of_data *, unsigned int);
void ra_node_page(struct f2fs_sb_info *, nid_t);
struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_node_page_ra(struct page *, int);
void sync_inode_page(struct dnode_of_data *);
int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
bool alloc_nid(struct f2fs_sb_info *, nid_t *);
void alloc_nid_done(struct f2fs_sb_info *, nid_t);
void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
void recover_node_page(struct f2fs_sb_info *, struct page *,
		struct f2fs_summary *, struct node_info *, block_t);
int recover_inode_page(struct f2fs_sb_info *, struct page *);
int restore_node_summary(struct f2fs_sb_info *, unsigned int,
			struct f2fs_summary_block *);
void flush_nat_entries(struct f2fs_sb_info *);
int build_node_manager(struct f2fs_sb_info *);
void destroy_node_manager(struct f2fs_sb_info *);
int __init create_node_manager_caches(void);
void destroy_node_manager_caches(void);

/*
 * segment.c
 */
void f2fs_balance_fs(struct f2fs_sb_info *);
void invalidate_blocks(struct f2fs_sb_info *, block_t);
void locate_dirty_segment(struct f2fs_sb_info *, unsigned int);
void clear_prefree_segments(struct f2fs_sb_info *);
int npages_for_summary_flush(struct f2fs_sb_info *);
void allocate_new_segments(struct f2fs_sb_info *);
struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
struct bio *f2fs_bio_alloc(struct block_device *, int);
void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool sync);
void write_meta_page(struct f2fs_sb_info *, struct page *);
void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int,
					block_t, block_t *);
void write_data_page(struct inode *, struct page *, struct dnode_of_data*,
					block_t, block_t *);
void rewrite_data_page(struct f2fs_sb_info *, struct page *, block_t);
void recover_data_page(struct f2fs_sb_info *, struct page *,
				struct f2fs_summary *, block_t, block_t);
void rewrite_node_page(struct f2fs_sb_info *, struct page *,
				struct f2fs_summary *, block_t, block_t);
void write_data_summaries(struct f2fs_sb_info *, block_t);
void write_node_summaries(struct f2fs_sb_info *, block_t);
int lookup_journal_in_cursum(struct f2fs_summary_block *,
					int, unsigned int, int);
void flush_sit_entries(struct f2fs_sb_info *);
int build_segment_manager(struct f2fs_sb_info *);
void reset_victim_segmap(struct f2fs_sb_info *);
void destroy_segment_manager(struct f2fs_sb_info *);

/*
 * checkpoint.c
 */
struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
int check_orphan_space(struct f2fs_sb_info *);
void add_orphan_inode(struct f2fs_sb_info *, nid_t);
void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
int recover_orphan_inodes(struct f2fs_sb_info *);
int get_valid_checkpoint(struct f2fs_sb_info *);
void set_dirty_dir_page(struct inode *, struct page *);
void remove_dirty_dir_inode(struct inode *);
void sync_dirty_dir_inodes(struct f2fs_sb_info *);
void write_checkpoint(struct f2fs_sb_info *, bool);
void init_orphan_info(struct f2fs_sb_info *);
int __init create_checkpoint_caches(void);
void destroy_checkpoint_caches(void);

/*
 * data.c
 */
int reserve_new_block(struct dnode_of_data *);
void update_extent_cache(block_t, struct dnode_of_data *);
struct page *find_data_page(struct inode *, pgoff_t);
struct page *get_lock_data_page(struct inode *, pgoff_t);
struct page *get_new_data_page(struct inode *, pgoff_t, bool);
int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int);
int do_write_data_page(struct page *);

/*
 * gc.c
 */
int start_gc_thread(struct f2fs_sb_info *);
void stop_gc_thread(struct f2fs_sb_info *);
block_t start_bidx_of_node(unsigned int);
int f2fs_gc(struct f2fs_sb_info *);
void build_gc_manager(struct f2fs_sb_info *);
int __init create_gc_caches(void);
void destroy_gc_caches(void);

/*
 * recovery.c
 */
void recover_fsync_data(struct f2fs_sb_info *);
bool space_for_roll_forward(struct f2fs_sb_info *);

/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	struct mutex stat_lock;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	int hit_ext, total_ext;
	int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
	int nats, sits, fnids;
	int total_count, utilization;
	int bg_gc;
	unsigned int valid_count, valid_node_count, valid_inode_count;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages;
	int prefree_count, call_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int tot_blks, data_blks, node_blks;
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];

	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned base_mem, cache_mem;
};
#define stat_inc_call_count(si)	((si)->call_count++)

#define stat_inc_seg_count(sbi, type)					\
	do {								\
		struct f2fs_stat_info *si = sbi->stat_info;		\
		(si)->tot_segs++;					\
		if (type == SUM_TYPE_DATA)				\
			si->data_segs++;				\
		else							\
			si->node_segs++;				\
	} while (0)

#define stat_inc_tot_blk_count(si, blks)				\
	(si->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks)				\
	do {								\
		struct f2fs_stat_info *si = sbi->stat_info;		\
		stat_inc_tot_blk_count(si, blks);			\
		si->data_blks += (blks);				\
	} while (0)

#define stat_inc_node_blk_count(sbi, blks)				\
	do {								\
		struct f2fs_stat_info *si = sbi->stat_info;		\
		stat_inc_tot_blk_count(si, blks);			\
		si->node_blks += (blks);				\
	} while (0)

int f2fs_build_stats(struct f2fs_sb_info *);
void f2fs_destroy_stats(struct f2fs_sb_info *);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
#else
#define stat_inc_call_count(si)
#define stat_inc_seg_count(si, type)
#define stat_inc_tot_blk_count(si, blks)
#define stat_inc_data_blk_count(si, blks)
#define stat_inc_node_blk_count(sbi, blks)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
#endif

extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
#endif