/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>

/*
 * For mount options
 */
#define F2FS_MOUNT_BG_GC		0x00000001
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080

#define clear_opt(sbi, option)	(sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(sbi->mount_opt.opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(sbi->mount_opt.opt & F2FS_MOUNT_##option)

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
			typecheck(unsigned long long, b) &&		\
			((long long)((a) - (b)) > 0))

typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

struct f2fs_mount_info {
	unsigned int	opt;
};

#define CRCPOLY_LE 0xedb88320

static inline __u32 f2fs_crc32(void *buf, size_t len)
{
	unsigned char *p = (unsigned char *)buf;
	__u32 crc = F2FS_SUPER_MAGIC;
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
	}
	return crc;
}

static inline bool f2fs_crc_valid(__u32 blk_crc, void *buf, size_t buf_size)
{
	return f2fs_crc32(buf, buf_size) == blk_crc;
}
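
/*
 * Illustrative sketch (not part of the original header): how a caller
 * might validate the CRC stored in a checkpoint pack with the helpers
 * above.  The checksum_offset/checksum layout is assumed from
 * struct f2fs_checkpoint in include/linux/f2fs_fs.h, and the function
 * name below is made up for illustration only.
 */
#if 0
static inline bool example_cp_crc_ok(struct f2fs_checkpoint *cp)
{
	unsigned int off = le32_to_cpu(cp->checksum_offset);
	__u32 crc = le32_to_cpu(*(__le32 *)((unsigned char *)cp + off));

	/* the CRC covers everything before the checksum field itself */
	return f2fs_crc_valid(crc, cp, off);
}
#endif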

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

/* for the list of orphan inodes */
struct orphan_inode_entry {
	struct list_head list;	/* list head */
	nid_t ino;		/* inode number */
};

/* for the list of directory inodes */
struct dir_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last inode */
};

#define nats_in_cursum(sum)		(le16_to_cpu(sum->n_nats))
#define sits_in_cursum(sum)		(le16_to_cpu(sum->n_sits))

#define nat_in_journal(sum, i)		(sum->nat_j.entries[i].ne)
#define nid_in_journal(sum, i)		(sum->nat_j.entries[i].nid)
#define sit_in_journal(sum, i)		(sum->sit_j.entries[i].se)
#define segno_in_journal(sum, i)	(sum->sit_j.entries[i].segno)

static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i)
{
	int before = nats_in_cursum(rs);
	rs->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
{
	int before = sits_in_cursum(rs);
	rs->n_sits = cpu_to_le16(before + i);
	return before;
}

/*
 * ioctl commands
 */
#define F2FS_IOC_GETFLAGS	FS_IOC_GETFLAGS
#define F2FS_IOC_SETFLAGS	FS_IOC_SETFLAGS

#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
 * ioctl commands in 32 bit emulation
 */
#define F2FS_IOC32_GETFLAGS	FS_IOC32_GETFLAGS
#define F2FS_IOC32_SETFLAGS	FS_IOC32_SETFLAGS
#endif

/*
 * For INODE and NODE manager
 */
/*
 * XATTR_NODE_OFFSET stores xattrs in one node block per file, keeping -1
 * as its node offset to distinguish it from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,	/* allocate a new node page if needed */
	LOOKUP_NODE,	/* look up a node without readahead */
	LOOKUP_NODE_RA,	/*
			 * look up a node with readahead called
			 * by get_datablock_ro.
			 */
};

#define F2FS_LINK_MAX		32000	/* maximum link count per file */

/* for in-memory extent cache entry */
struct extent_info {
	rwlock_t ext_lock;	/* rwlock for consistency */
	unsigned int fofs;	/* start offset in a file */
	u32 blk_addr;		/* start block address of the extent */
	unsigned int len;	/* length of the extent */
};
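
/*
 * Illustrative sketch (not part of the original header): how a read path
 * might consult the single in-memory extent above.  A page offset hits the
 * cache when it falls inside [fofs, fofs + len), and its block address is
 * then blk_addr plus the distance from the start of the extent.  The
 * function name is made up for illustration only.
 */
#if 0
static inline bool example_lookup_extent(struct extent_info *ext,
					pgoff_t pgofs, block_t *blkaddr)
{
	bool hit = false;

	read_lock(&ext->ext_lock);
	if (ext->len && pgofs >= ext->fofs &&
			pgofs < ext->fofs + ext->len) {
		*blkaddr = ext->blk_addr + (pgofs - ext->fofs);
		hit = true;
	}
	read_unlock(&ext->ext_lock);
	return hit;
}
#endif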

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned int i_current_depth;	/* use only in directory structure */
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Used below internally in f2fs */
	unsigned long flags;		/* use to pass per-file flags */
	atomic_t dirty_dents;		/* # of dirty dentry pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	unsigned long long xattr_ver;	/* cp version of xattr modification */
	struct extent_info ext;		/* in-memory extent cache entry */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent i_ext)
{
	write_lock(&ext->ext_lock);
	ext->fofs = le32_to_cpu(i_ext.fofs);
	ext->blk_addr = le32_to_cpu(i_ext.blk_addr);
	ext->len = le32_to_cpu(i_ext.len);
	write_unlock(&ext->ext_lock);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	read_lock(&ext->ext_lock);
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk_addr = cpu_to_le32(ext->blk_addr);
	i_ext->len = cpu_to_le32(ext->len);
	read_unlock(&ext->ext_lock);
}

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	rwlock_t nat_tree_lock;		/* protect the nat entry cache */
	unsigned int nat_cnt;		/* the # of cached nat entries */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	struct list_head dirty_nat_entries; /* cached nat entry list (dirty) */

	/* free node ids management */
	struct list_head free_nid_list;	/* a list for free nids */
	spinlock_t free_nid_list_lock;	/* protect free nid list */
	unsigned int fcnt;		/* the number of free node ids */
	struct mutex build_lock;	/* lock for building free nids */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */
	int bitmap_size;		/* bitmap size */
};

/*
 * This structure is used as one of the function parameters.
 * All the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	block_t	data_blkaddr;		/* block address of the data block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
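
/*
 * Illustrative sketch (not part of the original header): the typical
 * lookup pattern built around dnode_of_data.  get_dnode_of_data(),
 * datablock_addr() and f2fs_put_dnode() are declared/defined later in
 * this header and in node.c; the function name below is made up for
 * illustration only.
 */
#if 0
static inline block_t example_data_blkaddr(struct inode *inode, pgoff_t index)
{
	struct dnode_of_data dn;
	block_t blkaddr;

	/* no cached inode/node page yet, nid not known */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&dn, index, LOOKUP_NODE))
		return 0;

	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
	f2fs_put_dnode(&dn);
	return blkaddr;
}
#endif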

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NO_CHECK_TYPE
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct list_head wblist_head;	/* list of under-writeback pages */
	spinlock_t wblist_lock;		/* lock for checkpoint */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
enum count_type {
	F2FS_WRITEBACK,
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates in async mode.
 * NODE			Node pages. It operates in async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			while waiting for the bio's completion.
 *			... Can only be used with META.
 */
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
};

struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct buffer_head *raw_super_buf;	/* buffer head of raw sb */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	int s_dirty;				/* dirty flag for checkpoint */

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */
	struct bio *bio[NR_PAGE_TYPE];		/* bios to merge */
	sector_t last_block_in_bio[NR_PAGE_TYPE];	/* last block number */
	struct rw_semaphore bio_sem;		/* IO semaphore */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	struct inode *meta_inode;		/* cache meta blocks */
	struct mutex cp_mutex;			/* checkpoint procedure lock */
	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
	struct mutex node_write;		/* locking node writes */
	struct mutex writepages;		/* mutex for writepages() */
	bool por_doing;				/* whether recovery is in progress */
	bool on_build_free_nids;		/* whether build_free_nids is in progress */
	struct task_struct *cp_task;		/* checkpoint task */

	/* for orphan inode management */
	struct list_head orphan_inode_list;	/* orphan inode list */
	struct mutex orphan_inode_mutex;	/* for orphan inode list */
	unsigned int n_orphans;			/* # of orphan inodes */

	/* for directory inode management */
	struct list_head dir_inode_list;	/* dir inode list */
	spinlock_t dir_inode_lock;		/* for dir inode list lock */

	/* basic file system units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number */
	unsigned int node_ino_num;		/* node inode number */
	unsigned int meta_ino_num;		/* meta inode number */
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	unsigned int total_valid_inode_count;	/* valid inode count */
	int active_logs;			/* # of active logs */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t alloc_valid_block_count;	/* # of allocated blocks */
	block_t last_valid_block_count;		/* for recovery */
	u32 s_next_generation;			/* for NFS support */
	atomic_t nr_pages[NR_COUNT_TYPE];	/* # of pages, see count_type */

	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct mutex gc_mutex;			/* mutex for GC */
	struct f2fs_gc_kthread *gc_thread;	/* GC thread */
	unsigned int cur_victim_sec;		/* current victim section num */

	/*
	 * for stat information.
	 * one is for the LFS mode, and the other is for the SSR mode.
	 */
#ifdef CONFIG_F2FS_STAT_FS
	struct f2fs_stat_info *stat_info;	/* FS status information */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	int total_hit_ext, read_hit_ext;	/* extent cache hit ratio */
	int bg_gc;				/* background gc calls */
	unsigned int n_dirty_dirs;		/* # of dir inodes */
#endif
	unsigned int last_victim[2];		/* last victim segment # */
	spinlock_t stat_lock;			/* lock for stat operations */

	/* For sysfs support */
	struct kobject s_kobj;
	struct completion s_kobj_unregister;
};

/*
 * Inline functions
 */
static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
	return container_of(inode, struct f2fs_inode_info, vfs_inode);
}

static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_super_block *)(sbi->raw_super);
}

static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_checkpoint *)(sbi->ckpt);
}

static inline struct f2fs_node *F2FS_NODE(struct page *page)
{
	return (struct f2fs_node *)page_address(page);
}

static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_nm_info *)(sbi->nm_info);
}

static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_sm_info *)(sbi->sm_info);
}

static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
{
	return (struct sit_info *)(SM_I(sbi)->sit_info);
}

static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
{
	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
}

static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
{
	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
}

static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi)
{
	sbi->s_dirty = 1;
}

static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi)
{
	sbi->s_dirty = 0;
}

static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
{
	return le64_to_cpu(cp->checkpoint_ver);
}

static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	return ckpt_flags & f;
}

static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags |= f;
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags &= (~f);
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
	down_read(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
{
	up_read(&sbi->cp_rwsem);
}

static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
	down_write_nest_lock(&sbi->cp_rwsem, &sbi->cp_mutex);
}

static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
{
	up_write(&sbi->cp_rwsem);
}
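
/*
 * Illustrative sketch (not part of the original header): cp_rwsem is taken
 * for reading around individual FS operations and for writing by the
 * checkpoint path, so a checkpoint waits for in-flight operations and
 * blocks new ones while it runs.  The function below is made up for
 * illustration only.
 */
#if 0
static inline void example_fs_op(struct f2fs_sb_info *sbi)
{
	f2fs_lock_op(sbi);	/* shared: many ops may run concurrently */
	/* ... modify metadata that must not race with a checkpoint ... */
	f2fs_unlock_op(sbi);

	/*
	 * The checkpoint path, by contrast, brackets its work with
	 * f2fs_lock_all()/f2fs_unlock_all() to exclude all such ops.
	 */
}
#endif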

/*
 * Check whether the given nid is within node id range.
 */
static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	WARN_ON((nid >= NM_I(sbi)->max_nid));
	if (nid >= NM_I(sbi)->max_nid)
		return -EINVAL;
	return 0;
}

#define F2FS_DEFAULT_ALLOCATED_BLOCKS 1

/*
 * Check whether the inode has blocks or not
 */
static inline int F2FS_HAS_BLOCKS(struct inode *inode)
{
	if (F2FS_I(inode)->i_xattr_nid)
		return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1);
	else
		return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS);
}

static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
				 struct inode *inode, blkcnt_t count)
{
	block_t	valid_block_count;

	spin_lock(&sbi->stat_lock);
	valid_block_count =
		sbi->total_valid_block_count + (block_t)count;
	if (valid_block_count > sbi->user_block_count) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}
	inode->i_blocks += count;
	sbi->total_valid_block_count = valid_block_count;
	sbi->alloc_valid_block_count += (block_t)count;
	spin_unlock(&sbi->stat_lock);
	return true;
}

static inline int dec_valid_block_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						blkcnt_t count)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(sbi->total_valid_block_count < (block_t) count);
	BUG_ON(inode->i_blocks < count);
	inode->i_blocks -= count;
	sbi->total_valid_block_count -= (block_t)count;
	spin_unlock(&sbi->stat_lock);
	return 0;
}

static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_inc(&sbi->nr_pages[count_type]);
	F2FS_SET_SB_DIRT(sbi);
}

static inline void inode_inc_dirty_dents(struct inode *inode)
{
	atomic_inc(&F2FS_I(inode)->dirty_dents);
}

static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_dec(&sbi->nr_pages[count_type]);
}

static inline void inode_dec_dirty_dents(struct inode *inode)
{
	atomic_dec(&F2FS_I(inode)->dirty_dents);
}

static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
{
	return atomic_read(&sbi->nr_pages[count_type]);
}

static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
	unsigned int pages_per_sec = sbi->segs_per_sec *
					(1 << sbi->log_blocks_per_seg);
	return ((get_pages(sbi, block_type) + pages_per_sec - 1)
			>> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
}

static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
{
	block_t ret;
	spin_lock(&sbi->stat_lock);
	ret = sbi->total_valid_block_count;
	spin_unlock(&sbi->stat_lock);
	return ret;
}

static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	/* return NAT or SIT bitmap */
	if (flag == NAT_BITMAP)
		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
	else if (flag == SIT_BITMAP)
		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);

	return 0;
}

static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	int offset = (flag == NAT_BITMAP) ?
			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
	return &ckpt->sit_nat_version_bitmap + offset;
}
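
/*
 * Illustrative sketch (not part of the original header): the SIT and NAT
 * version bitmaps live back to back inside the checkpoint pack, so a
 * manager init path might snapshot its bitmap with the two helpers above.
 * The surrounding code and the use of kmemdup() are assumptions made for
 * illustration only.
 */
#if 0
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	nm_i->nat_bitmap = kmemdup(__bitmap_ptr(sbi, NAT_BITMAP),
					nm_i->bitmap_size, GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
#endif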

static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr;
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_version = cur_cp_version(ckpt);

	start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	/*
	 * odd numbered checkpoint should be at cp segment 0
	 * and even numbered checkpoint must be at cp segment 1
	 */
	if (!(ckpt_version & 1))
		start_addr += sbi->blocks_per_seg;

	return start_addr;
}

static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						unsigned int count)
{
	block_t	valid_block_count;
	unsigned int valid_node_count;

	spin_lock(&sbi->stat_lock);

	valid_block_count = sbi->total_valid_block_count + (block_t)count;
	sbi->alloc_valid_block_count += (block_t)count;
	valid_node_count = sbi->total_valid_node_count + count;

	if (valid_block_count > sbi->user_block_count) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}

	if (valid_node_count > sbi->total_node_count) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}

	if (inode)
		inode->i_blocks += count;
	sbi->total_valid_node_count = valid_node_count;
	sbi->total_valid_block_count = valid_block_count;
	spin_unlock(&sbi->stat_lock);

	return true;
}

static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						unsigned int count)
{
	spin_lock(&sbi->stat_lock);

	BUG_ON(sbi->total_valid_block_count < count);
	BUG_ON(sbi->total_valid_node_count < count);
	BUG_ON(inode->i_blocks < count);

	inode->i_blocks -= count;
	sbi->total_valid_node_count -= count;
	sbi->total_valid_block_count -= (block_t)count;

	spin_unlock(&sbi->stat_lock);
}

static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
{
	unsigned int ret;
	spin_lock(&sbi->stat_lock);
	ret = sbi->total_valid_node_count;
	spin_unlock(&sbi->stat_lock);
	return ret;
}

static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(sbi->total_valid_inode_count == sbi->total_node_count);
	sbi->total_valid_inode_count++;
	spin_unlock(&sbi->stat_lock);
}

static inline int dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(!sbi->total_valid_inode_count);
	sbi->total_valid_inode_count--;
	spin_unlock(&sbi->stat_lock);
	return 0;
}

static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi)
{
	unsigned int ret;
	spin_lock(&sbi->stat_lock);
	ret = sbi->total_valid_inode_count;
	spin_unlock(&sbi->stat_lock);
	return ret;
}

static inline void f2fs_put_page(struct page *page, int unlock)
{
	if (!page || IS_ERR(page))
		return;

	if (unlock) {
		BUG_ON(!PageLocked(page));
		unlock_page(page);
	}
	page_cache_release(page);
}

static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
	if (dn->node_page)
		f2fs_put_page(dn->node_page, 1);
	if (dn->inode_page && dn->node_page != dn->inode_page)
		f2fs_put_page(dn->inode_page, 0);
	dn->node_page = NULL;
	dn->inode_page = NULL;
}

static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
					size_t size, void (*ctor)(void *))
{
	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, ctor);
}

static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
						gfp_t flags)
{
	void *entry;
retry:
	entry = kmem_cache_alloc(cachep, flags);
	if (!entry) {
		cond_resched();
		goto retry;
	}

	return entry;
}
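
/*
 * Illustrative sketch (not part of the original header): how the slab
 * helpers above are typically used.  f2fs_kmem_cache_alloc() retries
 * until it succeeds, so callers treat the allocation as if it cannot
 * fail.  The cache name, function names and surrounding code are
 * assumptions made for illustration only.
 */
#if 0
static struct kmem_cache *example_orphan_slab;

static int example_create_caches(void)
{
	example_orphan_slab = f2fs_kmem_cache_create("f2fs_orphan_entry",
				sizeof(struct orphan_inode_entry), NULL);
	if (!example_orphan_slab)
		return -ENOMEM;
	return 0;
}

static struct orphan_inode_entry *example_alloc_orphan(nid_t ino)
{
	struct orphan_inode_entry *new;

	new = f2fs_kmem_cache_alloc(example_orphan_slab, GFP_ATOMIC);
	new->ino = ino;
	INIT_LIST_HEAD(&new->list);
	return new;
}
#endif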

#define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)

static inline bool IS_INODE(struct page *page)
{
	struct f2fs_node *p = F2FS_NODE(page);
	return RAW_IS_INODE(p);
}

static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}

static inline block_t datablock_addr(struct page *node_page,
		unsigned int offset)
{
	struct f2fs_node *raw_node;
	__le32 *addr_array;
	raw_node = F2FS_NODE(node_page);
	addr_array = blkaddr_in_node(raw_node);
	return le32_to_cpu(addr_array[offset]);
}

static inline int f2fs_test_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	return mask & *addr;
}

static inline int f2fs_set_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr |= mask;
	return ret;
}

static inline int f2fs_clear_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr &= ~mask;
	return ret;
}
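
/*
 * Illustrative worked example (not part of the original header): unlike
 * the generic test_bit()/set_bit() helpers, the f2fs_*_bit() helpers above
 * index bits MSB-first within each byte, matching the on-disk bitmap
 * layout.  For example, with addr[0] == 0x80:
 *
 *	f2fs_test_bit(0, addr)	-> non-zero  (mask 1 << 7 == 0x80)
 *	f2fs_test_bit(7, addr)	-> 0         (mask 1 << 0 == 0x01)
 *	f2fs_test_bit(8, addr)	-> tests the most significant bit of addr[1]
 */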

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_UPDATE_DIR,		/* should update inode block for consistency */
	FI_DELAY_IPUT,		/* used for the recovery */
	FI_INLINE_XATTR,	/* used for inline xattr */
};

static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	set_bit(flag, &fi->flags);
}

static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag)
{
	return test_bit(flag, &fi->flags);
}

static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	clear_bit(flag, &fi->flags);
}

static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode)
{
	fi->i_acl_mode = mode;
	set_inode_flag(fi, FI_ACL_MODE);
}

static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	if (is_inode_flag_set(fi, FI_ACL_MODE)) {
		clear_inode_flag(fi, FI_ACL_MODE);
		return 1;
	}
	return 0;
}

static inline void get_inline_info(struct f2fs_inode_info *fi,
					struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_INLINE_XATTR)
		set_inode_flag(fi, FI_INLINE_XATTR);
}

static inline void set_raw_inline(struct f2fs_inode_info *fi,
					struct f2fs_inode *ri)
{
	ri->i_inline = 0;

	if (is_inode_flag_set(fi, FI_INLINE_XATTR))
		ri->i_inline |= F2FS_INLINE_XATTR;
}

static inline unsigned int addrs_per_inode(struct f2fs_inode_info *fi)
{
	if (is_inode_flag_set(fi, FI_INLINE_XATTR))
		return DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS;
	return DEF_ADDRS_PER_INODE;
}

static inline void *inline_xattr_addr(struct page *page)
{
	struct f2fs_inode *ri;
	ri = (struct f2fs_inode *)page_address(page);
	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
					F2FS_INLINE_XATTR_ADDRS]);
}

static inline int inline_xattr_size(struct inode *inode)
{
	if (is_inode_flag_set(F2FS_I(inode), FI_INLINE_XATTR))
		return F2FS_INLINE_XATTR_ADDRS << 2;
	else
		return 0;
}

static inline int f2fs_readonly(struct super_block *sb)
{
	return sb->s_flags & MS_RDONLY;
}

/*
 * file.c
 */
int f2fs_sync_file(struct file *, loff_t, loff_t, int);
void truncate_data_blocks(struct dnode_of_data *);
void f2fs_truncate(struct inode *);
int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
int f2fs_setattr(struct dentry *, struct iattr *);
int truncate_hole(struct inode *, pgoff_t, pgoff_t);
int truncate_data_blocks_range(struct dnode_of_data *, int);
long f2fs_ioctl(struct file *, unsigned int, unsigned long);
long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);

/*
 * inode.c
 */
void f2fs_set_inode_flags(struct inode *);
struct inode *f2fs_iget(struct super_block *, unsigned long);
int try_to_free_nats(struct f2fs_sb_info *, int);
void update_inode(struct inode *, struct page *);
int update_inode_page(struct inode *);
int f2fs_write_inode(struct inode *, struct writeback_control *);
void f2fs_evict_inode(struct inode *);

/*
 * namei.c
 */
struct dentry *f2fs_get_parent(struct dentry *child);

/*
 * dir.c
 */
struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *,
							struct page **);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
				struct page *, struct inode *);
int update_dent_inode(struct inode *, const struct qstr *);
int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
int f2fs_make_empty(struct inode *, struct inode *);
bool f2fs_empty_dir(struct inode *);

static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
	return __f2fs_add_link(dentry->d_parent->d_inode, &dentry->d_name,
				inode);
}

/*
 * super.c
 */
int f2fs_sync_fs(struct super_block *, int);
extern __printf(3, 4)
void f2fs_msg(struct super_block *, const char *, const char *, ...);

/*
 * hash.c
 */
f2fs_hash_t f2fs_dentry_hash(const char *, size_t);

/*
 * node.c
 */
struct dnode_of_data;
struct node_info;

int is_checkpointed_node(struct f2fs_sb_info *, nid_t);
void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
int truncate_inode_blocks(struct inode *, pgoff_t);
int truncate_xattr_node(struct inode *, struct page *);
int remove_inode_page(struct inode *);
struct page *new_inode_page(struct inode *, const struct qstr *);
struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
void ra_node_page(struct f2fs_sb_info *, nid_t);
struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_node_page_ra(struct page *, int);
void sync_inode_page(struct dnode_of_data *);
int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
bool alloc_nid(struct f2fs_sb_info *, nid_t *);
void alloc_nid_done(struct f2fs_sb_info *, nid_t);
void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
void recover_node_page(struct f2fs_sb_info *, struct page *,
		struct f2fs_summary *, struct node_info *, block_t);
int recover_inode_page(struct f2fs_sb_info *, struct page *);
int restore_node_summary(struct f2fs_sb_info *, unsigned int,
			struct f2fs_summary_block *);
void flush_nat_entries(struct f2fs_sb_info *);
int build_node_manager(struct f2fs_sb_info *);
void destroy_node_manager(struct f2fs_sb_info *);
int __init create_node_manager_caches(void);
void destroy_node_manager_caches(void);

/*
 * segment.c
 */
void f2fs_balance_fs(struct f2fs_sb_info *);
void f2fs_balance_fs_bg(struct f2fs_sb_info *);
void invalidate_blocks(struct f2fs_sb_info *, block_t);
void clear_prefree_segments(struct f2fs_sb_info *);
int npages_for_summary_flush(struct f2fs_sb_info *);
void allocate_new_segments(struct f2fs_sb_info *);
struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
struct bio *f2fs_bio_alloc(struct block_device *, int);
void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool);
void f2fs_wait_on_page_writeback(struct page *, enum page_type, bool);
void write_meta_page(struct f2fs_sb_info *, struct page *);
void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int,
					block_t, block_t *);
void write_data_page(struct inode *, struct page *, struct dnode_of_data *,
					block_t, block_t *);
void rewrite_data_page(struct f2fs_sb_info *, struct page *, block_t);
void recover_data_page(struct f2fs_sb_info *, struct page *,
				struct f2fs_summary *, block_t, block_t);
void rewrite_node_page(struct f2fs_sb_info *, struct page *,
				struct f2fs_summary *, block_t, block_t);
void write_data_summaries(struct f2fs_sb_info *, block_t);
void write_node_summaries(struct f2fs_sb_info *, block_t);
int lookup_journal_in_cursum(struct f2fs_summary_block *,
					int, unsigned int, int);
void flush_sit_entries(struct f2fs_sb_info *);
int build_segment_manager(struct f2fs_sb_info *);
void destroy_segment_manager(struct f2fs_sb_info *);

/*
 * checkpoint.c
 */
struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
int acquire_orphan_inode(struct f2fs_sb_info *);
void release_orphan_inode(struct f2fs_sb_info *);
void add_orphan_inode(struct f2fs_sb_info *, nid_t);
void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
int recover_orphan_inodes(struct f2fs_sb_info *);
int get_valid_checkpoint(struct f2fs_sb_info *);
void set_dirty_dir_page(struct inode *, struct page *);
void add_dirty_dir_inode(struct inode *);
void remove_dirty_dir_inode(struct inode *);
struct inode *check_dirty_dir_inode(struct f2fs_sb_info *, nid_t);
void sync_dirty_dir_inodes(struct f2fs_sb_info *);
void write_checkpoint(struct f2fs_sb_info *, bool);
void init_orphan_info(struct f2fs_sb_info *);
int __init create_checkpoint_caches(void);
void destroy_checkpoint_caches(void);

/*
 * data.c
 */
int reserve_new_block(struct dnode_of_data *);
void update_extent_cache(block_t, struct dnode_of_data *);
struct page *find_data_page(struct inode *, pgoff_t, bool);
struct page *get_lock_data_page(struct inode *, pgoff_t);
struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int);
int do_write_data_page(struct page *);

/*
 * gc.c
 */
int start_gc_thread(struct f2fs_sb_info *);
void stop_gc_thread(struct f2fs_sb_info *);
block_t start_bidx_of_node(unsigned int, struct f2fs_inode_info *);
int f2fs_gc(struct f2fs_sb_info *);
void build_gc_manager(struct f2fs_sb_info *);
int __init create_gc_caches(void);
void destroy_gc_caches(void);

/*
 * recovery.c
 */
int recover_fsync_data(struct f2fs_sb_info *);
bool space_for_roll_forward(struct f2fs_sb_info *);

/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	struct mutex stat_lock;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	int hit_ext, total_ext;
	int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
	int nats, sits, fnids;
	int total_count, utilization;
	int bg_gc;
	unsigned int valid_count, valid_node_count, valid_inode_count;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages;
	int prefree_count, call_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int tot_blks, data_blks, node_blks;
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];

	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned base_mem, cache_mem;
};

static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_stat_info *)sbi->stat_info;
}

#define stat_inc_call_count(si)		((si)->call_count++)
#define stat_inc_bggc_count(sbi)	((sbi)->bg_gc++)
#define stat_inc_dirty_dir(sbi)		((sbi)->n_dirty_dirs++)
#define stat_dec_dirty_dir(sbi)		((sbi)->n_dirty_dirs--)
#define stat_inc_total_hit(sb)		((F2FS_SB(sb))->total_hit_ext++)
#define stat_inc_read_hit(sb)		((F2FS_SB(sb))->read_hit_ext++)
#define stat_inc_seg_type(sbi, curseg)					\
		((sbi)->segment_count[(curseg)->alloc_type]++)
#define stat_inc_block_count(sbi, curseg)				\
		((sbi)->block_count[(curseg)->alloc_type]++)

#define stat_inc_seg_count(sbi, type)					\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		(si)->tot_segs++;					\
		if (type == SUM_TYPE_DATA)				\
			si->data_segs++;				\
		else							\
			si->node_segs++;				\
	} while (0)

#define stat_inc_tot_blk_count(si, blks)				\
	(si->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks)				\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->data_blks += (blks);				\
	} while (0)

#define stat_inc_node_blk_count(sbi, blks)				\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->node_blks += (blks);				\
	} while (0)

int f2fs_build_stats(struct f2fs_sb_info *);
void f2fs_destroy_stats(struct f2fs_sb_info *);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
#else
#define stat_inc_call_count(si)
#define stat_inc_bggc_count(si)
#define stat_inc_dirty_dir(sbi)
#define stat_dec_dirty_dir(sbi)
#define stat_inc_total_hit(sb)
#define stat_inc_read_hit(sb)
#define stat_inc_seg_type(sbi, curseg)
#define stat_inc_block_count(sbi, curseg)
#define stat_inc_seg_count(si, type)
#define stat_inc_tot_blk_count(si, blks)
#define stat_inc_data_blk_count(si, blks)
#define stat_inc_node_blk_count(sbi, blks)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
#endif

extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
#endif