/*
 * fs/f2fs/gc.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define GC_THREAD_MIN_WB_PAGES		1	/*
						 * a threshold to determine
						 * whether IO subsystem is idle
						 * or not
						 */
#define DEF_GC_THREAD_MIN_SLEEP_TIME	30000	/* milliseconds */
#define DEF_GC_THREAD_MAX_SLEEP_TIME	60000
#define DEF_GC_THREAD_NOGC_SLEEP_TIME	300000	/* wait 5 min */
#define LIMIT_INVALID_BLOCK	40 /* percentage over total user space */
#define LIMIT_FREE_BLOCK	40 /* percentage over invalid + free space */

/*
 * with this macro, we can control the max time we do garbage collection,
 * when user triggers batch mode gc by ioctl.
 */
#define F2FS_BATCH_GC_MAX_NUM		16

/* Search max. number of dirty segments to select a victim segment */
#define DEF_MAX_VICTIM_SEARCH	4096	/* covers 8GB */

struct f2fs_gc_kthread {
	struct task_struct *f2fs_gc_task;
	wait_queue_head_t gc_wait_queue_head;

	/* for gc sleep time */
	unsigned int min_sleep_time;
	unsigned int max_sleep_time;
	unsigned int no_gc_sleep_time;

	/* for changing gc mode */
	unsigned int gc_idle;
};

struct gc_inode_list {
	struct list_head ilist;
	struct radix_tree_root iroot;
};

/*
 * inline functions
 */
static inline block_t free_user_blocks(struct f2fs_sb_info *sbi)
{
	if (free_segments(sbi) < overprovision_segments(sbi))
		return 0;
	else
		return (free_segments(sbi) - overprovision_segments(sbi))
			<< sbi->log_blocks_per_seg;
}

static inline block_t limit_invalid_user_blocks(struct f2fs_sb_info *sbi)
{
	return (long)(sbi->user_block_count * LIMIT_INVALID_BLOCK) / 100;
}

static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi)
{
	block_t reclaimable_user_blocks = sbi->user_block_count -
						written_block_count(sbi);
	return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100;
}

static inline void increase_sleep_time(struct f2fs_gc_kthread *gc_th,
								long *wait)
{
	if (*wait == gc_th->no_gc_sleep_time)
		return;

	*wait += gc_th->min_sleep_time;
	if (*wait > gc_th->max_sleep_time)
		*wait = gc_th->max_sleep_time;
}

static inline void decrease_sleep_time(struct f2fs_gc_kthread *gc_th,
								long *wait)
{
	if (*wait == gc_th->no_gc_sleep_time)
		*wait = gc_th->max_sleep_time;

	*wait -= gc_th->min_sleep_time;
	if (*wait <= gc_th->min_sleep_time)
		*wait = gc_th->min_sleep_time;
}

static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
{
	block_t invalid_user_blocks = sbi->user_block_count -
						written_block_count(sbi);
	/*
	 * Background GC is triggered with the following conditions.
	 * 1. There are a number of invalid blocks.
	 * 2. There is not enough free space.
	 */
	if (invalid_user_blocks > limit_invalid_user_blocks(sbi) &&
			free_user_blocks(sbi) < limit_free_user_blocks(sbi))
		return true;
	return false;
}

static inline int is_idle(struct f2fs_sb_info *sbi)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	struct request_list *rl = &q->root_rl;

	return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]);
}
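/*
 * Example: how a background GC thread might drive the helpers above.
 * This is an illustrative sketch only, loosely modeled on the GC kthread
 * loop in fs/f2fs/gc.c; the gc_th/sbi variables and the f2fs_gc() call are
 * assumed from that context, and the exact upstream logic may differ.
 *
 *	long wait_ms = gc_th->min_sleep_time;
 *
 *	do {
 *		wait_event_interruptible_timeout(gc_th->gc_wait_queue_head,
 *				kthread_should_stop(),
 *				msecs_to_jiffies(wait_ms));
 *		if (kthread_should_stop())
 *			break;
 *
 *		(back off while the block device is still busy)
 *		if (!is_idle(sbi)) {
 *			increase_sleep_time(gc_th, &wait_ms);
 *			continue;
 *		}
 *
 *		(collect more aggressively once invalid blocks pile up)
 *		if (has_enough_invalid_blocks(sbi))
 *			decrease_sleep_time(gc_th, &wait_ms);
 *		else
 *			increase_sleep_time(gc_th, &wait_ms);
 *
 *		(nothing reclaimed: sleep for the long no-GC interval)
 *		if (f2fs_gc(sbi))
 *			wait_ms = gc_th->no_gc_sleep_time;
 *	} while (!kthread_should_stop());
 */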