/*
 * bcache stats code
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "stats.h"
#include "btree.h"
#include "request.h"
#include "sysfs.h"

/*
 * We keep absolute totals of various statistics, and additionally a set of
 * three rolling averages.
 *
 * Every so often, a timer goes off and rescales the rolling averages.
 * DAY_RESCALE/HOUR_RESCALE/FIVE_MINUTE_RESCALE are how many times the timer
 * has to go off before we rescale each set of numbers; that gets us half
 * lives of 5 minutes, one hour, and one day.
 *
 * accounting_delay is how often the timer goes off - 22 times in 5 minutes,
 * and accounting_weight is what we use to rescale:
 *
 * pow(31 / 32, 22) ~= 1/2
 *
 * So that we don't have to increment each set of numbers every time we (say)
 * get a cache hit, we increment a single atomic_t in acc->collector, and when
 * the rescale function runs it resets the atomic counter to 0 and adds its
 * old value to each of the exported numbers.
 *
 * To reduce rounding error, the numbers in struct cache_stats are all
 * stored left shifted by 16, and scaled back in the sysfs show() function.
 */

static const unsigned DAY_RESCALE		= 288;
static const unsigned HOUR_RESCALE		= 12;
static const unsigned FIVE_MINUTE_RESCALE	= 1;
static const unsigned accounting_delay		= (HZ * 300) / 22;
static const unsigned accounting_weight		= 32;

/* sysfs reading/writing */

read_attribute(cache_hits);
read_attribute(cache_misses);
read_attribute(cache_bypass_hits);
read_attribute(cache_bypass_misses);
read_attribute(cache_hit_ratio);
read_attribute(cache_readaheads);
read_attribute(cache_miss_collisions);
read_attribute(bypassed);

/*
 * Export one struct cache_stats set via sysfs.  The >> 16 in var() undoes
 * the fixed-point shift described in the comment above.
 */
SHOW(bch_stats)
{
	struct cache_stats *s =
		container_of(kobj, struct cache_stats, kobj);
#define var(stat)		(s->stat >> 16)
	var_print(cache_hits);
	var_print(cache_misses);
	var_print(cache_bypass_hits);
	var_print(cache_bypass_misses);

	sysfs_print(cache_hit_ratio,
		    DIV_SAFE(var(cache_hits) * 100,
			     var(cache_hits) + var(cache_misses)));

	var_print(cache_readaheads);
	var_print(cache_miss_collisions);
	/* sectors_bypassed is in 512-byte sectors; << 9 converts to bytes
	 * for the human-readable print */
	sysfs_hprint(bypassed, var(sectors_bypassed) << 9);
#undef var
	return 0;
}

/* The stats files are read only; writes are accepted but ignored. */
STORE(bch_stats)
{
	return size;
}

/*
 * Nothing to free: the cache_stats objects are embedded in
 * struct cache_accounting.
 * NOTE(review): this assumes the containing object outlives all kobject
 * references - confirm against the cache_accounting teardown path.
 */
static void bch_stats_release(struct kobject *k)
{
}

static struct attribute *bch_stats_files[] = {
	&sysfs_cache_hits,
	&sysfs_cache_misses,
	&sysfs_cache_bypass_hits,
	&sysfs_cache_bypass_misses,
	&sysfs_cache_hit_ratio,
	&sysfs_cache_readaheads,
	&sysfs_cache_miss_collisions,
	&sysfs_bypassed,
	NULL
};
static KTYPE(bch_stats);

/* Timer callback, defined below: drains the collector and applies decay. */
static void scale_accounting(unsigned long data);

/*
 * Initialize the four exported stats sets and arm the rescale timer.
 * The closure is used to synchronize teardown with a possibly-running
 * timer (see bch_cache_accounting_destroy() and scale_accounting()).
 */
void bch_cache_accounting_init(struct cache_accounting *acc,
			       struct closure *parent)
{
	kobject_init(&acc->total.kobj, &bch_stats_ktype);
	kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
	kobject_init(&acc->hour.kobj, &bch_stats_ktype);
	kobject_init(&acc->day.kobj, &bch_stats_ktype);

	closure_init(&acc->cl, parent);
	init_timer(&acc->timer);
	acc->timer.expires	= jiffies + accounting_delay;
	acc->timer.data		= (unsigned long) acc;
	acc->timer.function	= scale_accounting;
	add_timer(&acc->timer);
}

/*
 * Register the four stats directories under @parent.  The ?: chain stops at
 * the first failure and returns that error.
 * NOTE(review): kobjects added before a failure are not rolled back here -
 * presumably the caller's destroy path does the kobject_put()s; verify.
 */
int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
				   struct kobject *parent)
{
	int ret = kobject_add(&acc->total.kobj, parent,
			      "stats_total");
	ret = ret ?: kobject_add(&acc->five_minute.kobj, parent,
				 "stats_five_minute");
	ret = ret ?: kobject_add(&acc->hour.kobj, parent,
				 "stats_hour");
	ret = ret ?: kobject_add(&acc->day.kobj, parent,
				 "stats_day");
	return ret;
}

/*
 * Zero the absolute totals.
 * NOTE(review): this clears 7 unsigned longs starting at total.cache_hits,
 * hard-coding the count and layout of the counter fields in
 * struct cache_stats - verify against the struct definition, and keep in
 * sync if a counter is ever added.
 */
void bch_cache_accounting_clear(struct cache_accounting *acc)
{
	memset(&acc->total.cache_hits,
	       0,
	       sizeof(unsigned long) * 7);
}

/*
 * Tear down: drop the sysfs references, then stop the rescale timer.  If
 * del_timer_sync() deactivated a pending timer, the handler will never run
 * again to observe acc->closing, so return the closure here; otherwise the
 * handler saw the flag (or will not be re-armed) and does the
 * closure_return() itself.
 */
void bch_cache_accounting_destroy(struct cache_accounting *acc)
{
	kobject_put(&acc->total.kobj);
	kobject_put(&acc->five_minute.kobj);
	kobject_put(&acc->hour.kobj);
	kobject_put(&acc->day.kobj);

	atomic_set(&acc->closing, 1);
	if (del_timer_sync(&acc->timer))
		closure_return(&acc->cl);
}

/* EWMA scaling */

/* Decay one stat: EWMA-add 0 with weight 32 scales the old value by 31/32. */
static void scale_stat(unsigned long *stat)
{
	*stat = ewma_add(*stat, 0, accounting_weight, 0);
}

/*
 * Decay every counter in @stats once per @rescale_at timer firings.
 * A @rescale_at of 0 never matches the pre-incremented counter (short of
 * wraparound), which is how the absolute totals avoid being decayed.
 */
static void scale_stats(struct cache_stats *stats, unsigned long rescale_at)
{
	if (++stats->rescale == rescale_at) {
		stats->rescale = 0;
		scale_stat(&stats->cache_hits);
		scale_stat(&stats->cache_misses);
		scale_stat(&stats->cache_bypass_hits);
		scale_stat(&stats->cache_bypass_misses);
		scale_stat(&stats->cache_readaheads);
		scale_stat(&stats->cache_miss_collisions);
		scale_stat(&stats->sectors_bypassed);
	}
}

/*
 * Timer callback: atomically drain each collector counter into all four
 * exported sets (shifted into the << 16 fixed-point representation), decay
 * whichever rolling averages are due, and re-arm the timer - unless the
 * accounting is being torn down, in which case hand the closure back.
 */
static void scale_accounting(unsigned long data)
{
	struct cache_accounting *acc = (struct cache_accounting *) data;

#define move_stat(name) do {						\
	unsigned t = atomic_xchg(&acc->collector.name, 0);		\
	t <<= 16;							\
	acc->five_minute.name += t;					\
	acc->hour.name += t;						\
	acc->day.name += t;						\
	acc->total.name += t;						\
} while (0)

	move_stat(cache_hits);
	move_stat(cache_misses);
	move_stat(cache_bypass_hits);
	move_stat(cache_bypass_misses);
	move_stat(cache_readaheads);
	move_stat(cache_miss_collisions);
	move_stat(sectors_bypassed);

	scale_stats(&acc->total, 0);
	scale_stats(&acc->day, DAY_RESCALE);
	scale_stats(&acc->hour, HOUR_RESCALE);
	scale_stats(&acc->five_minute, FIVE_MINUTE_RESCALE);

	acc->timer.expires += accounting_delay;

	if (!atomic_read(&acc->closing))
		add_timer(&acc->timer);
	else
		closure_return(&acc->cl);
}

/*
 * Bump the collector counter matching a hit/miss, bypassed or not.
 * (The else of each inner if binds to that inner if; the final else binds
 * to the outer if (!bypass) - valid, but brace-less by design here.)
 */
static void mark_cache_stats(struct cache_stat_collector *stats,
			     bool hit, bool bypass)
{
	if (!bypass)
		if (hit)
			atomic_inc(&stats->cache_hits);
		else
			atomic_inc(&stats->cache_misses);
	else
		if (hit)
			atomic_inc(&stats->cache_bypass_hits);
		else
			atomic_inc(&stats->cache_bypass_misses);
}

/*
 * Account a cache lookup result against both the backing device and the
 * cache set (and the bcache cgroup, when configured).
 */
void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass)
{
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	mark_cache_stats(&dc->accounting.collector, hit, bypass);
	mark_cache_stats(&s->op.c->accounting.collector, hit, bypass);
#ifdef CONFIG_CGROUP_BCACHE
	mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass);
#endif
}

/* Count a readahead against both the backing device and the cache set. */
void bch_mark_cache_readahead(struct search *s)
{
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	atomic_inc(&dc->accounting.collector.cache_readaheads);
	atomic_inc(&s->op.c->accounting.collector.cache_readaheads);
}

/* Count a cache miss collision against the backing device and cache set. */
void bch_mark_cache_miss_collision(struct search *s)
{
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	atomic_inc(&dc->accounting.collector.cache_miss_collisions);
	atomic_inc(&s->op.c->accounting.collector.cache_miss_collisions);
}

/* Account @sectors of bypassed I/O against the backing device and cache set. */
void bch_mark_sectors_bypassed(struct search *s, int sectors)
{
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
	atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed);
}