// SPDX-License-Identifier: GPL-2.0
/*
 * bcache stats code
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "stats.h"
#include "btree.h"
#include "sysfs.h"

/*
 * We keep absolute totals of various statistics, and additionally a set of
 * three rolling averages.
 *
 * Every so often, a timer goes off and rescales the rolling averages.
 * DAY_RESCALE, HOUR_RESCALE and FIVE_MINUTE_RESCALE are how many times the
 * timer has to go off before we rescale each set of numbers; that gets us
 * half lives of 5 minutes, one hour, and one day.
 *
 * accounting_delay is how often the timer goes off - 22 times in 5 minutes,
 * and accounting_weight is what we use to rescale:
 *
 * pow(31 / 32, 22) ~= 1/2
 *
 * So that we don't have to increment each set of numbers every time we (say)
 * get a cache hit, we increment a single atomic_t in acc->collector, and when
 * the rescale function runs it resets the atomic counter to 0 and adds its
 * old value to each of the exported numbers.
 *
 * To reduce rounding error, the numbers in struct cache_stats are all
 * stored left shifted by 16, and scaled back in the sysfs show() function.
 */

static const unsigned int DAY_RESCALE		= 288;
static const unsigned int HOUR_RESCALE		= 12;
static const unsigned int FIVE_MINUTE_RESCALE	= 1;
static const unsigned int accounting_delay	= (HZ * 300) / 22;
static const unsigned int accounting_weight	= 32;
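/*
 * Worked numbers for the above (a sketch of the arithmetic; the decay itself
 * comes from ewma_add() below, which with a new value of 0 amounts to
 * multiplying by (accounting_weight - 1) / accounting_weight = 31/32):
 *
 *   timer period: accounting_delay = (HZ * 300) / 22 jiffies, ~13.6 seconds
 *
 *   five_minute: rescaled every tick      -> 22 rescales per 5 minutes
 *   hour:        rescaled every 12 ticks  -> 22 rescales per hour
 *   day:         rescaled every 288 ticks -> 22 rescales per day
 *
 * Each window therefore decays by (31/32)^22 ~= 0.5 over its nominal period,
 * which is where the 5 minute / 1 hour / 1 day half lives come from.
 */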
/* sysfs reading/writing */

read_attribute(cache_hits);
read_attribute(cache_misses);
read_attribute(cache_bypass_hits);
read_attribute(cache_bypass_misses);
read_attribute(cache_hit_ratio);
read_attribute(cache_miss_collisions);
read_attribute(bypassed);

SHOW(bch_stats)
{
	struct cache_stats *s =
		container_of(kobj, struct cache_stats, kobj);
#define var(stat)	(s->stat >> 16)
	var_print(cache_hits);
	var_print(cache_misses);
	var_print(cache_bypass_hits);
	var_print(cache_bypass_misses);

	sysfs_print(cache_hit_ratio,
		    DIV_SAFE(var(cache_hits) * 100,
			     var(cache_hits) + var(cache_misses)));

	var_print(cache_miss_collisions);
	sysfs_hprint(bypassed, var(sectors_bypassed) << 9);
#undef var
	return 0;
}

STORE(bch_stats)
{
	return size;
}

static void bch_stats_release(struct kobject *k)
{
}

static struct attribute *bch_stats_attrs[] = {
	&sysfs_cache_hits,
	&sysfs_cache_misses,
	&sysfs_cache_bypass_hits,
	&sysfs_cache_bypass_misses,
	&sysfs_cache_hit_ratio,
	&sysfs_cache_miss_collisions,
	&sysfs_bypassed,
	NULL
};
ATTRIBUTE_GROUPS(bch_stats);
static KTYPE(bch_stats);

int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
				   struct kobject *parent)
{
	int ret = kobject_add(&acc->total.kobj, parent,
			      "stats_total");
	ret = ret ?: kobject_add(&acc->five_minute.kobj, parent,
				 "stats_five_minute");
	ret = ret ?: kobject_add(&acc->hour.kobj, parent,
				 "stats_hour");
	ret = ret ?: kobject_add(&acc->day.kobj, parent,
				 "stats_day");
	return ret;
}

void bch_cache_accounting_clear(struct cache_accounting *acc)
{
	acc->total.cache_hits = 0;
	acc->total.cache_misses = 0;
	acc->total.cache_bypass_hits = 0;
	acc->total.cache_bypass_misses = 0;
	acc->total.cache_miss_collisions = 0;
	acc->total.sectors_bypassed = 0;
}

void bch_cache_accounting_destroy(struct cache_accounting *acc)
{
	kobject_put(&acc->total.kobj);
	kobject_put(&acc->five_minute.kobj);
	kobject_put(&acc->hour.kobj);
	kobject_put(&acc->day.kobj);

	atomic_set(&acc->closing, 1);
	if (del_timer_sync(&acc->timer))
		closure_return(&acc->cl);
}

/* EWMA scaling */

static void scale_stat(unsigned long *stat)
{
	*stat = ewma_add(*stat, 0, accounting_weight, 0);
}

static void scale_stats(struct cache_stats *stats, unsigned long rescale_at)
{
	if (++stats->rescale == rescale_at) {
		stats->rescale = 0;
		scale_stat(&stats->cache_hits);
		scale_stat(&stats->cache_misses);
		scale_stat(&stats->cache_bypass_hits);
		scale_stat(&stats->cache_bypass_misses);
		scale_stat(&stats->cache_miss_collisions);
		scale_stat(&stats->sectors_bypassed);
	}
}

static void scale_accounting(struct timer_list *t)
{
	struct cache_accounting *acc = from_timer(acc, t, timer);

#define move_stat(name) do {						\
	unsigned int t = atomic_xchg(&acc->collector.name, 0);		\
	t <<= 16;							\
	acc->five_minute.name += t;					\
	acc->hour.name += t;						\
	acc->day.name += t;						\
	acc->total.name += t;						\
} while (0)

	move_stat(cache_hits);
	move_stat(cache_misses);
	move_stat(cache_bypass_hits);
	move_stat(cache_bypass_misses);
	move_stat(cache_miss_collisions);
	move_stat(sectors_bypassed);

	scale_stats(&acc->total, 0);
	scale_stats(&acc->day, DAY_RESCALE);
	scale_stats(&acc->hour, HOUR_RESCALE);
	scale_stats(&acc->five_minute, FIVE_MINUTE_RESCALE);

	acc->timer.expires += accounting_delay;

	if (!atomic_read(&acc->closing))
		add_timer(&acc->timer);
	else
		closure_return(&acc->cl);
}

static void mark_cache_stats(struct cache_stat_collector *stats,
			     bool hit, bool bypass)
{
	if (!bypass) {
		if (hit)
			atomic_inc(&stats->cache_hits);
		else
			atomic_inc(&stats->cache_misses);
	} else {
		if (hit)
			atomic_inc(&stats->cache_bypass_hits);
		else
			atomic_inc(&stats->cache_bypass_misses);
	}
}

void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
			       bool hit, bool bypass)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	mark_cache_stats(&dc->accounting.collector, hit, bypass);
	mark_cache_stats(&c->accounting.collector, hit, bypass);
}

void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	atomic_inc(&dc->accounting.collector.cache_miss_collisions);
	atomic_inc(&c->accounting.collector.cache_miss_collisions);
}

void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc,
			       int sectors)
{
	atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
	atomic_add(sectors, &c->accounting.collector.sectors_bypassed);
}

void bch_cache_accounting_init(struct cache_accounting *acc,
			       struct closure *parent)
{
	kobject_init(&acc->total.kobj, &bch_stats_ktype);
	kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
	kobject_init(&acc->hour.kobj, &bch_stats_ktype);
	kobject_init(&acc->day.kobj, &bch_stats_ktype);

	closure_init(&acc->cl, parent);
	timer_setup(&acc->timer, scale_accounting, 0);
	acc->timer.expires = jiffies + accounting_delay;
	add_timer(&acc->timer);
}
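/*
 * Lifecycle summary (a sketch based on the code above, not a spec):
 *
 *   bch_cache_accounting_init() initializes the four stats kobjects, hooks
 *   itself onto the parent closure via closure_init(), and arms the rescale
 *   timer; bch_cache_accounting_add_kobjs() then exposes the stats_total,
 *   stats_five_minute, stats_hour and stats_day directories under the given
 *   parent kobject.
 *
 *   The bch_mark_*() hooks are the fast path: they only bump atomic counters
 *   in ->collector.  scale_accounting() periodically folds the collector into
 *   the four cache_stats sets (shifted left by 16) and applies the EWMA decay.
 *
 *   bch_cache_accounting_destroy() drops the kobjects and sets ->closing;
 *   either del_timer_sync() cancels a pending timer and the closure is
 *   returned here, or the final scale_accounting() run sees ->closing and
 *   calls closure_return() itself.
 */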