xref: /openbmc/linux/block/blk-stat.c (revision a83b576c9c25cf771fb0b15ec5eb2e7510ec2f5a)
1cf43e6beSJens Axboe /*
2cf43e6beSJens Axboe  * Block stat tracking code
3cf43e6beSJens Axboe  *
4cf43e6beSJens Axboe  * Copyright (C) 2016 Jens Axboe
5cf43e6beSJens Axboe  */
6cf43e6beSJens Axboe #include <linux/kernel.h>
734dbad5dSOmar Sandoval #include <linux/rculist.h>
8cf43e6beSJens Axboe #include <linux/blk-mq.h>
9cf43e6beSJens Axboe 
10cf43e6beSJens Axboe #include "blk-stat.h"
11cf43e6beSJens Axboe #include "blk-mq.h"
12cf43e6beSJens Axboe 
134875253fSOmar Sandoval #define BLK_RQ_STAT_BATCH	64
144875253fSOmar Sandoval 
/*
 * Per-queue container for registered stat callbacks.  Writers (add/remove
 * callback) take ->lock; completion-path readers walk ->callbacks under
 * rcu_read_lock(), so entries are linked/unlinked with the RCU list ops.
 */
struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
};
1934dbad5dSOmar Sandoval 
/*
 * blk_stat_rq_ddir() - Bucket function keyed on request data direction.
 * @rq: Request being bucketed.
 *
 * Thin exported wrapper around rq_data_dir(), usable as the bucket_fn of a
 * stat callback that separates samples by data direction.
 */
unsigned int blk_stat_rq_ddir(const struct request *rq)
{
	return rq_data_dir(rq);
}
EXPORT_SYMBOL_GPL(blk_stat_rq_ddir);
2534dbad5dSOmar Sandoval 
2634dbad5dSOmar Sandoval static void blk_stat_init(struct blk_rq_stat *stat)
2734dbad5dSOmar Sandoval {
2834dbad5dSOmar Sandoval 	stat->min = -1ULL;
2934dbad5dSOmar Sandoval 	stat->max = stat->nr_samples = stat->mean = 0;
3034dbad5dSOmar Sandoval 	stat->batch = stat->nr_batch = 0;
3134dbad5dSOmar Sandoval }
3234dbad5dSOmar Sandoval 
/*
 * Fold the batched samples of @stat into its running mean and sample count.
 *
 * ->nr_batch and ->nr_samples are snapshotted once with READ_ONCE() and the
 * snapshots used consistently for the weighted-mean computation below.  The
 * new mean is (old_mean * nr_samples + batch_total) / (nr_samples + nr_batch);
 * when there are no prior samples it degenerates to batch_total / nr_batch.
 */
static void blk_stat_flush_batch(struct blk_rq_stat *stat)
{
	const s32 nr_batch = READ_ONCE(stat->nr_batch);
	const s32 nr_samples = READ_ONCE(stat->nr_samples);

	if (!nr_batch)
		return;
	if (!nr_samples)
		stat->mean = div64_s64(stat->batch, nr_batch);
	else {
		stat->mean = div64_s64((stat->mean * nr_samples) +
					stat->batch,
					nr_batch + nr_samples);
	}

	stat->nr_samples += nr_batch;
	stat->nr_batch = stat->batch = 0;
}
51cf43e6beSJens Axboe 
52cf43e6beSJens Axboe static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
53cf43e6beSJens Axboe {
547d8d0014SOmar Sandoval 	blk_stat_flush_batch(src);
557d8d0014SOmar Sandoval 
56cf43e6beSJens Axboe 	if (!src->nr_samples)
57cf43e6beSJens Axboe 		return;
58cf43e6beSJens Axboe 
59cf43e6beSJens Axboe 	dst->min = min(dst->min, src->min);
60cf43e6beSJens Axboe 	dst->max = max(dst->max, src->max);
61cf43e6beSJens Axboe 
62cf43e6beSJens Axboe 	if (!dst->nr_samples)
63cf43e6beSJens Axboe 		dst->mean = src->mean;
64cf43e6beSJens Axboe 	else {
65cf43e6beSJens Axboe 		dst->mean = div64_s64((src->mean * src->nr_samples) +
66cf43e6beSJens Axboe 					(dst->mean * dst->nr_samples),
67cf43e6beSJens Axboe 					dst->nr_samples + src->nr_samples);
68cf43e6beSJens Axboe 	}
69cf43e6beSJens Axboe 	dst->nr_samples += src->nr_samples;
70cf43e6beSJens Axboe }
71cf43e6beSJens Axboe 
/*
 * Record one sample in @stat.  Samples are accumulated in ->batch and only
 * folded into ->mean by blk_stat_flush_batch(), limiting how often the
 * 64-bit division runs.
 */
static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);

	/*
	 * Flush first if adding @value would wrap the batch accumulator
	 * (unsigned sum smaller than one addend) or if the batch would hit
	 * BLK_RQ_STAT_BATCH entries.
	 */
	if (stat->batch + value < stat->batch ||
	    stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
		blk_stat_flush_batch(stat);

	stat->batch += value;
	stat->nr_batch++;
}
84cf43e6beSJens Axboe 
/*
 * blk_stat_add() - Feed a completed request's latency to every active stat
 * callback registered on its queue.
 * @rq: The completed request.
 *
 * The sample is the delta between the current time and the request's
 * recorded issue time; if "now" reads earlier than the issue time the
 * sample is silently dropped.  The callback list is walked under RCU so
 * completion never contends with callback add/remove.
 */
void blk_stat_add(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket;
	s64 now, value;

	now = __blk_stat_time(ktime_to_ns(ktime_get()));
	if (now < blk_stat_time(&rq->issue_stat))
		return;

	value = now - blk_stat_time(&rq->issue_stat);

	rcu_read_lock();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (blk_stat_is_active(cb)) {
			/* Each callback picks its own bucket for this rq. */
			bucket = cb->bucket_fn(rq);
			stat = &this_cpu_ptr(cb->cpu_stat)[bucket];
			__blk_stat_add(stat, value);
		}
	}
	rcu_read_unlock();
}
10934dbad5dSOmar Sandoval 
11034dbad5dSOmar Sandoval static void blk_stat_timer_fn(unsigned long data)
11134dbad5dSOmar Sandoval {
11234dbad5dSOmar Sandoval 	struct blk_stat_callback *cb = (void *)data;
11334dbad5dSOmar Sandoval 	unsigned int bucket;
11434dbad5dSOmar Sandoval 	int cpu;
11534dbad5dSOmar Sandoval 
11634dbad5dSOmar Sandoval 	for (bucket = 0; bucket < cb->buckets; bucket++)
11734dbad5dSOmar Sandoval 		blk_stat_init(&cb->stat[bucket]);
11834dbad5dSOmar Sandoval 
11934dbad5dSOmar Sandoval 	for_each_online_cpu(cpu) {
12034dbad5dSOmar Sandoval 		struct blk_rq_stat *cpu_stat;
12134dbad5dSOmar Sandoval 
12234dbad5dSOmar Sandoval 		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
12334dbad5dSOmar Sandoval 		for (bucket = 0; bucket < cb->buckets; bucket++) {
12434dbad5dSOmar Sandoval 			blk_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
12534dbad5dSOmar Sandoval 			blk_stat_init(&cpu_stat[bucket]);
126cf43e6beSJens Axboe 		}
127cf43e6beSJens Axboe 	}
128cf43e6beSJens Axboe 
12934dbad5dSOmar Sandoval 	cb->timer_fn(cb);
130cf43e6beSJens Axboe }
131cf43e6beSJens Axboe 
13234dbad5dSOmar Sandoval struct blk_stat_callback *
13334dbad5dSOmar Sandoval blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
13434dbad5dSOmar Sandoval 			unsigned int (*bucket_fn)(const struct request *),
13534dbad5dSOmar Sandoval 			unsigned int buckets, void *data)
136cf43e6beSJens Axboe {
13734dbad5dSOmar Sandoval 	struct blk_stat_callback *cb;
13834dbad5dSOmar Sandoval 
13934dbad5dSOmar Sandoval 	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
14034dbad5dSOmar Sandoval 	if (!cb)
14134dbad5dSOmar Sandoval 		return NULL;
14234dbad5dSOmar Sandoval 
14334dbad5dSOmar Sandoval 	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
14434dbad5dSOmar Sandoval 				 GFP_KERNEL);
14534dbad5dSOmar Sandoval 	if (!cb->stat) {
14634dbad5dSOmar Sandoval 		kfree(cb);
14734dbad5dSOmar Sandoval 		return NULL;
14834dbad5dSOmar Sandoval 	}
14934dbad5dSOmar Sandoval 	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
15034dbad5dSOmar Sandoval 				      __alignof__(struct blk_rq_stat));
15134dbad5dSOmar Sandoval 	if (!cb->cpu_stat) {
15234dbad5dSOmar Sandoval 		kfree(cb->stat);
15334dbad5dSOmar Sandoval 		kfree(cb);
15434dbad5dSOmar Sandoval 		return NULL;
15534dbad5dSOmar Sandoval 	}
15634dbad5dSOmar Sandoval 
15734dbad5dSOmar Sandoval 	cb->timer_fn = timer_fn;
15834dbad5dSOmar Sandoval 	cb->bucket_fn = bucket_fn;
15934dbad5dSOmar Sandoval 	cb->data = data;
16034dbad5dSOmar Sandoval 	cb->buckets = buckets;
16134dbad5dSOmar Sandoval 	setup_timer(&cb->timer, blk_stat_timer_fn, (unsigned long)cb);
16234dbad5dSOmar Sandoval 
16334dbad5dSOmar Sandoval 	return cb;
16434dbad5dSOmar Sandoval }
16534dbad5dSOmar Sandoval EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);
16634dbad5dSOmar Sandoval 
16734dbad5dSOmar Sandoval void blk_stat_add_callback(struct request_queue *q,
16834dbad5dSOmar Sandoval 			   struct blk_stat_callback *cb)
16934dbad5dSOmar Sandoval {
17034dbad5dSOmar Sandoval 	unsigned int bucket;
17134dbad5dSOmar Sandoval 	int cpu;
17234dbad5dSOmar Sandoval 
17334dbad5dSOmar Sandoval 	for_each_possible_cpu(cpu) {
17434dbad5dSOmar Sandoval 		struct blk_rq_stat *cpu_stat;
17534dbad5dSOmar Sandoval 
17634dbad5dSOmar Sandoval 		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
17734dbad5dSOmar Sandoval 		for (bucket = 0; bucket < cb->buckets; bucket++)
17834dbad5dSOmar Sandoval 			blk_stat_init(&cpu_stat[bucket]);
17934dbad5dSOmar Sandoval 	}
18034dbad5dSOmar Sandoval 
18134dbad5dSOmar Sandoval 	spin_lock(&q->stats->lock);
18234dbad5dSOmar Sandoval 	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
183cf43e6beSJens Axboe 	set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
18434dbad5dSOmar Sandoval 	spin_unlock(&q->stats->lock);
18534dbad5dSOmar Sandoval }
18634dbad5dSOmar Sandoval EXPORT_SYMBOL_GPL(blk_stat_add_callback);
18734dbad5dSOmar Sandoval 
/*
 * blk_stat_remove_callback() - Unregister @cb from @q.
 *
 * The entry is unlinked under the stats lock (clearing QUEUE_FLAG_STATS
 * when it was the last callback), then del_timer_sync() guarantees a
 * concurrently running blk_stat_timer_fn() has finished before return.
 * RCU readers may still hold a reference to the entry, so actually
 * freeing it goes through blk_stat_free_callback(), which defers via RCU.
 */
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	spin_lock(&q->stats->lock);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks))
		clear_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);

	del_timer_sync(&cb->timer);
}
EXPORT_SYMBOL_GPL(blk_stat_remove_callback);
20034dbad5dSOmar Sandoval 
20134dbad5dSOmar Sandoval static void blk_stat_free_callback_rcu(struct rcu_head *head)
20234dbad5dSOmar Sandoval {
20334dbad5dSOmar Sandoval 	struct blk_stat_callback *cb;
20434dbad5dSOmar Sandoval 
20534dbad5dSOmar Sandoval 	cb = container_of(head, struct blk_stat_callback, rcu);
20634dbad5dSOmar Sandoval 	free_percpu(cb->cpu_stat);
20734dbad5dSOmar Sandoval 	kfree(cb->stat);
20834dbad5dSOmar Sandoval 	kfree(cb);
209cf43e6beSJens Axboe }
210cf43e6beSJens Axboe 
21134dbad5dSOmar Sandoval void blk_stat_free_callback(struct blk_stat_callback *cb)
21234dbad5dSOmar Sandoval {
213*a83b576cSJens Axboe 	if (cb)
21434dbad5dSOmar Sandoval 		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
21534dbad5dSOmar Sandoval }
21634dbad5dSOmar Sandoval EXPORT_SYMBOL_GPL(blk_stat_free_callback);
21734dbad5dSOmar Sandoval 
21834dbad5dSOmar Sandoval struct blk_queue_stats *blk_alloc_queue_stats(void)
21934dbad5dSOmar Sandoval {
22034dbad5dSOmar Sandoval 	struct blk_queue_stats *stats;
22134dbad5dSOmar Sandoval 
22234dbad5dSOmar Sandoval 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
22334dbad5dSOmar Sandoval 	if (!stats)
22434dbad5dSOmar Sandoval 		return NULL;
22534dbad5dSOmar Sandoval 
22634dbad5dSOmar Sandoval 	INIT_LIST_HEAD(&stats->callbacks);
22734dbad5dSOmar Sandoval 	spin_lock_init(&stats->lock);
22834dbad5dSOmar Sandoval 
22934dbad5dSOmar Sandoval 	return stats;
23034dbad5dSOmar Sandoval }
23134dbad5dSOmar Sandoval 
23234dbad5dSOmar Sandoval void blk_free_queue_stats(struct blk_queue_stats *stats)
23334dbad5dSOmar Sandoval {
23434dbad5dSOmar Sandoval 	if (!stats)
23534dbad5dSOmar Sandoval 		return;
23634dbad5dSOmar Sandoval 
23734dbad5dSOmar Sandoval 	WARN_ON(!list_empty(&stats->callbacks));
23834dbad5dSOmar Sandoval 
23934dbad5dSOmar Sandoval 	kfree(stats);
240cf43e6beSJens Axboe }
241