xref: /openbmc/linux/block/blk-stat.c (revision 1e952e95843d437b8a904dbd5b48d72db8ac23ec)
// SPDX-License-Identifier: GPL-2.0
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
7cf43e6beSJens Axboe #include <linux/kernel.h>
834dbad5dSOmar Sandoval #include <linux/rculist.h>
9cf43e6beSJens Axboe 
10cf43e6beSJens Axboe #include "blk-stat.h"
11cf43e6beSJens Axboe #include "blk-mq.h"
12b9147dd1SShaohua Li #include "blk.h"
13cf43e6beSJens Axboe 
/*
 * Per-request-queue stats state: the set of registered stat callbacks plus
 * a reference count of users that want stats kept even with no callbacks.
 */
struct blk_queue_stats {
	struct list_head callbacks;	/* blk_stat_callbacks, RCU-traversed */
	spinlock_t lock;		/* protects callbacks and accounting */
	int accounting;			/* users needing QUEUE_FLAG_STATS set */
};
1934dbad5dSOmar Sandoval 
blk_rq_stat_init(struct blk_rq_stat * stat)202ecbf456SJosef Bacik void blk_rq_stat_init(struct blk_rq_stat *stat)
2134dbad5dSOmar Sandoval {
2234dbad5dSOmar Sandoval 	stat->min = -1ULL;
2334dbad5dSOmar Sandoval 	stat->max = stat->nr_samples = stat->mean = 0;
24eca8b53aSShaohua Li 	stat->batch = 0;
2534dbad5dSOmar Sandoval }
2634dbad5dSOmar Sandoval 
/* src is a per-cpu stat, mean isn't initialized */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	/*
	 * Bail out if src contributes nothing, or if the combined sample
	 * count would wrap (an unsigned sum that doesn't grow past
	 * dst->nr_samples means src is empty or the addition overflowed).
	 */
	if (dst->nr_samples + src->nr_samples <= dst->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	/*
	 * Weighted merge: src->batch is the raw sum of src's samples
	 * (see blk_rq_stat_add()), so fold it into dst's running mean.
	 */
	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
				dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}
41cf43e6beSJens Axboe 
/*
 * Account one sample @value into @stat: track min/max and accumulate
 * the raw sum in ->batch for a later mean computation.
 */
void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
	if (value < stat->min)
		stat->min = value;
	if (value > stat->max)
		stat->max = value;

	stat->batch += value;
	stat->nr_samples++;
}
49cf43e6beSJens Axboe 
/*
 * Record a completed request into every active stat callback on the
 * request's queue. @now is the completion time in nanoseconds.
 */
void blk_stat_add(struct request *rq, u64 now)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket, cpu;
	u64 value;

	/* I/O service time; clamp to 0 if the clocks appear to have gone
	 * backwards relative to the recorded start time. */
	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

	/* Throttling stats only care about normal reads and writes. */
	if (req_op(rq) == REQ_OP_READ || req_op(rq) == REQ_OP_WRITE)
		blk_throtl_stat_add(rq, value);

	/* RCU protects the callback list against concurrent removal;
	 * get_cpu() disables preemption so the per-cpu slot stays ours. */
	rcu_read_lock();
	cpu = get_cpu();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		/* A negative bucket means the callback ignores this rq. */
		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
		blk_rq_stat_add(stat, value);
	}
	put_cpu();
	rcu_read_unlock();
}
7934dbad5dSOmar Sandoval 
blk_stat_timer_fn(struct timer_list * t)80e99e88a9SKees Cook static void blk_stat_timer_fn(struct timer_list *t)
8134dbad5dSOmar Sandoval {
82e99e88a9SKees Cook 	struct blk_stat_callback *cb = from_timer(cb, t, timer);
8334dbad5dSOmar Sandoval 	unsigned int bucket;
8434dbad5dSOmar Sandoval 	int cpu;
8534dbad5dSOmar Sandoval 
8634dbad5dSOmar Sandoval 	for (bucket = 0; bucket < cb->buckets; bucket++)
872ecbf456SJosef Bacik 		blk_rq_stat_init(&cb->stat[bucket]);
8834dbad5dSOmar Sandoval 
8934dbad5dSOmar Sandoval 	for_each_online_cpu(cpu) {
9034dbad5dSOmar Sandoval 		struct blk_rq_stat *cpu_stat;
9134dbad5dSOmar Sandoval 
9234dbad5dSOmar Sandoval 		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
9334dbad5dSOmar Sandoval 		for (bucket = 0; bucket < cb->buckets; bucket++) {
942ecbf456SJosef Bacik 			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
952ecbf456SJosef Bacik 			blk_rq_stat_init(&cpu_stat[bucket]);
96cf43e6beSJens Axboe 		}
97cf43e6beSJens Axboe 	}
98cf43e6beSJens Axboe 
9934dbad5dSOmar Sandoval 	cb->timer_fn(cb);
100cf43e6beSJens Axboe }
101cf43e6beSJens Axboe 
10234dbad5dSOmar Sandoval struct blk_stat_callback *
blk_stat_alloc_callback(void (* timer_fn)(struct blk_stat_callback *),int (* bucket_fn)(const struct request *),unsigned int buckets,void * data)10334dbad5dSOmar Sandoval blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
104a37244e4SStephen Bates 			int (*bucket_fn)(const struct request *),
10534dbad5dSOmar Sandoval 			unsigned int buckets, void *data)
106cf43e6beSJens Axboe {
10734dbad5dSOmar Sandoval 	struct blk_stat_callback *cb;
10834dbad5dSOmar Sandoval 
10934dbad5dSOmar Sandoval 	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
11034dbad5dSOmar Sandoval 	if (!cb)
11134dbad5dSOmar Sandoval 		return NULL;
11234dbad5dSOmar Sandoval 
11334dbad5dSOmar Sandoval 	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
11434dbad5dSOmar Sandoval 				 GFP_KERNEL);
11534dbad5dSOmar Sandoval 	if (!cb->stat) {
11634dbad5dSOmar Sandoval 		kfree(cb);
11734dbad5dSOmar Sandoval 		return NULL;
11834dbad5dSOmar Sandoval 	}
11934dbad5dSOmar Sandoval 	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
12034dbad5dSOmar Sandoval 				      __alignof__(struct blk_rq_stat));
12134dbad5dSOmar Sandoval 	if (!cb->cpu_stat) {
12234dbad5dSOmar Sandoval 		kfree(cb->stat);
12334dbad5dSOmar Sandoval 		kfree(cb);
12434dbad5dSOmar Sandoval 		return NULL;
12534dbad5dSOmar Sandoval 	}
12634dbad5dSOmar Sandoval 
12734dbad5dSOmar Sandoval 	cb->timer_fn = timer_fn;
12834dbad5dSOmar Sandoval 	cb->bucket_fn = bucket_fn;
12934dbad5dSOmar Sandoval 	cb->data = data;
13034dbad5dSOmar Sandoval 	cb->buckets = buckets;
131e99e88a9SKees Cook 	timer_setup(&cb->timer, blk_stat_timer_fn, 0);
13234dbad5dSOmar Sandoval 
13334dbad5dSOmar Sandoval 	return cb;
13434dbad5dSOmar Sandoval }
13534dbad5dSOmar Sandoval 
blk_stat_add_callback(struct request_queue * q,struct blk_stat_callback * cb)13634dbad5dSOmar Sandoval void blk_stat_add_callback(struct request_queue *q,
13734dbad5dSOmar Sandoval 			   struct blk_stat_callback *cb)
13834dbad5dSOmar Sandoval {
13934dbad5dSOmar Sandoval 	unsigned int bucket;
140e11d80a8STejun Heo 	unsigned long flags;
14134dbad5dSOmar Sandoval 	int cpu;
14234dbad5dSOmar Sandoval 
14334dbad5dSOmar Sandoval 	for_each_possible_cpu(cpu) {
14434dbad5dSOmar Sandoval 		struct blk_rq_stat *cpu_stat;
14534dbad5dSOmar Sandoval 
14634dbad5dSOmar Sandoval 		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
14734dbad5dSOmar Sandoval 		for (bucket = 0; bucket < cb->buckets; bucket++)
1482ecbf456SJosef Bacik 			blk_rq_stat_init(&cpu_stat[bucket]);
14934dbad5dSOmar Sandoval 	}
15034dbad5dSOmar Sandoval 
151e11d80a8STejun Heo 	spin_lock_irqsave(&q->stats->lock, flags);
15234dbad5dSOmar Sandoval 	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
1537dfdbc73SBart Van Assche 	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
154e11d80a8STejun Heo 	spin_unlock_irqrestore(&q->stats->lock, flags);
15534dbad5dSOmar Sandoval }
15634dbad5dSOmar Sandoval 
/*
 * Unregister @cb from @q. QUEUE_FLAG_STATS is only cleared when no
 * callbacks remain and no one holds an accounting reference.
 */
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);

	/*
	 * Cancel the window timer outside the spinlock; del_timer_sync()
	 * waits for a running blk_stat_timer_fn() to finish. Note that
	 * concurrent RCU readers may still see cb until a grace period
	 * elapses — freeing is deferred via blk_stat_free_callback().
	 */
	del_timer_sync(&cb->timer);
}
17034dbad5dSOmar Sandoval 
blk_stat_free_callback_rcu(struct rcu_head * head)17134dbad5dSOmar Sandoval static void blk_stat_free_callback_rcu(struct rcu_head *head)
17234dbad5dSOmar Sandoval {
17334dbad5dSOmar Sandoval 	struct blk_stat_callback *cb;
17434dbad5dSOmar Sandoval 
17534dbad5dSOmar Sandoval 	cb = container_of(head, struct blk_stat_callback, rcu);
17634dbad5dSOmar Sandoval 	free_percpu(cb->cpu_stat);
17734dbad5dSOmar Sandoval 	kfree(cb->stat);
17834dbad5dSOmar Sandoval 	kfree(cb);
179cf43e6beSJens Axboe }
180cf43e6beSJens Axboe 
blk_stat_free_callback(struct blk_stat_callback * cb)18134dbad5dSOmar Sandoval void blk_stat_free_callback(struct blk_stat_callback *cb)
18234dbad5dSOmar Sandoval {
183a83b576cSJens Axboe 	if (cb)
18434dbad5dSOmar Sandoval 		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
18534dbad5dSOmar Sandoval }
18634dbad5dSOmar Sandoval 
blk_stat_disable_accounting(struct request_queue * q)18768497092SJens Axboe void blk_stat_disable_accounting(struct request_queue *q)
18868497092SJens Axboe {
18968497092SJens Axboe 	unsigned long flags;
19068497092SJens Axboe 
19168497092SJens Axboe 	spin_lock_irqsave(&q->stats->lock, flags);
19220de765fSChengming Zhou 	if (!--q->stats->accounting && list_empty(&q->stats->callbacks))
19368497092SJens Axboe 		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
19468497092SJens Axboe 	spin_unlock_irqrestore(&q->stats->lock, flags);
19568497092SJens Axboe }
19668497092SJens Axboe EXPORT_SYMBOL_GPL(blk_stat_disable_accounting);
19768497092SJens Axboe 
blk_stat_enable_accounting(struct request_queue * q)198b9147dd1SShaohua Li void blk_stat_enable_accounting(struct request_queue *q)
199b9147dd1SShaohua Li {
200e11d80a8STejun Heo 	unsigned long flags;
201e11d80a8STejun Heo 
202e11d80a8STejun Heo 	spin_lock_irqsave(&q->stats->lock, flags);
20320de765fSChengming Zhou 	if (!q->stats->accounting++ && list_empty(&q->stats->callbacks))
2047dfdbc73SBart Van Assche 		blk_queue_flag_set(QUEUE_FLAG_STATS, q);
205e11d80a8STejun Heo 	spin_unlock_irqrestore(&q->stats->lock, flags);
206b9147dd1SShaohua Li }
207f8232f29SOmar Sandoval EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);
208b9147dd1SShaohua Li 
blk_alloc_queue_stats(void)20934dbad5dSOmar Sandoval struct blk_queue_stats *blk_alloc_queue_stats(void)
21034dbad5dSOmar Sandoval {
21134dbad5dSOmar Sandoval 	struct blk_queue_stats *stats;
21234dbad5dSOmar Sandoval 
21334dbad5dSOmar Sandoval 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
21434dbad5dSOmar Sandoval 	if (!stats)
21534dbad5dSOmar Sandoval 		return NULL;
21634dbad5dSOmar Sandoval 
21734dbad5dSOmar Sandoval 	INIT_LIST_HEAD(&stats->callbacks);
21834dbad5dSOmar Sandoval 	spin_lock_init(&stats->lock);
21968497092SJens Axboe 	stats->accounting = 0;
22034dbad5dSOmar Sandoval 
22134dbad5dSOmar Sandoval 	return stats;
22234dbad5dSOmar Sandoval }
22334dbad5dSOmar Sandoval 
blk_free_queue_stats(struct blk_queue_stats * stats)22434dbad5dSOmar Sandoval void blk_free_queue_stats(struct blk_queue_stats *stats)
22534dbad5dSOmar Sandoval {
22634dbad5dSOmar Sandoval 	if (!stats)
22734dbad5dSOmar Sandoval 		return;
22834dbad5dSOmar Sandoval 
22934dbad5dSOmar Sandoval 	WARN_ON(!list_empty(&stats->callbacks));
23034dbad5dSOmar Sandoval 
23134dbad5dSOmar Sandoval 	kfree(stats);
232cf43e6beSJens Axboe }
233