// SPDX-License-Identifier: GPL-2.0
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	bool enable_accounting;
};

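/*
 * Reset a stat bucket. ->min starts at -1ULL (i.e. U64_MAX) so the first
 * sample always replaces it; everything else starts at zero.
 */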
void blk_rq_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = 0;
}

/*
 * Fold a per-cpu stat (src) into an aggregate (dst). The per-cpu side never
 * initializes ->mean; it only accumulates samples in ->batch, and the
 * running mean is computed here.
 */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
				dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}

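/*
 * Record one sample: track min/max and accumulate the value into ->batch;
 * the mean is only folded in later by blk_rq_stat_sum().
 */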
void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);
	stat->batch += value;
	stat->nr_samples++;
}

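/*
 * Called at request completion time. Computes the I/O duration from
 * io_start_time_ns, feeds it to the throttling stats, and adds a sample to
 * the per-cpu bucket of every active callback on the queue.
 */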
void blk_stat_add(struct request *rq, u64 now)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket, cpu;
	u64 value;

	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

	blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	cpu = get_cpu();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
		blk_rq_stat_add(stat, value);
	}
	put_cpu();
	rcu_read_unlock();
}

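/*
 * Timer expiry: fold each online CPU's buckets into the aggregate ->stat
 * array, reset the per-cpu copies, and hand the finished window to the
 * user's timer_fn.
 */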
static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = from_timer(cb, t, timer);
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_rq_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_rq_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

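/*
 * Allocate a callback that buckets completed requests via bucket_fn (a
 * negative return discards the sample) and reports the aggregated buckets
 * through timer_fn once the timer is armed. A typical caller (the my_*
 * names are hypothetical) might do:
 *
 *	cb = blk_stat_alloc_callback(my_timer_fn, my_bucket_fn, nbuckets, data);
 *	if (cb) {
 *		blk_stat_add_callback(q, cb);
 *		blk_stat_activate_msecs(cb, 100);
 *	}
 *
 * blk_stat_activate_msecs() (see blk-stat.h) arms the timer so that
 * samples start being collected.
 */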
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	timer_setup(&cb->timer, blk_stat_timer_fn, 0);

	return cb;
}

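/*
 * Register a callback on a queue: zero its per-cpu buckets, publish it on
 * the RCU-protected callback list, and set QUEUE_FLAG_STATS so completions
 * start calling blk_stat_add().
 */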
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	unsigned long flags;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_rq_stat_init(&cpu_stat[bucket]);
	}

	spin_lock_irqsave(&q->stats->lock, flags);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}

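/*
 * Unregister a callback. QUEUE_FLAG_STATS is only cleared if no other
 * callback remains and accounting hasn't been enabled permanently. The
 * timer is cancelled synchronously, but RCU readers may still hold a
 * reference, hence the RCU-deferred free below.
 */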
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);

	del_timer_sync(&cb->timer);
}

static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}

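/*
 * Permanently enable stats accounting on a queue: once this is set,
 * QUEUE_FLAG_STATS stays set even after the last callback is removed (see
 * blk_stat_remove_callback() above).
 */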
void blk_stat_enable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	q->stats->enable_accounting = true;
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);

struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->enable_accounting = false;

	return stats;
}

void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}

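/*
 * Lazily allocate the poll statistics buckets on first use. This can be
 * called from completion context, hence GFP_ATOMIC. The cmpxchg() resolves
 * the race between concurrent first users: the loser frees its allocation
 * and returns true, since the winner has already set everything up.
 * Returns false only if the allocation failed.
 */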
bool blk_stats_alloc_enable(struct request_queue *q)
{
	struct blk_rq_stat *poll_stat;

	poll_stat = kcalloc(BLK_MQ_POLL_STATS_BKTS, sizeof(*poll_stat),
				GFP_ATOMIC);
	if (!poll_stat)
		return false;

	if (cmpxchg(&q->poll_stat, NULL, poll_stat) != NULL) {
		kfree(poll_stat);
		return true;
	}

	blk_stat_add_callback(q, q->poll_cb);
	return true;
}