xref: /openbmc/linux/block/blk-stat.c (revision a886001c2da8dd02357d0d336ddb021903347f89)
13dcf60bcSChristoph Hellwig // SPDX-License-Identifier: GPL-2.0
2cf43e6beSJens Axboe /*
3cf43e6beSJens Axboe  * Block stat tracking code
4cf43e6beSJens Axboe  *
5cf43e6beSJens Axboe  * Copyright (C) 2016 Jens Axboe
6cf43e6beSJens Axboe  */
7cf43e6beSJens Axboe #include <linux/kernel.h>
834dbad5dSOmar Sandoval #include <linux/rculist.h>
9cf43e6beSJens Axboe #include <linux/blk-mq.h>
10cf43e6beSJens Axboe 
11cf43e6beSJens Axboe #include "blk-stat.h"
12cf43e6beSJens Axboe #include "blk-mq.h"
13b9147dd1SShaohua Li #include "blk.h"
14cf43e6beSJens Axboe 
/*
 * Per-queue stat state: the set of attached stat callbacks plus a
 * reference count of users that force-enable request accounting.
 */
struct blk_queue_stats {
	struct list_head callbacks;	/* attached blk_stat_callbacks; RCU-traversed, lock-modified */
	spinlock_t lock;		/* protects callbacks list and accounting */
	int accounting;			/* nr of blk_stat_enable_accounting() users */
};
2034dbad5dSOmar Sandoval 
212ecbf456SJosef Bacik void blk_rq_stat_init(struct blk_rq_stat *stat)
2234dbad5dSOmar Sandoval {
2334dbad5dSOmar Sandoval 	stat->min = -1ULL;
2434dbad5dSOmar Sandoval 	stat->max = stat->nr_samples = stat->mean = 0;
25eca8b53aSShaohua Li 	stat->batch = 0;
2634dbad5dSOmar Sandoval }
2734dbad5dSOmar Sandoval 
/* src is a per-cpu stat, mean isn't initialized */
/*
 * Fold @src into the aggregate @dst: merge min/max and recompute the
 * running mean.  @src's samples are carried in src->batch (a plain sum,
 * not yet divided), so the new mean is
 * (src->batch + dst->mean * dst->nr_samples) / total_samples.
 */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	/* nothing sampled on this CPU; leave dst untouched */
	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	/* weighted mean: dst->mean weighted by its sample count, plus raw batch */
	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
				dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}
42cf43e6beSJens Axboe 
432ecbf456SJosef Bacik void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
44cf43e6beSJens Axboe {
4534dbad5dSOmar Sandoval 	stat->min = min(stat->min, value);
4634dbad5dSOmar Sandoval 	stat->max = max(stat->max, value);
47cf43e6beSJens Axboe 	stat->batch += value;
48eca8b53aSShaohua Li 	stat->nr_samples++;
49cf43e6beSJens Axboe }
50cf43e6beSJens Axboe 
/*
 * Account a completed request with every active stat callback attached to
 * its queue.  @now is the completion timestamp in nanoseconds.
 */
void blk_stat_add(struct request *rq, u64 now)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket, cpu;
	u64 value;

	/* guard against clock skew: never account a negative duration */
	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

	/* throttling stats only track actual data transfers */
	if (req_op(rq) == REQ_OP_READ || req_op(rq) == REQ_OP_WRITE)
		blk_throtl_stat_add(rq, value);

	/* callbacks list is RCU-protected; get_cpu() pins us for per-cpu access */
	rcu_read_lock();
	cpu = get_cpu();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		/* a negative bucket means this callback ignores the request */
		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
		blk_rq_stat_add(stat, value);
	}
	put_cpu();
	rcu_read_unlock();
}
8034dbad5dSOmar Sandoval 
81e99e88a9SKees Cook static void blk_stat_timer_fn(struct timer_list *t)
8234dbad5dSOmar Sandoval {
83e99e88a9SKees Cook 	struct blk_stat_callback *cb = from_timer(cb, t, timer);
8434dbad5dSOmar Sandoval 	unsigned int bucket;
8534dbad5dSOmar Sandoval 	int cpu;
8634dbad5dSOmar Sandoval 
8734dbad5dSOmar Sandoval 	for (bucket = 0; bucket < cb->buckets; bucket++)
882ecbf456SJosef Bacik 		blk_rq_stat_init(&cb->stat[bucket]);
8934dbad5dSOmar Sandoval 
9034dbad5dSOmar Sandoval 	for_each_online_cpu(cpu) {
9134dbad5dSOmar Sandoval 		struct blk_rq_stat *cpu_stat;
9234dbad5dSOmar Sandoval 
9334dbad5dSOmar Sandoval 		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
9434dbad5dSOmar Sandoval 		for (bucket = 0; bucket < cb->buckets; bucket++) {
952ecbf456SJosef Bacik 			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
962ecbf456SJosef Bacik 			blk_rq_stat_init(&cpu_stat[bucket]);
97cf43e6beSJens Axboe 		}
98cf43e6beSJens Axboe 	}
99cf43e6beSJens Axboe 
10034dbad5dSOmar Sandoval 	cb->timer_fn(cb);
101cf43e6beSJens Axboe }
102cf43e6beSJens Axboe 
10334dbad5dSOmar Sandoval struct blk_stat_callback *
10434dbad5dSOmar Sandoval blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
105a37244e4SStephen Bates 			int (*bucket_fn)(const struct request *),
10634dbad5dSOmar Sandoval 			unsigned int buckets, void *data)
107cf43e6beSJens Axboe {
10834dbad5dSOmar Sandoval 	struct blk_stat_callback *cb;
10934dbad5dSOmar Sandoval 
11034dbad5dSOmar Sandoval 	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
11134dbad5dSOmar Sandoval 	if (!cb)
11234dbad5dSOmar Sandoval 		return NULL;
11334dbad5dSOmar Sandoval 
11434dbad5dSOmar Sandoval 	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
11534dbad5dSOmar Sandoval 				 GFP_KERNEL);
11634dbad5dSOmar Sandoval 	if (!cb->stat) {
11734dbad5dSOmar Sandoval 		kfree(cb);
11834dbad5dSOmar Sandoval 		return NULL;
11934dbad5dSOmar Sandoval 	}
12034dbad5dSOmar Sandoval 	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
12134dbad5dSOmar Sandoval 				      __alignof__(struct blk_rq_stat));
12234dbad5dSOmar Sandoval 	if (!cb->cpu_stat) {
12334dbad5dSOmar Sandoval 		kfree(cb->stat);
12434dbad5dSOmar Sandoval 		kfree(cb);
12534dbad5dSOmar Sandoval 		return NULL;
12634dbad5dSOmar Sandoval 	}
12734dbad5dSOmar Sandoval 
12834dbad5dSOmar Sandoval 	cb->timer_fn = timer_fn;
12934dbad5dSOmar Sandoval 	cb->bucket_fn = bucket_fn;
13034dbad5dSOmar Sandoval 	cb->data = data;
13134dbad5dSOmar Sandoval 	cb->buckets = buckets;
132e99e88a9SKees Cook 	timer_setup(&cb->timer, blk_stat_timer_fn, 0);
13334dbad5dSOmar Sandoval 
13434dbad5dSOmar Sandoval 	return cb;
13534dbad5dSOmar Sandoval }
13634dbad5dSOmar Sandoval 
13734dbad5dSOmar Sandoval void blk_stat_add_callback(struct request_queue *q,
13834dbad5dSOmar Sandoval 			   struct blk_stat_callback *cb)
13934dbad5dSOmar Sandoval {
14034dbad5dSOmar Sandoval 	unsigned int bucket;
141e11d80a8STejun Heo 	unsigned long flags;
14234dbad5dSOmar Sandoval 	int cpu;
14334dbad5dSOmar Sandoval 
14434dbad5dSOmar Sandoval 	for_each_possible_cpu(cpu) {
14534dbad5dSOmar Sandoval 		struct blk_rq_stat *cpu_stat;
14634dbad5dSOmar Sandoval 
14734dbad5dSOmar Sandoval 		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
14834dbad5dSOmar Sandoval 		for (bucket = 0; bucket < cb->buckets; bucket++)
1492ecbf456SJosef Bacik 			blk_rq_stat_init(&cpu_stat[bucket]);
15034dbad5dSOmar Sandoval 	}
15134dbad5dSOmar Sandoval 
152e11d80a8STejun Heo 	spin_lock_irqsave(&q->stats->lock, flags);
15334dbad5dSOmar Sandoval 	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
1547dfdbc73SBart Van Assche 	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
155e11d80a8STejun Heo 	spin_unlock_irqrestore(&q->stats->lock, flags);
15634dbad5dSOmar Sandoval }
15734dbad5dSOmar Sandoval 
/*
 * Detach @cb from @q.  After return, no timer is running and the caller
 * may free @cb (readers still inside an RCU section are handled by the
 * RCU-deferred free in blk_stat_free_callback()).
 */
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	list_del_rcu(&cb->list);
	/* only stop collecting if no callbacks and no accounting users remain */
	if (list_empty(&q->stats->callbacks) && !q->stats->accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);

	/* wait for a concurrently executing timer_fn before caller frees cb */
	del_timer_sync(&cb->timer);
}
17134dbad5dSOmar Sandoval 
17234dbad5dSOmar Sandoval static void blk_stat_free_callback_rcu(struct rcu_head *head)
17334dbad5dSOmar Sandoval {
17434dbad5dSOmar Sandoval 	struct blk_stat_callback *cb;
17534dbad5dSOmar Sandoval 
17634dbad5dSOmar Sandoval 	cb = container_of(head, struct blk_stat_callback, rcu);
17734dbad5dSOmar Sandoval 	free_percpu(cb->cpu_stat);
17834dbad5dSOmar Sandoval 	kfree(cb->stat);
17934dbad5dSOmar Sandoval 	kfree(cb);
180cf43e6beSJens Axboe }
181cf43e6beSJens Axboe 
18234dbad5dSOmar Sandoval void blk_stat_free_callback(struct blk_stat_callback *cb)
18334dbad5dSOmar Sandoval {
184a83b576cSJens Axboe 	if (cb)
18534dbad5dSOmar Sandoval 		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
18634dbad5dSOmar Sandoval }
18734dbad5dSOmar Sandoval 
18868497092SJens Axboe void blk_stat_disable_accounting(struct request_queue *q)
18968497092SJens Axboe {
19068497092SJens Axboe 	unsigned long flags;
19168497092SJens Axboe 
19268497092SJens Axboe 	spin_lock_irqsave(&q->stats->lock, flags);
19368497092SJens Axboe 	if (!--q->stats->accounting)
19468497092SJens Axboe 		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
19568497092SJens Axboe 	spin_unlock_irqrestore(&q->stats->lock, flags);
19668497092SJens Axboe }
19768497092SJens Axboe EXPORT_SYMBOL_GPL(blk_stat_disable_accounting);
19868497092SJens Axboe 
199b9147dd1SShaohua Li void blk_stat_enable_accounting(struct request_queue *q)
200b9147dd1SShaohua Li {
201e11d80a8STejun Heo 	unsigned long flags;
202e11d80a8STejun Heo 
203e11d80a8STejun Heo 	spin_lock_irqsave(&q->stats->lock, flags);
20468497092SJens Axboe 	if (!q->stats->accounting++)
2057dfdbc73SBart Van Assche 		blk_queue_flag_set(QUEUE_FLAG_STATS, q);
206e11d80a8STejun Heo 	spin_unlock_irqrestore(&q->stats->lock, flags);
207b9147dd1SShaohua Li }
208f8232f29SOmar Sandoval EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);
209b9147dd1SShaohua Li 
21034dbad5dSOmar Sandoval struct blk_queue_stats *blk_alloc_queue_stats(void)
21134dbad5dSOmar Sandoval {
21234dbad5dSOmar Sandoval 	struct blk_queue_stats *stats;
21334dbad5dSOmar Sandoval 
21434dbad5dSOmar Sandoval 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
21534dbad5dSOmar Sandoval 	if (!stats)
21634dbad5dSOmar Sandoval 		return NULL;
21734dbad5dSOmar Sandoval 
21834dbad5dSOmar Sandoval 	INIT_LIST_HEAD(&stats->callbacks);
21934dbad5dSOmar Sandoval 	spin_lock_init(&stats->lock);
22068497092SJens Axboe 	stats->accounting = 0;
22134dbad5dSOmar Sandoval 
22234dbad5dSOmar Sandoval 	return stats;
22334dbad5dSOmar Sandoval }
22434dbad5dSOmar Sandoval 
22534dbad5dSOmar Sandoval void blk_free_queue_stats(struct blk_queue_stats *stats)
22634dbad5dSOmar Sandoval {
22734dbad5dSOmar Sandoval 	if (!stats)
22834dbad5dSOmar Sandoval 		return;
22934dbad5dSOmar Sandoval 
23034dbad5dSOmar Sandoval 	WARN_ON(!list_empty(&stats->callbacks));
23134dbad5dSOmar Sandoval 
23234dbad5dSOmar Sandoval 	kfree(stats);
233cf43e6beSJens Axboe }
23448b5c1fbSJens Axboe 
/*
 * Lazily allocate @q->poll_stat and hook up the poll stat callback.
 * cmpxchg() arbitrates concurrent callers: only the winner installs its
 * allocation and registers q->poll_cb; losers free their copy.
 *
 * NOTE(review): the return values look inverted — true means "someone
 * else already installed poll_stat", false covers both allocation
 * failure and the successful-install path.  Verify against the caller
 * before relying on the boolean's meaning.
 */
bool blk_stats_alloc_enable(struct request_queue *q)
{
	struct blk_rq_stat *poll_stat;

	poll_stat = kcalloc(BLK_MQ_POLL_STATS_BKTS, sizeof(*poll_stat),
				GFP_ATOMIC);
	if (!poll_stat)
		return false;

	/* lost the race: another context installed poll_stat first */
	if (cmpxchg(&q->poll_stat, NULL, poll_stat) != NULL) {
		kfree(poll_stat);
		return true;
	}

	blk_stat_add_callback(q, q->poll_cb);
	return false;
}
252