/* /openbmc/linux/block/blk-rq-qos.c (revision c1c80384c8f47021a01a0cc42894a06bed2b801b) */
#include "blk-rq-qos.h"

/*
 * Increment 'v' if 'v' is below 'below'. Returns true if we succeeded,
 * false if 'v' + 1 would be bigger than 'below'.
 */
static bool atomic_inc_below(atomic_t *v, int below)
{
	int cur = atomic_read(v);

	for (;;) {
		int old;

		if (cur >= below)
			return false;
		old = atomic_cmpxchg(v, cur, cur + 1);
		if (old == cur)
			break;
		/* Somebody raced us; retry with the value they saw. */
		cur = old;
	}

	return true;
}

bool rq_wait_inc_below(struct rq_wait *rq_wait, int limit)
{
	return atomic_inc_below(&rq_wait->inflight, limit);
}
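
/*
 * Illustrative sketch, not part of this file: callers of
 * rq_wait_inc_below() typically pair it with the waitqueue embedded in
 * struct rq_wait, sleeping until an inflight slot frees up (blk-wbt
 * does roughly this). example_wait() and the 'limit' policy here are
 * hypothetical. The completion side would decrement rqw->inflight and
 * wake the queue, e.g. with wake_up_all(&rqw->wait).
 */
static void example_wait(struct rq_wait *rqw, int limit)
{
	DEFINE_WAIT(wait);

	if (rq_wait_inc_below(rqw, limit))
		return;

	do {
		prepare_to_wait_exclusive(&rqw->wait, &wait,
					  TASK_UNINTERRUPTIBLE);
		if (rq_wait_inc_below(rqw, limit))
			break;
		io_schedule();
	} while (1);

	finish_wait(&rqw->wait, &wait);
}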

/*
 * Let each policy undo its throttling state for a bio that will not
 * be turned into a request after all.
 */
void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->cleanup)
			rqos->ops->cleanup(rqos, bio);
	}
}

/* Run each policy's completion accounting for a finished request. */
void rq_qos_done(struct request_queue *q, struct request *rq)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->done)
			rqos->ops->done(rqos, rq);
	}
}

/* Notify each policy that a request is being issued to the device. */
void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->issue)
			rqos->ops->issue(rqos, rq);
	}
}

/* Notify each policy that a request is being requeued. */
void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->requeue)
			rqos->ops->requeue(rqos, rq);
	}
}

/*
 * Walk the policies at submission time; each one may sleep the caller
 * until it is allowed to proceed. 'lock' is the queue lock, passed
 * through so a policy can drop it while it sleeps.
 */
void rq_qos_throttle(struct request_queue *q, struct bio *bio,
		     spinlock_t *lock)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->throttle)
			rqos->ops->throttle(rqos, bio, lock);
	}
}

/*
 * Let each policy associate state from the bio (e.g. its cgroup) with
 * the request it has been bound to.
 */
void rq_qos_track(struct request_queue *q, struct request *rq, struct bio *bio)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->track)
			rqos->ops->track(rqos, rq, bio);
	}
}
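
/*
 * Illustrative sketch, not part of this file: a policy plugs into the
 * hooks above by embedding struct rq_qos, filling in a struct
 * rq_qos_ops, and linking itself onto q->rq_qos using the rq_qos_add()
 * and rq_wait_init() helpers from blk-rq-qos.h. Everything named
 * example_* below is hypothetical, and <linux/slab.h> is assumed for
 * kzalloc()/kfree().
 */
struct example_qos {
	struct rq_qos rqos;
	struct rq_wait rqw;
};

static void example_exit(struct rq_qos *rqos)
{
	kfree(container_of(rqos, struct example_qos, rqos));
}

static struct rq_qos_ops example_ops = {
	.exit	= example_exit,
	/* .throttle, .done, etc. are optional; NULL hooks are skipped. */
};

static int example_qos_init(struct request_queue *q)
{
	struct example_qos *eq = kzalloc(sizeof(*eq), GFP_KERNEL);

	if (!eq)
		return -ENOMEM;

	eq->rqos.id = RQ_QOS_WBT;	/* an existing id from enum rq_qos_id */
	eq->rqos.ops = &example_ops;
	eq->rqos.q = q;
	rq_wait_init(&eq->rqw);
	rq_qos_add(q, &eq->rqos);
	return 0;
}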

/*
 * Return true if we can't increase the depth further by scaling.
 */
bool rq_depth_calc_max_depth(struct rq_depth *rqd)
{
	unsigned int depth;
	bool ret = false;

	/*
	 * For QD=1 devices, this is a special case. It's important for those
	 * to have one request ready when one completes, so force a depth of
	 * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
	 * since the device can't have more than that in flight. If we're
	 * scaling down, then keep a setting of 1/1/1.
	 */
	if (rqd->queue_depth == 1) {
		if (rqd->scale_step > 0)
			rqd->max_depth = 1;
		else {
			rqd->max_depth = 2;
			ret = true;
		}
	} else {
		/*
		 * scale_step == 0 is our default state. If we have suffered
		 * latency spikes, step will be > 0, and we shrink the
		 * allowed write depths. If step is < 0, we're only doing
		 * writes, and we allow a temporarily higher depth to
		 * increase performance.
		 */
		depth = min_t(unsigned int, rqd->default_depth,
			      rqd->queue_depth);
		if (rqd->scale_step > 0)
			depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
		else if (rqd->scale_step < 0) {
			unsigned int maxd = 3 * rqd->queue_depth / 4;

			depth = 1 + ((depth - 1) << -rqd->scale_step);
			if (depth > maxd) {
				depth = maxd;
				ret = true;
			}
		}

		rqd->max_depth = depth;
	}

	return ret;
}
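
/*
 * Worked example of the scaling math above: with default_depth = 64
 * and queue_depth = 128, scale_step = 2 yields 1 + ((64 - 1) >> 2) = 16,
 * while scale_step = -1 yields 1 + ((64 - 1) << 1) = 127, which is then
 * clamped to maxd = 3 * 128 / 4 = 96 and makes the function return true.
 */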

void rq_depth_scale_up(struct rq_depth *rqd)
{
	/*
	 * Hit max in previous round, stop here
	 */
	if (rqd->scaled_max)
		return;

	rqd->scale_step--;

	rqd->scaled_max = rq_depth_calc_max_depth(rqd);
}

/*
 * Scale the queue depth down. If 'hard_throttle' is set, do it quicker,
 * since we had a latency violation.
 */
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
{
	/*
	 * Stop scaling down when we've hit the limit. This also prevents
	 * ->scale_step from going to crazy values, if the device can't
	 * keep up.
	 */
	if (rqd->max_depth == 1)
		return;

	if (rqd->scale_step < 0 && hard_throttle)
		rqd->scale_step = 0;
	else
		rqd->scale_step++;

	rqd->scaled_max = false;
	rq_depth_calc_max_depth(rqd);
}
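
/*
 * Illustrative sketch, not part of this file: a latency-driven policy
 * typically calls the two helpers above from a periodic callback,
 * depending on whether its last window of completions met the target.
 * The enum and example_adjust_depth() are hypothetical.
 */
enum example_lat_status {
	EXAMPLE_LAT_OK,		/* latencies on target */
	EXAMPLE_LAT_HIGH,	/* soft violation */
	EXAMPLE_LAT_EXCEEDED,	/* hard violation */
};

static void example_adjust_depth(struct rq_depth *rqd,
				 enum example_lat_status status)
{
	switch (status) {
	case EXAMPLE_LAT_OK:
		rq_depth_scale_up(rqd);		/* allow more inflight I/O */
		break;
	case EXAMPLE_LAT_EXCEEDED:
		rq_depth_scale_down(rqd, true);	/* clamp down quickly */
		break;
	default:
		rq_depth_scale_down(rqd, false); /* back off gently */
		break;
	}
}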

void rq_qos_exit(struct request_queue *q)
{
	while (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		q->rq_qos = rqos->next;
		rqos->ops->exit(rqos);
	}
}