/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */
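
/*
 * Overview: each cgroup gets a throtl_grp carrying bps and iops limits
 * for READ and WRITE. Usage is accounted against a rolling time slice
 * (throtl_slice). Bios that would exceed the budget are queued on the
 * group's bio lists, groups with pending bios are sorted on an rb-tree
 * service tree keyed by disptime (the estimated unthrottle time), and a
 * delayed work item dispatches queued bios once that time passes.
 */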

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice, after which the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

struct throtl_rb_root {
	struct rb_root rb;
	struct rb_node *left;		/* cached leftmost (smallest disptime) node */
	unsigned int count;		/* number of groups on the tree */
	unsigned long min_disptime;	/* disptime of the leftmost group */
};

#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_disptime = 0}

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

struct throtl_grp {
	/* List of throtl groups on the request queue */
	struct hlist_node tg_node;

	/* active throtl group service_tree member */
	struct rb_node rb_node;

	/*
	 * Dispatch time in jiffies. This is the estimated time when the
	 * group will unthrottle and be ready to dispatch more bios. It is
	 * used as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	struct blkio_group blkg;
	atomic_t ref;
	unsigned int flags;

	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Some throttle limits got updated for the group */
	bool limits_changed;
};

struct throtl_data
{
	/* List of throtl groups */
	struct hlist_head tg_list;

	/* service tree for active throtl groups */
	struct throtl_rb_root tg_service_tree;

	struct throtl_grp root_tg;
	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* Number of total undestroyed groups */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work throtl_work;

	atomic_t limits_changed;
};

enum tg_state_flags {
	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
};

#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
}									\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
}									\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
{									\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
}

THROTL_TG_FNS(on_rr);
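
/*
 * THROTL_TG_FNS(on_rr) expands to throtl_mark_tg_on_rr(),
 * throtl_clear_tg_on_rr() and throtl_tg_on_rr(), which set, clear and
 * test THROTL_TG_FLAG_on_rr in tg->flags respectively.
 */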

#define throtl_log_tg(td, tg, fmt, args...)				\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
				blkg_path(&(tg)->blkg), ##args);

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)

static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
{
	if (blkg)
		return container_of(blkg, struct throtl_grp, blkg);

	return NULL;
}

static inline int total_nr_queued(struct throtl_data *td)
{
	return td->nr_queued[0] + td->nr_queued[1];
}

static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
{
	atomic_inc(&tg->ref);
	return tg;
}

static void throtl_put_tg(struct throtl_grp *tg)
{
	BUG_ON(atomic_read(&tg->ref) <= 0);
	if (!atomic_dec_and_test(&tg->ref))
		return;
	kfree(tg);
}

static struct throtl_grp *throtl_find_alloc_tg(struct throtl_data *td,
			struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct throtl_grp *tg = NULL;
	void *key = td;
	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
	unsigned int major, minor;

	/*
	 * TODO: Speed up blkiocg_lookup_group() by maintaining a radix
	 * tree of blkgs (instead of traversing the hash list all the
	 * time).
	 */
	tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));

	/* Fill in device details for root group */
	if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		tg->blkg.dev = MKDEV(major, minor);
		goto done;
	}

	if (tg)
		goto done;

	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
	if (!tg)
		goto done;

	INIT_HLIST_NODE(&tg->tg_node);
	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);

	/*
	 * Take the initial reference that will be released on destroy.
	 * This can be thought of as a joint reference by the cgroup and
	 * the request queue, which will be dropped by either the request
	 * queue exit or the cgroup deletion path, depending on which
	 * exits first.
	 */
	atomic_set(&tg->ref, 1);

	/* Add group onto cgroup list */
	sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
	blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
				MKDEV(major, minor), BLKIO_POLICY_THROTL);

	tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
	tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
	tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
	tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);

	hlist_add_head(&tg->tg_node, &td->tg_list);
	td->nr_undestroyed_grps++;
done:
	return tg;
}

static struct throtl_grp *throtl_get_tg(struct throtl_data *td)
{
	struct cgroup *cgroup;
	struct throtl_grp *tg = NULL;

	rcu_read_lock();
	cgroup = task_cgroup(current, blkio_subsys_id);
	tg = throtl_find_alloc_tg(td, cgroup);
	if (!tg)
		tg = &td->root_tg;
	rcu_read_unlock();
	return tg;
}

static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_tg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

static void update_min_dispatch_time(struct throtl_rb_root *st)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(st);
	if (!tg)
		return;

	st->min_disptime = tg->disptime;
}

static void
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &st->rb);
}

static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	tg_service_tree_add(st, tg);
	throtl_mark_tg_on_rr(tg);
	st->count++;
}

static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!throtl_tg_on_rr(tg))
		__throtl_enqueue_tg(td, tg);
}

static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
	throtl_clear_tg_on_rr(tg);
}

static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (throtl_tg_on_rr(tg))
		__throtl_dequeue_tg(td, tg);
}

static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	/*
	 * If there are more bios pending, schedule more work.
	 */
	if (!total_nr_queued(td))
		return;

	BUG_ON(!st->count);

	update_min_dispatch_time(st);

	if (time_before_eq(st->min_disptime, jiffies))
		throtl_schedule_delayed_work(td->queue, 0);
	else
		throtl_schedule_delayed_work(td->queue,
				(st->min_disptime - jiffies));
}

static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

/* Determine if the previously allocated or extended slice is complete or not */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return 0;

	return 1;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps is unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if it has expired; a new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(td, tg, rw))
		return;

	/*
	 * A bio has been dispatched, so also adjust slice_end. It might
	 * happen that initially the cgroup limit was very low, resulting
	 * in a high slice_end, but later the limit was bumped up and the
	 * bio was dispatched sooner; then we need to reduce slice_end.
	 * A bogus high slice_end is bad because it does not allow a new
	 * slice to start.
	 */
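
	/*
	 * Illustrative trim arithmetic (assuming HZ=1000 and hence
	 * throtl_slice=100 jiffies): with bps=1000000 and 250 jiffies
	 * since slice_start, nr_slices = 250/100 = 2 and bytes_trim =
	 * 1000000 * 100 * 2 / 1000 = 200000, so up to 200000 bytes are
	 * subtracted from bytes_disp and slice_start advances by 200.
	 */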

	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}

static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value: since the minimum
	 * iops can be 1, jiffy_elapsed should be at most the equivalent
	 * of 1 second, as we will allow a dispatch after 1 second and by
	 * then the slice should have been trimmed.
	 */
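
	/*
	 * Illustrative example (assuming HZ=1000, throtl_slice=100):
	 * with iops=10 and 150 jiffies elapsed, jiffy_elapsed_rnd rounds
	 * up to 200, so io_allowed = 10 * 200 / 1000 = 2 bios so far in
	 * this slice.
	 */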

	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return 0;
}

static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;
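
	/*
	 * Illustrative example (assuming HZ=1000, throtl_slice=100):
	 * with bps=100000 and 150 jiffies elapsed, jiffy_elapsed_rnd
	 * rounds up to 200, so bytes_allowed = 100000 * 200 / 1000 =
	 * 20000 bytes.
	 */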

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time does not take into account the rounding up we
	 * did above. Add that time as well.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return 0;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
				struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the
	 * first bio queued in the group's bio list. So one should not be
	 * calling this function with a different bio if there are other
	 * bios queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If both bps and iops are unlimited (-1), BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/*
	 * If the previous slice expired, start a new one, otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice interval long since now.
	 */
	if (throtl_slice_used(td, tg, rw))
		throtl_start_new_slice(td, tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return 1;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(td, tg, rw, jiffies + max_wait);

	return 0;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	bool sync = bio->bi_rw & REQ_SYNC;

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	/*
	 * TODO: This will take blkg->stats_lock. Figure out a way
	 * to avoid this cost.
	 */
	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
}

static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
			struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	throtl_ref_get_tg(tg);
	tg->nr_queued[rw]++;
	td->nr_queued[rw]++;
	throtl_enqueue_tg(td, tg);
}

static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(td, tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(td, tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(td, tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(td, tg);
}

static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
				bool rw, struct bio_list *bl)
{
	struct bio *bio;

	bio = bio_list_pop(&tg->bio_lists[rw]);
	tg->nr_queued[rw]--;
	/* Drop bio reference on tg */
	throtl_put_tg(tg);

	BUG_ON(td->nr_queued[rw] <= 0);
	td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(td, tg, rw);
}

static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
				struct bio_list *bl)
{
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% reads and 25% writes */
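	/* With throtl_grp_quantum = 8 that is at most 6 reads and 2 writes. */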

	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;
	struct throtl_rb_root *st = &td->tg_service_tree;

	while (1) {
		tg = throtl_rb_first(st);

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(td, tg);

		nr_disp += throtl_dispatch_tg(td, tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1]) {
			tg_update_disptime(td, tg);
			throtl_enqueue_tg(td, tg);
		}

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

static void throtl_process_limit_change(struct throtl_data *td)
{
	struct throtl_grp *tg;
	struct hlist_node *pos, *n;

	if (!atomic_read(&td->limits_changed))
		return;

	throtl_log(td, "limit changed =%d", atomic_read(&td->limits_changed));

	/*
	 * Make sure updates from the throtl_update_blkio_group_read_bps()
	 * group of functions to tg->limits_changed are visible. We do not
	 * want the update to td->limits_changed to be visible while the
	 * update to tg->limits_changed is not yet visible on this cpu.
	 * Hence the read barrier.
	 */
	smp_rmb();

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		if (throtl_tg_on_rr(tg) && tg->limits_changed) {
			throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
				" riops=%u wiops=%u", tg->bps[READ],
				tg->bps[WRITE], tg->iops[READ],
				tg->iops[WRITE]);
			tg_update_disptime(td, tg);
			tg->limits_changed = false;
		}
	}

	smp_mb__before_atomic_dec();
	atomic_dec(&td->limits_changed);
	smp_mb__after_atomic_dec();
}

/* Dispatch throttled bios. Should be called without the queue lock held. */
static int throtl_dispatch(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;

	spin_lock_irq(q->queue_lock);

	throtl_process_limit_change(td);

	if (!total_nr_queued(td))
		goto out;

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%d read=%u write=%u",
			total_nr_queued(td), td->nr_queued[READ],
			td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

	if (nr_disp)
		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);
out:
	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to make sure
	 * they get dispatched immediately.
	 */
	if (nr_disp) {
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_unplug(q);
	}
	return nr_disp;
}

void blk_throtl_work(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					throtl_work.work);
	struct request_queue *q = td->queue;

	throtl_dispatch(q);
}

/* Call with queue lock held */
void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
{
	struct throtl_data *td = q->td;
	struct delayed_work *dwork = &td->throtl_work;

	if (total_nr_queued(td) > 0) {
		/*
		 * We might have a work item scheduled to be executed in
		 * the future. Cancel that and schedule a new one.
		 */
		__cancel_delayed_work(dwork);
		kblockd_schedule_delayed_work(q, dwork, delay);
		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
				delay, jiffies);
	}
}
EXPORT_SYMBOL(throtl_schedule_delayed_work);

static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	/* Something is wrong if we are trying to remove the same group twice */
	BUG_ON(hlist_unhashed(&tg->tg_node));

	hlist_del_init(&tg->tg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, the group can be destroyed.
	 */
	throtl_put_tg(tg);
	td->nr_undestroyed_grps--;
}

static void throtl_release_tgs(struct throtl_data *td)
{
	struct hlist_node *pos, *n;
	struct throtl_grp *tg;

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		/*
		 * If the cgroup removal path got to the blkio_group first
		 * and removed it from the cgroup list, then it will take
		 * care of destroying the throtl group as well.
		 */
		if (!blkiocg_del_blkio_group(&tg->blkg))
			throtl_destroy_tg(td, tg);
	}
}

static void throtl_td_free(struct throtl_data *td)
{
	kfree(td);
}

/*
 * Blk cgroup controller notification saying that the blkio_group object
 * is being delinked as the associated cgroup object is going away. That
 * also means that no new IO will come into this group. So get rid of this
 * group as soon as any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). "key" is the rcu
 * protected pointer. That means "key" is a valid throtl_data pointer as
 * long as we are under the rcu read lock.
 *
 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if the queue was going away, the cgroup
 * deletion path got to it first.
 */
void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
{
	unsigned long flags;
	struct throtl_data *td = key;

	spin_lock_irqsave(td->queue->queue_lock, flags);
	throtl_destroy_tg(td, tg_of_blkg(blkg));
	spin_unlock_irqrestore(td->queue->queue_lock, flags);
}

/*
 * For all update functions, "key" should be a valid pointer because these
 * update functions are called under blkcg_lock; that means the blkg is
 * valid and in turn "key" is valid. The queue exit path cannot race
 * because of blkcg_lock.
 *
 * We cannot take the queue lock in the update functions, as taking the
 * queue lock under blkcg_lock is not allowed. On other paths we take
 * blkcg_lock under queue_lock.
 */
static void throtl_update_blkio_group_read_bps(void *key,
				struct blkio_group *blkg, u64 read_bps)
{
	struct throtl_data *td = key;

	tg_of_blkg(blkg)->bps[READ] = read_bps;
	/* Make sure read_bps is updated before setting limits_changed */
	smp_wmb();
	tg_of_blkg(blkg)->limits_changed = true;

	/* Make sure tg->limits_changed is updated before td->limits_changed */
	smp_mb__before_atomic_inc();
	atomic_inc(&td->limits_changed);
	smp_mb__after_atomic_inc();

	/* Schedule a work now to process the limit change */
	throtl_schedule_delayed_work(td->queue, 0);
}

static void throtl_update_blkio_group_write_bps(void *key,
				struct blkio_group *blkg, u64 write_bps)
{
	struct throtl_data *td = key;

	tg_of_blkg(blkg)->bps[WRITE] = write_bps;
	smp_wmb();
	tg_of_blkg(blkg)->limits_changed = true;
	smp_mb__before_atomic_inc();
	atomic_inc(&td->limits_changed);
	smp_mb__after_atomic_inc();
	throtl_schedule_delayed_work(td->queue, 0);
}

static void throtl_update_blkio_group_read_iops(void *key,
			struct blkio_group *blkg, unsigned int read_iops)
{
	struct throtl_data *td = key;

	tg_of_blkg(blkg)->iops[READ] = read_iops;
	smp_wmb();
	tg_of_blkg(blkg)->limits_changed = true;
	smp_mb__before_atomic_inc();
	atomic_inc(&td->limits_changed);
	smp_mb__after_atomic_inc();
	throtl_schedule_delayed_work(td->queue, 0);
}

static void throtl_update_blkio_group_write_iops(void *key,
			struct blkio_group *blkg, unsigned int write_iops)
{
	struct throtl_data *td = key;

	tg_of_blkg(blkg)->iops[WRITE] = write_iops;
	smp_wmb();
	tg_of_blkg(blkg)->limits_changed = true;
	smp_mb__before_atomic_inc();
	atomic_inc(&td->limits_changed);
	smp_mb__after_atomic_inc();
	throtl_schedule_delayed_work(td->queue, 0);
}

void throtl_shutdown_timer_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->throtl_work);
}

static struct blkio_policy_type blkio_policy_throtl = {
	.ops = {
		.blkio_unlink_group_fn = throtl_unlink_blkio_group,
		.blkio_update_group_read_bps_fn =
					throtl_update_blkio_group_read_bps,
		.blkio_update_group_write_bps_fn =
					throtl_update_blkio_group_write_bps,
		.blkio_update_group_read_iops_fn =
					throtl_update_blkio_group_read_iops,
		.blkio_update_group_write_iops_fn =
					throtl_update_blkio_group_write_iops,
	},
	.plid = BLKIO_POLICY_THROTL,
};

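/*
 * Main throttling entry point for incoming bios. Returns 0 in all cases;
 * if the bio was throttled and queued for later dispatch, *biop is set to
 * NULL so that the caller does not submit it now.
 */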
int blk_throtl_bio(struct request_queue *q, struct bio **biop)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	struct bio *bio = *biop;
	bool rw = bio_data_dir(bio), update_disptime = true;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		return 0;
	}

	spin_lock_irq(q->queue_lock);
	tg = throtl_get_tg(td);

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in the same
		 * direction. No need to update the dispatch time.
		 * Still update the disptime if rate limits on this
		 * group were changed.
		 */
		if (!tg->limits_changed)
			update_disptime = false;
		else
			tg->limits_changed = false;

		goto queue_bio;
	}

	/* Bio is within the rate limit of the group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);
		goto out;
	}

queue_bio:
	throtl_log_tg(td, tg, "[%c] bio. bdisp=%u sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	throtl_add_bio_tg(q->td, tg, bio);
	*biop = NULL;

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);
	}

out:
	spin_unlock_irq(q->queue_lock);
	return 0;
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	struct throtl_grp *tg;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_HLIST_HEAD(&td->tg_list);
	td->tg_service_tree = THROTL_RB_ROOT;
	atomic_set(&td->limits_changed, 0);

	/* Init root group */
	tg = &td->root_tg;
	INIT_HLIST_NODE(&tg->tg_node);
	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);

	/* Practically unlimited BW */
	tg->bps[0] = tg->bps[1] = -1;
	tg->iops[0] = tg->iops[1] = -1;

	/*
	 * Set the root group reference to 2. One reference will be dropped
	 * when all groups on tg_list are being deleted during queue exit.
	 * The other reference will remain, as we don't want to delete this
	 * group: it is statically allocated and gets destroyed when
	 * throtl_data goes away.
	 */
	atomic_set(&tg->ref, 2);
	hlist_add_head(&tg->tg_node, &td->tg_list);
	td->nr_undestroyed_grps++;

	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

	rcu_read_lock();
	blkiocg_add_blkio_group(&blkio_root_cgroup, &tg->blkg, (void *)td,
					0, BLKIO_POLICY_THROTL);
	rcu_read_unlock();

	/* Attach throtl data to request queue */
	td->queue = q;
	q->td = td;
	return 0;
}

void blk_throtl_exit(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	bool wait = false;

	BUG_ON(!td);

	throtl_shutdown_timer_wq(q);

	spin_lock_irq(q->queue_lock);
	throtl_release_tgs(td);

	/* If there are other groups */
	if (td->nr_undestroyed_grps > 0)
		wait = true;

	spin_unlock_irq(q->queue_lock);

	/*
	 * Wait for tg->blkg->key accessors to exit their grace periods.
	 * Do this wait only if there are other undestroyed groups out
	 * there (other than the root group). This can happen if the cgroup
	 * deletion path claimed the responsibility of cleaning up a group
	 * before the queue cleanup code got to the group.
	 *
	 * Do not call synchronize_rcu() unconditionally as there are
	 * drivers which create/delete request queues hundreds of times
	 * during scan/boot and synchronize_rcu() can take significant
	 * time and slow down boot.
	 */
	if (wait)
		synchronize_rcu();

	/*
	 * Just to be safe: if somebody updated the limits through the
	 * cgroup after the previous flush and another work item got
	 * queued, cancel it.
	 */
	throtl_shutdown_timer_wq(q);
	throtl_td_free(td);
}

static int __init throtl_init(void)
{
	blkio_policy_register(&blkio_policy_throtl);
	return 0;
}

module_init(throtl_init);