xref: /openbmc/linux/block/blk-wbt.c (revision a7905043)
1 /*
2  * buffered writeback throttling. loosely based on CoDel. We can't drop
3  * packets for IO scheduling, so the logic is something like this:
4  *
5  * - Monitor latencies in a defined window of time.
6  * - If the minimum latency in the above window exceeds some target, increment
7  *   scaling step and scale down queue depth by a factor of 2x. The monitoring
8  *   window is then shrunk to 100 / sqrt(scaling step + 1) (see example below).
9  * - For any window where we don't have solid data on what the latencies
10  *   look like, retain status quo.
11  * - If latencies look good, decrement scaling step.
12  * - If we're only doing writes, allow the scaling step to go negative. This
13  *   will temporarily boost write performance, snapping back to a stable
14  *   scaling step of 0 if reads show up or the heavy writers finish. Unlike
15  *   positive scaling steps where we shrink the monitoring window, a negative
16  *   scaling step retains the default step==0 window size.
17  *
18  * Copyright (C) 2016 Jens Axboe
19  *
20  */
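/*
 * Illustration (not part of the original comment): with the default 100msec
 * window, the shrink formula above works out to roughly
 *
 *	scale_step 1 -> 100 / sqrt(2) ~= 71msec
 *	scale_step 2 -> 100 / sqrt(3) ~= 58msec
 *	scale_step 3 -> 100 / sqrt(4)  = 50msec
 *
 * so the deeper we throttle, the more often we re-evaluate latencies.
 */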
21 #include <linux/kernel.h>
22 #include <linux/blk_types.h>
23 #include <linux/slab.h>
24 #include <linux/backing-dev.h>
25 #include <linux/swap.h>
26 
27 #include "blk-wbt.h"
28 #include "blk-rq-qos.h"
29 
30 #define CREATE_TRACE_POINTS
31 #include <trace/events/wbt.h>
32 
33 static inline void wbt_clear_state(struct request *rq)
34 {
35 	rq->wbt_flags = 0;
36 }
37 
38 static inline enum wbt_flags wbt_flags(struct request *rq)
39 {
40 	return rq->wbt_flags;
41 }
42 
43 static inline bool wbt_is_tracked(struct request *rq)
44 {
45 	return rq->wbt_flags & WBT_TRACKED;
46 }
47 
48 static inline bool wbt_is_read(struct request *rq)
49 {
50 	return rq->wbt_flags & WBT_READ;
51 }
52 
53 enum {
54 	/*
55 	 * Default setting; we'll scale up (to 75% of QD max) or down (min 1)
56 	 * from here depending on device stats
57 	 */
58 	RWB_DEF_DEPTH	= 16,
59 
60 	/*
61 	 * 100msec window
62 	 */
63 	RWB_WINDOW_NSEC		= 100 * 1000 * 1000ULL,
64 
65 	/*
66 	 * Disregard stats, if we don't meet this minimum
67 	 */
68 	RWB_MIN_WRITE_SAMPLES	= 3,
69 
70 	/*
71 	 * If we have this number of consecutive windows with not enough
72 	 * information to scale up or down, scale up.
73 	 */
74 	RWB_UNKNOWN_BUMP	= 5,
75 };
76 
77 static inline bool rwb_enabled(struct rq_wb *rwb)
78 {
79 	return rwb && rwb->wb_normal != 0;
80 }
81 
82 static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
83 {
84 	if (rwb_enabled(rwb)) {
85 		const unsigned long cur = jiffies;
86 
87 		if (cur != *var)
88 			*var = cur;
89 	}
90 }
91 
92 /*
93  * If a task was rate throttled in balance_dirty_pages() within the last
94  * second or so, use that to indicate a higher cleaning rate.
95  */
96 static bool wb_recent_wait(struct rq_wb *rwb)
97 {
98 	struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;
99 
100 	return time_before(jiffies, wb->dirty_sleep + HZ);
101 }
102 
103 static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
104 					  enum wbt_flags wb_acct)
105 {
106 	if (wb_acct & WBT_KSWAPD)
107 		return &rwb->rq_wait[WBT_RWQ_KSWAPD];
108 	else if (wb_acct & WBT_DISCARD)
109 		return &rwb->rq_wait[WBT_RWQ_DISCARD];
110 
111 	return &rwb->rq_wait[WBT_RWQ_BG];
112 }
113 
114 static void rwb_wake_all(struct rq_wb *rwb)
115 {
116 	int i;
117 
118 	for (i = 0; i < WBT_NUM_RWQ; i++) {
119 		struct rq_wait *rqw = &rwb->rq_wait[i];
120 
121 		if (waitqueue_active(&rqw->wait))
122 			wake_up_all(&rqw->wait);
123 	}
124 }
125 
126 static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
127 {
128 	struct rq_wb *rwb = RQWB(rqos);
129 	struct rq_wait *rqw;
130 	int inflight, limit;
131 
132 	if (!(wb_acct & WBT_TRACKED))
133 		return;
134 
135 	rqw = get_rq_wait(rwb, wb_acct);
136 	inflight = atomic_dec_return(&rqw->inflight);
137 
138 	/*
139 	 * wbt got disabled with IO in flight. Wake up any potential
140 	 * waiters; we don't have to do more than that.
141 	 */
142 	if (unlikely(!rwb_enabled(rwb))) {
143 		rwb_wake_all(rwb);
144 		return;
145 	}
146 
147 	/*
148 	 * For discards, our limit is always the background. For writes, if
149 	 * the device does write back caching, drop further down before we
150 	 * wake people up.
151 	 */
152 	if (wb_acct & WBT_DISCARD)
153 		limit = rwb->wb_background;
154 	else if (rwb->wc && !wb_recent_wait(rwb))
155 		limit = 0;
156 	else
157 		limit = rwb->wb_normal;
158 
159 	/*
160 	 * Don't wake anyone up if we are above the normal limit.
161 	 */
162 	if (inflight && inflight >= limit)
163 		return;
164 
165 	if (waitqueue_active(&rqw->wait)) {
166 		int diff = limit - inflight;
167 
168 		if (!inflight || diff >= rwb->wb_background / 2)
169 			wake_up_all(&rqw->wait);
170 	}
171 }
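/*
 * Illustrative numbers for the wake-up logic above, assuming a max_depth of
 * 16 (so wb_normal == 8 and wb_background == 4) and limit == wb_normal: a
 * completion that drops inflight to 7 does not wake waiters, since
 * limit - inflight == 1 is below wb_background / 2, but one that drops it
 * to 6 does. Wake-ups are thus batched until some headroom has built up,
 * rather than issued on every completion near the limit.
 */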
172 
173 /*
174  * Called on completion of a request. Note that it's also called when
175  * a request is merged, at which point the request gets freed.
176  */
177 static void wbt_done(struct rq_qos *rqos, struct request *rq)
178 {
179 	struct rq_wb *rwb = RQWB(rqos);
180 
181 	if (!wbt_is_tracked(rq)) {
182 		if (rwb->sync_cookie == rq) {
183 			rwb->sync_issue = 0;
184 			rwb->sync_cookie = NULL;
185 		}
186 
187 		if (wbt_is_read(rq))
188 			wb_timestamp(rwb, &rwb->last_comp);
189 	} else {
190 		WARN_ON_ONCE(rq == rwb->sync_cookie);
191 		__wbt_done(rqos, wbt_flags(rq));
192 	}
193 	wbt_clear_state(rq);
194 }
195 
196 static inline bool stat_sample_valid(struct blk_rq_stat *stat)
197 {
198 	/*
199 	 * We need at least one read sample, and a minimum of
200 	 * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
201 	 * that it's writes impacting us, and not just some sole read on
202 	 * a device that is in a lower power state.
203 	 */
204 	return (stat[READ].nr_samples >= 1 &&
205 		stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
206 }
207 
208 static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
209 {
210 	u64 now, issue = READ_ONCE(rwb->sync_issue);
211 
212 	if (!issue || !rwb->sync_cookie)
213 		return 0;
214 
215 	now = ktime_to_ns(ktime_get());
216 	return now - issue;
217 }
218 
219 enum {
220 	LAT_OK = 1,
221 	LAT_UNKNOWN,
222 	LAT_UNKNOWN_WRITES,
223 	LAT_EXCEEDED,
224 };
225 
226 static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
227 {
228 	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
229 	struct rq_depth *rqd = &rwb->rq_depth;
230 	u64 thislat;
231 
232 	/*
233 	 * If our stored sync issue exceeds the window size, or it
234 	 * exceeds our min target AND we haven't logged any entries,
235 	 * flag the latency as exceeded. wbt works off completion latencies,
236 	 * but for a flooded device, a single sync IO can take a long time
237 	 * to complete after being issued. If this time exceeds our
238 	 * monitoring window AND we didn't see any other completions in that
239  * window, then count that sync IO as a violation of the latency target.
240 	 */
241 	thislat = rwb_sync_issue_lat(rwb);
242 	if (thislat > rwb->cur_win_nsec ||
243 	    (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
244 		trace_wbt_lat(bdi, thislat);
245 		return LAT_EXCEEDED;
246 	}
247 
248 	/*
249 	 * No read/write mix, if stat isn't valid
250 	 */
251 	if (!stat_sample_valid(stat)) {
252 		/*
253 		 * If we had writes in this stat window and the window is
254 		 * current, we're only doing writes. If a task recently
255 		 * waited or still has writes in flight, consider us doing
256 		 * just writes as well.
257 		 */
258 		if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
259 		    wbt_inflight(rwb))
260 			return LAT_UNKNOWN_WRITES;
261 		return LAT_UNKNOWN;
262 	}
263 
264 	/*
265 	 * If the 'min' latency exceeds our target, step down.
266 	 */
267 	if (stat[READ].min > rwb->min_lat_nsec) {
268 		trace_wbt_lat(bdi, stat[READ].min);
269 		trace_wbt_stat(bdi, stat);
270 		return LAT_EXCEEDED;
271 	}
272 
273 	if (rqd->scale_step)
274 		trace_wbt_stat(bdi, stat);
275 
276 	return LAT_OK;
277 }
278 
279 static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
280 {
281 	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
282 	struct rq_depth *rqd = &rwb->rq_depth;
283 
284 	trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
285 			rwb->wb_background, rwb->wb_normal, rqd->max_depth);
286 }
287 
288 static void calc_wb_limits(struct rq_wb *rwb)
289 {
290 	if (rwb->min_lat_nsec == 0) {
291 		rwb->wb_normal = rwb->wb_background = 0;
292 	} else if (rwb->rq_depth.max_depth <= 2) {
293 		rwb->wb_normal = rwb->rq_depth.max_depth;
294 		rwb->wb_background = 1;
295 	} else {
296 		rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
297 		rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
298 	}
299 }
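/*
 * Example (illustrative): with rq_depth.max_depth == 16 the above gives
 * wb_normal == 8 and wb_background == 4, i.e. background writeback gets
 * about a quarter of the scaled depth and normal buffered writeback half.
 * A min_lat_nsec of 0 zeroes both limits, which disables throttling
 * entirely (see rwb_enabled()).
 */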
300 
301 static void scale_up(struct rq_wb *rwb)
302 {
303 	rq_depth_scale_up(&rwb->rq_depth);
304 	calc_wb_limits(rwb);
305 	rwb->unknown_cnt = 0;
306 	rwb_trace_step(rwb, "scale up");
307 }
308 
309 static void scale_down(struct rq_wb *rwb, bool hard_throttle)
310 {
311 	rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
312 	calc_wb_limits(rwb);
313 	rwb->unknown_cnt = 0;
314 	rwb_wake_all(rwb);
315 	rwb_trace_step(rwb, "scale down");
316 }
317 
318 static void rwb_arm_timer(struct rq_wb *rwb)
319 {
320 	struct rq_depth *rqd = &rwb->rq_depth;
321 
322 	if (rqd->scale_step > 0) {
323 		/*
324 		 * We should speed this up, using some variant of a fast
325 		 * integer inverse square root calculation. Since we only do
326 		 * this for every window expiration, it's not a huge deal,
327 		 * though.
328 		 */
329 		rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
330 					int_sqrt((rqd->scale_step + 1) << 8));
331 	} else {
332 		/*
333 		 * For step <= 0, keep the default window size; we don't
334 		 * want to grow or shrink it.
335 		 */
336 		rwb->cur_win_nsec = rwb->win_nsec;
337 	}
338 
339 	blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
340 }
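/*
 * Note on the fixed-point math above: int_sqrt((scale_step + 1) << 8) is
 * 16 * sqrt(scale_step + 1) and win_nsec << 4 is 16 * win_nsec, so the
 * division yields win_nsec / sqrt(scale_step + 1) without floating point.
 * E.g. scale_step == 3 gives a 100msec / 2 = 50msec window.
 */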
341 
342 static void wb_timer_fn(struct blk_stat_callback *cb)
343 {
344 	struct rq_wb *rwb = cb->data;
345 	struct rq_depth *rqd = &rwb->rq_depth;
346 	unsigned int inflight = wbt_inflight(rwb);
347 	int status;
348 
349 	status = latency_exceeded(rwb, cb->stat);
350 
351 	trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
352 			inflight);
353 
354 	/*
355 	 * If we exceeded the latency target, step down. If we did not,
356 	 * step one level up. If we don't know enough to say either exceeded
357 	 * or ok, then don't do anything.
358 	 */
359 	switch (status) {
360 	case LAT_EXCEEDED:
361 		scale_down(rwb, true);
362 		break;
363 	case LAT_OK:
364 		scale_up(rwb);
365 		break;
366 	case LAT_UNKNOWN_WRITES:
367 		/*
368 		 * We started at the center step and don't have a valid
369 		 * read/write sample, but we do have writes going on.
370 		 * Allow the step to go negative, to increase write performance.
371 		 */
372 		scale_up(rwb);
373 		break;
374 	case LAT_UNKNOWN:
375 		if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
376 			break;
377 		/*
378 		 * We get here when we previously scaled the depth up or down,
379 		 * and we currently don't have a valid read/write sample. For
380 		 * that case, slowly return to the center state (step == 0).
381 		 */
382 		if (rqd->scale_step > 0)
383 			scale_up(rwb);
384 		else if (rqd->scale_step < 0)
385 			scale_down(rwb, false);
386 		break;
387 	default:
388 		break;
389 	}
390 
391 	/*
392 	 * Re-arm the timer if we have IO in flight or are off the default step
393 	 */
394 	if (rqd->scale_step || inflight)
395 		rwb_arm_timer(rwb);
396 }
397 
398 static void __wbt_update_limits(struct rq_wb *rwb)
399 {
400 	struct rq_depth *rqd = &rwb->rq_depth;
401 
402 	rqd->scale_step = 0;
403 	rqd->scaled_max = false;
404 
405 	rq_depth_calc_max_depth(rqd);
406 	calc_wb_limits(rwb);
407 
408 	rwb_wake_all(rwb);
409 }
410 
411 void wbt_update_limits(struct request_queue *q)
412 {
413 	struct rq_qos *rqos = wbt_rq_qos(q);
414 	if (!rqos)
415 		return;
416 	__wbt_update_limits(RQWB(rqos));
417 }
418 
419 u64 wbt_get_min_lat(struct request_queue *q)
420 {
421 	struct rq_qos *rqos = wbt_rq_qos(q);
422 	if (!rqos)
423 		return 0;
424 	return RQWB(rqos)->min_lat_nsec;
425 }
426 
427 void wbt_set_min_lat(struct request_queue *q, u64 val)
428 {
429 	struct rq_qos *rqos = wbt_rq_qos(q);
430 	if (!rqos)
431 		return;
432 	RQWB(rqos)->min_lat_nsec = val;
433 	RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
434 	__wbt_update_limits(RQWB(rqos));
435 }
436 
437 
438 static bool close_io(struct rq_wb *rwb)
439 {
440 	const unsigned long now = jiffies;
441 
442 	return time_before(now, rwb->last_issue + HZ / 10) ||
443 		time_before(now, rwb->last_comp + HZ / 10);
444 }
445 
446 #define REQ_HIPRIO	(REQ_SYNC | REQ_META | REQ_PRIO)
447 
448 static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
449 {
450 	unsigned int limit;
451 
452 	if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
453 		return rwb->wb_background;
454 
455 	/*
456 	 * At this point we know it's a buffered write. If this is
457 	 * kswapd trying to free memory, or REQ_SYNC is set, then
458 	 * it's WB_SYNC_ALL writeback, and we'll use the max limit for
459 	 * that. If the write is marked as a background write, then use
460 	 * the idle limit, or go to normal if we haven't had competing
461 	 * IO for a bit.
462 	 */
463 	if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
464 		limit = rwb->rq_depth.max_depth;
465 	else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
466 		/*
467 		 * If less than 100ms since we completed unrelated IO,
468 		 * limit us to the background depth for background writeback.
469 		 */
470 		limit = rwb->wb_background;
471 	} else
472 		limit = rwb->wb_normal;
473 
474 	return limit;
475 }
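/*
 * Summary of the limits chosen above (illustrative, with max_depth == 16):
 * discards always get the background limit (4); kswapd, writes marked
 * REQ_SYNC/REQ_META/REQ_PRIO, or a recent dirty-throttle wait get the full
 * depth (16); REQ_BACKGROUND writes, or writes close in time to other IO,
 * get the background limit (4); everything else gets wb_normal (8).
 */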
476 
477 static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
478 			     wait_queue_entry_t *wait, unsigned long rw)
479 {
480 	/*
481 	 * Inc it here even if disabled, since we'll dec it at completion.
482 	 * This only happens if the task was sleeping in __wbt_wait(),
483 	 * and someone turned it off at the same time.
484 	 */
485 	if (!rwb_enabled(rwb)) {
486 		atomic_inc(&rqw->inflight);
487 		return true;
488 	}
489 
490 	/*
491 	 * If the waitqueue is already active and we are not the next
492 	 * in line to be woken up, wait for our turn.
493 	 */
494 	if (waitqueue_active(&rqw->wait) &&
495 	    rqw->wait.head.next != &wait->entry)
496 		return false;
497 
498 	return rq_wait_inc_below(rqw, get_limit(rwb, rw));
499 }
500 
501 /*
502  * Block if we will exceed our limit, or if we are currently waiting for
503  * the timer to kick off queuing again.
504  */
505 static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
506 		       unsigned long rw, spinlock_t *lock)
507 	__releases(lock)
508 	__acquires(lock)
509 {
510 	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
511 	DEFINE_WAIT(wait);
512 
513 	if (may_queue(rwb, rqw, &wait, rw))
514 		return;
515 
516 	do {
517 		prepare_to_wait_exclusive(&rqw->wait, &wait,
518 						TASK_UNINTERRUPTIBLE);
519 
520 		if (may_queue(rwb, rqw, &wait, rw))
521 			break;
522 
523 		if (lock) {
524 			spin_unlock_irq(lock);
525 			io_schedule();
526 			spin_lock_irq(lock);
527 		} else
528 			io_schedule();
529 	} while (1);
530 
531 	finish_wait(&rqw->wait, &wait);
532 }
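/*
 * The loop above pairs with the head-of-queue check in may_queue(): waiters
 * are queued exclusively, and a woken task only claims an inflight slot if
 * nobody is queued ahead of it, keeping the throttling roughly FIFO. If the
 * caller passed in a queue lock, it is dropped around io_schedule() and
 * re-acquired afterwards.
 */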
533 
534 static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
535 {
536 	switch (bio_op(bio)) {
537 	case REQ_OP_WRITE:
538 		/*
539 		 * Don't throttle WRITE_ODIRECT
540 		 */
541 		if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
542 		    (REQ_SYNC | REQ_IDLE))
543 			return false;
544 		/* fallthrough */
545 	case REQ_OP_DISCARD:
546 		return true;
547 	default:
548 		return false;
549 	}
550 }
551 
552 /*
553  * Returns the wbt flags to track the IO with; WBT_TRACKED is set if it
554  * must be accounted at completion. May sleep, if we have exceeded the
555  * writeback limits. Caller can pass in an irq held spinlock, if it holds
556  * one when calling this function. If we do sleep, we'll release and re-grab it.
557  */
558 static enum wbt_flags wbt_wait(struct rq_qos *rqos, struct bio *bio,
559 			       spinlock_t *lock)
560 {
561 	struct rq_wb *rwb = RQWB(rqos);
562 	enum wbt_flags ret = 0;
563 
564 	if (!rwb_enabled(rwb))
565 		return 0;
566 
567 	if (bio_op(bio) == REQ_OP_READ)
568 		ret = WBT_READ;
569 
570 	if (!wbt_should_throttle(rwb, bio)) {
571 		if (ret & WBT_READ)
572 			wb_timestamp(rwb, &rwb->last_issue);
573 		return ret;
574 	}
575 
576 	if (current_is_kswapd())
577 		ret |= WBT_KSWAPD;
578 	if (bio_op(bio) == REQ_OP_DISCARD)
579 		ret |= WBT_DISCARD;
580 
581 	__wbt_wait(rwb, ret, bio->bi_opf, lock);
582 
583 	if (!blk_stat_is_active(rwb->cb))
584 		rwb_arm_timer(rwb);
585 
586 	return ret | WBT_TRACKED;
587 }
588 
589 void wbt_issue(struct rq_qos *rqos, struct request *rq)
590 {
591 	struct rq_wb *rwb = RQWB(rqos);
592 
593 	if (!rwb_enabled(rwb))
594 		return;
595 
596 	/*
597 	 * Track the issue time of a sync request, so we can react more quickly
598 	 * if it takes a long time to complete. Note that this is just a hint.
599 	 * The request can go away when it completes, so it's important we never
600 	 * dereference it. We only use the address to compare with, which is why
601 	 * we store the sync_issue time locally.
602 	 */
603 	if (wbt_is_read(rq) && !rwb->sync_issue) {
604 		rwb->sync_cookie = rq;
605 		rwb->sync_issue = rq->io_start_time_ns;
606 	}
607 }
608 
609 void wbt_requeue(struct rq_qos *rqos, struct request *rq)
610 {
611 	struct rq_wb *rwb = RQWB(rqos);
612 	if (!rwb_enabled(rwb))
613 		return;
614 	if (rq == rwb->sync_cookie) {
615 		rwb->sync_issue = 0;
616 		rwb->sync_cookie = NULL;
617 	}
618 }
619 
620 void wbt_set_queue_depth(struct request_queue *q, unsigned int depth)
621 {
622 	struct rq_qos *rqos = wbt_rq_qos(q);
623 	if (rqos) {
624 		RQWB(rqos)->rq_depth.queue_depth = depth;
625 		__wbt_update_limits(RQWB(rqos));
626 	}
627 }
628 
629 void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
630 {
631 	struct rq_qos *rqos = wbt_rq_qos(q);
632 	if (rqos)
633 		RQWB(rqos)->wc = write_cache_on;
634 }
635 
636 /*
637  * Enable wbt if defaults are configured that way
638  */
639 void wbt_enable_default(struct request_queue *q)
640 {
641 	struct rq_qos *rqos = wbt_rq_qos(q);
642 	/* Throttling already enabled? */
643 	if (rqos)
644 		return;
645 
646 	/* Queue not registered? Maybe shutting down... */
647 	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
648 		return;
649 
650 	if ((q->mq_ops && IS_ENABLED(CONFIG_BLK_WBT_MQ)) ||
651 	    (q->request_fn && IS_ENABLED(CONFIG_BLK_WBT_SQ)))
652 		wbt_init(q);
653 }
654 EXPORT_SYMBOL_GPL(wbt_enable_default);
655 
656 u64 wbt_default_latency_nsec(struct request_queue *q)
657 {
658 	/*
659 	 * We default to 2msec for non-rotational storage, and 75msec
660 	 * for rotational storage.
661 	 */
662 	if (blk_queue_nonrot(q))
663 		return 2000000ULL;
664 	else
665 		return 75000000ULL;
666 }
667 
668 static int wbt_data_dir(const struct request *rq)
669 {
670 	const int op = req_op(rq);
671 
672 	if (op == REQ_OP_READ)
673 		return READ;
674 	else if (op_is_write(op))
675 		return WRITE;
676 
677 	/* don't account */
678 	return -1;
679 }
680 
681 static void wbt_exit(struct rq_qos *rqos)
682 {
683 	struct rq_wb *rwb = RQWB(rqos);
684 	struct request_queue *q = rqos->q;
685 
686 	blk_stat_remove_callback(q, rwb->cb);
687 	blk_stat_free_callback(rwb->cb);
688 	kfree(rwb);
689 }
690 
691 /*
692  * Disable wbt if it was enabled by default.
693  */
694 void wbt_disable_default(struct request_queue *q)
695 {
696 	struct rq_qos *rqos = wbt_rq_qos(q);
697 	struct rq_wb *rwb;
698 	if (!rqos)
699 		return;
700 	rwb = RQWB(rqos);
701 	if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
702 		rwb->wb_normal = 0;
703 }
704 EXPORT_SYMBOL_GPL(wbt_disable_default);
705 
706 
707 static struct rq_qos_ops wbt_rqos_ops = {
708 	.throttle = wbt_wait,
709 	.issue = wbt_issue,
710 	.requeue = wbt_requeue,
711 	.done = wbt_done,
712 	.cleanup = __wbt_done,
713 	.exit = wbt_exit,
714 };
715 
716 int wbt_init(struct request_queue *q)
717 {
718 	struct rq_wb *rwb;
719 	int i;
720 
721 	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
722 	if (!rwb)
723 		return -ENOMEM;
724 
725 	rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
726 	if (!rwb->cb) {
727 		kfree(rwb);
728 		return -ENOMEM;
729 	}
730 
731 	for (i = 0; i < WBT_NUM_RWQ; i++)
732 		rq_wait_init(&rwb->rq_wait[i]);
733 
734 	rwb->rqos.id = RQ_QOS_WBT;
735 	rwb->rqos.ops = &wbt_rqos_ops;
736 	rwb->rqos.q = q;
737 	rwb->last_comp = rwb->last_issue = jiffies;
738 	rwb->win_nsec = RWB_WINDOW_NSEC;
739 	rwb->enable_state = WBT_STATE_ON_DEFAULT;
740 	rwb->wc = 1;
741 	rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
742 	__wbt_update_limits(rwb);
743 
744 	/*
745 	 * Register the rq_qos policy and add the stats callback.
746 	 */
747 	rq_qos_add(q, &rwb->rqos);
748 	blk_stat_add_callback(q, rwb->cb);
749 
750 	rwb->min_lat_nsec = wbt_default_latency_nsec(q);
751 
752 	wbt_set_queue_depth(q, blk_queue_depth(q));
753 	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
754 
755 	return 0;
756 }
757