xref: /openbmc/linux/block/blk-iocost.c (revision 266b2ca7)
1 /* SPDX-License-Identifier: GPL-2.0
2  *
3  * IO cost model based controller.
4  *
5  * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
6  * Copyright (C) 2019 Andy Newell <newella@fb.com>
7  * Copyright (C) 2019 Facebook
8  *
9  * One challenge of controlling IO resources is the lack of a trivially
10  * observable cost metric.  This is distinguished from CPU and memory where
11  * wallclock time and the number of bytes can serve as accurate enough
12  * approximations.
13  *
14  * Bandwidth and iops are the most commonly used metrics for IO devices but
15  * depending on the type and specifics of the device, different IO patterns
16  * easily lead to multiple orders of magnitude variations rendering them
17  * useless for the purpose of IO capacity distribution.  While on-device
18  * time, with a lot of crutches, could serve as a useful approximation for
19  * non-queued rotational devices, this is no longer viable with modern
20  * devices, even the rotational ones.
21  *
22  * While there is no cost metric we can trivially observe, it isn't a
23  * complete mystery.  For example, on a rotational device, seek cost
24  * dominates while a contiguous transfer contributes a smaller amount
25  * proportional to the size.  If we can characterize at least the relative
26  * costs of these different types of IOs, it should be possible to
27  * implement a reasonable work-conserving proportional IO resource
28  * distribution.
29  *
30  * 1. IO Cost Model
31  *
32  * IO cost model estimates the cost of an IO given its basic parameters and
33  * history (e.g. the end sector of the last IO).  The cost is measured in
34  * device time.  If a given IO is estimated to cost 10ms, the device should
35  * be able to process ~100 of those IOs in a second.
36  *
37  * Currently, there's only one builtin cost model - linear.  Each IO is
38  * classified as sequential or random and given a base cost accordingly.
39  * On top of that, a size cost proportional to the length of the IO is
40  * added.  While simple, this model captures the operational
41  * characteristics of a wide variety of devices well enough.  Default
42  * parameters for several different classes of devices are provided and the
43  * parameters can be configured from userspace via
44  * /sys/fs/cgroup/io.cost.model.
45  *
46  * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47  * device-specific coefficients.
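 *
 * As a worked example (illustrative; an added note): under the linear
 * model, a 64KB sequential read costs one sequential base cost plus
 * sixteen 4KB page costs, while the same data issued as sixteen random
 * 4KB IOs pays the random base cost sixteen times over.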
48  *
49  * 2. Control Strategy
50  *
51  * The device virtual time (vtime) is used as the primary control metric.
52  * The control strategy is composed of the following three parts.
53  *
54  * 2-1. Vtime Distribution
55  *
56  * When a cgroup becomes active in terms of IOs, its hierarchical share is
57  * calculated.  Please consider the following hierarchy where the numbers
58  * inside parentheses denote the configured weights.
59  *
60  *           root
61  *         /       \
62  *      A (w:100)  B (w:300)
63  *      /       \
64  *  A0 (w:100)  A1 (w:100)
65  *
66  * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
67  * of equal weight, each gets 50% share.  If then B starts issuing IOs, B
68  * gets 300/(100+300) or 75% share, and A0 and A1 equally split the rest,
69  * 12.5% each.  The distribution mechanism only cares about these flattened
70  * shares.  They're called hweights (hierarchical weights) and always add
71  * up to 1 (WEIGHT_ONE).
72  *
73  * A given cgroup's vtime runs slower in inverse proportion to its hweight.
74  * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
75  * against the device vtime - an IO which takes 10ms on the underlying
76  * device is considered to take 80ms on A0.
77  *
78  * This constitutes the basis of IO capacity distribution.  Each cgroup's
79  * vtime is running at a rate determined by its hweight.  A cgroup tracks
80  * the vtime consumed by past IOs and can issue a new IO if doing so
81  * wouldn't outrun the current device vtime.  Otherwise, the IO is
82  * suspended until the vtime has progressed enough to cover it.
83  *
84  * 2-2. Vrate Adjustment
85  *
86  * It's unrealistic to expect the cost model to be perfect.  There are too
87  * many devices and even on the same device the overall performance
88  * fluctuates depending on numerous factors such as IO mixture and device
89  * internal garbage collection.  The controller needs to adapt dynamically.
90  *
91  * This is achieved by adjusting the overall IO rate according to how busy
92  * the device is.  If the device becomes overloaded, we're sending down too
93  * many IOs and should generally slow down.  If there are waiting issuers
94  * but the device isn't saturated, we're issuing too few and should
95  * generally speed up.
96  *
97  * To slow down, we lower the vrate - the rate at which the device vtime
98  * passes compared to the wall clock.  For example, if the vtime is running
99  * at the vrate of 75%, all cgroups added up would only be able to issue
100  * 750ms worth of IOs per second, and vice-versa for speeding up.
101  *
102  * Device busyness is determined using two criteria - rq wait and
103  * completion latencies.
104  *
105  * When a device gets saturated, the on-device and then the request queues
106  * fill up and a bio which is ready to be issued has to wait for a request
107  * to become available.  When this delay becomes noticeable, it's a clear
108  * indication that the device is saturated and we lower the vrate.  This
109  * saturation signal is fairly conservative as it only triggers when both
110  * hardware and software queues are filled up, and is used as the default
111  * busy signal.
112  *
113  * As devices can have deep queues and be unfair in how the queued commands
114  * are executed, solely depending on rq wait may not result in satisfactory
115  * control quality.  For a better control quality, completion latency QoS
116  * parameters can be configured so that the device is considered saturated
117  * if N'th percentile completion latency rises above the set point.
118  *
119  * The completion latency requirements are a function of both the
120  * underlying device characteristics and the desired IO latency quality of
121  * service.  There is an inherent trade-off - the tighter the latency QoS,
122  * the higher the bandwidth loss.  Latency QoS is disabled by default
123  * and can be set through /sys/fs/cgroup/io.cost.qos.
124  *
125  * 2-3. Work Conservation
126  *
127  * Imagine two cgroups A and B with equal weights.  A is issuing a small IO
128  * periodically while B is sending out enough parallel IOs to saturate the
129  * device on its own.  Let's say A's usage amounts to 100ms worth of IO
130  * cost per second, i.e., 10% of the device capacity.  The naive
131  * distribution of half and half would lead to 60% utilization of the
132  * device, a significant reduction in the total amount of work done
133  * compared to free-for-all competition.  This is too high a cost to pay
134  * for IO control.
135  *
136  * To conserve the total amount of work done, we keep track of how much
137  * each active cgroup is actually using and yield part of its weight if
138  * there are other cgroups which can make use of it.  In the above case,
139  * A's weight will be lowered so that it hovers above the actual usage and
140  * B would be able to use the rest.
141  *
142  * As we don't want to penalize a cgroup for donating its weight, the
143  * surplus weight adjustment factors in a margin and has an immediate
144  * snapback mechanism in case the cgroup needs more IO vtime for itself.
145  *
146  * Note that adjusting down surplus weights has the same effects as
147  * accelerating vtime for other cgroups and work conservation can also be
148  * implemented by adjusting vrate dynamically.  However, squaring away who
149  * can donate how much and who should take back how much requires hweight
150  * propagation anyway, which makes it easier to implement and understand as
151  * a separate mechanism.
152  *
153  * 3. Monitoring
154  *
155  * Instead of debugfs or other clumsy monitoring mechanisms, this
156  * controller uses a drgn based monitoring script -
157  * tools/cgroup/iocost_monitor.py.  For details on drgn, please see
158  * https://github.com/osandov/drgn.  The output looks like the following.
159  *
160  *  sdb RUN   per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
161  *                 active      weight      hweight% inflt% dbt  delay usages%
162  *  test/a              *    50/   50  33.33/ 33.33  27.65   2  0*041 033:033:033
163  *  test/b              *   100/  100  66.67/ 66.67  17.56   0  0*000 066:079:077
164  *
165  * - per	: Timer period
166  * - cur_per	: Internal wall and device vtime clock
167  * - vrate	: Device virtual time rate against wall clock
168  * - weight	: Surplus-adjusted and configured weights
169  * - hweight	: Surplus-adjusted and configured hierarchical weights
170  * - inflt	: The percentage of in-flight IO cost at the end of last period
171  * - delay	: Deferred issuer delay induction level and duration
172  * - usages	: Usage history
173  */
174 
175 #include <linux/kernel.h>
176 #include <linux/module.h>
177 #include <linux/timer.h>
178 #include <linux/time64.h>
179 #include <linux/parser.h>
180 #include <linux/sched/signal.h>
181 #include <asm/local.h>
182 #include <asm/local64.h>
183 #include "blk-rq-qos.h"
184 #include "blk-stat.h"
185 #include "blk-wbt.h"
186 #include "blk-cgroup.h"
187 
188 #ifdef CONFIG_TRACEPOINTS
189 
190 /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
191 #define TRACE_IOCG_PATH_LEN 1024
192 static DEFINE_SPINLOCK(trace_iocg_path_lock);
193 static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
194 
195 #define TRACE_IOCG_PATH(type, iocg, ...)					\
196 	do {									\
197 		unsigned long flags;						\
198 		if (trace_iocost_##type##_enabled()) {				\
199 			spin_lock_irqsave(&trace_iocg_path_lock, flags);	\
200 			cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup,	\
201 				    trace_iocg_path, TRACE_IOCG_PATH_LEN);	\
202 			trace_iocost_##type(iocg, trace_iocg_path,		\
203 					      ##__VA_ARGS__);			\
204 			spin_unlock_irqrestore(&trace_iocg_path_lock, flags);	\
205 		}								\
206 	} while (0)
207 
208 #else	/* CONFIG_TRACEPOINTS */
209 #define TRACE_IOCG_PATH(type, iocg, ...)	do { } while (0)
210 #endif	/* CONFIG_TRACEPOINTS */
211 
212 enum {
213 	MILLION			= 1000000,
214 
215 	/* timer period is calculated from latency requirements, bound it */
216 	MIN_PERIOD		= USEC_PER_MSEC,
217 	MAX_PERIOD		= USEC_PER_SEC,
218 
219 	/*
220 	 * iocg->vtime is targeted at 50% behind the device vtime, which
221 	 * serves as its IO credit buffer.  Surplus weight adjustment is
222 	 * immediately canceled if the vtime margin runs below 10%.
223 	 */
224 	MARGIN_MIN_PCT		= 10,
225 	MARGIN_LOW_PCT		= 20,
226 	MARGIN_TARGET_PCT	= 50,
227 
228 	INUSE_ADJ_STEP_PCT	= 25,
229 
230 	/* Have some play in timer operations */
231 	TIMER_SLACK_PCT		= 1,
232 
233 	/* 1/64k is granular enough and can easily be handled w/ u32 */
234 	WEIGHT_ONE		= 1 << 16,
235 
236 	/*
237 	 * As vtime is used to calculate the cost of each IO, it needs to
238 	 * be fairly high precision.  For example, it should be able to
239 	 * represent the cost of a single page worth of discard with
240 	 * sufficient accuracy.  At the same time, it should be able to
241 	 * represent reasonably long enough durations to be useful and
242 	 * convenient during operation.
243 	 *
244 	 * 1s worth of vtime is 2^37.  This gives us both sub-nanosecond
245 	 * granularity and days of wrap-around time even at extreme vrates.
246 	 */
247 	VTIME_PER_SEC_SHIFT	= 37,
248 	VTIME_PER_SEC		= 1LLU << VTIME_PER_SEC_SHIFT,
249 	VTIME_PER_USEC		= VTIME_PER_SEC / USEC_PER_SEC,
250 	VTIME_PER_NSEC		= VTIME_PER_SEC / NSEC_PER_SEC,
251 
252 	/* bound vrate adjustments within two orders of magnitude */
253 	VRATE_MIN_PPM		= 10000,	/* 1% */
254 	VRATE_MAX_PPM		= 100000000,	/* 10000% */
255 
256 	VRATE_MIN		= VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
257 	VRATE_CLAMP_ADJ_PCT	= 4,
258 
259 	/* if IOs end up waiting for requests, issue less */
260 	RQ_WAIT_BUSY_PCT	= 5,
261 
262 	/* unbusy hysteresis */
263 	UNBUSY_THR_PCT		= 75,
264 
265 	/*
266 	 * The effect of delay is indirect and non-linear and a huge amount of
267 	 * future debt can accumulate abruptly while unthrottled. Linearly scale
268 	 * up delay as debt is going up and then let it decay exponentially.
269 	 * This gives us quick ramp ups while delay is accumulating and long
270 	 * tails which can help reducing the frequency of debt explosions on
271 	 * unthrottle. The parameters are experimentally determined.
272 	 *
273 	 * The delay mechanism provides adequate protection and behavior in many
274 	 * cases. However, this is far from ideal and falls short on both
275 	 * fronts. The debtors are often throttled too harshly, costing a
276 	 * significant amount of fairness and possibly total work, while the
277 	 * protection against their impacts on the system can be choppy and
278 	 * unreliable.
279 	 *
280 	 * The shortcoming primarily stems from the fact that, unlike for page
281 	 * cache, the kernel doesn't have a well-defined back-pressure propagation
282 	 * mechanism and policies for anonymous memory. Fully addressing this
283 	 * issue will likely require substantial improvements in the area.
284 	 */
285 	MIN_DELAY_THR_PCT	= 500,
286 	MAX_DELAY_THR_PCT	= 25000,
287 	MIN_DELAY		= 250,
288 	MAX_DELAY		= 250 * USEC_PER_MSEC,
289 
290 	/* halve debts if avg usage over 100ms is under 50% */
291 	DFGV_USAGE_PCT		= 50,
292 	DFGV_PERIOD		= 100 * USEC_PER_MSEC,
293 
294 	/* don't let cmds which take a very long time pin lagging for too long */
295 	MAX_LAGGING_PERIODS	= 10,
296 
297 	/* switch iff the conditions are met for longer than this */
298 	AUTOP_CYCLE_NSEC	= 10LLU * NSEC_PER_SEC,
299 
300 	/*
301 	 * Count IO size in 4k pages.  The 12-bit shift keeps the
302 	 * size-proportional components of the cost calculation within a
303 	 * similar number of digits as the per-IO cost components.
304 	 */
305 	IOC_PAGE_SHIFT		= 12,
306 	IOC_PAGE_SIZE		= 1 << IOC_PAGE_SHIFT,
307 	IOC_SECT_TO_PAGE_SHIFT	= IOC_PAGE_SHIFT - SECTOR_SHIFT,
308 
309 	/* if apart further than 16M, consider randio for linear model */
310 	LCOEF_RANDIO_PAGES	= 4096,
311 };
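
/*
 * For scale (an added note; the numbers follow directly from the
 * definitions above): VTIME_PER_SEC = 2^37 ~= 1.37e11, so VTIME_PER_USEC
 * ~= 137438 and VTIME_PER_NSEC ~= 137 - one vtime unit is roughly 7.3
 * picoseconds, far finer than any single IO cost the linear model
 * produces.
 */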
312 
313 enum ioc_running {
314 	IOC_IDLE,
315 	IOC_RUNNING,
316 	IOC_STOP,
317 };
318 
319 /* io.cost.qos controls including per-dev enable of the whole controller */
320 enum {
321 	QOS_ENABLE,
322 	QOS_CTRL,
323 	NR_QOS_CTRL_PARAMS,
324 };
325 
326 /* io.cost.qos params */
327 enum {
328 	QOS_RPPM,
329 	QOS_RLAT,
330 	QOS_WPPM,
331 	QOS_WLAT,
332 	QOS_MIN,
333 	QOS_MAX,
334 	NR_QOS_PARAMS,
335 };
336 
337 /* io.cost.model controls */
338 enum {
339 	COST_CTRL,
340 	COST_MODEL,
341 	NR_COST_CTRL_PARAMS,
342 };
343 
344 /* builtin linear cost model coefficients */
345 enum {
346 	I_LCOEF_RBPS,
347 	I_LCOEF_RSEQIOPS,
348 	I_LCOEF_RRANDIOPS,
349 	I_LCOEF_WBPS,
350 	I_LCOEF_WSEQIOPS,
351 	I_LCOEF_WRANDIOPS,
352 	NR_I_LCOEFS,
353 };
354 
355 enum {
356 	LCOEF_RPAGE,
357 	LCOEF_RSEQIO,
358 	LCOEF_RRANDIO,
359 	LCOEF_WPAGE,
360 	LCOEF_WSEQIO,
361 	LCOEF_WRANDIO,
362 	NR_LCOEFS,
363 };
364 
365 enum {
366 	AUTOP_INVALID,
367 	AUTOP_HDD,
368 	AUTOP_SSD_QD1,
369 	AUTOP_SSD_DFL,
370 	AUTOP_SSD_FAST,
371 };
372 
373 struct ioc_params {
374 	u32				qos[NR_QOS_PARAMS];
375 	u64				i_lcoefs[NR_I_LCOEFS];
376 	u64				lcoefs[NR_LCOEFS];
377 	u32				too_fast_vrate_pct;
378 	u32				too_slow_vrate_pct;
379 };
380 
381 struct ioc_margins {
382 	s64				min;
383 	s64				low;
384 	s64				target;
385 };
386 
387 struct ioc_missed {
388 	local_t				nr_met;
389 	local_t				nr_missed;
390 	u32				last_met;
391 	u32				last_missed;
392 };
393 
394 struct ioc_pcpu_stat {
395 	struct ioc_missed		missed[2];
396 
397 	local64_t			rq_wait_ns;
398 	u64				last_rq_wait_ns;
399 };
400 
401 /* per device */
402 struct ioc {
403 	struct rq_qos			rqos;
404 
405 	bool				enabled;
406 
407 	struct ioc_params		params;
408 	struct ioc_margins		margins;
409 	u32				period_us;
410 	u32				timer_slack_ns;
411 	u64				vrate_min;
412 	u64				vrate_max;
413 
414 	spinlock_t			lock;
415 	struct timer_list		timer;
416 	struct list_head		active_iocgs;	/* active cgroups */
417 	struct ioc_pcpu_stat __percpu	*pcpu_stat;
418 
419 	enum ioc_running		running;
420 	atomic64_t			vtime_rate;
421 	u64				vtime_base_rate;
422 	s64				vtime_err;
423 
424 	seqcount_spinlock_t		period_seqcount;
425 	u64				period_at;	/* wallclock starttime */
426 	u64				period_at_vtime; /* vtime starttime */
427 
428 	atomic64_t			cur_period;	/* inc'd each period */
429 	int				busy_level;	/* saturation history */
430 
431 	bool				weights_updated;
432 	atomic_t			hweight_gen;	/* for lazy hweights */
433 
434 	/* debt forgiveness */
435 	u64				dfgv_period_at;
436 	u64				dfgv_period_rem;
437 	u64				dfgv_usage_us_sum;
438 
439 	u64				autop_too_fast_at;
440 	u64				autop_too_slow_at;
441 	int				autop_idx;
442 	bool				user_qos_params:1;
443 	bool				user_cost_model:1;
444 };
445 
446 struct iocg_pcpu_stat {
447 	local64_t			abs_vusage;
448 };
449 
450 struct iocg_stat {
451 	u64				usage_us;
452 	u64				wait_us;
453 	u64				indebt_us;
454 	u64				indelay_us;
455 };
456 
457 /* per device-cgroup pair */
458 struct ioc_gq {
459 	struct blkg_policy_data		pd;
460 	struct ioc			*ioc;
461 
462 	/*
463 	 * An iocg can get its weight from two sources - an explicit
464 	 * per-device-cgroup configuration or the default weight of the
465 	 * cgroup.  `cfg_weight` is the explicit per-device-cgroup
466 	 * configuration.  `weight` is the effective weight considering both
467 	 * sources.
468 	 *
469 	 * When an idle cgroup becomes active its `active` goes from 0 to
470 	 * `weight`.  `inuse` is the surplus adjusted active weight.
471 	 * `active` and `inuse` are used to calculate `hweight_active` and
472 	 * `hweight_inuse`.
473 	 *
474 	 * `last_inuse` remembers `inuse` while an iocg is idle to persist
475 	 * surplus adjustments.
476 	 *
477 	 * `inuse` may be adjusted dynamically during a period. `saved_*` are used
478 	 * to determine and track adjustments.
479 	 */
480 	u32				cfg_weight;
481 	u32				weight;
482 	u32				active;
483 	u32				inuse;
484 
485 	u32				last_inuse;
486 	s64				saved_margin;
487 
488 	sector_t			cursor;		/* to detect randio */
489 
490 	/*
491 	 * `vtime` is this iocg's vtime cursor which progresses as IOs are
492 	 * issued.  If lagging behind device vtime, the delta represents
493 	 * the currently available IO budget.  If running ahead, the
494 	 * overage.
495 	 *
496 	 * `done_vtime` is the same but progressed on completion rather
497 	 * than issue.  The delta behind `vtime` represents the cost of
498 	 * currently in-flight IOs.
499 	 */
500 	atomic64_t			vtime;
501 	atomic64_t			done_vtime;
502 	u64				abs_vdebt;
503 
504 	/* current delay in effect and when it started */
505 	u64				delay;
506 	u64				delay_at;
507 
508 	/*
509 	 * The period this iocg was last active in.  Used for deactivation
510 	 * and invalidating `vtime`.
511 	 */
512 	atomic64_t			active_period;
513 	struct list_head		active_list;
514 
515 	/* see __propagate_weights() and current_hweight() for details */
516 	u64				child_active_sum;
517 	u64				child_inuse_sum;
518 	u64				child_adjusted_sum;
519 	int				hweight_gen;
520 	u32				hweight_active;
521 	u32				hweight_inuse;
522 	u32				hweight_donating;
523 	u32				hweight_after_donation;
524 
525 	struct list_head		walk_list;
526 	struct list_head		surplus_list;
527 
528 	struct wait_queue_head		waitq;
529 	struct hrtimer			waitq_timer;
530 
531 	/* timestamp at the latest activation */
532 	u64				activated_at;
533 
534 	/* statistics */
535 	struct iocg_pcpu_stat __percpu	*pcpu_stat;
536 	struct iocg_stat		stat;
537 	struct iocg_stat		last_stat;
538 	u64				last_stat_abs_vusage;
539 	u64				usage_delta_us;
540 	u64				wait_since;
541 	u64				indebt_since;
542 	u64				indelay_since;
543 
544 	/* this iocg's depth in the hierarchy and ancestors including self */
545 	int				level;
546 	struct ioc_gq			*ancestors[];
547 };
548 
549 /* per cgroup */
550 struct ioc_cgrp {
551 	struct blkcg_policy_data	cpd;
552 	unsigned int			dfl_weight;
553 };
554 
555 struct ioc_now {
556 	u64				now_ns;
557 	u64				now;
558 	u64				vnow;
559 };
560 
561 struct iocg_wait {
562 	struct wait_queue_entry		wait;
563 	struct bio			*bio;
564 	u64				abs_cost;
565 	bool				committed;
566 };
567 
568 struct iocg_wake_ctx {
569 	struct ioc_gq			*iocg;
570 	u32				hw_inuse;
571 	s64				vbudget;
572 };
573 
574 static const struct ioc_params autop[] = {
575 	[AUTOP_HDD] = {
576 		.qos				= {
577 			[QOS_RLAT]		=        250000, /* 250ms */
578 			[QOS_WLAT]		=        250000,
579 			[QOS_MIN]		= VRATE_MIN_PPM,
580 			[QOS_MAX]		= VRATE_MAX_PPM,
581 		},
582 		.i_lcoefs			= {
583 			[I_LCOEF_RBPS]		=     174019176,
584 			[I_LCOEF_RSEQIOPS]	=         41708,
585 			[I_LCOEF_RRANDIOPS]	=           370,
586 			[I_LCOEF_WBPS]		=     178075866,
587 			[I_LCOEF_WSEQIOPS]	=         42705,
588 			[I_LCOEF_WRANDIOPS]	=           378,
589 		},
590 	},
591 	[AUTOP_SSD_QD1] = {
592 		.qos				= {
593 			[QOS_RLAT]		=         25000, /* 25ms */
594 			[QOS_WLAT]		=         25000,
595 			[QOS_MIN]		= VRATE_MIN_PPM,
596 			[QOS_MAX]		= VRATE_MAX_PPM,
597 		},
598 		.i_lcoefs			= {
599 			[I_LCOEF_RBPS]		=     245855193,
600 			[I_LCOEF_RSEQIOPS]	=         61575,
601 			[I_LCOEF_RRANDIOPS]	=          6946,
602 			[I_LCOEF_WBPS]		=     141365009,
603 			[I_LCOEF_WSEQIOPS]	=         33716,
604 			[I_LCOEF_WRANDIOPS]	=         26796,
605 		},
606 	},
607 	[AUTOP_SSD_DFL] = {
608 		.qos				= {
609 			[QOS_RLAT]		=         25000, /* 25ms */
610 			[QOS_WLAT]		=         25000,
611 			[QOS_MIN]		= VRATE_MIN_PPM,
612 			[QOS_MAX]		= VRATE_MAX_PPM,
613 		},
614 		.i_lcoefs			= {
615 			[I_LCOEF_RBPS]		=     488636629,
616 			[I_LCOEF_RSEQIOPS]	=          8932,
617 			[I_LCOEF_RRANDIOPS]	=          8518,
618 			[I_LCOEF_WBPS]		=     427891549,
619 			[I_LCOEF_WSEQIOPS]	=         28755,
620 			[I_LCOEF_WRANDIOPS]	=         21940,
621 		},
622 		.too_fast_vrate_pct		=           500,
623 	},
624 	[AUTOP_SSD_FAST] = {
625 		.qos				= {
626 			[QOS_RLAT]		=          5000, /* 5ms */
627 			[QOS_WLAT]		=          5000,
628 			[QOS_MIN]		= VRATE_MIN_PPM,
629 			[QOS_MAX]		= VRATE_MAX_PPM,
630 		},
631 		.i_lcoefs			= {
632 			[I_LCOEF_RBPS]		=    3102524156LLU,
633 			[I_LCOEF_RSEQIOPS]	=        724816,
634 			[I_LCOEF_RRANDIOPS]	=        778122,
635 			[I_LCOEF_WBPS]		=    1742780862LLU,
636 			[I_LCOEF_WSEQIOPS]	=        425702,
637 			[I_LCOEF_WRANDIOPS]	=        443193,
638 		},
639 		.too_slow_vrate_pct		=            10,
640 	},
641 };
642 
643 /*
644  * vrate adjust percentages indexed by ioc->busy_level.  We adjust up on
645  * vtime credit shortage and down on device saturation.
646  */
647 static u32 vrate_adj_pct[] =
648 	{ 0, 0, 0, 0,
649 	  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
650 	  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
651 	  4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
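
/*
 * Worked example (illustrative): busy_level == -8 indexes an adjustment
 * of 1, nudging vrate up to 101%; busy_level == 40 indexes 4, scaling
 * vrate down to 96%.  Longer runs of shortage or saturation thus drive
 * progressively larger corrections (see ioc_adjust_base_vrate()).
 */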
652 
653 static struct blkcg_policy blkcg_policy_iocost;
654 
655 /* accessors and helpers */
656 static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
657 {
658 	return container_of(rqos, struct ioc, rqos);
659 }
660 
661 static struct ioc *q_to_ioc(struct request_queue *q)
662 {
663 	return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
664 }
665 
666 static const char __maybe_unused *ioc_name(struct ioc *ioc)
667 {
668 	struct gendisk *disk = ioc->rqos.q->disk;
669 
670 	if (!disk)
671 		return "<unknown>";
672 	return disk->disk_name;
673 }
674 
675 static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
676 {
677 	return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
678 }
679 
680 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
681 {
682 	return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
683 }
684 
685 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
686 {
687 	return pd_to_blkg(&iocg->pd);
688 }
689 
690 static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
691 {
692 	return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
693 			    struct ioc_cgrp, cpd);
694 }
695 
696 /*
697  * Scale @abs_cost to the inverse of @hw_inuse.  The lower the hierarchical
698  * weight, the more expensive each IO.  Must round up.
699  */
700 static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
701 {
702 	return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
703 }
704 
705 /*
706  * The inverse of abs_cost_to_cost().  Must round up.
707  */
708 static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
709 {
710 	return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
711 }
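
/*
 * Illustrative sketch (an addition for exposition, not used by the
 * controller): with hweight_inuse at 25%, each unit of absolute cost
 * charges four units of vtime and the conversion round-trips modulo
 * rounding.
 */
static void __maybe_unused iocg_cost_conversion_example(void)
{
	u32 hw_inuse = WEIGHT_ONE / 4;			/* 25% share */
	u64 cost = abs_cost_to_cost(1000, hw_inuse);	/* 1000 * 4 = 4000 */
	u64 abs_cost = cost_to_abs_cost(cost, hw_inuse);/* back to 1000 */

	WARN_ON_ONCE(cost != 4000 || abs_cost != 1000);
}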
712 
713 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
714 			    u64 abs_cost, u64 cost)
715 {
716 	struct iocg_pcpu_stat *gcs;
717 
718 	bio->bi_iocost_cost = cost;
719 	atomic64_add(cost, &iocg->vtime);
720 
721 	gcs = get_cpu_ptr(iocg->pcpu_stat);
722 	local64_add(abs_cost, &gcs->abs_vusage);
723 	put_cpu_ptr(gcs);
724 }
725 
726 static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
727 {
728 	if (lock_ioc) {
729 		spin_lock_irqsave(&iocg->ioc->lock, *flags);
730 		spin_lock(&iocg->waitq.lock);
731 	} else {
732 		spin_lock_irqsave(&iocg->waitq.lock, *flags);
733 	}
734 }
735 
736 static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
737 {
738 	if (unlock_ioc) {
739 		spin_unlock(&iocg->waitq.lock);
740 		spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
741 	} else {
742 		spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
743 	}
744 }
745 
746 #define CREATE_TRACE_POINTS
747 #include <trace/events/iocost.h>
748 
749 static void ioc_refresh_margins(struct ioc *ioc)
750 {
751 	struct ioc_margins *margins = &ioc->margins;
752 	u32 period_us = ioc->period_us;
753 	u64 vrate = ioc->vtime_base_rate;
754 
755 	margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
756 	margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
757 	margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
758 }
759 
760 /* latency QoS params changed, update period_us and all the dependent params */
761 static void ioc_refresh_period_us(struct ioc *ioc)
762 {
763 	u32 ppm, lat, multi, period_us;
764 
765 	lockdep_assert_held(&ioc->lock);
766 
767 	/* pick the higher latency target */
768 	if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
769 		ppm = ioc->params.qos[QOS_RPPM];
770 		lat = ioc->params.qos[QOS_RLAT];
771 	} else {
772 		ppm = ioc->params.qos[QOS_WPPM];
773 		lat = ioc->params.qos[QOS_WLAT];
774 	}
775 
776 	/*
777 	 * We want the period to be long enough to contain a healthy number
778 	 * of IOs while short enough for granular control.  Define it as a
779 	 * multiple of the latency target.  Ideally, the multiplier should
780 	 * be scaled according to the percentile so that it would nominally
781 	 * contain a certain number of requests.  Let's be simpler and
782 	 * scale it linearly so that it's 2x at pct(90) and above, and 10x at pct(50).
783 	 */
784 	if (ppm)
785 		multi = max_t(u32, (MILLION - ppm) / 50000, 2);
786 	else
787 		multi = 2;
788 	period_us = multi * lat;
789 	period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
790 
791 	/* calculate dependent params */
792 	ioc->period_us = period_us;
793 	ioc->timer_slack_ns = div64_u64(
794 		(u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
795 		100);
796 	ioc_refresh_margins(ioc);
797 }
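
/*
 * Worked example (illustrative): with QOS_RLAT = 25000us at pct(95)
 * (ppm = 950000), multi = max((1000000 - 950000) / 50000, 2) = 2 and
 * period_us = 50000.  At pct(50) (ppm = 500000), multi grows to 10 and
 * the period stretches to 250000us, still within [MIN_PERIOD, MAX_PERIOD].
 */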
798 
799 static int ioc_autop_idx(struct ioc *ioc)
800 {
801 	int idx = ioc->autop_idx;
802 	const struct ioc_params *p = &autop[idx];
803 	u32 vrate_pct;
804 	u64 now_ns;
805 
806 	/* rotational? */
807 	if (!blk_queue_nonrot(ioc->rqos.q))
808 		return AUTOP_HDD;
809 
810 	/* handle SATA SSDs w/ broken NCQ */
811 	if (blk_queue_depth(ioc->rqos.q) == 1)
812 		return AUTOP_SSD_QD1;
813 
814 	/* use one of the normal ssd sets */
815 	if (idx < AUTOP_SSD_DFL)
816 		return AUTOP_SSD_DFL;
817 
818 	/* if user is overriding anything, maintain what was there */
819 	if (ioc->user_qos_params || ioc->user_cost_model)
820 		return idx;
821 
822 	/* step up/down based on the vrate */
823 	vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
824 	now_ns = ktime_get_ns();
825 
826 	if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
827 		if (!ioc->autop_too_fast_at)
828 			ioc->autop_too_fast_at = now_ns;
829 		if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
830 			return idx + 1;
831 	} else {
832 		ioc->autop_too_fast_at = 0;
833 	}
834 
835 	if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
836 		if (!ioc->autop_too_slow_at)
837 			ioc->autop_too_slow_at = now_ns;
838 		if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
839 			return idx - 1;
840 	} else {
841 		ioc->autop_too_slow_at = 0;
842 	}
843 
844 	return idx;
845 }
846 
847 /*
848  * Take the following as input
849  *
850  *  @bps	maximum sequential throughput
851  *  @seqiops	maximum sequential 4k iops
852  *  @randiops	maximum random 4k iops
853  *
854  * and calculate the linear model cost coefficients.
855  *
856  *  *@page	per-page cost		1s / (@bps / 4096)
857  *  *@seqio	base cost of a seq IO	max((1s / @seqiops) - *@page, 0)
858  *  *@randio	base cost of a rand IO	max((1s / @randiops) - *@page, 0)
859  */
860 static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
861 			u64 *page, u64 *seqio, u64 *randio)
862 {
863 	u64 v;
864 
865 	*page = *seqio = *randio = 0;
866 
867 	if (bps)
868 		*page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
869 					   DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));
870 
871 	if (seqiops) {
872 		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
873 		if (v > *page)
874 			*seqio = v - *page;
875 	}
876 
877 	if (randiops) {
878 		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
879 		if (v > *page)
880 			*randio = v - *page;
881 	}
882 }
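
/*
 * Illustrative sketch (exposition only, not part of the controller):
 * feeding the AUTOP_HDD read coefficients from the autop table through
 * the solver.  174MB/s is ~42486 4k pages/s, so *page becomes
 * VTIME_PER_SEC / 42486, and a random IOP at 370/s costs
 * VTIME_PER_SEC / 370 minus that per-page component.
 */
static void __maybe_unused calc_lcoefs_example(void)
{
	u64 page, seqio, randio;

	calc_lcoefs(174019176, 41708, 370, &page, &seqio, &randio);
	WARN_ON_ONCE(page == 0 || randio <= seqio);
}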
883 
884 static void ioc_refresh_lcoefs(struct ioc *ioc)
885 {
886 	u64 *u = ioc->params.i_lcoefs;
887 	u64 *c = ioc->params.lcoefs;
888 
889 	calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
890 		    &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
891 	calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
892 		    &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
893 }
894 
895 static bool ioc_refresh_params(struct ioc *ioc, bool force)
896 {
897 	const struct ioc_params *p;
898 	int idx;
899 
900 	lockdep_assert_held(&ioc->lock);
901 
902 	idx = ioc_autop_idx(ioc);
903 	p = &autop[idx];
904 
905 	if (idx == ioc->autop_idx && !force)
906 		return false;
907 
908 	if (idx != ioc->autop_idx) {
909 		atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
910 		ioc->vtime_base_rate = VTIME_PER_USEC;
911 	}
912 
913 	ioc->autop_idx = idx;
914 	ioc->autop_too_fast_at = 0;
915 	ioc->autop_too_slow_at = 0;
916 
917 	if (!ioc->user_qos_params)
918 		memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
919 	if (!ioc->user_cost_model)
920 		memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
921 
922 	ioc_refresh_period_us(ioc);
923 	ioc_refresh_lcoefs(ioc);
924 
925 	ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
926 					    VTIME_PER_USEC, MILLION);
927 	ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
928 				   VTIME_PER_USEC, MILLION);
929 
930 	return true;
931 }
932 
933 /*
934  * When an iocg accumulates too much vtime or gets deactivated, we throw away
935  * some vtime, which lowers the overall device utilization. As the exact amount
936  * which is being thrown away is known, we can compensate by accelerating the
937  * vrate accordingly so that the extra vtime generated in the current period
938  * matches what got lost.
939  */
940 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
941 {
942 	s64 pleft = ioc->period_at + ioc->period_us - now->now;
943 	s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
944 	s64 vcomp, vcomp_min, vcomp_max;
945 
946 	lockdep_assert_held(&ioc->lock);
947 
948 	/* we need some time left in this period */
949 	if (pleft <= 0)
950 		goto done;
951 
952 	/*
953 	 * Calculate how much vrate should be adjusted to offset the error.
954 	 * Limit the amount of adjustment and deduct the adjusted amount from
955 	 * the error.
956 	 */
957 	vcomp = -div64_s64(ioc->vtime_err, pleft);
958 	vcomp_min = -(ioc->vtime_base_rate >> 1);
959 	vcomp_max = ioc->vtime_base_rate;
960 	vcomp = clamp(vcomp, vcomp_min, vcomp_max);
961 
962 	ioc->vtime_err += vcomp * pleft;
963 
964 	atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
965 done:
966 	/* bound how much error can accumulate */
967 	ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
968 }
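
/*
 * Worked example (illustrative): if 100ms worth of vtime was thrown away
 * (vtime_err == -100000 * vtime_base_rate) and 50ms remain in the period,
 * the uncapped vcomp would be 2 * vtime_base_rate.  The clamp limits the
 * boost to +100%, so vtime_rate doubles for the rest of the period and
 * the uncompensated half of the error carries over to the next one.
 */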
969 
970 static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct,
971 				  int nr_lagging, int nr_shortages,
972 				  int prev_busy_level, u32 *missed_ppm)
973 {
974 	u64 vrate = ioc->vtime_base_rate;
975 	u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
976 
977 	if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) {
978 		if (ioc->busy_level != prev_busy_level || nr_lagging)
979 			trace_iocost_ioc_vrate_adj(ioc, vrate,
980 						   missed_ppm, rq_wait_pct,
981 						   nr_lagging, nr_shortages);
982 
983 		return;
984 	}
985 
986 	/*
987 	 * If vrate is out of bounds, apply clamp gradually as the
988 	 * bounds can change abruptly.  Otherwise, apply busy_level
989 	 * based adjustment.
990 	 */
991 	if (vrate < vrate_min) {
992 		vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
993 		vrate = min(vrate, vrate_min);
994 	} else if (vrate > vrate_max) {
995 		vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
996 		vrate = max(vrate, vrate_max);
997 	} else {
998 		int idx = min_t(int, abs(ioc->busy_level),
999 				ARRAY_SIZE(vrate_adj_pct) - 1);
1000 		u32 adj_pct = vrate_adj_pct[idx];
1001 
1002 		if (ioc->busy_level > 0)
1003 			adj_pct = 100 - adj_pct;
1004 		else
1005 			adj_pct = 100 + adj_pct;
1006 
1007 		vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
1008 			      vrate_min, vrate_max);
1009 	}
1010 
1011 	trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
1012 				   nr_lagging, nr_shortages);
1013 
1014 	ioc->vtime_base_rate = vrate;
1015 	ioc_refresh_margins(ioc);
1016 }
1017 
1018 /* take a snapshot of the current [v]time and vrate */
1019 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
1020 {
1021 	unsigned seq;
1022 	u64 vrate;
1023 
1024 	now->now_ns = ktime_get();
1025 	now->now = ktime_to_us(now->now_ns);
1026 	vrate = atomic64_read(&ioc->vtime_rate);
1027 
1028 	/*
1029 	 * The current vtime is
1030 	 *
1031 	 *   vtime at period start + (wallclock time since the start) * vrate
1032 	 *
1033 	 * As a consistent snapshot of `period_at_vtime` and `period_at` is
1034 	 * needed, they're seqcount protected.
1035 	 */
1036 	do {
1037 		seq = read_seqcount_begin(&ioc->period_seqcount);
1038 		now->vnow = ioc->period_at_vtime +
1039 			(now->now - ioc->period_at) * vrate;
1040 	} while (read_seqcount_retry(&ioc->period_seqcount, seq));
1041 }
1042 
1043 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
1044 {
1045 	WARN_ON_ONCE(ioc->running != IOC_RUNNING);
1046 
1047 	write_seqcount_begin(&ioc->period_seqcount);
1048 	ioc->period_at = now->now;
1049 	ioc->period_at_vtime = now->vnow;
1050 	write_seqcount_end(&ioc->period_seqcount);
1051 
1052 	ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
1053 	add_timer(&ioc->timer);
1054 }
1055 
1056 /*
1057  * Update @iocg's `active` and `inuse` to @active and @inuse, update level
1058  * weight sums and propagate upwards accordingly. If @save, the current margin
1059  * is saved to be used as reference for later inuse in-period adjustments.
1060  */
1061 static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1062 				bool save, struct ioc_now *now)
1063 {
1064 	struct ioc *ioc = iocg->ioc;
1065 	int lvl;
1066 
1067 	lockdep_assert_held(&ioc->lock);
1068 
1069 	/*
1070 	 * For an active leaf node, its inuse shouldn't be zero or exceed
1071 	 * @active. An active internal node's inuse is solely determined by the
1072 	 * inuse to active ratio of its children regardless of @inuse.
1073 	 */
1074 	if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
1075 		inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
1076 					   iocg->child_active_sum);
1077 	} else {
1078 		inuse = clamp_t(u32, inuse, 1, active);
1079 	}
1080 
1081 	iocg->last_inuse = iocg->inuse;
1082 	if (save)
1083 		iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
1084 
1085 	if (active == iocg->active && inuse == iocg->inuse)
1086 		return;
1087 
1088 	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1089 		struct ioc_gq *parent = iocg->ancestors[lvl];
1090 		struct ioc_gq *child = iocg->ancestors[lvl + 1];
1091 		u32 parent_active = 0, parent_inuse = 0;
1092 
1093 		/* update the level sums */
1094 		parent->child_active_sum += (s32)(active - child->active);
1095 		parent->child_inuse_sum += (s32)(inuse - child->inuse);
1096 		/* apply the updates */
1097 		child->active = active;
1098 		child->inuse = inuse;
1099 
1100 		/*
1101 		 * The delta between the inuse and active sums indicates how
1102 		 * much weight is being given away.  Parent's inuse
1103 		 * and active should reflect the ratio.
1104 		 */
1105 		if (parent->child_active_sum) {
1106 			parent_active = parent->weight;
1107 			parent_inuse = DIV64_U64_ROUND_UP(
1108 				parent_active * parent->child_inuse_sum,
1109 				parent->child_active_sum);
1110 		}
1111 
1112 		/* do we need to keep walking up? */
1113 		if (parent_active == parent->active &&
1114 		    parent_inuse == parent->inuse)
1115 			break;
1116 
1117 		active = parent_active;
1118 		inuse = parent_inuse;
1119 	}
1120 
1121 	ioc->weights_updated = true;
1122 }
1123 
1124 static void commit_weights(struct ioc *ioc)
1125 {
1126 	lockdep_assert_held(&ioc->lock);
1127 
1128 	if (ioc->weights_updated) {
1129 		/* paired with rmb in current_hweight(), see there */
1130 		smp_wmb();
1131 		atomic_inc(&ioc->hweight_gen);
1132 		ioc->weights_updated = false;
1133 	}
1134 }
1135 
1136 static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1137 			      bool save, struct ioc_now *now)
1138 {
1139 	__propagate_weights(iocg, active, inuse, save, now);
1140 	commit_weights(iocg->ioc);
1141 }
1142 
1143 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
1144 {
1145 	struct ioc *ioc = iocg->ioc;
1146 	int lvl;
1147 	u32 hwa, hwi;
1148 	int ioc_gen;
1149 
1150 	/* hot path - if uptodate, use cached */
1151 	ioc_gen = atomic_read(&ioc->hweight_gen);
1152 	if (ioc_gen == iocg->hweight_gen)
1153 		goto out;
1154 
1155 	/*
1156 	 * Paired with wmb in commit_weights(). If we saw the updated
1157 	 * hweight_gen, all the weight updates from __propagate_weights() are
1158 	 * visible too.
1159 	 *
1160 	 * We can race with weight updates during calculation and get it
1161 	 * wrong.  However, hweight_gen would have changed and a future
1162 	 * reader will recalculate and we're guaranteed to discard the
1163 	 * wrong result soon.
1164 	 */
1165 	smp_rmb();
1166 
1167 	hwa = hwi = WEIGHT_ONE;
1168 	for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
1169 		struct ioc_gq *parent = iocg->ancestors[lvl];
1170 		struct ioc_gq *child = iocg->ancestors[lvl + 1];
1171 		u64 active_sum = READ_ONCE(parent->child_active_sum);
1172 		u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
1173 		u32 active = READ_ONCE(child->active);
1174 		u32 inuse = READ_ONCE(child->inuse);
1175 
1176 		/* we can race with deactivations and either may read as zero */
1177 		if (!active_sum || !inuse_sum)
1178 			continue;
1179 
1180 		active_sum = max_t(u64, active, active_sum);
1181 		hwa = div64_u64((u64)hwa * active, active_sum);
1182 
1183 		inuse_sum = max_t(u64, inuse, inuse_sum);
1184 		hwi = div64_u64((u64)hwi * inuse, inuse_sum);
1185 	}
1186 
1187 	iocg->hweight_active = max_t(u32, hwa, 1);
1188 	iocg->hweight_inuse = max_t(u32, hwi, 1);
1189 	iocg->hweight_gen = ioc_gen;
1190 out:
1191 	if (hw_activep)
1192 		*hw_activep = iocg->hweight_active;
1193 	if (hw_inusep)
1194 		*hw_inusep = iocg->hweight_inuse;
1195 }
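
/*
 * Worked example (illustrative): in the hierarchy from the comment at the
 * top of this file with A0, A1 and B all active, A0's walk multiplies
 * WEIGHT_ONE by 100/400 at the root level and by 100/200 at A's level,
 * yielding an hweight_active of 12.5% of WEIGHT_ONE - the flattened
 * share described in section 2-1.
 */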
1196 
1197 /*
1198  * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
1199  * other weights stay unchanged.
1200  */
1201 static u32 current_hweight_max(struct ioc_gq *iocg)
1202 {
1203 	u32 hwm = WEIGHT_ONE;
1204 	u32 inuse = iocg->active;
1205 	u64 child_inuse_sum;
1206 	int lvl;
1207 
1208 	lockdep_assert_held(&iocg->ioc->lock);
1209 
1210 	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1211 		struct ioc_gq *parent = iocg->ancestors[lvl];
1212 		struct ioc_gq *child = iocg->ancestors[lvl + 1];
1213 
1214 		child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
1215 		hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
1216 		inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
1217 					   parent->child_active_sum);
1218 	}
1219 
1220 	return max_t(u32, hwm, 1);
1221 }
1222 
1223 static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
1224 {
1225 	struct ioc *ioc = iocg->ioc;
1226 	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1227 	struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1228 	u32 weight;
1229 
1230 	lockdep_assert_held(&ioc->lock);
1231 
1232 	weight = iocg->cfg_weight ?: iocc->dfl_weight;
1233 	if (weight != iocg->weight && iocg->active)
1234 		propagate_weights(iocg, weight, iocg->inuse, true, now);
1235 	iocg->weight = weight;
1236 }
1237 
1238 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1239 {
1240 	struct ioc *ioc = iocg->ioc;
1241 	u64 last_period, cur_period;
1242 	u64 vtime, vtarget;
1243 	int i;
1244 
1245 	/*
1246 	 * If we seem to be already active, just update the stamp to tell the
1247 	 * timer that we're still active.  We don't mind occasional races.
1248 	 */
1249 	if (!list_empty(&iocg->active_list)) {
1250 		ioc_now(ioc, now);
1251 		cur_period = atomic64_read(&ioc->cur_period);
1252 		if (atomic64_read(&iocg->active_period) != cur_period)
1253 			atomic64_set(&iocg->active_period, cur_period);
1254 		return true;
1255 	}
1256 
1257 	/* racy check on internal node IOs, treat as root level IOs */
1258 	if (iocg->child_active_sum)
1259 		return false;
1260 
1261 	spin_lock_irq(&ioc->lock);
1262 
1263 	ioc_now(ioc, now);
1264 
1265 	/* update period */
1266 	cur_period = atomic64_read(&ioc->cur_period);
1267 	last_period = atomic64_read(&iocg->active_period);
1268 	atomic64_set(&iocg->active_period, cur_period);
1269 
1270 	/* already activated or breaking leaf-only constraint? */
1271 	if (!list_empty(&iocg->active_list))
1272 		goto succeed_unlock;
1273 	for (i = iocg->level - 1; i > 0; i--)
1274 		if (!list_empty(&iocg->ancestors[i]->active_list))
1275 			goto fail_unlock;
1276 
1277 	if (iocg->child_active_sum)
1278 		goto fail_unlock;
1279 
1280 	/*
1281 	 * Always start with the target budget. On deactivation, we throw away
1282 	 * anything above it.
1283 	 */
1284 	vtarget = now->vnow - ioc->margins.target;
1285 	vtime = atomic64_read(&iocg->vtime);
1286 
1287 	atomic64_add(vtarget - vtime, &iocg->vtime);
1288 	atomic64_add(vtarget - vtime, &iocg->done_vtime);
1289 	vtime = vtarget;
1290 
1291 	/*
1292 	 * Activate, propagate weight and start period timer if not
1293 	 * running.  Reset hweight_gen to avoid accidental match from
1294 	 * wrapping.
1295 	 */
1296 	iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1297 	list_add(&iocg->active_list, &ioc->active_iocgs);
1298 
1299 	propagate_weights(iocg, iocg->weight,
1300 			  iocg->last_inuse ?: iocg->weight, true, now);
1301 
1302 	TRACE_IOCG_PATH(iocg_activate, iocg, now,
1303 			last_period, cur_period, vtime);
1304 
1305 	iocg->activated_at = now->now;
1306 
1307 	if (ioc->running == IOC_IDLE) {
1308 		ioc->running = IOC_RUNNING;
1309 		ioc->dfgv_period_at = now->now;
1310 		ioc->dfgv_period_rem = 0;
1311 		ioc_start_period(ioc, now);
1312 	}
1313 
1314 succeed_unlock:
1315 	spin_unlock_irq(&ioc->lock);
1316 	return true;
1317 
1318 fail_unlock:
1319 	spin_unlock_irq(&ioc->lock);
1320 	return false;
1321 }
1322 
1323 static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
1324 {
1325 	struct ioc *ioc = iocg->ioc;
1326 	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1327 	u64 tdelta, delay, new_delay;
1328 	s64 vover, vover_pct;
1329 	u32 hwa;
1330 
1331 	lockdep_assert_held(&iocg->waitq.lock);
1332 
1333 	/* calculate the current delay in effect - 1/2 every second */
1334 	tdelta = now->now - iocg->delay_at;
1335 	if (iocg->delay)
1336 		delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
1337 	else
1338 		delay = 0;
1339 
1340 	/* calculate the new delay from the debt amount */
1341 	current_hweight(iocg, &hwa, NULL);
1342 	vover = atomic64_read(&iocg->vtime) +
1343 		abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
1344 	vover_pct = div64_s64(100 * vover,
1345 			      ioc->period_us * ioc->vtime_base_rate);
1346 
1347 	if (vover_pct <= MIN_DELAY_THR_PCT)
1348 		new_delay = 0;
1349 	else if (vover_pct >= MAX_DELAY_THR_PCT)
1350 		new_delay = MAX_DELAY;
1351 	else
1352 		new_delay = MIN_DELAY +
1353 			div_u64((MAX_DELAY - MIN_DELAY) *
1354 				(vover_pct - MIN_DELAY_THR_PCT),
1355 				MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
1356 
1357 	/* pick the higher one and apply */
1358 	if (new_delay > delay) {
1359 		iocg->delay = new_delay;
1360 		iocg->delay_at = now->now;
1361 		delay = new_delay;
1362 	}
1363 
1364 	if (delay >= MIN_DELAY) {
1365 		if (!iocg->indelay_since)
1366 			iocg->indelay_since = now->now;
1367 		blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
1368 		return true;
1369 	} else {
1370 		if (iocg->indelay_since) {
1371 			iocg->stat.indelay_us += now->now - iocg->indelay_since;
1372 			iocg->indelay_since = 0;
1373 		}
1374 		iocg->delay = 0;
1375 		blkcg_clear_delay(blkg);
1376 		return false;
1377 	}
1378 }
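
/*
 * Worked example (illustrative): a delay of 8ms set two seconds ago has
 * decayed to 8ms >> 2 = 2ms.  If the debt overage sits at 12750% of a
 * period - halfway between MIN_DELAY_THR_PCT and MAX_DELAY_THR_PCT - the
 * new delay interpolates to ~125ms, wins over the decayed value and is
 * applied via blkcg_set_delay().
 */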
1379 
1380 static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
1381 			    struct ioc_now *now)
1382 {
1383 	struct iocg_pcpu_stat *gcs;
1384 
1385 	lockdep_assert_held(&iocg->ioc->lock);
1386 	lockdep_assert_held(&iocg->waitq.lock);
1387 	WARN_ON_ONCE(list_empty(&iocg->active_list));
1388 
1389 	/*
1390 	 * Once in debt, debt handling owns inuse. @iocg stays at the minimum
1391 	 * inuse, donating all of its share to others until its debt is paid off.
1392 	 */
1393 	if (!iocg->abs_vdebt && abs_cost) {
1394 		iocg->indebt_since = now->now;
1395 		propagate_weights(iocg, iocg->active, 0, false, now);
1396 	}
1397 
1398 	iocg->abs_vdebt += abs_cost;
1399 
1400 	gcs = get_cpu_ptr(iocg->pcpu_stat);
1401 	local64_add(abs_cost, &gcs->abs_vusage);
1402 	put_cpu_ptr(gcs);
1403 }
1404 
1405 static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
1406 			  struct ioc_now *now)
1407 {
1408 	lockdep_assert_held(&iocg->ioc->lock);
1409 	lockdep_assert_held(&iocg->waitq.lock);
1410 
1411 	/* make sure that nobody messed with @iocg */
1412 	WARN_ON_ONCE(list_empty(&iocg->active_list));
1413 	WARN_ON_ONCE(iocg->inuse > 1);
1414 
1415 	iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
1416 
1417 	/* if debt is paid in full, restore inuse */
1418 	if (!iocg->abs_vdebt) {
1419 		iocg->stat.indebt_us += now->now - iocg->indebt_since;
1420 		iocg->indebt_since = 0;
1421 
1422 		propagate_weights(iocg, iocg->active, iocg->last_inuse,
1423 				  false, now);
1424 	}
1425 }
1426 
1427 static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1428 			int flags, void *key)
1429 {
1430 	struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1431 	struct iocg_wake_ctx *ctx = key;
1432 	u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1433 
1434 	ctx->vbudget -= cost;
1435 
1436 	if (ctx->vbudget < 0)
1437 		return -1;
1438 
1439 	iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
1440 	wait->committed = true;
1441 
1442 	/*
1443 	 * autoremove_wake_function() removes the wait entry only when it
1444 	 * actually changed the task state. We want the wait always removed.
1445 	 * Remove explicitly and use default_wake_function(). Note that the
1446 	 * order of operations is important as finish_wait() tests whether
1447 	 * @wq_entry is removed without grabbing the lock.
1448 	 */
1449 	default_wake_function(wq_entry, mode, flags, key);
1450 	list_del_init_careful(&wq_entry->entry);
1451 	return 0;
1452 }
1453 
1454 /*
1455  * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
1456  * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
1457  * addition to iocg->waitq.lock.
1458  */
1459 static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
1460 			    struct ioc_now *now)
1461 {
1462 	struct ioc *ioc = iocg->ioc;
1463 	struct iocg_wake_ctx ctx = { .iocg = iocg };
1464 	u64 vshortage, expires, oexpires;
1465 	s64 vbudget;
1466 	u32 hwa;
1467 
1468 	lockdep_assert_held(&iocg->waitq.lock);
1469 
1470 	current_hweight(iocg, &hwa, NULL);
1471 	vbudget = now->vnow - atomic64_read(&iocg->vtime);
1472 
1473 	/* pay off debt */
1474 	if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
1475 		u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
1476 		u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
1477 		u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
1478 
1479 		lockdep_assert_held(&ioc->lock);
1480 
1481 		atomic64_add(vpay, &iocg->vtime);
1482 		atomic64_add(vpay, &iocg->done_vtime);
1483 		iocg_pay_debt(iocg, abs_vpay, now);
1484 		vbudget -= vpay;
1485 	}
1486 
1487 	if (iocg->abs_vdebt || iocg->delay)
1488 		iocg_kick_delay(iocg, now);
1489 
1490 	/*
1491 	 * Debt can still be outstanding if we haven't paid all yet or the
1492 	 * caller raced and called without @pay_debt. Shouldn't wake up waiters
1493 	 * under debt. Make sure @vbudget reflects the outstanding amount and is
1494 	 * not positive.
1495 	 */
1496 	if (iocg->abs_vdebt) {
1497 		s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
1498 		vbudget = min_t(s64, 0, vbudget - vdebt);
1499 	}
1500 
1501 	/*
1502 	 * Wake up the ones which are due and see how much vtime we'll need for
1503 	 * the next one. As paying off debt restores hw_inuse, it must be read
1504 	 * after the above debt payment.
1505 	 */
1506 	ctx.vbudget = vbudget;
1507 	current_hweight(iocg, NULL, &ctx.hw_inuse);
1508 
1509 	__wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1510 
1511 	if (!waitqueue_active(&iocg->waitq)) {
1512 		if (iocg->wait_since) {
1513 			iocg->stat.wait_us += now->now - iocg->wait_since;
1514 			iocg->wait_since = 0;
1515 		}
1516 		return;
1517 	}
1518 
1519 	if (!iocg->wait_since)
1520 		iocg->wait_since = now->now;
1521 
1522 	if (WARN_ON_ONCE(ctx.vbudget >= 0))
1523 		return;
1524 
1525 	/* determine next wakeup, add a timer margin to guarantee chunking */
1526 	vshortage = -ctx.vbudget;
1527 	expires = now->now_ns +
1528 		DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
1529 		NSEC_PER_USEC;
1530 	expires += ioc->timer_slack_ns;
1531 
1532 	/* if already active and close enough, don't bother */
1533 	oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1534 	if (hrtimer_is_queued(&iocg->waitq_timer) &&
1535 	    abs(oexpires - expires) <= ioc->timer_slack_ns)
1536 		return;
1537 
1538 	hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1539 			       ioc->timer_slack_ns, HRTIMER_MODE_ABS);
1540 }
1541 
1542 static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1543 {
1544 	struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1545 	bool pay_debt = READ_ONCE(iocg->abs_vdebt);
1546 	struct ioc_now now;
1547 	unsigned long flags;
1548 
1549 	ioc_now(iocg->ioc, &now);
1550 
1551 	iocg_lock(iocg, pay_debt, &flags);
1552 	iocg_kick_waitq(iocg, pay_debt, &now);
1553 	iocg_unlock(iocg, pay_debt, &flags);
1554 
1555 	return HRTIMER_NORESTART;
1556 }
1557 
1558 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1559 {
1560 	u32 nr_met[2] = { };
1561 	u32 nr_missed[2] = { };
1562 	u64 rq_wait_ns = 0;
1563 	int cpu, rw;
1564 
1565 	for_each_online_cpu(cpu) {
1566 		struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1567 		u64 this_rq_wait_ns;
1568 
1569 		for (rw = READ; rw <= WRITE; rw++) {
1570 			u32 this_met = local_read(&stat->missed[rw].nr_met);
1571 			u32 this_missed = local_read(&stat->missed[rw].nr_missed);
1572 
1573 			nr_met[rw] += this_met - stat->missed[rw].last_met;
1574 			nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1575 			stat->missed[rw].last_met = this_met;
1576 			stat->missed[rw].last_missed = this_missed;
1577 		}
1578 
1579 		this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
1580 		rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1581 		stat->last_rq_wait_ns = this_rq_wait_ns;
1582 	}
1583 
1584 	for (rw = READ; rw <= WRITE; rw++) {
1585 		if (nr_met[rw] + nr_missed[rw])
1586 			missed_ppm_ar[rw] =
1587 				DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1588 						   nr_met[rw] + nr_missed[rw]);
1589 		else
1590 			missed_ppm_ar[rw] = 0;
1591 	}
1592 
1593 	*rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1594 				   ioc->period_us * NSEC_PER_USEC);
1595 }
1596 
1597 /* was iocg idle this period? */
1598 static bool iocg_is_idle(struct ioc_gq *iocg)
1599 {
1600 	struct ioc *ioc = iocg->ioc;
1601 
1602 	/* did something get issued this period? */
1603 	if (atomic64_read(&iocg->active_period) ==
1604 	    atomic64_read(&ioc->cur_period))
1605 		return false;
1606 
1607 	/* is something in flight? */
1608 	if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1609 		return false;
1610 
1611 	return true;
1612 }
1613 
1614 /*
1615  * Call this function on the target leaf @iocgs to build a pre-order traversal
1616  * list of all their ancestors in @inner_walk. The inner nodes are linked through
1617  * ->walk_list and the caller is responsible for dissolving the list after use.
1618  */
1619 static void iocg_build_inner_walk(struct ioc_gq *iocg,
1620 				  struct list_head *inner_walk)
1621 {
1622 	int lvl;
1623 
1624 	WARN_ON_ONCE(!list_empty(&iocg->walk_list));
1625 
1626 	/* find the first ancestor which hasn't been visited yet */
1627 	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1628 		if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1629 			break;
1630 	}
1631 
1632 	/* walk down and visit the inner nodes to get pre-order traversal */
1633 	while (++lvl <= iocg->level - 1) {
1634 		struct ioc_gq *inner = iocg->ancestors[lvl];
1635 
1636 		/* record traversal order */
1637 		list_add_tail(&inner->walk_list, inner_walk);
1638 	}
1639 }
1640 
1641 /* propagate the deltas to the parent */
1642 static void iocg_flush_stat_upward(struct ioc_gq *iocg)
1643 {
1644 	if (iocg->level > 0) {
1645 		struct iocg_stat *parent_stat =
1646 			&iocg->ancestors[iocg->level - 1]->stat;
1647 
1648 		parent_stat->usage_us +=
1649 			iocg->stat.usage_us - iocg->last_stat.usage_us;
1650 		parent_stat->wait_us +=
1651 			iocg->stat.wait_us - iocg->last_stat.wait_us;
1652 		parent_stat->indebt_us +=
1653 			iocg->stat.indebt_us - iocg->last_stat.indebt_us;
1654 		parent_stat->indelay_us +=
1655 			iocg->stat.indelay_us - iocg->last_stat.indelay_us;
1656 	}
1657 
1658 	iocg->last_stat = iocg->stat;
1659 }
1660 
1661 /* collect per-cpu counters and propagate the deltas to the parent */
1662 static void iocg_flush_stat_leaf(struct ioc_gq *iocg, struct ioc_now *now)
1663 {
1664 	struct ioc *ioc = iocg->ioc;
1665 	u64 abs_vusage = 0;
1666 	u64 vusage_delta;
1667 	int cpu;
1668 
1669 	lockdep_assert_held(&iocg->ioc->lock);
1670 
1671 	/* collect per-cpu counters */
1672 	for_each_possible_cpu(cpu) {
1673 		abs_vusage += local64_read(
1674 				per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
1675 	}
1676 	vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
1677 	iocg->last_stat_abs_vusage = abs_vusage;
1678 
1679 	iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
1680 	iocg->stat.usage_us += iocg->usage_delta_us;
1681 
1682 	iocg_flush_stat_upward(iocg);
1683 }
1684 
1685 /* get stat counters ready for reading on all active iocgs */
1686 static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
1687 {
1688 	LIST_HEAD(inner_walk);
1689 	struct ioc_gq *iocg, *tiocg;
1690 
1691 	/* flush leaves and build inner node walk list */
1692 	list_for_each_entry(iocg, target_iocgs, active_list) {
1693 		iocg_flush_stat_leaf(iocg, now);
1694 		iocg_build_inner_walk(iocg, &inner_walk);
1695 	}
1696 
1697 	/* keep flushing upwards by walking the inner list backwards */
1698 	list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
1699 		iocg_flush_stat_upward(iocg);
1700 		list_del_init(&iocg->walk_list);
1701 	}
1702 }
1703 
1704 /*
1705  * Determine what @iocg's hweight_inuse should be after donating unused
1706  * capacity. @hwm is the upper bound and used to signal no donation. This
1707  * function also throws away @iocg's excess budget.
1708  */
1709 static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
1710 				  u32 usage, struct ioc_now *now)
1711 {
1712 	struct ioc *ioc = iocg->ioc;
1713 	u64 vtime = atomic64_read(&iocg->vtime);
1714 	s64 excess, delta, target, new_hwi;
1715 
1716 	/* debt handling owns inuse for debtors */
1717 	if (iocg->abs_vdebt)
1718 		return 1;
1719 
1720 	/* see whether minimum margin requirement is met */
1721 	if (waitqueue_active(&iocg->waitq) ||
1722 	    time_after64(vtime, now->vnow - ioc->margins.min))
1723 		return hwm;
1724 
1725 	/* throw away excess above target */
1726 	excess = now->vnow - vtime - ioc->margins.target;
1727 	if (excess > 0) {
1728 		atomic64_add(excess, &iocg->vtime);
1729 		atomic64_add(excess, &iocg->done_vtime);
1730 		vtime += excess;
1731 		ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
1732 	}
1733 
1734 	/*
1735 	 * Let delta be the distance between the iocg's and the device's vtimes
1736 	 * as a fraction of the period duration. Assuming that the iocg will
1737 	 * consume the usage determined above, we want to determine new_hwi so
1738 	 * that delta equals MARGIN_TARGET at the end of the next period.
1739 	 *
1740 	 * We need to execute usage worth of IOs while spending the sum of the
1741 	 * new budget (1 - MARGIN_TARGET) and the leftover from the last period
1742 	 * (delta):
1743 	 *
1744 	 *   usage = (1 - MARGIN_TARGET + delta) * new_hwi
1745 	 *
1746 	 * Therefore, the new_hwi is:
1747 	 *
1748 	 *   new_hwi = usage / (1 - MARGIN_TARGET + delta)
1749 	 */
1750 	delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
1751 			  now->vnow - ioc->period_at_vtime);
1752 	target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
1753 	new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
1754 
1755 	return clamp_t(s64, new_hwi, 1, hwm);
1756 }
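
/*
 * A numeric sketch of the formula above, assuming MARGIN_TARGET_PCT is
 * 50 (an illustrative value): if @iocg consumed 30% of the device
 * (usage == 0.3 * WEIGHT_ONE) and trails vnow by 0.2 periods (delta ==
 * 0.2 * WEIGHT_ONE), then new_hwi == 0.3 / (1 - 0.5 + 0.2) ~= 0.43 *
 * WEIGHT_ONE before being clamped to [1, @hwm].
 */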
1757 
1758 /*
1759  * For work-conservation, an iocg which isn't using all of its share should
1760  * donate the leftover to other iocgs. There are two ways to achieve this - 1.
1761  * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
1762  *
1763  * #1 is mathematically simpler but has the drawback of requiring synchronous
1764  * global hweight_inuse updates when idle iocgs get activated or inuse weights
1765  * change due to donation snapbacks as it has the possibility of grossly
1766  * overshooting what's allowed by the model and vrate.
1767  *
1768  * #2 is inherently safe with local operations. The donating iocg can easily
1769  * snap back to higher weights when needed without worrying about impacts on
1770  * other nodes as the impacts will be inherently correct. This also makes idle
1771  * iocg activations safe. The only effect activations have is decreasing
1772  * hweight_inuse of others, the right solution to which is for those iocgs to
1773  * snap back to higher weights.
1774  *
1775  * So, we go with #2. The challenge is calculating how each donating iocg's
1776  * inuse should be adjusted to achieve the target donation amounts. This is done
1777  * using Andy's method described in the following pdf.
1778  *
1779  *   https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1780  *
1781  * Given the weights and target after-donation hweight_inuse values, Andy's
1782  * method determines what the proportional distribution should look like at each
1783  * sibling level to maintain the relative relationship between all non-donating
1784  * pairs. To roughly summarize, it divides the tree into donating and
1785  * non-donating parts, calculates global donation rate which is used to
1786  * determine the target hweight_inuse for each node, and then derives per-level
1787  * proportions.
1788  *
1789  * The following pdf shows that global distribution calculated this way can be
1790  * achieved by scaling inuse weights of donating leaves and propagating the
1791  * adjustments upwards proportionally.
1792  *
1793  *   https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1794  *
1795  * Combining the above two, we can determine how each leaf iocg's inuse should
1796  * be adjusted to achieve the target donation.
1797  *
1798  *   https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1799  *
1800  * The inline comments use symbols from the last pdf.
1801  *
1802  *   b is the sum of the absolute budgets in the subtree. 1 for the root node.
1803  *   f is the sum of the absolute budgets of non-donating nodes in the subtree.
1804  *   t is the sum of the absolute budgets of donating nodes in the subtree.
1805  *   w is the weight of the node. w = w_f + w_t
1806  *   w_f is the non-donating portion of w. w_f = w * f / b
1807  *   w_t is the donating portion of w. w_t = w * t / b
1808  *   s is the sum of all sibling weights. s = Sum(w) for siblings
1809  *   s_f and s_t are the non-donating and donating portions of s.
1810  *
1811  * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1812  * w_pt is the donating portion of the parent's weight and w'_pt the same value
1813  * after adjustments. Subscript r denotes the root node's values.
1814  */
1815 static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
1816 {
1817 	LIST_HEAD(over_hwa);
1818 	LIST_HEAD(inner_walk);
1819 	struct ioc_gq *iocg, *tiocg, *root_iocg;
1820 	u32 after_sum, over_sum, over_target, gamma;
1821 
1822 	/*
1823 	 * It's pretty unlikely but possible for the total sum of
1824 	 * hweight_after_donation's to be higher than WEIGHT_ONE, which will
1825 	 * confuse the following calculations. If such a condition is detected,
1826 	 * scale down everyone over its full share equally to keep the sum below
1827 	 * WEIGHT_ONE.
1828 	 */
1829 	after_sum = 0;
1830 	over_sum = 0;
1831 	list_for_each_entry(iocg, surpluses, surplus_list) {
1832 		u32 hwa;
1833 
1834 		current_hweight(iocg, &hwa, NULL);
1835 		after_sum += iocg->hweight_after_donation;
1836 
1837 		if (iocg->hweight_after_donation > hwa) {
1838 			over_sum += iocg->hweight_after_donation;
1839 			list_add(&iocg->walk_list, &over_hwa);
1840 		}
1841 	}
1842 
1843 	if (after_sum >= WEIGHT_ONE) {
1844 		/*
1845 		 * The delta should be deducted from over_sum; calculate the
1846 		 * target over_sum value.
1847 		 */
1848 		u32 over_delta = after_sum - (WEIGHT_ONE - 1);
1849 		WARN_ON_ONCE(over_sum <= over_delta);
1850 		over_target = over_sum - over_delta;
1851 	} else {
1852 		over_target = 0;
1853 	}
1854 
1855 	list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
1856 		if (over_target)
1857 			iocg->hweight_after_donation =
1858 				div_u64((u64)iocg->hweight_after_donation *
1859 					over_target, over_sum);
1860 		list_del_init(&iocg->walk_list);
1861 	}
1862 
1863 	/*
1864 	 * Build pre-order inner node walk list and prepare for donation
1865 	 * adjustment calculations.
1866 	 */
1867 	list_for_each_entry(iocg, surpluses, surplus_list) {
1868 		iocg_build_inner_walk(iocg, &inner_walk);
1869 	}
1870 
1871 	root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
1872 	WARN_ON_ONCE(root_iocg->level > 0);
1873 
1874 	list_for_each_entry(iocg, &inner_walk, walk_list) {
1875 		iocg->child_adjusted_sum = 0;
1876 		iocg->hweight_donating = 0;
1877 		iocg->hweight_after_donation = 0;
1878 	}
1879 
1880 	/*
1881 	 * Propagate the donating budget (b_t) and after donation budget (b'_t)
1882 	 * up the hierarchy.
1883 	 */
1884 	list_for_each_entry(iocg, surpluses, surplus_list) {
1885 		struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1886 
1887 		parent->hweight_donating += iocg->hweight_donating;
1888 		parent->hweight_after_donation += iocg->hweight_after_donation;
1889 	}
1890 
1891 	list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
1892 		if (iocg->level > 0) {
1893 			struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1894 
1895 			parent->hweight_donating += iocg->hweight_donating;
1896 			parent->hweight_after_donation += iocg->hweight_after_donation;
1897 		}
1898 	}
1899 
1900 	/*
1901 	 * Calculate inner hwa's (b) and make sure the donation values are
1902 	 * within the accepted ranges as we're doing low res calculations with
1903 	 * roundups.
1904 	 */
1905 	list_for_each_entry(iocg, &inner_walk, walk_list) {
1906 		if (iocg->level) {
1907 			struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1908 
1909 			iocg->hweight_active = DIV64_U64_ROUND_UP(
1910 				(u64)parent->hweight_active * iocg->active,
1911 				parent->child_active_sum);
1913 		}
1914 
1915 		iocg->hweight_donating = min(iocg->hweight_donating,
1916 					     iocg->hweight_active);
1917 		iocg->hweight_after_donation = min(iocg->hweight_after_donation,
1918 						   iocg->hweight_donating - 1);
1919 		if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
1920 				 iocg->hweight_donating <= 1 ||
1921 				 iocg->hweight_after_donation == 0)) {
1922 			pr_warn("iocg: invalid donation weights in ");
1923 			pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
1924 			pr_cont(": active=%u donating=%u after=%u\n",
1925 				iocg->hweight_active, iocg->hweight_donating,
1926 				iocg->hweight_after_donation);
1927 		}
1928 	}
1929 
1930 	/*
1931 	 * Calculate the global donation rate (gamma) - the rate to adjust
1932 	 * non-donating budgets by.
1933 	 *
1934 	 * No need to use 64bit multiplication here as the first operand is
1935 	 * guaranteed to be smaller than WEIGHT_ONE (1<<16).
1936 	 *
1937 	 * We know that there are beneficiary nodes and the sum of the donating
1938 	 * hweights can't be whole; however, due to the round-ups during hweight
1939 	 * calculations, root_iocg->hweight_donating might still end up equal to
1940 	 * or greater than whole. Limit the range when calculating the divider.
1941 	 *
1942 	 * gamma = (1 - t_r') / (1 - t_r)
1943 	 */
1944 	gamma = DIV_ROUND_UP(
1945 		(WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
1946 		WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
1947 
1948 	/*
1949 	 * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
1950 	 * nodes.
1951 	 */
1952 	list_for_each_entry(iocg, &inner_walk, walk_list) {
1953 		struct ioc_gq *parent;
1954 		u32 inuse, wpt, wptp;
1955 		u64 st, sf;
1956 
1957 		if (iocg->level == 0) {
1958 			/* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1959 			iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1960 				iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1961 				WEIGHT_ONE - iocg->hweight_after_donation);
1962 			continue;
1963 		}
1964 
1965 		parent = iocg->ancestors[iocg->level - 1];
1966 
1967 		/* b' = gamma * b_f + b_t' */
1968 		iocg->hweight_inuse = DIV64_U64_ROUND_UP(
1969 			(u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
1970 			WEIGHT_ONE) + iocg->hweight_after_donation;
1971 
1972 		/* w' = s' * b' / b'_p */
1973 		inuse = DIV64_U64_ROUND_UP(
1974 			(u64)parent->child_adjusted_sum * iocg->hweight_inuse,
1975 			parent->hweight_inuse);
1976 
1977 		/* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
1978 		st = DIV64_U64_ROUND_UP(
1979 			iocg->child_active_sum * iocg->hweight_donating,
1980 			iocg->hweight_active);
1981 		sf = iocg->child_active_sum - st;
1982 		wpt = DIV64_U64_ROUND_UP(
1983 			(u64)iocg->active * iocg->hweight_donating,
1984 			iocg->hweight_active);
1985 		wptp = DIV64_U64_ROUND_UP(
1986 			(u64)inuse * iocg->hweight_after_donation,
1987 			iocg->hweight_inuse);
1988 
1989 		iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
1990 	}
1991 
1992 	/*
1993 	 * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
1994 	 * we can finally determine leaf adjustments.
1995 	 */
1996 	list_for_each_entry(iocg, surpluses, surplus_list) {
1997 		struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1998 		u32 inuse;
1999 
2000 		/*
2001 		 * In-debt iocgs participated in the donation calculation with
2002 		 * the minimum target hweight_inuse. Configuring inuse
2003 		 * accordingly would work fine but debt handling expects
2004 		 * @iocg->inuse to stay at the minimum and we don't want to
2005 		 * interfere.
2006 		 */
2007 		if (iocg->abs_vdebt) {
2008 			WARN_ON_ONCE(iocg->inuse > 1);
2009 			continue;
2010 		}
2011 
2012 		/* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
2013 		inuse = DIV64_U64_ROUND_UP(
2014 			parent->child_adjusted_sum * iocg->hweight_after_donation,
2015 			parent->hweight_inuse);
2016 
2017 		TRACE_IOCG_PATH(inuse_transfer, iocg, now,
2018 				iocg->inuse, inuse,
2019 				iocg->hweight_inuse,
2020 				iocg->hweight_after_donation);
2021 
2022 		__propagate_weights(iocg, iocg->active, inuse, true, now);
2023 	}
2024 
2025 	/* walk list should be dissolved after use */
2026 	list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
2027 		list_del_init(&iocg->walk_list);
2028 }
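
/*
 * A numeric sketch of gamma, assuming the root's donating hweight t_r ==
 * 0.4 * WEIGHT_ONE and its after-donation hweight t_r' == 0.1 *
 * WEIGHT_ONE: gamma == (1 - 0.1) / (1 - 0.4) == 1.5, i.e. each
 * non-donating budget b_f is scaled up by 50% to absorb the donated
 * capacity.
 */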
2029 
2030 /*
2031  * A low weight iocg can amass a large amount of debt, for example, when
2032  * anonymous memory gets reclaimed aggressively. If the system has a lot of
2033  * memory paired with a slow IO device, the debt can span multiple seconds or
2034  * more. If there are no other subsequent IO issuers, the in-debt iocg may end
2035  * up blocked paying its debt while the IO device is idle.
2036  *
2037  * The following protects against such cases. If the device has been
2038  * sufficiently idle for a while, the debts are halved and delays are
2039  * recalculated.
2040  */
2041 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
2042 			      struct ioc_now *now)
2043 {
2044 	struct ioc_gq *iocg;
2045 	u64 dur, usage_pct, nr_cycles;
2046 
2047 	/* if no debtor, reset the cycle */
2048 	if (!nr_debtors) {
2049 		ioc->dfgv_period_at = now->now;
2050 		ioc->dfgv_period_rem = 0;
2051 		ioc->dfgv_usage_us_sum = 0;
2052 		return;
2053 	}
2054 
2055 	/*
2056 	 * Debtors can pass through a lot of writes choking the device and we
2057 	 * don't want to be forgiving debts while the device is struggling from
2058 	 * write bursts. If we're missing latency targets, consider the device
2059 	 * fully utilized.
2060 	 */
2061 	if (ioc->busy_level > 0)
2062 		usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);
2063 
2064 	ioc->dfgv_usage_us_sum += usage_us_sum;
2065 	if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
2066 		return;
2067 
2068 	/*
2069 	 * At least DFGV_PERIOD has passed since the last period. Calculate the
2070 	 * average usage and reset the period counters.
2071 	 */
2072 	dur = now->now - ioc->dfgv_period_at;
2073 	usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
2074 
2075 	ioc->dfgv_period_at = now->now;
2076 	ioc->dfgv_usage_us_sum = 0;
2077 
2078 	/* if was too busy, reset everything */
2079 	if (usage_pct > DFGV_USAGE_PCT) {
2080 		ioc->dfgv_period_rem = 0;
2081 		return;
2082 	}
2083 
2084 	/*
2085 	 * Usage is lower than threshold. Let's forgive some debts. Debt
2086 	 * forgiveness runs off of the usual ioc timer but its period usually
2087 	 * doesn't match ioc's. Compensate the difference by performing the
2088 	 * reduction as many times as would fit in the duration since the last
2089 	 * run and carrying over the left-over duration in @ioc->dfgv_period_rem
2090 	 * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive
2091 	 * reductions is doubled.
2092 	 */
2093 	nr_cycles = dur + ioc->dfgv_period_rem;
2094 	ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
2095 
2096 	list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2097 		u64 __maybe_unused old_debt, __maybe_unused old_delay;
2098 
2099 		if (!iocg->abs_vdebt && !iocg->delay)
2100 			continue;
2101 
2102 		spin_lock(&iocg->waitq.lock);
2103 
2104 		old_debt = iocg->abs_vdebt;
2105 		old_delay = iocg->delay;
2106 
2107 		if (iocg->abs_vdebt)
2108 			iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
2109 		if (iocg->delay)
2110 			iocg->delay = iocg->delay >> nr_cycles ?: 1;
2111 
2112 		iocg_kick_waitq(iocg, true, now);
2113 
2114 		TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
2115 				old_debt, iocg->abs_vdebt,
2116 				old_delay, iocg->delay);
2117 
2118 		spin_unlock(&iocg->waitq.lock);
2119 	}
2120 }
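
/*
 * A worked example of the carry-over above, assuming DFGV_PERIOD is 1s
 * and forgiveness runs land 1.5s apart: the first run gets nr_cycles ==
 * 1 with 0.5s left in ->dfgv_period_rem, the next gets (1.5s + 0.5s) /
 * 1s == 2 halvings, and so on. The "?: 1" floors the halved debt and
 * delay at 1 rather than clearing them outright here.
 */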
2121 
2122 /*
2123  * Check the active iocgs' state to avoid oversleeping and deactivate
2124  * idle iocgs.
2125  *
2126  * Since waiters determine the sleep durations based on the vrate
2127  * they saw at the time of sleep, if vrate has increased, some
2128  * waiters could be sleeping for too long. Wake up tardy waiters
2129  * which should have woken up in the last period and expire idle
2130  * iocgs.
2131  */
2132 static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now)
2133 {
2134 	int nr_debtors = 0;
2135 	struct ioc_gq *iocg, *tiocg;
2136 
2137 	list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
2138 		if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2139 		    !iocg->delay && !iocg_is_idle(iocg))
2140 			continue;
2141 
2142 		spin_lock(&iocg->waitq.lock);
2143 
2144 		/* flush wait, indebt and indelay stat deltas */
2145 		if (iocg->wait_since) {
2146 			iocg->stat.wait_us += now->now - iocg->wait_since;
2147 			iocg->wait_since = now->now;
2148 		}
2149 		if (iocg->indebt_since) {
2150 			iocg->stat.indebt_us +=
2151 				now->now - iocg->indebt_since;
2152 			iocg->indebt_since = now->now;
2153 		}
2154 		if (iocg->indelay_since) {
2155 			iocg->stat.indelay_us +=
2156 				now->now - iocg->indelay_since;
2157 			iocg->indelay_since = now->now;
2158 		}
2159 
2160 		if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
2161 		    iocg->delay) {
2162 			/* might be oversleeping vtime / hweight changes, kick */
2163 			iocg_kick_waitq(iocg, true, now);
2164 			if (iocg->abs_vdebt || iocg->delay)
2165 				nr_debtors++;
2166 		} else if (iocg_is_idle(iocg)) {
2167 			/* no waiter and idle, deactivate */
2168 			u64 vtime = atomic64_read(&iocg->vtime);
2169 			s64 excess;
2170 
2171 			/*
2172 			 * @iocg has been inactive for a full duration and will
2173 			 * have a high budget. Account anything above target as
2174 			 * error and throw away. On reactivation, it'll start
2175 			 * with the target budget.
2176 			 */
2177 			excess = now->vnow - vtime - ioc->margins.target;
2178 			if (excess > 0) {
2179 				u32 old_hwi;
2180 
2181 				current_hweight(iocg, NULL, &old_hwi);
2182 				ioc->vtime_err -= div64_u64(excess * old_hwi,
2183 							    WEIGHT_ONE);
2184 			}
2185 
2186 			TRACE_IOCG_PATH(iocg_idle, iocg, now,
2187 					atomic64_read(&iocg->active_period),
2188 					atomic64_read(&ioc->cur_period), vtime);
2189 			__propagate_weights(iocg, 0, 0, false, now);
2190 			list_del_init(&iocg->active_list);
2191 		}
2192 
2193 		spin_unlock(&iocg->waitq.lock);
2194 	}
2195 
2196 	commit_weights(ioc);
2197 	return nr_debtors;
2198 }
2199 
2200 static void ioc_timer_fn(struct timer_list *timer)
2201 {
2202 	struct ioc *ioc = container_of(timer, struct ioc, timer);
2203 	struct ioc_gq *iocg, *tiocg;
2204 	struct ioc_now now;
2205 	LIST_HEAD(surpluses);
2206 	int nr_debtors, nr_shortages = 0, nr_lagging = 0;
2207 	u64 usage_us_sum = 0;
2208 	u32 ppm_rthr;
2209 	u32 ppm_wthr;
2210 	u32 missed_ppm[2], rq_wait_pct;
2211 	u64 period_vtime;
2212 	int prev_busy_level;
2213 
2214 	/* how were the latencies during the period? */
2215 	ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
2216 
2217 	/* take care of active iocgs */
2218 	spin_lock_irq(&ioc->lock);
2219 
2220 	ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
2221 	ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
2222 	ioc_now(ioc, &now);
2223 
2224 	period_vtime = now.vnow - ioc->period_at_vtime;
2225 	if (WARN_ON_ONCE(!period_vtime)) {
2226 		spin_unlock_irq(&ioc->lock);
2227 		return;
2228 	}
2229 
2230 	nr_debtors = ioc_check_iocgs(ioc, &now);
2231 
2232 	/*
2233 	 * Wait and indebt stats are flushed above and the donation calculation
2234 	 * below needs updated usage stats. Let's bring the stats up-to-date.
2235 	 */
2236 	iocg_flush_stat(&ioc->active_iocgs, &now);
2237 
2238 	/* calc usage and see whether some weights need to be moved around */
2239 	list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2240 		u64 vdone, vtime, usage_us;
2241 		u32 hw_active, hw_inuse;
2242 
2243 		/*
2244 		 * Collect unused and wind vtime closer to vnow to prevent
2245 		 * iocgs from accumulating a large amount of budget.
2246 		 */
2247 		vdone = atomic64_read(&iocg->done_vtime);
2248 		vtime = atomic64_read(&iocg->vtime);
2249 		current_hweight(iocg, &hw_active, &hw_inuse);
2250 
2251 		/*
2252 		 * Latency QoS detection doesn't account for IOs which are
2253 		 * in-flight for longer than a period.  Detect them by
2254 		 * comparing vdone against period start.  If lagging behind
2255 		 * IOs from past periods, don't increase vrate.
2256 		 */
2257 		if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
2258 		    !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
2259 		    time_after64(vtime, vdone) &&
2260 		    time_after64(vtime, now.vnow -
2261 				 MAX_LAGGING_PERIODS * period_vtime) &&
2262 		    time_before64(vdone, now.vnow - period_vtime))
2263 			nr_lagging++;
2264 
2265 		/*
2266 		 * Determine absolute usage factoring in in-flight IOs to avoid
2267 		 * high-latency completions appearing as idle.
2268 		 */
2269 		usage_us = iocg->usage_delta_us;
2270 		usage_us_sum += usage_us;
2271 
2272 		/* see whether there's surplus vtime */
2273 		WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2274 		if (hw_inuse < hw_active ||
2275 		    (!waitqueue_active(&iocg->waitq) &&
2276 		     time_before64(vtime, now.vnow - ioc->margins.low))) {
2277 			u32 hwa, old_hwi, hwm, new_hwi, usage;
2278 			u64 usage_dur;
2279 
2280 			if (vdone != vtime) {
2281 				u64 inflight_us = DIV64_U64_ROUND_UP(
2282 					cost_to_abs_cost(vtime - vdone, hw_inuse),
2283 					ioc->vtime_base_rate);
2284 
2285 				usage_us = max(usage_us, inflight_us);
2286 			}
2287 
2288 			/* convert to hweight based usage ratio */
2289 			if (time_after64(iocg->activated_at, ioc->period_at))
2290 				usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
2291 			else
2292 				usage_dur = max_t(u64, now.now - ioc->period_at, 1);
2293 
2294 			usage = clamp_t(u32,
2295 				DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
2296 						   usage_dur),
2297 				1, WEIGHT_ONE);
2298 
2299 			/*
2300 			 * Already donating or accumulated enough to start.
2301 			 * Determine the donation amount.
2302 			 */
2303 			current_hweight(iocg, &hwa, &old_hwi);
2304 			hwm = current_hweight_max(iocg);
2305 			new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
2306 							 usage, &now);
2307 			/*
2308 			 * Donation calculation assumes hweight_after_donation
2309 			 * to be positive, a condition that a donor w/ hwa < 2
2310 			 * can't meet. Don't bother with donation if hwa is
2311 			 * below 2. It's not gonna make a meaningful difference
2312 			 * anyway.
2313 			 */
2314 			if (new_hwi < hwm && hwa >= 2) {
2315 				iocg->hweight_donating = hwa;
2316 				iocg->hweight_after_donation = new_hwi;
2317 				list_add(&iocg->surplus_list, &surpluses);
2318 			} else if (!iocg->abs_vdebt) {
2319 				/*
2320 				 * @iocg doesn't have enough to donate. Reset
2321 				 * its inuse to active.
2322 				 *
2323 				 * Don't reset debtors as their inuse's are
2324 				 * owned by debt handling. This shouldn't affect
2325 				 * donation calculation in any meaningful way
2326 				 * as @iocg doesn't have a meaningful amount of
2327 				 * share anyway.
2328 				 */
2329 				TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
2330 						iocg->inuse, iocg->active,
2331 						iocg->hweight_inuse, new_hwi);
2332 
2333 				__propagate_weights(iocg, iocg->active,
2334 						    iocg->active, true, &now);
2335 				nr_shortages++;
2336 			}
2337 		} else {
2338 			/* genuinely short on vtime */
2339 			nr_shortages++;
2340 		}
2341 	}
2342 
2343 	if (!list_empty(&surpluses) && nr_shortages)
2344 		transfer_surpluses(&surpluses, &now);
2345 
2346 	commit_weights(ioc);
2347 
2348 	/* surplus list should be dissolved after use */
2349 	list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2350 		list_del_init(&iocg->surplus_list);
2351 
2352 	/*
2353 	 * If q is getting clogged or we're missing too much, we're issuing
2354 	 * too much IO and should lower vtime rate.  If we're not missing
2355 	 * and experiencing shortages but not surpluses, we're too stingy
2356 	 * and should increase vtime rate.
2357 	 */
2358 	prev_busy_level = ioc->busy_level;
2359 	if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2360 	    missed_ppm[READ] > ppm_rthr ||
2361 	    missed_ppm[WRITE] > ppm_wthr) {
2362 		/* clearly missing QoS targets, slow down vrate */
2363 		ioc->busy_level = max(ioc->busy_level, 0);
2364 		ioc->busy_level++;
2365 	} else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
2366 		   missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2367 		   missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
2368 		/* QoS targets are being met with >25% margin */
2369 		if (nr_shortages) {
2370 			/*
2371 			 * We're throttling while the device has spare
2372 			 * capacity.  If vrate was being slowed down, stop.
2373 			 */
2374 			ioc->busy_level = min(ioc->busy_level, 0);
2375 
2376 			/*
2377 			 * If there are IOs spanning multiple periods, wait
2378 			 * them out before pushing the device harder.
2379 			 */
2380 			if (!nr_lagging)
2381 				ioc->busy_level--;
2382 		} else {
2383 			/*
2384 			 * Nobody is being throttled and the users aren't
2385 			 * issuing enough IOs to saturate the device.  We
2386 			 * simply don't know how close the device is to
2387 			 * saturation.  Coast.
2388 			 */
2389 			ioc->busy_level = 0;
2390 		}
2391 	} else {
2392 		/* inside the hysteresis margin, we're good */
2393 		ioc->busy_level = 0;
2394 	}
2395 
2396 	ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2397 
2398 	ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages,
2399 			      prev_busy_level, missed_ppm);
2400 
2401 	ioc_refresh_params(ioc, false);
2402 
2403 	ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
2404 
2405 	/*
2406 	 * This period is done.  Move onto the next one.  If nothing's
2407 	 * going on with the device, stop the timer.
2408 	 */
2409 	atomic64_inc(&ioc->cur_period);
2410 
2411 	if (ioc->running != IOC_STOP) {
2412 		if (!list_empty(&ioc->active_iocgs)) {
2413 			ioc_start_period(ioc, &now);
2414 		} else {
2415 			ioc->busy_level = 0;
2416 			ioc->vtime_err = 0;
2417 			ioc->running = IOC_IDLE;
2418 		}
2419 
2420 		ioc_refresh_vrate(ioc, &now);
2421 	}
2422 
2423 	spin_unlock_irq(&ioc->lock);
2424 }
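
/*
 * A numeric sketch of the hysteresis above, assuming RQ_WAIT_BUSY_PCT
 * == 5 and UNBUSY_THR_PCT == 75 (illustrative values): vrate is slowed
 * once rq_wait_pct exceeds 5%, but is sped up again (given shortages
 * and no laggers) only after rq_wait_pct drops below 3.75% and the
 * missed ppm numbers sit similarly 25% inside their thresholds.
 */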
2425 
2426 static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2427 				      u64 abs_cost, struct ioc_now *now)
2428 {
2429 	struct ioc *ioc = iocg->ioc;
2430 	struct ioc_margins *margins = &ioc->margins;
2431 	u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
2432 	u32 hwi, adj_step;
2433 	s64 margin;
2434 	u64 cost, new_inuse;
2435 
2436 	current_hweight(iocg, NULL, &hwi);
2437 	old_hwi = hwi;
2438 	cost = abs_cost_to_cost(abs_cost, hwi);
2439 	margin = now->vnow - vtime - cost;
2440 
2441 	/* debt handling owns inuse for debtors */
2442 	if (iocg->abs_vdebt)
2443 		return cost;
2444 
2445 	/*
2446 	 * We only increase inuse during period and do so if the margin has
2447 	 * deteriorated since the previous adjustment.
2448 	 */
2449 	if (margin >= iocg->saved_margin || margin >= margins->low ||
2450 	    iocg->inuse == iocg->active)
2451 		return cost;
2452 
2453 	spin_lock_irq(&ioc->lock);
2454 
2455 	/* we own inuse only when @iocg is in the normal active state */
2456 	if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
2457 		spin_unlock_irq(&ioc->lock);
2458 		return cost;
2459 	}
2460 
2461 	/*
2462 	 * Bump up inuse till @abs_cost fits in the existing budget.
2463 	 * adj_step must be determined after acquiring ioc->lock - we might
2464 	 * have raced and lost to another thread for activation and could
2465 	 * be reading a zero iocg->active before taking ioc->lock, which would
2466 	 * lead to an infinite loop.
2467 	 */
2468 	new_inuse = iocg->inuse;
2469 	adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
2470 	do {
2471 		new_inuse = new_inuse + adj_step;
2472 		propagate_weights(iocg, iocg->active, new_inuse, true, now);
2473 		current_hweight(iocg, NULL, &hwi);
2474 		cost = abs_cost_to_cost(abs_cost, hwi);
2475 	} while (time_after64(vtime + cost, now->vnow) &&
2476 		 iocg->inuse != iocg->active);
2477 
2478 	spin_unlock_irq(&ioc->lock);
2479 
2480 	TRACE_IOCG_PATH(inuse_adjust, iocg, now,
2481 			old_inuse, iocg->inuse, old_hwi, hwi);
2482 
2483 	return cost;
2484 }
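
/*
 * A worked example of the bump-up loop above, assuming
 * INUSE_ADJ_STEP_PCT == 25 (illustrative): with active == 1000 and
 * inuse == 100, adj_step == 250 and inuse walks 350, 600, 850, 1000
 * (propagate_weights() clamps at active), stopping as soon as the
 * recalculated cost fits within vnow or inuse reaches active.
 */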
2485 
2486 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2487 				    bool is_merge, u64 *costp)
2488 {
2489 	struct ioc *ioc = iocg->ioc;
2490 	u64 coef_seqio, coef_randio, coef_page;
2491 	u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2492 	u64 seek_pages = 0;
2493 	u64 cost = 0;
2494 
2495 	switch (bio_op(bio)) {
2496 	case REQ_OP_READ:
2497 		coef_seqio	= ioc->params.lcoefs[LCOEF_RSEQIO];
2498 		coef_randio	= ioc->params.lcoefs[LCOEF_RRANDIO];
2499 		coef_page	= ioc->params.lcoefs[LCOEF_RPAGE];
2500 		break;
2501 	case REQ_OP_WRITE:
2502 		coef_seqio	= ioc->params.lcoefs[LCOEF_WSEQIO];
2503 		coef_randio	= ioc->params.lcoefs[LCOEF_WRANDIO];
2504 		coef_page	= ioc->params.lcoefs[LCOEF_WPAGE];
2505 		break;
2506 	default:
2507 		goto out;
2508 	}
2509 
2510 	if (iocg->cursor) {
2511 		seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2512 		seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
2513 	}
2514 
2515 	if (!is_merge) {
2516 		if (seek_pages > LCOEF_RANDIO_PAGES) {
2517 			cost += coef_randio;
2518 		} else {
2519 			cost += coef_seqio;
2520 		}
2521 	}
2522 	cost += pages * coef_page;
2523 out:
2524 	*costp = cost;
2525 }
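
/*
 * A numeric sketch of the linear model above with hypothetical
 * coefficients coef_randio == 80 * VTIME_PER_USEC, coef_seqio == 10 *
 * VTIME_PER_USEC and coef_page == VTIME_PER_USEC: a 64KiB read (16
 * pages) landing far from the cursor costs 80 + 16 == 96us worth of
 * vtime while the same read continuing at the cursor costs 10 + 16 ==
 * 26us.
 */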
2526 
2527 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2528 {
2529 	u64 cost;
2530 
2531 	calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2532 	return cost;
2533 }
2534 
2535 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2536 					 u64 *costp)
2537 {
2538 	unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
2539 
2540 	switch (req_op(rq)) {
2541 	case REQ_OP_READ:
2542 		*costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2543 		break;
2544 	case REQ_OP_WRITE:
2545 		*costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2546 		break;
2547 	default:
2548 		*costp = 0;
2549 	}
2550 }
2551 
2552 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2553 {
2554 	u64 cost;
2555 
2556 	calc_size_vtime_cost_builtin(rq, ioc, &cost);
2557 	return cost;
2558 }
2559 
2560 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2561 {
2562 	struct blkcg_gq *blkg = bio->bi_blkg;
2563 	struct ioc *ioc = rqos_to_ioc(rqos);
2564 	struct ioc_gq *iocg = blkg_to_iocg(blkg);
2565 	struct ioc_now now;
2566 	struct iocg_wait wait;
2567 	u64 abs_cost, cost, vtime;
2568 	bool use_debt, ioc_locked;
2569 	unsigned long flags;
2570 
2571 	/* bypass IOs if disabled, still initializing, or for root cgroup */
2572 	if (!ioc->enabled || !iocg || !iocg->level)
2573 		return;
2574 
2575 	/* calculate the absolute vtime cost */
2576 	abs_cost = calc_vtime_cost(bio, iocg, false);
2577 	if (!abs_cost)
2578 		return;
2579 
2580 	if (!iocg_activate(iocg, &now))
2581 		return;
2582 
2583 	iocg->cursor = bio_end_sector(bio);
2584 	vtime = atomic64_read(&iocg->vtime);
2585 	cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2586 
2587 	/*
2588 	 * If no one's waiting and within budget, issue right away.  The
2589 	 * tests are racy but the races aren't systemic - we only miss once
2590 	 * in a while which is fine.
2591 	 */
2592 	if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2593 	    time_before_eq64(vtime + cost, now.vnow)) {
2594 		iocg_commit_bio(iocg, bio, abs_cost, cost);
2595 		return;
2596 	}
2597 
2598 	/*
2599 	 * We're over budget. This can be handled in two ways. IOs which may
2600 	 * cause priority inversions are charged to @iocg as
2601 	 * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2602 	 * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2603 	 * whether debt handling is needed and acquire locks accordingly.
2604 	 */
2605 	use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2606 	ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
2607 retry_lock:
2608 	iocg_lock(iocg, ioc_locked, &flags);
2609 
2610 	/*
2611 	 * @iocg must stay activated for debt and waitq handling. Deactivation
2612 	 * is synchronized against both ioc->lock and waitq.lock and we won't
2613 	 * get deactivated as long as we're waiting or have debt, so we're good
2614 	 * if we're activated here. In the unlikely cases that we aren't, just
2615 	 * issue the IO.
2616 	 */
2617 	if (unlikely(list_empty(&iocg->active_list))) {
2618 		iocg_unlock(iocg, ioc_locked, &flags);
2619 		iocg_commit_bio(iocg, bio, abs_cost, cost);
2620 		return;
2621 	}
2622 
2623 	/*
2624 	 * We're over budget. If @bio has to be issued regardless, remember
2625 	 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2626 	 * off the debt before waking more IOs.
2627 	 *
2628 	 * This way, the debt is continuously paid off each period with the
2629 	 * actual budget available to the cgroup. If we just wound vtime, we
2630 	 * would incorrectly use the current hw_inuse for the entire amount
2631 	 * which, for example, can lead to the cgroup staying blocked for a
2632 	 * long time even with substantially raised hw_inuse.
2633 	 *
2634 	 * An iocg with vdebt should stay online so that the timer can keep
2635 	 * deducting its vdebt and [de]activating the use_delay mechanism
2636 	 * accordingly. We don't want to race against the timer trying to
2637 	 * clear them and leave @iocg inactive w/ dangling use_delay heavily
2638 	 * penalizing the cgroup and its descendants.
2639 	 */
2640 	if (use_debt) {
2641 		iocg_incur_debt(iocg, abs_cost, &now);
2642 		if (iocg_kick_delay(iocg, &now))
2643 			blkcg_schedule_throttle(rqos->q->disk,
2644 					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2645 		iocg_unlock(iocg, ioc_locked, &flags);
2646 		return;
2647 	}
2648 
2649 	/* guarantee that iocgs w/ waiters have maximum inuse */
2650 	if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
2651 		if (!ioc_locked) {
2652 			iocg_unlock(iocg, false, &flags);
2653 			ioc_locked = true;
2654 			goto retry_lock;
2655 		}
2656 		propagate_weights(iocg, iocg->active, iocg->active, true,
2657 				  &now);
2658 	}
2659 
2660 	/*
2661 	 * Append self to the waitq and schedule the wakeup timer if we're
2662 	 * the first waiter.  The timer duration is calculated based on the
2663 	 * current vrate.  vtime and hweight changes can make it too short
2664 	 * or too long.  Each wait entry records the absolute cost it's
2665 	 * waiting for to allow re-evaluation using a custom wait entry.
2666 	 *
2667 	 * If too short, the timer simply reschedules itself.  If too long,
2668 	 * the period timer will notice and trigger wakeups.
2669 	 *
2670 	 * All waiters are on iocg->waitq and the wait states are
2671 	 * synchronized using waitq.lock.
2672 	 */
2673 	init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2674 	wait.wait.private = current;
2675 	wait.bio = bio;
2676 	wait.abs_cost = abs_cost;
2677 	wait.committed = false;	/* will be set true by waker */
2678 
2679 	__add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
2680 	iocg_kick_waitq(iocg, ioc_locked, &now);
2681 
2682 	iocg_unlock(iocg, ioc_locked, &flags);
2683 
2684 	while (true) {
2685 		set_current_state(TASK_UNINTERRUPTIBLE);
2686 		if (wait.committed)
2687 			break;
2688 		io_schedule();
2689 	}
2690 
2691 	/* waker already committed us, proceed */
2692 	finish_wait(&iocg->waitq, &wait.wait);
2693 }
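
/*
 * To make the two over-budget paths above concrete: a REQ_SWAP write
 * takes the debt path - its abs_cost is added to @iocg->abs_vdebt and
 * the issuer is throttled indirectly through blkcg_schedule_throttle()
 * - while an ordinary read blocks on @iocg->waitq until the waker pays
 * its wait.abs_cost out of the replenished budget and sets
 * wait.committed.
 */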
2694 
2695 static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2696 			   struct bio *bio)
2697 {
2698 	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2699 	struct ioc *ioc = rqos_to_ioc(rqos);
2700 	sector_t bio_end = bio_end_sector(bio);
2701 	struct ioc_now now;
2702 	u64 vtime, abs_cost, cost;
2703 	unsigned long flags;
2704 
2705 	/* bypass if disabled, still initializing, or for root cgroup */
2706 	if (!ioc->enabled || !iocg || !iocg->level)
2707 		return;
2708 
2709 	abs_cost = calc_vtime_cost(bio, iocg, true);
2710 	if (!abs_cost)
2711 		return;
2712 
2713 	ioc_now(ioc, &now);
2714 
2715 	vtime = atomic64_read(&iocg->vtime);
2716 	cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2717 
2718 	/* update cursor if backmerging into the request at the cursor */
2719 	if (blk_rq_pos(rq) < bio_end &&
2720 	    blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2721 		iocg->cursor = bio_end;
2722 
2723 	/*
2724 	 * Charge if there's enough vtime budget and the existing request has
2725 	 * cost assigned.
2726 	 */
2727 	if (rq->bio && rq->bio->bi_iocost_cost &&
2728 	    time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
2729 		iocg_commit_bio(iocg, bio, abs_cost, cost);
2730 		return;
2731 	}
2732 
2733 	/*
2734 	 * Otherwise, account it as debt if @iocg is online, which it should
2735 	 * be for the vast majority of cases. See debt handling in
2736 	 * ioc_rqos_throttle() for details.
2737 	 */
2738 	spin_lock_irqsave(&ioc->lock, flags);
2739 	spin_lock(&iocg->waitq.lock);
2740 
2741 	if (likely(!list_empty(&iocg->active_list))) {
2742 		iocg_incur_debt(iocg, abs_cost, &now);
2743 		if (iocg_kick_delay(iocg, &now))
2744 			blkcg_schedule_throttle(rqos->q->disk,
2745 					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2746 	} else {
2747 		iocg_commit_bio(iocg, bio, abs_cost, cost);
2748 	}
2749 
2750 	spin_unlock(&iocg->waitq.lock);
2751 	spin_unlock_irqrestore(&ioc->lock, flags);
2752 }
2753 
2754 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2755 {
2756 	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2757 
2758 	if (iocg && bio->bi_iocost_cost)
2759 		atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2760 }
2761 
2762 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2763 {
2764 	struct ioc *ioc = rqos_to_ioc(rqos);
2765 	struct ioc_pcpu_stat *ccs;
2766 	u64 on_q_ns, rq_wait_ns, size_nsec;
2767 	int pidx, rw;
2768 
2769 	if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2770 		return;
2771 
2772 	switch (req_op(rq)) {
2773 	case REQ_OP_READ:
2774 		pidx = QOS_RLAT;
2775 		rw = READ;
2776 		break;
2777 	case REQ_OP_WRITE:
2778 		pidx = QOS_WLAT;
2779 		rw = WRITE;
2780 		break;
2781 	default:
2782 		return;
2783 	}
2784 
2785 	on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
2786 	rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
2787 	size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
2788 
2789 	ccs = get_cpu_ptr(ioc->pcpu_stat);
2790 
2791 	if (on_q_ns <= size_nsec ||
2792 	    on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
2793 		local_inc(&ccs->missed[rw].nr_met);
2794 	else
2795 		local_inc(&ccs->missed[rw].nr_missed);
2796 
2797 	local64_add(rq_wait_ns, &ccs->rq_wait_ns);
2798 
2799 	put_cpu_ptr(ccs);
2800 }
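
/*
 * A numeric sketch of the met/missed accounting above, assuming
 * QOS_RLAT is configured to 5000 (5ms): a read that spent 7ms from
 * allocation to completion with a size_nsec of 3ms counts as met since
 * 7ms - 3ms <= 5ms - the size-proportional part of the service time is
 * not held against the device.
 */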
2801 
2802 static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2803 {
2804 	struct ioc *ioc = rqos_to_ioc(rqos);
2805 
2806 	spin_lock_irq(&ioc->lock);
2807 	ioc_refresh_params(ioc, false);
2808 	spin_unlock_irq(&ioc->lock);
2809 }
2810 
2811 static void ioc_rqos_exit(struct rq_qos *rqos)
2812 {
2813 	struct ioc *ioc = rqos_to_ioc(rqos);
2814 
2815 	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
2816 
2817 	spin_lock_irq(&ioc->lock);
2818 	ioc->running = IOC_STOP;
2819 	spin_unlock_irq(&ioc->lock);
2820 
2821 	del_timer_sync(&ioc->timer);
2822 	free_percpu(ioc->pcpu_stat);
2823 	kfree(ioc);
2824 }
2825 
2826 static struct rq_qos_ops ioc_rqos_ops = {
2827 	.throttle = ioc_rqos_throttle,
2828 	.merge = ioc_rqos_merge,
2829 	.done_bio = ioc_rqos_done_bio,
2830 	.done = ioc_rqos_done,
2831 	.queue_depth_changed = ioc_rqos_queue_depth_changed,
2832 	.exit = ioc_rqos_exit,
2833 };
2834 
2835 static int blk_iocost_init(struct gendisk *disk)
2836 {
2837 	struct request_queue *q = disk->queue;
2838 	struct ioc *ioc;
2839 	struct rq_qos *rqos;
2840 	int i, cpu, ret;
2841 
2842 	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2843 	if (!ioc)
2844 		return -ENOMEM;
2845 
2846 	ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
2847 	if (!ioc->pcpu_stat) {
2848 		kfree(ioc);
2849 		return -ENOMEM;
2850 	}
2851 
2852 	for_each_possible_cpu(cpu) {
2853 		struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
2854 
2855 		for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
2856 			local_set(&ccs->missed[i].nr_met, 0);
2857 			local_set(&ccs->missed[i].nr_missed, 0);
2858 		}
2859 		local64_set(&ccs->rq_wait_ns, 0);
2860 	}
2861 
2862 	rqos = &ioc->rqos;
2863 	rqos->id = RQ_QOS_COST;
2864 	rqos->ops = &ioc_rqos_ops;
2865 	rqos->q = q;
2866 
2867 	spin_lock_init(&ioc->lock);
2868 	timer_setup(&ioc->timer, ioc_timer_fn, 0);
2869 	INIT_LIST_HEAD(&ioc->active_iocgs);
2870 
2871 	ioc->running = IOC_IDLE;
2872 	ioc->vtime_base_rate = VTIME_PER_USEC;
2873 	atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
2874 	seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
2875 	ioc->period_at = ktime_to_us(ktime_get());
2876 	atomic64_set(&ioc->cur_period, 0);
2877 	atomic_set(&ioc->hweight_gen, 0);
2878 
2879 	spin_lock_irq(&ioc->lock);
2880 	ioc->autop_idx = AUTOP_INVALID;
2881 	ioc_refresh_params(ioc, true);
2882 	spin_unlock_irq(&ioc->lock);
2883 
2884 	/*
2885 	 * rqos must be added before activation to allow ioc_pd_init() to
2886 	 * look up the ioc from q. This means that the rqos methods may get
2887 	 * called before policy activation completes, so they can't assume that
2888 	 * the target bio has an associated iocg and must test for a NULL iocg.
2889 	 */
2890 	ret = rq_qos_add(q, rqos);
2891 	if (ret)
2892 		goto err_free_ioc;
2893 
2894 	ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
2895 	if (ret)
2896 		goto err_del_qos;
2897 	return 0;
2898 
2899 err_del_qos:
2900 	rq_qos_del(q, rqos);
2901 err_free_ioc:
2902 	free_percpu(ioc->pcpu_stat);
2903 	kfree(ioc);
2904 	return ret;
2905 }
2906 
2907 static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2908 {
2909 	struct ioc_cgrp *iocc;
2910 
2911 	iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
2912 	if (!iocc)
2913 		return NULL;
2914 
2915 	iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
2916 	return &iocc->cpd;
2917 }
2918 
2919 static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2920 {
2921 	kfree(container_of(cpd, struct ioc_cgrp, cpd));
2922 }
2923 
2924 static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
2925 					     struct blkcg *blkcg)
2926 {
2927 	int levels = blkcg->css.cgroup->level + 1;
2928 	struct ioc_gq *iocg;
2929 
2930 	iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
2931 	if (!iocg)
2932 		return NULL;
2933 
2934 	iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
2935 	if (!iocg->pcpu_stat) {
2936 		kfree(iocg);
2937 		return NULL;
2938 	}
2939 
2940 	return &iocg->pd;
2941 }
2942 
2943 static void ioc_pd_init(struct blkg_policy_data *pd)
2944 {
2945 	struct ioc_gq *iocg = pd_to_iocg(pd);
2946 	struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
2947 	struct ioc *ioc = q_to_ioc(blkg->q);
2948 	struct ioc_now now;
2949 	struct blkcg_gq *tblkg;
2950 	unsigned long flags;
2951 
2952 	ioc_now(ioc, &now);
2953 
2954 	iocg->ioc = ioc;
2955 	atomic64_set(&iocg->vtime, now.vnow);
2956 	atomic64_set(&iocg->done_vtime, now.vnow);
2957 	atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2958 	INIT_LIST_HEAD(&iocg->active_list);
2959 	INIT_LIST_HEAD(&iocg->walk_list);
2960 	INIT_LIST_HEAD(&iocg->surplus_list);
2961 	iocg->hweight_active = WEIGHT_ONE;
2962 	iocg->hweight_inuse = WEIGHT_ONE;
2963 
2964 	init_waitqueue_head(&iocg->waitq);
2965 	hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2966 	iocg->waitq_timer.function = iocg_waitq_timer_fn;
2967 
2968 	iocg->level = blkg->blkcg->css.cgroup->level;
2969 
2970 	for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
2971 		struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
2972 		iocg->ancestors[tiocg->level] = tiocg;
2973 	}
2974 
2975 	spin_lock_irqsave(&ioc->lock, flags);
2976 	weight_updated(iocg, &now);
2977 	spin_unlock_irqrestore(&ioc->lock, flags);
2978 }
2979 
2980 static void ioc_pd_free(struct blkg_policy_data *pd)
2981 {
2982 	struct ioc_gq *iocg = pd_to_iocg(pd);
2983 	struct ioc *ioc = iocg->ioc;
2984 	unsigned long flags;
2985 
2986 	if (ioc) {
2987 		spin_lock_irqsave(&ioc->lock, flags);
2988 
2989 		if (!list_empty(&iocg->active_list)) {
2990 			struct ioc_now now;
2991 
2992 			ioc_now(ioc, &now);
2993 			propagate_weights(iocg, 0, 0, false, &now);
2994 			list_del_init(&iocg->active_list);
2995 		}
2996 
2997 		WARN_ON_ONCE(!list_empty(&iocg->walk_list));
2998 		WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2999 
3000 		spin_unlock_irqrestore(&ioc->lock, flags);
3001 
3002 		hrtimer_cancel(&iocg->waitq_timer);
3003 	}
3004 	free_percpu(iocg->pcpu_stat);
3005 	kfree(iocg);
3006 }
3007 
3008 static void ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
3009 {
3010 	struct ioc_gq *iocg = pd_to_iocg(pd);
3011 	struct ioc *ioc = iocg->ioc;
3012 
3013 	if (!ioc->enabled)
3014 		return;
3015 
3016 	if (iocg->level == 0) {
3017 		unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
3018 			ioc->vtime_base_rate * 10000,
3019 			VTIME_PER_USEC);
3020 		seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
3021 	}
3022 
3023 	seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);
3024 
3025 	if (blkcg_debug_stats)
3026 		seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
3027 			iocg->last_stat.wait_us,
3028 			iocg->last_stat.indebt_us,
3029 			iocg->last_stat.indelay_us);
3030 }
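
/*
 * vp10k scales vtime_base_rate into hundredths of a percent - e.g. a
 * base rate of 1.5 * VTIME_PER_USEC prints as "cost.vrate=150.00".
 */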
3031 
3032 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3033 			     int off)
3034 {
3035 	const char *dname = blkg_dev_name(pd->blkg);
3036 	struct ioc_gq *iocg = pd_to_iocg(pd);
3037 
3038 	if (dname && iocg->cfg_weight)
3039 		seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
3040 	return 0;
3041 }
3042 
3044 static int ioc_weight_show(struct seq_file *sf, void *v)
3045 {
3046 	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3047 	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3048 
3049 	seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
3050 	blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
3051 			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
3052 	return 0;
3053 }
3054 
3055 static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
3056 				size_t nbytes, loff_t off)
3057 {
3058 	struct blkcg *blkcg = css_to_blkcg(of_css(of));
3059 	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3060 	struct blkg_conf_ctx ctx;
3061 	struct ioc_now now;
3062 	struct ioc_gq *iocg;
3063 	u32 v;
3064 	int ret;
3065 
3066 	if (!strchr(buf, ':')) {
3067 		struct blkcg_gq *blkg;
3068 
3069 		if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
3070 			return -EINVAL;
3071 
3072 		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3073 			return -EINVAL;
3074 
3075 		spin_lock_irq(&blkcg->lock);
3076 		iocc->dfl_weight = v * WEIGHT_ONE;
3077 		hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3078 			struct ioc_gq *iocg = blkg_to_iocg(blkg);
3079 
3080 			if (iocg) {
3081 				spin_lock(&iocg->ioc->lock);
3082 				ioc_now(iocg->ioc, &now);
3083 				weight_updated(iocg, &now);
3084 				spin_unlock(&iocg->ioc->lock);
3085 			}
3086 		}
3087 		spin_unlock_irq(&blkcg->lock);
3088 
3089 		return nbytes;
3090 	}
3091 
3092 	ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
3093 	if (ret)
3094 		return ret;
3095 
3096 	iocg = blkg_to_iocg(ctx.blkg);
3097 
3098 	if (!strncmp(ctx.body, "default", 7)) {
3099 		v = 0;
3100 	} else {
3101 		if (!sscanf(ctx.body, "%u", &v))
3102 			goto einval;
3103 		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3104 			goto einval;
3105 	}
3106 
3107 	spin_lock(&iocg->ioc->lock);
3108 	iocg->cfg_weight = v * WEIGHT_ONE;
3109 	ioc_now(iocg->ioc, &now);
3110 	weight_updated(iocg, &now);
3111 	spin_unlock(&iocg->ioc->lock);
3112 
3113 	blkg_conf_finish(&ctx);
3114 	return nbytes;
3115 
3116 einval:
3117 	blkg_conf_finish(&ctx);
3118 	return -EINVAL;
3119 }
3120 
3121 static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3122 			  int off)
3123 {
3124 	const char *dname = blkg_dev_name(pd->blkg);
3125 	struct ioc *ioc = pd_to_iocg(pd)->ioc;
3126 
3127 	if (!dname)
3128 		return 0;
3129 
3130 	seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
3131 		   dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
3132 		   ioc->params.qos[QOS_RPPM] / 10000,
3133 		   ioc->params.qos[QOS_RPPM] % 10000 / 100,
3134 		   ioc->params.qos[QOS_RLAT],
3135 		   ioc->params.qos[QOS_WPPM] / 10000,
3136 		   ioc->params.qos[QOS_WPPM] % 10000 / 100,
3137 		   ioc->params.qos[QOS_WLAT],
3138 		   ioc->params.qos[QOS_MIN] / 10000,
3139 		   ioc->params.qos[QOS_MIN] % 10000 / 100,
3140 		   ioc->params.qos[QOS_MAX] / 10000,
3141 		   ioc->params.qos[QOS_MAX] % 10000 / 100);
3142 	return 0;
3143 }
3144 
3145 static int ioc_qos_show(struct seq_file *sf, void *v)
3146 {
3147 	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3148 
3149 	blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
3150 			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
3151 	return 0;
3152 }
3153 
3154 static const match_table_t qos_ctrl_tokens = {
3155 	{ QOS_ENABLE,		"enable=%u"	},
3156 	{ QOS_CTRL,		"ctrl=%s"	},
3157 	{ NR_QOS_CTRL_PARAMS,	NULL		},
3158 };
3159 
3160 static const match_table_t qos_tokens = {
3161 	{ QOS_RPPM,		"rpct=%s"	},
3162 	{ QOS_RLAT,		"rlat=%u"	},
3163 	{ QOS_WPPM,		"wpct=%s"	},
3164 	{ QOS_WLAT,		"wlat=%u"	},
3165 	{ QOS_MIN,		"min=%s"	},
3166 	{ QOS_MAX,		"max=%s"	},
3167 	{ NR_QOS_PARAMS,	NULL		},
3168 };
3169 
3170 static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
3171 			     size_t nbytes, loff_t off)
3172 {
3173 	struct block_device *bdev;
3174 	struct gendisk *disk;
3175 	struct ioc *ioc;
3176 	u32 qos[NR_QOS_PARAMS];
3177 	bool enable, user;
3178 	char *p;
3179 	int ret;
3180 
3181 	bdev = blkcg_conf_open_bdev(&input);
3182 	if (IS_ERR(bdev))
3183 		return PTR_ERR(bdev);
3184 
3185 	disk = bdev->bd_disk;
3186 	ioc = q_to_ioc(disk->queue);
3187 	if (!ioc) {
3188 		ret = blk_iocost_init(disk);
3189 		if (ret)
3190 			goto err;
3191 		ioc = q_to_ioc(disk->queue);
3192 	}
3193 
3194 	blk_mq_freeze_queue(disk->queue);
3195 	blk_mq_quiesce_queue(disk->queue);
3196 
3197 	spin_lock_irq(&ioc->lock);
3198 	memcpy(qos, ioc->params.qos, sizeof(qos));
3199 	enable = ioc->enabled;
3200 	user = ioc->user_qos_params;
3201 
3202 	while ((p = strsep(&input, " \t\n"))) {
3203 		substring_t args[MAX_OPT_ARGS];
3204 		char buf[32];
3205 		int tok;
3206 		s64 v;
3207 
3208 		if (!*p)
3209 			continue;
3210 
3211 		switch (match_token(p, qos_ctrl_tokens, args)) {
3212 		case QOS_ENABLE:
3213 			match_u64(&args[0], &v);
3214 			enable = v;
3215 			continue;
3216 		case QOS_CTRL:
3217 			match_strlcpy(buf, &args[0], sizeof(buf));
3218 			if (!strcmp(buf, "auto"))
3219 				user = false;
3220 			else if (!strcmp(buf, "user"))
3221 				user = true;
3222 			else
3223 				goto einval;
3224 			continue;
3225 		}
3226 
3227 		tok = match_token(p, qos_tokens, args);
3228 		switch (tok) {
3229 		case QOS_RPPM:
3230 		case QOS_WPPM:
3231 			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3232 			    sizeof(buf))
3233 				goto einval;
3234 			if (cgroup_parse_float(buf, 2, &v))
3235 				goto einval;
3236 			if (v < 0 || v > 10000)
3237 				goto einval;
3238 			qos[tok] = v * 100;
3239 			break;
3240 		case QOS_RLAT:
3241 		case QOS_WLAT:
3242 			if (match_u64(&args[0], &v))
3243 				goto einval;
3244 			qos[tok] = v;
3245 			break;
3246 		case QOS_MIN:
3247 		case QOS_MAX:
3248 			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3249 			    sizeof(buf))
3250 				goto einval;
3251 			if (cgroup_parse_float(buf, 2, &v))
3252 				goto einval;
3253 			if (v < 0)
3254 				goto einval;
3255 			qos[tok] = clamp_t(s64, v * 100,
3256 					   VRATE_MIN_PPM, VRATE_MAX_PPM);
3257 			break;
3258 		default:
3259 			goto einval;
3260 		}
3261 		user = true;
3262 	}
3263 
3264 	if (qos[QOS_MIN] > qos[QOS_MAX])
3265 		goto einval;
3266 
3267 	if (enable) {
3268 		blk_stat_enable_accounting(disk->queue);
3269 		blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3270 		ioc->enabled = true;
3271 		wbt_disable_default(disk->queue);
3272 	} else {
3273 		blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3274 		ioc->enabled = false;
3275 		wbt_enable_default(disk->queue);
3276 	}
3277 
3278 	if (user) {
3279 		memcpy(ioc->params.qos, qos, sizeof(qos));
3280 		ioc->user_qos_params = true;
3281 	} else {
3282 		ioc->user_qos_params = false;
3283 	}
3284 
3285 	ioc_refresh_params(ioc, true);
3286 	spin_unlock_irq(&ioc->lock);
3287 
3288 	blk_mq_unquiesce_queue(disk->queue);
3289 	blk_mq_unfreeze_queue(disk->queue);
3290 
3291 	blkdev_put_no_open(bdev);
3292 	return nbytes;
3293 einval:
3294 	spin_unlock_irq(&ioc->lock);
3295 
3296 	blk_mq_unquiesce_queue(disk->queue);
3297 	blk_mq_unfreeze_queue(disk->queue);
3298 
3299 	ret = -EINVAL;
3300 err:
3301 	blkdev_put_no_open(bdev);
3302 	return ret;
3303 }
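
/*
 * A worked example of the percent handling above: "rpct=95.00" is
 * parsed by cgroup_parse_float() with two decimals into v == 9500 and
 * stored as qos[QOS_RPPM] == 950000 ppm; the timer then allows up to
 * MILLION - 950000 == 50000 ppm (5%) of reads to miss rlat before the
 * device counts as busy.
 */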
3304 
3305 static u64 ioc_cost_model_prfill(struct seq_file *sf,
3306 				 struct blkg_policy_data *pd, int off)
3307 {
3308 	const char *dname = blkg_dev_name(pd->blkg);
3309 	struct ioc *ioc = pd_to_iocg(pd)->ioc;
3310 	u64 *u = ioc->params.i_lcoefs;
3311 
3312 	if (!dname)
3313 		return 0;
3314 
3315 	seq_printf(sf, "%s ctrl=%s model=linear "
3316 		   "rbps=%llu rseqiops=%llu rrandiops=%llu "
3317 		   "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
3318 		   dname, ioc->user_cost_model ? "user" : "auto",
3319 		   u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
3320 		   u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
3321 	return 0;
3322 }
3323 
3324 static int ioc_cost_model_show(struct seq_file *sf, void *v)
3325 {
3326 	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3327 
3328 	blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
3329 			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
3330 	return 0;
3331 }
3332 
3333 static const match_table_t cost_ctrl_tokens = {
3334 	{ COST_CTRL,		"ctrl=%s"	},
3335 	{ COST_MODEL,		"model=%s"	},
3336 	{ NR_COST_CTRL_PARAMS,	NULL		},
3337 };
3338 
3339 static const match_table_t i_lcoef_tokens = {
3340 	{ I_LCOEF_RBPS,		"rbps=%u"	},
3341 	{ I_LCOEF_RSEQIOPS,	"rseqiops=%u"	},
3342 	{ I_LCOEF_RRANDIOPS,	"rrandiops=%u"	},
3343 	{ I_LCOEF_WBPS,		"wbps=%u"	},
3344 	{ I_LCOEF_WSEQIOPS,	"wseqiops=%u"	},
3345 	{ I_LCOEF_WRANDIOPS,	"wrandiops=%u"	},
3346 	{ NR_I_LCOEFS,		NULL		},
3347 };
3348 
3349 static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
3350 				    size_t nbytes, loff_t off)
3351 {
3352 	struct block_device *bdev;
3353 	struct request_queue *q;
3354 	struct ioc *ioc;
3355 	u64 u[NR_I_LCOEFS];
3356 	bool user;
3357 	char *p;
3358 	int ret;
3359 
3360 	bdev = blkcg_conf_open_bdev(&input);
3361 	if (IS_ERR(bdev))
3362 		return PTR_ERR(bdev);
3363 
3364 	q = bdev_get_queue(bdev);
3365 	ioc = q_to_ioc(q);
3366 	if (!ioc) {
3367 		ret = blk_iocost_init(bdev->bd_disk);
3368 		if (ret)
3369 			goto err;
3370 		ioc = q_to_ioc(q);
3371 	}
3372 
3373 	blk_mq_freeze_queue(q);
3374 	blk_mq_quiesce_queue(q);
3375 
3376 	spin_lock_irq(&ioc->lock);
3377 	memcpy(u, ioc->params.i_lcoefs, sizeof(u));
3378 	user = ioc->user_cost_model;
3379 
3380 	while ((p = strsep(&input, " \t\n"))) {
3381 		substring_t args[MAX_OPT_ARGS];
3382 		char buf[32];
3383 		int tok;
3384 		u64 v;
3385 
3386 		if (!*p)
3387 			continue;
3388 
3389 		switch (match_token(p, cost_ctrl_tokens, args)) {
3390 		case COST_CTRL:
3391 			match_strlcpy(buf, &args[0], sizeof(buf));
3392 			if (!strcmp(buf, "auto"))
3393 				user = false;
3394 			else if (!strcmp(buf, "user"))
3395 				user = true;
3396 			else
3397 				goto einval;
3398 			continue;
3399 		case COST_MODEL:
3400 			match_strlcpy(buf, &args[0], sizeof(buf));
3401 			if (strcmp(buf, "linear"))
3402 				goto einval;
3403 			continue;
3404 		}
3405 
3406 		tok = match_token(p, i_lcoef_tokens, args);
3407 		if (tok == NR_I_LCOEFS)
3408 			goto einval;
3409 		if (match_u64(&args[0], &v))
3410 			goto einval;
3411 		u[tok] = v;
3412 		user = true;
3413 	}
3414 
3415 	if (user) {
3416 		memcpy(ioc->params.i_lcoefs, u, sizeof(u));
3417 		ioc->user_cost_model = true;
3418 	} else {
3419 		ioc->user_cost_model = false;
3420 	}
3421 	ioc_refresh_params(ioc, true);
3422 	spin_unlock_irq(&ioc->lock);
3423 
3424 	blk_mq_unquiesce_queue(q);
3425 	blk_mq_unfreeze_queue(q);
3426 
3427 	blkdev_put_no_open(bdev);
3428 	return nbytes;
3429 
3430 einval:
3431 	spin_unlock_irq(&ioc->lock);
3432 
3433 	blk_mq_unquiesce_queue(q);
3434 	blk_mq_unfreeze_queue(q);
3435 
3436 	ret = -EINVAL;
3437 err:
3438 	blkdev_put_no_open(bdev);
3439 	return ret;
3440 }
3441 
3442 static struct cftype ioc_files[] = {
3443 	{
3444 		.name = "weight",
3445 		.flags = CFTYPE_NOT_ON_ROOT,
3446 		.seq_show = ioc_weight_show,
3447 		.write = ioc_weight_write,
3448 	},
3449 	{
3450 		.name = "cost.qos",
3451 		.flags = CFTYPE_ONLY_ON_ROOT,
3452 		.seq_show = ioc_qos_show,
3453 		.write = ioc_qos_write,
3454 	},
3455 	{
3456 		.name = "cost.model",
3457 		.flags = CFTYPE_ONLY_ON_ROOT,
3458 		.seq_show = ioc_cost_model_show,
3459 		.write = ioc_cost_model_write,
3460 	},
3461 	{}
3462 };
3463 
3464 static struct blkcg_policy blkcg_policy_iocost = {
3465 	.dfl_cftypes	= ioc_files,
3466 	.cpd_alloc_fn	= ioc_cpd_alloc,
3467 	.cpd_free_fn	= ioc_cpd_free,
3468 	.pd_alloc_fn	= ioc_pd_alloc,
3469 	.pd_init_fn	= ioc_pd_init,
3470 	.pd_free_fn	= ioc_pd_free,
3471 	.pd_stat_fn	= ioc_pd_stat,
3472 };
3473 
3474 static int __init ioc_init(void)
3475 {
3476 	return blkcg_policy_register(&blkcg_policy_iocost);
3477 }
3478 
3479 static void __exit ioc_exit(void)
3480 {
3481 	blkcg_policy_unregister(&blkcg_policy_iocost);
3482 }
3483 
3484 module_init(ioc_init);
3485 module_exit(ioc_exit);
3486