Lines Matching refs: ioc (identifier cross-reference, block/blk-iocost.c)
406 struct ioc { struct
464 struct ioc *ioc; member
660 static struct ioc *rqos_to_ioc(struct rq_qos *rqos) in rqos_to_ioc()
662 return container_of(rqos, struct ioc, rqos); in rqos_to_ioc()
665 static struct ioc *q_to_ioc(struct request_queue *q) in q_to_ioc()
670 static const char __maybe_unused *ioc_name(struct ioc *ioc) in ioc_name() argument
672 struct gendisk *disk = ioc->rqos.disk; in ioc_name()
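The accessors at lines 660-672 recover the enclosing struct ioc from its embedded struct rq_qos with container_of(), i.e. by subtracting the member offset from the member pointer. A minimal userspace sketch of the idiom (the kernel macro additionally type-checks the member; names mirror the listing but the structs are pared down):

    #include <stddef.h>
    #include <stdio.h>

    /* simplified container_of(): step back from a member to its parent */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rq_qos { int id; };
    struct ioc { long period_us; struct rq_qos rqos; };

    static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
    {
        return container_of(rqos, struct ioc, rqos);
    }

    int main(void)
    {
        struct ioc ioc = { .period_us = 5000 };
        /* given only the embedded member, recover the parent */
        printf("%ld\n", rqos_to_ioc(&ioc.rqos)->period_us); /* 5000 */
        return 0;
    }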
733 spin_lock_irqsave(&iocg->ioc->lock, *flags); in iocg_lock()
744 spin_unlock_irqrestore(&iocg->ioc->lock, *flags); in iocg_unlock()
753 static void ioc_refresh_margins(struct ioc *ioc) in ioc_refresh_margins() argument
755 struct ioc_margins *margins = &ioc->margins; in ioc_refresh_margins()
756 u32 period_us = ioc->period_us; in ioc_refresh_margins()
757 u64 vrate = ioc->vtime_base_rate; in ioc_refresh_margins()
765 static void ioc_refresh_period_us(struct ioc *ioc) in ioc_refresh_period_us() argument
769 lockdep_assert_held(&ioc->lock); in ioc_refresh_period_us()
772 if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) { in ioc_refresh_period_us()
773 ppm = ioc->params.qos[QOS_RPPM]; in ioc_refresh_period_us()
774 lat = ioc->params.qos[QOS_RLAT]; in ioc_refresh_period_us()
776 ppm = ioc->params.qos[QOS_WPPM]; in ioc_refresh_period_us()
777 lat = ioc->params.qos[QOS_WLAT]; in ioc_refresh_period_us()
796 ioc->period_us = period_us; in ioc_refresh_period_us()
797 ioc->timer_slack_ns = div64_u64( in ioc_refresh_period_us()
800 ioc_refresh_margins(ioc); in ioc_refresh_period_us()
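Lines 772-777 base the control period on whichever direction has the larger latency target, and lines 796-800 derive the dependent parameters from it. A sketch of that derivation, assuming the upstream heuristic of a percentile-scaled multiplier (the clamp bounds and the 50000 divisor here are illustrative):

    #include <stdint.h>

    #define MILLION    1000000u
    #define MIN_PERIOD 1000u     /* us, assumed clamp bounds */
    #define MAX_PERIOD 1000000u

    static uint32_t refresh_period_us(uint32_t rppm, uint32_t rlat,
                                      uint32_t wppm, uint32_t wlat)
    {
        uint32_t ppm, lat, multi, period_us;

        /* pick the direction with the higher latency target */
        if (rlat >= wlat) { ppm = rppm; lat = rlat; }
        else              { ppm = wppm; lat = wlat; }

        /* period = small multiple of the target, so each period sees
         * enough completions to judge the miss ratio */
        multi = 2;
        if (ppm && (MILLION - ppm) / 50000 > multi)
            multi = (MILLION - ppm) / 50000;
        period_us = multi * lat;

        if (period_us < MIN_PERIOD) period_us = MIN_PERIOD;
        if (period_us > MAX_PERIOD) period_us = MAX_PERIOD;
        return period_us;
    }

The timer slack at line 797 is then a fixed fraction of this period, and ioc_refresh_margins() (lines 753-757) converts the period into vtime-domain margins via vtime_base_rate.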
807 static int ioc_autop_idx(struct ioc *ioc, struct gendisk *disk) in ioc_autop_idx() argument
809 int idx = ioc->autop_idx; in ioc_autop_idx()
827 if (ioc->user_qos_params || ioc->user_cost_model) in ioc_autop_idx()
831 vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC); in ioc_autop_idx()
835 if (!ioc->autop_too_fast_at) in ioc_autop_idx()
836 ioc->autop_too_fast_at = now_ns; in ioc_autop_idx()
837 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC) in ioc_autop_idx()
840 ioc->autop_too_fast_at = 0; in ioc_autop_idx()
844 if (!ioc->autop_too_slow_at) in ioc_autop_idx()
845 ioc->autop_too_slow_at = now_ns; in ioc_autop_idx()
846 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC) in ioc_autop_idx()
849 ioc->autop_too_slow_at = 0; in ioc_autop_idx()
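ioc_autop_idx() (lines 807-849) only moves between QoS presets after the triggering condition has held for a full cycle: the first time the device looks too fast (or too slow) it records a timestamp, clears it whenever the condition breaks, and acts only once now_ns minus the stamp exceeds AUTOP_CYCLE_NSEC. A sketch of that hysteresis (the 10s window is an assumption):

    #include <stdint.h>
    #include <stdbool.h>

    #define AUTOP_CYCLE_NSEC (10ULL * 1000 * 1000 * 1000) /* assumed 10s */

    struct autop {
        uint64_t too_fast_at; /* 0: condition not currently holding */
    };

    static bool should_step_up(struct autop *ap, bool too_fast,
                               uint64_t now_ns)
    {
        if (!too_fast) {
            ap->too_fast_at = 0;      /* condition broke; restart window */
            return false;
        }
        if (!ap->too_fast_at)
            ap->too_fast_at = now_ns; /* condition just began */
        return now_ns - ap->too_fast_at >= AUTOP_CYCLE_NSEC;
    }

Line 827 shows the other gate: automatic preset selection is skipped entirely once the user has pinned either the QoS or the cost-model parameters.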
897 static void ioc_refresh_lcoefs(struct ioc *ioc) in ioc_refresh_lcoefs() argument
899 u64 *u = ioc->params.i_lcoefs; in ioc_refresh_lcoefs()
900 u64 *c = ioc->params.lcoefs; in ioc_refresh_lcoefs()
912 static bool ioc_refresh_params_disk(struct ioc *ioc, bool force, in ioc_refresh_params_disk() argument
918 lockdep_assert_held(&ioc->lock); in ioc_refresh_params_disk()
920 idx = ioc_autop_idx(ioc, disk); in ioc_refresh_params_disk()
923 if (idx == ioc->autop_idx && !force) in ioc_refresh_params_disk()
926 if (idx != ioc->autop_idx) { in ioc_refresh_params_disk()
927 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC); in ioc_refresh_params_disk()
928 ioc->vtime_base_rate = VTIME_PER_USEC; in ioc_refresh_params_disk()
931 ioc->autop_idx = idx; in ioc_refresh_params_disk()
932 ioc->autop_too_fast_at = 0; in ioc_refresh_params_disk()
933 ioc->autop_too_slow_at = 0; in ioc_refresh_params_disk()
935 if (!ioc->user_qos_params) in ioc_refresh_params_disk()
936 memcpy(ioc->params.qos, p->qos, sizeof(p->qos)); in ioc_refresh_params_disk()
937 if (!ioc->user_cost_model) in ioc_refresh_params_disk()
938 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs)); in ioc_refresh_params_disk()
940 ioc_refresh_period_us(ioc); in ioc_refresh_params_disk()
941 ioc_refresh_lcoefs(ioc); in ioc_refresh_params_disk()
943 ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] * in ioc_refresh_params_disk()
945 ioc->vrate_max = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MAX] * in ioc_refresh_params_disk()
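ioc_refresh_params_disk() (lines 912-945) merges auto-selected preset values with user overrides: preset qos and i_lcoefs values are copied in only while the corresponding user_* flag is clear, after which the derived period, linear coefficients, and vrate bounds are recomputed. A sketch of the override rule (struct layout simplified):

    #include <string.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define NR_QOS    6
    #define NR_LCOEFS 6

    struct params { uint64_t qos[NR_QOS]; uint64_t i_lcoefs[NR_LCOEFS]; };

    struct ioc_like {
        struct params params;
        bool user_qos_params;  /* set once qos came from the user */
        bool user_cost_model;  /* set once the cost model did */
    };

    static void apply_preset(struct ioc_like *ioc,
                             const struct params *preset)
    {
        /* user-pinned groups survive auto preset switches */
        if (!ioc->user_qos_params)
            memcpy(ioc->params.qos, preset->qos, sizeof(preset->qos));
        if (!ioc->user_cost_model)
            memcpy(ioc->params.i_lcoefs, preset->i_lcoefs,
                   sizeof(preset->i_lcoefs));
    }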
951 static bool ioc_refresh_params(struct ioc *ioc, bool force) in ioc_refresh_params() argument
953 return ioc_refresh_params_disk(ioc, force, ioc->rqos.disk); in ioc_refresh_params()
963 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now) in ioc_refresh_vrate() argument
965 s64 pleft = ioc->period_at + ioc->period_us - now->now; in ioc_refresh_vrate()
966 s64 vperiod = ioc->period_us * ioc->vtime_base_rate; in ioc_refresh_vrate()
969 lockdep_assert_held(&ioc->lock); in ioc_refresh_vrate()
980 vcomp = -div64_s64(ioc->vtime_err, pleft); in ioc_refresh_vrate()
981 vcomp_min = -(ioc->vtime_base_rate >> 1); in ioc_refresh_vrate()
982 vcomp_max = ioc->vtime_base_rate; in ioc_refresh_vrate()
985 ioc->vtime_err += vcomp * pleft; in ioc_refresh_vrate()
987 atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp); in ioc_refresh_vrate()
990 ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod); in ioc_refresh_vrate()
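ioc_refresh_vrate() (lines 963-990) folds the accumulated vtime error back into the effective rate: it computes the correction that would zero the error by period end, clamps it to [-base/2, +base] so the effective rate stays within 0.5x to 2x of base, deducts the paid-back portion from the error, and caps the carried error at one period's worth. A standalone sketch:

    #include <stdint.h>

    static int64_t clamp64(int64_t v, int64_t lo, int64_t hi)
    {
        return v < lo ? lo : v > hi ? hi : v;
    }

    struct vrate_state {
        int64_t  vtime_err; /* accumulated over/under-issue, vtime units */
        uint64_t base_rate; /* ioc->vtime_base_rate */
        uint64_t rate;      /* effective rate, ioc->vtime_rate */
    };

    static void refresh_vrate(struct vrate_state *s, int64_t pleft,
                              int64_t vperiod)
    {
        if (pleft > 0) {
            /* correction that would zero the error by period end */
            int64_t vcomp = -(s->vtime_err / pleft);

            vcomp = clamp64(vcomp, -(int64_t)(s->base_rate >> 1),
                            (int64_t)s->base_rate);

            /* the part being paid back no longer counts as error */
            s->vtime_err += vcomp * pleft;
            s->rate = s->base_rate + vcomp;
        }
        /* never carry more than one period's worth of error */
        s->vtime_err = clamp64(s->vtime_err, -vperiod, vperiod);
    }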
993 static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct, in ioc_adjust_base_vrate() argument
997 u64 vrate = ioc->vtime_base_rate; in ioc_adjust_base_vrate()
998 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max; in ioc_adjust_base_vrate()
1000 if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) { in ioc_adjust_base_vrate()
1001 if (ioc->busy_level != prev_busy_level || nr_lagging) in ioc_adjust_base_vrate()
1002 trace_iocost_ioc_vrate_adj(ioc, vrate, in ioc_adjust_base_vrate()
1021 int idx = min_t(int, abs(ioc->busy_level), in ioc_adjust_base_vrate()
1025 if (ioc->busy_level > 0) in ioc_adjust_base_vrate()
1034 trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct, in ioc_adjust_base_vrate()
1037 ioc->vtime_base_rate = vrate; in ioc_adjust_base_vrate()
1038 ioc_refresh_margins(ioc); in ioc_adjust_base_vrate()
1042 static void ioc_now(struct ioc *ioc, struct ioc_now *now) in ioc_now() argument
1049 vrate = atomic64_read(&ioc->vtime_rate); in ioc_now()
1060 seq = read_seqcount_begin(&ioc->period_seqcount); in ioc_now()
1061 now->vnow = ioc->period_at_vtime + in ioc_now()
1062 (now->now - ioc->period_at) * vrate; in ioc_now()
1063 } while (read_seqcount_retry(&ioc->period_seqcount, seq)); in ioc_now()
1066 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now) in ioc_start_period() argument
1068 WARN_ON_ONCE(ioc->running != IOC_RUNNING); in ioc_start_period()
1070 write_seqcount_begin(&ioc->period_seqcount); in ioc_start_period()
1071 ioc->period_at = now->now; in ioc_start_period()
1072 ioc->period_at_vtime = now->vnow; in ioc_start_period()
1073 write_seqcount_end(&ioc->period_seqcount); in ioc_start_period()
1075 ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us); in ioc_start_period()
1076 add_timer(&ioc->timer); in ioc_start_period()
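ioc_now() (lines 1042-1063) extrapolates the current vtime from the period base without taking ioc->lock; the seqcount loop retries if ioc_start_period() (lines 1066-1076) republished the base concurrently. A userspace analogue with C11 atomics (the kernel's seqcount API hides the fences made explicit here):

    #include <stdatomic.h>
    #include <stdint.h>

    struct period {
        atomic_uint seq;                  /* odd while an update runs */
        _Atomic uint64_t period_at;       /* period start (us) */
        _Atomic uint64_t period_at_vtime; /* vtime at period start */
    };

    /* writer: runs under the controller lock, as in ioc_start_period() */
    static void period_update(struct period *p, uint64_t at, uint64_t vat)
    {
        unsigned s = atomic_load_explicit(&p->seq, memory_order_relaxed);

        atomic_store_explicit(&p->seq, s + 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_release); /* odd before data */
        atomic_store_explicit(&p->period_at, at, memory_order_relaxed);
        atomic_store_explicit(&p->period_at_vtime, vat,
                              memory_order_relaxed);
        atomic_store_explicit(&p->seq, s + 2, memory_order_release);
    }

    /* reader: mirrors the read_seqcount_begin()/retry loop in ioc_now() */
    static uint64_t vnow(struct period *p, uint64_t now, uint64_t vrate)
    {
        unsigned s1, s2;
        uint64_t at, vat;

        do {
            s1 = atomic_load_explicit(&p->seq, memory_order_acquire);
            at = atomic_load_explicit(&p->period_at,
                                      memory_order_relaxed);
            vat = atomic_load_explicit(&p->period_at_vtime,
                                       memory_order_relaxed);
            atomic_thread_fence(memory_order_acquire);
            s2 = atomic_load_explicit(&p->seq, memory_order_relaxed);
        } while (s1 != s2 || (s1 & 1)); /* torn or in-progress: retry */

        return vat + (now - at) * vrate;
    }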
1087 struct ioc *ioc = iocg->ioc; in __propagate_weights() local
1090 lockdep_assert_held(&ioc->lock); in __propagate_weights()
1151 ioc->weights_updated = true; in __propagate_weights()
1154 static void commit_weights(struct ioc *ioc) in commit_weights() argument
1156 lockdep_assert_held(&ioc->lock); in commit_weights()
1158 if (ioc->weights_updated) { in commit_weights()
1161 atomic_inc(&ioc->hweight_gen); in commit_weights()
1162 ioc->weights_updated = false; in commit_weights()
1170 commit_weights(iocg->ioc); in propagate_weights()
1175 struct ioc *ioc = iocg->ioc; in current_hweight() local
1181 ioc_gen = atomic_read(&ioc->hweight_gen); in current_hweight()
1238 lockdep_assert_held(&iocg->ioc->lock); in current_hweight_max()
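Weight propagation uses a generation counter instead of eager recomputation: __propagate_weights() marks weights_updated, commit_weights() (lines 1154-1162) turns any pending update into a single atomic_inc of hweight_gen, and current_hweight() (line 1181) recomputes its cached hierarchical weight only when its remembered generation is stale. A sketch of the scheme:

    #include <stdatomic.h>

    static atomic_int hweight_gen = 1;

    struct iocg_like {
        int      cached_gen; /* generation the cache was computed under */
        unsigned hwi;        /* cached hierarchical weight */
    };

    static unsigned current_hweight(struct iocg_like *g)
    {
        int gen = atomic_load(&hweight_gen);

        if (g->cached_gen != gen) {
            /* stale: recompute from ancestors (elided), then re-tag */
            g->hwi = 100; /* placeholder for the real computation */
            g->cached_gen = gen;
        }
        return g->hwi;
    }

    /* writer: one bump after a batch of updates invalidates every cache */
    static void commit_weights_sketch(void)
    {
        atomic_fetch_add(&hweight_gen, 1);
    }

Line 1326's atomic_read(&ioc->hweight_gen) - 1 seeds a newly activated iocg with a deliberately stale generation so its first current_hweight() call always recomputes.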
1255 struct ioc *ioc = iocg->ioc; in weight_updated() local
1260 lockdep_assert_held(&ioc->lock); in weight_updated()
1270 struct ioc *ioc = iocg->ioc; in iocg_activate() local
1280 ioc_now(ioc, now); in iocg_activate()
1281 cur_period = atomic64_read(&ioc->cur_period); in iocg_activate()
1291 spin_lock_irq(&ioc->lock); in iocg_activate()
1293 ioc_now(ioc, now); in iocg_activate()
1296 cur_period = atomic64_read(&ioc->cur_period); in iocg_activate()
1314 vtarget = now->vnow - ioc->margins.target; in iocg_activate()
1326 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1; in iocg_activate()
1327 list_add(&iocg->active_list, &ioc->active_iocgs); in iocg_activate()
1337 if (ioc->running == IOC_IDLE) { in iocg_activate()
1338 ioc->running = IOC_RUNNING; in iocg_activate()
1339 ioc->dfgv_period_at = now->now; in iocg_activate()
1340 ioc->dfgv_period_rem = 0; in iocg_activate()
1341 ioc_start_period(ioc, now); in iocg_activate()
1345 spin_unlock_irq(&ioc->lock); in iocg_activate()
1349 spin_unlock_irq(&ioc->lock); in iocg_activate()
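iocg_activate() (lines 1270-1349) is structured as a double-checked fast path: the lockless checks against cur_period run first (lines 1280-1281), and ioc->lock is taken only when activation actually seems needed, with the condition re-verified under the lock (lines 1291-1296). A sketch of the pattern with the activation test reduced to one flag:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static pthread_mutex_t ioc_lock = PTHREAD_MUTEX_INITIALIZER;

    struct group { atomic_bool active; };

    static void do_activate(struct group *g)
    {
        /* link into the active list, reset local vtime, etc. (elided) */
        atomic_store(&g->active, true);
    }

    static void activate(struct group *g)
    {
        /* fast path: already active this period, no lock needed */
        if (atomic_load(&g->active))
            return;

        pthread_mutex_lock(&ioc_lock);
        /* re-check: another CPU may have won the race meanwhile */
        if (!atomic_load(&g->active))
            do_activate(g);
        pthread_mutex_unlock(&ioc_lock);
    }

The same shape appears again in adjust_inuse_and_calc_cost() (lines 2472-2522), which also tries a lockless path before falling back to ioc->lock.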
1355 struct ioc *ioc = iocg->ioc; in iocg_kick_delay() local
1383 ioc->period_us * ioc->vtime_base_rate); in iocg_kick_delay()
1423 lockdep_assert_held(&iocg->ioc->lock); in iocg_incur_debt()
1446 lockdep_assert_held(&iocg->ioc->lock); in iocg_pay_debt()
1503 struct ioc *ioc = iocg->ioc; in iocg_kick_waitq() local
1520 lockdep_assert_held(&ioc->lock); in iocg_kick_waitq()
1569 DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) * in iocg_kick_waitq()
1571 expires += ioc->timer_slack_ns; in iocg_kick_waitq()
1576 abs(oexpires - expires) <= ioc->timer_slack_ns) in iocg_kick_waitq()
1580 ioc->timer_slack_ns, HRTIMER_MODE_ABS); in iocg_kick_waitq()
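iocg_kick_waitq() converts the vtime shortage into a wall-clock deadline (lines 1569-1571) and then avoids reprogramming the hrtimer when the queued expiry is already within timer_slack_ns of the new one (lines 1576-1580). A simplified sketch of that rearm filter:

    #include <stdint.h>
    #include <stdbool.h>

    struct slack_timer {
        bool     armed;
        uint64_t expires_ns;
        uint64_t slack_ns;
    };

    static uint64_t absdiff(uint64_t a, uint64_t b)
    {
        return a > b ? a - b : b - a;
    }

    static bool needs_rearm(struct slack_timer *t, uint64_t new_expires)
    {
        if (t->armed && absdiff(t->expires_ns, new_expires) <= t->slack_ns)
            return false; /* close enough; skip the reprogram */
        t->armed = true;
        t->expires_ns = new_expires;
        return true;
    }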
1590 ioc_now(iocg->ioc, &now); in iocg_waitq_timer_fn()
1599 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p) in ioc_lat_stat() argument
1607 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu); in ioc_lat_stat()
1635 ioc->period_us * NSEC_PER_USEC); in ioc_lat_stat()
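ioc_lat_stat() (lines 1599-1635) walks the per-CPU ioc_pcpu_stat counters, sums them, and reduces the result to miss ratios in parts per million, the same scale the QoS parameters use. A simplified sketch (the kernel diffs cumulative counters against the values seen last period; this version just sums absolute counts):

    #include <stdint.h>

    #define MILLION  1000000u
    #define NR_SLOTS 8 /* stands in for the online-CPU count */

    struct pcpu_stat { uint64_t total; uint64_t missed; };

    static uint32_t missed_ppm(const struct pcpu_stat stat[NR_SLOTS])
    {
        uint64_t total = 0, missed = 0;

        for (int i = 0; i < NR_SLOTS; i++) { /* for_each_online_cpu() */
            total += stat[i].total;
            missed += stat[i].missed;
        }
        return total ? (uint32_t)(missed * MILLION / total) : 0;
    }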
1641 struct ioc *ioc = iocg->ioc; in iocg_is_idle() local
1645 atomic64_read(&ioc->cur_period)) in iocg_is_idle()
1705 struct ioc *ioc = iocg->ioc; in iocg_flush_stat_leaf() local
1710 lockdep_assert_held(&iocg->ioc->lock); in iocg_flush_stat_leaf()
1720 iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate); in iocg_flush_stat_leaf()
1753 struct ioc *ioc = iocg->ioc; in hweight_after_donation() local
1763 time_after64(vtime, now->vnow - ioc->margins.min)) in hweight_after_donation()
1767 excess = now->vnow - vtime - ioc->margins.target; in hweight_after_donation()
1772 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE); in hweight_after_donation()
1792 now->vnow - ioc->period_at_vtime); in hweight_after_donation()
2082 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors, in ioc_forgive_debts() argument
2090 ioc->dfgv_period_at = now->now; in ioc_forgive_debts()
2091 ioc->dfgv_period_rem = 0; in ioc_forgive_debts()
2092 ioc->dfgv_usage_us_sum = 0; in ioc_forgive_debts()
2102 if (ioc->busy_level > 0) in ioc_forgive_debts()
2103 usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us); in ioc_forgive_debts()
2105 ioc->dfgv_usage_us_sum += usage_us_sum; in ioc_forgive_debts()
2106 if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD)) in ioc_forgive_debts()
2113 dur = now->now - ioc->dfgv_period_at; in ioc_forgive_debts()
2114 usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur); in ioc_forgive_debts()
2116 ioc->dfgv_period_at = now->now; in ioc_forgive_debts()
2117 ioc->dfgv_usage_us_sum = 0; in ioc_forgive_debts()
2121 ioc->dfgv_period_rem = 0; in ioc_forgive_debts()
2134 nr_cycles = dur + ioc->dfgv_period_rem; in ioc_forgive_debts()
2135 ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD); in ioc_forgive_debts()
2137 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { in ioc_forgive_debts()
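ioc_forgive_debts() (lines 2082-2137) decays debt on a fixed cadence even though the timer fires at a variable pace: the elapsed time plus the remainder carried from the previous invocation is split into whole DFGV_PERIOD cycles, and the leftover is carried forward again (the do_div() at line 2135). A sketch of that remainder-carrying division (the 100ms window length is an assumption):

    #include <stdint.h>

    #define DFGV_PERIOD 100000ULL /* us; assumed window length */

    struct dfgv { uint64_t period_rem; };

    static uint64_t whole_cycles(struct dfgv *d, uint64_t dur)
    {
        uint64_t t = dur + d->period_rem;

        d->period_rem = t % DFGV_PERIOD; /* carry the fraction forward */
        return t / DFGV_PERIOD; /* caller decays debt once per cycle */
    }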
2175 static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now) in ioc_check_iocgs() argument
2180 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) { in ioc_check_iocgs()
2220 excess = now->vnow - vtime - ioc->margins.target; in ioc_check_iocgs()
2225 ioc->vtime_err -= div64_u64(excess * old_hwi, in ioc_check_iocgs()
2231 atomic64_read(&ioc->cur_period), vtime); in ioc_check_iocgs()
2239 commit_weights(ioc); in ioc_check_iocgs()
2245 struct ioc *ioc = container_of(timer, struct ioc, timer); in ioc_timer_fn() local
2258 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct); in ioc_timer_fn()
2261 spin_lock_irq(&ioc->lock); in ioc_timer_fn()
2263 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM]; in ioc_timer_fn()
2264 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM]; in ioc_timer_fn()
2265 ioc_now(ioc, &now); in ioc_timer_fn()
2267 period_vtime = now.vnow - ioc->period_at_vtime; in ioc_timer_fn()
2269 spin_unlock_irq(&ioc->lock); in ioc_timer_fn()
2273 nr_debtors = ioc_check_iocgs(ioc, &now); in ioc_timer_fn()
2279 iocg_flush_stat(&ioc->active_iocgs, &now); in ioc_timer_fn()
2282 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { in ioc_timer_fn()
2319 time_before64(vtime, now.vnow - ioc->margins.low))) { in ioc_timer_fn()
2326 ioc->vtime_base_rate); in ioc_timer_fn()
2332 if (time_after64(iocg->activated_at, ioc->period_at)) in ioc_timer_fn()
2335 usage_dur = max_t(u64, now.now - ioc->period_at, 1); in ioc_timer_fn()
2389 commit_weights(ioc); in ioc_timer_fn()
2401 prev_busy_level = ioc->busy_level; in ioc_timer_fn()
2406 ioc->busy_level = max(ioc->busy_level, 0); in ioc_timer_fn()
2407 ioc->busy_level++; in ioc_timer_fn()
2417 ioc->busy_level = min(ioc->busy_level, 0); in ioc_timer_fn()
2424 ioc->busy_level--; in ioc_timer_fn()
2432 ioc->busy_level = 0; in ioc_timer_fn()
2436 ioc->busy_level = 0; in ioc_timer_fn()
2439 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000); in ioc_timer_fn()
2441 ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages, in ioc_timer_fn()
2444 ioc_refresh_params(ioc, false); in ioc_timer_fn()
2446 ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now); in ioc_timer_fn()
2452 atomic64_inc(&ioc->cur_period); in ioc_timer_fn()
2454 if (ioc->running != IOC_STOP) { in ioc_timer_fn()
2455 if (!list_empty(&ioc->active_iocgs)) { in ioc_timer_fn()
2456 ioc_start_period(ioc, &now); in ioc_timer_fn()
2458 ioc->busy_level = 0; in ioc_timer_fn()
2459 ioc->vtime_err = 0; in ioc_timer_fn()
2460 ioc->running = IOC_IDLE; in ioc_timer_fn()
2463 ioc_refresh_vrate(ioc, &now); in ioc_timer_fn()
2466 spin_unlock_irq(&ioc->lock); in ioc_timer_fn()
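The busy_level updates inside ioc_timer_fn() (lines 2401-2439) form a clamped integrator: consecutive periods that miss the QoS targets push it positive, consecutive periods with headroom while someone is still short push it negative, a direction change or an ambiguous signal resets it, and line 2439 clamps the result to [-1000, 1000]. ioc_adjust_base_vrate() (line 1021) then uses abs(busy_level) as an index into a table of adjustment steps. A sketch of the integrator with the triggering conditions folded into two flags:

    #include <stdbool.h>

    static int busy_level;

    static void update_busy_level(bool missing_qos, bool has_shortage)
    {
        if (missing_qos) {
            if (busy_level < 0)
                busy_level = 0; /* direction change resets the streak */
            busy_level++;
        } else if (has_shortage) {
            if (busy_level > 0)
                busy_level = 0;
            busy_level--;
        } else {
            busy_level = 0;     /* mixed signal: hold the rate steady */
        }

        if (busy_level > 1000)
            busy_level = 1000;  /* a long streak cannot wind up forever */
        if (busy_level < -1000)
            busy_level = -1000;
    }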
2472 struct ioc *ioc = iocg->ioc; in adjust_inuse_and_calc_cost() local
2473 struct ioc_margins *margins = &ioc->margins; in adjust_inuse_and_calc_cost()
2497 spin_lock_irqsave(&ioc->lock, flags); in adjust_inuse_and_calc_cost()
2501 spin_unlock_irqrestore(&ioc->lock, flags); in adjust_inuse_and_calc_cost()
2522 spin_unlock_irqrestore(&ioc->lock, flags); in adjust_inuse_and_calc_cost()
2533 struct ioc *ioc = iocg->ioc; in calc_vtime_cost_builtin() local
2545 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO]; in calc_vtime_cost_builtin()
2546 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO]; in calc_vtime_cost_builtin()
2547 coef_page = ioc->params.lcoefs[LCOEF_RPAGE]; in calc_vtime_cost_builtin()
2550 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO]; in calc_vtime_cost_builtin()
2551 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO]; in calc_vtime_cost_builtin()
2552 coef_page = ioc->params.lcoefs[LCOEF_WPAGE]; in calc_vtime_cost_builtin()
2583 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc, in calc_size_vtime_cost_builtin() argument
2590 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE]; in calc_size_vtime_cost_builtin()
2593 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE]; in calc_size_vtime_cost_builtin()
2600 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc) in calc_size_vtime_cost() argument
2604 calc_size_vtime_cost_builtin(rq, ioc, &cost); in calc_size_vtime_cost()
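calc_vtime_cost_builtin() (lines 2533-2552) prices each bio with a linear model: a fixed per-IO term chosen by direction and by whether the IO is sequential or random, plus a per-page term. calc_size_vtime_cost_builtin() (lines 2583-2593) reuses just the per-page coefficient for size-only charging. A sketch of the model (coefficient values would be the lcoefs derived at lines 897-900; none are shown here):

    #include <stdint.h>
    #include <stdbool.h>

    struct lcoefs { uint64_t seqio, randio, page; };

    static uint64_t bio_cost(const struct lcoefs *c, bool is_seq,
                             uint64_t nr_pages)
    {
        uint64_t cost = is_seq ? c->seqio : c->randio; /* per-IO term */

        cost += nr_pages * c->page; /* size-proportional term */
        return cost;
    }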
2611 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_throttle() local
2620 if (!ioc->enabled || !iocg || !iocg->level) in ioc_rqos_throttle()
2747 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_merge() local
2754 if (!ioc->enabled || !iocg || !iocg->level) in ioc_rqos_merge()
2761 ioc_now(ioc, &now); in ioc_rqos_merge()
2786 spin_lock_irqsave(&ioc->lock, flags); in ioc_rqos_merge()
2799 spin_unlock_irqrestore(&ioc->lock, flags); in ioc_rqos_merge()
2812 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_done() local
2817 if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns) in ioc_rqos_done()
2835 size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC); in ioc_rqos_done()
2837 ccs = get_cpu_ptr(ioc->pcpu_stat); in ioc_rqos_done()
2840 on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC) in ioc_rqos_done()
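ioc_rqos_done() (lines 2812-2840) judges each completion against the QoS latency target after subtracting a size-proportional allowance, so large requests are not penalized for their inherent transfer time. A sketch of the recording side that feeds the per-CPU counters aggregated above (the kernel keeps the target in microseconds; nanoseconds are used throughout here for brevity):

    #include <stdint.h>

    struct lat_stat { uint64_t total; uint64_t missed; };

    static void account_done(struct lat_stat *st, uint64_t on_q_ns,
                             uint64_t size_nsec, uint64_t qos_lat_ns)
    {
        st->total++;
        /* missed only if queue time beyond the size allowance
         * exceeded the configured target */
        if (on_q_ns > size_nsec && on_q_ns - size_nsec > qos_lat_ns)
            st->missed++;
    }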
2852 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_queue_depth_changed() local
2854 spin_lock_irq(&ioc->lock); in ioc_rqos_queue_depth_changed()
2855 ioc_refresh_params(ioc, false); in ioc_rqos_queue_depth_changed()
2856 spin_unlock_irq(&ioc->lock); in ioc_rqos_queue_depth_changed()
2861 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_exit() local
2865 spin_lock_irq(&ioc->lock); in ioc_rqos_exit()
2866 ioc->running = IOC_STOP; in ioc_rqos_exit()
2867 spin_unlock_irq(&ioc->lock); in ioc_rqos_exit()
2869 timer_shutdown_sync(&ioc->timer); in ioc_rqos_exit()
2870 free_percpu(ioc->pcpu_stat); in ioc_rqos_exit()
2871 kfree(ioc); in ioc_rqos_exit()
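ioc_rqos_exit() (lines 2861-2871) tears down in a strict order: mark the controller stopped under the lock so a concurrently running timer callback will not re-arm itself, synchronously shut the timer down, and only then free the memory the callback touches. A loose userspace analogue (a joined thread stands in for timer_shutdown_sync(); the mapping is illustrative, not exact):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct ctl {
        pthread_mutex_t lock;
        bool            running;      /* analogue of ioc->running */
        pthread_t       timer_thread; /* stands in for ioc->timer */
        long           *pcpu_stat;    /* stands in for the percpu data */
    };

    static void ctl_exit(struct ctl *c)
    {
        /* 1. stop under the lock: an in-flight tick sees the flag and
         *    does not re-arm (ioc->running = IOC_STOP) */
        pthread_mutex_lock(&c->lock);
        c->running = false;
        pthread_mutex_unlock(&c->lock);

        /* 2. wait out any in-flight tick (timer_shutdown_sync()) */
        pthread_join(c->timer_thread, NULL);

        /* 3. only now is it safe to free what the tick dereferences */
        free(c->pcpu_stat);
        free(c);
    }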
2885 struct ioc *ioc; in blk_iocost_init() local
2888 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL); in blk_iocost_init()
2889 if (!ioc) in blk_iocost_init()
2892 ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat); in blk_iocost_init()
2893 if (!ioc->pcpu_stat) { in blk_iocost_init()
2894 kfree(ioc); in blk_iocost_init()
2899 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu); in blk_iocost_init()
2908 spin_lock_init(&ioc->lock); in blk_iocost_init()
2909 timer_setup(&ioc->timer, ioc_timer_fn, 0); in blk_iocost_init()
2910 INIT_LIST_HEAD(&ioc->active_iocgs); in blk_iocost_init()
2912 ioc->running = IOC_IDLE; in blk_iocost_init()
2913 ioc->vtime_base_rate = VTIME_PER_USEC; in blk_iocost_init()
2914 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC); in blk_iocost_init()
2915 seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock); in blk_iocost_init()
2916 ioc->period_at = ktime_to_us(ktime_get()); in blk_iocost_init()
2917 atomic64_set(&ioc->cur_period, 0); in blk_iocost_init()
2918 atomic_set(&ioc->hweight_gen, 0); in blk_iocost_init()
2920 spin_lock_irq(&ioc->lock); in blk_iocost_init()
2921 ioc->autop_idx = AUTOP_INVALID; in blk_iocost_init()
2922 ioc_refresh_params_disk(ioc, true, disk); in blk_iocost_init()
2923 spin_unlock_irq(&ioc->lock); in blk_iocost_init()
2931 ret = rq_qos_add(&ioc->rqos, disk, RQ_QOS_COST, &ioc_rqos_ops); in blk_iocost_init()
2941 rq_qos_del(&ioc->rqos); in blk_iocost_init()
2943 free_percpu(ioc->pcpu_stat); in blk_iocost_init()
2944 kfree(ioc); in blk_iocost_init()
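blk_iocost_init() (lines 2885-2944) follows the usual kernel unwind discipline: each acquisition has a matching release, and a failure frees exactly what was acquired so far, in reverse order. Lines 2893-2894 show the early inline unwind; lines 2941-2944 the later one after rq_qos_add() fails. A compilable sketch of the goto-unwind shape:

    #include <stdlib.h>

    struct ioc_like { long *pcpu_stat; void *qos; };

    static int init_like(struct ioc_like **out)
    {
        struct ioc_like *ioc;

        ioc = calloc(1, sizeof(*ioc));             /* kzalloc() */
        if (!ioc)
            return -1;                             /* -ENOMEM */

        ioc->pcpu_stat = calloc(64, sizeof(long)); /* alloc_percpu() */
        if (!ioc->pcpu_stat)
            goto err_free_ioc;

        ioc->qos = malloc(32);                     /* rq_qos_add() */
        if (!ioc->qos)
            goto err_free_stat;

        *out = ioc;
        return 0;

    err_free_stat:                                 /* reverse order */
        free(ioc->pcpu_stat);
    err_free_ioc:
        free(ioc);
        return -1;
    }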
2989 struct ioc *ioc = q_to_ioc(blkg->q); in ioc_pd_init() local
2994 ioc_now(ioc, &now); in ioc_pd_init()
2996 iocg->ioc = ioc; in ioc_pd_init()
2999 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period)); in ioc_pd_init()
3017 spin_lock_irqsave(&ioc->lock, flags); in ioc_pd_init()
3019 spin_unlock_irqrestore(&ioc->lock, flags); in ioc_pd_init()
3025 struct ioc *ioc = iocg->ioc; in ioc_pd_free() local
3028 if (ioc) { in ioc_pd_free()
3029 spin_lock_irqsave(&ioc->lock, flags); in ioc_pd_free()
3034 ioc_now(ioc, &now); in ioc_pd_free()
3042 spin_unlock_irqrestore(&ioc->lock, flags); in ioc_pd_free()
3053 struct ioc *ioc = iocg->ioc; in ioc_pd_stat() local
3055 if (!ioc->enabled) in ioc_pd_stat()
3060 ioc->vtime_base_rate * 10000, in ioc_pd_stat()
3123 spin_lock(&iocg->ioc->lock); in ioc_weight_write()
3124 ioc_now(iocg->ioc, &now); in ioc_weight_write()
3126 spin_unlock(&iocg->ioc->lock); in ioc_weight_write()
3151 spin_lock(&iocg->ioc->lock); in ioc_weight_write()
3153 ioc_now(iocg->ioc, &now); in ioc_weight_write()
3155 spin_unlock(&iocg->ioc->lock); in ioc_weight_write()
3171 struct ioc *ioc = pd_to_iocg(pd)->ioc; in ioc_qos_prfill() local
3176 spin_lock_irq(&ioc->lock); in ioc_qos_prfill()
3178 dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto", in ioc_qos_prfill()
3179 ioc->params.qos[QOS_RPPM] / 10000, in ioc_qos_prfill()
3180 ioc->params.qos[QOS_RPPM] % 10000 / 100, in ioc_qos_prfill()
3181 ioc->params.qos[QOS_RLAT], in ioc_qos_prfill()
3182 ioc->params.qos[QOS_WPPM] / 10000, in ioc_qos_prfill()
3183 ioc->params.qos[QOS_WPPM] % 10000 / 100, in ioc_qos_prfill()
3184 ioc->params.qos[QOS_WLAT], in ioc_qos_prfill()
3185 ioc->params.qos[QOS_MIN] / 10000, in ioc_qos_prfill()
3186 ioc->params.qos[QOS_MIN] % 10000 / 100, in ioc_qos_prfill()
3187 ioc->params.qos[QOS_MAX] / 10000, in ioc_qos_prfill()
3188 ioc->params.qos[QOS_MAX] % 10000 / 100); in ioc_qos_prfill()
3189 spin_unlock_irq(&ioc->lock); in ioc_qos_prfill()
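ioc_qos_prfill() (lines 3171-3189) prints parameters stored in millionths (ppm) as percentages with two decimals: the integer part is val/10000 and the fractional part val%10000/100. A runnable sketch of the formatting:

    #include <stdio.h>
    #include <stdint.h>

    static void print_pct(uint32_t ppm)
    {
        /* millionths -> "XX.YY"; mirrors lines 3179-3188 */
        printf("%u.%02u%%\n", ppm / 10000, ppm % 10000 / 100);
    }

    int main(void)
    {
        print_pct(999943);  /* -> 99.99%  */
        print_pct(1000000); /* -> 100.00% */
        print_pct(50000);   /* -> 5.00%   */
        return 0;
    }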
3223 struct ioc *ioc; in ioc_qos_write() local
3242 ioc = q_to_ioc(disk->queue); in ioc_qos_write()
3243 if (!ioc) { in ioc_qos_write()
3247 ioc = q_to_ioc(disk->queue); in ioc_qos_write()
3253 spin_lock_irq(&ioc->lock); in ioc_qos_write()
3254 memcpy(qos, ioc->params.qos, sizeof(qos)); in ioc_qos_write()
3255 enable = ioc->enabled; in ioc_qos_write()
3256 user = ioc->user_qos_params; in ioc_qos_write()
3324 if (enable && !ioc->enabled) { in ioc_qos_write()
3327 ioc->enabled = true; in ioc_qos_write()
3328 } else if (!enable && ioc->enabled) { in ioc_qos_write()
3331 ioc->enabled = false; in ioc_qos_write()
3335 memcpy(ioc->params.qos, qos, sizeof(qos)); in ioc_qos_write()
3336 ioc->user_qos_params = true; in ioc_qos_write()
3338 ioc->user_qos_params = false; in ioc_qos_write()
3341 ioc_refresh_params(ioc, true); in ioc_qos_write()
3342 spin_unlock_irq(&ioc->lock); in ioc_qos_write()
3355 spin_unlock_irq(&ioc->lock); in ioc_qos_write()
3370 struct ioc *ioc = pd_to_iocg(pd)->ioc; in ioc_cost_model_prfill() local
3371 u64 *u = ioc->params.i_lcoefs; in ioc_cost_model_prfill()
3376 spin_lock_irq(&ioc->lock); in ioc_cost_model_prfill()
3380 dname, ioc->user_cost_model ? "user" : "auto", in ioc_cost_model_prfill()
3383 spin_unlock_irq(&ioc->lock); in ioc_cost_model_prfill()
3417 struct ioc *ioc; in ioc_cost_model_write() local
3436 ioc = q_to_ioc(q); in ioc_cost_model_write()
3437 if (!ioc) { in ioc_cost_model_write()
3441 ioc = q_to_ioc(q); in ioc_cost_model_write()
3447 spin_lock_irq(&ioc->lock); in ioc_cost_model_write()
3448 memcpy(u, ioc->params.i_lcoefs, sizeof(u)); in ioc_cost_model_write()
3449 user = ioc->user_cost_model; in ioc_cost_model_write()
3487 memcpy(ioc->params.i_lcoefs, u, sizeof(u)); in ioc_cost_model_write()
3488 ioc->user_cost_model = true; in ioc_cost_model_write()
3490 ioc->user_cost_model = false; in ioc_cost_model_write()
3492 ioc_refresh_params(ioc, true); in ioc_cost_model_write()
3493 spin_unlock_irq(&ioc->lock); in ioc_cost_model_write()
3502 spin_unlock_irq(&ioc->lock); in ioc_cost_model_write()