Lines Matching refs:iolat

193 static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)  in lat_to_blkg()  argument
195 return pd_to_blkg(&iolat->pd); in lat_to_blkg()
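These first hits show the usual blk-cgroup policy-data embedding: struct iolatency_grp carries a struct blkg_policy_data, and lat_to_blkg() maps back to the owning blkcg_gq through pd_to_blkg(). A minimal user-space model of that back-pointer pattern, with hypothetical struct layouts standing in for the kernel types:

#include <stddef.h>

/* Hypothetical stand-ins for the kernel types referenced in the hits above. */
struct blkcg_gq { int id; };

struct blkg_policy_data {
	struct blkcg_gq *blkg;		/* back-pointer set when the pd is attached */
};

struct iolatency_grp {
	struct blkg_policy_data pd;	/* embedded policy data, as in the fragments */
};

/* Model of pd_to_blkg(): follow the back-pointer stored in the pd. */
static struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd->blkg;
}

/* Model of lat_to_blkg(): go from the per-policy struct to its cgroup. */
static struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
{
	return pd_to_blkg(&iolat->pd);
}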
198 static inline void latency_stat_init(struct iolatency_grp *iolat, in latency_stat_init() argument
201 if (iolat->ssd) { in latency_stat_init()
208 static inline void latency_stat_sum(struct iolatency_grp *iolat, in latency_stat_sum() argument
212 if (iolat->ssd) { in latency_stat_sum()
219 static inline void latency_stat_record_time(struct iolatency_grp *iolat, in latency_stat_record_time() argument
222 struct latency_stat *stat = get_cpu_ptr(iolat->stats); in latency_stat_record_time()
223 if (iolat->ssd) { in latency_stat_record_time()
224 if (req_time >= iolat->min_lat_nsec) in latency_stat_record_time()
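The latency_stat_init/sum/record_time hits all branch on iolat->ssd: for SSDs the controller only counts how many requests exceeded the configured target, while for rotational devices it accumulates enough state to compute a mean. In the kernel this happens on a per-CPU stat obtained with get_cpu_ptr() (visible at line 222). A self-contained sketch of that dual bookkeeping, with illustrative field names rather than the kernel's latency_stat layout:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the kernel's latency_stat. */
struct latency_stat {
	/* ssd mode: count requests over the target vs. total requests */
	uint64_t missed;
	uint64_t total;
	/* rotational mode: accumulate a mean request time */
	uint64_t sum_ns;
	uint64_t nr;
};

static void latency_stat_record_time(struct latency_stat *stat, bool ssd,
				     uint64_t min_lat_nsec, uint64_t req_time)
{
	if (ssd) {
		/* SSD: only track whether the request blew past the target. */
		if (req_time >= min_lat_nsec)
			stat->missed++;
		stat->total++;
	} else {
		/* Rotational: keep enough state to compute a mean later. */
		stat->sum_ns += req_time;
		stat->nr++;
	}
}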
232 static inline bool latency_sum_ok(struct iolatency_grp *iolat, in latency_sum_ok() argument
235 if (iolat->ssd) { in latency_sum_ok()
240 return stat->rqs.mean <= iolat->min_lat_nsec; in latency_sum_ok()
243 static inline u64 latency_stat_samples(struct iolatency_grp *iolat, in latency_stat_samples() argument
246 if (iolat->ssd) in latency_stat_samples()
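latency_sum_ok() is the per-window pass/fail test. The rotational branch at line 240 compares the window's mean against min_lat_nsec; the SSD branch compares the missed count against a small fraction of the total (roughly a 10% budget in current kernels, but treat the exact ratio below as an assumption). A sketch:

#include <stdbool.h>
#include <stdint.h>

/*
 * Window pass/fail check modelled on latency_sum_ok(). The 10% miss budget
 * for SSDs is an assumption about the kernel's constant; the rotational
 * comparison against min_lat_nsec is visible in the fragment at line 240.
 */
static bool latency_sum_ok(bool ssd, uint64_t min_lat_nsec,
			   uint64_t missed, uint64_t total, uint64_t mean_ns)
{
	if (ssd) {
		uint64_t budget = total / 10;	/* ~10% of the window's IOs */
		if (budget < 1)
			budget = 1;
		return missed < budget;
	}
	return mean_ns <= min_lat_nsec;
}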
251 static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat, in iolat_update_total_lat_avg() argument
256 if (iolat->ssd) in iolat_update_total_lat_avg()
267 div64_u64(iolat->cur_win_nsec, in iolat_update_total_lat_avg()
269 iolat->lat_avg = calc_load(iolat->lat_avg, in iolat_update_total_lat_avg()
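iolat_update_total_lat_avg() folds each window's mean into a running average using the scheduler's calc_load() fixed-point decay, picking the decay factor from the window length (and it appears to bail out early for SSDs at line 256). A user-space model of that exponentially-weighted update, with FIXED_1 = 2048 as in the kernel's fixed-point load code and the decay factor left as a parameter; the kernel's rounding tweak is omitted:

#include <stdint.h>

#define FIXED_1 2048ULL		/* fixed-point "1.0", as in the kernel load code */

/*
 * Model of calc_load(): new = old * exp + sample * (1 - exp), all in
 * FIXED_1 fixed point. The kernel indexes `exp` from a small table keyed
 * by the window length (iolatency_exp_factors[]); here it is a parameter.
 */
static uint64_t ewma_update(uint64_t avg, uint64_t exp, uint64_t sample)
{
	return (avg * exp + sample * (FIXED_1 - exp)) / FIXED_1;
}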
282 struct iolatency_grp *iolat = private_data; in iolat_acquire_inflight() local
283 return rq_wait_inc_below(rqw, iolat->max_depth); in iolat_acquire_inflight()
287 struct iolatency_grp *iolat, in __blkcg_iolatency_throttle() argument
291 struct rq_wait *rqw = &iolat->rq_wait; in __blkcg_iolatency_throttle()
292 unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay); in __blkcg_iolatency_throttle()
309 rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb); in __blkcg_iolatency_throttle()
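The throttle path gates submissions on an inflight counter: iolat_acquire_inflight() only takes a slot if the count stays below the group's current max_depth, and rq_qos_wait() keeps a submitter waiting until that predicate succeeds, while root-issued IO takes a slot unconditionally to avoid priority inversions. A self-contained model of the acquire predicate, assuming a plain atomic counter in place of the kernel's rq_wait:

#include <stdatomic.h>
#include <stdbool.h>

struct inflight_gate {
	atomic_uint inflight;
	unsigned int max_depth;
};

/* Equivalent in spirit to rq_wait_inc_below(rqw, iolat->max_depth). */
static bool acquire_inflight(struct inflight_gate *g)
{
	unsigned int cur = atomic_load(&g->inflight);

	while (cur < g->max_depth) {
		/* cur is refreshed on CAS failure, so the bound is re-checked. */
		if (atomic_compare_exchange_weak(&g->inflight, &cur, cur + 1))
			return true;
	}
	return false;
}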
373 static void scale_change(struct iolatency_grp *iolat, bool up) in scale_change() argument
375 unsigned long qd = iolat->blkiolat->rqos.disk->queue->nr_requests; in scale_change()
377 unsigned long old = iolat->max_depth; in scale_change()
383 if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat))) in scale_change()
389 iolat->max_depth = old; in scale_change()
390 wake_up_all(&iolat->rq_wait.wait); in scale_change()
394 iolat->max_depth = max(old, 1UL); in scale_change()
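scale_change() adjusts the group's queue depth asymmetrically: scaling down halves the current depth (never below 1, per line 394), while scaling up adds a slice of the device's full depth and caps at it, waking any waiters (line 390). The size of the "up" slice is an assumption here; in the kernel it comes from scale_amount(). A sketch of just the arithmetic:

#include <stdint.h>

static unsigned long scale_depth(unsigned long cur, unsigned long device_qd,
				 int up)
{
	if (cur > device_qd)
		cur = device_qd;

	if (up) {
		unsigned long step = device_qd / 16;	/* illustrative step size */
		if (step < 1)
			step = 1;
		cur += step;
		if (cur > device_qd)
			cur = device_qd;	/* never exceed the device depth */
	} else {
		cur >>= 1;			/* back off aggressively */
		if (cur < 1)
			cur = 1;
	}
	return cur;
}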
399 static void check_scale_change(struct iolatency_grp *iolat) in check_scale_change() argument
404 unsigned int our_cookie = atomic_read(&iolat->scale_cookie); in check_scale_change()
408 parent = blkg_to_lat(lat_to_blkg(iolat)->parent); in check_scale_change()
423 if (!atomic_try_cmpxchg(&iolat->scale_cookie, &our_cookie, cur_cookie)) { in check_scale_change()
428 if (direction < 0 && iolat->min_lat_nsec) { in check_scale_change()
431 if (!scale_lat || iolat->min_lat_nsec <= scale_lat) in check_scale_change()
442 if (iolat->nr_samples <= samples_thresh) in check_scale_change()
447 if (iolat->max_depth == 1 && direction < 0) { in check_scale_change()
448 blkcg_use_delay(lat_to_blkg(iolat)); in check_scale_change()
454 blkcg_clear_delay(lat_to_blkg(iolat)); in check_scale_change()
455 iolat->max_depth = UINT_MAX; in check_scale_change()
456 wake_up_all(&iolat->rq_wait.wait); in check_scale_change()
460 scale_change(iolat, direction > 0); in check_scale_change()
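Reading between the fragments, check_scale_change() works off a "scale cookie" published by the parent: each child caches the cookie value it last acted on, compares it with the parent's current value on every IO, and claims a changed cookie with a compare-and-swap before adjusting its depth, so only one issuer reacts per cookie change (line 423). A hedged model of that handshake, with a higher cookie taken to mean "scale up":

#include <stdatomic.h>

/* Returns >0 to scale up, <0 to scale down, 0 if nothing to do or lost the race. */
static int claim_scale_direction(atomic_uint *our_cookie, unsigned int cur_cookie)
{
	unsigned int old = atomic_load(our_cookie);

	if (old == cur_cookie)
		return 0;			/* parent's cookie unchanged */
	if (!atomic_compare_exchange_strong(our_cookie, &old, cur_cookie))
		return 0;			/* somebody else claimed it */
	return cur_cookie > old ? 1 : -1;
}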
473 struct iolatency_grp *iolat = blkg_to_lat(blkg); in blkcg_iolatency_throttle() local
474 if (!iolat) { in blkcg_iolatency_throttle()
479 check_scale_change(iolat); in blkcg_iolatency_throttle()
480 __blkcg_iolatency_throttle(rqos, iolat, issue_as_root, in blkcg_iolatency_throttle()
488 static void iolatency_record_time(struct iolatency_grp *iolat, in iolatency_record_time() argument
510 if (unlikely(issue_as_root && iolat->max_depth != UINT_MAX)) { in iolatency_record_time()
511 u64 sub = iolat->min_lat_nsec; in iolatency_record_time()
513 blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time); in iolatency_record_time()
517 latency_stat_record_time(iolat, req_time); in iolatency_record_time()
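iolatency_record_time() treats root-issued IO specially (line 510): when the group is actually throttled, a bio issued as root bypassed the throttle, so instead of letting its completion time skew the group's window stats, any time it "saved" below the target is charged back to the cgroup as delay via blkcg_add_delay(). A sketch of that filter, with the delay returned through an out-parameter instead of the kernel helper:

#include <stdbool.h>
#include <stdint.h>

/* Returns true if the sample should be recorded in the window stats. */
static bool root_issue_filter(bool issue_as_root, bool throttled,
			      uint64_t min_lat_nsec, uint64_t req_time,
			      uint64_t *delay_to_add)
{
	*delay_to_add = 0;

	if (issue_as_root && throttled) {
		/*
		 * Root-issued IO skipped the throttle; charge the time it
		 * finished under the target as delay rather than letting it
		 * drag the group's numbers down.
		 */
		if (req_time < min_lat_nsec)
			*delay_to_add = min_lat_nsec - req_time;
		return false;	/* don't record this sample */
	}
	return true;		/* normal IO: count it in the window stats */
}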
523 static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now) in iolatency_check_latencies() argument
525 struct blkcg_gq *blkg = lat_to_blkg(iolat); in iolatency_check_latencies()
532 latency_stat_init(iolat, &stat); in iolatency_check_latencies()
536 s = per_cpu_ptr(iolat->stats, cpu); in iolatency_check_latencies()
537 latency_stat_sum(iolat, &stat, s); in iolatency_check_latencies()
538 latency_stat_init(iolat, s); in iolatency_check_latencies()
548 iolat_update_total_lat_avg(iolat, &stat); in iolatency_check_latencies()
551 if (latency_sum_ok(iolat, &stat) && in iolatency_check_latencies()
558 latency_stat_sum(iolat, &iolat->cur_stat, &stat); in iolatency_check_latencies()
559 lat_info->nr_samples -= iolat->nr_samples; in iolatency_check_latencies()
560 lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat); in iolatency_check_latencies()
561 iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat); in iolatency_check_latencies()
567 if (latency_sum_ok(iolat, &iolat->cur_stat) && in iolatency_check_latencies()
568 latency_sum_ok(iolat, &stat)) { in iolatency_check_latencies()
569 if (latency_stat_samples(iolat, &iolat->cur_stat) < in iolatency_check_latencies()
572 if (lat_info->scale_grp == iolat) { in iolatency_check_latencies()
574 scale_cookie_change(iolat->blkiolat, lat_info, true); in iolatency_check_latencies()
577 lat_info->scale_lat >= iolat->min_lat_nsec) { in iolatency_check_latencies()
580 lat_info->scale_lat > iolat->min_lat_nsec) { in iolatency_check_latencies()
581 WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec); in iolatency_check_latencies()
582 lat_info->scale_grp = iolat; in iolatency_check_latencies()
584 scale_cookie_change(iolat->blkiolat, lat_info, false); in iolatency_check_latencies()
586 latency_stat_init(iolat, &iolat->cur_stat); in iolatency_check_latencies()
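iolatency_check_latencies() runs once per window: it folds the per-CPU stats into the parent's bookkeeping, then decides whether to ask for a scale-up (the group met its target, has enough samples, and is the group the hierarchy was scaled down for) or a scale-down (it missed its target and becomes the parent's scale_lat victim). A deliberately simplified shape of that decision; sample thresholds, the scale_lat comparison, and locking are all elided:

#include <stdbool.h>

enum scale_action { SCALE_NONE, SCALE_UP, SCALE_DOWN };

static enum scale_action evaluate_window(bool met_target, bool enough_samples,
					 bool we_are_scale_grp)
{
	if (met_target) {
		/* Only the group we scaled down for, with enough samples,
		 * gets to ask the parent to scale everyone back up. */
		if (enough_samples && we_are_scale_grp)
			return SCALE_UP;
		return SCALE_NONE;
	}
	/* Missed the target: become the victim and push the siblings down. */
	return SCALE_DOWN;
}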
595 struct iolatency_grp *iolat; in blkcg_iolatency_done_bio() local
605 iolat = blkg_to_lat(bio->bi_blkg); in blkcg_iolatency_done_bio()
606 if (!iolat) in blkcg_iolatency_done_bio()
609 if (!iolat->blkiolat->enabled) in blkcg_iolatency_done_bio()
614 iolat = blkg_to_lat(blkg); in blkcg_iolatency_done_bio()
615 if (!iolat) { in blkcg_iolatency_done_bio()
619 rqw = &iolat->rq_wait; in blkcg_iolatency_done_bio()
627 if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) { in blkcg_iolatency_done_bio()
628 iolatency_record_time(iolat, &bio->bi_issue, now, in blkcg_iolatency_done_bio()
630 window_start = atomic64_read(&iolat->window_start); in blkcg_iolatency_done_bio()
632 (now - window_start) >= iolat->cur_win_nsec) { in blkcg_iolatency_done_bio()
633 if (atomic64_try_cmpxchg(&iolat->window_start, in blkcg_iolatency_done_bio()
635 iolatency_check_latencies(iolat, now); in blkcg_iolatency_done_bio()
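blkcg_iolatency_done_bio() ends with a window rollover (lines 630-635): every completion reads window_start, and once the window has run for cur_win_nsec, whichever completer wins the compare-and-swap on window_start advances it to now and is the one that evaluates the window; everyone else only records their sample. A self-contained model of that race:

#include <stdatomic.h>
#include <stdint.h>

/* Returns nonzero if this caller closed the window and should evaluate it. */
static int try_close_window(_Atomic uint64_t *window_start,
			    uint64_t cur_win_nsec, uint64_t now)
{
	uint64_t start = atomic_load(window_start);

	if (now - start < cur_win_nsec)
		return 0;		/* window still open */
	/* Only one completer gets to advance the window and run the check. */
	return atomic_compare_exchange_strong(window_start, &start, now);
}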
669 struct iolatency_grp *iolat; in blkiolatency_timer_fn() local
681 iolat = blkg_to_lat(blkg); in blkiolatency_timer_fn()
682 if (!iolat) in blkiolatency_timer_fn()
685 lat_info = &iolat->child_lat; in blkiolatency_timer_fn()
700 scale_cookie_change(iolat->blkiolat, lat_info, true); in blkiolatency_timer_fn()
789 struct iolatency_grp *iolat = blkg_to_lat(blkg); in iolatency_set_min_lat_nsec() local
790 struct blk_iolatency *blkiolat = iolat->blkiolat; in iolatency_set_min_lat_nsec()
791 u64 oldval = iolat->min_lat_nsec; in iolatency_set_min_lat_nsec()
793 iolat->min_lat_nsec = val; in iolatency_set_min_lat_nsec()
794 iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE); in iolatency_set_min_lat_nsec()
795 iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec, in iolatency_set_min_lat_nsec()
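iolatency_set_min_lat_nsec() derives the sampling window from the target: 16x min_lat_nsec (the `val << 4` at line 794), clamped between BLKIOLATENCY_MIN_WIN_SIZE and BLKIOLATENCY_MAX_WIN_SIZE. A sketch of that arithmetic; the 100ms and 1s bounds are assumptions standing in for those constants:

#include <stdint.h>

#define NSEC_PER_MSEC	1000000ULL
#define NSEC_PER_SEC	1000000000ULL

static uint64_t window_for_target(uint64_t min_lat_nsec)
{
	uint64_t win = min_lat_nsec << 4;	/* 16x the latency target */

	if (win < 100 * NSEC_PER_MSEC)		/* assumed lower bound */
		win = 100 * NSEC_PER_MSEC;
	if (win > NSEC_PER_SEC)			/* assumed upper bound */
		win = NSEC_PER_SEC;
	return win;
}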
812 struct iolatency_grp *iolat = blkg_to_lat(blkg->parent); in iolatency_clear_scaling() local
814 if (!iolat) in iolatency_clear_scaling()
817 lat_info = &iolat->child_lat; in iolatency_clear_scaling()
833 struct iolatency_grp *iolat; in iolatency_set_limit() local
859 iolat = blkg_to_lat(ctx.blkg); in iolatency_set_limit()
886 oldval = iolat->min_lat_nsec; in iolatency_set_limit()
889 if (oldval != iolat->min_lat_nsec) in iolatency_set_limit()
900 struct iolatency_grp *iolat = pd_to_lat(pd); in iolatency_prfill_limit() local
903 if (!dname || !iolat->min_lat_nsec) in iolatency_prfill_limit()
906 dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC)); in iolatency_prfill_limit()
918 static void iolatency_ssd_stat(struct iolatency_grp *iolat, struct seq_file *s) in iolatency_ssd_stat() argument
923 latency_stat_init(iolat, &stat); in iolatency_ssd_stat()
927 s = per_cpu_ptr(iolat->stats, cpu); in iolatency_ssd_stat()
928 latency_stat_sum(iolat, &stat, s); in iolatency_ssd_stat()
932 if (iolat->max_depth == UINT_MAX) in iolatency_ssd_stat()
940 iolat->max_depth); in iolatency_ssd_stat()
945 struct iolatency_grp *iolat = pd_to_lat(pd); in iolatency_pd_stat() local
952 if (iolat->ssd) in iolatency_pd_stat()
953 return iolatency_ssd_stat(iolat, s); in iolatency_pd_stat()
955 avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC); in iolatency_pd_stat()
956 cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC); in iolatency_pd_stat()
957 if (iolat->max_depth == UINT_MAX) in iolatency_pd_stat()
962 iolat->max_depth, avg_lat, cur_win); in iolatency_pd_stat()
968 struct iolatency_grp *iolat; in iolatency_pd_alloc() local
970 iolat = kzalloc_node(sizeof(*iolat), gfp, disk->node_id); in iolatency_pd_alloc()
971 if (!iolat) in iolatency_pd_alloc()
973 iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat), in iolatency_pd_alloc()
975 if (!iolat->stats) { in iolatency_pd_alloc()
976 kfree(iolat); in iolatency_pd_alloc()
979 return &iolat->pd; in iolatency_pd_alloc()
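iolatency_pd_alloc() allocates the group struct and its per-CPU stats separately, and unwinds the first allocation if the second fails (lines 970-979). A user-space model of that two-step allocation and error path, with calloc() standing in for kzalloc_node()/__alloc_percpu_gfp():

#include <stdlib.h>

struct latency_stat_model { unsigned long long total, missed; };

struct iolatency_grp_model {
	struct latency_stat_model *stats;	/* per-CPU array in the kernel */
};

static struct iolatency_grp_model *iolat_alloc(unsigned int nr_cpus)
{
	struct iolatency_grp_model *iolat = calloc(1, sizeof(*iolat));

	if (!iolat)
		return NULL;
	iolat->stats = calloc(nr_cpus, sizeof(*iolat->stats));
	if (!iolat->stats) {
		free(iolat);		/* unwind the partial allocation */
		return NULL;
	}
	return iolat;
}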
984 struct iolatency_grp *iolat = pd_to_lat(pd); in iolatency_pd_init() local
985 struct blkcg_gq *blkg = lat_to_blkg(iolat); in iolatency_pd_init()
992 iolat->ssd = true; in iolatency_pd_init()
994 iolat->ssd = false; in iolatency_pd_init()
998 stat = per_cpu_ptr(iolat->stats, cpu); in iolatency_pd_init()
999 latency_stat_init(iolat, stat); in iolatency_pd_init()
1002 latency_stat_init(iolat, &iolat->cur_stat); in iolatency_pd_init()
1003 rq_wait_init(&iolat->rq_wait); in iolatency_pd_init()
1004 spin_lock_init(&iolat->child_lat.lock); in iolatency_pd_init()
1005 iolat->max_depth = UINT_MAX; in iolatency_pd_init()
1006 iolat->blkiolat = blkiolat; in iolatency_pd_init()
1007 iolat->cur_win_nsec = 100 * NSEC_PER_MSEC; in iolatency_pd_init()
1008 atomic64_set(&iolat->window_start, now); in iolatency_pd_init()
1016 atomic_set(&iolat->scale_cookie, in iolatency_pd_init()
1019 atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE); in iolatency_pd_init()
1022 atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE); in iolatency_pd_init()
1027 struct iolatency_grp *iolat = pd_to_lat(pd); in iolatency_pd_offline() local
1028 struct blkcg_gq *blkg = lat_to_blkg(iolat); in iolatency_pd_offline()
1036 struct iolatency_grp *iolat = pd_to_lat(pd); in iolatency_pd_free() local
1037 free_percpu(iolat->stats); in iolatency_pd_free()
1038 kfree(iolat); in iolatency_pd_free()