Lines matching refs:bfqd
231 #define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \ argument
234 (!blk_queue_nonrot(bfqd->queue) || \
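Note: lines 231-234 belong to the BFQ_RQ_SEEKY() macro. As a hedged reconstruction from the fragment (get_sdist(), BFQQ_SEEK_THR and BFQQ_SECT_THR_NONROT are helpers and thresholds defined nearby in the same file), the macro flags a request as seeky when it lands far from the previous head position; on a non-rotational queueing device the request must additionally be small to count as seeky, since large requests get good throughput regardless of placement:

    #define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
            (get_sdist(last_pos, rq) > BFQQ_SEEK_THR && \
             (!blk_queue_nonrot(bfqd->queue) || \
              blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))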
474 void bfq_schedule_dispatch(struct bfq_data *bfqd) in bfq_schedule_dispatch() argument
476 lockdep_assert_held(&bfqd->lock); in bfq_schedule_dispatch()
478 if (bfqd->queued != 0) { in bfq_schedule_dispatch()
479 bfq_log(bfqd, "schedule dispatch"); in bfq_schedule_dispatch()
480 blk_mq_run_hw_queues(bfqd->queue, true); in bfq_schedule_dispatch()
493 static struct request *bfq_choose_req(struct bfq_data *bfqd, in bfq_choose_req() argument
524 back_max = bfqd->bfq_back_max * 2; in bfq_choose_req()
534 d1 = (last - s1) * bfqd->bfq_back_penalty; in bfq_choose_req()
541 d2 = (last - s2) * bfqd->bfq_back_penalty; in bfq_choose_req()
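The bfq_choose_req() fragments above (lines 524-541) show how backward seeks are allowed but penalized: back_max doubles the configured bfq_back_max, and a candidate behind the head costs bfq_back_penalty times its distance. A minimal standalone sketch of that distance calculation (the BFQ_RQ_WRAP marking and the final tie-break table of the real function are omitted):

    /* Penalized distance of candidate sector s from the last head position. */
    unsigned long long bfq_seek_dist(unsigned long long s,
                                     unsigned long long last,
                                     unsigned int bfq_back_max,
                                     unsigned int bfq_back_penalty)
    {
            unsigned long long back_max = (unsigned long long)bfq_back_max * 2;

            if (s >= last)                  /* forward seek: plain distance */
                    return s - last;
            if (s + back_max >= last)       /* short backward seek: penalized */
                    return (last - s) * bfq_back_penalty;
            return ~0ULL;                   /* too far back: treated as a wrap */
    }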
585 static bool bfqq_request_over_limit(struct bfq_data *bfqd, in bfqq_request_over_limit() argument
601 spin_lock_irq(&bfqd->lock); in bfqq_request_over_limit()
613 spin_unlock_irq(&bfqd->lock); in bfqq_request_over_limit()
663 bfq_log_bfqq(bfqq->bfqd, bfqq, in bfqq_request_over_limit()
671 spin_unlock_irq(&bfqd->lock); in bfqq_request_over_limit()
677 static bool bfqq_request_over_limit(struct bfq_data *bfqd, in bfqq_request_over_limit() argument
702 struct bfq_data *bfqd = data->q->elevator->elevator_data; in bfq_limit_depth() local
712 depth = bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)]; in bfq_limit_depth()
713 limit = (limit * depth) >> bfqd->full_depth_shift; in bfq_limit_depth()
716 for (act_idx = 0; bic && act_idx < bfqd->num_actuators; act_idx++) { in bfq_limit_depth()
727 if (bfqq_request_over_limit(bfqd, bic, opf, act_idx, limit)) { in bfq_limit_depth()
732 bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u", in bfq_limit_depth()
733 __func__, bfqd->wr_busy_queues, op_is_sync(opf), depth); in bfq_limit_depth()
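Lines 712-713 are the core of bfq_limit_depth(): a per-class depth is picked from word_depths[] (indexed by whether any queue is weight-raised and whether the op is synchronous) and applied as a fixed-point fraction of the full tag limit. A self-contained sketch of that arithmetic (the table values are the ones computed in bfq_update_depths(), listed near the end of this section):

    /* Scale the tag limit: limit * depth / 2^full_depth_shift. */
    unsigned int bfq_scaled_limit(unsigned int limit,
                                  unsigned int word_depths[2][2],
                                  unsigned int full_depth_shift,
                                  int wr_busy_queues, int is_sync)
    {
            unsigned int depth = word_depths[!!wr_busy_queues][!!is_sync];

            return (limit * depth) >> full_depth_shift;
    }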
739 bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root, in bfq_rq_pos_tree_lookup() argument
772 bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d", in bfq_rq_pos_tree_lookup()
795 bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq) in bfq_pos_tree_add_move() argument
806 if (bfqq == &bfqd->oom_bfqq) in bfq_pos_tree_add_move()
823 __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root, in bfq_pos_tree_add_move()
865 static bool bfq_asymmetric_scenario(struct bfq_data *bfqd, in bfq_asymmetric_scenario() argument
872 rb_first_cached(&bfqd->queue_weights_tree), in bfq_asymmetric_scenario()
881 !RB_EMPTY_ROOT(&bfqd->queue_weights_tree.rb_root) && in bfq_asymmetric_scenario()
882 (bfqd->queue_weights_tree.rb_root.rb_node->rb_left || in bfq_asymmetric_scenario()
883 bfqd->queue_weights_tree.rb_root.rb_node->rb_right); in bfq_asymmetric_scenario()
886 (bfqd->busy_queues[0] && bfqd->busy_queues[1]) || in bfq_asymmetric_scenario()
887 (bfqd->busy_queues[0] && bfqd->busy_queues[2]) || in bfq_asymmetric_scenario()
888 (bfqd->busy_queues[1] && bfqd->busy_queues[2]); in bfq_asymmetric_scenario()
892 || bfqd->num_groups_with_pending_reqs > 1 in bfq_asymmetric_scenario()
912 struct rb_root_cached *root = &bfqq->bfqd->queue_weights_tree; in bfq_weights_tree_add()
991 root = &bfqq->bfqd->queue_weights_tree; in bfq_weights_tree_remove()
1022 bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq); in bfq_check_fifo()
1026 static struct request *bfq_find_next_rq(struct bfq_data *bfqd, in bfq_find_next_rq() argument
1050 return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last)); in bfq_find_next_rq()
1058 bfq_asymmetric_scenario(bfqq->bfqd, bfqq)) in bfq_serv_to_charge()
1075 static void bfq_updated_next_req(struct bfq_data *bfqd, in bfq_updated_next_req() argument
1085 if (bfqq == bfqd->in_service_queue) in bfq_updated_next_req()
1098 bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", in bfq_updated_next_req()
1100 bfq_requeue_bfqq(bfqd, bfqq, false); in bfq_updated_next_req()
1104 static unsigned int bfq_wr_duration(struct bfq_data *bfqd) in bfq_wr_duration() argument
1108 dur = bfqd->rate_dur_prod; in bfq_wr_duration()
1109 do_div(dur, bfqd->peak_rate); in bfq_wr_duration()
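bfq_wr_duration() (lines 1104-1109) derives the weight-raising duration by dividing the precomputed rate_dur_prod by the measured peak rate, so faster devices weight-raise for less time; in the source the quotient is then clamped to a sane range. A hedged plain-C sketch, with do_div() replaced by ordinary 64-bit division and the clamp bounds left as assumed parameters:

    #include <stdint.h>

    /* Weight-raising duration shrinks as the measured peak rate grows. */
    uint64_t wr_duration(uint64_t rate_dur_prod, uint32_t peak_rate,
                         uint64_t lo, uint64_t hi) /* clamp bounds, jiffies */
    {
            uint64_t dur = rate_dur_prod / peak_rate;

            if (dur < lo)
                    dur = lo;
            if (dur > hi)
                    dur = hi;
            return dur;
    }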
1136 struct bfq_data *bfqd) in switch_back_to_interactive_wr() argument
1138 bfqq->wr_coeff = bfqd->bfq_wr_coeff; in switch_back_to_interactive_wr()
1139 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); in switch_back_to_interactive_wr()
1144 bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, in bfq_bfqq_resume_state() argument
1173 if (bfqd->low_latency) { in bfq_bfqq_resume_state()
1186 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && in bfq_bfqq_resume_state()
1189 bfq_wr_duration(bfqd))) { in bfq_bfqq_resume_state()
1190 switch_back_to_interactive_wr(bfqq, bfqd); in bfq_bfqq_resume_state()
1193 bfq_log_bfqq(bfqq->bfqd, bfqq, in bfq_bfqq_resume_state()
1205 bfqd->wr_busy_queues++; in bfq_bfqq_resume_state()
1207 bfqd->wr_busy_queues--; in bfq_bfqq_resume_state()
1218 static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq) in bfq_reset_burst_list() argument
1223 hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node) in bfq_reset_burst_list()
1231 if (bfq_tot_busy_queues(bfqd) == 0) { in bfq_reset_burst_list()
1232 hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); in bfq_reset_burst_list()
1233 bfqd->burst_size = 1; in bfq_reset_burst_list()
1235 bfqd->burst_size = 0; in bfq_reset_burst_list()
1237 bfqd->burst_parent_entity = bfqq->entity.parent; in bfq_reset_burst_list()
1241 static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) in bfq_add_to_burst() argument
1244 bfqd->burst_size++; in bfq_add_to_burst()
1246 if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) { in bfq_add_to_burst()
1254 bfqd->large_burst = true; in bfq_add_to_burst()
1260 hlist_for_each_entry(bfqq_item, &bfqd->burst_list, in bfq_add_to_burst()
1272 hlist_for_each_entry_safe(pos, n, &bfqd->burst_list, in bfq_add_to_burst()
1281 hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); in bfq_add_to_burst()
1393 static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) in bfq_handle_burst() argument
1423 if (time_is_before_jiffies(bfqd->last_ins_in_burst + in bfq_handle_burst()
1424 bfqd->bfq_burst_interval) || in bfq_handle_burst()
1425 bfqq->entity.parent != bfqd->burst_parent_entity) { in bfq_handle_burst()
1426 bfqd->large_burst = false; in bfq_handle_burst()
1427 bfq_reset_burst_list(bfqd, bfqq); in bfq_handle_burst()
1436 if (bfqd->large_burst) { in bfq_handle_burst()
1446 bfq_add_to_burst(bfqd, bfqq); in bfq_handle_burst()
1456 bfqd->last_ins_in_burst = jiffies; in bfq_handle_burst()
1471 static int bfq_max_budget(struct bfq_data *bfqd) in bfq_max_budget() argument
1473 if (bfqd->budgets_assigned < bfq_stats_min_budgets) in bfq_max_budget()
1476 return bfqd->bfq_max_budget; in bfq_max_budget()
1483 static int bfq_min_budget(struct bfq_data *bfqd) in bfq_min_budget() argument
1485 if (bfqd->budgets_assigned < bfq_stats_min_budgets) in bfq_min_budget()
1488 return bfqd->bfq_max_budget / 32; in bfq_min_budget()
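bfq_max_budget() and bfq_min_budget() (lines 1471-1488) pair up: until enough budgets have been assigned for the statistics to be trusted (bfq_stats_min_budgets), BFQ falls back to the module default, and the minimum budget is always 1/32 of the maximum. Reconstruction of both helpers from the fragments (bfq_default_max_budget is the fallback constant defined elsewhere in the file):

    static int bfq_max_budget(struct bfq_data *bfqd)
    {
            if (bfqd->budgets_assigned < bfq_stats_min_budgets)
                    return bfq_default_max_budget;
            else
                    return bfqd->bfq_max_budget;
    }

    static int bfq_min_budget(struct bfq_data *bfqd)
    {
            if (bfqd->budgets_assigned < bfq_stats_min_budgets)
                    return bfq_default_max_budget / 32;
            else
                    return bfqd->bfq_max_budget / 32;
    }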
1595 static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd, in bfq_bfqq_update_budg_for_activation() argument
1664 static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, in bfq_update_bfqq_wr_on_rq_arrival() argument
1676 bfqq->wr_coeff = bfqd->bfq_wr_coeff; in bfq_update_bfqq_wr_on_rq_arrival()
1677 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); in bfq_update_bfqq_wr_on_rq_arrival()
1692 bfqq->wr_coeff = bfqd->bfq_wr_coeff * in bfq_update_bfqq_wr_on_rq_arrival()
1695 bfqd->bfq_wr_rt_max_time; in bfq_update_bfqq_wr_on_rq_arrival()
1709 2 * bfq_min_budget(bfqd)); in bfq_update_bfqq_wr_on_rq_arrival()
1712 bfqq->wr_coeff = bfqd->bfq_wr_coeff; in bfq_update_bfqq_wr_on_rq_arrival()
1713 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); in bfq_update_bfqq_wr_on_rq_arrival()
1747 bfqd->bfq_wr_rt_max_time) { in bfq_update_bfqq_wr_on_rq_arrival()
1752 bfqd->bfq_wr_rt_max_time; in bfq_update_bfqq_wr_on_rq_arrival()
1753 bfqq->wr_coeff = bfqd->bfq_wr_coeff * in bfq_update_bfqq_wr_on_rq_arrival()
1761 static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd, in bfq_bfqq_idle_for_long_time() argument
1767 bfqd->bfq_wr_min_idle_time); in bfq_bfqq_idle_for_long_time()
1803 static unsigned int bfq_actuator_index(struct bfq_data *bfqd, struct bio *bio) in bfq_actuator_index() argument
1809 if (bfqd->num_actuators == 1) in bfq_actuator_index()
1815 for (i = 0; i < bfqd->num_actuators; i++) { in bfq_actuator_index()
1816 if (end >= bfqd->sector[i] && in bfq_actuator_index()
1817 end < bfqd->sector[i] + bfqd->nr_sectors[i]) in bfq_actuator_index()
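bfq_actuator_index() (lines 1803-1817) maps a bio to the actuator whose LBA range contains its last sector; single-actuator devices short-circuit to index 0. A hedged sketch of the lookup (the source additionally warns before falling back when no range matches):

    /* Find which actuator's sector range holds the end of the bio. */
    static unsigned int bfq_actuator_index(struct bfq_data *bfqd,
                                           struct bio *bio)
    {
            unsigned int i;
            sector_t end;

            if (bfqd->num_actuators == 1)   /* common case: one actuator */
                    return 0;

            end = bio_end_sector(bio) - 1;  /* last sector touched */

            for (i = 0; i < bfqd->num_actuators; i++)
                    if (end >= bfqd->sector[i] &&
                        end < bfqd->sector[i] + bfqd->nr_sectors[i])
                            return i;

            return 0;                       /* out of all ranges */
    }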
1829 static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd, in bfq_bfqq_handle_idle_busy_switch() argument
1837 idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq), in bfq_bfqq_handle_idle_busy_switch()
1845 bfqd->bfq_slice_idle * 3; in bfq_bfqq_handle_idle_busy_switch()
1846 unsigned int act_idx = bfq_actuator_index(bfqd, rq->bio); in bfq_bfqq_handle_idle_busy_switch()
1860 soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 && in bfq_bfqq_handle_idle_busy_switch()
1880 wr_or_deserves_wr = bfqd->low_latency && in bfq_bfqq_handle_idle_busy_switch()
1890 bfq_bfqq_update_budg_for_activation(bfqd, bfqq, in bfq_bfqq_handle_idle_busy_switch()
1917 if (bfqd->low_latency) { in bfq_bfqq_handle_idle_busy_switch()
1921 jiffies - bfqd->bfq_wr_min_idle_time - 1; in bfq_bfqq_handle_idle_busy_switch()
1924 bfqd->bfq_wr_min_idle_time)) { in bfq_bfqq_handle_idle_busy_switch()
1925 bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq, in bfq_bfqq_handle_idle_busy_switch()
1986 if (bfqd->in_service_queue && in bfq_bfqq_handle_idle_busy_switch()
1988 bfqq->wr_coeff >= bfqd->in_service_queue->wr_coeff) || in bfq_bfqq_handle_idle_busy_switch()
1989 bfq_bfqq_higher_class_or_weight(bfqq, bfqd->in_service_queue) || in bfq_bfqq_handle_idle_busy_switch()
1990 !bfq_better_to_idle(bfqd->in_service_queue)) && in bfq_bfqq_handle_idle_busy_switch()
1991 next_queue_may_preempt(bfqd)) in bfq_bfqq_handle_idle_busy_switch()
1992 bfq_bfqq_expire(bfqd, bfqd->in_service_queue, in bfq_bfqq_handle_idle_busy_switch()
1996 static void bfq_reset_inject_limit(struct bfq_data *bfqd, in bfq_reset_inject_limit() argument
2006 bfqd->waited_rq = NULL; in bfq_reset_inject_limit()
2143 static void bfq_check_waker(struct bfq_data *bfqd, struct bfq_queue *bfqq, in bfq_check_waker() argument
2148 if (!bfqd->last_completed_rq_bfqq || in bfq_check_waker()
2149 bfqd->last_completed_rq_bfqq == bfqq || in bfq_check_waker()
2151 now_ns - bfqd->last_completion >= 4 * NSEC_PER_MSEC || in bfq_check_waker()
2152 bfqd->last_completed_rq_bfqq == &bfqd->oom_bfqq || in bfq_check_waker()
2153 bfqq == &bfqd->oom_bfqq) in bfq_check_waker()
2162 if (bfqd->last_completed_rq_bfqq != in bfq_check_waker()
2165 128 * (u64)bfqd->bfq_slice_idle) { in bfq_check_waker()
2172 bfqd->last_completed_rq_bfqq; in bfq_check_waker()
2177 bfq_log_bfqq(bfqd, bfqq, "set tentative waker %s", waker_name); in bfq_check_waker()
2182 bfqq->waker_bfqq = bfqd->last_completed_rq_bfqq; in bfq_check_waker()
2186 bfq_log_bfqq(bfqd, bfqq, "set waker %s", waker_name); in bfq_check_waker()
2211 &bfqd->last_completed_rq_bfqq->woken_list); in bfq_check_waker()
2218 struct bfq_data *bfqd = bfqq->bfqd; in bfq_add_request() local
2224 bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq)); in bfq_add_request()
2230 WRITE_ONCE(bfqd->queued, bfqd->queued + 1); in bfq_add_request()
2233 bfq_check_waker(bfqd, bfqq, now_ns); in bfq_add_request()
2243 bfq_reset_inject_limit(bfqd, bfqq); in bfq_add_request()
2270 if (bfqq == bfqd->in_service_queue && in bfq_add_request()
2271 (bfqd->tot_rq_in_driver == 0 || in bfq_add_request()
2273 bfqd->rqs_injected && bfqd->tot_rq_in_driver > 0)) && in bfq_add_request()
2276 bfqd->last_empty_occupied_ns = ktime_get_ns(); in bfq_add_request()
2283 bfqd->wait_dispatch = true; in bfq_add_request()
2297 if (bfqd->tot_rq_in_driver == 0) in bfq_add_request()
2298 bfqd->rqs_injected = false; in bfq_add_request()
2311 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position); in bfq_add_request()
2318 if (unlikely(!bfqd->nonrot_with_queueing && prev != bfqq->next_rq)) in bfq_add_request()
2319 bfq_pos_tree_add_move(bfqd, bfqq); in bfq_add_request()
2322 bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff, in bfq_add_request()
2325 if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) && in bfq_add_request()
2328 bfqd->bfq_wr_min_inter_arr_async)) { in bfq_add_request()
2329 bfqq->wr_coeff = bfqd->bfq_wr_coeff; in bfq_add_request()
2330 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); in bfq_add_request()
2332 bfqd->wr_busy_queues++; in bfq_add_request()
2336 bfq_updated_next_req(bfqd, bfqq); in bfq_add_request()
2365 if (bfqd->low_latency && in bfq_add_request()
2370 static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, in bfq_find_rq_fmerge() argument
2374 struct bfq_queue *bfqq = bfqd->bio_bfqq; in bfq_find_rq_fmerge()
2395 struct bfq_data *bfqd = bfqq->bfqd; in bfq_remove_request() local
2399 bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq); in bfq_remove_request()
2400 bfq_updated_next_req(bfqd, bfqq); in bfq_remove_request()
2410 WRITE_ONCE(bfqd->queued, bfqd->queued - 1); in bfq_remove_request()
2420 if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) { in bfq_remove_request()
2447 if (unlikely(!bfqd->nonrot_with_queueing)) in bfq_remove_request()
2448 bfq_pos_tree_add_move(bfqd, bfqq); in bfq_remove_request()
2459 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_bio_merge() local
2471 spin_lock_irq(&bfqd->lock); in bfq_bio_merge()
2480 bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf), in bfq_bio_merge()
2481 bfq_actuator_index(bfqd, bio)); in bfq_bio_merge()
2483 bfqd->bio_bfqq = NULL; in bfq_bio_merge()
2485 bfqd->bio_bic = bic; in bfq_bio_merge()
2489 spin_unlock_irq(&bfqd->lock); in bfq_bio_merge()
2499 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_request_merge() local
2502 __rq = bfq_find_rq_fmerge(bfqd, bio, q); in bfq_request_merge()
2523 struct bfq_data *bfqd; in bfq_request_merged() local
2529 bfqd = bfqq->bfqd; in bfq_request_merged()
2537 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req, in bfq_request_merged()
2538 bfqd->last_position); in bfq_request_merged()
2546 bfq_updated_next_req(bfqd, bfqq); in bfq_request_merged()
2551 if (unlikely(!bfqd->nonrot_with_queueing)) in bfq_request_merged()
2552 bfq_pos_tree_add_move(bfqd, bfqq); in bfq_request_merged()
2629 bfqq->bfqd->bfq_wr_rt_max_time) in bfq_bfqq_end_wr()
2633 bfqq->bfqd->wr_busy_queues--; in bfq_bfqq_end_wr()
2644 void bfq_end_wr_async_queues(struct bfq_data *bfqd, in bfq_end_wr_async_queues() argument
2649 for (k = 0; k < bfqd->num_actuators; k++) { in bfq_end_wr_async_queues()
2659 static void bfq_end_wr(struct bfq_data *bfqd) in bfq_end_wr() argument
2664 spin_lock_irq(&bfqd->lock); in bfq_end_wr()
2666 for (i = 0; i < bfqd->num_actuators; i++) { in bfq_end_wr()
2667 list_for_each_entry(bfqq, &bfqd->active_list[i], bfqq_list) in bfq_end_wr()
2670 list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) in bfq_end_wr()
2672 bfq_end_wr_async(bfqd); in bfq_end_wr()
2674 spin_unlock_irq(&bfqd->lock); in bfq_end_wr()
2692 static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd, in bfqq_find_close() argument
2707 __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL); in bfqq_find_close()
2734 static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd, in bfq_find_close_cooperator() argument
2747 bfqq = bfqq_find_close(bfqd, cur_bfqq, sector); in bfq_find_close_cooperator()
2793 bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d", in bfq_setup_merge()
2859 static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
2863 bfq_setup_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq, in bfq_setup_stable_merge() argument
2872 if (idling_boosts_thr_without_issues(bfqd, bfqq) || proc_ref == 0) in bfq_setup_stable_merge()
2917 bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, in bfq_setup_cooperator() argument
2941 if (unlikely(!bfqd->nonrot_with_queueing)) { in bfq_setup_cooperator()
2957 return bfq_setup_stable_merge(bfqd, bfqq, in bfq_setup_cooperator()
3000 if (likely(bfqd->nonrot_with_queueing)) in bfq_setup_cooperator()
3017 if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq)) in bfq_setup_cooperator()
3021 if (bfq_tot_busy_queues(bfqd) == 1) in bfq_setup_cooperator()
3024 in_service_bfqq = bfqd->in_service_queue; in bfq_setup_cooperator()
3027 likely(in_service_bfqq != &bfqd->oom_bfqq) && in bfq_setup_cooperator()
3029 bfqd->in_serv_last_pos) && in bfq_setup_cooperator()
3041 new_bfqq = bfq_find_close_cooperator(bfqd, bfqq, in bfq_setup_cooperator()
3044 if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) && in bfq_setup_cooperator()
3082 bfqq->bfqd->low_latency)) { in bfq_bfqq_save_state()
3092 bfqq_data->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff; in bfq_bfqq_save_state()
3096 bfq_wr_duration(bfqq->bfqd); in bfq_bfqq_save_state()
3117 else if (cur_bfqq->bfqd && cur_bfqq->bfqd->last_bfqq_created == cur_bfqq) in bfq_reassign_last_bfqq()
3118 cur_bfqq->bfqd->last_bfqq_created = new_bfqq; in bfq_reassign_last_bfqq()
3121 void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq) in bfq_release_process_ref() argument
3135 bfqq != bfqd->in_service_queue) in bfq_release_process_ref()
3143 static struct bfq_queue *bfq_merge_bfqqs(struct bfq_data *bfqd, in bfq_merge_bfqqs() argument
3149 bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", in bfq_merge_bfqqs()
3197 bfqd->wr_busy_queues++; in bfq_merge_bfqqs()
3205 bfqd->wr_busy_queues--; in bfq_merge_bfqqs()
3208 bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d", in bfq_merge_bfqqs()
3209 bfqd->wr_busy_queues); in bfq_merge_bfqqs()
3241 bfq_release_process_ref(bfqd, bfqq); in bfq_merge_bfqqs()
3249 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_allow_bio_merge() local
3251 struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq; in bfq_allow_bio_merge()
3270 new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false, bfqd->bio_bic); in bfq_allow_bio_merge()
3280 bfqq = bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq); in bfq_allow_bio_merge()
3288 bfqd->bio_bfqq = bfqq; in bfq_allow_bio_merge()
3300 static void bfq_set_budget_timeout(struct bfq_data *bfqd, in bfq_set_budget_timeout() argument
3305 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time) in bfq_set_budget_timeout()
3310 bfqd->last_budget_start = ktime_get(); in bfq_set_budget_timeout()
3313 bfqd->bfq_timeout * timeout_coeff; in bfq_set_budget_timeout()
3316 static void __bfq_set_in_service_queue(struct bfq_data *bfqd, in __bfq_set_in_service_queue() argument
3322 bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8; in __bfq_set_in_service_queue()
3326 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && in __bfq_set_in_service_queue()
3360 bfq_set_budget_timeout(bfqd, bfqq); in __bfq_set_in_service_queue()
3361 bfq_log_bfqq(bfqd, bfqq, in __bfq_set_in_service_queue()
3366 bfqd->in_service_queue = bfqq; in __bfq_set_in_service_queue()
3367 bfqd->in_serv_last_pos = 0; in __bfq_set_in_service_queue()
3373 static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd) in bfq_set_in_service_queue() argument
3375 struct bfq_queue *bfqq = bfq_get_next_queue(bfqd); in bfq_set_in_service_queue()
3377 __bfq_set_in_service_queue(bfqd, bfqq); in bfq_set_in_service_queue()
3381 static void bfq_arm_slice_timer(struct bfq_data *bfqd) in bfq_arm_slice_timer() argument
3383 struct bfq_queue *bfqq = bfqd->in_service_queue; in bfq_arm_slice_timer()
3393 sl = bfqd->bfq_slice_idle; in bfq_arm_slice_timer()
3405 !bfq_asymmetric_scenario(bfqd, bfqq)) in bfq_arm_slice_timer()
3410 bfqd->last_idling_start = ktime_get(); in bfq_arm_slice_timer()
3411 bfqd->last_idling_start_jiffies = jiffies; in bfq_arm_slice_timer()
3413 hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl), in bfq_arm_slice_timer()
3425 static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd) in bfq_calc_max_budget() argument
3427 return (u64)bfqd->peak_rate * USEC_PER_MSEC * in bfq_calc_max_budget()
3428 jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT; in bfq_calc_max_budget()
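Lines 3427-3428 are the whole of bfq_calc_max_budget(): peak_rate is stored as (sectors per usec) << BFQ_RATE_SHIFT, so multiplying by the timeout converted to microseconds and shifting right yields the sectors the device can serve within one budget timeout. A standalone restatement with the fixed-point shift made explicit (BFQ_RATE_SHIFT is 16 in the source):

    #include <stdint.h>

    #define BFQ_RATE_SHIFT 16       /* fixed-point shift used for peak_rate */

    /* Max budget: sectors transferable at peak_rate within timeout_ms. */
    uint64_t calc_max_budget(uint32_t peak_rate, unsigned int timeout_ms)
    {
            return ((uint64_t)peak_rate * 1000 * timeout_ms) >> BFQ_RATE_SHIFT;
    }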
3436 static void update_thr_responsiveness_params(struct bfq_data *bfqd) in update_thr_responsiveness_params() argument
3438 if (bfqd->bfq_user_max_budget == 0) { in update_thr_responsiveness_params()
3439 bfqd->bfq_max_budget = in update_thr_responsiveness_params()
3440 bfq_calc_max_budget(bfqd); in update_thr_responsiveness_params()
3441 bfq_log(bfqd, "new max_budget = %d", bfqd->bfq_max_budget); in update_thr_responsiveness_params()
3445 static void bfq_reset_rate_computation(struct bfq_data *bfqd, in bfq_reset_rate_computation() argument
3449 bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns(); in bfq_reset_rate_computation()
3450 bfqd->peak_rate_samples = 1; in bfq_reset_rate_computation()
3451 bfqd->sequential_samples = 0; in bfq_reset_rate_computation()
3452 bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size = in bfq_reset_rate_computation()
3455 bfqd->peak_rate_samples = 0; /* full re-init on next disp. */ in bfq_reset_rate_computation()
3457 bfq_log(bfqd, in bfq_reset_rate_computation()
3459 bfqd->peak_rate_samples, bfqd->sequential_samples, in bfq_reset_rate_computation()
3460 bfqd->tot_sectors_dispatched); in bfq_reset_rate_computation()
3463 static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) in bfq_update_rate_reset() argument
3475 if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES || in bfq_update_rate_reset()
3476 bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) in bfq_update_rate_reset()
3485 bfqd->delta_from_first = in bfq_update_rate_reset()
3486 max_t(u64, bfqd->delta_from_first, in bfq_update_rate_reset()
3487 bfqd->last_completion - bfqd->first_dispatch); in bfq_update_rate_reset()
3493 rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT, in bfq_update_rate_reset()
3494 div_u64(bfqd->delta_from_first, NSEC_PER_USEC)); in bfq_update_rate_reset()
3502 if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 && in bfq_update_rate_reset()
3503 rate <= bfqd->peak_rate) || in bfq_update_rate_reset()
3530 weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples; in bfq_update_rate_reset()
3537 div_u64(weight * bfqd->delta_from_first, in bfq_update_rate_reset()
3551 bfqd->peak_rate *= divisor-1; in bfq_update_rate_reset()
3552 bfqd->peak_rate /= divisor; in bfq_update_rate_reset()
3555 bfqd->peak_rate += rate; in bfq_update_rate_reset()
3564 bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate); in bfq_update_rate_reset()
3566 update_thr_responsiveness_params(bfqd); in bfq_update_rate_reset()
3569 bfq_reset_rate_computation(bfqd, rq); in bfq_update_rate_reset()
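Lines 3551-3564 show the low-pass filter at the heart of bfq_update_rate_reset(): the old peak-rate estimate is scaled by (divisor - 1)/divisor, the new sample, reduced by the same divisor, is added in, and the result is kept at least 1. One update step as a self-contained sketch (the divisor itself is derived from the sample weight in code not shown in the fragments):

    #include <stdint.h>

    /* One EWMA step: est = old * (d - 1)/d + sample/d, alpha = 1/d. */
    uint32_t peak_rate_step(uint32_t old_rate, uint32_t sample,
                            uint32_t divisor)
    {
            uint32_t est = (uint32_t)(((uint64_t)old_rate * (divisor - 1)) /
                                      divisor);

            est += sample / divisor;
            return est ? est : 1;   /* source clamps the estimate to >= 1 */
    }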
3604 static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) in bfq_update_peak_rate() argument
3608 if (bfqd->peak_rate_samples == 0) { /* first dispatch */ in bfq_update_peak_rate()
3609 bfq_log(bfqd, "update_peak_rate: goto reset, samples %d", in bfq_update_peak_rate()
3610 bfqd->peak_rate_samples); in bfq_update_peak_rate()
3611 bfq_reset_rate_computation(bfqd, rq); in bfq_update_peak_rate()
3627 if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC && in bfq_update_peak_rate()
3628 bfqd->tot_rq_in_driver == 0) in bfq_update_peak_rate()
3632 bfqd->peak_rate_samples++; in bfq_update_peak_rate()
3634 if ((bfqd->tot_rq_in_driver > 0 || in bfq_update_peak_rate()
3635 now_ns - bfqd->last_completion < BFQ_MIN_TT) in bfq_update_peak_rate()
3636 && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq)) in bfq_update_peak_rate()
3637 bfqd->sequential_samples++; in bfq_update_peak_rate()
3639 bfqd->tot_sectors_dispatched += blk_rq_sectors(rq); in bfq_update_peak_rate()
3642 if (likely(bfqd->peak_rate_samples % 32)) in bfq_update_peak_rate()
3643 bfqd->last_rq_max_size = in bfq_update_peak_rate()
3644 max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size); in bfq_update_peak_rate()
3646 bfqd->last_rq_max_size = blk_rq_sectors(rq); in bfq_update_peak_rate()
3648 bfqd->delta_from_first = now_ns - bfqd->first_dispatch; in bfq_update_peak_rate()
3651 if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL) in bfq_update_peak_rate()
3655 bfq_update_rate_reset(bfqd, rq); in bfq_update_peak_rate()
3657 bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); in bfq_update_peak_rate()
3658 if (RQ_BFQQ(rq) == bfqd->in_service_queue) in bfq_update_peak_rate()
3659 bfqd->in_serv_last_pos = bfqd->last_position; in bfq_update_peak_rate()
3660 bfqd->last_dispatch = now_ns; in bfq_update_peak_rate()
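Within bfq_update_peak_rate(), lines 3634-3637 define what counts as a sequential sample: the dispatch extends an ongoing stream (requests still in the driver, or the last completion less than BFQ_MIN_TT ago) and is not seeky with respect to last_position. The predicate isolated as a helper, assuming the kernel context of the file:

    /* Is this dispatch a sequential sample for rate estimation? */
    static bool sequential_sample(struct bfq_data *bfqd, struct request *rq,
                                  u64 now_ns)
    {
            return (bfqd->tot_rq_in_driver > 0 ||
                    now_ns - bfqd->last_completion < BFQ_MIN_TT) &&
                   !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq);
    }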
3889 static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd, in idling_needed_for_service_guarantees() argument
3892 int tot_busy_queues = bfq_tot_busy_queues(bfqd); in idling_needed_for_service_guarantees()
3899 (bfqd->wr_busy_queues < tot_busy_queues || in idling_needed_for_service_guarantees()
3900 bfqd->tot_rq_in_driver >= bfqq->dispatched + 4)) || in idling_needed_for_service_guarantees()
3901 bfq_asymmetric_scenario(bfqd, bfqq) || in idling_needed_for_service_guarantees()
3905 static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq, in __bfq_bfqq_expire() argument
3932 idling_needed_for_service_guarantees(bfqd, bfqq))) { in __bfq_bfqq_expire()
3944 bfq_requeue_bfqq(bfqd, bfqq, true); in __bfq_bfqq_expire()
3949 if (unlikely(!bfqd->nonrot_with_queueing && in __bfq_bfqq_expire()
3951 bfq_pos_tree_add_move(bfqd, bfqq); in __bfq_bfqq_expire()
3961 return __bfq_bfqd_reset_in_service(bfqd); in __bfq_bfqq_expire()
3973 static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, in __bfq_bfqq_recalc_budget() argument
3980 min_budget = bfq_min_budget(bfqd); in __bfq_bfqq_recalc_budget()
3992 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d", in __bfq_bfqq_recalc_budget()
3994 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d", in __bfq_bfqq_recalc_budget()
3995 budget, bfq_min_budget(bfqd)); in __bfq_bfqq_recalc_budget()
3996 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d", in __bfq_bfqq_recalc_budget()
3997 bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue)); in __bfq_bfqq_recalc_budget()
4031 budget = min(budget * 2, bfqd->bfq_max_budget); in __bfq_bfqq_recalc_budget()
4046 budget = min(budget * 2, bfqd->bfq_max_budget); in __bfq_bfqq_recalc_budget()
4058 budget = min(budget * 4, bfqd->bfq_max_budget); in __bfq_bfqq_recalc_budget()
4105 budget = bfqd->bfq_max_budget; in __bfq_bfqq_recalc_budget()
4110 if (bfqd->budgets_assigned >= bfq_stats_min_budgets && in __bfq_bfqq_recalc_budget()
4111 !bfqd->bfq_user_max_budget) in __bfq_bfqq_recalc_budget()
4112 bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget); in __bfq_bfqq_recalc_budget()
4129 bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d", in __bfq_bfqq_recalc_budget()
4165 static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, in bfq_bfqq_is_slow() argument
4176 delta_ktime = bfqd->last_idling_start; in bfq_bfqq_is_slow()
4179 delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start); in bfq_bfqq_is_slow()
4184 if (blk_queue_nonrot(bfqd->queue)) in bfq_bfqq_is_slow()
4213 slow = bfqq->entity.service < bfqd->bfq_max_budget / 2; in bfq_bfqq_is_slow()
4216 bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow); in bfq_bfqq_is_slow()
4314 static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, in bfq_bfqq_softrt_next_start() argument
4320 bfqd->bfq_wr_max_softrt_rate, in bfq_bfqq_softrt_next_start()
4321 jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); in bfq_bfqq_softrt_next_start()
4350 void bfq_bfqq_expire(struct bfq_data *bfqd, in bfq_bfqq_expire() argument
4362 slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, &delta); in bfq_bfqq_expire()
4383 bfq_bfqq_charge_time(bfqd, bfqq, delta); in bfq_bfqq_expire()
4385 if (bfqd->low_latency && bfqq->wr_coeff == 1) in bfq_bfqq_expire()
4388 if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 && in bfq_bfqq_expire()
4404 bfq_bfqq_softrt_next_start(bfqd, bfqq); in bfq_bfqq_expire()
4414 bfq_log_bfqq(bfqd, bfqq, in bfq_bfqq_expire()
4423 bfqd->rqs_injected = bfqd->wait_dispatch = false; in bfq_bfqq_expire()
4424 bfqd->waited_rq = NULL; in bfq_bfqq_expire()
4430 __bfq_bfqq_recalc_budget(bfqd, bfqq, reason); in bfq_bfqq_expire()
4431 if (__bfq_bfqq_expire(bfqd, bfqq, reason)) in bfq_bfqq_expire()
4490 bfq_log_bfqq(bfqq->bfqd, bfqq, in bfq_may_expire_for_budg_timeout()
4502 static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd, in idling_boosts_thr_without_issues() argument
4506 !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag, in idling_boosts_thr_without_issues()
4540 ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) && in idling_boosts_thr_without_issues()
4578 bfqd->wr_busy_queues == 0; in idling_boosts_thr_without_issues()
4604 struct bfq_data *bfqd = bfqq->bfqd; in bfq_better_to_idle() local
4611 if (unlikely(bfqd->strict_guarantees)) in bfq_better_to_idle()
4622 if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) || in bfq_better_to_idle()
4627 idling_boosts_thr_without_issues(bfqd, bfqq); in bfq_better_to_idle()
4630 idling_needed_for_service_guarantees(bfqd, bfqq); in bfq_better_to_idle()
4666 bfq_choose_bfqq_for_injection(struct bfq_data *bfqd) in bfq_choose_bfqq_for_injection() argument
4668 struct bfq_queue *bfqq, *in_serv_bfqq = bfqd->in_service_queue; in bfq_choose_bfqq_for_injection()
4698 time_is_before_eq_jiffies(bfqd->last_idling_start_jiffies + in bfq_choose_bfqq_for_injection()
4699 bfqd->bfq_slice_idle) in bfq_choose_bfqq_for_injection()
4703 if (bfqd->tot_rq_in_driver >= limit) in bfq_choose_bfqq_for_injection()
4718 for (i = 0; i < bfqd->num_actuators; i++) { in bfq_choose_bfqq_for_injection()
4719 list_for_each_entry(bfqq, &bfqd->active_list[i], bfqq_list) in bfq_choose_bfqq_for_injection()
4741 if (blk_queue_nonrot(bfqd->queue) && in bfq_choose_bfqq_for_injection()
4744 bfqd->tot_rq_in_driver >= 1) in bfq_choose_bfqq_for_injection()
4747 bfqd->rqs_injected = true; in bfq_choose_bfqq_for_injection()
4757 bfq_find_active_bfqq_for_actuator(struct bfq_data *bfqd, int idx) in bfq_find_active_bfqq_for_actuator() argument
4761 if (bfqd->in_service_queue && in bfq_find_active_bfqq_for_actuator()
4762 bfqd->in_service_queue->actuator_idx == idx) in bfq_find_active_bfqq_for_actuator()
4763 return bfqd->in_service_queue; in bfq_find_active_bfqq_for_actuator()
4765 list_for_each_entry(bfqq, &bfqd->active_list[idx], bfqq_list) { in bfq_find_active_bfqq_for_actuator()
4790 bfq_find_bfqq_for_underused_actuator(struct bfq_data *bfqd) in bfq_find_bfqq_for_underused_actuator() argument
4794 for (i = 0 ; i < bfqd->num_actuators; i++) { in bfq_find_bfqq_for_underused_actuator()
4795 if (bfqd->rq_in_driver[i] < bfqd->actuator_load_threshold && in bfq_find_bfqq_for_underused_actuator()
4796 (i == bfqd->num_actuators - 1 || in bfq_find_bfqq_for_underused_actuator()
4797 bfqd->rq_in_driver[i] < bfqd->rq_in_driver[i+1])) { in bfq_find_bfqq_for_underused_actuator()
4799 bfq_find_active_bfqq_for_actuator(bfqd, i); in bfq_find_bfqq_for_underused_actuator()
4814 static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) in bfq_select_queue() argument
4820 bfqq = bfqd->in_service_queue; in bfq_select_queue()
4824 bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue"); in bfq_select_queue()
4843 inject_bfqq = bfq_find_bfqq_for_underused_actuator(bfqd); in bfq_select_queue()
4890 hrtimer_try_to_cancel(&bfqd->idle_slice_timer); in bfq_select_queue()
5020 else if (!idling_boosts_thr_without_issues(bfqd, bfqq) && in bfq_select_queue()
5021 (bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 || in bfq_select_queue()
5023 bfqq = bfq_choose_bfqq_for_injection(bfqd); in bfq_select_queue()
5032 bfq_bfqq_expire(bfqd, bfqq, false, reason); in bfq_select_queue()
5034 bfqq = bfq_set_in_service_queue(bfqd); in bfq_select_queue()
5036 bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue"); in bfq_select_queue()
5041 bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue"); in bfq_select_queue()
5043 bfq_log(bfqd, "select_queue: no queue returned"); in bfq_select_queue()
5048 static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) in bfq_update_wr_data() argument
5053 bfq_log_bfqq(bfqd, bfqq, in bfq_update_wr_data()
5061 bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change"); in bfq_update_wr_data()
5072 if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time || in bfq_update_wr_data()
5074 bfq_wr_duration(bfqd))) { in bfq_update_wr_data()
5089 switch_back_to_interactive_wr(bfqq, bfqd); in bfq_update_wr_data()
5094 bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time && in bfq_update_wr_data()
5116 static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd, in bfq_dispatch_rq_from_bfqq() argument
5126 if (bfqq == bfqd->in_service_queue && bfqd->wait_dispatch) { in bfq_dispatch_rq_from_bfqq()
5127 bfqd->wait_dispatch = false; in bfq_dispatch_rq_from_bfqq()
5128 bfqd->waited_rq = rq; in bfq_dispatch_rq_from_bfqq()
5131 bfq_dispatch_remove(bfqd->queue, rq); in bfq_dispatch_rq_from_bfqq()
5133 if (bfqq != bfqd->in_service_queue) in bfq_dispatch_rq_from_bfqq()
5147 bfq_update_wr_data(bfqd, bfqq); in bfq_dispatch_rq_from_bfqq()
5154 if (bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq)) in bfq_dispatch_rq_from_bfqq()
5155 bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED); in bfq_dispatch_rq_from_bfqq()
5162 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_has_work() local
5168 return !list_empty_careful(&bfqd->dispatch) || in bfq_has_work()
5169 READ_ONCE(bfqd->queued); in bfq_has_work()
5174 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in __bfq_dispatch_request() local
5178 if (!list_empty(&bfqd->dispatch)) { in __bfq_dispatch_request()
5179 rq = list_first_entry(&bfqd->dispatch, struct request, in __bfq_dispatch_request()
5223 bfq_log(bfqd, "dispatch requests: %d busy queues", in __bfq_dispatch_request()
5224 bfq_tot_busy_queues(bfqd)); in __bfq_dispatch_request()
5226 if (bfq_tot_busy_queues(bfqd) == 0) in __bfq_dispatch_request()
5241 if (bfqd->strict_guarantees && bfqd->tot_rq_in_driver > 0) in __bfq_dispatch_request()
5244 bfqq = bfq_select_queue(bfqd); in __bfq_dispatch_request()
5248 rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq); in __bfq_dispatch_request()
5252 bfqd->rq_in_driver[bfqq->actuator_idx]++; in __bfq_dispatch_request()
5253 bfqd->tot_rq_in_driver++; in __bfq_dispatch_request()
5315 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_dispatch_request() local
5320 spin_lock_irq(&bfqd->lock); in bfq_dispatch_request()
5322 in_serv_queue = bfqd->in_service_queue; in bfq_dispatch_request()
5326 if (in_serv_queue == bfqd->in_service_queue) { in bfq_dispatch_request()
5331 spin_unlock_irq(&bfqd->lock); in bfq_dispatch_request()
5352 bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref); in bfq_put_queue()
5386 if (bfqq->bic && bfqq->bfqd->burst_size > 0) in bfq_put_queue()
5387 bfqq->bfqd->burst_size--; in bfq_put_queue()
5416 if (bfqq->bfqd->last_completed_rq_bfqq == bfqq) in bfq_put_queue()
5417 bfqq->bfqd->last_completed_rq_bfqq = NULL; in bfq_put_queue()
5450 static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) in bfq_exit_bfqq() argument
5452 if (bfqq == bfqd->in_service_queue) { in bfq_exit_bfqq()
5453 __bfq_bfqq_expire(bfqd, bfqq, BFQQE_BUDGET_TIMEOUT); in bfq_exit_bfqq()
5454 bfq_schedule_dispatch(bfqd); in bfq_exit_bfqq()
5457 bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref); in bfq_exit_bfqq()
5461 bfq_release_process_ref(bfqd, bfqq); in bfq_exit_bfqq()
5468 struct bfq_data *bfqd; in bfq_exit_icq_bfqq() local
5471 bfqd = bfqq->bfqd; /* NULL if scheduler already exited */ in bfq_exit_icq_bfqq()
5473 if (bfqq && bfqd) { in bfq_exit_icq_bfqq()
5475 bfq_exit_bfqq(bfqd, bfqq); in bfq_exit_icq_bfqq()
5482 struct bfq_data *bfqd = bic_to_bfqd(bic); in bfq_exit_icq() local
5498 if (bfqd) { in bfq_exit_icq()
5499 spin_lock_irqsave(&bfqd->lock, flags); in bfq_exit_icq()
5500 num_actuators = bfqd->num_actuators; in bfq_exit_icq()
5511 if (bfqd) in bfq_exit_icq()
5512 spin_unlock_irqrestore(&bfqd->lock, flags); in bfq_exit_icq()
5524 struct bfq_data *bfqd = bfqq->bfqd; in bfq_set_next_ioprio_data() local
5526 if (!bfqd) in bfq_set_next_ioprio_data()
5533 bdi_dev_name(bfqq->bfqd->queue->disk->bdi), in bfq_set_next_ioprio_data()
5564 bfq_log_bfqq(bfqd, bfqq, "new_ioprio %d new_weight %d", in bfq_set_next_ioprio_data()
5569 static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
5576 struct bfq_data *bfqd = bic_to_bfqd(bic); in bfq_check_ioprio_change() local
5584 if (unlikely(!bfqd) || likely(bic->ioprio == ioprio)) in bfq_check_ioprio_change()
5589 bfqq = bic_to_bfqq(bic, false, bfq_actuator_index(bfqd, bio)); in bfq_check_ioprio_change()
5593 bfqq = bfq_get_queue(bfqd, bio, false, bic, true); in bfq_check_ioprio_change()
5594 bic_set_bfqq(bic, bfqq, false, bfq_actuator_index(bfqd, bio)); in bfq_check_ioprio_change()
5595 bfq_release_process_ref(bfqd, old_bfqq); in bfq_check_ioprio_change()
5598 bfqq = bic_to_bfqq(bic, true, bfq_actuator_index(bfqd, bio)); in bfq_check_ioprio_change()
5603 static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, in bfq_init_bfqq() argument
5617 bfqq->bfqd = bfqd; in bfq_init_bfqq()
5648 bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3; in bfq_init_bfqq()
5673 static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, in bfq_async_queue_prio() argument
5693 bfq_do_early_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq, in bfq_do_early_stable_merge() argument
5715 return bfq_merge_bfqqs(bfqd, bic, bfqq); in bfq_do_early_stable_merge()
5764 static struct bfq_queue *bfq_do_or_sched_stable_merge(struct bfq_data *bfqd, in bfq_do_or_sched_stable_merge() argument
5770 &bfqd->last_bfqq_created; in bfq_do_or_sched_stable_merge()
5807 bfqd->bfq_burst_interval, in bfq_do_or_sched_stable_merge()
5809 if (likely(bfqd->nonrot_with_queueing)) in bfq_do_or_sched_stable_merge()
5816 bfqq = bfq_do_early_stable_merge(bfqd, bfqq, in bfq_do_or_sched_stable_merge()
5843 static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, in bfq_get_queue() argument
5854 bfqg = bfq_bio_bfqg(bfqd, bio); in bfq_get_queue()
5856 async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class, in bfq_get_queue()
5858 bfq_actuator_index(bfqd, bio)); in bfq_get_queue()
5866 bfqd->queue->node); in bfq_get_queue()
5869 bfq_init_bfqq(bfqd, bfqq, bic, current->pid, in bfq_get_queue()
5870 is_sync, bfq_actuator_index(bfqd, bio)); in bfq_get_queue()
5872 bfq_log_bfqq(bfqd, bfqq, "allocated"); in bfq_get_queue()
5874 bfqq = &bfqd->oom_bfqq; in bfq_get_queue()
5875 bfq_log_bfqq(bfqd, bfqq, "using oom bfqq"); in bfq_get_queue()
5891 bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d", in bfq_get_queue()
5899 if (bfqq != &bfqd->oom_bfqq && is_sync && !respawn) in bfq_get_queue()
5900 bfqq = bfq_do_or_sched_stable_merge(bfqd, bfqq, bic); in bfq_get_queue()
5904 static void bfq_update_io_thinktime(struct bfq_data *bfqd, in bfq_update_io_thinktime() argument
5918 elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle); in bfq_update_io_thinktime()
5927 bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq, in bfq_update_io_seektime() argument
5931 bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq); in bfq_update_io_seektime()
5934 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && in bfq_update_io_seektime()
5937 bfq_wr_duration(bfqd))) { in bfq_update_io_seektime()
5951 switch_back_to_interactive_wr(bfqq, bfqd); in bfq_update_io_seektime()
5957 static void bfq_update_has_short_ttime(struct bfq_data *bfqd, in bfq_update_has_short_ttime() argument
5969 bfqd->bfq_slice_idle == 0) in bfq_update_has_short_ttime()
5974 bfqd->bfq_wr_min_idle_time)) in bfq_update_has_short_ttime()
5984 bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle>>1)) in bfq_update_has_short_ttime()
6082 bfq_reset_inject_limit(bfqd, bfqq); in bfq_update_has_short_ttime()
6089 static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, in bfq_rq_enqueued() argument
6097 if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) { in bfq_rq_enqueued()
6118 if (small_req && idling_boosts_thr_without_issues(bfqd, bfqq) && in bfq_rq_enqueued()
6130 hrtimer_try_to_cancel(&bfqd->idle_slice_timer); in bfq_rq_enqueued()
6140 bfq_bfqq_expire(bfqd, bfqq, false, in bfq_rq_enqueued()
6162 static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) in __bfq_insert_request() argument
6165 *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true, in __bfq_insert_request()
6187 bfq_actuator_index(bfqd, rq->bio)) == bfqq) { in __bfq_insert_request()
6189 bfqq = bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqq); in __bfq_insert_request()
6201 bfq_update_io_thinktime(bfqd, bfqq); in __bfq_insert_request()
6202 bfq_update_has_short_ttime(bfqd, bfqq, RQ_BIC(rq)); in __bfq_insert_request()
6203 bfq_update_io_seektime(bfqd, bfqq, rq); in __bfq_insert_request()
6209 rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; in __bfq_insert_request()
6212 bfq_rq_enqueued(bfqd, bfqq, rq); in __bfq_insert_request()
6255 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_insert_request() local
6265 spin_lock_irq(&bfqd->lock); in bfq_insert_request()
6268 spin_unlock_irq(&bfqd->lock); in bfq_insert_request()
6276 list_add(&rq->queuelist, &bfqd->dispatch); in bfq_insert_request()
6278 list_add_tail(&rq->queuelist, &bfqd->dispatch); in bfq_insert_request()
6280 idle_timer_disabled = __bfq_insert_request(bfqd, rq); in bfq_insert_request()
6301 spin_unlock_irq(&bfqd->lock); in bfq_insert_request()
6320 static void bfq_update_hw_tag(struct bfq_data *bfqd) in bfq_update_hw_tag() argument
6322 struct bfq_queue *bfqq = bfqd->in_service_queue; in bfq_update_hw_tag()
6324 bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver, in bfq_update_hw_tag()
6325 bfqd->tot_rq_in_driver); in bfq_update_hw_tag()
6327 if (bfqd->hw_tag == 1) in bfq_update_hw_tag()
6336 if (bfqd->tot_rq_in_driver + bfqd->queued <= BFQ_HW_QUEUE_THRESHOLD) in bfq_update_hw_tag()
6347 bfqd->tot_rq_in_driver < BFQ_HW_QUEUE_THRESHOLD) in bfq_update_hw_tag()
6350 if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES) in bfq_update_hw_tag()
6353 bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD; in bfq_update_hw_tag()
6354 bfqd->max_rq_in_driver = 0; in bfq_update_hw_tag()
6355 bfqd->hw_tag_samples = 0; in bfq_update_hw_tag()
6357 bfqd->nonrot_with_queueing = in bfq_update_hw_tag()
6358 blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag; in bfq_update_hw_tag()
6361 static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) in bfq_completed_request() argument
6366 bfq_update_hw_tag(bfqd); in bfq_completed_request()
6368 bfqd->rq_in_driver[bfqq->actuator_idx]--; in bfq_completed_request()
6369 bfqd->tot_rq_in_driver--; in bfq_completed_request()
6393 delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC); in bfq_completed_request()
6412 (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us < in bfq_completed_request()
6414 bfq_update_rate_reset(bfqd, NULL); in bfq_completed_request()
6415 bfqd->last_completion = now_ns; in bfq_completed_request()
6426 bfqd->last_completed_rq_bfqq = bfqq; in bfq_completed_request()
6428 bfqd->last_completed_rq_bfqq = NULL; in bfq_completed_request()
6443 bfqq->wr_coeff != bfqd->bfq_wr_coeff) in bfq_completed_request()
6445 bfq_bfqq_softrt_next_start(bfqd, bfqq); in bfq_completed_request()
6451 if (bfqd->in_service_queue == bfqq) { in bfq_completed_request()
6454 bfq_arm_slice_timer(bfqd); in bfq_completed_request()
6480 bfq_bfqq_expire(bfqd, bfqq, false, in bfq_completed_request()
6485 bfq_bfqq_expire(bfqd, bfqq, false, in bfq_completed_request()
6489 if (!bfqd->tot_rq_in_driver) in bfq_completed_request()
6490 bfq_schedule_dispatch(bfqd); in bfq_completed_request()
6597 static void bfq_update_inject_limit(struct bfq_data *bfqd, in bfq_update_inject_limit() argument
6600 u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns; in bfq_update_inject_limit()
6603 if (bfqq->last_serv_time_ns > 0 && bfqd->rqs_injected) { in bfq_update_inject_limit()
6610 old_limit <= bfqd->max_rq_in_driver) in bfq_update_inject_limit()
6626 if ((bfqq->last_serv_time_ns == 0 && bfqd->tot_rq_in_driver == 1) || in bfq_update_inject_limit()
6636 } else if (!bfqd->rqs_injected && bfqd->tot_rq_in_driver == 1) in bfq_update_inject_limit()
6650 bfqd->waited_rq = NULL; in bfq_update_inject_limit()
6651 bfqd->rqs_injected = false; in bfq_update_inject_limit()
6663 struct bfq_data *bfqd; in bfq_finish_requeue_request() local
6674 bfqd = bfqq->bfqd; in bfq_finish_requeue_request()
6682 spin_lock_irqsave(&bfqd->lock, flags); in bfq_finish_requeue_request()
6684 if (rq == bfqd->waited_rq) in bfq_finish_requeue_request()
6685 bfq_update_inject_limit(bfqd, bfqq); in bfq_finish_requeue_request()
6687 bfq_completed_request(bfqq, bfqd); in bfq_finish_requeue_request()
6692 spin_unlock_irqrestore(&bfqd->lock, flags); in bfq_finish_requeue_request()
6734 bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue"); in bfq_split_bfqq()
6747 bfq_release_process_ref(bfqq->bfqd, bfqq); in bfq_split_bfqq()
6751 static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd, in bfq_get_bfqq_handle_split() argument
6757 unsigned int act_idx = bfq_actuator_index(bfqd, bio); in bfq_get_bfqq_handle_split()
6761 if (likely(bfqq && bfqq != &bfqd->oom_bfqq)) in bfq_get_bfqq_handle_split()
6769 bfqq = bfq_get_queue(bfqd, bio, is_sync, bic, split); in bfq_get_bfqq_handle_split()
6773 if ((bfqq_data->was_in_burst_list && bfqd->large_burst) || in bfq_get_bfqq_handle_split()
6808 &bfqd->burst_list); in bfq_get_bfqq_handle_split()
6894 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_init_rq() local
6900 unsigned int a_idx = bfq_actuator_index(bfqd, bio); in bfq_init_rq()
6921 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync, in bfq_init_rq()
6939 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, in bfq_init_rq()
6942 if (unlikely(bfqq == &bfqd->oom_bfqq)) in bfq_init_rq()
6968 bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d", in bfq_init_rq()
6980 if (likely(bfqq != &bfqd->oom_bfqq) && !bfqq->new_bfqq && in bfq_init_rq()
6989 bfq_bfqq_resume_state(bfqq, bfqd, bic, in bfq_init_rq()
7015 (bfqd->burst_size > 0 || in bfq_init_rq()
7016 bfq_tot_busy_queues(bfqd) == 0))) in bfq_init_rq()
7017 bfq_handle_burst(bfqd, bfqq); in bfq_init_rq()
7023 bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq) in bfq_idle_slice_timer_body() argument
7028 spin_lock_irqsave(&bfqd->lock, flags); in bfq_idle_slice_timer_body()
7037 if (bfqq != bfqd->in_service_queue) { in bfq_idle_slice_timer_body()
7038 spin_unlock_irqrestore(&bfqd->lock, flags); in bfq_idle_slice_timer_body()
7062 bfq_bfqq_expire(bfqd, bfqq, true, reason); in bfq_idle_slice_timer_body()
7065 bfq_schedule_dispatch(bfqd); in bfq_idle_slice_timer_body()
7066 spin_unlock_irqrestore(&bfqd->lock, flags); in bfq_idle_slice_timer_body()
7075 struct bfq_data *bfqd = container_of(timer, struct bfq_data, in bfq_idle_slice_timer() local
7077 struct bfq_queue *bfqq = bfqd->in_service_queue; in bfq_idle_slice_timer()
7088 bfq_idle_slice_timer_body(bfqd, bfqq); in bfq_idle_slice_timer()
7093 static void __bfq_put_async_bfqq(struct bfq_data *bfqd, in __bfq_put_async_bfqq() argument
7098 bfq_log(bfqd, "put_async_bfqq: %p", bfqq); in __bfq_put_async_bfqq()
7100 bfq_bfqq_move(bfqd, bfqq, bfqd->root_group); in __bfq_put_async_bfqq()
7102 bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d", in __bfq_put_async_bfqq()
7115 void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg) in bfq_put_async_queues() argument
7119 for (k = 0; k < bfqd->num_actuators; k++) { in bfq_put_async_queues()
7122 __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j][k]); in bfq_put_async_queues()
7124 __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq[k]); in bfq_put_async_queues()
7132 static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt) in bfq_update_depths() argument
7136 bfqd->full_depth_shift = bt->sb.shift; in bfq_update_depths()
7148 bfqd->word_depths[0][0] = max(depth >> 1, 1U); in bfq_update_depths()
7154 bfqd->word_depths[0][1] = max((depth * 3) >> 2, 1U); in bfq_update_depths()
7164 bfqd->word_depths[1][0] = max((depth * 3) >> 4, 1U); in bfq_update_depths()
7166 bfqd->word_depths[1][1] = max((depth * 6) >> 4, 1U); in bfq_update_depths()
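bfq_update_depths() (lines 7132-7166) fills the word_depths[][] table consumed by bfq_limit_depth() near the top of this listing; the four entries are fixed fractions of the full sbitmap depth, each floored at one tag. The same arithmetic as a standalone sketch, with the fractions read directly off the shifts in the fragments:

    static unsigned int at_least_one(unsigned int v)
    {
            return v ? v : 1;
    }

    /* Per-class tag-depth fractions, as set up by bfq_update_depths(). */
    static void update_word_depths(unsigned int depth, unsigned int wd[2][2])
    {
            wd[0][0] = at_least_one(depth >> 1);       /* async, no wr:   1/2  */
            wd[0][1] = at_least_one((depth * 3) >> 2); /* sync,  no wr:   3/4  */
            wd[1][0] = at_least_one((depth * 3) >> 4); /* async, wr busy: 3/16 */
            wd[1][1] = at_least_one((depth * 6) >> 4); /* sync,  wr busy: 6/16 */
    }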
7171 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_depth_updated() local
7174 bfq_update_depths(bfqd, &tags->bitmap_tags); in bfq_depth_updated()
7186 struct bfq_data *bfqd = e->elevator_data; in bfq_exit_queue() local
7190 hrtimer_cancel(&bfqd->idle_slice_timer); in bfq_exit_queue()
7192 spin_lock_irq(&bfqd->lock); in bfq_exit_queue()
7193 list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) in bfq_exit_queue()
7194 bfq_deactivate_bfqq(bfqd, bfqq, false, false); in bfq_exit_queue()
7195 spin_unlock_irq(&bfqd->lock); in bfq_exit_queue()
7197 for (actuator = 0; actuator < bfqd->num_actuators; actuator++) in bfq_exit_queue()
7198 WARN_ON_ONCE(bfqd->rq_in_driver[actuator]); in bfq_exit_queue()
7199 WARN_ON_ONCE(bfqd->tot_rq_in_driver); in bfq_exit_queue()
7201 hrtimer_cancel(&bfqd->idle_slice_timer); in bfq_exit_queue()
7204 bfqg_and_blkg_put(bfqd->root_group); in bfq_exit_queue()
7207 blkcg_deactivate_policy(bfqd->queue->disk, &blkcg_policy_bfq); in bfq_exit_queue()
7209 spin_lock_irq(&bfqd->lock); in bfq_exit_queue()
7210 bfq_put_async_queues(bfqd, bfqd->root_group); in bfq_exit_queue()
7211 kfree(bfqd->root_group); in bfq_exit_queue()
7212 spin_unlock_irq(&bfqd->lock); in bfq_exit_queue()
7215 blk_stat_disable_accounting(bfqd->queue); in bfq_exit_queue()
7217 wbt_enable_default(bfqd->queue->disk); in bfq_exit_queue()
7219 kfree(bfqd); in bfq_exit_queue()
7223 struct bfq_data *bfqd) in bfq_init_root_group() argument
7230 root_group->bfqd = bfqd; in bfq_init_root_group()
7240 struct bfq_data *bfqd; in bfq_init_queue() local
7249 bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node); in bfq_init_queue()
7250 if (!bfqd) { in bfq_init_queue()
7254 eq->elevator_data = bfqd; in bfq_init_queue()
7267 bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0, 0); in bfq_init_queue()
7268 bfqd->oom_bfqq.ref++; in bfq_init_queue()
7269 bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO; in bfq_init_queue()
7270 bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE; in bfq_init_queue()
7271 bfqd->oom_bfqq.entity.new_weight = in bfq_init_queue()
7272 bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio); in bfq_init_queue()
7275 bfq_clear_bfqq_just_created(&bfqd->oom_bfqq); in bfq_init_queue()
7282 bfqd->oom_bfqq.entity.prio_changed = 1; in bfq_init_queue()
7284 bfqd->queue = q; in bfq_init_queue()
7286 bfqd->num_actuators = 1; in bfq_init_queue()
7302 bfqd->num_actuators = ia_ranges->nr_ia_ranges; in bfq_init_queue()
7304 for (i = 0; i < bfqd->num_actuators; i++) { in bfq_init_queue()
7305 bfqd->sector[i] = ia_ranges->ia_range[i].sector; in bfq_init_queue()
7306 bfqd->nr_sectors[i] = in bfq_init_queue()
7313 if (bfqd->num_actuators == 1) { in bfq_init_queue()
7314 bfqd->sector[0] = 0; in bfq_init_queue()
7315 bfqd->nr_sectors[0] = get_capacity(q->disk); in bfq_init_queue()
7319 INIT_LIST_HEAD(&bfqd->dispatch); in bfq_init_queue()
7321 hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC, in bfq_init_queue()
7323 bfqd->idle_slice_timer.function = bfq_idle_slice_timer; in bfq_init_queue()
7325 bfqd->queue_weights_tree = RB_ROOT_CACHED; in bfq_init_queue()
7327 bfqd->num_groups_with_pending_reqs = 0; in bfq_init_queue()
7330 INIT_LIST_HEAD(&bfqd->active_list[0]); in bfq_init_queue()
7331 INIT_LIST_HEAD(&bfqd->active_list[1]); in bfq_init_queue()
7332 INIT_LIST_HEAD(&bfqd->idle_list); in bfq_init_queue()
7333 INIT_HLIST_HEAD(&bfqd->burst_list); in bfq_init_queue()
7335 bfqd->hw_tag = -1; in bfq_init_queue()
7336 bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue); in bfq_init_queue()
7338 bfqd->bfq_max_budget = bfq_default_max_budget; in bfq_init_queue()
7340 bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0]; in bfq_init_queue()
7341 bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1]; in bfq_init_queue()
7342 bfqd->bfq_back_max = bfq_back_max; in bfq_init_queue()
7343 bfqd->bfq_back_penalty = bfq_back_penalty; in bfq_init_queue()
7344 bfqd->bfq_slice_idle = bfq_slice_idle; in bfq_init_queue()
7345 bfqd->bfq_timeout = bfq_timeout; in bfq_init_queue()
7347 bfqd->bfq_large_burst_thresh = 8; in bfq_init_queue()
7348 bfqd->bfq_burst_interval = msecs_to_jiffies(180); in bfq_init_queue()
7350 bfqd->low_latency = true; in bfq_init_queue()
7355 bfqd->bfq_wr_coeff = 30; in bfq_init_queue()
7356 bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300); in bfq_init_queue()
7357 bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000); in bfq_init_queue()
7358 bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500); in bfq_init_queue()
7359 bfqd->bfq_wr_max_softrt_rate = 7000; /* in bfq_init_queue()
7365 bfqd->wr_busy_queues = 0; in bfq_init_queue()
7371 bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] * in bfq_init_queue()
7372 ref_wr_duration[blk_queue_nonrot(bfqd->queue)]; in bfq_init_queue()
7373 bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3; in bfq_init_queue()
7376 bfqd->actuator_load_threshold = 4; in bfq_init_queue()
7378 spin_lock_init(&bfqd->lock); in bfq_init_queue()
7395 bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node); in bfq_init_queue()
7396 if (!bfqd->root_group) in bfq_init_queue()
7398 bfq_init_root_group(bfqd->root_group, bfqd); in bfq_init_queue()
7399 bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); in bfq_init_queue()
7411 kfree(bfqd); in bfq_init_queue()
7448 struct bfq_data *bfqd = e->elevator_data; \
7456 SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
7457 SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
7458 SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
7459 SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
7460 SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
7461 SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
7462 SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
7463 SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
7464 SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
7470 struct bfq_data *bfqd = e->elevator_data; \
7475 USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
7482 struct bfq_data *bfqd = e->elevator_data; \
7501 STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
7503 STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
7505 STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
7506 STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
7508 STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
7514 struct bfq_data *bfqd = e->elevator_data; \
7528 USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
7535 struct bfq_data *bfqd = e->elevator_data; in bfq_max_budget_store() local
7544 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); in bfq_max_budget_store()
7548 bfqd->bfq_max_budget = __data; in bfq_max_budget_store()
7551 bfqd->bfq_user_max_budget = __data; in bfq_max_budget_store()
7563 struct bfq_data *bfqd = e->elevator_data; in bfq_timeout_sync_store() local
7576 bfqd->bfq_timeout = msecs_to_jiffies(__data); in bfq_timeout_sync_store()
7577 if (bfqd->bfq_user_max_budget == 0) in bfq_timeout_sync_store()
7578 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); in bfq_timeout_sync_store()
7586 struct bfq_data *bfqd = e->elevator_data; in bfq_strict_guarantees_store() local
7596 if (!bfqd->strict_guarantees && __data == 1 in bfq_strict_guarantees_store()
7597 && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC) in bfq_strict_guarantees_store()
7598 bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC; in bfq_strict_guarantees_store()
7600 bfqd->strict_guarantees = __data; in bfq_strict_guarantees_store()
7608 struct bfq_data *bfqd = e->elevator_data; in bfq_low_latency_store() local
7618 if (__data == 0 && bfqd->low_latency != 0) in bfq_low_latency_store()
7619 bfq_end_wr(bfqd); in bfq_low_latency_store()
7620 bfqd->low_latency = __data; in bfq_low_latency_store()