Searched refs:busiest (Results 1 – 3 of 3) sorted by relevance
kernel/sched/fair.c
  9456  .busiest = NULL,  in init_sd_lb_stats()
 10582  if (sds->busiest)  in update_sd_lb_stats()
 10774  (busiest->avg_load - sds->avg_load) * busiest->group_capacity,  in calculate_imbalance()
 10825  if (!sds.busiest)  in find_busiest_group()
 11048  busiest = rq;  in find_busiest_queue()
 11065  busiest = rq;  in find_busiest_queue()
 11072  busiest = rq;  in find_busiest_queue()
 11083  busiest = rq;  in find_busiest_queue()
 11091  return busiest;  in find_busiest_queue()
 11244  struct rq *busiest;  in load_balance() local
[all …]
kernel/sched/sched.h
  2686  __acquires(busiest->lock)  in _double_lock_balance()
  2690  double_rq_lock(this_rq, busiest);  in _double_lock_balance()
  2705  __acquires(busiest->lock)  in _double_lock_balance()
  2709  likely(raw_spin_rq_trylock(busiest))) {  in _double_lock_balance()
  2710  double_rq_clock_clear_update(this_rq, busiest);  in _double_lock_balance()
  2714  if (rq_order_less(this_rq, busiest)) {  in _double_lock_balance()
  2721  double_rq_lock(this_rq, busiest);  in _double_lock_balance()
  2735  return _double_lock_balance(this_rq, busiest);  in double_lock_balance()
  2739  __releases(busiest->lock)  in double_unlock_balance()
  2741  if (__rq_lockp(this_rq) != __rq_lockp(busiest))  in double_unlock_balance()
[all …]
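The sched.h hits above are the helpers load_balance() uses to take a second runqueue lock without deadlocking: first try-lock the busiest queue, and if that fails, fall back to taking both locks in one fixed global order. A minimal userspace sketch of that ordering idea follows, using pthread mutexes; the struct, the CPU-number rq_order_less(), and the function bodies are simplified stand-ins for the kernel's real definitions (which use raw_spin_rq_trylock(), shared-lock checks via __rq_lockp(), and lockdep annotations).

#include <pthread.h>

/* Stand-in runqueue: the real struct rq holds a raw spinlock. */
struct rq {
        int cpu;
        pthread_mutex_t lock;
};

/* Fixed global order; the kernel's rq_order_less() effectively
 * orders runqueues by CPU as well. */
static int rq_order_less(struct rq *a, struct rq *b)
{
        return a->cpu < b->cpu;
}

/* Caller holds this_rq->lock and also wants busiest->lock.
 * Returns 1 if this_rq->lock was dropped on the way, so the
 * caller knows to revalidate any state it read earlier. */
static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
        if (this_rq == busiest)             /* same lock, nothing to take */
                return 0;

        /* Fast path: the second lock is free, just take it. */
        if (pthread_mutex_trylock(&busiest->lock) == 0)
                return 0;

        if (rq_order_less(this_rq, busiest)) {
                /* We already hold the lock that comes first in the
                 * global order, so blocking on the second is safe. */
                pthread_mutex_lock(&busiest->lock);
                return 0;
        }

        /* Wrong order: release our lock and re-acquire both in order,
         * which is how two CPUs balancing toward each other avoid an
         * AB-BA deadlock. */
        pthread_mutex_unlock(&this_rq->lock);
        pthread_mutex_lock(&busiest->lock);
        pthread_mutex_lock(&this_rq->lock);
        return 1;
}

static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
{
        if (this_rq != busiest)
                pthread_mutex_unlock(&busiest->lock);
}

The return value mirrors the kernel's _double_lock_balance(), which reports whether this_rq's lock was momentarily released so that load_balance() can recheck the queue it was about to pull from.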
Documentation/scheduler/sched-domains.rst
  48  Initially, load_balance() finds the busiest group in the current sched domain.
  49  If it succeeds, it looks for the busiest runqueue of all the CPUs' runqueues in
  51  CPU's runqueue and the newly found busiest one and starts moving tasks from it
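The documentation lines above summarize the whole rebalancing path, and the fair.c hits show its ingredients. The compile-only sketch below strings those steps together in the order described; every type and helper here is an illustrative stand-in, not the kernel's actual API (the real load_balance() in kernel/sched/fair.c works through a struct lb_env and adds pinned-task handling, retries, and active balancing on top of this skeleton).

/* Illustrative stand-ins for the structures involved. */
struct rq;
struct sched_domain;
struct sched_group;

/* Assumed helpers, named after the functions the search hits show;
 * their real signatures in kernel/sched/fair.c differ. */
struct sched_group *find_busiest_group(struct sched_domain *sd);
struct rq *find_busiest_queue(struct sched_group *group);
int double_lock_balance(struct rq *this_rq, struct rq *busiest);
void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
int move_tasks(struct rq *busiest, struct rq *this_rq); /* hypothetical */

/* Sketch of the sequence sched-domains.rst describes. */
static int load_balance_sketch(struct rq *this_rq, struct sched_domain *sd)
{
        struct sched_group *group;
        struct rq *busiest;
        int nr_moved;

        /* 1. Find the busiest group in the current sched domain. */
        group = find_busiest_group(sd);
        if (!group)
                return 0;

        /* 2. Within that group, find the busiest CPU's runqueue. */
        busiest = find_busiest_queue(group);
        if (!busiest || busiest == this_rq)
                return 0;

        /* 3. Lock our runqueue and the busiest one (deadlock-safe, see
         *    the double_lock_balance() sketch above), then pull tasks
         *    from the busiest queue onto ours. */
        double_lock_balance(this_rq, busiest);
        nr_moved = move_tasks(busiest, this_rq);
        double_unlock_balance(this_rq, busiest);

        return nr_moved;
}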