Lines matching refs:dst_cpu (each entry: source line number, the matching line, and the enclosing function)

1846 				int src_nid, int dst_cpu)  in should_numa_migrate_memory()  argument
1849 int dst_nid = cpu_to_node(dst_cpu); in should_numa_migrate_memory()
1883 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid); in should_numa_migrate_memory()
1984 int dst_cpu, dst_nid; member
2091 struct rq *rq = cpu_rq(env->dst_cpu); in task_numa_assign()
2094 if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) { in task_numa_assign()
2096 int start = env->dst_cpu; in task_numa_assign()
2105 env->dst_cpu = cpu; in task_numa_assign()
2106 rq = cpu_rq(env->dst_cpu); in task_numa_assign()
2120 if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) { in task_numa_assign()
2132 env->best_cpu = env->dst_cpu; in task_numa_assign()
2180 struct rq *dst_rq = cpu_rq(env->dst_cpu); in task_numa_compare()
2326 cpu = env->dst_cpu; in task_numa_compare()
2337 env->dst_cpu = cpu; in task_numa_compare()
2394 env->dst_cpu = env->dst_stats.idle_cpu; in task_numa_find_cpu()
2416 env->dst_cpu = cpu; in task_numa_find_cpu()
7663 cpu_util(int cpu, struct task_struct *p, int dst_cpu, int boost) in cpu_util() argument
7680 if (p && task_cpu(p) == cpu && dst_cpu != cpu) in cpu_util()
7682 else if (p && task_cpu(p) != cpu && dst_cpu == cpu) in cpu_util()
7716 if (dst_cpu == cpu) in cpu_util()
7841 struct task_struct *p, int dst_cpu) in eenv_pd_max_util() argument
7847 struct task_struct *tsk = (cpu == dst_cpu) ? p : NULL; in eenv_pd_max_util()
7848 unsigned long util = cpu_util(cpu, p, dst_cpu, 1); in eenv_pd_max_util()
7872 struct cpumask *pd_cpus, struct task_struct *p, int dst_cpu) in compute_energy() argument
7874 unsigned long max_util = eenv_pd_max_util(eenv, pd_cpus, p, dst_cpu); in compute_energy()
7877 if (dst_cpu >= 0) in compute_energy()
8756 int dst_cpu; member
8810 if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p)) in task_hot()
8840 dst_nid = cpu_to_node(env->dst_cpu); in migrate_degrades_locality()
8898 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) in can_migrate_task()
8905 if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { in can_migrate_task()
8981 set_task_cpu(p, env->dst_cpu); in detach_task()
9753 if (!sched_use_asym_prio(env->sd, env->dst_cpu)) in sched_asym()
9765 return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu); in sched_asym()
9969 (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) || in update_sd_pick_busiest()
10088 (capacity_greater(sg->sgc->min_capacity, capacity_of(env->dst_cpu)))) in update_sd_pick_busiest()
10475 llc_weight = per_cpu(sd_llc_size, env->dst_cpu); in update_idle_cpu_scan()
10479 sd_share = rcu_dereference(per_cpu(sd_llc_shared, env->dst_cpu)); in update_idle_cpu_scan()
10547 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg)); in update_sd_lb_stats()
10554 update_group_capacity(env->sd, env->dst_cpu); in update_sd_lb_stats()
11003 !capacity_greater(capacity_of(env->dst_cpu), capacity) && in find_busiest_queue()
11016 sched_asym_prefer(i, env->dst_cpu) && in find_busiest_queue()
11114 sched_use_asym_prio(env->sd, env->dst_cpu) && in asym_active_balance()
11115 (sched_asym_prefer(env->dst_cpu, env->src_cpu) || in asym_active_balance()
11155 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100)) in need_active_balance()
11177 if (!cpumask_test_cpu(env->dst_cpu, env->cpus)) in should_we_balance()
11222 return cpu == env->dst_cpu; in should_we_balance()
11227 return idle_smt == env->dst_cpu; in should_we_balance()
11230 return group_balance_cpu(sg) == env->dst_cpu; in should_we_balance()
11249 .dst_cpu = this_cpu, in load_balance()
11354 __cpumask_clear_cpu(env.dst_cpu, env.cpus); in load_balance()
11357 env.dst_cpu = env.new_dst_cpu; in load_balance()
11591 .dst_cpu = target_cpu, in active_load_balance_cpu_stop()
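
The hits from 8756 onward all follow one pattern: load_balance() records the pulling CPU in env->dst_cpu (the "member" hit at 8756 and ".dst_cpu = this_cpu" at 11249), can_migrate_task() rejects tasks whose affinity mask does not contain that CPU (8905), and detach_task() finally retargets the task via set_task_cpu(p, env->dst_cpu) (8981). The fragment below is a minimal standalone sketch of that flow, not the kernel source: struct task, struct lb_env, the toy cpus_allowed bitmask and the two helpers are simplified stand-ins named only for illustration.

/*
 * Simplified stand-ins for the dst_cpu flow seen above; this is an
 * illustrative sketch, not the kernel's lb_env / can_migrate_task().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct task {
	int		cpu;		/* CPU the task currently runs on */
	uint64_t	cpus_allowed;	/* toy affinity mask, bit N = CPU N */
};

struct lb_env {				/* stand-in for the env carrying dst_cpu */
	int		src_cpu;	/* busiest CPU tasks are pulled from */
	int		dst_cpu;	/* this CPU, the migration target */
};

static bool can_migrate_task(struct task *p, struct lb_env *env)
{
	/* mirrors the cpumask_test_cpu(env->dst_cpu, p->cpus_ptr) check (8905) */
	return (p->cpus_allowed >> env->dst_cpu) & 1;
}

static void detach_task(struct task *p, struct lb_env *env)
{
	/* mirrors set_task_cpu(p, env->dst_cpu): retarget the task (8981) */
	p->cpu = env->dst_cpu;
}

int main(void)
{
	struct lb_env env = { .src_cpu = 3, .dst_cpu = 0 };	/* like .dst_cpu = this_cpu (11249) */
	struct task p = { .cpu = 3, .cpus_allowed = 0x9 };	/* allowed on CPUs 0 and 3 */

	if (can_migrate_task(&p, &env))
		detach_task(&p, &env);

	printf("task now on CPU %d\n", p.cpu);	/* prints 0: pulled to dst_cpu */
	return 0;
}

The NUMA-balancing hits use the same idea with a separate environment: the struct behind the "member" hit at 1984 also carries dst_cpu, which task_numa_find_cpu() and task_numa_assign() (2091-2416) keep overwriting while scanning candidate CPUs on the destination node, and the energy-aware helpers (7663-7877) take dst_cpu as the hypothetical placement when estimating a candidate CPU's utilization.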