Lines matching refs: p — cross-references to the struct task_struct *p parameters and locals in kernel/sched/fair.c; each entry gives the source line number, the matching line, and the enclosing function.
1054 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
1055 static unsigned long task_h_load(struct task_struct *p);
1103 void post_init_entity_util_avg(struct task_struct *p) in post_init_entity_util_avg() argument
1105 struct sched_entity *se = &p->se; in post_init_entity_util_avg()
1111 if (p->sched_class != &fair_sched_class) { in post_init_entity_util_avg()
1145 void post_init_entity_util_avg(struct task_struct *p) in post_init_entity_util_avg() argument
1206 struct task_struct *p = NULL; in update_stats_wait_start_fair() local
1214 p = task_of(se); in update_stats_wait_start_fair()
1216 __update_stats_wait_start(rq_of(cfs_rq), p, stats); in update_stats_wait_start_fair()
1223 struct task_struct *p = NULL; in update_stats_wait_end_fair() local
1240 p = task_of(se); in update_stats_wait_end_fair()
1242 __update_stats_wait_end(rq_of(cfs_rq), p, stats); in update_stats_wait_end_fair()
1414 static struct numa_group *deref_task_numa_group(struct task_struct *p) in deref_task_numa_group() argument
1416 return rcu_dereference_check(p->numa_group, p == current || in deref_task_numa_group()
1417 (lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu))); in deref_task_numa_group()
1420 static struct numa_group *deref_curr_numa_group(struct task_struct *p) in deref_curr_numa_group() argument
1422 return rcu_dereference_protected(p->numa_group, p == current); in deref_curr_numa_group()
1428 static unsigned int task_nr_scan_windows(struct task_struct *p) in task_nr_scan_windows() argument
1439 rss = get_mm_rss(p->mm); in task_nr_scan_windows()
1450 static unsigned int task_scan_min(struct task_struct *p) in task_scan_min() argument
1460 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p); in task_scan_min()
1464 static unsigned int task_scan_start(struct task_struct *p) in task_scan_start() argument
1466 unsigned long smin = task_scan_min(p); in task_scan_start()
1472 ng = rcu_dereference(p->numa_group); in task_scan_start()
1486 static unsigned int task_scan_max(struct task_struct *p) in task_scan_max() argument
1488 unsigned long smin = task_scan_min(p); in task_scan_max()
1493 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p); in task_scan_max()
1496 ng = deref_curr_numa_group(p); in task_scan_max()
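
The divisions at 1460 and 1493 tie the NUMA scan period to how many scan windows it takes to cover the task's resident set: a larger RSS means more windows and therefore a shorter per-window period, keeping the time to sweep the whole address space roughly constant. A minimal userspace sketch of that proportionality; the names nr_scan_windows/scan_period_min_ms, the constants, and the floor handling are illustrative assumptions, not the kernel's:

#include <stdio.h>

/* one scan window covers scan_size_mb of resident memory */
static unsigned int nr_scan_windows(unsigned long rss_mb, unsigned int scan_size_mb)
{
        if (rss_mb < scan_size_mb)
                rss_mb = scan_size_mb;                  /* at least one window */
        return rss_mb / scan_size_mb;
}

/* cf. 1460: the minimum period is divided across the windows */
static unsigned int scan_period_min_ms(unsigned long rss_mb,
                                       unsigned int scan_size_mb,
                                       unsigned int period_min_ms)
{
        unsigned int scan = period_min_ms / nr_scan_windows(rss_mb, scan_size_mb);

        return scan ? scan : 1;                         /* crude floor */
}

int main(void)
{
        /* a bigger working set gets a shorter per-window period */
        printf("256MB  -> %u ms\n", scan_period_min_ms(256, 256, 1000));
        printf("4096MB -> %u ms\n", scan_period_min_ms(4096, 256, 1000));
        return 0;
}
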
1512 static void account_numa_enqueue(struct rq *rq, struct task_struct *p) in account_numa_enqueue() argument
1514 rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE); in account_numa_enqueue()
1515 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p)); in account_numa_enqueue()
1518 static void account_numa_dequeue(struct rq *rq, struct task_struct *p) in account_numa_dequeue() argument
1520 rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE); in account_numa_dequeue()
1521 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p)); in account_numa_dequeue()
1533 pid_t task_numa_group_id(struct task_struct *p) in task_numa_group_id() argument
1539 ng = rcu_dereference(p->numa_group); in task_numa_group_id()
1558 static inline unsigned long task_faults(struct task_struct *p, int nid) in task_faults() argument
1560 if (!p->numa_faults) in task_faults()
1563 return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] + in task_faults()
1564 p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)]; in task_faults()
1567 static inline unsigned long group_faults(struct task_struct *p, int nid) in group_faults() argument
1569 struct numa_group *ng = deref_task_numa_group(p); in group_faults()
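
The two array slots summed at 1563-1564 are the shared and private fault counters for one node; the per-task numa_faults array is flat, with one counter per (statistic, node, shared/private) triple. A standalone sketch of that layout, assuming the index formula commonly used by task_faults_idx() and a demo NR_NODES of 4 (both assumptions, not taken from the listing):

#include <stdio.h>

enum numa_faults_stats { NUMA_MEM, NUMA_CPU, NUMA_MEMBUF, NUMA_CPUBUF, NR_STATS };
#define NR_FAULT_TYPES 2        /* [0] = shared, [1] = private */
#define NR_NODES 4              /* assumption for the demo */

static int faults_idx(enum numa_faults_stats s, int nid, int priv)
{
        return NR_FAULT_TYPES * (s * NR_NODES + nid) + priv;
}

int main(void)
{
        unsigned long numa_faults[NR_STATS * NR_NODES * NR_FAULT_TYPES] = { 0 };

        numa_faults[faults_idx(NUMA_MEM, 2, 0)] = 10;   /* shared faults on node 2 */
        numa_faults[faults_idx(NUMA_MEM, 2, 1)] = 30;   /* private faults on node 2 */

        /* task_faults(p, nid) above sums both slots for NUMA_MEM */
        printf("task faults on node 2: %lu\n",
               numa_faults[faults_idx(NUMA_MEM, 2, 0)] +
               numa_faults[faults_idx(NUMA_MEM, 2, 1)]);
        return 0;
}
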
1621 static unsigned long score_nearby_nodes(struct task_struct *p, int nid, in score_nearby_nodes() argument
1663 faults = task_faults(p, node); in score_nearby_nodes()
1665 faults = group_faults(p, node); in score_nearby_nodes()
1692 static inline unsigned long task_weight(struct task_struct *p, int nid, in task_weight() argument
1697 if (!p->numa_faults) in task_weight()
1700 total_faults = p->total_numa_faults; in task_weight()
1705 faults = task_faults(p, nid); in task_weight()
1706 faults += score_nearby_nodes(p, nid, dist, true); in task_weight()
1711 static inline unsigned long group_weight(struct task_struct *p, int nid, in group_weight() argument
1714 struct numa_group *ng = deref_task_numa_group(p); in group_weight()
1725 faults = group_faults(p, nid); in group_weight()
1726 faults += score_nearby_nodes(p, nid, dist, false); in group_weight()
1845 bool should_numa_migrate_memory(struct task_struct *p, struct page * page, in should_numa_migrate_memory() argument
1848 struct numa_group *ng = deref_curr_numa_group(p); in should_numa_migrate_memory()
1896 if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) && in should_numa_migrate_memory()
1897 (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid))) in should_numa_migrate_memory()
1922 if (cpupid_match_pid(p, last_cpupid)) in should_numa_migrate_memory()
1945 return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 > in should_numa_migrate_memory()
1946 group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4; in should_numa_migrate_memory()
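
The cross-multiplied comparison at 1945-1946 is the core page-migration decision: distribute memory according to each node's CPU-versus-memory fault ratio, with a 3/4 hysteresis so pages do not ping-pong between nodes, all in integer arithmetic. A userspace sketch of the same arithmetic (function and parameter names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool prefer_dst_node(unsigned long faults_cpu_dst, unsigned long faults_mem_dst,
                            unsigned long faults_cpu_src, unsigned long faults_mem_src)
{
        /*
         * Cross-multiplied form of
         *   faults_cpu(dst)/faults_mem(dst) * 3/4 > faults_cpu(src)/faults_mem(src)
         * so no division is needed; the 3/4 factor is the hysteresis.
         */
        return faults_cpu_dst * faults_mem_src * 3 >
               faults_cpu_src * faults_mem_dst * 4;
}

int main(void)
{
        /* dst's CPU:memory fault ratio is only 1.2x src's: stay put */
        printf("%d\n", prefer_dst_node(120, 100, 100, 100));   /* 0 */
        /* dst's ratio is 2x src's: migrate the page toward dst */
        printf("%d\n", prefer_dst_node(200, 100, 100, 100));   /* 1 */
        return 0;
}
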
1981 struct task_struct *p; member
2069 !cpumask_test_cpu(cpu, env->p->cpus_ptr)) in update_numa_stats()
2089 struct task_struct *p, long imp) in task_numa_assign() argument
2101 !cpumask_test_cpu(cpu, env->p->cpus_ptr)) { in task_numa_assign()
2127 if (p) in task_numa_assign()
2128 get_task_struct(p); in task_numa_assign()
2130 env->best_task = p; in task_numa_assign()
2179 struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p); in task_numa_compare()
2201 if (cur == env->p) { in task_numa_compare()
2309 load = task_h_load(env->p) - task_h_load(cur); in task_numa_compare()
2405 load = task_h_load(env->p); in task_numa_find_cpu()
2413 if (!cpumask_test_cpu(cpu, env->p->cpus_ptr)) in task_numa_find_cpu()
2422 static int task_numa_migrate(struct task_struct *p) in task_numa_migrate() argument
2425 .p = p, in task_numa_migrate()
2427 .src_cpu = task_cpu(p), in task_numa_migrate()
2428 .src_nid = task_node(p), in task_numa_migrate()
2466 sched_setnuma(p, task_node(p)); in task_numa_migrate()
2470 env.dst_nid = p->numa_preferred_nid; in task_numa_migrate()
2472 taskweight = task_weight(p, env.src_nid, dist); in task_numa_migrate()
2473 groupweight = group_weight(p, env.src_nid, dist); in task_numa_migrate()
2475 taskimp = task_weight(p, env.dst_nid, dist) - taskweight; in task_numa_migrate()
2476 groupimp = group_weight(p, env.dst_nid, dist) - groupweight; in task_numa_migrate()
2489 ng = deref_curr_numa_group(p); in task_numa_migrate()
2492 if (nid == env.src_nid || nid == p->numa_preferred_nid) in task_numa_migrate()
2498 taskweight = task_weight(p, env.src_nid, dist); in task_numa_migrate()
2499 groupweight = group_weight(p, env.src_nid, dist); in task_numa_migrate()
2503 taskimp = task_weight(p, nid, dist) - taskweight; in task_numa_migrate()
2504 groupimp = group_weight(p, nid, dist) - groupweight; in task_numa_migrate()
2529 if (nid != p->numa_preferred_nid) in task_numa_migrate()
2530 sched_setnuma(p, nid); in task_numa_migrate()
2535 trace_sched_stick_numa(p, env.src_cpu, NULL, -1); in task_numa_migrate()
2541 ret = migrate_task_to(p, env.best_cpu); in task_numa_migrate()
2544 trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu); in task_numa_migrate()
2548 ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu); in task_numa_migrate()
2552 trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu); in task_numa_migrate()
2558 static void numa_migrate_preferred(struct task_struct *p) in numa_migrate_preferred() argument
2563 if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults)) in numa_migrate_preferred()
2567 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16); in numa_migrate_preferred()
2568 p->numa_migrate_retry = jiffies + interval; in numa_migrate_preferred()
2571 if (task_node(p) == p->numa_preferred_nid) in numa_migrate_preferred()
2575 task_numa_migrate(p); in numa_migrate_preferred()
2621 static void update_task_scan_period(struct task_struct *p, in update_task_scan_period() argument
2628 unsigned long remote = p->numa_faults_locality[0]; in update_task_scan_period()
2629 unsigned long local = p->numa_faults_locality[1]; in update_task_scan_period()
2638 if (local + shared == 0 || p->numa_faults_locality[2]) { in update_task_scan_period()
2639 p->numa_scan_period = min(p->numa_scan_period_max, in update_task_scan_period()
2640 p->numa_scan_period << 1); in update_task_scan_period()
2642 p->mm->numa_next_scan = jiffies + in update_task_scan_period()
2643 msecs_to_jiffies(p->numa_scan_period); in update_task_scan_period()
2654 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS); in update_task_scan_period()
2687 p->numa_scan_period = clamp(p->numa_scan_period + diff, in update_task_scan_period()
2688 task_scan_min(p), task_scan_max(p)); in update_task_scan_period()
2689 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); in update_task_scan_period()
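
update_task_scan_period() shows two adjustments at 2638-2643 and 2687-2688: with nothing local to learn from (or after failed migrations) the scan period doubles toward its maximum, otherwise it is nudged by a signed diff and clamped into [scan_min, scan_max]. A compact sketch of that policy (a standalone illustration, not the kernel logic verbatim):

#include <stdio.h>

static unsigned int next_scan_period(unsigned int period, long diff,
                                     unsigned long local, unsigned long shared,
                                     unsigned long failed,
                                     unsigned int scan_min, unsigned int scan_max)
{
        long next;

        if (local + shared == 0 || failed)      /* cf. 2638-2640 */
                next = (long)period << 1;       /* back off toward the max */
        else
                next = (long)period + diff;     /* cf. 2687-2688 */

        if (next < (long)scan_min)
                next = scan_min;
        if (next > (long)scan_max)
                next = scan_max;
        return (unsigned int)next;
}

int main(void)
{
        /* no usable faults this window: back off from 1000ms */
        printf("%u\n", next_scan_period(1000, 0, 0, 0, 0, 1000, 60000));      /* 2000 */
        /* mostly-local faults earned a negative diff: scan a bit faster */
        printf("%u\n", next_scan_period(2000, -500, 100, 0, 0, 1000, 60000)); /* 1500 */
        return 0;
}
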
2699 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period) in numa_get_avg_runtime() argument
2703 now = p->se.exec_start; in numa_get_avg_runtime()
2704 runtime = p->se.sum_exec_runtime; in numa_get_avg_runtime()
2706 if (p->last_task_numa_placement) { in numa_get_avg_runtime()
2707 delta = runtime - p->last_sum_exec_runtime; in numa_get_avg_runtime()
2708 *period = now - p->last_task_numa_placement; in numa_get_avg_runtime()
2714 delta = p->se.avg.load_sum; in numa_get_avg_runtime()
2718 p->last_sum_exec_runtime = runtime; in numa_get_avg_runtime()
2719 p->last_task_numa_placement = now; in numa_get_avg_runtime()
2729 static int preferred_group_nid(struct task_struct *p, int nid) in preferred_group_nid() argument
2750 score = group_weight(p, node, dist); in preferred_group_nid()
2786 faults += group_faults(p, b); in preferred_group_nid()
2812 static void task_numa_placement(struct task_struct *p) in task_numa_placement() argument
2827 seq = READ_ONCE(p->mm->numa_scan_seq); in task_numa_placement()
2828 if (p->numa_scan_seq == seq) in task_numa_placement()
2830 p->numa_scan_seq = seq; in task_numa_placement()
2831 p->numa_scan_period_max = task_scan_max(p); in task_numa_placement()
2833 total_faults = p->numa_faults_locality[0] + in task_numa_placement()
2834 p->numa_faults_locality[1]; in task_numa_placement()
2835 runtime = numa_get_avg_runtime(p, &period); in task_numa_placement()
2838 ng = deref_curr_numa_group(p); in task_numa_placement()
2860 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2; in task_numa_placement()
2861 fault_types[priv] += p->numa_faults[membuf_idx]; in task_numa_placement()
2862 p->numa_faults[membuf_idx] = 0; in task_numa_placement()
2872 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) / in task_numa_placement()
2874 f_diff = f_weight - p->numa_faults[cpu_idx] / 2; in task_numa_placement()
2875 p->numa_faults[cpubuf_idx] = 0; in task_numa_placement()
2877 p->numa_faults[mem_idx] += diff; in task_numa_placement()
2878 p->numa_faults[cpu_idx] += f_diff; in task_numa_placement()
2879 faults += p->numa_faults[mem_idx]; in task_numa_placement()
2880 p->total_numa_faults += diff; in task_numa_placement()
2925 max_nid = preferred_group_nid(p, max_nid); in task_numa_placement()
2930 if (max_nid != p->numa_preferred_nid) in task_numa_placement()
2931 sched_setnuma(p, max_nid); in task_numa_placement()
2934 update_task_scan_period(p, fault_types[0], fault_types[1]); in task_numa_placement()
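
The arithmetic at 2860 and 2877 folds the faults buffered since the last scan window into the long-term per-node counter while halving the old value, i.e. a simple exponential decay in integer arithmetic. A minimal sketch of that folding step (names are illustrative):

#include <stdio.h>

static void fold_faults(unsigned long *longterm, unsigned long *buffered)
{
        /* cf. 2860/2877: new = old - old/2 + buffered */
        *longterm = *longterm - *longterm / 2 + *buffered;
        *buffered = 0;          /* cf. 2862: the buffer restarts every window */
}

int main(void)
{
        unsigned long mem = 1000, membuf = 0;
        int i;

        /* with no new faults the counter decays geometrically ... */
        for (i = 0; i < 3; i++) {
                fold_faults(&mem, &membuf);
                printf("window %d: %lu\n", i, mem);     /* 500, 250, 125 */
        }
        /* ... and a burst of buffered faults is folded straight in */
        membuf = 400;
        fold_faults(&mem, &membuf);
        printf("after burst: %lu\n", mem);              /* 125 - 62 + 400 = 463 */
        return 0;
}
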
2948 static void task_numa_group(struct task_struct *p, int cpupid, int flags, in task_numa_group() argument
2957 if (unlikely(!deref_curr_numa_group(p))) { in task_numa_group()
2970 grp->gid = p->pid; in task_numa_group()
2973 grp->faults[i] = p->numa_faults[i]; in task_numa_group()
2975 grp->total_faults = p->total_numa_faults; in task_numa_group()
2978 rcu_assign_pointer(p->numa_group, grp); in task_numa_group()
2991 my_grp = deref_curr_numa_group(p); in task_numa_group()
3031 my_grp->faults[i] -= p->numa_faults[i]; in task_numa_group()
3032 grp->faults[i] += p->numa_faults[i]; in task_numa_group()
3034 my_grp->total_faults -= p->total_numa_faults; in task_numa_group()
3035 grp->total_faults += p->total_numa_faults; in task_numa_group()
3043 rcu_assign_pointer(p->numa_group, grp); in task_numa_group()
3060 void task_numa_free(struct task_struct *p, bool final) in task_numa_free() argument
3063 struct numa_group *grp = rcu_dereference_raw(p->numa_group); in task_numa_free()
3064 unsigned long *numa_faults = p->numa_faults; in task_numa_free()
3074 grp->faults[i] -= p->numa_faults[i]; in task_numa_free()
3075 grp->total_faults -= p->total_numa_faults; in task_numa_free()
3079 RCU_INIT_POINTER(p->numa_group, NULL); in task_numa_free()
3084 p->numa_faults = NULL; in task_numa_free()
3087 p->total_numa_faults = 0; in task_numa_free()
3098 struct task_struct *p = current; in task_numa_fault() local
3109 if (!p->mm) in task_numa_fault()
3122 if (unlikely(!p->numa_faults)) { in task_numa_fault()
3123 int size = sizeof(*p->numa_faults) * in task_numa_fault()
3126 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN); in task_numa_fault()
3127 if (!p->numa_faults) in task_numa_fault()
3130 p->total_numa_faults = 0; in task_numa_fault()
3131 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); in task_numa_fault()
3141 priv = cpupid_match_pid(p, last_cpupid); in task_numa_fault()
3143 task_numa_group(p, last_cpupid, flags, &priv); in task_numa_fault()
3152 ng = deref_curr_numa_group(p); in task_numa_fault()
3162 if (time_after(jiffies, p->numa_migrate_retry)) { in task_numa_fault()
3163 task_numa_placement(p); in task_numa_fault()
3164 numa_migrate_preferred(p); in task_numa_fault()
3168 p->numa_pages_migrated += pages; in task_numa_fault()
3170 p->numa_faults_locality[2] += pages; in task_numa_fault()
3172 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; in task_numa_fault()
3173 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; in task_numa_fault()
3174 p->numa_faults_locality[local] += pages; in task_numa_fault()
3177 static void reset_ptenuma_scan(struct task_struct *p) in reset_ptenuma_scan() argument
3187 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1); in reset_ptenuma_scan()
3188 p->mm->numa_scan_offset = 0; in reset_ptenuma_scan()
3237 struct task_struct *p = current; in task_numa_work() local
3238 struct mm_struct *mm = p->mm; in task_numa_work()
3239 u64 runtime = p->se.sum_exec_runtime; in task_numa_work()
3248 SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work)); in task_numa_work()
3259 if (p->flags & PF_EXITING) in task_numa_work()
3274 if (p->numa_scan_period == 0) { in task_numa_work()
3275 p->numa_scan_period_max = task_scan_max(p); in task_numa_work()
3276 p->numa_scan_period = task_scan_start(p); in task_numa_work()
3279 next_scan = now + msecs_to_jiffies(p->numa_scan_period); in task_numa_work()
3287 p->node_stamp += 2 * TICK_NSEC; in task_numa_work()
3311 reset_ptenuma_scan(p); in task_numa_work()
3459 reset_ptenuma_scan(p); in task_numa_work()
3468 if (unlikely(p->se.sum_exec_runtime != runtime)) { in task_numa_work()
3469 u64 diff = p->se.sum_exec_runtime - runtime; in task_numa_work()
3470 p->node_stamp += 32 * diff; in task_numa_work()
3474 void init_numa_balancing(unsigned long clone_flags, struct task_struct *p) in init_numa_balancing() argument
3477 struct mm_struct *mm = p->mm; in init_numa_balancing()
3486 p->node_stamp = 0; in init_numa_balancing()
3487 p->numa_scan_seq = mm ? mm->numa_scan_seq : 0; in init_numa_balancing()
3488 p->numa_scan_period = sysctl_numa_balancing_scan_delay; in init_numa_balancing()
3489 p->numa_migrate_retry = 0; in init_numa_balancing()
3491 p->numa_work.next = &p->numa_work; in init_numa_balancing()
3492 p->numa_faults = NULL; in init_numa_balancing()
3493 p->numa_pages_migrated = 0; in init_numa_balancing()
3494 p->total_numa_faults = 0; in init_numa_balancing()
3495 RCU_INIT_POINTER(p->numa_group, NULL); in init_numa_balancing()
3496 p->last_task_numa_placement = 0; in init_numa_balancing()
3497 p->last_sum_exec_runtime = 0; in init_numa_balancing()
3499 init_task_work(&p->numa_work, task_numa_work); in init_numa_balancing()
3503 p->numa_preferred_nid = NUMA_NO_NODE; in init_numa_balancing()
3517 p->node_stamp = delay; in init_numa_balancing()
3554 static void update_scan_period(struct task_struct *p, int new_cpu) in update_scan_period() argument
3556 int src_nid = cpu_to_node(task_cpu(p)); in update_scan_period()
3562 if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING)) in update_scan_period()
3573 if (p->numa_scan_seq) { in update_scan_period()
3579 if (dst_nid == p->numa_preferred_nid || in update_scan_period()
3580 (p->numa_preferred_nid != NUMA_NO_NODE && in update_scan_period()
3581 src_nid != p->numa_preferred_nid)) in update_scan_period()
3585 p->numa_scan_period = task_scan_start(p); in update_scan_period()
3593 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) in account_numa_enqueue() argument
3597 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) in account_numa_dequeue() argument
3601 static inline void update_scan_period(struct task_struct *p, int new_cpu) in update_scan_period() argument
3870 void reweight_task(struct task_struct *p, const struct load_weight *lw) in reweight_task() argument
3872 struct sched_entity *se = &p->se; in reweight_task()
4798 static inline unsigned long task_util(struct task_struct *p) in task_util() argument
4800 return READ_ONCE(p->se.avg.util_avg); in task_util()
4803 static inline unsigned long _task_util_est(struct task_struct *p) in _task_util_est() argument
4805 struct util_est ue = READ_ONCE(p->se.avg.util_est); in _task_util_est()
4810 static inline unsigned long task_util_est(struct task_struct *p) in task_util_est() argument
4812 return max(task_util(p), _task_util_est(p)); in task_util_est()
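
Lines 4798-4812 show the two utilization views the placement code works with: the PELT running average, which decays while a task sleeps, and the enqueued estimate snapshotted at dequeue; task_util_est() takes the max so a periodic task does not look tiny right after waking. A simplified sketch (the kernel's estimate additionally blends an enqueued value with an EWMA; the struct and function names here are demo-only):

#include <stdio.h>

struct demo_avg {
        unsigned long util_avg;         /* decays while the task sleeps */
        unsigned long util_est;         /* remembered from the last dequeue */
};

static unsigned long demo_task_util_est(const struct demo_avg *avg)
{
        return avg->util_avg > avg->util_est ? avg->util_avg : avg->util_est;
}

int main(void)
{
        /* a periodic task shortly after sleeping: PELT has decayed, the
         * estimate still remembers how large the last activation was */
        struct demo_avg avg = { .util_avg = 120, .util_est = 400 };

        printf("placement sees util = %lu\n", demo_task_util_est(&avg));  /* 400 */
        return 0;
}
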
4816 struct task_struct *p) in util_est_enqueue() argument
4825 enqueued += _task_util_est(p); in util_est_enqueue()
4832 struct task_struct *p) in util_est_dequeue() argument
4841 enqueued -= min_t(unsigned int, enqueued, _task_util_est(p)); in util_est_dequeue()
4863 struct task_struct *p, in util_est_update() argument
4883 ue = p->se.avg.util_est; in util_est_update()
4893 ue.enqueued = task_util(p); in util_est_update()
4918 if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq)))) in util_est_update()
4943 WRITE_ONCE(p->se.avg.util_est, ue); in util_est_update()
4945 trace_sched_util_est_se_tp(&p->se); in util_est_update()
5067 static inline int task_fits_cpu(struct task_struct *p, int cpu) in task_fits_cpu() argument
5069 unsigned long uclamp_min = uclamp_eff_value(p, UCLAMP_MIN); in task_fits_cpu()
5070 unsigned long uclamp_max = uclamp_eff_value(p, UCLAMP_MAX); in task_fits_cpu()
5071 unsigned long util = task_util_est(p); in task_fits_cpu()
5079 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) in update_misfit_status() argument
5084 if (!p || p->nr_cpus_allowed == 1) { in update_misfit_status()
5089 if (task_fits_cpu(p, cpu_of(rq))) { in update_misfit_status()
5098 rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1); in update_misfit_status()
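
task_fits_cpu()/update_misfit_status() at 5067-5098 decide whether a task's uclamp-adjusted utilization fits the CPU it is on, and record a misfit load if not so the balancer can move it to a bigger CPU. A rough sketch assuming the usual fits_capacity()-style ~20% headroom margin; the real uclamp handling is more involved than the simple clamp shown here:

#include <stdbool.h>
#include <stdio.h>

static bool fits_capacity(unsigned long util, unsigned long capacity)
{
        return util * 1280 < capacity * 1024;   /* ~20% headroom (assumed margin) */
}

static bool demo_task_fits_cpu(unsigned long util_est,
                               unsigned long uclamp_min, unsigned long uclamp_max,
                               unsigned long cpu_capacity)
{
        unsigned long util = util_est;

        if (util < uclamp_min)
                util = uclamp_min;
        if (util > uclamp_max)
                util = uclamp_max;

        return fits_capacity(util, cpu_capacity);
}

int main(void)
{
        /* a 350-unit task fits a 512-capacity little CPU ... */
        printf("%d\n", demo_task_fits_cpu(350, 0, 1024, 512));   /* 1 */
        /* ... but a 600-unit uclamp_min request does not: flag it misfit */
        printf("%d\n", demo_task_fits_cpu(350, 600, 1024, 512)); /* 0 */
        return 0;
}
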
5131 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {} in util_est_enqueue() argument
5134 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {} in util_est_dequeue() argument
5137 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, in util_est_update() argument
5139 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {} in update_misfit_status() argument
6496 bool cfs_task_bw_constrained(struct task_struct *p) in cfs_task_bw_constrained() argument
6498 struct cfs_rq *cfs_rq = task_cfs_rq(p); in cfs_task_bw_constrained()
6512 static void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p) in sched_fair_update_stop_tick() argument
6531 if (cfs_task_bw_constrained(p)) in sched_fair_update_stop_tick()
6578 bool cfs_task_bw_constrained(struct task_struct *p) in cfs_task_bw_constrained() argument
6586 static inline void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p) {} in sched_fair_update_stop_tick() argument
6594 static void hrtick_start_fair(struct rq *rq, struct task_struct *p) in hrtick_start_fair() argument
6596 struct sched_entity *se = &p->se; in hrtick_start_fair()
6598 SCHED_WARN_ON(task_rq(p) != rq); in hrtick_start_fair()
6606 if (task_current(rq, p)) in hrtick_start_fair()
6630 hrtick_start_fair(struct rq *rq, struct task_struct *p) in hrtick_start_fair() argument
6700 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_fair() argument
6703 struct sched_entity *se = &p->se; in enqueue_task_fair()
6704 int idle_h_nr_running = task_has_idle_policy(p); in enqueue_task_fair()
6713 util_est_enqueue(&rq->cfs, p); in enqueue_task_fair()
6720 if (p->in_iowait) in enqueue_task_fair()
6793 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_fair() argument
6796 struct sched_entity *se = &p->se; in dequeue_task_fair()
6798 int idle_h_nr_running = task_has_idle_policy(p); in dequeue_task_fair()
6801 util_est_dequeue(&rq->cfs, p); in dequeue_task_fair()
6859 util_est_update(&rq->cfs, p, task_sleep); in dequeue_task_fair()
6901 static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p) in cpu_load_without() argument
6907 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
6914 lsub_positive(&load, task_h_load(p)); in cpu_load_without()
6924 static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p) in cpu_runnable_without() argument
6930 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
6937 lsub_positive(&runnable, p->se.avg.runnable_avg); in cpu_runnable_without()
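
cpu_load_without()/cpu_runnable_without() (6901-6937) evaluate a runqueue as if the waking task were not counted on it: its contribution is subtracted only when it is still accounted there, and the subtraction is clamped at zero (the lsub_positive() pattern) because the independently decayed signals can otherwise underflow. A small sketch of that pattern:

#include <stdio.h>

static void lsub_positive(unsigned long *val, unsigned long sub)
{
        *val -= sub < *val ? sub : *val;        /* never below zero */
}

static unsigned long cpu_load_without_demo(unsigned long cpu_load,
                                           unsigned long task_load,
                                           int task_counted_here)
{
        if (!task_counted_here)
                return cpu_load;                /* cf. 6907: nothing to discount */

        lsub_positive(&cpu_load, task_load);
        return cpu_load;
}

int main(void)
{
        printf("%lu\n", cpu_load_without_demo(700, 200, 1));    /* 500 */
        /* stale, partly decayed numbers: clamp instead of wrapping */
        printf("%lu\n", cpu_load_without_demo(150, 200, 1));    /* 0 */
        return 0;
}
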
6947 static void record_wakee(struct task_struct *p) in record_wakee() argument
6958 if (current->last_wakee != p) { in record_wakee()
6959 current->last_wakee = p; in record_wakee()
6981 static int wake_wide(struct task_struct *p) in wake_wide() argument
6984 unsigned int slave = p->wakee_flips; in wake_wide()
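
record_wakee()/wake_wide() (6947-6984) count how often the waker and the wakee switch wakeup partners; when both flip counts are large relative to the LLC size the wakeup is treated as "wide" and the affine fast path is skipped. The thresholds in this sketch follow the commonly described heuristic but should be read as an approximation, not the exact kernel code:

#include <stdio.h>

static int wake_wide_demo(unsigned int waker_flips, unsigned int wakee_flips,
                          unsigned int llc_size)
{
        unsigned int master = waker_flips, slave = wakee_flips;

        if (master < slave) {                   /* order the two counts */
                unsigned int tmp = master;
                master = slave;
                slave = tmp;
        }
        /* not wide unless both flip counts scale with the LLC size */
        if (slave < llc_size || master < slave * llc_size)
                return 0;
        return 1;
}

int main(void)
{
        /* a 1:1 producer/consumer pair: keep the pair affine */
        printf("%d\n", wake_wide_demo(2, 1, 8));        /* 0 */
        /* a server fanning out to many clients: spread the wakeups */
        printf("%d\n", wake_wide_demo(1000, 10, 8));    /* 1 */
        return 0;
}
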
7034 wake_affine_weight(struct sched_domain *sd, struct task_struct *p, in wake_affine_weight() argument
7051 task_load = task_h_load(p); in wake_affine_weight()
7076 static int wake_affine(struct sched_domain *sd, struct task_struct *p, in wake_affine() argument
7085 target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync); in wake_affine()
7087 schedstat_inc(p->stats.nr_wakeups_affine_attempts); in wake_affine()
7092 schedstat_inc(p->stats.nr_wakeups_affine); in wake_affine()
7097 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
7103 find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) in find_idlest_group_cpu() argument
7117 for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) { in find_idlest_group_cpu()
7120 if (!sched_core_cookie_match(rq, p)) in find_idlest_group_cpu()
7159 static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, in find_idlest_cpu() argument
7164 if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr)) in find_idlest_cpu()
7172 sync_entity_load_avg(&p->se); in find_idlest_cpu()
7184 group = find_idlest_group(sd, p, cpu); in find_idlest_cpu()
7190 new_cpu = find_idlest_group_cpu(group, p, cpu); in find_idlest_cpu()
7212 static inline int __select_idle_cpu(int cpu, struct task_struct *p) in __select_idle_cpu() argument
7215 sched_cpu_cookie_match(cpu_rq(cpu), p)) in __select_idle_cpu()
7279 static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu) in select_idle_core() argument
7310 static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) in select_idle_smt() argument
7314 for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) { in select_idle_smt()
7341 static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle… in select_idle_core() argument
7343 return __select_idle_cpu(core, p); in select_idle_core()
7346 static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) in select_idle_smt() argument
7358 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int … in select_idle_cpu() argument
7368 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); in select_idle_cpu()
7415 i = select_idle_core(p, cpu, cpus, &idle_cpu); in select_idle_cpu()
7422 idle_cpu = __select_idle_cpu(cpu, p); in select_idle_cpu()
7452 select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target) in select_idle_capacity() argument
7460 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); in select_idle_capacity()
7462 task_util = task_util_est(p); in select_idle_capacity()
7463 util_min = uclamp_eff_value(p, UCLAMP_MIN); in select_idle_capacity()
7464 util_max = uclamp_eff_value(p, UCLAMP_MAX); in select_idle_capacity()
7517 static int select_idle_sibling(struct task_struct *p, int prev, int target) in select_idle_sibling() argument
7529 sync_entity_load_avg(&p->se); in select_idle_sibling()
7530 task_util = task_util_est(p); in select_idle_sibling()
7531 util_min = uclamp_eff_value(p, UCLAMP_MIN); in select_idle_sibling()
7532 util_max = uclamp_eff_value(p, UCLAMP_MAX); in select_idle_sibling()
7569 recent_used_cpu = p->recent_used_cpu; in select_idle_sibling()
7570 p->recent_used_cpu = prev; in select_idle_sibling()
7575 cpumask_test_cpu(recent_used_cpu, p->cpus_ptr) && in select_idle_sibling()
7595 i = select_idle_capacity(p, sd, target); in select_idle_sibling()
7608 i = select_idle_smt(p, sd, prev); in select_idle_sibling()
7614 i = select_idle_cpu(p, sd, has_idle_core, target); in select_idle_sibling()
7663 cpu_util(int cpu, struct task_struct *p, int dst_cpu, int boost) in cpu_util() argument
7680 if (p && task_cpu(p) == cpu && dst_cpu != cpu) in cpu_util()
7681 lsub_positive(&util, task_util(p)); in cpu_util()
7682 else if (p && task_cpu(p) != cpu && dst_cpu == cpu) in cpu_util()
7683 util += task_util(p); in cpu_util()
7717 util_est += _task_util_est(p); in cpu_util()
7718 else if (p && unlikely(task_on_rq_queued(p) || current == p)) in cpu_util()
7719 lsub_positive(&util_est, _task_util_est(p)); in cpu_util()
7750 static unsigned long cpu_util_without(int cpu, struct task_struct *p) in cpu_util_without() argument
7753 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
7754 p = NULL; in cpu_util_without()
7756 return cpu_util(cpu, p, -1, 0); in cpu_util_without()
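
cpu_util() and cpu_util_without() (7663-7756) evaluate a CPU as it would look after a proposed placement: the task's utilization is removed from the CPU it currently counts on (7680-7681) and added to a candidate destination (7682-7683). A simplified sketch of that adjustment, leaving out the util_est side and the boost parameter:

#include <stdio.h>

static void lsub_positive(unsigned long *val, unsigned long sub)
{
        *val -= sub < *val ? sub : *val;
}

static unsigned long cpu_util_demo(unsigned long cpu_util_avg, int cpu,
                                   unsigned long task_util, int task_cpu,
                                   int dst_cpu)
{
        unsigned long util = cpu_util_avg;

        if (task_cpu == cpu && dst_cpu != cpu)          /* task would leave */
                lsub_positive(&util, task_util);
        else if (task_cpu != cpu && dst_cpu == cpu)     /* task would arrive */
                util += task_util;

        return util;
}

int main(void)
{
        /* task (util 150) counted on CPU0; evaluate moving it to CPU1 */
        printf("CPU0: %lu\n", cpu_util_demo(600, 0, 150, 0, 1));   /* 450 */
        printf("CPU1: %lu\n", cpu_util_demo(300, 1, 150, 0, 1));   /* 450 */
        return 0;
}
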
7782 struct task_struct *p, int prev_cpu) in eenv_task_busy_time() argument
7790 busy_time = scale_irq_capacity(task_util_est(p), irq, max_cap); in eenv_task_busy_time()
7818 struct task_struct *p) in eenv_pd_busy_time() argument
7824 unsigned long util = cpu_util(cpu, p, -1, 0); in eenv_pd_busy_time()
7841 struct task_struct *p, int dst_cpu) in eenv_pd_max_util() argument
7847 struct task_struct *tsk = (cpu == dst_cpu) ? p : NULL; in eenv_pd_max_util()
7848 unsigned long util = cpu_util(cpu, p, dst_cpu, 1); in eenv_pd_max_util()
7872 struct cpumask *pd_cpus, struct task_struct *p, int dst_cpu) in compute_energy() argument
7874 unsigned long max_util = eenv_pd_max_util(eenv, pd_cpus, p, dst_cpu); in compute_energy()
7922 static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) in find_energy_efficient_cpu() argument
7926 unsigned long p_util_min = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MIN) : 0; in find_energy_efficient_cpu()
7927 unsigned long p_util_max = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MAX) : 1024; in find_energy_efficient_cpu()
7954 sync_entity_load_avg(&p->se); in find_energy_efficient_cpu()
7955 if (!task_util_est(p) && p_util_min == 0) in find_energy_efficient_cpu()
7958 eenv_task_busy_time(&eenv, p, prev_cpu); in find_energy_efficient_cpu()
7990 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) in find_energy_efficient_cpu()
7993 util = cpu_util(cpu, p, cpu, 0); in find_energy_efficient_cpu()
8044 eenv_pd_busy_time(&eenv, cpus, p); in find_energy_efficient_cpu()
8046 base_energy = compute_energy(&eenv, pd, cpus, p, -1); in find_energy_efficient_cpu()
8050 prev_delta = compute_energy(&eenv, pd, cpus, p, in find_energy_efficient_cpu()
8074 cur_delta = compute_energy(&eenv, pd, cpus, p, in find_energy_efficient_cpu()
8121 select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags) in select_task_rq_fair() argument
8134 lockdep_assert_held(&p->pi_lock); in select_task_rq_fair()
8136 record_wakee(p); in select_task_rq_fair()
8139 cpumask_test_cpu(cpu, p->cpus_ptr)) in select_task_rq_fair()
8143 new_cpu = find_energy_efficient_cpu(p, prev_cpu); in select_task_rq_fair()
8149 want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr); in select_task_rq_fair()
8161 new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync); in select_task_rq_fair()
8180 new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag); in select_task_rq_fair()
8183 new_cpu = select_idle_sibling(p, prev_cpu, new_cpu); in select_task_rq_fair()
8195 static void migrate_task_rq_fair(struct task_struct *p, int new_cpu) in migrate_task_rq_fair() argument
8197 struct sched_entity *se = &p->se; in migrate_task_rq_fair()
8199 if (!task_on_rq_migrating(p)) { in migrate_task_rq_fair()
8218 update_scan_period(p, new_cpu); in migrate_task_rq_fair()
8221 static void task_dead_fair(struct task_struct *p) in task_dead_fair() argument
8223 remove_entity_load_avg(&p->se); in task_dead_fair()
8250 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) in check_preempt_wakeup() argument
8253 struct sched_entity *se = &curr->se, *pse = &p->se; in check_preempt_wakeup()
8309 if (unlikely(p->policy != SCHED_NORMAL)) in check_preempt_wakeup()
8364 struct task_struct *p; in pick_next_task_fair() local
8418 p = task_of(se); in pick_next_task_fair()
8425 if (prev != p) { in pick_next_task_fair()
8458 p = task_of(se); in pick_next_task_fair()
8467 list_move(&p->se.group_node, &rq->cfs_tasks); in pick_next_task_fair()
8471 hrtick_start_fair(rq, p); in pick_next_task_fair()
8473 update_misfit_status(p, rq); in pick_next_task_fair()
8474 sched_fair_update_stop_tick(rq, p); in pick_next_task_fair()
8476 return p; in pick_next_task_fair()
8555 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p) in yield_to_task_fair() argument
8557 struct sched_entity *se = &p->se; in yield_to_task_fair()
8780 static int task_hot(struct task_struct *p, struct lb_env *env) in task_hot() argument
8786 if (p->sched_class != &fair_sched_class) in task_hot()
8789 if (unlikely(task_has_idle_policy(p))) in task_hot()
8800 (&p->se == cfs_rq_of(&p->se)->next)) in task_hot()
8810 if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p)) in task_hot()
8816 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
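
task_hot() ends with the recency test built on 8816: the time since the task last executed is compared against the migration-cost threshold, and a recently run task is presumed cache-hot and kept local unless the balancer is under real pressure. A toy sketch of that test; the 0.5ms figure is the usual sysctl_sched_migration_cost default and is an assumption here:

#include <stdbool.h>
#include <stdio.h>

#define MIGRATION_COST_NS 500000ULL     /* assumed default: 0.5ms */

static bool task_hot_demo(unsigned long long now_ns, unsigned long long exec_start_ns)
{
        return now_ns - exec_start_ns < MIGRATION_COST_NS;
}

int main(void)
{
        unsigned long long now = 10000000ULL;   /* 10ms into a demo clock */

        printf("%d\n", task_hot_demo(now, now - 100000));    /* ran 0.1ms ago: hot */
        printf("%d\n", task_hot_demo(now, now - 3000000));   /* ran 3ms ago: cold */
        return 0;
}
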
8827 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) in migrate_degrades_locality() argument
8829 struct numa_group *numa_group = rcu_dereference(p->numa_group); in migrate_degrades_locality()
8836 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) in migrate_degrades_locality()
8846 if (src_nid == p->numa_preferred_nid) { in migrate_degrades_locality()
8854 if (dst_nid == p->numa_preferred_nid) in migrate_degrades_locality()
8863 src_weight = group_weight(p, src_nid, dist); in migrate_degrades_locality()
8864 dst_weight = group_weight(p, dst_nid, dist); in migrate_degrades_locality()
8866 src_weight = task_weight(p, src_nid, dist); in migrate_degrades_locality()
8867 dst_weight = task_weight(p, dst_nid, dist); in migrate_degrades_locality()
8874 static inline int migrate_degrades_locality(struct task_struct *p, in migrate_degrades_locality() argument
8885 int can_migrate_task(struct task_struct *p, struct lb_env *env) in can_migrate_task() argument
8898 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) in can_migrate_task()
8902 if (kthread_is_per_cpu(p)) in can_migrate_task()
8905 if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { in can_migrate_task()
8908 schedstat_inc(p->stats.nr_failed_migrations_affine); in can_migrate_task()
8928 if (cpumask_test_cpu(cpu, p->cpus_ptr)) { in can_migrate_task()
8941 if (task_on_cpu(env->src_rq, p)) { in can_migrate_task()
8942 schedstat_inc(p->stats.nr_failed_migrations_running); in can_migrate_task()
8956 tsk_cache_hot = migrate_degrades_locality(p, env); in can_migrate_task()
8958 tsk_cache_hot = task_hot(p, env); in can_migrate_task()
8964 schedstat_inc(p->stats.nr_forced_migrations); in can_migrate_task()
8969 schedstat_inc(p->stats.nr_failed_migrations_hot); in can_migrate_task()
8976 static void detach_task(struct task_struct *p, struct lb_env *env) in detach_task() argument
8980 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); in detach_task()
8981 set_task_cpu(p, env->dst_cpu); in detach_task()
8992 struct task_struct *p; in detach_one_task() local
8996 list_for_each_entry_reverse(p, in detach_one_task()
8998 if (!can_migrate_task(p, env)) in detach_one_task()
9001 detach_task(p, env); in detach_one_task()
9010 return p; in detach_one_task()
9025 struct task_struct *p; in detach_tasks() local
9062 p = list_last_entry(tasks, struct task_struct, se.group_node); in detach_tasks()
9064 if (!can_migrate_task(p, env)) in detach_tasks()
9076 load = max_t(unsigned long, task_h_load(p), 1); in detach_tasks()
9095 util = task_util_est(p); in detach_tasks()
9109 if (task_fits_cpu(p, env->src_cpu)) in detach_tasks()
9116 detach_task(p, env); in detach_tasks()
9117 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
9140 list_move(&p->se.group_node, tasks); in detach_tasks()
9156 static void attach_task(struct rq *rq, struct task_struct *p) in attach_task() argument
9160 WARN_ON_ONCE(task_rq(p) != rq); in attach_task()
9161 activate_task(rq, p, ENQUEUE_NOCLOCK); in attach_task()
9162 check_preempt_curr(rq, p, 0); in attach_task()
9169 static void attach_one_task(struct rq *rq, struct task_struct *p) in attach_one_task() argument
9175 attach_task(rq, p); in attach_one_task()
9186 struct task_struct *p; in attach_tasks() local
9193 p = list_first_entry(tasks, struct task_struct, se.group_node); in attach_tasks()
9194 list_del_init(&p->se.group_node); in attach_tasks()
9196 attach_task(env->dst_rq, p); in attach_tasks()
9359 static unsigned long task_h_load(struct task_struct *p) in task_h_load() argument
9361 struct cfs_rq *cfs_rq = task_cfs_rq(p); in task_h_load()
9364 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
9380 static unsigned long task_h_load(struct task_struct *p) in task_h_load() argument
9382 return p->se.avg.load_avg; in task_h_load()
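
task_h_load() (9359-9364) scales a task's raw load_avg by its group runqueue's hierarchical contribution: load_avg * h_load divided by the runqueue's own load, with the denominator's "+ 1" guarding against a fully decayed runqueue; the non-group variant at 9380-9382 returns load_avg directly. A small worked sketch; the denominator term cfs_rq_load_avg(cfs_rq) is not visible in the listing and is assumed here:

#include <stdio.h>

static unsigned long task_h_load_demo(unsigned long task_load_avg,
                                      unsigned long cfs_rq_h_load,
                                      unsigned long cfs_rq_load_avg)
{
        return task_load_avg * cfs_rq_h_load / (cfs_rq_load_avg + 1);
}

int main(void)
{
        /* a 512-load task owning half of a group that contributes 300
         * units of hierarchical load: the balancer sees roughly 150 */
        printf("%lu\n", task_h_load_demo(512, 300, 1024));      /* 149 */
        /* a root-level task where h_load tracks the rq load: ~load_avg */
        printf("%lu\n", task_h_load_demo(512, 1024, 1023));     /* 512 */
        return 0;
}
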
10131 static unsigned int task_running_on_cpu(int cpu, struct task_struct *p) in task_running_on_cpu() argument
10134 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in task_running_on_cpu()
10137 if (task_on_rq_queued(p)) in task_running_on_cpu()
10150 static int idle_cpu_without(int cpu, struct task_struct *p) in idle_cpu_without() argument
10154 if (rq->curr != rq->idle && rq->curr != p) in idle_cpu_without()
10181 struct task_struct *p) in update_sg_wakeup_stats() argument
10195 sgs->group_load += cpu_load_without(rq, p); in update_sg_wakeup_stats()
10196 sgs->group_util += cpu_util_without(i, p); in update_sg_wakeup_stats()
10197 sgs->group_runnable += cpu_runnable_without(rq, p); in update_sg_wakeup_stats()
10198 local = task_running_on_cpu(i, p); in update_sg_wakeup_stats()
10207 if (!nr_running && idle_cpu_without(i, p)) in update_sg_wakeup_stats()
10213 task_fits_cpu(p, i)) in update_sg_wakeup_stats()
10293 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) in find_idlest_group() argument
10309 p->cpus_ptr)) in find_idlest_group()
10313 if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group)) in find_idlest_group()
10326 update_sg_wakeup_stats(sd, group, sgs, p); in find_idlest_group()
10412 if (cpu_to_node(this_cpu) == p->numa_preferred_nid) in find_idlest_group()
10416 if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid) in find_idlest_group()
10428 if (p->nr_cpus_allowed != NR_CPUS) { in find_idlest_group()
10431 cpumask_and(cpus, sched_group_span(local), p->cpus_ptr); in find_idlest_group()
11553 struct task_struct *p = NULL; in active_load_balance_cpu_stop() local
11602 p = detach_one_task(&env); in active_load_balance_cpu_stop()
11603 if (p) { in active_load_balance_cpu_stop()
11616 if (p) in active_load_balance_cpu_stop()
11617 attach_one_task(target_rq, p); in active_load_balance_cpu_stop()
12491 void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi) in task_vruntime_update() argument
12493 struct sched_entity *se = &p->se; in task_vruntime_update()
12495 if (p->sched_class != &fair_sched_class) in task_vruntime_update()
12549 static int task_is_throttled_fair(struct task_struct *p, int cpu) in task_is_throttled_fair() argument
12554 cfs_rq = task_group(p)->cfs_rq[cpu]; in task_is_throttled_fair()
12596 static void task_fork_fair(struct task_struct *p) in task_fork_fair() argument
12598 struct sched_entity *se = &p->se, *curr; in task_fork_fair()
12619 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) in prio_changed_fair() argument
12621 if (!task_on_rq_queued(p)) in prio_changed_fair()
12632 if (task_current(rq, p)) { in prio_changed_fair()
12633 if (p->prio > oldprio) in prio_changed_fair()
12636 check_preempt_curr(rq, p, 0); in prio_changed_fair()
12706 static void detach_task_cfs_rq(struct task_struct *p) in detach_task_cfs_rq() argument
12708 struct sched_entity *se = &p->se; in detach_task_cfs_rq()
12713 static void attach_task_cfs_rq(struct task_struct *p) in attach_task_cfs_rq() argument
12715 struct sched_entity *se = &p->se; in attach_task_cfs_rq()
12720 static void switched_from_fair(struct rq *rq, struct task_struct *p) in switched_from_fair() argument
12722 detach_task_cfs_rq(p); in switched_from_fair()
12725 static void switched_to_fair(struct rq *rq, struct task_struct *p) in switched_to_fair() argument
12727 attach_task_cfs_rq(p); in switched_to_fair()
12729 if (task_on_rq_queued(p)) { in switched_to_fair()
12735 if (task_current(rq, p)) in switched_to_fair()
12738 check_preempt_curr(rq, p, 0); in switched_to_fair()
12747 static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) in set_next_task_fair() argument
12749 struct sched_entity *se = &p->se; in set_next_task_fair()
12752 if (task_on_rq_queued(p)) { in set_next_task_fair()
12780 static void task_change_group_fair(struct task_struct *p) in task_change_group_fair() argument
12786 if (READ_ONCE(p->__state) == TASK_NEW) in task_change_group_fair()
12789 detach_task_cfs_rq(p); in task_change_group_fair()
12793 p->se.avg.last_update_time = 0; in task_change_group_fair()
12795 set_task_rq(p, task_cpu(p)); in task_change_group_fair()
12796 attach_task_cfs_rq(p); in task_change_group_fair()
13152 void show_numa_stats(struct task_struct *p, struct seq_file *m) in show_numa_stats() argument
13159 ng = rcu_dereference(p->numa_group); in show_numa_stats()
13161 if (p->numa_faults) { in show_numa_stats()
13162 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)]; in show_numa_stats()
13163 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)]; in show_numa_stats()