Searched refs:task_rq (Results 1 – 5 of 5) sorted by relevance
kernel/sched/deadline.c
  70    struct rq *rq = task_rq(p);  in dl_rq_of_se()
  320   rq = task_rq(p);  in dl_change_utilization()
  1052  struct rq *rq = task_rq(p);  in start_dl_timer()
  1882  rq = task_rq(p);  in migrate_task_rq_dl()
  2148  if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))  in find_later_rq()
  2244  if (unlikely(task_rq(task) != rq ||  in find_lock_later_rq()
  2497  rq = task_rq(p);  in set_cpus_allowed_dl()
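The find_lock_later_rq() hit at line 2244 shows the re-validation the deadline push path does: taking the candidate "later" runqueue's lock may force the current rq's lock to be dropped, so once both locks are held the code confirms that task_rq(task) is still the rq it started from before moving the task. Below is a minimal standalone sketch of that idea in plain C with pthread mutexes; the *_model helpers, NR_CPUS, the runqueues[] array, and the simplified lock-ordering logic are stand-ins invented for illustration, not the kernel's actual deadline.c code.

/*
 * Sketch of the re-validation after a possible lock drop.  Not kernel code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct rq {
	pthread_mutex_t lock;
	int cpu;
};

struct task_struct {
	int cpu;
};

static struct rq runqueues[NR_CPUS];

#define cpu_rq(c)	(&runqueues[(c)])
#define task_cpu(p)	((p)->cpu)
#define task_rq(p)	cpu_rq(task_cpu(p))

/*
 * Lock @later while already holding @rq->lock, respecting a global lock
 * order (lower address first).  Returns true if @rq->lock had to be
 * dropped along the way - the case that makes re-validation necessary.
 */
static bool double_lock_balance_model(struct rq *rq, struct rq *later)
{
	if (pthread_mutex_trylock(&later->lock) == 0)
		return false;

	pthread_mutex_unlock(&rq->lock);
	if (later < rq) {
		pthread_mutex_lock(&later->lock);
		pthread_mutex_lock(&rq->lock);
	} else {
		pthread_mutex_lock(&rq->lock);
		pthread_mutex_lock(&later->lock);
	}
	return true;
}

/* Returns the locked target rq, or NULL if the task escaped meanwhile. */
static struct rq *find_lock_later_rq_model(struct task_struct *p,
					   struct rq *rq, int later_cpu)
{
	struct rq *later_rq = cpu_rq(later_cpu);

	if (double_lock_balance_model(rq, later_rq) && task_rq(p) != rq) {
		/* rq->lock was dropped and the task moved: give up */
		pthread_mutex_unlock(&later_rq->lock);
		return NULL;
	}
	return later_rq;
}

int main(void)
{
	struct task_struct p = { .cpu = 0 };
	struct rq *rq, *later;

	for (int i = 0; i < NR_CPUS; i++) {
		pthread_mutex_init(&runqueues[i].lock, NULL);
		runqueues[i].cpu = i;
	}

	rq = task_rq(&p);
	pthread_mutex_lock(&rq->lock);	/* caller already holds the task's rq lock */
	later = find_lock_later_rq_model(&p, rq, 2);
	if (later) {
		printf("can push to cpu %d\n", later->cpu);
		pthread_mutex_unlock(&later->lock);
	} else {
		printf("task moved, bail out\n");
	}
	pthread_mutex_unlock(&rq->lock);
	return 0;
}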
kernel/sched/core.c
  212   if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))  in __sched_core_less()
  635   rq = task_rq(p);  in __task_rq_lock()
  637   if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {  in __task_rq_lock()
  659   rq = task_rq(p);  in task_rq_lock()
  678   if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {  in task_rq_lock()
  2295  rq = task_rq(p);  in wait_task_inactive()
  2610  if (task_rq(p) == rq) {  in migration_cpu_stop()
  2689  if (task_rq(p) != rq)  in push_cpu_stop()
  2706  if (task_rq(p) == rq) {  in push_cpu_stop()
  2748  struct rq *rq = task_rq(p);  in __do_set_cpus_allowed()
  [all …]
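The __task_rq_lock()/task_rq_lock() hits at lines 635-678 show why task_rq() alone never pins a task to a runqueue: p's CPU is read without any lock, so after taking that runqueue's lock the code re-checks rq == task_rq(p) and !task_on_rq_migrating(p), retrying if the task moved in between. Here is a small standalone model of that retry loop, using pthread mutexes; the _model suffix, the on_rq_migrating flag, NR_CPUS, and the runqueues[] array are simplified stand-ins, not kernel code.

/*
 * Sketch of the lock-then-recheck-then-retry pattern.  Not kernel code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct rq {
	pthread_mutex_t lock;
	int cpu;
};

struct task_struct {
	int cpu;		/* only changed with the relevant rq locks held */
	bool on_rq_migrating;	/* stand-in for task_on_rq_migrating(p) */
};

static struct rq runqueues[NR_CPUS];

#define cpu_rq(c)	(&runqueues[(c)])
#define task_cpu(p)	((p)->cpu)
#define task_rq(p)	cpu_rq(task_cpu(p))

/* Lock the runqueue the task is actually on, retrying across migrations. */
static struct rq *task_rq_lock_model(struct task_struct *p)
{
	struct rq *rq;

	for (;;) {
		rq = task_rq(p);			/* racy, unlocked read */
		pthread_mutex_lock(&rq->lock);
		if (rq == task_rq(p) && !p->on_rq_migrating)
			return rq;			/* still the right rq */
		pthread_mutex_unlock(&rq->lock);	/* task moved: retry */
	}
}

int main(void)
{
	struct task_struct p = { .cpu = 1 };

	for (int i = 0; i < NR_CPUS; i++) {
		pthread_mutex_init(&runqueues[i].lock, NULL);
		runqueues[i].cpu = i;
	}

	struct rq *rq = task_rq_lock_model(&p);
	printf("locked the rq of cpu %d\n", rq->cpu);
	pthread_mutex_unlock(&rq->lock);
	return 0;
}

In the kernel, task_rq_lock() additionally takes p->pi_lock around this loop and __task_rq_lock() expects its caller to already hold it; that part is left out of the sketch.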
kernel/sched/rt.c
  303   return task_rq(p);  in rq_of_rt_se()
  1896  ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,  in find_lowest_rq()
  1901  ret = cpupri_find(&task_rq(task)->rd->cpupri,  in find_lowest_rq()
  2002  if (unlikely(task_rq(task) != rq ||  in find_lock_lowest_rq()
kernel/sched/sched.h
  1212  #define task_rq(p) cpu_rq(task_cpu(p))  macro
  1450  return &task_rq(p)->cfs;  in task_cfs_rq()
  1456  struct rq *rq = task_rq(p);  in cfs_rq_of()
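Line 1212 is the definition itself: task_rq(p) simply resolves the task's recorded CPU and returns that CPU's runqueue, which is why every user above has to hold the right lock or re-check the result. A tiny standalone model of the expansion follows; NR_CPUS, the runqueues[] array, and the toy structures stand in for the kernel's per-CPU runqueue variable and are not the real definitions.

/*
 * Model of what task_rq() expands to: a per-CPU runqueue lookup keyed by
 * the task's recorded CPU.  Not kernel code.
 */
#include <stdio.h>

#define NR_CPUS 4

struct cfs_rq {
	int nr_running;
};

struct rq {
	int cpu;
	struct cfs_rq cfs;	/* what task_cfs_rq() at line 1450 returns */
};

struct task_struct {
	int cpu;		/* CPU the task was last queued on */
	const char *comm;
};

static struct rq runqueues[NR_CPUS];

#define cpu_rq(cpu)	(&runqueues[(cpu)])
#define task_cpu(p)	((p)->cpu)
/* Same shape as the kernel definition: a per-CPU lookup, nothing more. */
#define task_rq(p)	cpu_rq(task_cpu(p))
#define task_cfs_rq(p)	(&task_rq(p)->cfs)

int main(void)
{
	struct task_struct p = { .cpu = 2, .comm = "demo" };

	for (int i = 0; i < NR_CPUS; i++)
		runqueues[i].cpu = i;

	printf("%s: task_cpu() = %d, task_rq()->cpu = %d, cfs nr_running = %d\n",
	       p.comm, task_cpu(&p), task_rq(&p)->cpu, task_cfs_rq(&p)->nr_running);
	return 0;
}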
kernel/sched/fair.c
  1443   (lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu)));  in deref_task_numa_group()
  6632   SCHED_WARN_ON(task_rq(p) != rq);  in hrtick_start_fair()
  8227   * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
  9203   WARN_ON_ONCE(task_rq(p) != rq);  in attach_one_task()
  12547  struct rq *rq = task_rq(a);  in cfs_prio_less()
  12554  SCHED_WARN_ON(task_rq(b)->core != rq->core);  in cfs_prio_less()
  12577  cfs_rqa = &task_rq(a)->cfs;  in cfs_prio_less()
  12578  cfs_rqb = &task_rq(b)->cfs;  in cfs_prio_less()
  12629  check_update_overutilized_status(task_rq(curr));
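Several of the fair.c hits (hrtick_start_fair(), attach_one_task(), cfs_prio_less()) are sanity checks: a helper that is handed both a runqueue and a task asserts that task_rq(p) == rq before touching per-task CFS state, because that state is protected by the runqueue's lock. A minimal standalone sketch of that convention follows, with assert() standing in for WARN_ON_ONCE()/SCHED_WARN_ON(); the structures and the attach_one_task_model() helper are simplified stand-ins, not kernel code.

/*
 * Sketch of the "does this task really belong to this rq?" assertion.
 * Not kernel code.
 */
#include <assert.h>
#include <stdio.h>

#define NR_CPUS 4

struct cfs_rq {
	int h_nr_running;
};

struct rq {
	int cpu;
	struct cfs_rq cfs;
};

struct task_struct {
	int cpu;
};

static struct rq runqueues[NR_CPUS];

#define cpu_rq(c)	(&runqueues[(c)])
#define task_cpu(p)	((p)->cpu)
#define task_rq(p)	cpu_rq(task_cpu(p))

/* Caller must hold rq's lock; p must genuinely belong to rq. */
static void attach_one_task_model(struct rq *rq, struct task_struct *p)
{
	assert(task_rq(p) == rq);	/* mirrors WARN_ON_ONCE(task_rq(p) != rq) */
	rq->cfs.h_nr_running++;		/* toy stand-in for the real attach */
}

int main(void)
{
	struct task_struct p = { .cpu = 3 };

	for (int i = 0; i < NR_CPUS; i++)
		runqueues[i].cpu = i;

	attach_one_task_model(task_rq(&p), &p);
	printf("cpu%d cfs.h_nr_running = %d\n", task_cpu(&p),
	       task_rq(&p)->cfs.h_nr_running);
	return 0;
}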