Searched refs: task_rq (Results 1 – 5 of 5) sorted by relevance
  70  struct rq *rq = task_rq(p);  in dl_rq_of_se()
 320  rq = task_rq(p);  in dl_change_utilization()
1052  struct rq *rq = task_rq(p);  in start_dl_timer()
1882  rq = task_rq(p);  in migrate_task_rq_dl()
2148  if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))  in find_later_rq()
2244  if (unlikely(task_rq(task) != rq ||  in find_lock_later_rq()
2497  rq = task_rq(p);  in set_cpus_allowed_dl()
 212  if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))  in __sched_core_less()
 635  rq = task_rq(p);  in __task_rq_lock()
 637  if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {  in __task_rq_lock()
 659  rq = task_rq(p);  in task_rq_lock()
 665  * [S] ->on_rq = MIGRATING      [L] rq = task_rq()  in task_rq_lock()
 667  * [S] ->cpu = new_cpu          [L] task_rq()  in task_rq_lock()
 675  * dependency headed by '[L] rq = task_rq()' and the acquire  in task_rq_lock()
 678  if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {  in task_rq_lock()
2298  rq = task_rq(p);  in wait_task_inactive()
2609  * If task_rq(  in migration_cpu_stop()
[all...]
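Hits 635–678 are the locking idiom that makes task_rq() safe to use: the task-to-runqueue mapping can change between reading it and acquiring the runqueue lock, so the lock is taken optimistically and the mapping re-checked while holding it, retrying on a mismatch; the comment lines at 665–675 spell out the store/load ordering that makes the re-check sufficient. A simplified sketch of the loop in __task_rq_lock(), with the lockdep assertion and rq pinning omitted:

    struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
    {
            struct rq *rq;

            for (;;) {
                    rq = task_rq(p);                /* racy read of p's current runqueue */
                    raw_spin_rq_lock(rq);
                    if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
                            return rq;              /* mapping stable while the lock is held */
                    raw_spin_rq_unlock(rq);         /* p moved (or is moving); retry */

                    while (unlikely(task_on_rq_migrating(p)))
                            cpu_relax();            /* let the in-flight migration finish */
            }
    }

task_rq_lock() (hits 659–678) is the same loop wrapped in p->pi_lock, which keeps p's placement from being changed concurrently across the retries.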
 303  return task_rq(p);  in rq_of_rt_se()
1896  ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,  in find_lowest_rq()
1901  ret = cpupri_find(&task_rq(task)->rd->cpupri,  in find_lowest_rq()
2002  if (unlikely(task_rq(task) != rq ||  in find_lock_lowest_rq()
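The cpudl_find()/cpupri_find() hits (2148, 1896, 1901) pick a candidate CPU from the root domain attached to the task's current runqueue, and the hits at 2244 (deadline) and 2002 (RT) are the same revalidation pattern in both push paths: once the chosen runqueue has been double-locked, everything is re-checked because the original lock may have been dropped on the way. A simplified sketch of that recheck using the kernel's double_lock_balance()/double_unlock_balance() helpers; the exact list of conditions differs between the DL and RT variants and across kernel versions:

    /* lowest_rq was chosen while only rq->lock was held; taking the second
     * lock may drop rq->lock, so the task may have moved in the meantime. */
    if (double_lock_balance(rq, lowest_rq)) {
            if (unlikely(task_rq(task) != rq ||                     /* migrated off rq   */
                         !cpumask_test_cpu(lowest_rq->cpu,
                                           &task->cpus_mask) ||     /* affinity changed  */
                         task_on_cpu(rq, task) ||                   /* started running   */
                         !task_on_rq_queued(task))) {               /* dequeued          */
                    double_unlock_balance(rq, lowest_rq);
                    lowest_rq = NULL;                               /* give up this round */
            }
    }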
1212  #define task_rq(p) cpu_rq(task_cpu(p))  macro
1450  return &task_rq(p)->cfs;  in task_cfs_rq()
1456  struct rq *rq = task_rq(p);  in cfs_rq_of()
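For orientation, the definition at 1212 shows that task_rq() is just an alias: it maps a task to the runqueue of the CPU the task is currently placed on, and helpers such as task_cfs_rq() (hit 1450) then reach into that runqueue for the class-specific sub-runqueue. A sketch of how the macros fit together, assuming the usual per-CPU runqueues layout from kernel/sched/sched.h:

    /* One struct rq per CPU. */
    DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

    #define cpu_rq(cpu)     (&per_cpu(runqueues, (cpu)))
    #define task_rq(p)      cpu_rq(task_cpu(p))     /* rq of the CPU p is placed on */

    /* Hit 1450: the CFS sub-runqueue of whatever rq the task is on
     * (the !CONFIG_FAIR_GROUP_SCHED form). */
    static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
    {
            return &task_rq(p)->cfs;
    }

Nothing here takes a lock: task_rq(p) is only a snapshot of p's placement, which is why the callers in the other files either already hold the matching rq lock or re-check the result as __task_rq_lock() does.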
 1443  (lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu)));  in deref_task_numa_group()
 6634  SCHED_WARN_ON(task_rq(p) != rq);  in hrtick_start_fair()
 9205  WARN_ON_ONCE(task_rq(p) != rq);  in attach_task()
12549  struct rq *rq = task_rq(a);  in cfs_prio_less()
12556  SCHED_WARN_ON(task_rq(b)->core != rq->core);  in cfs_prio_less()
12579  cfs_rqa = &task_rq(a)->cfs;  in cfs_prio_less()
12580  cfs_rqb = &task_rq(b)->cfs;  in cfs_prio_less()
12631  check_update_overutilized_status(task_rq(curr));  in task_tick_fair()
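The WARN hits at 6634 and 9205 show the other common use of task_rq(): callers that already hold the runqueue lock they were handed only assert that the task still maps to it, rather than deriving the lock from task_rq() themselves; hit 1443 is the lockdep flavour of the same check, and the cfs_prio_less() hits assert (12556) that the two tasks being compared sit on runqueues sharing the same core. A minimal sketch of the assertion idiom, with a hypothetical helper name and the enqueue details elided:

    /* Caller has already locked rq and believes p belongs to it; task_rq(p)
     * is consulted only to catch a stale mapping, never to choose the lock. */
    static void attach_task_sketch(struct rq *rq, struct task_struct *p)
    {
            lockdep_assert_rq_held(rq);
            WARN_ON_ONCE(task_rq(p) != rq);         /* p must not have moved away */
            /* ... enqueue p on rq ... */
    }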