core.c — cross-reference listing for the identifier rq
(one row per reference; "argument" = rq is a function parameter, "local" = rq is a local variable)

Line  | Enclosing function                  | rq usage
------+-------------------------------------+--------------------------------------------------------------
118   | (file scope)                        | DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
239   | sched_core_enqueue                  | argument
241   | sched_core_enqueue                  | rq->core->core_task_seq++;
246   | sched_core_enqueue                  | rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
249   | sched_core_dequeue                  | argument
251   | sched_core_dequeue                  | rq->core->core_task_seq++;
254   | sched_core_dequeue                  | rb_erase(&p->core_node, &rq->core_tree);
263   | sched_core_dequeue                  | if (!(flags & DEQUEUE_SAVE) && rq
299   | sched_core_find                     | argument
453   | sched_core_enqueue                  | argument
455   | sched_core_dequeue                  | argument
551   | raw_spin_rq_lock_nested             | argument
576   | raw_spin_rq_trylock                 | argument
600   | raw_spin_rq_unlock                  | argument
630   | __task_rq_lock                      | local
655   | task_rq_lock                        | local
694   | update_rq_clock_task                | argument
751   | update_rq_clock                     | argument
778   | hrtick_clear                        | argument
790   | hrtick                              | local (= container_of(timer, struct rq, hrtick_timer))
805   | __hrtick_restart                    | argument
818   | __hrtick_start                      | local (= arg)
831   | hrtick_start                        | argument
855   | hrtick_start                        | argument
868   | hrtick_rq_init                      | argument
877   | hrtick_clear                        | argument
881   | hrtick_rq_init                      | argument
1042  | resched_curr                        | argument
1068  | resched_cpu                         | local (= cpu_rq(cpu))
1131  | wake_up_idle_cpu                    | local (= cpu_rq(cpu))
1175  | nohz_csd_func                       | local (= info)
1195  | __need_bw_check                     | argument
1209  | sched_can_stop_tick                 | argument
1415  | uclamp_idle_value                   | argument
1431  | uclamp_idle_reset                   | argument
1442  | uclamp_rq_max_value                 | argument
1482  | uclamp_update_util_min_rt_default   | local
1564  | uclamp_rq_inc_id                    | argument
1602  | uclamp_rq_dec_id                    | argument
1668  | uclamp_rq_inc                       | argument
1692  | uclamp_rq_dec                       | argument
1712  | uclamp_rq_reinc_id                  | argument
1734  | uclamp_update_active                | local
2011  | init_uclamp_rq                      | argument
2051  | uclamp_rq_inc                       | argument
2052  | uclamp_rq_dec                       | argument
2089  | enqueue_task                        | argument
2106  | dequeue_task                        | argument
2123  | activate_task                       | argument
2135  | deactivate_task                     | argument
2206  | check_class_changed                 | argument
2219  | wakeup_preempt                      | argument
2288  | wait_task_inactive                  | local
2390  | migrate_disable_switch              | argument
2460  | rq_has_pinned_tasks                 | argument
2514  | move_queued_task                    | argument
2560  | __migrate_task                      | argument
2582  | migration_cpu_stop                  | local (= this_rq())
2685  | push_cpu_stop                       | local (*lowest_rq = NULL, *rq = this_rq())
2750  | __do_set_cpus_allowed               | local (= task_rq(p))
2961  | affine_move_task                    | argument
3110  | __set_cpus_allowed_ptr_locked       | argument
3200  | __set_cpus_allowed_ptr              | local
3244  | restrict_cpus_allowed_ptr           | local
3706  | migrate_disable_switch              | argument
3708  | rq_has_pinned_tasks                 | argument
3723  | ttwu_stat                           | local
3769  | ttwu_do_activate                    | argument
3850  | ttwu_runnable                       | local
3875  | sched_ttwu_pending                  | local (= this_rq())
3933  | __ttwu_queue_wakelist               | local (= cpu_rq(cpu))
3943  | wake_up_if_idle                     | local (= cpu_rq(cpu))
4023  | ttwu_queue                          | local (= cpu_rq(cpu))
4407  | task_call_func                      | local (= NULL)
4454  | cpu_curr_snapshot                   | local (= cpu_rq(cpu))
4851  | wake_up_new_task                    | local
5010  | do_balance_callbacks                | argument
5046  | __splice_balance_callbacks          | argument
5070  | splice_balance_callbacks            | argument
5075  | __balance_callbacks                 | argument
5080  | balance_callbacks                   | argument
5093  | __balance_callbacks                 | argument
5097  | splice_balance_callbacks            | argument
5102  | balance_callbacks                   | argument
5109  | prepare_lock_switch                 | argument
5125  | finish_lock_switch                  | argument
5179  | prepare_task_switch                 | argument
5214  | finish_task_switch                  | local (= this_rq())
5324  | context_switch                      | argument
5547  | task_sched_runtime                  | local
5584  | cpu_resched_latency                 | argument
5629  | cpu_resched_latency                 | argument
5639  | scheduler_tick                      | local (= cpu_rq(cpu))
5723  | sched_tick_remote                   | local (= cpu_rq(cpu))
5965  | put_prev_task_balance               | argument
5991  | __pick_next_task                    | argument
6049  | pick_task                           | argument
6068  | pick_next_task                      | argument
6367  | sched_core_balance                  | argument
6389  | queue_core_balance                  | argument
6411  | sched_core_cpu_starting             | local (= cpu_rq(cpu), *core_rq = NULL)
6450  | sched_core_cpu_deactivate           | local (= cpu_rq(cpu), *core_rq = NULL)
6500  | sched_core_cpu_dying                | local (= cpu_rq(cpu))
6513  | pick_next_task                      | argument
6583  | __schedule                          | local
7071  | rt_mutex_setprio                    | local
7194  | set_user_nice                       | local
7329  | idle_cpu                            | local (= cpu_rq(cpu))
7376  | sched_core_idle_cpu                 | local (= cpu_rq(cpu))
7412  | effective_cpu_util                  | local (= cpu_rq(cpu))
7633  | __sched_setscheduler                | local
8533  | do_sched_yield                      | local
8936  | yield_to                            | local (*rq, *p_rq)
9091  | sched_rr_get_interval               | local
9260  | init_idle                           | local (= cpu_rq(cpu))
9382  | sched_setnuma                       | local
9426  | __balance_push_cpu_stop             | local (= this_rq())
9456  | balance_push                        | argument
9521  | balance_push_set                    | local (= cpu_rq(cpu))
9542  | balance_hotplug_wait                | local (= this_rq())
9551  | balance_push                        | argument
9565  | set_rq_online                       | argument
9580  | set_rq_offline                      | argument
9596  | sched_set_rq_online                 | argument
9608  | sched_set_rq_offline                | argument
9688  | sched_cpu_activate                  | local (= cpu_rq(cpu))
9724  | sched_cpu_deactivate                | local (= cpu_rq(cpu))
9785  | sched_rq_cpu_starting               | local (= cpu_rq(cpu))
9827  | calc_load_migrate                   | argument
9835  | dump_rq_tasks                       | argument
9856  | sched_cpu_dying                     | local (= cpu_rq(cpu))
10000 | sched_init                          | local
10529 | sched_move_task                     | local
10928 | tg_set_cfs_bandwidth                | local (= cfs_rq->rq)
11598 | call_trace_sched_update_nr_running  | argument
12040 | task_tick_mm_cid                    | argument
11880 | sched_mm_cid_remote_clear           | local (= cpu_rq(cpu))
11938 | sched_mm_cid_remote_clear_old       | local (= cpu_rq(cpu))
12059 | sched_mm_cid_exit_signals           | local
12083 | sched_mm_cid_before_execve          | local
12107 | sched_mm_cid_after_execve           | local

(listing truncated in the original: [all...])
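The listing is only an index, but the pattern it keeps circling is simple: core.c keeps one struct rq per CPU (the DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues) entry at line 118), every function either receives that rq as an argument or looks it up via cpu_rq()/this_rq(), and the queue is only touched with its lock held (raw_spin_rq_lock*, task_rq_lock). The sketch below is a deliberately tiny user-space analogue of that shape, not kernel code: NR_CPUS, struct task, and the FIFO list are illustrative stand-ins, and the real enqueue_task()/dequeue_task() dispatch through per-class callbacks rather than a linked list.

/*
 * Illustrative user-space sketch only -- NOT kernel code.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

struct task {
	int pid;
	struct task *next;
};

struct rq {
	pthread_mutex_t lock;     /* stands in for the per-runqueue lock */
	unsigned int nr_running;
	struct task *head;        /* trivial FIFO instead of the sched classes */
};

/* stands in for DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues) */
static struct rq runqueues[NR_CPUS];

#define cpu_rq(cpu) (&runqueues[(cpu)])

static void enqueue_task(struct rq *rq, struct task *p)
{
	pthread_mutex_lock(&rq->lock);    /* queue is only touched under rq->lock */
	p->next = rq->head;
	rq->head = p;
	rq->nr_running++;
	pthread_mutex_unlock(&rq->lock);
}

static struct task *dequeue_task(struct rq *rq)
{
	struct task *p;

	pthread_mutex_lock(&rq->lock);
	p = rq->head;
	if (p) {
		rq->head = p->next;
		rq->nr_running--;
	}
	pthread_mutex_unlock(&rq->lock);
	return p;
}

int main(void)
{
	struct task t1 = { .pid = 1 }, t2 = { .pid = 2 };
	struct task *p;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		pthread_mutex_init(&cpu_rq(cpu)->lock, NULL);

	enqueue_task(cpu_rq(0), &t1);
	enqueue_task(cpu_rq(0), &t2);

	while ((p = dequeue_task(cpu_rq(0))) != NULL)
		printf("picked pid %d from cpu 0\n", p->pid);
	return 0;
}

The point of the sketch is only the lookup-then-lock discipline visible throughout the index above: callers resolve a CPU to its runqueue once, then do all queue manipulation with that runqueue's lock held.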