fair.c  (cross-reference search results: fair.c line number, matched source line, enclosing context)

      1  // SPDX-License-Identifier: GPL-2.0
     43  #include <linux/memory-tiers.h>
     61  * The initial- and re-scaling of tunables is configurable
     65  *   SCHED_TUNABLESCALING_NONE - unscaled, always *1
     66  *   SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
     67  *   SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
     74  * Minimal preemption granularity for CPU
    106  arch_asym_cpu_priority(int cpu)   [arch_asym_cpu_priority() argument]
    334  int cpu = cpu_of(rq);   [list_add_leaf_cfs_rq() local]
   1353  is_core_idle(int cpu)   [is_core_idle() argument]
   2046  numa_idle_core(int idle_core, int cpu)   [numa_idle_core() argument]
   2062  numa_idle_core(int idle_core, int cpu)   [numa_idle_core() argument]
   2078  int cpu, idle_core = -1;   [update_numa_stats() local]
   2121  int cpu;   [task_numa_assign() local]
   2348  int cpu = env->dst_stats.idle_cpu;   [task_numa_compare() local]
   2394  int cpu;   [task_numa_find_cpu() local]
   2980  int cpu = cpupid_to_cpu(cpupid);   [task_numa_group() local]
   4985  util_fits_cpu(unsigned long util, unsigned long uclamp_min, unsigned long uclamp_max, int cpu)   [util_fits_cpu() argument]
   4988  unsigned long capacity = capacity_of(cpu);   [util_fits_cpu() local]
   5101  task_fits_cpu(struct task_struct *p, int cpu)   [task_fits_cpu() argument]
   6283  sync_throttle(struct task_group *tg, int cpu)   [sync_throttle() argument]
   6548  int cpu = cpu_of(rq);   [sched_fair_update_stop_tick() local]
   6580  sync_throttle(struct task_group *tg, int cpu)   [sync_throttle() argument]
   6674  cpu_overutilized(int cpu)   [cpu_overutilized() argument]
   6722  sched_idle_cpu(int cpu)   [sched_idle_cpu() argument]
   6976  capacity_of(int cpu)   [capacity_of() argument]
   7194  find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, int cpu, int prev_cpu, int sd_flag)   [find_idlest_cpu() argument]
   7246  __select_idle_cpu(int cpu, struct task_struct *p)   [__select_idle_cpu() argument]
   7259  set_idle_cores(int cpu, int val)   [set_idle_cores() argument]
   7268  test_idle_cores(int cpu)   [test_idle_cores() argument]
   7289  int cpu;   [__update_idle_core() local]
   7316  int cpu;   [select_idle_core() local]
   7346  int cpu;   [select_idle_smt() local]
   7366  set_idle_cores(int cpu, int val)   [set_idle_cores() argument]
   7370  test_idle_cores(int cpu)   [test_idle_cores() argument]
   7395  int i, cpu, idle_cpu = -1, nr = INT_MAX;   [select_idle_cpu() local]
   7490  int cpu, best_cpu = -1;   [select_idle_capacity() local]
   7536  asym_fits_cpu(unsigned long util, unsigned long util_min, unsigned long util_max, int cpu)   [asym_fits_cpu() argument]
   7697  cpu_util(int cpu, struct task_struct *p, int dst_cpu, int boost)   [cpu_util() argument]
   7761  cpu_util_cfs(int cpu)   [cpu_util_cfs() argument]
   7766  cpu_util_cfs_boost(int cpu)   [cpu_util_cfs_boost() argument]
   7784  cpu_util_without(int cpu, struct task_struct *p)   [cpu_util_without() argument]
   7855  int cpu;   [eenv_pd_busy_time() local]
   7878  int cpu;   [eenv_pd_max_util() local]
   7963  int cpu, best_energy_cpu, target = -1;   [find_energy_efficient_cpu() local]
   8159  int cpu = smp_processor_id();   [select_task_rq_fair() local]
   8942  int cpu;   [can_migrate_task() local]
   9325  int cpu = cpu_of(rq);   [__update_blocked_fair() local]
   9429  update_blocked_averages(int cpu)   [update_blocked_averages() argument]
   9510  scale_rt_capacity(int cpu)   [scale_rt_capacity() argument]
   9540  update_cpu_capacity(struct sched_domain *sd, int cpu)   [update_cpu_capacity() argument]
   9542  unsigned long capacity = scale_rt_capacity(cpu);   [update_cpu_capacity() local]
   9558  update_group_capacity(struct sched_domain *sd, int cpu)   [update_group_capacity() argument]
   9562  unsigned long capacity, min_capacity, max_capacity;   [update_group_capacity() local]
   9762  sched_use_asym_prio(struct sched_domain *sd, int cpu)   [sched_use_asym_prio() argument]
  10174  task_running_on_cpu(int cpu, struct task_struct *p)   [task_running_on_cpu() argument]
  10193  idle_cpu_without(int cpu, struct task_struct *p)   [idle_cpu_without() argument]
  11004  unsigned long capacity, load, util;   [find_busiest_queue() local]
  11214  int cpu, idle_smt = -1;   [should_we_balance() local]
  11711  int cpu = rq->cpu;   [rebalance_domains() local]
  11869  int nr_busy, i, cpu = rq->cpu;   [nohz_balancer_kick() local]
  11982  set_cpu_sd_state_busy(int cpu)   [set_cpu_sd_state_busy() argument]
  12012  set_cpu_sd_state_idle(int cpu)   [set_cpu_sd_state_idle() argument]
  12032  nohz_balance_enter_idle(int cpu)   [nohz_balance_enter_idle() argument]
  12091  unsigned int cpu = rq->cpu;   [update_nohz_stats() local]
  12236  nohz_run_idle_balance(int cpu)   [nohz_run_idle_balance() argument]
  12592  task_is_throttled_fair(struct task_struct *p, int cpu)   [task_is_throttled_fair() argument]
  12920  int cpu;   [unregister_fair_sched_group() local]
  12944  init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu, struct sched_entity *parent)   [init_tg_cfs_entry() argument]
  13184  print_cfs_stats(struct seq_file *m, int cpu)   [print_cfs_stats() argument]
  [all ...]  (remaining matches truncated)
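
The tunable-scaling comment quoted above (fair.c lines 61..67) describes how CFS rescales its tunables with the number of online CPUs: not at all, logarithmically (1 + ilog(ncpus)), or linearly (ncpus). The following is a minimal standalone sketch of that factor computation, not the kernel's own code; the enum constant names are reused only for illustration, and ilog() here stands in for the kernel's ilog2().

#include <stdio.h>

/* Scaling modes as described in the fair.c comment block. */
enum sched_tunable_scaling {
        SCHED_TUNABLESCALING_NONE,      /* unscaled, always *1   */
        SCHED_TUNABLESCALING_LOG,       /* *1 + ilog(ncpus)      */
        SCHED_TUNABLESCALING_LINEAR,    /* *ncpus                */
};

/* Integer base-2 logarithm, standing in for the kernel's ilog2(). */
static unsigned int ilog(unsigned int n)
{
        unsigned int l = 0;

        while (n >>= 1)
                l++;
        return l;
}

/* Illustrative scaling-factor computation for a given CPU count. */
static unsigned int tunable_scaling_factor(enum sched_tunable_scaling mode,
                                           unsigned int ncpus)
{
        switch (mode) {
        case SCHED_TUNABLESCALING_NONE:
                return 1;
        case SCHED_TUNABLESCALING_LINEAR:
                return ncpus;
        case SCHED_TUNABLESCALING_LOG:
        default:
                return 1 + ilog(ncpus);
        }
}

int main(void)
{
        /* e.g. an 8-CPU machine: NONE -> 1, LOG -> 4, LINEAR -> 8 */
        printf("%u %u %u\n",
               tunable_scaling_factor(SCHED_TUNABLESCALING_NONE, 8),
               tunable_scaling_factor(SCHED_TUNABLESCALING_LOG, 8),
               tunable_scaling_factor(SCHED_TUNABLESCALING_LINEAR, 8));
        return 0;
}

In fair.c itself the corresponding helper is get_update_sysctl_factor(), which performs this switch on sysctl_sched_tunable_scaling and additionally clamps the CPU count it scales by.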