/openbmc/qemu/tests/functional/
  test_x86_cpu_model_versions.py
    32 def validate_aliases(self, cpus): argument
    33 for c in cpus.values():
    36 self.assertIn(c['alias-of'], cpus,
    39 self.assertNotIn('alias-of', cpus[c['alias-of']],
    45 def validate_variant_aliases(self, cpus): argument
    48 self.assertNotIn("Haswell-noTSX-v1", cpus,
    50 self.assertNotIn("Broadwell-noTSX-v1", cpus,
    52 self.assertNotIn("Nehalem-IBRS-v1", cpus,
    54 self.assertNotIn("Westmere-IBRS-v1", cpus,
    56 self.assertNotIn("SandyBridge-IBRS-v1", cpus,
    [all …]

/openbmc/linux/tools/lib/perf/
  cpumap.c
    20 RC_STRUCT(perf_cpu_map) *cpus = malloc(sizeof(*cpus) + sizeof(struct perf_cpu) * nr_cpus); in perf_cpu_map__alloc()
    23 if (ADD_RC_CHK(result, cpus)) { in perf_cpu_map__alloc()
    24 cpus->nr = nr_cpus; in perf_cpu_map__alloc()
    25 refcount_set(&cpus->refcnt, 1); in perf_cpu_map__alloc()
    32 struct perf_cpu_map *cpus = perf_cpu_map__alloc(1); in perf_cpu_map__dummy_new()
    34 if (cpus) in perf_cpu_map__dummy_new()
    35 RC_CHK_ACCESS(cpus)->map[0].cpu = -1; in perf_cpu_map__dummy_new()
    37 return cpus; in perf_cpu_map__dummy_new()
    71 struct perf_cpu_map *cpus; in cpu_map__default_new()
    78 cpus = perf_cpu_map__alloc(nr_cpus); in cpu_map__default_new()
    [all …]
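
The allocation and refcounting above back libperf's public CPU-map API. A minimal user-space sketch of that API, using only calls that also appear in these results (perf_cpu_map__new(), perf_cpu_map__for_each_cpu(), perf_cpu_map__put()); the <perf/cpumap.h> header path is assumed from tools/lib/perf/include:

    #include <stdio.h>
    #include <perf/cpumap.h>

    int main(void)
    {
        /* NULL asks libperf for a map of all online CPUs */
        struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
        struct perf_cpu cpu;
        int idx;

        if (!cpus)
            return 1;

        /* idx is the position in the map, cpu.cpu the logical CPU number */
        perf_cpu_map__for_each_cpu(cpu, idx, cpus)
            printf("map index %d -> cpu %d\n", idx, cpu.cpu);

        perf_cpu_map__put(cpus);   /* drop the reference taken at creation */
        return 0;
    }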

/openbmc/linux/drivers/cpuidle/
  coupled.c
    3 * coupled.c - helper functions to enter the same idle state on multiple cpus
    24 * cpus cannot be independently powered down, either due to
    31 * shared between the cpus (L2 cache, interrupt controller, and
    33 * be tightly controlled on both cpus.
    36 * WFI state until all cpus are ready to enter a coupled state, at
    38 * cpus at approximately the same time.
    40 * Once all cpus are ready to enter idle, they are woken by an smp
    42 * cpus will find work to do, and choose not to enter idle. A
    43 * final pass is needed to guarantee that all cpus will call the
    46 * ready counter matches the number of online coupled cpus. If any
    [all …]
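
The matched comments describe a ready-counter barrier: each CPU waits in a safe shallow state until every coupled CPU has arrived, then all enter the deep state together. An illustrative-only sketch of that idea (not the actual coupled.c implementation, which additionally handles wakeups, aborts, and the final pass mentioned above):

    #include <linux/atomic.h>
    #include <linux/processor.h>

    /* Illustrative only: each coupled CPU announces it is ready, then spins
     * (standing in for the "safe" WFI-like wait) until every coupled CPU
     * has arrived, so all of them enter the deep state together. */
    static atomic_t ready_count;

    static void wait_until_all_coupled_ready(int n_coupled_cpus)
    {
        atomic_inc(&ready_count);
        while (atomic_read(&ready_count) < n_coupled_cpus)
            cpu_relax();
    }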

/openbmc/linux/Documentation/admin-guide/cgroup-v1/
  cpusets.rst
    31 2.2 Adding/removing cpus
    43 Cpusets provide a mechanism for assigning a set of CPUs and Memory
    57 include CPUs in its CPU affinity mask, and using the mbind(2) and
    60 CPUs or Memory Nodes not in that cpuset. The scheduler will not
    67 cpusets and which CPUs and Memory Nodes are assigned to each cpuset,
    75 The management of large computer systems, with many processors (CPUs),
    113 Cpusets provide a Linux kernel mechanism to constrain which CPUs and
    117 CPUs a task may be scheduled (sched_setaffinity) and on which Memory
    122 - Cpusets are sets of allowed CPUs and Memory Nodes, known to the
    126 - Calls to sched_setaffinity are filtered to just those CPUs
    [all …]
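
The cpuset semantics above hinge on sched_setaffinity(2) being filtered against the calling task's cpuset. A small stand-alone illustration of the call being constrained (CPU number 2 is an arbitrary example):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        cpu_set_t set;

        CPU_ZERO(&set);
        CPU_SET(2, &set);               /* ask to run only on CPU 2 */

        /* If the calling task's cpuset does not include CPU 2, the
         * request is filtered/rejected as described in cpusets.rst. */
        if (sched_setaffinity(0, sizeof(set), &set) != 0)
            perror("sched_setaffinity");

        return 0;
    }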

/openbmc/linux/Documentation/timers/
  no_hz.rst
    19 2. Omit scheduling-clock ticks on idle CPUs (CONFIG_NO_HZ_IDLE=y or
    23 3. Omit scheduling-clock ticks on CPUs that are either idle or that
    65 Omit Scheduling-Clock Ticks For Idle CPUs
    78 scheduling-clock interrupts to idle CPUs, which is critically important
    86 idle CPUs. That said, dyntick-idle mode is not free:
    104 Omit Scheduling-Clock Ticks For CPUs With Only One Runnable Task
    109 Note that omitting scheduling-clock ticks for CPUs with only one runnable
    110 task implies also omitting them for idle CPUs.
    113 sending scheduling-clock interrupts to CPUs with a single runnable task,
    114 and such CPUs are said to be "adaptive-ticks CPUs". This is important
    [all …]

/openbmc/linux/tools/lib/perf/tests/
  test-cpumap.c
    16 struct perf_cpu_map *cpus; in test_cpumap()
    24 cpus = perf_cpu_map__dummy_new(); in test_cpumap()
    25 if (!cpus) in test_cpumap()
    28 perf_cpu_map__get(cpus); in test_cpumap()
    29 perf_cpu_map__put(cpus); in test_cpumap()
    30 perf_cpu_map__put(cpus); in test_cpumap()
    32 cpus = perf_cpu_map__default_new(); in test_cpumap()
    33 if (!cpus) in test_cpumap()
    36 perf_cpu_map__for_each_cpu(cpu, idx, cpus) in test_cpumap()
    39 perf_cpu_map__put(cpus); in test_cpumap()

/openbmc/linux/include/linux/
  stop_machine.h
    13 * function to be executed on a single or multiple cpus preempting all
    14 * other processes and monopolizing those cpus until it finishes.
    18 * cpus are online.
    99 * stop_machine: freeze the machine on all CPUs and run this function
    102 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
    114 int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
    117 * stop_machine_cpuslocked: freeze the machine on all CPUs and run this function
    120 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
    125 int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
    133 * Same as above, but instead of every CPU, only the logical CPUs of a
    [all …]
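
The prototypes above take a callback plus an optional cpumask. A hedged sketch of a kernel-side caller (the callback name and its body are hypothetical; only the stop_machine() signature comes from the header shown):

    #include <linux/stop_machine.h>

    /* Hypothetical callback: by the time it runs, every online CPU is
     * spinning with interrupts disabled, so the update below appears
     * atomic to the rest of the system. */
    static int apply_cross_cpu_update(void *data)
    {
        /* ... perform the update that must not race with other CPUs ... */
        return 0;
    }

    static int do_update(void)
    {
        /* NULL cpumask: let stop_machine() run the callback on any one
         * online CPU while all others are held. */
        return stop_machine(apply_cross_cpu_update, NULL, NULL);
    }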

/openbmc/linux/arch/riscv/kernel/
  sys_riscv.c
    86 const struct cpumask *cpus) in hwprobe_arch_id()
    92 for_each_cpu(cpu, cpus) { in hwprobe_arch_id()
    126 const struct cpumask *cpus) in hwprobe_isa_ext0()
    145 for_each_cpu(cpu, cpus) { in hwprobe_isa_ext0()
    168 static u64 hwprobe_misaligned(const struct cpumask *cpus) in hwprobe_misaligned()
    173 for_each_cpu(cpu, cpus) { in hwprobe_misaligned()
    192 const struct cpumask *cpus) in hwprobe_one_pair()
    198 hwprobe_arch_id(pair, cpus); in hwprobe_one_pair()
    211 hwprobe_isa_ext0(pair, cpus); in hwprobe_one_pair()
    215 pair->value = hwprobe_misaligned(cpus); in hwprobe_one_pair()
    [all …]

/openbmc/qemu/tests/qtest/
  numa-test.c
    28 cli = make_cli(data, "-machine smp.cpus=8 -numa node,nodeid=0,memdev=ram,cpus=0-3 " in test_mon_explicit()
    29 "-numa node,nodeid=1,cpus=4-7"); in test_mon_explicit()
    33 g_assert(strstr(s, "node 0 cpus: 0 1 2 3")); in test_mon_explicit()
    34 g_assert(strstr(s, "node 1 cpus: 4 5 6 7")); in test_mon_explicit()
    45 cli = make_cli(data, "-machine smp.cpus=8,smp.sockets=8 " in test_def_cpu_split()
    50 g_assert(strstr(s, "node 0 cpus: 0 2 4 6")); in test_def_cpu_split()
    51 g_assert(strstr(s, "node 1 cpus: 1 3 5 7")); in test_def_cpu_split()
    62 cli = make_cli(data, "-machine smp.cpus=8 " in test_mon_partial()
    63 "-numa node,nodeid=0,memdev=ram,cpus=0-1 " in test_mon_partial()
    64 "-numa node,nodeid=1,cpus=4-5 "); in test_mon_partial()
    [all …]

/openbmc/linux/drivers/clk/sunxi/
  clk-sun9i-cpus.c
    7 * Allwinner A80 CPUS clock driver
    22 * sun9i_a80_cpus_clk_setup() - Setup function for a80 cpus composite clk
    55 struct sun9i_a80_cpus_clk *cpus = to_sun9i_a80_cpus_clk(hw); in sun9i_a80_cpus_clk_recalc_rate()
    60 reg = readl(cpus->reg); in sun9i_a80_cpus_clk_recalc_rate()
    155 struct sun9i_a80_cpus_clk *cpus = to_sun9i_a80_cpus_clk(hw); in sun9i_a80_cpus_clk_set_rate()
    162 reg = readl(cpus->reg); in sun9i_a80_cpus_clk_set_rate()
    170 writel(reg, cpus->reg); in sun9i_a80_cpus_clk_set_rate()
    188 struct sun9i_a80_cpus_clk *cpus; in sun9i_a80_cpus_setup()
    193 cpus = kzalloc(sizeof(*cpus), GFP_KERNEL); in sun9i_a80_cpus_setup()
    194 if (!cpus) in sun9i_a80_cpus_setup()
    [all …]

/openbmc/linux/tools/perf/arch/arm64/util/
  header.c
    19 static int _get_cpuid(char *buf, size_t sz, struct perf_cpu_map *cpus) in _get_cpuid()
    28 cpus = perf_cpu_map__get(cpus); in _get_cpuid()
    30 for (cpu = 0; cpu < perf_cpu_map__nr(cpus); cpu++) { in _get_cpuid()
    35 sysfs, RC_CHK_ACCESS(cpus)->map[cpu].cpu); in _get_cpuid()
    54 perf_cpu_map__put(cpus); in _get_cpuid()
    60 struct perf_cpu_map *cpus = perf_cpu_map__new(NULL); in get_cpuid()
    63 if (!cpus) in get_cpuid()
    66 ret = _get_cpuid(buf, sz, cpus); in get_cpuid()
    68 perf_cpu_map__put(cpus); in get_cpuid()
    78 if (!pmu || !pmu->cpus) in get_cpuid_str()
    [all …]
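
The code above builds the CPUID string by reading each CPU's MIDR from sysfs. A small user-space sketch of the same read for a single CPU; the sysfs path is an assumption based on the arm64 MIDR export that _get_cpuid() formats per CPU:

    #include <stdio.h>

    int main(void)
    {
        /* Assumed path: the per-CPU midr_el1 file read by _get_cpuid(). */
        const char *path =
            "/sys/devices/system/cpu/cpu0/regs/identification/midr_el1";
        char buf[64];
        FILE *f = fopen(path, "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("cpu0 midr_el1: %s", buf);
        fclose(f);
        return 0;
    }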

/openbmc/qemu/hw/riscv/
  numa.c
    40 int i, first_hartid = ms->smp.cpus; in riscv_socket_first_hartid()
    46 for (i = 0; i < ms->smp.cpus; i++) { in riscv_socket_first_hartid()
    47 if (ms->possible_cpus->cpus[i].props.node_id != socket_id) { in riscv_socket_first_hartid()
    55 return (first_hartid < ms->smp.cpus) ? first_hartid : -1; in riscv_socket_first_hartid()
    63 return (!socket_id) ? ms->smp.cpus - 1 : -1; in riscv_socket_last_hartid()
    66 for (i = 0; i < ms->smp.cpus; i++) { in riscv_socket_last_hartid()
    67 if (ms->possible_cpus->cpus[i].props.node_id != socket_id) { in riscv_socket_last_hartid()
    75 return (last_hartid < ms->smp.cpus) ? last_hartid : -1; in riscv_socket_last_hartid()
    83 return (!socket_id) ? ms->smp.cpus : -1; in riscv_socket_hart_count()
    122 if (ms->possible_cpus->cpus[i].props.node_id != socket_id) { in riscv_socket_check_hartids()
    [all …]

/openbmc/linux/drivers/cpufreq/
  cpufreq-dt.c
    30 cpumask_var_t cpus;
    50 if (cpumask_test_cpu(cpu, priv->cpus)) in cpufreq_dt_find_data()
    129 cpumask_copy(policy->cpus, priv->cpus); in cpufreq_init()
    211 if (!zalloc_cpumask_var(&priv->cpus, GFP_KERNEL)) in dt_cpufreq_early_init()
    214 cpumask_set_cpu(cpu, priv->cpus); in dt_cpufreq_early_init()
    232 ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus); in dt_cpufreq_early_init()
    238 * operating-points-v2 not supported, fallback to all CPUs share in dt_cpufreq_early_init()
    240 * sharing CPUs. in dt_cpufreq_early_init()
    242 if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus)) in dt_cpufreq_early_init()
    247 * Initialize OPP tables for all priv->cpus. They will be shared by in dt_cpufreq_early_init()
    [all …]

/openbmc/linux/Documentation/scheduler/
  sched-energy.rst
    9 the impact of its decisions on the energy consumed by CPUs. EAS relies on an
    10 Energy Model (EM) of the CPUs to select an energy efficient CPU for each task,
    59 In short, EAS changes the way CFS tasks are assigned to CPUs. When it is time
    64 knowledge about the platform's topology, which include the 'capacity' of CPUs,
    72 differentiate CPUs with different computing throughput. The 'capacity' of a CPU
    76 tasks and CPUs computed by the Per-Entity Load Tracking (PELT) mechanism. Thanks
    79 energy trade-offs. The capacity of CPUs is provided via arch-specific code
    99 Let us consider a platform with 12 CPUs, split in 3 performance domains
    102 CPUs: 0 1 2 3 4 5 6 7 8 9 10 11
    108 containing 6 CPUs. The two root domains are denoted rd1 and rd2 in the
    [all …]

/openbmc/linux/Documentation/admin-guide/
  kernel-per-CPU-kthreads.rst
    13 - Documentation/core-api/irq/irq-affinity.rst: Binding interrupts to sets of CPUs.
    15 - Documentation/admin-guide/cgroup-v1: Using cgroups to bind tasks to sets of CPUs.
    18 of CPUs.
    21 call to bind tasks to sets of CPUs.
    50 2. Do all eHCA-Infiniband-related work on other CPUs, including
    53 provisioned only on selected CPUs.
    101 with multiple CPUs, force them all offline before bringing the
    102 first one back online. Once you have onlined the CPUs in question,
    103 do not offline any other CPUs, because doing so could force the
    104 timer back onto one of the CPUs in question.
    [all …]

/openbmc/qemu/hw/openrisc/
  openrisc_sim.c
    103 static qemu_irq get_cpu_irq(OpenRISCCPU *cpus[], int cpunum, int irq_pin) in get_cpu_irq()
    105 return qdev_get_gpio_in_named(DEVICE(cpus[cpunum]), "IRQ", irq_pin); in get_cpu_irq()
    136 qemu_fdt_add_subnode(fdt, "/cpus"); in openrisc_create_fdt()
    137 qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0x0); in openrisc_create_fdt()
    138 qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1); in openrisc_create_fdt()
    141 nodename = g_strdup_printf("/cpus/cpu@%d", cpu); in openrisc_create_fdt()
    172 int num_cpus, OpenRISCCPU *cpus[], in openrisc_sim_net_init()
    193 qdev_connect_gpio_out(splitter, i, get_cpu_irq(cpus, i, irq_pin)); in openrisc_sim_net_init()
    197 sysbus_connect_irq(s, 0, get_cpu_irq(cpus, 0, irq_pin)); in openrisc_sim_net_init()
    216 OpenRISCCPU *cpus[], int irq_pin) in openrisc_sim_ompic_init()
    [all …]

  virt.c
    103 static qemu_irq get_cpu_irq(OpenRISCCPU *cpus[], int cpunum, int irq_pin) in get_cpu_irq()
    105 return qdev_get_gpio_in_named(DEVICE(cpus[cpunum]), "IRQ", irq_pin); in get_cpu_irq()
    108 static qemu_irq get_per_cpu_irq(OpenRISCCPU *cpus[], int num_cpus, int irq_pin) in get_per_cpu_irq()
    117 qdev_connect_gpio_out(splitter, i, get_cpu_irq(cpus, i, irq_pin)); in get_per_cpu_irq()
    121 return get_cpu_irq(cpus, 0, irq_pin); in get_per_cpu_irq()
    160 qemu_fdt_add_subnode(fdt, "/cpus"); in openrisc_create_fdt()
    161 qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0x0); in openrisc_create_fdt()
    162 qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1); in openrisc_create_fdt()
    165 nodename = g_strdup_printf("/cpus/cpu@%d", cpu); in openrisc_create_fdt()
    201 OpenRISCCPU *cpus[], int irq_pin) in openrisc_virt_ompic_init()
    [all …]

/openbmc/linux/tools/testing/selftests/cgroup/
  test_cpuset_prs.sh
    26 CPUS=$(lscpu | grep "^CPU(s):" | sed -e "s/.*:[[:space:]]*//")
    27 [[ $CPUS -lt 8 ]] && skip_test "Test needs at least 8 cpus available!"
    102 echo $EXPECTED_VAL > cpuset.cpus.partition
    104 ACTUAL_VAL=$(cat cpuset.cpus.partition)
    106 echo "cpuset.cpus.partition: expect $EXPECTED_VAL, found $EXPECTED_VAL"
    115 ACTUAL_VAL=$(cat cpuset.cpus.effective)
    117 echo "cpuset.cpus.effective: expect '$EXPECTED_VAL', found '$EXPECTED_VAL'"
    142 echo 2-3 > cpuset.cpus
    143 TYPE=$(cat cpuset.cpus.partition)
    144 [[ $TYPE = member ]] || echo member > cpuset.cpus.partition
    [all …]

/openbmc/linux/Documentation/devicetree/bindings/csky/
  cpus.txt
    5 The device tree allows to describe the layout of CPUs in a system through
    6 the "cpus" node, which in turn contains a number of subnodes (ie "cpu")
    9 Only SMP system need to care about the cpus node and single processor
    10 needn't define cpus node at all.
    13 cpus and cpu node bindings definition
    16 - cpus node
    20 The node name must be "cpus".
    22 A cpus node must define the following properties:
    59 cpus {

/openbmc/linux/tools/perf/tests/
  openat-syscall-all-cpus.c
    27 struct perf_cpu_map *cpus; in test__openat_syscall_event_on_all_cpus()
    40 cpus = perf_cpu_map__new(NULL); in test__openat_syscall_event_on_all_cpus()
    41 if (cpus == NULL) { in test__openat_syscall_event_on_all_cpus()
    56 if (evsel__open(evsel, cpus, threads) < 0) { in test__openat_syscall_event_on_all_cpus()
    64 perf_cpu_map__for_each_cpu(cpu, idx, cpus) { in test__openat_syscall_event_on_all_cpus()
    69 * without CPU_ALLOC. 1024 cpus in 2010 still seems in test__openat_syscall_event_on_all_cpus()
    91 evsel->core.cpus = perf_cpu_map__get(cpus); in test__openat_syscall_event_on_all_cpus()
    95 perf_cpu_map__for_each_cpu(cpu, idx, cpus) { in test__openat_syscall_event_on_all_cpus()
    121 perf_cpu_map__put(cpus); in test__openat_syscall_event_on_all_cpus()
    129 TEST_CASE_REASON("Detect openat syscall event on all cpus",
    [all …]
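
The test above opens one event on every CPU and reads the per-CPU counts. Roughly the same flow is available through libperf's public counting API; the sketch below is an assumption-laden outline (header paths under <perf/...>, and the pid-0 "monitor self" convention are taken from the libperf counting documentation, not from this test):

    #include <stdio.h>
    #include <linux/perf_event.h>
    #include <perf/cpumap.h>
    #include <perf/threadmap.h>
    #include <perf/evsel.h>

    int main(void)
    {
        struct perf_event_attr attr = {
            .type   = PERF_TYPE_SOFTWARE,
            .config = PERF_COUNT_SW_CPU_CLOCK,
        };
        struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);   /* all online CPUs */
        struct perf_thread_map *threads = perf_thread_map__new_dummy();
        struct perf_evsel *evsel = perf_evsel__new(&attr);
        struct perf_counts_values counts;
        int idx;

        if (!cpus || !threads || !evsel)
            return 1;

        perf_thread_map__set_pid(threads, 0, 0);        /* 0 = this process */

        if (perf_evsel__open(evsel, cpus, threads) < 0)
            return 1;

        /* ... run some work here ... */

        for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
            if (perf_evsel__read(evsel, idx, 0, &counts) == 0)
                printf("cpu index %d: %llu\n", idx,
                       (unsigned long long)counts.val);
        }

        perf_evsel__close(evsel);
        perf_evsel__delete(evsel);
        perf_cpu_map__put(cpus);
        perf_thread_map__put(threads);
        return 0;
    }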

/openbmc/linux/Documentation/power/
  suspend-and-cpuhotplug.rst
    27 |tasks | | cpus | | | | cpus | |tasks|
    59 online CPUs
    75 Note down these cpus in | P
    100 | Call _cpu_up() [for all those cpus in the frozen_cpus mask, in a loop]
    158 the non-boot CPUs are offlined or onlined, the _cpu_*() functions are called
    177 update on the CPUs, as discussed below:
    184 a. When all the CPUs are identical:
    187 to apply the same microcode revision to each of the CPUs.
    192 all CPUs, in order to handle case 'b' described below.
    195 b. When some of the CPUs are different than the rest:
    [all …]

/openbmc/qemu/hw/intc/
  ompic.c
    52 OR1KOMPICCPUState cpus[OMPIC_MAX_CPUS];
    64 return s->cpus[src_cpu].control; in ompic_read()
    66 return s->cpus[src_cpu].status; in ompic_read()
    78 s->cpus[src_cpu].control = data; in ompic_write()
    83 s->cpus[dst_cpu].status = OMPIC_STATUS_IRQ_PENDING | in ompic_write()
    87 qemu_irq_raise(s->cpus[dst_cpu].irq); in ompic_write()
    90 s->cpus[src_cpu].status &= ~OMPIC_STATUS_IRQ_PENDING; in ompic_write()
    91 qemu_irq_lower(s->cpus[src_cpu].irq); in ompic_write()
    122 error_setg(errp, "Exceeded maximum CPUs %d", s->num_cpus); in or1k_ompic_realize()
    125 /* Init IRQ sources for all CPUs */ in or1k_ompic_realize()
    [all …]

/openbmc/qemu/hw/core/
  machine-smp.c
    68 * Any missing parameter in "cpus/maxcpus/sockets/cores/threads" will be
    75 * In the calculation of cpus/maxcpus: When both maxcpus and cpus are omitted,
    76 * maxcpus will be computed from the given parameters and cpus will be set
    77 * equal to maxcpus. When only one of maxcpus and cpus is given then the
    79 * cpus may be specified, but maxcpus must be equal to or greater than cpus.
    89 unsigned cpus = config->has_cpus ? config->cpus : 0; in machine_parse_smp_config()
    103 * explicit configuration like "cpus=0" is not allowed. in machine_parse_smp_config()
    105 if ((config->has_cpus && config->cpus == 0) || in machine_parse_smp_config()
    165 if (cpus == 0 && maxcpus == 0) { in machine_parse_smp_config()
    170 maxcpus = maxcpus > 0 ? maxcpus : cpus; in machine_parse_smp_config()
    [all …]
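
A compact restatement of the cpus/maxcpus defaulting rule described in that comment, written as a stand-alone sketch (the helper name and unsigned-only handling are illustrative, not the actual machine_parse_smp_config() code):

    /* Illustrative only: how cpus and maxcpus default to each other. */
    static void resolve_cpus_maxcpus(unsigned *cpus, unsigned *maxcpus,
                                     unsigned sockets, unsigned cores,
                                     unsigned threads)
    {
        /* both omitted: derive maxcpus from the topology parameters */
        if (*cpus == 0 && *maxcpus == 0)
            *maxcpus = sockets * cores * threads;

        /* whichever of the two was given supplies the other's default */
        *maxcpus = *maxcpus > 0 ? *maxcpus : *cpus;
        *cpus    = *cpus    > 0 ? *cpus    : *maxcpus;

        /* callers must still reject cpus > maxcpus; "cpus=0" is invalid too */
    }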

/openbmc/linux/sound/soc/intel/boards/
  sof_cs42l42.c
    301 struct snd_soc_dai_link_component *cpus, in create_spk_amp_dai_links()
    336 links[*id].cpus = &cpus[*id]; in create_spk_amp_dai_links()
    339 links[*id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, in create_spk_amp_dai_links()
    341 if (!links[*id].cpus->dai_name) { in create_spk_amp_dai_links()
    354 struct snd_soc_dai_link_component *cpus, in create_hp_codec_dai_links()
    374 links[*id].cpus = &cpus[*id]; in create_hp_codec_dai_links()
    377 links[*id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, in create_hp_codec_dai_links()
    380 if (!links[*id].cpus->dai_name) in create_hp_codec_dai_links()
    393 struct snd_soc_dai_link_component *cpus, in create_dmic_dai_links()
    404 links[*id].cpus = &cpus[*id]; in create_dmic_dai_links()
    [all …]

  sof_ssp_amp.c
    198 struct snd_soc_dai_link_component *cpus; in sof_card_dai_links_create()
    204 cpus = devm_kcalloc(dev, sof_ssp_amp_card.num_links, in sof_card_dai_links_create()
    206 if (!links || !cpus) in sof_card_dai_links_create()
    220 links[id].cpus = &cpus[id]; in sof_card_dai_links_create()
    221 links[id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, in sof_card_dai_links_create()
    223 if (!links[id].cpus->dai_name) in sof_card_dai_links_create()
    257 links[id].cpus = &cpus[id]; in sof_card_dai_links_create()
    259 links[id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_codec); in sof_card_dai_links_create()
    260 if (!links[id].cpus->dai_name) in sof_card_dai_links_create()
    269 links[id].cpus = &cpus[id]; in sof_card_dai_links_create()
    [all …]