/openbmc/linux/arch/riscv/kernel/vdso/
  hwprobe.c
    11: size_t cpu_count, unsigned long *cpus,
    16: size_t cpu_count, unsigned long *cpus,
    20: size_t cpu_count, unsigned long *cpus,   [in __vdso_riscv_hwprobe(), argument]
    25: bool all_cpus = !cpu_count && !cpus;   [in __vdso_riscv_hwprobe()]
    36: return riscv_hwprobe(pairs, pair_count, cpu_count, cpus, flags);   [in __vdso_riscv_hwprobe()]
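The vDSO wrapper above and the syscall path in sys_riscv.c below share one argument convention: passing cpu_count == 0 with cpus == NULL asks about behaviour common to all online CPUs (the all_cpus test on line 25). A minimal userspace sketch of that call, assuming a RISC-V toolchain whose headers provide <asm/hwprobe.h> and __NR_riscv_hwprobe; this is illustrative and not code from the files listed here:

    /* Hedged sketch: probe the base behaviour bitmap on all online CPUs. */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/hwprobe.h>        /* struct riscv_hwprobe, RISCV_HWPROBE_KEY_* */

    int main(void)
    {
        struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR };

        /* cpu_count == 0 and cpus == NULL means "all online CPUs",
         * matching the all_cpus check in __vdso_riscv_hwprobe() above. */
        if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
            return 1;

        printf("base behavior bitmap: 0x%llx\n", (unsigned long long)pair.value);
        return 0;
    }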
/openbmc/linux/arch/riscv/kernel/
  smpboot.c
    78: static unsigned int cpu_count = 1;   [variable]
    108: early_map_cpu_to_node(0, acpi_numa_get_nid(cpu_count));   [in acpi_parse_rintc()]
    112: if (cpu_count >= NR_CPUS) {   [in acpi_parse_rintc()]
    117: cpuid_to_hartid_map(cpu_count) = hart;   [in acpi_parse_rintc()]
    118: early_map_cpu_to_node(cpu_count, acpi_numa_get_nid(cpu_count));   [in acpi_parse_rintc()]
    119: cpu_count++;   [in acpi_parse_rintc()]
  sys_riscv.c
    231: size_t pair_count, size_t cpu_count,   [in do_riscv_hwprobe(), argument]
    249: if (!cpu_count && !cpus_user) {   [in do_riscv_hwprobe()]
    252: if (cpu_count > cpumask_size())   [in do_riscv_hwprobe()]
    253: cpu_count = cpumask_size();   [in do_riscv_hwprobe()]
    255: ret = copy_from_user(&cpus, cpus_user, cpu_count);   [in do_riscv_hwprobe()]
    332: size_t, pair_count, size_t, cpu_count, unsigned long __user *,   [in SYSCALL_DEFINE5(), argument]
    335: return do_riscv_hwprobe(pairs, pair_count, cpu_count,   [in SYSCALL_DEFINE5()]
  patch.c
    23: atomic_t cpu_count;   [member]
    240: if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {   [in patch_text_cb()]
    246: atomic_inc(&patch->cpu_count);   [in patch_text_cb()]
    248: while (atomic_read(&patch->cpu_count) <= num_online_cpus())   [in patch_text_cb()]
    264: .cpu_count = ATOMIC_INIT(0),   [in patch_text()]
/openbmc/linux/arch/xtensa/kernel/
  jump_label.c
    27: atomic_t cpu_count;   [member]
    43: if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {   [in patch_text_stop_machine()]
    45: atomic_inc(&patch->cpu_count);   [in patch_text_stop_machine()]
    47: while (atomic_read(&patch->cpu_count) <= num_online_cpus())   [in patch_text_stop_machine()]
    58: .cpu_count = ATOMIC_INIT(0),   [in patch_text()]
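The riscv patch.c and xtensa jump_label.c hits above (and the arm64 patching.c and csky ftrace.c hits further down) share one rendezvous: every CPU entering the stop_machine callback bumps an atomic cpu_count, the last arrival patches the text and bumps the counter once more, and the other CPUs spin until the counter passes num_online_cpus(). A userspace analogue with C11 atomics and pthreads, purely illustrative and not taken from any of these files (build with -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    enum { NTHREADS = 4 };           /* stands in for num_online_cpus() */

    static atomic_int cpu_count = 0; /* mirrors patch->cpu_count */
    static int text = 0;             /* the word being "patched" */

    static void *patch_cb(void *arg)
    {
        (void)arg;

        if (atomic_fetch_add(&cpu_count, 1) + 1 == NTHREADS) {
            /* Last arrival performs the patch while everyone else spins. */
            text = 42;
            atomic_fetch_add(&cpu_count, 1);   /* release the waiters */
        } else {
            while (atomic_load(&cpu_count) <= NTHREADS)
                ;                              /* busy-wait, as the kernel code does */
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t tid[NTHREADS];

        for (int i = 0; i < NTHREADS; i++)
            pthread_create(&tid[i], NULL, patch_cb, NULL);
        for (int i = 0; i < NTHREADS; i++)
            pthread_join(tid[i], NULL);

        printf("patched value: %d\n", text);   /* prints 42 */
        return 0;
    }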
/openbmc/linux/tools/power/cpupower/utils/idle_monitor/
  mperf_monitor.c
    231: for (cpu = 0; cpu < cpu_count; cpu++) {   [in mperf_start()]
    243: for (cpu = 0; cpu < cpu_count; cpu++) {   [in mperf_stop()]
    345: is_valid = calloc(cpu_count, sizeof(int));   [in mperf_register()]
    346: mperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));   [in mperf_register()]
    347: aperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));   [in mperf_register()]
    348: mperf_current_count = calloc(cpu_count, sizeof(unsigned long long));   [in mperf_register()]
    349: aperf_current_count = calloc(cpu_count, sizeof(unsigned long long));   [in mperf_register()]
    350: tsc_at_measure_start = calloc(cpu_count, sizeof(unsigned long long));   [in mperf_register()]
    351: tsc_at_measure_end = calloc(cpu_count, sizeof(unsigned long long));   [in mperf_register()]
  cpuidle_sysfs.c
    48: for (cpu = 0; cpu < cpu_count; cpu++) {   [in cpuidle_start()]
    67: for (cpu = 0; cpu < cpu_count; cpu++) {   [in cpuidle_stop()]
    178: previous_count = malloc(sizeof(long long *) * cpu_count);   [in cpuidle_register()]
    179: current_count = malloc(sizeof(long long *) * cpu_count);   [in cpuidle_register()]
    180: for (num = 0; num < cpu_count; num++) {   [in cpuidle_register()]
    195: for (num = 0; num < cpu_count; num++) {   [in cpuidle_unregister()]
  snb_idle.c
    117: for (cpu = 0; cpu < cpu_count; cpu++) {   [in snb_start()]
    134: for (cpu = 0; cpu < cpu_count; cpu++) {   [in snb_stop()]
    166: is_valid = calloc(cpu_count, sizeof(int));   [in snb_register()]
    168: previous_count[num] = calloc(cpu_count,   [in snb_register()]
    170: current_count[num] = calloc(cpu_count,   [in snb_register()]
  hsw_ext_idle.c
    119: for (cpu = 0; cpu < cpu_count; cpu++) {   [in hsw_ext_start()]
    136: for (cpu = 0; cpu < cpu_count; cpu++) {   [in hsw_ext_stop()]
    161: is_valid = calloc(cpu_count, sizeof(int));   [in hsw_ext_register()]
    163: previous_count[num] = calloc(cpu_count,   [in hsw_ext_register()]
    165: current_count[num] = calloc(cpu_count,   [in hsw_ext_register()]
  nhm_idle.c
    134: for (cpu = 0; cpu < cpu_count; cpu++) {   [in nhm_start()]
    153: for (cpu = 0; cpu < cpu_count; cpu++) {   [in nhm_stop()]
    180: is_valid = calloc(cpu_count, sizeof(int));   [in intel_nhm_register()]
    182: previous_count[num] = calloc(cpu_count,   [in intel_nhm_register()]
    184: current_count[num] = calloc(cpu_count,   [in intel_nhm_register()]
  cpupower-monitor.c
    30: int cpu_count;   [variable]
    330: for (cpu = 0; cpu < cpu_count; cpu++)   [in do_interval_measure()]
    342: for (cpu = 0; cpu < cpu_count; cpu++)   [in do_interval_measure()]
    394: cpu_count = get_cpu_topology(&cpu_top);   [in cmd_monitor()]
    395: if (cpu_count < 0) {   [in cmd_monitor()]
    407: dprint("System has up to %d CPU cores\n", cpu_count);   [in cmd_monitor()]
    438: cpu_top.pkgs, cpu_top.cores, cpu_count);   [in cmd_monitor()]
    455: for (cpu = 0; cpu < cpu_count; cpu++) {   [in cmd_monitor()]
  amd_fam14h_idle.c
    233: for (cpu = 0; cpu < cpu_count; cpu++)   [in amd_fam14h_start()]
    253: for (cpu = 0; cpu < cpu_count; cpu++)   [in amd_fam14h_stop()]
    294: previous_count[num] = calloc(cpu_count,   [in amd_fam14h_register()]
    296: current_count[num] = calloc(cpu_count,   [in amd_fam14h_register()]
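All of the idle monitors above follow the same shape: *_register() allocates per-CPU counter arrays sized by cpu_count, *_start() snapshots each CPU's counters, and *_stop() snapshots them again so the report can print the deltas. A stripped-down sketch of that shape, under the assumption stated here; read_counter() is a hypothetical stand-in for the MSR and sysfs reads the real monitors perform:

    #include <stdio.h>
    #include <stdlib.h>

    static int cpu_count;                           /* set from topology, as in cpupower-monitor.c */
    static unsigned long long *previous_count;
    static unsigned long long *current_count;

    static unsigned long long read_counter(int cpu) /* hypothetical helper */
    {
        return (unsigned long long)cpu * 1000;      /* placeholder value */
    }

    static int monitor_register(int cpus)
    {
        cpu_count = cpus;
        previous_count = calloc(cpu_count, sizeof(*previous_count));
        current_count  = calloc(cpu_count, sizeof(*current_count));
        return (previous_count && current_count) ? 0 : -1;
    }

    static void monitor_start(void)
    {
        for (int cpu = 0; cpu < cpu_count; cpu++)
            previous_count[cpu] = read_counter(cpu);
    }

    static void monitor_stop(void)
    {
        for (int cpu = 0; cpu < cpu_count; cpu++)
            current_count[cpu] = read_counter(cpu);
    }

    int main(void)
    {
        if (monitor_register(4))
            return 1;
        monitor_start();
        /* ... measurement interval ... */
        monitor_stop();
        for (int cpu = 0; cpu < cpu_count; cpu++)
            printf("cpu%d delta: %llu\n", cpu, current_count[cpu] - previous_count[cpu]);
        free(previous_count);
        free(current_count);
        return 0;
    }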
/openbmc/qemu/hw/cpu/
  cluster.c
    35: int cpu_count;   [member]
    45: cbdata->cpu_count++;   [in add_cpu_to_cluster()]
    57: .cpu_count = 0,   [in cpu_cluster_realize()]
    73: assert(cbdata.cpu_count > 0);   [in cpu_cluster_realize()]
/openbmc/linux/drivers/thermal/intel/
  intel_hfi.c
    203: int i = 0, cpu_count;   [in update_capabilities(), local]
    208: cpu_count = cpumask_weight(hfi_instance->cpus);   [in update_capabilities()]
    211: if (!cpu_count)   [in update_capabilities()]
    214: cpu_caps = kcalloc(cpu_count, sizeof(*cpu_caps), GFP_KERNEL);   [in update_capabilities()]
    220: if (cpu_count < HFI_MAX_THERM_NOTIFY_COUNT)   [in update_capabilities()]
    225: (i + HFI_MAX_THERM_NOTIFY_COUNT) <= cpu_count;   [in update_capabilities()]
    230: cpu_count = cpu_count - i;   [in update_capabilities()]
    234: if (cpu_count)   [in update_capabilities()]
    235: thermal_genl_cpu_capability_event(cpu_count, &cpu_caps[i]);   [in update_capabilities()]
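update_capabilities() above splits its per-CPU capability array into fixed-size batches before handing them to thermal_genl_cpu_capability_event(), then flushes whatever is left over. A small standalone sketch of that chunking loop; MAX_BATCH and notify_batch() are hypothetical stand-ins for HFI_MAX_THERM_NOTIFY_COUNT and the real netlink call:

    #include <stdio.h>

    #define MAX_BATCH 16

    static void notify_batch(int n, const int *caps)   /* hypothetical stand-in */
    {
        printf("notifying %d entries starting at %d\n", n, caps[0]);
    }

    int main(void)
    {
        int cpu_caps[40];
        int cpu_count = 40;
        int i;

        for (i = 0; i < cpu_count; i++)
            cpu_caps[i] = i;

        /* Send full batches while a whole MAX_BATCH chunk still fits... */
        for (i = 0; (i + MAX_BATCH) <= cpu_count; i += MAX_BATCH)
            notify_batch(MAX_BATCH, &cpu_caps[i]);

        /* ...then flush the remainder, like the cpu_count = cpu_count - i tail above. */
        if (cpu_count - i)
            notify_batch(cpu_count - i, &cpu_caps[i]);

        return 0;
    }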
/openbmc/linux/arch/arm64/kernel/
  smp.c
    503: static unsigned int cpu_count = 1;   [variable]
    535: if (is_mpidr_duplicate(cpu_count, hwid)) {   [in acpi_map_gic_cpu_interface()]
    552: if (cpu_count >= NR_CPUS)   [in acpi_map_gic_cpu_interface()]
    556: set_cpu_logical_map(cpu_count, hwid);   [in acpi_map_gic_cpu_interface()]
    558: cpu_madt_gicc[cpu_count] = *processor;   [in acpi_map_gic_cpu_interface()]
    571: cpu_count++;   [in acpi_map_gic_cpu_interface()]
    666: if (cpu_count >= NR_CPUS)   [in of_parse_and_init_cpus()]
    670: set_cpu_logical_map(cpu_count, hwid);   [in of_parse_and_init_cpus()]
    674: cpu_count++;   [in of_parse_and_init_cpus()]
    692: if (cpu_count > nr_cpu_ids)   [in smp_init_cpus()]
    [all …]
  patching.c
    129: atomic_t cpu_count;   [member]
    138: if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {   [in aarch64_insn_patch_text_cb()]
    143: atomic_inc(&pp->cpu_count);   [in aarch64_insn_patch_text_cb()]
    145: while (atomic_read(&pp->cpu_count) <= num_online_cpus())   [in aarch64_insn_patch_text_cb()]
    159: .cpu_count = ATOMIC_INIT(0),   [in aarch64_insn_patch_text()]
/openbmc/qemu/accel/tcg/
  tcg-accel-ops-rr.c
    156: static int cpu_count;   [in rr_cpu_count(), local]
    162: cpu_count = 0;   [in rr_cpu_count()]
    164: ++cpu_count;   [in rr_cpu_count()]
    169: return cpu_count;   [in rr_cpu_count()]
    226: int cpu_count = rr_cpu_count();   [in rr_cpu_thread_fn(), local]
    236: cpu_budget = icount_percpu_budget(cpu_count);   [in rr_cpu_thread_fn()]
/openbmc/linux/arch/s390/hypfs/
  hypfs_diag0c.c
    33: unsigned int cpu_count, cpu, i;   [in diag0c_store(), local]
    37: cpu_count = num_online_cpus();   [in diag0c_store()]
    43: diag0c_data = kzalloc(struct_size(diag0c_data, entry, cpu_count),   [in diag0c_store()]
    55: *count = cpu_count;   [in diag0c_store()]
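diag0c_store() above sizes a single kzalloc() with struct_size(diag0c_data, entry, cpu_count), i.e. one fixed header followed by one trailing entry per online CPU. A userspace analogue of that layout using a C99 flexible array member; the types here are made up for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    struct entry {
        unsigned long long data;
    };

    struct diag0c_data {
        unsigned int count;
        struct entry entry[];       /* flexible array member, one slot per CPU */
    };

    int main(void)
    {
        unsigned int cpu_count = 8; /* num_online_cpus() in the kernel code */
        struct diag0c_data *d;

        /* One allocation for header plus cpu_count entries, like kzalloc(struct_size(...)). */
        d = calloc(1, sizeof(*d) + cpu_count * sizeof(d->entry[0]));
        if (!d)
            return 1;

        d->count = cpu_count;
        for (unsigned int i = 0; i < cpu_count; i++)
            d->entry[i].data = i;

        printf("allocated %u per-CPU entries\n", d->count);
        free(d);
        return 0;
    }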
/openbmc/linux/tools/testing/selftests/rcutorture/bin/
  kvm-test-1-run.sh
    144: cpu_count=`configNR_CPUS.sh $resdir/ConfigFragment`
    145: cpu_count=`configfrag_boot_cpus "$boot_args_in" "$config_template" "$cpu_count"`
    146: if test "$cpu_count" -gt "$TORTURE_ALLOTED_CPUS"
    148: echo CPU count limited from $cpu_count to $TORTURE_ALLOTED_CPUS | tee -a $resdir/Warnings
    149: cpu_count=$TORTURE_ALLOTED_CPUS
    151: qemu_args="`specify_qemu_cpus "$QEMU" "$qemu_args" "$cpu_count"`"
    207: echo "# TORTURE_CPU_COUNT=$cpu_count" >> $resdir/qemu-cmd
  kvm-test-1-run-batch.sh
    78: affinity_export="`awk -f $T/cpubatches.awk -v cpu_count="$cpu_count" -v scenario=$i < /dev/null`"
  kvm.sh
    321: cpu_count=`configNR_CPUS.sh $T/KCONFIG_ARG`
    323: cpu_count=`configNR_CPUS.sh $CONFIGFRAG/$CF1`
    325: cpu_count=`configfrag_boot_cpus "$TORTURE_BOOTARGS" "$CONFIGFRAG/$CF1" "$cpu_count"`
    326: cpu_count=`configfrag_boot_maxcpus "$TORTURE_BOOTARGS" "$CONFIGFRAG/$CF1" "$cpu_count"`
    327: echo 'scenariocpu["'"$CF1"'"] = '"$cpu_count"';' >> $T/cfgcpu.awk
/openbmc/linux/arch/arm/mach-axxia/
  platsmp.c
    56: int cpu_count = 0;   [in axxia_smp_prepare_cpus(), local]
    73: if (cpu_count < max_cpus) {   [in axxia_smp_prepare_cpus()]
    75: cpu_count++;   [in axxia_smp_prepare_cpus()]
/openbmc/linux/arch/csky/kernel/
  ftrace.c
    205: atomic_t cpu_count;   [member]
    212: if (atomic_inc_return(&param->cpu_count) == 1) {   [in __ftrace_modify_code()]
    214: atomic_inc(&param->cpu_count);   [in __ftrace_modify_code()]
    216: while (atomic_read(&param->cpu_count) <= num_online_cpus())   [in __ftrace_modify_code()]
/openbmc/linux/scripts/
  checkkconfigsymbols.py
    18: from multiprocessing import Pool, cpu_count
    276: pool = Pool(cpu_count(), init_worker)
    283: for part in partition(kfiles, cpu_count()):
    313: pool = Pool(cpu_count(), init_worker)
    340: arglist = partition(source_files, cpu_count())
    346: for part in partition(kconfig_files, cpu_count()):
/openbmc/qemu/hw/s390x/
  sclp.c
    111: int cpu_count;   [in read_SCP_info(), local]
    133: prepare_cpu_entries(machine, entries_start, &cpu_count);   [in read_SCP_info()]
    134: read_info->entries_cpu = cpu_to_be16(cpu_count);   [in read_SCP_info()]
    189: int cpu_count;   [in sclp_read_cpu_info(), local]
    200: prepare_cpu_entries(machine, cpu_info->entries, &cpu_count);   [in sclp_read_cpu_info()]
    201: cpu_info->nr_configured = cpu_to_be16(cpu_count);   [in sclp_read_cpu_info()]