/openbmc/qemu/hw/core/machine-smp.c
    58  g_string_append_printf(s, " * cores (%u)", ms->smp.cores);   in cpu_hierarchy_to_string()
    96  unsigned cores = config->has_cores ? config->cores : 0;   in machine_parse_smp_config() local
   112  (config->has_cores && config->cores == 0) ||   in machine_parse_smp_config()
   167  cores = cores > 0 ? cores : 1;   in machine_parse_smp_config()
   175  cores = cores > 0 ? cores : 1;   in machine_parse_smp_config()
   179  modules * cores * threads);   in machine_parse_smp_config()
   180  } else if (cores == 0) {   in machine_parse_smp_config()
   182  cores = maxcpus /   in machine_parse_smp_config()
   188  if (cores == 0) {   in machine_parse_smp_config()
   191  cores = maxcpus /   in machine_parse_smp_config()
  [all …]

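The repeated "cores == 0" hits above are QEMU back-filling -smp members the
user omitted. A minimal, self-contained sketch of that inference, with
hypothetical fixed values standing in for the parsed SMPConfiguration (the
real code also covers dies, clusters and modules):

    #include <stdio.h>

    int main(void)
    {
        /* A topology member left at 0 (omitted on the command line) is
         * derived from maxcpus and the members that were given, then
         * clamped to at least 1, cf. lines 167-191 above. */
        unsigned sockets = 2, threads = 2, maxcpus = 8;
        unsigned cores = 0;                        /* omitted: compute it */

        if (cores == 0)
            cores = maxcpus / (sockets * threads); /* 8 / (2 * 2) = 2 */
        cores = cores > 0 ? cores : 1;             /* never below 1 */

        printf("cores = %u\n", cores);
        return 0;
    }
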
/openbmc/qemu/tests/qtest/cpu-plug-test.c
    21  unsigned cores;   member
    40  td->sockets, td->cores, td->threads, td->maxcpus);   in test_plug_with_device_add()
    95  data->cores = 3;   in add_pc_test_case()
    97  data->maxcpus = data->sockets * data->cores * data->threads;   in add_pc_test_case()
   100  mname, data->sockets, data->cores,   in add_pc_test_case()
   121  data->cores = 3;   in add_pseries_test_case()
   123  data->maxcpus = data->sockets * data->cores * data->threads;   in add_pseries_test_case()
   126  mname, data->sockets, data->cores,   in add_pseries_test_case()
   147  data->cores = 3;   in add_s390x_test_case()
   149  data->maxcpus = data->sockets * data->cores * data->threads;   in add_s390x_test_case()
  [all …]

/openbmc/linux/sound/soc/intel/skylake/skl-sst-dsp.c
    39  skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING;   in skl_dsp_init_core_state()
    40  skl->cores.usage_count[SKL_DSP_CORE0_ID] = 1;   in skl_dsp_init_core_state()
    42  for (i = SKL_DSP_CORE0_ID + 1; i < skl->cores.count; i++) {   in skl_dsp_init_core_state()
    43  skl->cores.state[i] = SKL_DSP_RESET;   in skl_dsp_init_core_state()
    44  skl->cores.usage_count[i] = 0;   in skl_dsp_init_core_state()
    55  core_mask = SKL_DSP_CORES_MASK(skl->cores.count);   in skl_dsp_get_enabled_cores()
   341  if (core_id >= skl->cores.count) {   in skl_dsp_get_core()
   346  skl->cores.usage_count[core_id]++;   in skl_dsp_get_core()
   348  if (skl->cores.state[core_id] == SKL_DSP_RESET) {   in skl_dsp_get_core()
   358  core_id, skl->cores.state[core_id],   in skl_dsp_get_core()
  [all …]

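The skl_dsp_get_core() hits show a refcount-plus-state pattern: bump the
core's usage_count, and power the core up only while it is still in RESET.
A simplified, self-contained sketch of that shape (the types, names and
power-up step are hypothetical stand-ins for the driver's):

    #include <stdio.h>

    enum core_state { CORE_RESET, CORE_RUNNING };

    struct dsp_cores {
        int count;
        enum core_state state[8];
        int usage_count[8];
    };

    /* Validate the id, take a reference, power up on first use only:
     * the shape of skl_dsp_get_core() above. */
    static int core_get(struct dsp_cores *c, int core_id)
    {
        if (core_id >= c->count)
            return -1;                   /* no such core */

        c->usage_count[core_id]++;
        if (c->state[core_id] == CORE_RESET) {
            /* real driver: enable the core and handle failure */
            c->state[core_id] = CORE_RUNNING;
        }
        return 0;
    }

    int main(void)
    {
        struct dsp_cores c = { .count = 2 };

        core_get(&c, 1);
        core_get(&c, 1);                 /* second user: no power-up */
        printf("core 1: refs=%d running=%d\n",
               c.usage_count[1], c.state[1] == CORE_RUNNING);
        return 0;
    }
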
/openbmc/linux/sound/soc/intel/skylake/skl-messages.c
   256  struct skl_dsp_cores *cores;   in skl_init_dsp() local
   285  cores = &skl->cores;   in skl_init_dsp()
   286  cores->count = ops->num_cores;   in skl_init_dsp()
   288  cores->state = kcalloc(cores->count, sizeof(*cores->state), GFP_KERNEL);   in skl_init_dsp()
   289  if (!cores->state) {   in skl_init_dsp()
   294  cores->usage_count = kcalloc(cores->count, sizeof(*cores->usage_count),   in skl_init_dsp()
   296  if (!cores->usage_count) {   in skl_init_dsp()
   306  kfree(cores->state);   in skl_init_dsp()
   323  kfree(skl->cores.state);   in skl_free_dsp()
   324  kfree(skl->cores.usage_count);   in skl_free_dsp()

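skl_init_dsp() allocates the two per-core arrays back to back, and the
kfree(cores->state) hit at line 306 is the unwind path for when the second
kcalloc() fails. The same allocate-two/unwind-one shape in plain userspace
C (calloc/free standing in for kcalloc/kfree):

    #include <stdlib.h>

    struct dsp_cores { int count; int *state; int *usage_count; };

    /* Shape of skl_init_dsp(): allocate two per-core arrays; if the
     * second allocation fails, free the first before bailing out. */
    static int cores_init(struct dsp_cores *c, int count)
    {
        c->count = count;
        c->state = calloc(count, sizeof(*c->state));
        if (!c->state)
            return -1;

        c->usage_count = calloc(count, sizeof(*c->usage_count));
        if (!c->usage_count) {
            free(c->state);              /* unwind, cf. kfree(cores->state) */
            c->state = NULL;
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct dsp_cores c;

        if (cores_init(&c, 4))
            return 1;
        free(c.state);                   /* cf. skl_free_dsp() */
        free(c.usage_count);
        return 0;
    }
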
/openbmc/u-boot/arch/arm/cpu/armv8/fsl-layerscape/mp.c
    77  u32 cores, cpu_up_mask = 1;   in fsl_layerscape_wake_seconday_cores() local
    88  cores = cpu_mask();   in fsl_layerscape_wake_seconday_cores()
   108  rst->brrl = cores;   in fsl_layerscape_wake_seconday_cores()
   140  gur_out32(&gur->brrl, cores);   in fsl_layerscape_wake_seconday_cores()
   144  scfg_out32(&scfg->corebcr, cores);   in fsl_layerscape_wake_seconday_cores()
   162  if (hweight32(cpu_up_mask) == hweight32(cores))   in fsl_layerscape_wake_seconday_cores()
   168  cores, cpu_up_mask);   in fsl_layerscape_wake_seconday_cores()
   171  printf("All (%d) cores are up.\n", hweight32(cores));   in fsl_layerscape_wake_seconday_cores()
   210  u32 cores = cpu_pos_mask();   in core_to_pos() local
   215  } else if (nr >= hweight32(cores)) {   in core_to_pos()

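The completion test at line 162 compares population counts: every core that
was released from reset must have set its bit in cpu_up_mask. A small
self-contained sketch of that check (the mask values are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* Population count, standing in for the kernel/u-boot hweight32(). */
    static int popcount32(uint32_t w)
    {
        int n = 0;
        for (; w; w &= w - 1)
            n++;
        return n;
    }

    int main(void)
    {
        uint32_t cores = 0x0f;       /* cores released from reset */
        uint32_t cpu_up_mask = 0x0f; /* cores that have checked in */

        if (popcount32(cpu_up_mask) == popcount32(cores))
            printf("All (%d) cores are up.\n", popcount32(cores));
        return 0;
    }
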
/openbmc/linux/Documentation/admin-guide/lockup-watchdogs.rst
    67  By default, the watchdog runs on all online cores. However, on a
    69  on the housekeeping cores, not the cores specified in the "nohz_full"
    71  the "nohz_full" cores, we would have to run timer ticks to activate
    73  from protecting the user code on those cores from the kernel.
    74  Of course, disabling it by default on the nohz_full cores means that
    75  when those cores do enter the kernel, by default we will not be
    77  to continue to run on the housekeeping (non-tickless) cores means
    78  that we will continue to detect lockups properly on those cores.
    80  In either case, the set of cores excluded from running the watchdog
    82  nohz_full cores, this may be useful for debugging a case where the
  [all …]

/openbmc/linux/drivers/gpu/drm/nouveau/dispnv50/core.c
    44  } cores[] = {   in nv50_core_new() local
    65  cid = nvif_mclass(&disp->disp->object, cores);   in nv50_core_new()
    71  return cores[cid].new(drm, cores[cid].oclass, pcore);   in nv50_core_new()

/openbmc/linux/Documentation/devicetree/bindings/timer/snps,arc-timer.txt
     4  - Two identical copies TIMER0 and TIMER1 exist in ARC cores and historically
     5  TIMER0 used as clockevent provider (true for all ARC cores)
    12  (16 for ARCHS cores, 3 for ARC700 cores)

/openbmc/linux/Documentation/networking/device_drivers/can/freescale/flexcan.rst
    13  For most flexcan IP cores the driver supports 2 RX modes:
    18  The older flexcan cores (integrated into the i.MX25, i.MX28, i.MX35
    28  cores come up in a mode where RTR reception is possible.
    39  On some IP cores the controller cannot receive RTR frames in the
    45  Waive ability to receive RTR frames. (not supported on all IP cores)
    48  some IP cores RTR frames cannot be received anymore.

/openbmc/qemu/docs/system/arm/raspi.rst
    10  Cortex-A7 (4 cores), 1 GiB of RAM
    12  Cortex-A53 (4 cores), 512 MiB of RAM
    14  Cortex-A53 (4 cores), 1 GiB of RAM
    16  Cortex-A72 (4 cores), 2 GiB of RAM

/openbmc/qemu/docs/system/arm/highbank.rst
     5  which has four Cortex-A9 cores.
     8  which has four Cortex-A15 cores.

/openbmc/linux/Documentation/devicetree/bindings/media/xilinx/video.txt
     1  DT bindings for Xilinx video IP cores
     4  Xilinx video IP cores process video streams by acting as video sinks and/or
    10  cores are represented as defined in ../video-interfaces.txt.
    18  The following properties are common to all Xilinx video IP cores.
    21  AXI bus between video IP cores, using its VF code as defined in "AXI4-Stream

/openbmc/linux/drivers/remoteproc/ti_k3_r5_remoteproc.c
   112  struct list_head cores;   member
   300  list_for_each_entry(core, &cluster->cores, elem) {   in k3_r5_lockstep_reset()
   311  list_for_each_entry(core, &cluster->cores, elem) {   in k3_r5_lockstep_reset()
   324  list_for_each_entry_continue_reverse(core, &cluster->cores, elem) {   in k3_r5_lockstep_reset()
   329  core = list_last_entry(&cluster->cores, struct k3_r5_core, elem);   in k3_r5_lockstep_reset()
   331  list_for_each_entry_from_reverse(core, &cluster->cores, elem) {   in k3_r5_lockstep_reset()
   345  list_for_each_entry_reverse(core, &cluster->cores, elem) {   in k3_r5_lockstep_release()
   357  list_for_each_entry_reverse(core, &cluster->cores, elem) {   in k3_r5_lockstep_release()
   369  list_for_each_entry_continue(core, &cluster->cores, elem) {   in k3_r5_lockstep_release()
   373  core = list_first_entry(&cluster->cores, struct k3_r5_core, elem);   in k3_r5_lockstep_release()
  [all …]

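k3_r5_lockstep_reset() walks the cluster's cores list and, when an
operation fails part-way through, uses list_for_each_entry_continue_reverse()
to undo only the cores already processed. The same walk-and-unwind shape,
sketched over a plain array instead of a kernel list (the per-core ops are
hypothetical):

    #include <stdio.h>

    /* Hypothetical per-core operation that fails on core 2. */
    static int core_reset(int core) { return core == 2 ? -1 : 0; }
    static void core_unreset(int core) { printf("unwinding core %d\n", core); }

    /* On failure at core i, undo cores [0, i) in reverse, mirroring
     * list_for_each_entry_continue_reverse() over &cluster->cores. */
    static int cluster_reset(int ncores)
    {
        int i;

        for (i = 0; i < ncores; i++) {
            if (core_reset(i) < 0)
                goto unwind;
        }
        return 0;

    unwind:
        while (--i >= 0)
            core_unreset(i);
        return -1;
    }

    int main(void) { return cluster_reset(4) ? 1 : 0; }
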
/openbmc/u-boot/doc/README.mpc85xx-spin-table
     6  __secondary_start_page. For other cores to use the spin table, the booting
    12  page translation for secondary cores to use this page of memory. Then 4KB
    17  that secondary cores can see it.
    19  When secondary cores boot up from 0xffff_f000 page, they only have one default
    22  with WIMGE =0b00100. Now secondary cores can keep polling the spin table

/openbmc/u-boot/doc/README.Heterogeneous-SoCs
     5  configuration and frequencies of all PowerPC cores and devices
     7  SC3900/DSP cores and such devices like CPRI, MAPLE, MAPLE-ULB etc.
    19  Code added in this file to print the DSP cores and other device's(CPRI,
    25  required cores and devices from RCW and System frequency
    29  Added API to get the number of SC cores in running system and Their BIT
    44  Global structure updated for dsp cores and other components
    73  DSP cores and other device's components have been added in this structure.

/openbmc/qemu/contrib/plugins/cache.c
    98  static int cores;   variable
   294  caches = g_new(Cache *, cores);   in caches_init()
   296  for (i = 0; i < cores; i++) {   in caches_init()
   403  cache_idx = vcpu_index % cores;   in vcpu_mem_access()
   439  cache_idx = vcpu_index % cores;   in vcpu_insn_exec()
   533  for (i = 0; i < cores; i++) {   in caches_free()
   571  g_assert(cores > 1);   in sum_stats()
   572  for (i = 0; i < cores; i++) {   in sum_stats()
   624  for (i = 0; i < cores; i++) {   in log_stats()
   635  if (cores > 1) {   in log_stats()
  [all …]

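The plugin models one cache per configured core and folds an arbitrary
number of vCPUs onto them with vcpu_index % cores (lines 403 and 439). A
tiny standalone illustration of that mapping (the counts are hypothetical):

    #include <stdio.h>

    int main(void)
    {
        int cores = 4;   /* number of modeled per-core caches */

        /* Mapping used in vcpu_mem_access()/vcpu_insn_exec(): any
         * vcpu_index, however large, lands on one of the cache models. */
        for (int vcpu_index = 0; vcpu_index < 10; vcpu_index++) {
            int cache_idx = vcpu_index % cores;
            printf("vcpu %d -> cache %d\n", vcpu_index, cache_idx);
        }
        return 0;
    }
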
/openbmc/qemu/include/hw/s390x/cpu-topology.h
    68  return (n / smp->cores) % smp->sockets;   in s390_std_socket()
    73  return (n / (smp->cores * smp->sockets)) % smp->books;   in s390_std_book()
    78  return (n / (smp->cores * smp->sockets * smp->books)) % smp->drawers;   in s390_std_drawer()

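These helpers decompose a linear CPU index into its position in the
cores/sockets/books/drawers hierarchy by successive division and modulo.
A runnable rendering of the same arithmetic with a worked example (the
topology numbers are hypothetical):

    #include <stdio.h>

    struct smp { int cores, sockets, books, drawers; };

    /* Mirrors s390_std_socket()/s390_std_book()/s390_std_drawer() above. */
    static int std_socket(int n, const struct smp *smp)
    {
        return (n / smp->cores) % smp->sockets;
    }

    static int std_book(int n, const struct smp *smp)
    {
        return (n / (smp->cores * smp->sockets)) % smp->books;
    }

    static int std_drawer(int n, const struct smp *smp)
    {
        return (n / (smp->cores * smp->sockets * smp->books)) % smp->drawers;
    }

    int main(void)
    {
        struct smp smp = { .cores = 2, .sockets = 2, .books = 2, .drawers = 2 };

        /* CPU 5: socket (5/2)%2 = 0, book (5/4)%2 = 1, drawer (5/8)%2 = 0 */
        printf("cpu 5: socket %d, book %d, drawer %d\n",
               std_socket(5, &smp), std_book(5, &smp), std_drawer(5, &smp));
        return 0;
    }
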
/openbmc/linux/Documentation/devicetree/bindings/bus/brcm,bus-axi.txt
     9  The cores on the AXI bus are automatically detected by bcma with the
    12  BCM47xx/BCM53xx ARM SoCs. To assign IRQ numbers to the cores, provide
    17  The top-level axi bus may contain children representing attached cores
    19  detected (e.g. IRQ numbers). Also some of the cores may be responsible

/openbmc/u-boot/drivers/axi/Kconfig
     7  communication with IP cores in Xilinx FPGAs).
    23  IP cores in the FPGA (e.g. video transmitter cores).

/openbmc/linux/arch/riscv/Kconfig.errata
     9  here if your platform uses Andes CPU cores.
    20  non-standard handling on non-coherent operations on Andes cores.
    30  here if your platform uses SiFive CPU cores.
    62  here if your platform uses T-HEAD CPU cores.
    94  The T-Head C9xx cores implement a PMU overflow extension very

/openbmc/linux/arch/x86/mm/amdtopology.c
    63  unsigned int bits, cores, apicid_base;   in amd_numa_init() local
   165  cores = 1 << bits;   in amd_numa_init()
   179  for (j = apicid_base; j < cores + apicid_base; j++)   in amd_numa_init()

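Here "cores = 1 << bits" recovers the per-node core count from an APIC ID
bit width, and the loop at line 179 then claims the node's contiguous APIC
ID range. The arithmetic, worked with hypothetical values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int bits = 2;           /* hypothetical APIC id width */
        unsigned int cores = 1 << bits;  /* 4 cores per node */
        unsigned int apicid_base = 8;    /* hypothetical base */

        /* Loop shape from amd_numa_init(): the node owns APIC ids
         * [apicid_base, apicid_base + cores). */
        for (unsigned int j = apicid_base; j < cores + apicid_base; j++)
            printf("apicid %u -> this node\n", j);
        return 0;
    }
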
/openbmc/linux/drivers/bcma/main.c
    92  list_for_each_entry(core, &bus->cores, list) {   in bcma_find_core_unit()
   272  INIT_LIST_HEAD(&bus->cores);   in bcma_init_bus()
   296  list_for_each_entry(core, &bus->cores, list) {   in bcma_register_devices()
   366  list_for_each_entry_safe(core, tmp, &bus->cores, list) {   in bcma_unregister_cores()
   376  list_for_each_entry_safe(core, tmp, &bus->cores, list) {   in bcma_unregister_cores()
   412  list_for_each_entry(core, &bus->cores, list) {   in bcma_bus_register()
   537  list_for_each_entry(core, &bus->cores, list) {   in bcma_bus_suspend()
   558  list_for_each_entry(core, &bus->cores, list) {   in bcma_bus_resume()

/openbmc/qemu/docs/system/ppc/powernv.rst
    77  $ qemu-system-ppc64 -m 2G -machine powernv9 -smp 2,cores=2,threads=1 \
   114  $ qemu-system-ppc64 -m 2G -machine powernv9 -smp 2,cores=2,threads=1 -accel tcg,thread=single \
   149  number of cores. ``-smp 2,cores=1`` will define a machine with 2
   150  sockets of 1 core, whereas ``-smp 2,cores=2`` will define a machine
   151  with 1 socket of 2 cores. ``-smp 8,cores=2``, 4 sockets of 2 cores.

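The rule quoted at lines 149-151 is plain integer division: the socket
count is the total CPU count divided by cores times threads. A minimal
check of the three quoted examples (threads defaults to 1 here):

    #include <stdio.h>

    /* sockets = cpus / (cores * threads), as described above. */
    static unsigned sockets(unsigned cpus, unsigned cores, unsigned threads)
    {
        return cpus / (cores * threads);
    }

    int main(void)
    {
        printf("-smp 2,cores=1 -> %u sockets\n", sockets(2, 1, 1)); /* 2 */
        printf("-smp 2,cores=2 -> %u sockets\n", sockets(2, 2, 1)); /* 1 */
        printf("-smp 8,cores=2 -> %u sockets\n", sockets(8, 2, 1)); /* 4 */
        return 0;
    }
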
/openbmc/linux/drivers/gpu/drm/v3d/v3d_irq.c
   225  for (core = 0; core < v3d->cores; core++)   in v3d_irq_init()
   270  for (core = 0; core < v3d->cores; core++) {   in v3d_irq_enable()
   285  for (core = 0; core < v3d->cores; core++)   in v3d_irq_disable()
   290  for (core = 0; core < v3d->cores; core++)   in v3d_irq_disable()

/openbmc/linux/sound/soc/sof/ipc4-mtrace.c
   124  struct sof_mtrace_core_data cores[];   member
   414  debugfs_create_file(dfs_name, 0444, dfs_root, &priv->cores[i],   in mtrace_debugfs_create()
   492  struct sof_mtrace_core_data *core_data = &priv->cores[i];   in ipc4_mtrace_disable()
   528  core_data = &priv->cores[core];   in sof_mtrace_find_core_slots()
   565  priv = devm_kzalloc(sdev->dev, struct_size(priv, cores, sdev->num_cores),   in ipc4_mtrace_init()
   579  struct sof_mtrace_core_data *core_data = &priv->cores[i];   in ipc4_mtrace_init()
   634  core_data = &priv->cores[core];   in sof_ipc4_mtrace_update_pos()

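Here priv ends in a flexible array member, cores[], and is sized with
struct_size(priv, cores, sdev->num_cores), the kernel's overflow-checked
form of "header plus N trailing elements". A userspace sketch of the same
layout, with calloc standing in for devm_kzalloc and the size math spelled
out where the kernel would use struct_size() (types are hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    struct core_data { int slot; };

    /* Header plus flexible array member, like the cores[] member above. */
    struct mtrace_priv {
        int num_cores;
        struct core_data cores[];
    };

    int main(void)
    {
        int num_cores = 4;

        /* struct_size(priv, cores, num_cores) is this sum, with
         * overflow checking added. */
        struct mtrace_priv *priv =
            calloc(1, sizeof(*priv) + num_cores * sizeof(priv->cores[0]));
        if (!priv)
            return 1;

        priv->num_cores = num_cores;
        for (int i = 0; i < num_cores; i++)
            priv->cores[i].slot = i;     /* cf. &priv->cores[i] above */

        printf("core 3 slot = %d\n", priv->cores[3].slot);
        free(priv);
        return 0;
    }
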