| /openbmc/qemu/tests/tcg/mips/user/ase/msa/ |
| H A D | test_msa_run_64r6eb.sh |
      8  $PATH_TO_QEMU -cpu I6400 /tmp/test_msa_nloc_b_64r6eb
      9  $PATH_TO_QEMU -cpu I6400 /tmp/test_msa_nloc_h_64r6eb
     10  $PATH_TO_QEMU -cpu I6400 /tmp/test_msa_nloc_w_64r6eb
     11  $PATH_TO_QEMU -cpu I6400 /tmp/test_msa_nloc_d_64r6eb
     12  $PATH_TO_QEMU -cpu I6400 /tmp/test_msa_nlzc_b_64r6eb
     13  $PATH_TO_QEMU -cpu I6400 /tmp/test_msa_nlzc_h_64r6eb
     14  $PATH_TO_QEMU -cpu I6400 /tmp/test_msa_nlzc_w_64r6eb
     15  $PATH_TO_QEMU -cpu I6400 /tmp/test_msa_nlzc_d_64r6eb
     16  $PATH_TO_QEMU -cpu I6400 /tmp/test_msa_pcnt_b_64r6eb
     17  $PATH_TO_QEMU -cpu I6400 /tmp/test_msa_pcnt_h_64r6eb
     [all …]
|
| H A D | test_msa_run_32r5eb.sh |
      8  $PATH_TO_QEMU -cpu P5600 /tmp/test_msa_nloc_b_32r5eb
      9  $PATH_TO_QEMU -cpu P5600 /tmp/test_msa_nloc_h_32r5eb
     10  $PATH_TO_QEMU -cpu P5600 /tmp/test_msa_nloc_w_32r5eb
     11  $PATH_TO_QEMU -cpu P5600 /tmp/test_msa_nloc_d_32r5eb
     12  $PATH_TO_QEMU -cpu P5600 /tmp/test_msa_nlzc_b_32r5eb
     13  $PATH_TO_QEMU -cpu P5600 /tmp/test_msa_nlzc_h_32r5eb
     14  $PATH_TO_QEMU -cpu P5600 /tmp/test_msa_nlzc_w_32r5eb
     15  $PATH_TO_QEMU -cpu P5600 /tmp/test_msa_nlzc_d_32r5eb
     16  $PATH_TO_QEMU -cpu P5600 /tmp/test_msa_pcnt_b_32r5eb
     17  $PATH_TO_QEMU -cpu P5600 /tmp/test_msa_pcnt_h_32r5eb
     [all …]
|
| H A D | test_msa_run_64r6el.sh |
      8  $PATH_TO_QEMU -cpu I6400 /tmp/test_msa_nloc_b_64r6el
      9  $PATH_TO_QEMU -cpu I6400 /tmp/test_msa_nloc_h_64r6el
     10  $PATH_TO_QEMU -cpu I6400 /tmp/test_msa_nloc_w_64r6el
     11  $PATH_TO_QEMU -cpu I6400 /tmp/test_msa_nloc_d_64r6el
     12  $PATH_TO_QEMU -cpu I6400 /tmp/test_msa_nlzc_b_64r6el
     13  $PATH_TO_QEMU -cpu I6400 /tmp/test_msa_nlzc_h_64r6el
     14  $PATH_TO_QEMU -cpu I6400 /tmp/test_msa_nlzc_w_64r6el
     15  $PATH_TO_QEMU -cpu I6400 /tmp/test_msa_nlzc_d_64r6el
     16  $PATH_TO_QEMU -cpu I6400 /tmp/test_msa_pcnt_b_64r6el
     17  $PATH_TO_QEMU -cpu I6400 /tmp/test_msa_pcnt_h_64r6el
     [all …]
|
| H A D | test_msa_run_32r5el.sh |
      8  $PATH_TO_QEMU -cpu P5600 /tmp/test_msa_nloc_b_32r5el
      9  $PATH_TO_QEMU -cpu P5600 /tmp/test_msa_nloc_h_32r5el
     10  $PATH_TO_QEMU -cpu P5600 /tmp/test_msa_nloc_w_32r5el
     11  $PATH_TO_QEMU -cpu P5600 /tmp/test_msa_nloc_d_32r5el
     12  $PATH_TO_QEMU -cpu P5600 /tmp/test_msa_nlzc_b_32r5el
     13  $PATH_TO_QEMU -cpu P5600 /tmp/test_msa_nlzc_h_32r5el
     14  $PATH_TO_QEMU -cpu P5600 /tmp/test_msa_nlzc_w_32r5el
     15  $PATH_TO_QEMU -cpu P5600 /tmp/test_msa_nlzc_d_32r5el
     16  $PATH_TO_QEMU -cpu P5600 /tmp/test_msa_pcnt_b_32r5el
     17  $PATH_TO_QEMU -cpu P5600 /tmp/test_msa_pcnt_h_32r5el
     [all …]
|
| /openbmc/qemu/target/i386/emulate/ |
| H A D | x86.h |
    198  #define x86_reg(cpu, reg) ((x86_register *) &cpu->regs[reg])
    200  #define RRX(cpu, reg) (x86_reg(cpu, reg)->rrx)
    201  #define RAX(cpu) RRX(cpu, R_EAX)
    202  #define RCX(cpu) RRX(cpu, R_ECX)
    203  #define RDX(cpu) RRX(cpu, R_EDX)
    204  #define RBX(cpu) RRX(cpu, R_EBX)
    205  #define RSP(cpu) RRX(cpu, R_ESP)
    206  #define RBP(cpu) RRX(cpu, R_EBP)
    207  #define RSI(cpu) RRX(cpu, R_ESI)
    208  #define RDI(cpu) RRX(cpu, R_EDI)
    [all …]
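
The x86.h hits above show register accessors built in two layers: a generic macro that indexes a register array, and per-register wrappers on top of it. A minimal self-contained sketch of that same pattern follows; the `x86_register` layout, struct name, and register list here are simplified stand-ins for illustration, not the actual QEMU definitions.

    #include <stdint.h>
    #include <stdio.h>

    enum { R_EAX, R_ECX, R_EDX, R_EBX, R_ESP, R_EBP, R_ESI, R_EDI, CPU_NB_REGS };

    /* Simplified stand-in for the register union: full 64-bit value only. */
    typedef union x86_register {
        uint64_t rrx;                      /* whole register, e.g. RAX */
    } x86_register;

    typedef struct CPUStateSketch {
        uint64_t regs[CPU_NB_REGS];
    } CPUStateSketch;

    /* Same layering as the listing: generic accessor, then named wrappers. */
    #define x86_reg(cpu, reg) ((x86_register *) &(cpu)->regs[reg])
    #define RRX(cpu, reg)     (x86_reg(cpu, reg)->rrx)
    #define RAX(cpu)          RRX(cpu, R_EAX)
    #define RSP(cpu)          RRX(cpu, R_ESP)

    int main(void)
    {
        CPUStateSketch cpu = { .regs = { 0 } };

        RAX(&cpu) = 0x1234;                /* the macros expand to plain lvalues */
        RSP(&cpu) = RAX(&cpu) + 8;
        printf("rax=%llx rsp=%llx\n",
               (unsigned long long)RAX(&cpu), (unsigned long long)RSP(&cpu));
        return 0;
    }

Because each wrapper expands to an lvalue, the same macro works for both reads and writes, which is the point of the two-layer design shown in the listing.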
|
| /openbmc/u-boot/arch/arm/dts/ |
| H A D | thunderx-88xx.dtsi |
     24  cpu@000 {
     25      device_type = "cpu";
     30  cpu@001 {
     31      device_type = "cpu";
     36  cpu@002 {
     37      device_type = "cpu";
     42  cpu@003 {
     43      device_type = "cpu";
     48  cpu@004 {
     49      device_type = "cpu";
     [all …]
|
| /openbmc/openbmc/poky/meta/recipes-support/boost/boost/ |
| H A D | 0001-Don-t-set-up-arch-instruction-set-flags-we-do-that-o.patch |
     19  @@ -1144,156 +1144,3 @@ local rule cpu-flags ( toolset variable : architecture : instruction-set + :
     31  -cpu-flags gcc OPTIONS : x86 : native : -march=native ;
     32  -cpu-flags gcc OPTIONS : x86 : i486 : -march=i486 ;
     33  -cpu-flags gcc OPTIONS : x86 : i586 : -march=i586 ;
     34  -cpu-flags gcc OPTIONS : x86 : i686 : -march=i686 ;
     35  -cpu-flags gcc OPTIONS : x86 : pentium : -march=pentium ;
     36  -cpu-flags gcc OPTIONS : x86 : pentium-mmx : -march=pentium-mmx ;
     37  -cpu-flags gcc OPTIONS : x86 : pentiumpro : -march=pentiumpro ;
     38  -cpu-flags gcc OPTIONS : x86 : pentium2 : -march=pentium2 ;
     39  -cpu-flags gcc OPTIONS : x86 : pentium3 : -march=pentium3 ;
     [all …]
|
| /openbmc/qemu/target/i386/hvf/ |
| H A D | x86.c |
     49  bool x86_read_segment_descriptor(CPUState *cpu,
     64      base = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_BASE);
     65      limit = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_LIMIT);
     67      base = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_BASE);
     68      limit = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_LIMIT);
     75      vmx_read_mem(cpu, desc, base + sel.index * 8, sizeof(*desc));
     79  bool x86_write_segment_descriptor(CPUState *cpu,
     87      base = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_BASE);
     88      limit = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_LIMIT);
     90      base = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_BASE);
     [all …]
|
| H A D | x86_task.c |
     34  static void save_state_to_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
     36      X86CPU *x86_cpu = X86_CPU(cpu);
     51      tss->es = vmx_read_segment_selector(cpu, R_ES).sel;
     52      tss->cs = vmx_read_segment_selector(cpu, R_CS).sel;
     53      tss->ss = vmx_read_segment_selector(cpu, R_SS).sel;
     54      tss->ds = vmx_read_segment_selector(cpu, R_DS).sel;
     55      tss->fs = vmx_read_segment_selector(cpu, R_FS).sel;
     56      tss->gs = vmx_read_segment_selector(cpu, R_GS).sel;
     59  static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
     61      X86CPU *x86_cpu = X86_CPU(cpu);
     [all …]
|
| /openbmc/qemu/accel/tcg/ |
| H A D | cpu-exec-common.c |
     28  bool tcg_cflags_has(CPUState *cpu, uint32_t flags)
     30      return cpu->tcg_cflags & flags;
     33  void tcg_cflags_set(CPUState *cpu, uint32_t flags)
     35      cpu->tcg_cflags |= flags;
     38  uint32_t curr_cflags(CPUState *cpu)
     40      uint32_t cflags = cpu->tcg_cflags;
     49      if (unlikely(cpu->singlestep_enabled)) {
     61  void cpu_loop_exit_noexc(CPUState *cpu)
     63      cpu->exception_index = -1;
     64      cpu_loop_exit(cpu);
     [all …]
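
The cpu-exec-common.c hits above are small per-CPU flag helpers: a bit test, a bit set, and a snapshot function that may fold in extra bits (for example when single-stepping). Here is a stand-alone sketch of that flag-helper idiom; the struct, function names, and flag values are invented for illustration and are not QEMU's real CF_* constants.

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Invented flag values, for illustration only. */
    #define CF_EXAMPLE_NOIRQ   (1u << 0)
    #define CF_EXAMPLE_SINGLE  (1u << 1)

    typedef struct CPUSketch {
        uint32_t tcg_cflags;               /* sticky, per-CPU flags */
        bool singlestep_enabled;
    } CPUSketch;

    static bool cflags_has(const CPUSketch *cpu, uint32_t flags)
    {
        return cpu->tcg_cflags & flags;    /* simple bit test */
    }

    static void cflags_set(CPUSketch *cpu, uint32_t flags)
    {
        cpu->tcg_cflags |= flags;          /* sticky: never cleared here */
    }

    /* Snapshot the sticky flags, adding transient ones when needed. */
    static uint32_t curr_cflags_sketch(const CPUSketch *cpu)
    {
        uint32_t cflags = cpu->tcg_cflags;

        if (cpu->singlestep_enabled) {
            cflags |= CF_EXAMPLE_SINGLE;
        }
        return cflags;
    }

    int main(void)
    {
        CPUSketch cpu = { 0, true };

        cflags_set(&cpu, CF_EXAMPLE_NOIRQ);
        printf("has NOIRQ: %d, curr: 0x%x\n",
               cflags_has(&cpu, CF_EXAMPLE_NOIRQ), curr_cflags_sketch(&cpu));
        return 0;
    }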
|
| H A D | watchpoint.c |
     54  int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
     59      QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
     68  void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
     74      if (cpu->watchpoint_hit) {
     81          cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
     86      if (cpu->cc->tcg_ops->adjust_watchpoint_address) {
     88          addr = cpu->cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len);
     92      QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
    102      if (!cpu->neg.can_do_io) {
    104          cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
     [all …]
|
| H A D | tcg-accel-ops-icount.c |
    105  void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
    114      g_assert(cpu->neg.icount_decr.u16.low == 0);
    115      g_assert(cpu->icount_extra == 0);
    119      cpu->icount_budget = MIN(icount_get_limit(), cpu_budget);
    120      insns_left = MIN(0xffff, cpu->icount_budget);
    121      cpu->neg.icount_decr.u16.low = insns_left;
    122      cpu->icount_extra = cpu->icount_budget - insns_left;
    124      if (cpu->icount_budget == 0) {
    135  void icount_process_data(CPUState *cpu)
    138      icount_update(cpu);
    [all …]
|
| /openbmc/qemu/target/i386/tcg/system/ |
| H A D | tcg-cpu.c |
     33      X86CPU *cpu = container_of(n, X86CPU, machine_done);   in tcg_cpu_machine_done()
     38      cpu->smram = g_new(MemoryRegion, 1);
     39      memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
     41      memory_region_set_enabled(cpu->smram, true);
     42      memory_region_add_subregion_overlap(cpu->cpu_as_root, 0,
     43                                          cpu->smram, 1);
     49      X86CPU *cpu = X86_CPU(cs);   in tcg_cpu_realizefn()
     60      cpu->cpu_as_mem = g_new(MemoryRegion, 1);
     61      cpu->cpu_as_root = g_new(MemoryRegion, 1);
     64      memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
     [all …]
|
| /openbmc/qemu/hw/intc/ |
| H A D | arm_gic.c |
     80  static inline bool gic_cpu_ns_access(GICState *s, int cpu, MemTxAttrs attrs)
     82      return !gic_is_vcpu(cpu) && s->security_extn && !attrs.secure;
     85  static inline void gic_get_best_irq(GICState *s, int cpu,
     89      int cm = 1 << cpu;
     98      if (GIC_DIST_GET_PRIORITY(irq, cpu) < *best_prio) {
     99          *best_prio = GIC_DIST_GET_PRIORITY(irq, cpu);
    110  static inline void gic_get_best_virq(GICState *s, int cpu,
    119      uint32_t lr_entry = s->h_lr[lr_idx][cpu];
    142  static inline bool gic_irq_signaling_enabled(GICState *s, int cpu, bool virt,
    145      int cpu_iface = virt ? (cpu + GIC_NCPU) : cpu;
     [all …]
|
| H A D | arm_gic_kvm.c |
     55      int irqtype, cpu;   in kvm_arm_gic_set_irq()
     63      cpu = 0;
     69      cpu = irq / GIC_INTERNAL;
     72      kvm_arm_set_irq(cpu, irqtype, irq, !!level);
     87  #define KVM_VGIC_ATTR(offset, cpu) \
     88      ((((uint64_t)(cpu) << KVM_DEV_ARM_VGIC_CPUID_SHIFT) & \
     93  static void kvm_gicd_access(GICState *s, int offset, int cpu,
     97                    KVM_VGIC_ATTR(offset, cpu), val, write, &error_abort);
    100  static void kvm_gicc_access(GICState *s, int offset, int cpu,
    104                    KVM_VGIC_ATTR(offset, cpu), val, write, &error_abort);
     [all …]
|
| H A D | armv7m_nvic.c |
    108      bool check_sec = arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);   in nvic_rettobase()
    174      if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {   in exc_targets_secure()
    188      return !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
    218      (s->cpu->env.v7m.aircr & R_V7M_AIRCR_PRIS_MASK)) {   in exc_group_prio()
    304      if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {   in nvic_recompute_state()
    344      CPUARMState *env = &s->cpu->env;   in nvic_exec_prio()
    404      if (s->cpu->env.v7m.faultmask[secure]) {   in armv7m_nvic_neg_prio_requested()
    582      cpu_abort(CPU(s->cpu),   in do_armv7m_nvic_set_pending()
    635      if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
    637          !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
     [all …]
|
| /openbmc/qemu/target/arm/tcg/ |
| H A D | cpu-v7m.c |
     22      ARMCPU *cpu = ARM_CPU(cs);   in arm_v7m_cpu_exec_interrupt()
     23      CPUARMState *env = &cpu->env;
     47      ARMCPU *cpu = ARM_CPU(obj);   in cortex_m0_initfn()
     48      ARMISARegisters *isar = &cpu->isar;
     49      set_feature(&cpu->env, ARM_FEATURE_V6);
     50      set_feature(&cpu->env, ARM_FEATURE_M);
     52      cpu->midr = 0x410cc200;
     81      ARMCPU *cpu = ARM_CPU(obj);   in cortex_m3_initfn()
     82      ARMISARegisters *isar = &cpu->isar;
     83      set_feature(&cpu->env, ARM_FEATURE_V7);
     [all …]
|
| /openbmc/qemu/hw/xtensa/ |
| H A D | mx_pic.c |
     69  } cpu[MX_MAX_CPU];
     81      return mx->cpu[offset - MIPICAUSE].mipicause;   in xtensa_mx_pic_ext_reg_read()
     94      return ((mx->n_cpu - 1) << 18) | (mx_cpu - mx->cpu);
    112      unsigned cpu)   in xtensa_mx_pic_get_ipi_for_cpu()
    114      uint32_t mipicause = mx->cpu[cpu].mipicause;
    124      unsigned cpu)   in xtensa_mx_pic_get_ext_irq_for_cpu()
    127      mx->cpu[cpu].mirout_cache) << 2) |
    128      xtensa_mx_pic_get_ipi_for_cpu(mx, cpu);
    131  static void xtensa_mx_pic_update_cpu(XtensaMxPic *mx, unsigned cpu)
    133      uint32_t irq = xtensa_mx_pic_get_ext_irq_for_cpu(mx, cpu);
     [all …]
|
| /openbmc/qemu/target/s390x/ |
| H A D | interrupt.c |
     44  void cpu_inject_clock_comparator(S390CPU *cpu)
     46      CPUS390XState *env = &cpu->env;
     49      cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
     52  void cpu_inject_cpu_timer(S390CPU *cpu)
     54      CPUS390XState *env = &cpu->env;
     57      cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
     60  void cpu_inject_emergency_signal(S390CPU *cpu, uint16_t src_cpu_addr)
     62      CPUS390XState *env = &cpu->env;
     68      cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
     71  int cpu_inject_external_call(S390CPU *cpu, uint16_t src_cpu_addr)
     [all …]
|
| /openbmc/qemu/include/accel/tcg/ |
| H A D | cpu-ops.h |
     62  void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
     81  void (*synchronize_from_tb)(CPUState *cpu, const TranslationBlock *tb);
     90  void (*restore_state_to_opc)(CPUState *cpu, const TranslationBlock *tb,
     94  void (*cpu_exec_enter)(CPUState *cpu);
     96  void (*cpu_exec_exit)(CPUState *cpu);
     98  void (*debug_excp_handler)(CPUState *cpu);
    101  int (*mmu_index)(CPUState *cpu, bool ifetch);
    110  void (*fake_user_interrupt)(CPUState *cpu);
    134  void (*record_sigsegv)(CPUState *cpu, vaddr addr,
    158  void (*record_sigbus)(CPUState *cpu, vaddr addr,
     [all …]
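
The cpu-ops.h hits above are per-CPU hook declarations: a table of function pointers that a target fills in, with optional hooks left NULL and checked before use (the watchpoint.c excerpt earlier shows exactly such a guard on adjust_watchpoint_address). The sketch below illustrates that hook-table pattern only; the struct and hook names are invented stand-ins, not QEMU's real TCGCPUOps interface.

    #include <stdio.h>
    #include <stddef.h>

    typedef struct CPUSketch { int exception_index; } CPUSketch;

    /* Hook table in the style of the header above: optional callbacks stay
     * NULL and are tested before the call; names are illustrative only. */
    typedef struct CPUOpsSketch {
        void (*cpu_exec_enter)(CPUSketch *cpu);
        void (*cpu_exec_exit)(CPUSketch *cpu);            /* optional */
        int  (*mmu_index)(CPUSketch *cpu, int ifetch);    /* mandatory here */
    } CPUOpsSketch;

    static int toy_mmu_index(CPUSketch *cpu, int ifetch)
    {
        (void)cpu;
        return ifetch ? 1 : 0;                 /* trivial two-index scheme */
    }

    static void toy_exec_enter(CPUSketch *cpu)
    {
        printf("enter, exception_index=%d\n", cpu->exception_index);
    }

    static const CPUOpsSketch toy_ops = {
        .cpu_exec_enter = toy_exec_enter,
        .cpu_exec_exit  = NULL,                /* optional hook not provided */
        .mmu_index      = toy_mmu_index,
    };

    int main(void)
    {
        CPUSketch cpu = { .exception_index = -1 };

        if (toy_ops.cpu_exec_enter) {          /* guard optional hooks */
            toy_ops.cpu_exec_enter(&cpu);
        }
        printf("mmu index: %d\n", toy_ops.mmu_index(&cpu, 1));
        return 0;
    }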
|
| /openbmc/qemu/target/ppc/ |
| H A D | mmu-hash64.c |
     53  static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
     55      CPUPPCState *env = &cpu->env;
     64      for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
     85  void dump_slb(PowerPCCPU *cpu)
     87      CPUPPCState *env = &cpu->env;
     91      cpu_synchronize_state(CPU(cpu));
     94      for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
    108      PowerPCCPU *cpu = env_archcpu(env);   in helper_SLBIA()
    161      for (n = starting_entry; n < cpu->hash64_opts->slb_size; n++) {
    181      PowerPCCPU *cpu = env_archcpu(env);   in helper_SLBIAG()
     [all …]
|
| /openbmc/qemu/target/i386/kvm/ |
| H A D | kvm-cpu.c |
     23      X86CPU *cpu = X86_CPU(cs);   in kvm_set_guest_phys_bits()
     31      cpu->guest_phys_bits = guest_phys_bits;
     32      if (cpu->guest_phys_bits > cpu->phys_bits) {
     33          cpu->guest_phys_bits = cpu->phys_bits;
     36      if (cpu->host_phys_bits && cpu->host_phys_bits_limit &&
     37          cpu->guest_phys_bits > cpu->host_phys_bits_limit) {
     38          cpu->guest_phys_bits = cpu->host_phys_bits_limit;
     44      X86CPU *cpu = X86_CPU(cs);   in kvm_cpu_realizefn()
     45      X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
     46      CPUX86State *env = &cpu->env;
     [all …]
|
| /openbmc/qemu/hw/s390x/ |
| H A D | cpu-topology.c |
     61  static int s390_socket_nb(S390CPU *cpu)
     63      return s390_socket_nb_from_ids(cpu->env.drawer_id, cpu->env.book_id,
     64                                     cpu->env.socket_id);
    106  void s390_handle_ptf(S390CPU *cpu, uint8_t r1, uintptr_t ra)
    109      CPUS390XState *env = &cpu->env;
    136      setcc(cpu, 2);
    141      setcc(cpu, 0);
    182  static bool s390_topology_cpu_default(S390CPU *cpu, Error **errp)
    185      CPUS390XState *env = &cpu->env;
    275  static bool s390_topology_need_report(S390CPU *cpu, int drawer_id,
     [all …]
|
| /openbmc/u-boot/arch/arm/cpu/armv7/sunxi/ |
| H A D | psci.c |
     37  #define SUN8I_R40_PWR_CLAMP(cpu) (0x120 + (cpu) * 0x4)
    103      int cpu)   in sunxi_power_switch()
    110      clrbits_le32(pwroff, BIT(cpu));
    113      setbits_le32(pwroff, BIT(cpu));
    139  static void __secure sunxi_cpu_set_power(int __always_unused cpu, bool on)
    148  static void __secure sunxi_cpu_set_power(int cpu, bool on)
    153      sunxi_power_switch((void *)cpucfg + SUN8I_R40_PWR_CLAMP(cpu),
    158  static void __secure sunxi_cpu_set_power(int cpu, bool on)
    163      sunxi_power_switch(&prcm->cpu_pwr_clamp[cpu], &prcm->cpu_pwroff,
    164                         on, cpu);
     [all …]
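
The psci.c hits above show two recurring idioms: a per-CPU register offset computed as base + cpu * stride, and per-CPU power gating done by clearing or setting one bit in a power-off word. The following sketch reproduces only those idioms in portable C; the base value is made up, and clrbits32()/setbits32() are plain-memory stand-ins for U-Boot's MMIO clrbits_le32()/setbits_le32(), so this is an illustration of the bit-manipulation pattern, not the real driver.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    #define BIT(n)  (1u << (n))

    /* Per-CPU register-offset idiom (base value invented for this sketch). */
    #define PWR_CLAMP_OFF(cpu)  (0x120 + (cpu) * 0x4)

    /* Stand-ins operating on a plain word instead of a device register. */
    static void clrbits32(uint32_t *reg, uint32_t mask) { *reg &= ~mask; }
    static void setbits32(uint32_t *reg, uint32_t mask) { *reg |= mask; }

    /* Power a core on by clearing its bit in the power-off word, off by
     * setting it -- the on/off split suggested by the listing above. */
    static void power_switch(uint32_t *pwroff, bool on, int cpu)
    {
        if (on) {
            clrbits32(pwroff, BIT(cpu));
        } else {
            setbits32(pwroff, BIT(cpu));
        }
    }

    int main(void)
    {
        uint32_t pwroff = 0xf;                 /* all four cores gated off */

        power_switch(&pwroff, true, 2);        /* release core 2 */
        printf("clamp offset for cpu2: 0x%x, pwroff now 0x%x\n",
               PWR_CLAMP_OFF(2), pwroff);
        return 0;
    }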
|
| /openbmc/qemu/include/accel/ |
| H A D | accel-cpu-ops.h |
     38  void (*cpu_reset_hold)(CPUState *cpu);
     40  void (*create_vcpu_thread)(CPUState *cpu); /* MANDATORY NON-NULL */
     41  void (*kick_vcpu_thread)(CPUState *cpu);
     42  bool (*cpu_thread_is_idle)(CPUState *cpu);
     52  void (*synchronize_post_reset)(CPUState *cpu);
     53  void (*synchronize_post_init)(CPUState *cpu);
     62  void (*synchronize_state)(CPUState *cpu);
     63  void (*synchronize_pre_loadvm)(CPUState *cpu);
     66  void (*handle_interrupt)(CPUState *cpu, int mask);
     69  void (*get_vcpu_stats)(CPUState *cpu, GString *buf);
     [all …]
|