1fcf5ef2aSThomas Huth /*
2fcf5ef2aSThomas Huth * PowerPC implementation of KVM hooks
3fcf5ef2aSThomas Huth *
4fcf5ef2aSThomas Huth * Copyright IBM Corp. 2007
5fcf5ef2aSThomas Huth * Copyright (C) 2011 Freescale Semiconductor, Inc.
6fcf5ef2aSThomas Huth *
7fcf5ef2aSThomas Huth * Authors:
8fcf5ef2aSThomas Huth * Jerone Young <jyoung5@us.ibm.com>
9fcf5ef2aSThomas Huth * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
10fcf5ef2aSThomas Huth * Hollis Blanchard <hollisb@us.ibm.com>
11fcf5ef2aSThomas Huth *
12fcf5ef2aSThomas Huth * This work is licensed under the terms of the GNU GPL, version 2 or later.
13fcf5ef2aSThomas Huth * See the COPYING file in the top-level directory.
14fcf5ef2aSThomas Huth *
15fcf5ef2aSThomas Huth */
16fcf5ef2aSThomas Huth
17fcf5ef2aSThomas Huth #include "qemu/osdep.h"
18fcf5ef2aSThomas Huth #include <dirent.h>
19fcf5ef2aSThomas Huth #include <sys/ioctl.h>
20fcf5ef2aSThomas Huth #include <sys/vfs.h>
21fcf5ef2aSThomas Huth
22fcf5ef2aSThomas Huth #include <linux/kvm.h>
23fcf5ef2aSThomas Huth
2430f4b05bSDavid Gibson #include "qapi/error.h"
25fcf5ef2aSThomas Huth #include "qemu/error-report.h"
26fcf5ef2aSThomas Huth #include "cpu.h"
27715d4b96SThomas Huth #include "cpu-models.h"
28fcf5ef2aSThomas Huth #include "qemu/timer.h"
29b3946626SVincent Palatin #include "sysemu/hw_accel.h"
30fcf5ef2aSThomas Huth #include "kvm_ppc.h"
31fcf5ef2aSThomas Huth #include "sysemu/cpus.h"
32fcf5ef2aSThomas Huth #include "sysemu/device_tree.h"
33fcf5ef2aSThomas Huth #include "mmu-hash64.h"
34fcf5ef2aSThomas Huth
35fcf5ef2aSThomas Huth #include "hw/ppc/spapr.h"
36fcf5ef2aSThomas Huth #include "hw/ppc/spapr_cpu_core.h"
37650d103dSMarkus Armbruster #include "hw/hw.h"
38fcf5ef2aSThomas Huth #include "hw/ppc/ppc.h"
39ca77ee28SMarkus Armbruster #include "migration/qemu-file-types.h"
40fcf5ef2aSThomas Huth #include "sysemu/watchdog.h"
41fcf5ef2aSThomas Huth #include "trace.h"
425b7d54d4SAlex Bennée #include "gdbstub/enums.h"
43fcf5ef2aSThomas Huth #include "exec/memattrs.h"
449c607668SAlexey Kardashevskiy #include "exec/ram_addr.h"
45fcf5ef2aSThomas Huth #include "sysemu/hostmem.h"
46fcf5ef2aSThomas Huth #include "qemu/cutils.h"
47db725815SMarkus Armbruster #include "qemu/main-loop.h"
489c607668SAlexey Kardashevskiy #include "qemu/mmap-alloc.h"
49f3d9f303SSam Bobroff #include "elf.h"
50c64abd1fSSam Bobroff #include "sysemu/kvm_int.h"
51cfb52d07SHarsh Prateek Bora #include "sysemu/kvm.h"
52cfb52d07SHarsh Prateek Bora #include "hw/core/accel-cpu.h"
53fcf5ef2aSThomas Huth
54566abdb4SPaolo Bonzini #include CONFIG_DEVICES
55566abdb4SPaolo Bonzini
56fcf5ef2aSThomas Huth #define PROC_DEVTREE_CPU "/proc/device-tree/cpus/"
57fcf5ef2aSThomas Huth
586e0552a3SFabiano Rosas #define DEBUG_RETURN_GUEST 0
596e0552a3SFabiano Rosas #define DEBUG_RETURN_GDB 1
606e0552a3SFabiano Rosas
/* No PPC-specific capabilities are strictly required beyond the generic set */
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

/*
 * Host kernel KVM capability flags.  Probed once in kvm_arch_init()
 * and consulted throughout this file.
 */
static int cap_interrupt_unset;
static int cap_segstate;
static int cap_booke_sregs;
static int cap_ppc_smt;
static int cap_ppc_smt_possible;
static int cap_spapr_tce;
static int cap_spapr_tce_64;
static int cap_spapr_multitce;
static int cap_spapr_vfio;
static int cap_hior;
static int cap_one_reg;
static int cap_epr;
static int cap_ppc_watchdog;
static int cap_htab_fd;
static int cap_fixup_hcalls;
static int cap_htm; /* Hardware transactional memory support */
static int cap_mmu_radix;
static int cap_mmu_hash_v3;
static int cap_xive;
static int cap_resize_hpt;
static int cap_ppc_pvr_compat;
static int cap_ppc_safe_cache;
static int cap_ppc_safe_bounds_check;
static int cap_ppc_safe_indirect_branch;
static int cap_ppc_count_cache_flush_assist;
static int cap_ppc_nested_kvm_hv;
static int cap_large_decr;
static int cap_fwnmi;
static int cap_rpt_invalidate;
static int cap_ail_mode_3;

/* Only set by kvmppc_set_papr(); hard-wired to 0 when pseries is not built */
#ifdef CONFIG_PSERIES
static int cap_papr;
#else
#define cap_papr (0)
#endif

/* Opcode fetched from KVM_REG_PPC_DEBUG_INST in kvm_arch_init_vcpu() */
static uint32_t debug_inst_opcode;
103fcf5ef2aSThomas Huth
104c995e942SDavid Gibson /*
105c995e942SDavid Gibson * Check whether we are running with KVM-PR (instead of KVM-HV). This
106fcf5ef2aSThomas Huth * should only be used for fallback tests - generally we should use
107fcf5ef2aSThomas Huth * explicit capabilities for the features we want, rather than
108c995e942SDavid Gibson * assuming what is/isn't available depending on the KVM variant.
109c995e942SDavid Gibson */
kvmppc_is_pr(KVMState * ks)110fcf5ef2aSThomas Huth static bool kvmppc_is_pr(KVMState *ks)
111fcf5ef2aSThomas Huth {
112fcf5ef2aSThomas Huth /* Assume KVM-PR if the GET_PVINFO capability is available */
11370a0c19eSGreg Kurz return kvm_vm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
114fcf5ef2aSThomas Huth }
115fcf5ef2aSThomas Huth
116165dc3edSDavid Gibson static int kvm_ppc_register_host_cpu_type(void);
1178acc2ae5SSuraj Jitindar Singh static void kvmppc_get_cpu_characteristics(KVMState *s);
1187d050527SSuraj Jitindar Singh static int kvmppc_get_dec_bits(void);
119fcf5ef2aSThomas Huth
/* PPC defines a single KVM VM type, so the default type is always 0. */
int kvm_arch_get_default_type(MachineState *ms)
{
    return 0;
}
1245e0d6590SAkihiko Odaki
/*
 * Accelerator-wide init hook: probe the host kernel's PPC KVM
 * capabilities once, cache them in the file-scope cap_* flags and
 * register the host CPU type.  Exits if level-triggered irq support,
 * which this port requires, is missing.  Returns 0 on success.
 */
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
    cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
    cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
    cap_ppc_smt_possible = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT_POSSIBLE);
    cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
    cap_spapr_tce_64 = kvm_check_extension(s, KVM_CAP_SPAPR_TCE_64);
    cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
    cap_spapr_vfio = kvm_vm_check_extension(s, KVM_CAP_SPAPR_TCE_VFIO);
    cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
    cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
    cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
    cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
    /*
     * Note: we don't set cap_papr here, because this capability is
     * only activated after this by kvmppc_set_papr()
     */
    cap_htab_fd = kvm_vm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
    cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
    cap_ppc_smt = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT);
    cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
    cap_mmu_radix = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_RADIX);
    cap_mmu_hash_v3 = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_HASH_V3);
    cap_xive = kvm_vm_check_extension(s, KVM_CAP_PPC_IRQ_XIVE);
    cap_resize_hpt = kvm_vm_check_extension(s, KVM_CAP_SPAPR_RESIZE_HPT);
    /* Fills in the cap_ppc_safe_* and count-cache-flush flags */
    kvmppc_get_cpu_characteristics(s);
    cap_ppc_nested_kvm_hv = kvm_vm_check_extension(s, KVM_CAP_PPC_NESTED_HV);
    cap_large_decr = kvmppc_get_dec_bits();
    cap_fwnmi = kvm_vm_check_extension(s, KVM_CAP_PPC_FWNMI);
    /*
     * Note: setting it to false because there is not such capability
     * in KVM at this moment.
     *
     * TODO: call kvm_vm_check_extension() with the right capability
     * after the kernel starts implementing it.
     */
    cap_ppc_pvr_compat = false;

    /* Level-triggered irq support is mandatory for this port */
    if (!kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL)) {
        error_report("KVM: Host kernel doesn't have level irq capability");
        exit(1);
    }

    cap_rpt_invalidate = kvm_vm_check_extension(s, KVM_CAP_PPC_RPT_INVALIDATE);
    cap_ail_mode_3 = kvm_vm_check_extension(s, KVM_CAP_PPC_AIL_MODE_3);
    kvm_ppc_register_host_cpu_type();

    return 0;
}
175fcf5ef2aSThomas Huth
/* Nothing to do to create an in-kernel irqchip here; report success. */
int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}
180d525ffabSPaolo Bonzini
/*
 * Tell the kernel which PVR the guest uses by pushing it through
 * KVM_SET_SREGS.  Returns 0 on success or a negative errno.
 */
static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_sregs sregs;
    int ret;

    /*
     * What we're really trying to say is "if we're on BookE, we use
     * the native PVR for now".  This is the only sane way to check it
     * though, so we potentially confuse users that they can run BookE
     * guests on BookS.  Let's hope nobody dares enough :)
     */
    if (env->excp_model == POWERPC_EXCP_BOOKE) {
        return 0;
    }

    if (!cap_segstate) {
        fprintf(stderr, "kvm error: missing PVR setting capability\n");
        return -ENOSYS;
    }

    ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
    if (ret) {
        return ret;
    }

    sregs.pvr = env->spr[SPR_PVR];
    return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
}
212fcf5ef2aSThomas Huth
/* Set up a shared TLB array with KVM */
static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_book3e_206_tlb_params params = {};
    struct kvm_config_tlb cfg = {};
    unsigned int entries = 0;
    int ret, i;

    /* Without KVM_CAP_SW_TLB there is nothing to share; that's not an error */
    if (!kvm_enabled() ||
        !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
        return 0;
    }

    assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);

    /* Describe each TLB's geometry to the kernel and count total entries */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        params.tlb_sizes[i] = booke206_tlb_size(env, i);
        params.tlb_ways[i] = booke206_tlb_ways(env, i);
        entries += params.tlb_sizes[i];
    }

    /* The shared array layout must match QEMU's own TLB representation */
    assert(entries == env->nb_tlb);
    assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));

    env->tlb_dirty = true;

    /* env->tlb.tlbm becomes the array shared with the kernel */
    cfg.array = (uintptr_t)env->tlb.tlbm;
    cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
    cfg.params = (uintptr_t)&params;
    cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;

    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
    if (ret < 0) {
        fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
                __func__, strerror(-ret));
        return ret;
    }

    env->kvm_sw_tlb = true;
    return 0;
}
256fcf5ef2aSThomas Huth
257fcf5ef2aSThomas Huth
258fcf5ef2aSThomas Huth #if defined(TARGET_PPC64)
kvm_get_smmu_info(struct kvm_ppc_smmu_info * info,Error ** errp)259ab256960SGreg Kurz static void kvm_get_smmu_info(struct kvm_ppc_smmu_info *info, Error **errp)
260fcf5ef2aSThomas Huth {
261fcf5ef2aSThomas Huth int ret;
262fcf5ef2aSThomas Huth
263ab256960SGreg Kurz assert(kvm_state != NULL);
264ab256960SGreg Kurz
265ab256960SGreg Kurz if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
26671d0f1eaSGreg Kurz error_setg(errp, "KVM doesn't expose the MMU features it supports");
26771d0f1eaSGreg Kurz error_append_hint(errp, "Consider switching to a newer KVM\n");
26871d0f1eaSGreg Kurz return;
26971d0f1eaSGreg Kurz }
27071d0f1eaSGreg Kurz
271ab256960SGreg Kurz ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_SMMU_INFO, info);
272fcf5ef2aSThomas Huth if (ret == 0) {
273fcf5ef2aSThomas Huth return;
274fcf5ef2aSThomas Huth }
275fcf5ef2aSThomas Huth
27671d0f1eaSGreg Kurz error_setg_errno(errp, -ret,
27771d0f1eaSGreg Kurz "KVM failed to provide the MMU features it supports");
278fcf5ef2aSThomas Huth }
279fcf5ef2aSThomas Huth
kvmppc_get_radix_page_info(void)280aa6edf97SPhilippe Mathieu-Daudé static struct ppc_radix_page_info *kvmppc_get_radix_page_info(void)
281c64abd1fSSam Bobroff {
2824f7f5893SPhilippe Mathieu-Daudé KVMState *s = KVM_STATE(current_accel());
283c64abd1fSSam Bobroff struct ppc_radix_page_info *radix_page_info;
28455baf4b5SDaniel Henrique Barboza struct kvm_ppc_rmmu_info rmmu_info = { };
285c64abd1fSSam Bobroff int i;
286c64abd1fSSam Bobroff
287c64abd1fSSam Bobroff if (!kvm_check_extension(s, KVM_CAP_PPC_MMU_RADIX)) {
288c64abd1fSSam Bobroff return NULL;
289c64abd1fSSam Bobroff }
290c64abd1fSSam Bobroff if (kvm_vm_ioctl(s, KVM_PPC_GET_RMMU_INFO, &rmmu_info)) {
291c64abd1fSSam Bobroff return NULL;
292c64abd1fSSam Bobroff }
293c64abd1fSSam Bobroff radix_page_info = g_malloc0(sizeof(*radix_page_info));
294c64abd1fSSam Bobroff radix_page_info->count = 0;
295c64abd1fSSam Bobroff for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
296c64abd1fSSam Bobroff if (rmmu_info.ap_encodings[i]) {
297c64abd1fSSam Bobroff radix_page_info->entries[i] = rmmu_info.ap_encodings[i];
298c64abd1fSSam Bobroff radix_page_info->count++;
299c64abd1fSSam Bobroff }
300c64abd1fSSam Bobroff }
301c64abd1fSSam Bobroff return radix_page_info;
302c64abd1fSSam Bobroff }
303c64abd1fSSam Bobroff
kvmppc_configure_v3_mmu(PowerPCCPU * cpu,bool radix,bool gtse,uint64_t proc_tbl)304b4db5413SSuraj Jitindar Singh target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
305b4db5413SSuraj Jitindar Singh bool radix, bool gtse,
306b4db5413SSuraj Jitindar Singh uint64_t proc_tbl)
307b4db5413SSuraj Jitindar Singh {
308b4db5413SSuraj Jitindar Singh CPUState *cs = CPU(cpu);
309b4db5413SSuraj Jitindar Singh int ret;
310b4db5413SSuraj Jitindar Singh uint64_t flags = 0;
311b4db5413SSuraj Jitindar Singh struct kvm_ppc_mmuv3_cfg cfg = {
312b4db5413SSuraj Jitindar Singh .process_table = proc_tbl,
313b4db5413SSuraj Jitindar Singh };
314b4db5413SSuraj Jitindar Singh
315b4db5413SSuraj Jitindar Singh if (radix) {
316b4db5413SSuraj Jitindar Singh flags |= KVM_PPC_MMUV3_RADIX;
317b4db5413SSuraj Jitindar Singh }
318b4db5413SSuraj Jitindar Singh if (gtse) {
319b4db5413SSuraj Jitindar Singh flags |= KVM_PPC_MMUV3_GTSE;
320b4db5413SSuraj Jitindar Singh }
321b4db5413SSuraj Jitindar Singh cfg.flags = flags;
322b4db5413SSuraj Jitindar Singh ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
323b4db5413SSuraj Jitindar Singh switch (ret) {
324b4db5413SSuraj Jitindar Singh case 0:
325b4db5413SSuraj Jitindar Singh return H_SUCCESS;
326b4db5413SSuraj Jitindar Singh case -EINVAL:
327b4db5413SSuraj Jitindar Singh return H_PARAMETER;
328b4db5413SSuraj Jitindar Singh case -ENODEV:
329b4db5413SSuraj Jitindar Singh return H_NOT_AVAILABLE;
330b4db5413SSuraj Jitindar Singh default:
331b4db5413SSuraj Jitindar Singh return H_HARDWARE;
332b4db5413SSuraj Jitindar Singh }
333b4db5413SSuraj Jitindar Singh }
334b4db5413SSuraj Jitindar Singh
kvmppc_hpt_needs_host_contiguous_pages(void)33524c6863cSDavid Gibson bool kvmppc_hpt_needs_host_contiguous_pages(void)
33624c6863cSDavid Gibson {
33724c6863cSDavid Gibson static struct kvm_ppc_smmu_info smmu_info;
33824c6863cSDavid Gibson
33924c6863cSDavid Gibson if (!kvm_enabled()) {
34024c6863cSDavid Gibson return false;
34124c6863cSDavid Gibson }
34224c6863cSDavid Gibson
343ab256960SGreg Kurz kvm_get_smmu_info(&smmu_info, &error_fatal);
34424c6863cSDavid Gibson return !!(smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL);
34524c6863cSDavid Gibson }
34624c6863cSDavid Gibson
/*
 * Verify that the host KVM MMU can implement everything the configured
 * guest CPU model advertises: 1T segments, SLB size, and every
 * segment/page size encoding.  Any mismatch is reported through @errp.
 */
void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
{
    struct kvm_ppc_smmu_info smmu_info;
    int iq, ik, jq, jk;
    Error *local_err = NULL;

    /* For now, we only have anything to check on hash64 MMUs */
    if (!cpu->hash64_opts || !kvm_enabled()) {
        return;
    }

    kvm_get_smmu_info(&smmu_info, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)
        && !(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
        error_setg(errp,
                   "KVM does not support 1TiB segments which guest expects");
        return;
    }

    if (smmu_info.slb_size < cpu->hash64_opts->slb_size) {
        error_setg(errp, "KVM only supports %u SLB entries, but guest needs %u",
                   smmu_info.slb_size, cpu->hash64_opts->slb_size);
        return;
    }

    /*
     * Verify that every pagesize supported by the cpu model is
     * supported by KVM with the same encodings
     */
    for (iq = 0; iq < ARRAY_SIZE(cpu->hash64_opts->sps); iq++) {
        PPCHash64SegmentPageSizes *qsps = &cpu->hash64_opts->sps[iq];
        struct kvm_ppc_one_seg_page_size *ksps;

        /* Find the KVM segment size entry with the same base page shift */
        for (ik = 0; ik < ARRAY_SIZE(smmu_info.sps); ik++) {
            if (qsps->page_shift == smmu_info.sps[ik].page_shift) {
                break;
            }
        }
        if (ik >= ARRAY_SIZE(smmu_info.sps)) {
            error_setg(errp, "KVM doesn't support for base page shift %u",
                       qsps->page_shift);
            return;
        }

        ksps = &smmu_info.sps[ik];
        if (ksps->slb_enc != qsps->slb_enc) {
            error_setg(errp,
                       "KVM uses SLB encoding 0x%x for page shift %u, but guest expects 0x%x",
                       ksps->slb_enc, ksps->page_shift, qsps->slb_enc);
            return;
        }

        /* Each actual page size must also exist with the same PTE encoding */
        for (jq = 0; jq < ARRAY_SIZE(qsps->enc); jq++) {
            for (jk = 0; jk < ARRAY_SIZE(ksps->enc); jk++) {
                if (qsps->enc[jq].page_shift == ksps->enc[jk].page_shift) {
                    break;
                }
            }

            if (jk >= ARRAY_SIZE(ksps->enc)) {
                error_setg(errp, "KVM doesn't support page shift %u/%u",
                           qsps->enc[jq].page_shift, qsps->page_shift);
                return;
            }
            if (qsps->enc[jq].pte_enc != ksps->enc[jk].pte_enc) {
                error_setg(errp,
                           "KVM uses PTE encoding 0x%x for page shift %u/%u, but guest expects 0x%x",
                           ksps->enc[jk].pte_enc, qsps->enc[jq].page_shift,
                           qsps->page_shift, qsps->enc[jq].pte_enc);
                return;
            }
        }
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
        /*
         * Mostly what guest pagesizes we can use are related to the
         * host pages used to map guest RAM, which is handled in the
         * platform code. Cache-Inhibited largepages (64k) however are
         * used for I/O, so if they're mapped to the host at all it
         * will be a normal mapping, not a special hugepage one used
         * for RAM.
         */
        if (qemu_real_host_page_size() < 0x10000) {
            error_setg(errp,
                       "KVM can't supply 64kiB CI pages, which guest expects");
        }
    }
}
441fcf5ef2aSThomas Huth #endif /* !defined (TARGET_PPC64) */
442fcf5ef2aSThomas Huth
/* Map a QEMU CPUState to the vcpu id we hand to the KVM kernel side. */
unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);

    return ppc_cpu->vcpu_id;
}
447fcf5ef2aSThomas Huth
/*
 * e500 supports 2 h/w breakpoint and 2 watchpoint. book3s supports
 * only 1 watchpoint, so array size of 4 is sufficient for now.
 */
#define MAX_HW_BKPTS 4

/* Table of the h/w breakpoints/watchpoints currently registered with KVM */
static struct HWBreakpoint {
    target_ulong addr;
    int type;
} hw_debug_points[MAX_HW_BKPTS];

static CPUWatchpoint hw_watchpoint;

/* Default there is no breakpoint and watchpoint supported */
static int max_hw_breakpoint;   /* limits set by kvmppc_hw_debug_points_init() */
static int max_hw_watchpoint;
static int nb_hw_breakpoint;    /* counts of points currently in use */
static int nb_hw_watchpoint;
466fcf5ef2aSThomas Huth
/*
 * Initialise the h/w debug point limits for this CPU model.  Only BookE
 * advertises h/w debug resources here; everything else stays at 0.
 */
static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
{
    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
        max_hw_breakpoint = 2;
        max_hw_watchpoint = 2;
    }

    /* Sanity check against the size of hw_debug_points[] */
    if (max_hw_breakpoint + max_hw_watchpoint > MAX_HW_BKPTS) {
        fprintf(stderr, "Error initializing h/w breakpoints\n");
    }
}
479fcf5ef2aSThomas Huth
/*
 * Per-vcpu KVM init: push the PVR to the kernel, perform MMU-model
 * specific setup, and fetch the opcode KVM uses for software
 * breakpoints.  Returns 0 on success or a negative errno.
 */
int kvm_arch_init_vcpu(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *cenv = &cpu->env;
    int ret;

    /* Synchronize sregs with kvm */
    ret = kvm_arch_sync_sregs(cpu);
    if (ret) {
        if (ret == -EINVAL) {
            error_report("Register sync failed... If you're using kvm-hv.ko,"
                         " only \"-cpu host\" is possible");
        }
        return ret;
    }

    switch (cenv->mmu_model) {
    case POWERPC_MMU_BOOKE206:
        /* This target supports access to KVM's guest TLB */
        ret = kvm_booke206_tlb_init(cpu);
        break;
    case POWERPC_MMU_2_07:
        if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
            /*
             * KVM-HV has transactional memory on POWER8 also without
             * the KVM_CAP_PPC_HTM extension, so enable it here
             * instead as long as it's available to userspace on the
             * host.
             */
            if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
                cap_htm = true;
            }
        }
        break;
    default:
        break;
    }

    /* debug_inst_opcode: sw-breakpoint instruction as reported by KVM */
    kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
    kvmppc_hw_debug_points_init(cenv);

    return ret;
}
523fcf5ef2aSThomas Huth
/* Per-vcpu teardown hook: nothing to release on PPC. */
int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}
528b1115c99SLiran Alon
/*
 * Mark every entry of the shared software TLB dirty via KVM_DIRTY_TLB,
 * so the kernel picks up QEMU-side modifications to the shared array.
 * No-op unless the shared TLB was set up by kvm_booke206_tlb_init().
 */
static void kvm_sw_tlb_put(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_dirty_tlb dirty_tlb;
    unsigned char *bitmap;
    size_t bitmap_len;
    int ret;

    if (!env->kvm_sw_tlb) {
        return;
    }

    /* One bit per TLB entry; set them all so every entry counts as dirty */
    bitmap_len = (env->nb_tlb + 7) / 8;
    bitmap = g_malloc(bitmap_len);
    memset(bitmap, 0xFF, bitmap_len);

    dirty_tlb.bitmap = (uintptr_t)bitmap;
    dirty_tlb.num_dirty = env->nb_tlb;

    ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
    if (ret) {
        fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
                __func__, strerror(-ret));
    }

    g_free(bitmap);
}
555fcf5ef2aSThomas Huth
/*
 * Read one SPR from KVM (KVM_GET_ONE_REG) into env->spr[spr].
 * On ioctl failure the event is traced and the SPR keeps its value.
 */
static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
{
    CPUPPCState *env = cpu_env(cs);
    /* Init 'val' to avoid "uninitialised value" Valgrind warnings */
    union {
        uint32_t u32;
        uint64_t u64;
    } val = { };
    struct kvm_one_reg reg = {
        .id = id,
        .addr = (uintptr_t) &val,
    };
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        trace_kvm_failed_spr_get(spr, strerror(errno));
        return;
    }

    /* The register width is encoded in the id itself */
    switch (id & KVM_REG_SIZE_MASK) {
    case KVM_REG_SIZE_U32:
        env->spr[spr] = val.u32;
        break;
    case KVM_REG_SIZE_U64:
        env->spr[spr] = val.u64;
        break;
    default:
        /* Don't handle this size yet */
        abort();
    }
}
589fcf5ef2aSThomas Huth
/*
 * Write env->spr[spr] to KVM (KVM_SET_ONE_REG).  Failures are only
 * traced; the guest-visible state in QEMU is unaffected.
 */
static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
{
    CPUPPCState *env = cpu_env(cs);
    union {
        uint32_t u32;
        uint64_t u64;
    } val;
    struct kvm_one_reg reg = {
        .id = id,
        .addr = (uintptr_t) &val,
    };

    /* The register width is encoded in the id itself */
    switch (id & KVM_REG_SIZE_MASK) {
    case KVM_REG_SIZE_U32:
        val.u32 = env->spr[spr];
        break;
    case KVM_REG_SIZE_U64:
        val.u64 = env->spr[spr];
        break;
    default:
        /* Don't handle this size yet */
        abort();
    }

    if (kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg) != 0) {
        trace_kvm_failed_spr_set(spr, strerror(errno));
    }
}
622fcf5ef2aSThomas Huth
/*
 * Push the vcpu's floating point, VSX and Altivec state into KVM.
 *
 * FP/VSX registers are transferred as 128-bit VSR images; the
 * host-endian dependent ordering of the two 64-bit halves is fixed up
 * below.  When the CPU has VSX, the full VSR is written, otherwise
 * only the FPR half.
 *
 * Returns 0 on success or the negative error from the failing
 * KVM_SET_ONE_REG ioctl.
 */
static int kvm_put_fp(CPUState *cs)
{
    CPUPPCState *env = cpu_env(cs);
    struct kvm_one_reg reg;
    int i;
    int ret;

    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr = env->fpscr;
        bool vsx = !!(env->insns_flags2 & PPC2_VSX);

        reg.id = KVM_REG_PPC_FPSCR;
        reg.addr = (uintptr_t)&fpscr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_fpscr_set(strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            uint64_t vsr[2];
            uint64_t *fpr = cpu_fpr_ptr(env, i);
            uint64_t *vsrl = cpu_vsrl_ptr(env, i);

            /* KVM expects the VSR doubleword pair in host memory order */
#if HOST_BIG_ENDIAN
            vsr[0] = float64_val(*fpr);
            vsr[1] = *vsrl;
#else
            vsr[0] = *vsrl;
            vsr[1] = float64_val(*fpr);
#endif
            reg.addr = (uintptr_t) &vsr;
            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);

            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
            if (ret < 0) {
                trace_kvm_failed_fp_set(vsx ? "VSR" : "FPR", i,
                                        strerror(errno));
                return ret;
            }
        }
    }

    if (env->insns_flags & PPC_ALTIVEC) {
        reg.id = KVM_REG_PPC_VSCR;
        reg.addr = (uintptr_t)&env->vscr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_vscr_set(strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            reg.id = KVM_REG_PPC_VR(i);
            reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
            if (ret < 0) {
                trace_kvm_failed_vr_set(i, strerror(errno));
                return ret;
            }
        }
    }

    return 0;
}
688fcf5ef2aSThomas Huth
/*
 * Fetch the vcpu's floating point, VSX and Altivec state from KVM
 * into env.  Mirror image of kvm_put_fp(): the 128-bit VSR images
 * read from the kernel are split back into FPR and VSR-low halves
 * according to host endianness.
 *
 * Returns 0 on success or the negative error from the failing
 * KVM_GET_ONE_REG ioctl.
 */
static int kvm_get_fp(CPUState *cs)
{
    CPUPPCState *env = cpu_env(cs);
    struct kvm_one_reg reg;
    int i;
    int ret;

    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        bool vsx = !!(env->insns_flags2 & PPC2_VSX);

        reg.id = KVM_REG_PPC_FPSCR;
        reg.addr = (uintptr_t)&fpscr;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_fpscr_get(strerror(errno));
            return ret;
        } else {
            env->fpscr = fpscr;
        }

        for (i = 0; i < 32; i++) {
            uint64_t vsr[2];
            uint64_t *fpr = cpu_fpr_ptr(env, i);
            uint64_t *vsrl = cpu_vsrl_ptr(env, i);

            reg.addr = (uintptr_t) &vsr;
            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);

            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
            if (ret < 0) {
                trace_kvm_failed_fp_get(vsx ? "VSR" : "FPR", i,
                                        strerror(errno));
                return ret;
            } else {
                /* Doubleword order in vsr[] depends on host endianness */
#if HOST_BIG_ENDIAN
                *fpr = vsr[0];
                if (vsx) {
                    *vsrl = vsr[1];
                }
#else
                *fpr = vsr[1];
                if (vsx) {
                    *vsrl = vsr[0];
                }
#endif
            }
        }
    }

    if (env->insns_flags & PPC_ALTIVEC) {
        reg.id = KVM_REG_PPC_VSCR;
        reg.addr = (uintptr_t)&env->vscr;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_vscr_get(strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            reg.id = KVM_REG_PPC_VR(i);
            reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
            if (ret < 0) {
                trace_kvm_failed_vr_get(i, strerror(errno));
                return ret;
            }
        }
    }

    return 0;
}
761fcf5ef2aSThomas Huth
762fcf5ef2aSThomas Huth #if defined(TARGET_PPC64)
/*
 * Read the sPAPR VPA (Virtual Processor Area), SLB shadow and
 * dispatch trace log registration state from KVM into the
 * SpaprCpuState.
 *
 * The KVM_REG_PPC_VPA_SLB / _DTL one-reg transfers cover a 16-byte
 * (addr, size) pair, hence the layout asserts that the size field
 * immediately follows its address field in SpaprCpuState.
 *
 * Returns 0 on success or the negative error from the failing ioctl.
 */
static int kvm_get_vpa(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    struct kvm_one_reg reg;
    int ret;

    reg.id = KVM_REG_PPC_VPA_ADDR;
    reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        trace_kvm_failed_vpa_addr_get(strerror(errno));
        return ret;
    }

    assert((uintptr_t)&spapr_cpu->slb_shadow_size
           == ((uintptr_t)&spapr_cpu->slb_shadow_addr + 8));
    reg.id = KVM_REG_PPC_VPA_SLB;
    reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        trace_kvm_failed_slb_get(strerror(errno));
        return ret;
    }

    assert((uintptr_t)&spapr_cpu->dtl_size
           == ((uintptr_t)&spapr_cpu->dtl_addr + 8));
    reg.id = KVM_REG_PPC_VPA_DTL;
    reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        trace_kvm_failed_dtl_get(strerror(errno));
        return ret;
    }

    return 0;
}
800fcf5ef2aSThomas Huth
/*
 * Write the sPAPR VPA, SLB shadow and dispatch trace log
 * registration state from the SpaprCpuState into KVM.
 *
 * Returns 0 on success or the negative error from the failing ioctl.
 */
static int kvm_put_vpa(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    struct kvm_one_reg reg;
    int ret;

    /*
     * SLB shadow or DTL can't be registered unless a master VPA is
     * registered. That means when restoring state, if a VPA *is*
     * registered, we need to set that up first. If not, we need to
     * deregister the others before deregistering the master VPA
     */
    assert(spapr_cpu->vpa_addr
           || !(spapr_cpu->slb_shadow_addr || spapr_cpu->dtl_addr));

    if (spapr_cpu->vpa_addr) {
        reg.id = KVM_REG_PPC_VPA_ADDR;
        reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_vpa_addr_set(strerror(errno));
            return ret;
        }
    }

    /* The (addr, size) pair must be contiguous for the 16-byte transfer */
    assert((uintptr_t)&spapr_cpu->slb_shadow_size
           == ((uintptr_t)&spapr_cpu->slb_shadow_addr + 8));
    reg.id = KVM_REG_PPC_VPA_SLB;
    reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        trace_kvm_failed_slb_set(strerror(errno));
        return ret;
    }

    assert((uintptr_t)&spapr_cpu->dtl_size
           == ((uintptr_t)&spapr_cpu->dtl_addr + 8));
    reg.id = KVM_REG_PPC_VPA_DTL;
    reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        trace_kvm_failed_dtl_set(strerror(errno));
        return ret;
    }

    /* Deregister the master VPA last, after its dependents are gone */
    if (!spapr_cpu->vpa_addr) {
        reg.id = KVM_REG_PPC_VPA_ADDR;
        reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_null_vpa_addr_set(strerror(errno));
            return ret;
        }
    }

    return 0;
}
859fcf5ef2aSThomas Huth #endif /* TARGET_PPC64 */
860fcf5ef2aSThomas Huth
/*
 * Push the Book3S segment/MMU state (PVR, SDR1, SLB, segment
 * registers and BATs) from env into KVM via KVM_SET_SREGS.
 *
 * When a virtual hypervisor is in use, the HPT description for KVM PR
 * comes from the vhyp class hook instead of SPR_SDR1.
 *
 * Returns the result of the KVM_SET_SREGS ioctl.
 */
int kvmppc_put_books_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    struct kvm_sregs sregs = { };
    int i;

    sregs.pvr = env->spr[SPR_PVR];

    if (cpu->vhyp) {
        sregs.u.s.sdr1 = cpu->vhyp_class->encode_hpt_for_kvm_pr(cpu->vhyp);
    } else {
        sregs.u.s.sdr1 = env->spr[SPR_SDR1];
    }

    /* Sync SLB */
#ifdef TARGET_PPC64
    for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
        sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
        if (env->slb[i].esid & SLB_ESID_V) {
            /* Valid entries carry their index in the low bits of slbe */
            sregs.u.s.ppc64.slb[i].slbe |= i;
        }
        sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
    }
#endif

    /* Sync SRs */
    for (i = 0; i < 16; i++) {
        sregs.u.s.ppc32.sr[i] = env->sr[i];
    }

    /* Sync BATs */
    for (i = 0; i < 8; i++) {
        /* Beware. We have to swap upper and lower bits here */
        sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
            | env->DBAT[1][i];
        sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
            | env->IBAT[1][i];
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}
902fcf5ef2aSThomas Huth
kvm_arch_put_registers(CPUState * cs,int level,Error ** errp)903*a1676bb3SJulia Suvorova int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
904fcf5ef2aSThomas Huth {
905fcf5ef2aSThomas Huth PowerPCCPU *cpu = POWERPC_CPU(cs);
906fcf5ef2aSThomas Huth CPUPPCState *env = &cpu->env;
907fcf5ef2aSThomas Huth struct kvm_regs regs;
908fcf5ef2aSThomas Huth int ret;
909fcf5ef2aSThomas Huth int i;
910fcf5ef2aSThomas Huth
911fcf5ef2aSThomas Huth ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, ®s);
912fcf5ef2aSThomas Huth if (ret < 0) {
913fcf5ef2aSThomas Huth return ret;
914fcf5ef2aSThomas Huth }
915fcf5ef2aSThomas Huth
916fcf5ef2aSThomas Huth regs.ctr = env->ctr;
917fcf5ef2aSThomas Huth regs.lr = env->lr;
918fcf5ef2aSThomas Huth regs.xer = cpu_read_xer(env);
919fcf5ef2aSThomas Huth regs.msr = env->msr;
920fcf5ef2aSThomas Huth regs.pc = env->nip;
921fcf5ef2aSThomas Huth
922fcf5ef2aSThomas Huth regs.srr0 = env->spr[SPR_SRR0];
923fcf5ef2aSThomas Huth regs.srr1 = env->spr[SPR_SRR1];
924fcf5ef2aSThomas Huth
925fcf5ef2aSThomas Huth regs.sprg0 = env->spr[SPR_SPRG0];
926fcf5ef2aSThomas Huth regs.sprg1 = env->spr[SPR_SPRG1];
927fcf5ef2aSThomas Huth regs.sprg2 = env->spr[SPR_SPRG2];
928fcf5ef2aSThomas Huth regs.sprg3 = env->spr[SPR_SPRG3];
929fcf5ef2aSThomas Huth regs.sprg4 = env->spr[SPR_SPRG4];
930fcf5ef2aSThomas Huth regs.sprg5 = env->spr[SPR_SPRG5];
931fcf5ef2aSThomas Huth regs.sprg6 = env->spr[SPR_SPRG6];
932fcf5ef2aSThomas Huth regs.sprg7 = env->spr[SPR_SPRG7];
933fcf5ef2aSThomas Huth
934fcf5ef2aSThomas Huth regs.pid = env->spr[SPR_BOOKE_PID];
935fcf5ef2aSThomas Huth
936c995e942SDavid Gibson for (i = 0; i < 32; i++) {
937fcf5ef2aSThomas Huth regs.gpr[i] = env->gpr[i];
938c995e942SDavid Gibson }
939fcf5ef2aSThomas Huth
9402060436aSHarsh Prateek Bora regs.cr = ppc_get_cr(env);
941fcf5ef2aSThomas Huth
942fcf5ef2aSThomas Huth ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, ®s);
943c995e942SDavid Gibson if (ret < 0) {
944fcf5ef2aSThomas Huth return ret;
945c995e942SDavid Gibson }
946fcf5ef2aSThomas Huth
947fcf5ef2aSThomas Huth kvm_put_fp(cs);
948fcf5ef2aSThomas Huth
949fcf5ef2aSThomas Huth if (env->tlb_dirty) {
950fcf5ef2aSThomas Huth kvm_sw_tlb_put(cpu);
951fcf5ef2aSThomas Huth env->tlb_dirty = false;
952fcf5ef2aSThomas Huth }
953fcf5ef2aSThomas Huth
954fcf5ef2aSThomas Huth if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
955fcf5ef2aSThomas Huth ret = kvmppc_put_books_sregs(cpu);
956fcf5ef2aSThomas Huth if (ret < 0) {
957fcf5ef2aSThomas Huth return ret;
958fcf5ef2aSThomas Huth }
959fcf5ef2aSThomas Huth }
960fcf5ef2aSThomas Huth
961fcf5ef2aSThomas Huth if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
962fcf5ef2aSThomas Huth kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
963fcf5ef2aSThomas Huth }
964fcf5ef2aSThomas Huth
965fcf5ef2aSThomas Huth if (cap_one_reg) {
966c995e942SDavid Gibson /*
967c995e942SDavid Gibson * We deliberately ignore errors here, for kernels which have
968fcf5ef2aSThomas Huth * the ONE_REG calls, but don't support the specific
969fcf5ef2aSThomas Huth * registers, there's a reasonable chance things will still
970c995e942SDavid Gibson * work, at least until we try to migrate.
971c995e942SDavid Gibson */
972fcf5ef2aSThomas Huth for (i = 0; i < 1024; i++) {
973fcf5ef2aSThomas Huth uint64_t id = env->spr_cb[i].one_reg_id;
974fcf5ef2aSThomas Huth
975fcf5ef2aSThomas Huth if (id != 0) {
976fcf5ef2aSThomas Huth kvm_put_one_spr(cs, id, i);
977fcf5ef2aSThomas Huth }
978fcf5ef2aSThomas Huth }
979fcf5ef2aSThomas Huth
980fcf5ef2aSThomas Huth #ifdef TARGET_PPC64
981ca241959SVíctor Colombo if (FIELD_EX64(env->msr, MSR, TS)) {
982fcf5ef2aSThomas Huth for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
983fcf5ef2aSThomas Huth kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
984fcf5ef2aSThomas Huth }
985fcf5ef2aSThomas Huth for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
986fcf5ef2aSThomas Huth kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
987fcf5ef2aSThomas Huth }
988fcf5ef2aSThomas Huth kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
989fcf5ef2aSThomas Huth kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
990fcf5ef2aSThomas Huth kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
991fcf5ef2aSThomas Huth kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
992fcf5ef2aSThomas Huth kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
993fcf5ef2aSThomas Huth kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
994fcf5ef2aSThomas Huth kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
995fcf5ef2aSThomas Huth kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
996fcf5ef2aSThomas Huth kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
997fcf5ef2aSThomas Huth kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
998fcf5ef2aSThomas Huth }
999fcf5ef2aSThomas Huth
1000fcf5ef2aSThomas Huth if (cap_papr) {
1001fcf5ef2aSThomas Huth if (kvm_put_vpa(cs) < 0) {
10028d83cbf1SGreg Kurz trace_kvm_failed_put_vpa();
1003fcf5ef2aSThomas Huth }
1004fcf5ef2aSThomas Huth }
1005fcf5ef2aSThomas Huth
1006fcf5ef2aSThomas Huth kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
1007972bd576SAlexey Kardashevskiy
1008972bd576SAlexey Kardashevskiy if (level > KVM_PUT_RUNTIME_STATE) {
1009972bd576SAlexey Kardashevskiy kvm_put_one_spr(cs, KVM_REG_PPC_DPDES, SPR_DPDES);
1010972bd576SAlexey Kardashevskiy }
1011fcf5ef2aSThomas Huth #endif /* TARGET_PPC64 */
1012fcf5ef2aSThomas Huth }
1013fcf5ef2aSThomas Huth
1014fcf5ef2aSThomas Huth return ret;
1015fcf5ef2aSThomas Huth }
1016fcf5ef2aSThomas Huth
/*
 * Recompute one BookE exception vector from its IVOR SPR plus the
 * interrupt vector prefix (IVPR), after the SPRs were refreshed from
 * KVM.
 */
static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
{
    env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
}
1021fcf5ef2aSThomas Huth
/*
 * Fetch the BookE special registers from KVM via KVM_GET_SREGS and
 * scatter them into env->spr[], keyed by the feature bits the kernel
 * reports in sregs.u.e.features.  Exception vectors derived from the
 * IVORs are recomputed via kvm_sync_excp().
 *
 * Returns 0 on success or the negative error from the ioctl.
 */
static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    struct kvm_sregs sregs;
    int ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    if (sregs.u.e.features & KVM_SREGS_E_BASE) {
        env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
        env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
        env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
        env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
        env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
        env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
        env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
        env->spr[SPR_DECR] = sregs.u.e.dec;
        /* The 64-bit timebase is split across TBL/TBU */
        env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
        env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
        env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
    }

    if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
        env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
        env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
        env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
        env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
        env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
    }

    if (sregs.u.e.features & KVM_SREGS_E_64) {
        env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
    }

    if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
        env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
    }

    if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
        env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
        kvm_sync_excp(env, POWERPC_EXCP_CRITICAL,  SPR_BOOKE_IVOR0);
        env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
        kvm_sync_excp(env, POWERPC_EXCP_MCHECK,  SPR_BOOKE_IVOR1);
        env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
        kvm_sync_excp(env, POWERPC_EXCP_DSI,  SPR_BOOKE_IVOR2);
        env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
        kvm_sync_excp(env, POWERPC_EXCP_ISI,  SPR_BOOKE_IVOR3);
        env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
        kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL,  SPR_BOOKE_IVOR4);
        env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
        kvm_sync_excp(env, POWERPC_EXCP_ALIGN,  SPR_BOOKE_IVOR5);
        env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
        kvm_sync_excp(env, POWERPC_EXCP_PROGRAM,  SPR_BOOKE_IVOR6);
        env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
        kvm_sync_excp(env, POWERPC_EXCP_FPU,  SPR_BOOKE_IVOR7);
        env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
        kvm_sync_excp(env, POWERPC_EXCP_SYSCALL,  SPR_BOOKE_IVOR8);
        env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
        kvm_sync_excp(env, POWERPC_EXCP_APU,  SPR_BOOKE_IVOR9);
        env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
        kvm_sync_excp(env, POWERPC_EXCP_DECR,  SPR_BOOKE_IVOR10);
        env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
        kvm_sync_excp(env, POWERPC_EXCP_FIT,  SPR_BOOKE_IVOR11);
        env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
        kvm_sync_excp(env, POWERPC_EXCP_WDT,  SPR_BOOKE_IVOR12);
        env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
        kvm_sync_excp(env, POWERPC_EXCP_DTLB,  SPR_BOOKE_IVOR13);
        env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
        kvm_sync_excp(env, POWERPC_EXCP_ITLB,  SPR_BOOKE_IVOR14);
        env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
        kvm_sync_excp(env, POWERPC_EXCP_DEBUG,  SPR_BOOKE_IVOR15);

        if (sregs.u.e.features & KVM_SREGS_E_SPE) {
            env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
            kvm_sync_excp(env, POWERPC_EXCP_SPEU,  SPR_BOOKE_IVOR32);
            env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
            kvm_sync_excp(env, POWERPC_EXCP_EFPDI,  SPR_BOOKE_IVOR33);
            env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
            kvm_sync_excp(env, POWERPC_EXCP_EFPRI,  SPR_BOOKE_IVOR34);
        }

        if (sregs.u.e.features & KVM_SREGS_E_PM) {
            env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
            kvm_sync_excp(env, POWERPC_EXCP_EPERFM,  SPR_BOOKE_IVOR35);
        }

        if (sregs.u.e.features & KVM_SREGS_E_PC) {
            env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
            kvm_sync_excp(env, POWERPC_EXCP_DOORI,  SPR_BOOKE_IVOR36);
            env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
            kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
        }
    }

    if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
        env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
        env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
        env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
        /* MAS7 and MAS3 are packed into a single 64-bit field */
        env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
        env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
        env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
        env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
        env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
        env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
        env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
    }

    if (sregs.u.e.features & KVM_SREGS_EXP) {
        env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
    }

    if (sregs.u.e.features & KVM_SREGS_E_PD) {
        env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
        env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
    }

    if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
        env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
        env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
        env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;

        if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
            env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
            env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
        }
    }

    return 0;
}
1154fcf5ef2aSThomas Huth
/*
 * Fetch the Book3S segment/MMU state (SDR1, SLB, segment registers,
 * BATs) from KVM via KVM_GET_SREGS into env.  SDR1 is not restored
 * when a virtual hypervisor manages the HPT.
 *
 * Returns 0 on success or the negative error from the ioctl.
 */
static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    struct kvm_sregs sregs;
    int ret;
    int i;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    if (!cpu->vhyp) {
        ppc_store_sdr1(env, sregs.u.s.sdr1);
    }

    /* Sync SLB */
#ifdef TARGET_PPC64
    /*
     * The packed SLB array we get from KVM_GET_SREGS only contains
     * information about valid entries. So we flush our internal copy
     * to get rid of stale ones, then put all valid SLB entries back
     * in.
     */
    memset(env->slb, 0, sizeof(env->slb));
    for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
        target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
        target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
        /*
         * Only restore valid entries
         */
        if (rb & SLB_ESID_V) {
            ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
        }
    }
#endif

    /* Sync SRs */
    for (i = 0; i < 16; i++) {
        env->sr[i] = sregs.u.s.ppc32.sr[i];
    }

    /* Sync BATs */
    for (i = 0; i < 8; i++) {
        /* Upper and lower halves are packed into one 64-bit word each */
        env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
        env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
        env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
        env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
    }

    return 0;
}
1207fcf5ef2aSThomas Huth
kvm_arch_get_registers(CPUState * cs,Error ** errp)1208*a1676bb3SJulia Suvorova int kvm_arch_get_registers(CPUState *cs, Error **errp)
1209fcf5ef2aSThomas Huth {
1210fcf5ef2aSThomas Huth PowerPCCPU *cpu = POWERPC_CPU(cs);
1211fcf5ef2aSThomas Huth CPUPPCState *env = &cpu->env;
1212fcf5ef2aSThomas Huth struct kvm_regs regs;
1213fcf5ef2aSThomas Huth int i, ret;
1214fcf5ef2aSThomas Huth
1215fcf5ef2aSThomas Huth ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, ®s);
1216c995e942SDavid Gibson if (ret < 0) {
1217fcf5ef2aSThomas Huth return ret;
1218c995e942SDavid Gibson }
1219fcf5ef2aSThomas Huth
12202060436aSHarsh Prateek Bora ppc_set_cr(env, regs.cr);
1221fcf5ef2aSThomas Huth env->ctr = regs.ctr;
1222fcf5ef2aSThomas Huth env->lr = regs.lr;
1223fcf5ef2aSThomas Huth cpu_write_xer(env, regs.xer);
1224fcf5ef2aSThomas Huth env->msr = regs.msr;
1225fcf5ef2aSThomas Huth env->nip = regs.pc;
1226fcf5ef2aSThomas Huth
1227fcf5ef2aSThomas Huth env->spr[SPR_SRR0] = regs.srr0;
1228fcf5ef2aSThomas Huth env->spr[SPR_SRR1] = regs.srr1;
1229fcf5ef2aSThomas Huth
1230fcf5ef2aSThomas Huth env->spr[SPR_SPRG0] = regs.sprg0;
1231fcf5ef2aSThomas Huth env->spr[SPR_SPRG1] = regs.sprg1;
1232fcf5ef2aSThomas Huth env->spr[SPR_SPRG2] = regs.sprg2;
1233fcf5ef2aSThomas Huth env->spr[SPR_SPRG3] = regs.sprg3;
1234fcf5ef2aSThomas Huth env->spr[SPR_SPRG4] = regs.sprg4;
1235fcf5ef2aSThomas Huth env->spr[SPR_SPRG5] = regs.sprg5;
1236fcf5ef2aSThomas Huth env->spr[SPR_SPRG6] = regs.sprg6;
1237fcf5ef2aSThomas Huth env->spr[SPR_SPRG7] = regs.sprg7;
1238fcf5ef2aSThomas Huth
1239fcf5ef2aSThomas Huth env->spr[SPR_BOOKE_PID] = regs.pid;
1240fcf5ef2aSThomas Huth
1241c995e942SDavid Gibson for (i = 0; i < 32; i++) {
1242fcf5ef2aSThomas Huth env->gpr[i] = regs.gpr[i];
1243c995e942SDavid Gibson }
1244fcf5ef2aSThomas Huth
1245fcf5ef2aSThomas Huth kvm_get_fp(cs);
1246fcf5ef2aSThomas Huth
1247fcf5ef2aSThomas Huth if (cap_booke_sregs) {
1248fcf5ef2aSThomas Huth ret = kvmppc_get_booke_sregs(cpu);
1249fcf5ef2aSThomas Huth if (ret < 0) {
1250fcf5ef2aSThomas Huth return ret;
1251fcf5ef2aSThomas Huth }
1252fcf5ef2aSThomas Huth }
1253fcf5ef2aSThomas Huth
1254fcf5ef2aSThomas Huth if (cap_segstate) {
1255fcf5ef2aSThomas Huth ret = kvmppc_get_books_sregs(cpu);
1256fcf5ef2aSThomas Huth if (ret < 0) {
1257fcf5ef2aSThomas Huth return ret;
1258fcf5ef2aSThomas Huth }
1259fcf5ef2aSThomas Huth }
1260fcf5ef2aSThomas Huth
1261fcf5ef2aSThomas Huth if (cap_hior) {
1262fcf5ef2aSThomas Huth kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1263fcf5ef2aSThomas Huth }
1264fcf5ef2aSThomas Huth
1265fcf5ef2aSThomas Huth if (cap_one_reg) {
1266c995e942SDavid Gibson /*
1267c995e942SDavid Gibson * We deliberately ignore errors here, for kernels which have
1268fcf5ef2aSThomas Huth * the ONE_REG calls, but don't support the specific
1269fcf5ef2aSThomas Huth * registers, there's a reasonable chance things will still
1270c995e942SDavid Gibson * work, at least until we try to migrate.
1271c995e942SDavid Gibson */
1272fcf5ef2aSThomas Huth for (i = 0; i < 1024; i++) {
1273fcf5ef2aSThomas Huth uint64_t id = env->spr_cb[i].one_reg_id;
1274fcf5ef2aSThomas Huth
1275fcf5ef2aSThomas Huth if (id != 0) {
1276fcf5ef2aSThomas Huth kvm_get_one_spr(cs, id, i);
1277fcf5ef2aSThomas Huth }
1278fcf5ef2aSThomas Huth }
1279fcf5ef2aSThomas Huth
1280fcf5ef2aSThomas Huth #ifdef TARGET_PPC64
1281ca241959SVíctor Colombo if (FIELD_EX64(env->msr, MSR, TS)) {
1282fcf5ef2aSThomas Huth for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
1283fcf5ef2aSThomas Huth kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
1284fcf5ef2aSThomas Huth }
1285fcf5ef2aSThomas Huth for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
1286fcf5ef2aSThomas Huth kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
1287fcf5ef2aSThomas Huth }
1288fcf5ef2aSThomas Huth kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
1289fcf5ef2aSThomas Huth kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
1290fcf5ef2aSThomas Huth kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
1291fcf5ef2aSThomas Huth kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
1292fcf5ef2aSThomas Huth kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
1293fcf5ef2aSThomas Huth kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
1294fcf5ef2aSThomas Huth kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
1295fcf5ef2aSThomas Huth kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
1296fcf5ef2aSThomas Huth kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
1297fcf5ef2aSThomas Huth kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
1298fcf5ef2aSThomas Huth }
1299fcf5ef2aSThomas Huth
1300fcf5ef2aSThomas Huth if (cap_papr) {
1301fcf5ef2aSThomas Huth if (kvm_get_vpa(cs) < 0) {
13028d83cbf1SGreg Kurz trace_kvm_failed_get_vpa();
1303fcf5ef2aSThomas Huth }
1304fcf5ef2aSThomas Huth }
1305fcf5ef2aSThomas Huth
1306fcf5ef2aSThomas Huth kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
1307972bd576SAlexey Kardashevskiy kvm_get_one_spr(cs, KVM_REG_PPC_DPDES, SPR_DPDES);
1308fcf5ef2aSThomas Huth #endif
1309fcf5ef2aSThomas Huth }
1310fcf5ef2aSThomas Huth
1311fcf5ef2aSThomas Huth return 0;
1312fcf5ef2aSThomas Huth }
1313fcf5ef2aSThomas Huth
/*
 * Raise or lower the external interrupt pin of a vCPU through KVM.
 *
 * Only PPC_INTERRUPT_EXT is routed via the KVM_INTERRUPT ioctl, and only
 * when the kernel advertises the level-triggered interrupt capability;
 * every other request is silently accepted as a no-op.  Always returns 0.
 */
int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
{
    unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;

    if (irq != PPC_INTERRUPT_EXT || !cap_interrupt_unset) {
        return 0;
    }

    kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);

    return 0;
}
1330fcf5ef2aSThomas Huth
/* No per-entry work is needed on PPC before running the guest. */
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}
1335fcf5ef2aSThomas Huth
kvm_arch_post_run(CPUState * cs,struct kvm_run * run)1336fcf5ef2aSThomas Huth MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
1337fcf5ef2aSThomas Huth {
1338fcf5ef2aSThomas Huth return MEMTXATTRS_UNSPECIFIED;
1339fcf5ef2aSThomas Huth }
1340fcf5ef2aSThomas Huth
/* Report to the generic KVM loop whether this vCPU is currently halted. */
int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}
1345fcf5ef2aSThomas Huth
/*
 * Handle a KVM_EXIT_HLT: park the vCPU, but only if external interrupts
 * are enabled (MSR[EE]) and none is already pending — otherwise the halt
 * would miss a wakeup.  Always returns 0.
 */
static int kvmppc_handle_halt(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    bool irq_pending = cs->interrupt_request & CPU_INTERRUPT_HARD;

    if (FIELD_EX64(env->msr, MSR, EE) && !irq_pending) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
    }

    return 0;
}
1359fcf5ef2aSThomas Huth
1360fcf5ef2aSThomas Huth /* map dcr access to existing qemu dcr emulation */
/*
 * Forward a guest DCR read to QEMU's DCR emulation.  Unhandled DCR
 * numbers are reported on stderr but still complete with success so the
 * guest keeps running.
 */
static int kvmppc_handle_dcr_read(CPUPPCState *env,
                                  uint32_t dcrn, uint32_t *data)
{
    int rc = ppc_dcr_read(env->dcr_env, dcrn, data);

    if (rc < 0) {
        fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
    }

    return 0;
}
1370fcf5ef2aSThomas Huth
/*
 * Forward a guest DCR write to QEMU's DCR emulation.  Unhandled DCR
 * numbers are reported on stderr but still complete with success so the
 * guest keeps running.
 */
static int kvmppc_handle_dcr_write(CPUPPCState *env,
                                   uint32_t dcrn, uint32_t data)
{
    int rc = ppc_dcr_write(env->dcr_env, dcrn, data);

    if (rc < 0) {
        fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
    }

    return 0;
}
1380fcf5ef2aSThomas Huth
/*
 * Plant a software breakpoint: save the original instruction at bp->pc
 * into bp->saved_insn, then patch the debug trap opcode in its place.
 * Returns 0 on success, -EINVAL if guest memory cannot be accessed.
 */
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    /* Mixed endian case is not handled */
    uint32_t sc = debug_inst_opcode;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sizeof(sc), 0)) {
        return -EINVAL;
    }
    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
        return -EINVAL;
    }

    return 0;
}
1394fcf5ef2aSThomas Huth
/*
 * Remove a software breakpoint: verify the trap opcode is still present
 * at bp->pc, then restore the saved original instruction.  Returns 0 on
 * success, -EINVAL on access failure or if the trap was overwritten.
 */
int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint32_t sc;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0)) {
        return -EINVAL;
    }
    if (sc != debug_inst_opcode) {
        return -EINVAL;
    }
    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sizeof(sc), 1)) {
        return -EINVAL;
    }

    return 0;
}
1408fcf5ef2aSThomas Huth
/*
 * Linear scan of the active hardware debug-point table for an entry
 * matching both @addr and GDB @type.  Returns its index, or -1 if absent.
 */
static int find_hw_breakpoint(target_ulong addr, int type)
{
    int total = nb_hw_breakpoint + nb_hw_watchpoint;
    int idx;

    assert(total <= ARRAY_SIZE(hw_debug_points));

    for (idx = 0; idx < total; idx++) {
        if (hw_debug_points[idx].addr == addr &&
            hw_debug_points[idx].type == type) {
            return idx;
        }
    }

    return -1;
}
1425fcf5ef2aSThomas Huth
/*
 * Look up a watchpoint covering @addr, trying each flavour in priority
 * order (access, then write, then read — same order as the original
 * cascade).  On a hit, store the corresponding BP_MEM_* flag in *flag
 * and return the table index; return -1 when nothing matches.
 */
static int find_hw_watchpoint(target_ulong addr, int *flag)
{
    static const int gdb_types[] = {
        GDB_WATCHPOINT_ACCESS, GDB_WATCHPOINT_WRITE, GDB_WATCHPOINT_READ,
    };
    static const int mem_flags[] = {
        BP_MEM_ACCESS, BP_MEM_WRITE, BP_MEM_READ,
    };
    size_t i;

    for (i = 0; i < ARRAY_SIZE(gdb_types); i++) {
        int n = find_hw_breakpoint(addr, gdb_types[i]);

        if (n >= 0) {
            *flag = mem_flags[i];
            return n;
        }
    }

    return -1;
}
1450fcf5ef2aSThomas Huth
/*
 * Register a hardware breakpoint or watchpoint at @addr for gdbstub use.
 * @len is unused on PPC.  Returns 0 on success, -ENOBUFS when the shared
 * table or the per-type hardware limit is full, -EEXIST for a duplicate
 * (same addr + type), and -ENOSYS for an unsupported type.
 */
int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    const unsigned breakpoint_index = nb_hw_breakpoint + nb_hw_watchpoint;
    if (breakpoint_index >= ARRAY_SIZE(hw_debug_points)) {
        return -ENOBUFS;
    }

    /*
     * The slot is filled in up front, but it only becomes live once the
     * matching counter is incremented below — the early error returns
     * leave it outside the valid range and therefore unused.
     */
    hw_debug_points[breakpoint_index].addr = addr;
    hw_debug_points[breakpoint_index].type = type;

    switch (type) {
    case GDB_BREAKPOINT_HW:
        if (nb_hw_breakpoint >= max_hw_breakpoint) {
            return -ENOBUFS;
        }

        if (find_hw_breakpoint(addr, type) >= 0) {
            return -EEXIST;
        }

        nb_hw_breakpoint++;
        break;

    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        if (nb_hw_watchpoint >= max_hw_watchpoint) {
            return -ENOBUFS;
        }

        if (find_hw_breakpoint(addr, type) >= 0) {
            return -EEXIST;
        }

        nb_hw_watchpoint++;
        break;

    default:
        return -ENOSYS;
    }

    return 0;
}
1494fcf5ef2aSThomas Huth
/*
 * Remove the hardware breakpoint/watchpoint registered for @addr/@type.
 * @len is unused on PPC.  Returns 0 on success, -ENOENT if no matching
 * entry exists, and -ENOSYS for an unsupported type.
 */
int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, type);
    if (n < 0) {
        return -ENOENT;
    }

    switch (type) {
    case GDB_BREAKPOINT_HW:
        nb_hw_breakpoint--;
        break;

    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        nb_hw_watchpoint--;
        break;

    default:
        return -ENOSYS;
    }
    /*
     * Keep the table dense: the counters were just decremented, so this
     * copies the former last entry into the freed slot (a self-copy when
     * removing the last entry).
     */
    hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];

    return 0;
}
1522fcf5ef2aSThomas Huth
kvm_arch_remove_all_hw_breakpoints(void)1523fcf5ef2aSThomas Huth void kvm_arch_remove_all_hw_breakpoints(void)
1524fcf5ef2aSThomas Huth {
1525fcf5ef2aSThomas Huth nb_hw_breakpoint = nb_hw_watchpoint = 0;
1526fcf5ef2aSThomas Huth }
1527fcf5ef2aSThomas Huth
/*
 * Translate QEMU's current software/hardware debug state into the
 * kvm_guest_debug structure handed to the KVM_SET_GUEST_DEBUG ioctl.
 * Software breakpoints only set control flags; hardware break/watchpoints
 * are additionally copied, slot by slot, into dbg->arch.bp.
 */
void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
{
    int n;

    /* Software Breakpoint updates */
    if (kvm_sw_breakpoints_active(cs)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }

    /* The shared table must fit both our bookkeeping and the kernel ABI. */
    assert((nb_hw_breakpoint + nb_hw_watchpoint)
           <= ARRAY_SIZE(hw_debug_points));
    assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));

    if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
        for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
            /* Map each GDB debug-point type onto the KVM PPC encoding. */
            switch (hw_debug_points[n].type) {
            case GDB_BREAKPOINT_HW:
                dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
                break;
            case GDB_WATCHPOINT_WRITE:
                dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
                break;
            case GDB_WATCHPOINT_READ:
                dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
                break;
            case GDB_WATCHPOINT_ACCESS:
                /* "access" is encoded as read|write for the kernel. */
                dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
                                       KVMPPC_DEBUG_WATCH_READ;
                break;
            default:
                cpu_abort(cs, "Unsupported breakpoint type\n");
            }
            dbg->arch.bp[n].addr = hw_debug_points[n].addr;
        }
    }
}
1566fcf5ef2aSThomas Huth
/*
 * Classify a hardware debug exit reported by KVM.  Returns
 * DEBUG_RETURN_GDB when the address matches one of QEMU's registered
 * break/watchpoints (setting cs->watchpoint_hit for watchpoints so the
 * gdbstub can report it), otherwise DEBUG_RETURN_GUEST.
 */
static int kvm_handle_hw_breakpoint(CPUState *cs,
                                    struct kvm_debug_exit_arch *arch_info)
{
    int handle = DEBUG_RETURN_GUEST;
    int n;
    int flag = 0;

    if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
        if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
            n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
            if (n >= 0) {
                handle = DEBUG_RETURN_GDB;
            }
        } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
                                        KVMPPC_DEBUG_WATCH_WRITE)) {
            n = find_hw_watchpoint(arch_info->address, &flag);
            if (n >= 0) {
                handle = DEBUG_RETURN_GDB;
                /* Record the hit so gdb can show which watchpoint fired. */
                cs->watchpoint_hit = &hw_watchpoint;
                hw_watchpoint.vaddr = hw_debug_points[n].addr;
                hw_watchpoint.flags = flag;
            }
        }
    }
    return handle;
}
15932cbd1581SFabiano Rosas
kvm_handle_singlestep(void)1594468e3a1aSFabiano Rosas static int kvm_handle_singlestep(void)
1595468e3a1aSFabiano Rosas {
15966e0552a3SFabiano Rosas return DEBUG_RETURN_GDB;
1597468e3a1aSFabiano Rosas }
1598468e3a1aSFabiano Rosas
kvm_handle_sw_breakpoint(void)1599468e3a1aSFabiano Rosas static int kvm_handle_sw_breakpoint(void)
1600468e3a1aSFabiano Rosas {
16016e0552a3SFabiano Rosas return DEBUG_RETURN_GDB;
1602468e3a1aSFabiano Rosas }
1603468e3a1aSFabiano Rosas
/*
 * Top-level handler for a KVM_EXIT_DEBUG.  Dispatches to the
 * single-step / hardware / software breakpoint handlers in that order;
 * returns DEBUG_RETURN_GDB to stop in the debugger, or
 * DEBUG_RETURN_GUEST after injecting a program interrupt (see below).
 */
static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    struct kvm_debug_exit_arch *arch_info = &run->debug.arch;

    if (cs->singlestep_enabled) {
        return kvm_handle_singlestep();
    }

    if (arch_info->status) {
        return kvm_handle_hw_breakpoint(cs, arch_info);
    }

    if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
        return kvm_handle_sw_breakpoint();
    }

    /*
     * QEMU is not able to handle debug exception, so inject
     * program exception to guest;
     * Yes program exception NOT debug exception !!
     * When QEMU is using debug resources then debug exception must
     * be always set. To achieve this we set MSR_DE and also set
     * MSRP_DEP so guest cannot change MSR_DE.
     * When emulating debug resource for guest we want guest
     * to control MSR_DE (enable/disable debug interrupt on need).
     * Supporting both configurations are NOT possible.
     * So the result is that we cannot share debug resources
     * between QEMU and Guest on BOOKE architecture.
     * In the current design QEMU gets the priority over guest,
     * this means that if QEMU is using debug resources then guest
     * cannot use them;
     * For software breakpoint QEMU uses a privileged instruction;
     * So there cannot be any reason that we are here for guest
     * set debug exception, only possibility is guest executed a
     * privileged / illegal instruction and that's why we are
     * injecting a program interrupt.
     */
    cpu_synchronize_state(cs);
    /*
     * env->nip is PC, so increment this by 4 to use
     * ppc_cpu_do_interrupt(), which set srr0 = env->nip - 4.
     */
    env->nip += 4;
    cs->exception_index = POWERPC_EXCP_PROGRAM;
    env->error_code = POWERPC_EXCP_INVAL;
    ppc_cpu_do_interrupt(cs);

    return DEBUG_RETURN_GUEST;
}
1655fcf5ef2aSThomas Huth
/*
 * Architecture-specific dispatch for KVM vCPU exits.  Runs with the Big
 * QEMU Lock held for the duration of the handler.  Returns 0 to re-enter
 * the guest, EXCP_DEBUG to stop in the debugger, or a negative value on
 * unrecoverable/unknown exit reasons.
 */
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    int ret;

    bql_lock();

    switch (run->exit_reason) {
    case KVM_EXIT_DCR:
        /* Device control register access emulated in QEMU. */
        if (run->dcr.is_write) {
            trace_kvm_handle_dcr_write();
            ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
        } else {
            trace_kvm_handle_dcr_read();
            ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
        }
        break;
    case KVM_EXIT_HLT:
        trace_kvm_handle_halt();
        ret = kvmppc_handle_halt(cpu);
        break;
#if defined(CONFIG_PSERIES)
    case KVM_EXIT_PAPR_HCALL:
        /* PAPR hypercall; the result is passed back through the run area. */
        trace_kvm_handle_papr_hcall(run->papr_hcall.nr);
        run->papr_hcall.ret = spapr_hypercall(cpu,
                                              run->papr_hcall.nr,
                                              run->papr_hcall.args);
        ret = 0;
        break;
#endif
    case KVM_EXIT_EPR:
        /* External proxy: fetch the interrupt vector from the MPIC. */
        trace_kvm_handle_epr();
        run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
        ret = 0;
        break;
    case KVM_EXIT_WATCHDOG:
        trace_kvm_handle_watchdog_expiry();
        watchdog_perform_action();
        ret = 0;
        break;

    case KVM_EXIT_DEBUG:
        trace_kvm_handle_debug_exception();
        if (kvm_handle_debug(cpu, run)) {
            ret = EXCP_DEBUG;
            break;
        }
        /* re-enter, this exception was guest-internal */
        ret = 0;
        break;

#if defined(CONFIG_PSERIES)
    case KVM_EXIT_NMI:
        trace_kvm_handle_nmi_exception();
        ret = kvm_handle_nmi(cpu, run);
        break;
#endif

    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    bql_unlock();
    return ret;
}
1724fcf5ef2aSThomas Huth
/*
 * OR @tsr_bits into the guest's Timer Status Register via the dedicated
 * KVM one-reg interface.  No-op (returns 0) when KVM is not in use;
 * otherwise returns the ioctl result.
 */
int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
{
    uint32_t bits = tsr_bits;
    struct kvm_one_reg reg = {
        .id = KVM_REG_PPC_OR_TSR,
        .addr = (uintptr_t) &bits,
    };

    if (!kvm_enabled()) {
        return 0;
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_ONE_REG, &reg);
}
1740fcf5ef2aSThomas Huth
/*
 * Clear @tsr_bits in the guest's Timer Status Register via the dedicated
 * KVM one-reg interface.  No-op (returns 0) when KVM is not in use;
 * otherwise returns the ioctl result.
 */
int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
{
    uint32_t bits = tsr_bits;
    struct kvm_one_reg reg = {
        .id = KVM_REG_PPC_CLEAR_TSR,
        .addr = (uintptr_t) &bits,
    };

    if (!kvm_enabled()) {
        return 0;
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_ONE_REG, &reg);
}
1757fcf5ef2aSThomas Huth
/*
 * Push the emulated SPR_BOOKE_TCR value into the kernel's view of the
 * Timer Control Register.  No-op (returns 0) when KVM is not in use;
 * otherwise returns the ioctl result.
 */
int kvmppc_set_tcr(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    uint32_t tcr = env->spr[SPR_BOOKE_TCR];
    struct kvm_one_reg reg = {
        .id = KVM_REG_PPC_TCR,
        .addr = (uintptr_t) &tcr,
    };

    if (!kvm_enabled()) {
        return 0;
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_ONE_REG, &reg);
}
1775fcf5ef2aSThomas Huth
/*
 * Enable the BookE watchdog capability for this vCPU so watchdog expiry
 * causes a KVM_EXIT_WATCHDOG instead of being handled in the kernel.
 * Returns 0 on success, -1 when KVM is absent or lacks the capability,
 * or the negative errno from the enable-cap ioctl.
 */
int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int ret;

    if (!kvm_enabled()) {
        return -1;
    }

    if (!cap_ppc_watchdog) {
        /*
         * Fix: this warning previously went to stdout via printf() and
         * lacked a trailing newline; route it to stderr like the file's
         * other diagnostics.
         */
        fprintf(stderr, "warning: KVM does not support watchdog\n");
        return -1;
    }

    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
    if (ret < 0) {
        fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
                __func__, strerror(-ret));
    }

    return ret;
}
1799fcf5ef2aSThomas Huth
/*
 * Scan /proc/cpuinfo for the first line starting with @field and copy
 * that whole line (truncated to @len, NUL-terminated) into @value.
 * Returns 0 on a match, -1 if the file cannot be opened or the field is
 * not found.
 */
static int read_cpuinfo(const char *field, char *value, int len)
{
    FILE *fp;
    int found = -1;
    int prefix_len = strlen(field);
    char line[512];

    fp = fopen("/proc/cpuinfo", "r");
    if (!fp) {
        return -1;
    }

    do {
        if (!fgets(line, sizeof(line), fp)) {
            break;
        }
        if (strncmp(line, field, prefix_len) == 0) {
            pstrcpy(value, len, line);
            found = 0;
            break;
        }
    } while (*line);

    fclose(fp);

    return found;
}
1827fcf5ef2aSThomas Huth
kvmppc_get_tbfreq_procfs(void)18289cbcfb59SGreg Kurz static uint32_t kvmppc_get_tbfreq_procfs(void)
1829fcf5ef2aSThomas Huth {
1830fcf5ef2aSThomas Huth char line[512];
1831fcf5ef2aSThomas Huth char *ns;
18329cbcfb59SGreg Kurz uint32_t tbfreq_fallback = NANOSECONDS_PER_SECOND;
18339cbcfb59SGreg Kurz uint32_t tbfreq_procfs;
1834fcf5ef2aSThomas Huth
1835fcf5ef2aSThomas Huth if (read_cpuinfo("timebase", line, sizeof(line))) {
18369cbcfb59SGreg Kurz return tbfreq_fallback;
1837fcf5ef2aSThomas Huth }
1838fcf5ef2aSThomas Huth
1839c995e942SDavid Gibson ns = strchr(line, ':');
1840c995e942SDavid Gibson if (!ns) {
18419cbcfb59SGreg Kurz return tbfreq_fallback;
1842fcf5ef2aSThomas Huth }
1843fcf5ef2aSThomas Huth
18449cbcfb59SGreg Kurz tbfreq_procfs = atoi(++ns);
1845fcf5ef2aSThomas Huth
18469cbcfb59SGreg Kurz /* 0 is certainly not acceptable by the guest, return fallback value */
18479cbcfb59SGreg Kurz return tbfreq_procfs ? tbfreq_procfs : tbfreq_fallback;
18489cbcfb59SGreg Kurz }
18499cbcfb59SGreg Kurz
/*
 * Host timebase frequency, probed from procfs on first use and cached in
 * a function-local static thereafter.
 */
uint32_t kvmppc_get_tbfreq(void)
{
    static uint32_t tbfreq;

    if (tbfreq == 0) {
        tbfreq = kvmppc_get_tbfreq_procfs();
    }

    return tbfreq;
}
1860fcf5ef2aSThomas Huth
kvmppc_get_host_serial(char ** value)1861fcf5ef2aSThomas Huth bool kvmppc_get_host_serial(char **value)
1862fcf5ef2aSThomas Huth {
1863fcf5ef2aSThomas Huth return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
1864fcf5ef2aSThomas Huth NULL);
1865fcf5ef2aSThomas Huth }
1866fcf5ef2aSThomas Huth
kvmppc_get_host_model(char ** value)1867fcf5ef2aSThomas Huth bool kvmppc_get_host_model(char **value)
1868fcf5ef2aSThomas Huth {
1869fcf5ef2aSThomas Huth return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
1870fcf5ef2aSThomas Huth }
1871fcf5ef2aSThomas Huth
1872fcf5ef2aSThomas Huth /* Try to find a device tree node for a CPU with clock-frequency property */
/*
 * Locate a host device-tree CPU node that carries a "clock-frequency"
 * property.  On success, @buf holds the node's directory path and 0 is
 * returned; on failure @buf is left empty and -1 is returned.  Note that
 * @buf doubles as scratch space for the probe path during the scan.
 */
static int kvmppc_find_cpu_dt(char *buf, int buf_len)
{
    struct dirent *dirp;
    DIR *dp;

    dp = opendir(PROC_DEVTREE_CPU);
    if (!dp) {
        printf("Can't open directory " PROC_DEVTREE_CPU "\n");
        return -1;
    }

    buf[0] = '\0';
    while ((dirp = readdir(dp)) != NULL) {
        FILE *f;

        /* Don't accidentally read from the current and parent directories */
        if (strcmp(dirp->d_name, ".") == 0 || strcmp(dirp->d_name, "..") == 0) {
            continue;
        }

        /* Probe for the property file; buf temporarily holds its path. */
        snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
                 dirp->d_name);
        f = fopen(buf, "r");
        if (f) {
            /* Found it — rewrite buf with just the node directory. */
            snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
            fclose(f);
            break;
        }
        buf[0] = '\0';
    }
    closedir(dp);
    if (buf[0] == '\0') {
        printf("Unknown host!\n");
        return -1;
    }

    return 0;
}
1911fcf5ef2aSThomas Huth
/*
 * Read a device-tree property file containing one big-endian integer.
 * Returns the host-endian value for a 4- or 8-byte payload, (uint64_t)-1
 * if the file cannot be opened, and 0 for any other payload size.
 */
static uint64_t kvmppc_read_int_dt(const char *filename)
{
    union {
        uint32_t v32;
        uint64_t v64;
    } u;
    FILE *f;
    int len;

    f = fopen(filename, "rb");
    if (!f) {
        return -1;
    }

    len = fread(&u, 1, sizeof(u), f);
    fclose(f);

    if (len == 4) {
        /* property is a 32-bit quantity */
        return be32_to_cpu(u.v32);
    }
    if (len == 8) {
        return be64_to_cpu(u.v64);
    }

    return 0;
}
1938fcf5ef2aSThomas Huth
1939c995e942SDavid Gibson /*
1940c995e942SDavid Gibson * Read a CPU node property from the host device tree that's a single
1941fcf5ef2aSThomas Huth * integer (32-bit or 64-bit). Returns 0 if anything goes wrong
1942c995e942SDavid Gibson * (can't find or open the property, or doesn't understand the format)
1943c995e942SDavid Gibson */
kvmppc_read_int_cpu_dt(const char * propname)1944fcf5ef2aSThomas Huth static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
1945fcf5ef2aSThomas Huth {
1946fcf5ef2aSThomas Huth char buf[PATH_MAX], *tmp;
1947fcf5ef2aSThomas Huth uint64_t val;
1948fcf5ef2aSThomas Huth
1949fcf5ef2aSThomas Huth if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
1950fcf5ef2aSThomas Huth return -1;
1951fcf5ef2aSThomas Huth }
1952fcf5ef2aSThomas Huth
1953fcf5ef2aSThomas Huth tmp = g_strdup_printf("%s/%s", buf, propname);
1954fcf5ef2aSThomas Huth val = kvmppc_read_int_dt(tmp);
1955fcf5ef2aSThomas Huth g_free(tmp);
1956fcf5ef2aSThomas Huth
1957fcf5ef2aSThomas Huth return val;
1958fcf5ef2aSThomas Huth }
1959fcf5ef2aSThomas Huth
/* Host timebase/clock frequency from the host device tree (0 on failure). */
uint64_t kvmppc_get_clockfreq(void)
{
    return kvmppc_read_int_cpu_dt("clock-frequency");
}
1964fcf5ef2aSThomas Huth
/*
 * Width of the host decrementer in bits, from the "ibm,dec-bits"
 * device-tree property; 0 when the property is absent or unreadable.
 */
static int kvmppc_get_dec_bits(void)
{
    int nr_bits = kvmppc_read_int_cpu_dt("ibm,dec-bits");

    /* A failed read yields a non-positive value; report it as 0. */
    return nr_bits > 0 ? nr_bits : 0;
}
19747d050527SSuraj Jitindar Singh
/*
 * Fetch the paravirt info structure from KVM.
 * Returns 0 on success (pvinfo filled in), 1 when the capability is
 * missing or the ioctl fails.
 */
static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
{
    CPUState *cs = env_cpu(env);

    if (!kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
        return 1;
    }
    if (kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
        return 1;
    }

    return 0;
}
1986fcf5ef2aSThomas Huth
/* 1 when KVM's pvinfo advertises the EV_IDLE hcall, otherwise 0. */
int kvmppc_get_hasidle(CPUPPCState *env)
{
    struct kvm_ppc_pvinfo pvinfo;

    if (kvmppc_get_pvinfo(env, &pvinfo)) {
        return 0;
    }

    return (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE) ? 1 : 0;
}
1998fcf5ef2aSThomas Huth
/*
 * Copy the hypercall trigger instruction sequence into buf.
 *
 * Returns 0 when the kernel supplied its preferred sequence via
 * KVM_PPC_GET_PVINFO, or 1 when the generic "always fail" fallback
 * sequence was written instead.
 *
 * NOTE(review): the fallback path unconditionally writes four 32-bit
 * instructions (16 bytes), and buf_len only bounds the memcpy on the
 * pvinfo path -- assumes callers always pass buf_len ==
 * sizeof(pvinfo.hcall) == 16; confirm against callers.
 */
int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
{
    uint32_t *hc = (uint32_t *)buf;
    struct kvm_ppc_pvinfo pvinfo;

    if (!kvmppc_get_pvinfo(env, &pvinfo)) {
        memcpy(buf, pvinfo.hcall, buf_len);
        return 0;
    }

    /*
     * Fallback to always fail hypercalls regardless of endianness:
     *
     * tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
     * li r3, -1
     * b .+8 (becomes nop in wrong endian)
     * bswap32(li r3, -1)
     */

    hc[0] = cpu_to_be32(0x08000048);
    hc[1] = cpu_to_be32(0x3860ffff);
    hc[2] = cpu_to_be32(0x48000008);
    hc[3] = cpu_to_be32(bswap32(0x3860ffff));

    return 1;
}
2025fcf5ef2aSThomas Huth
/*
 * Enable in-kernel handling of a single hcall number via
 * KVM_CAP_PPC_ENABLE_HCALL.  Returns the kvm_vm_enable_cap() result
 * (0 on success).
 */
static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
{
    return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
}
2030fcf5ef2aSThomas Huth
/* Enable in-kernel handling of the logical cache-inhibited load/store hcalls. */
void kvmppc_enable_logical_ci_hcalls(void)
{
    /*
     * FIXME: it would be nice if we could detect the cases where
     * we're using a device which requires the in kernel
     * implementation of these hcalls, but the kernel lacks them and
     * produce a warning.
     */
    kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
    kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
}
2042fcf5ef2aSThomas Huth
/* Enable in-kernel handling of the H_SET_MODE hcall. */
void kvmppc_enable_set_mode_hcall(void)
{
    kvmppc_enable_hcall(kvm_state, H_SET_MODE);
}
2047fcf5ef2aSThomas Huth
/* Enable in-kernel handling of the H_CLEAR_REF and H_CLEAR_MOD hcalls. */
void kvmppc_enable_clear_ref_mod_hcalls(void)
{
    kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
    kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
}
2053fcf5ef2aSThomas Huth
/* Enable in-kernel handling of the H_PAGE_INIT hcall. */
void kvmppc_enable_h_page_init(void)
{
    kvmppc_enable_hcall(kvm_state, H_PAGE_INIT);
}
205868f9f708SSuraj Jitindar Singh
/* Enable in-kernel handling of the H_RPT_INVALIDATE hcall. */
void kvmppc_enable_h_rpt_invalidate(void)
{
    kvmppc_enable_hcall(kvm_state, H_RPT_INVALIDATE);
}
206382123b75SBharata B Rao
2064566abdb4SPaolo Bonzini #ifdef CONFIG_PSERIES
/*
 * Put the vCPU into PAPR (pseries) mode.  Fatal if the host KVM or
 * vCPU type cannot support PAPR guests.
 */
void kvmppc_set_papr(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);

    if (!kvm_enabled()) {
        return;
    }

    if (kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0) != 0) {
        error_report("This vCPU type or KVM version does not support PAPR");
        exit(1);
    }

    /*
     * Update the capability flag so we sync the right information
     * with kvm
     */
    cap_papr = 1;
}
2086566abdb4SPaolo Bonzini #endif
2087fcf5ef2aSThomas Huth
/*
 * Set the vCPU's logical (compatibility) PVR via the
 * KVM_REG_PPC_ARCH_COMPAT one-reg.  Returns the kvm_set_one_reg()
 * result (0 on success, negative errno on failure).
 */
int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
{
    return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &compat_pvr);
}
2092fcf5ef2aSThomas Huth
/*
 * Configure the MPIC proxy (EPR) facility for the vCPU.  Fatal only
 * when the proxy was actually requested and KVM can't provide it.
 */
void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
{
    int rc = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_PPC_EPR, 0, mpic_proxy);

    if (rc && mpic_proxy) {
        error_report("This KVM version does not support EPR");
        exit(1);
    }
}
2104fcf5ef2aSThomas Huth
/* Accessor for the cached cap_fwnmi KVM capability flag. */
bool kvmppc_get_fwnmi(void)
{
    return cap_fwnmi;
}
2109ec010c00SNicholas Piggin
/*
 * Enable FWNMI machine-check delivery for the vCPU.
 * Returns the kvm_vcpu_enable_cap() result (0 on success).
 */
int kvmppc_set_fwnmi(PowerPCCPU *cpu)
{
    return kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_PPC_FWNMI, 0);
}
21169d953ce4SAravinda Prasad
kvmppc_smt_threads(void)2117fcf5ef2aSThomas Huth int kvmppc_smt_threads(void)
2118fcf5ef2aSThomas Huth {
2119fcf5ef2aSThomas Huth return cap_ppc_smt ? cap_ppc_smt : 1;
2120fcf5ef2aSThomas Huth }
2121fcf5ef2aSThomas Huth
kvmppc_set_smt_threads(int smt)2122fa98fbfcSSam Bobroff int kvmppc_set_smt_threads(int smt)
2123fa98fbfcSSam Bobroff {
2124fa98fbfcSSam Bobroff int ret;
2125fa98fbfcSSam Bobroff
2126fa98fbfcSSam Bobroff ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_SMT, 0, smt, 0);
2127fa98fbfcSSam Bobroff if (!ret) {
2128fa98fbfcSSam Bobroff cap_ppc_smt = smt;
2129fa98fbfcSSam Bobroff }
2130fa98fbfcSSam Bobroff return ret;
2131fa98fbfcSSam Bobroff }
2132fa98fbfcSSam Bobroff
kvmppc_error_append_smt_possible_hint(Error * const * errp)21330c115681SVladimir Sementsov-Ogievskiy void kvmppc_error_append_smt_possible_hint(Error *const *errp)
2134fa98fbfcSSam Bobroff {
2135fa98fbfcSSam Bobroff int i;
2136fa98fbfcSSam Bobroff GString *g;
2137fa98fbfcSSam Bobroff char *s;
2138fa98fbfcSSam Bobroff
2139fa98fbfcSSam Bobroff assert(kvm_enabled());
2140fa98fbfcSSam Bobroff if (cap_ppc_smt_possible) {
2141fa98fbfcSSam Bobroff g = g_string_new("Available VSMT modes:");
2142fa98fbfcSSam Bobroff for (i = 63; i >= 0; i--) {
2143fa98fbfcSSam Bobroff if ((1UL << i) & cap_ppc_smt_possible) {
2144fa98fbfcSSam Bobroff g_string_append_printf(g, " %lu", (1UL << i));
2145fa98fbfcSSam Bobroff }
2146fa98fbfcSSam Bobroff }
2147fa98fbfcSSam Bobroff s = g_string_free(g, false);
21481a639fdfSMarkus Armbruster error_append_hint(errp, "%s.\n", s);
2149fa98fbfcSSam Bobroff g_free(s);
2150fa98fbfcSSam Bobroff } else {
21511a639fdfSMarkus Armbruster error_append_hint(errp,
2152fa98fbfcSSam Bobroff "This KVM seems to be too old to support VSMT.\n");
2153fa98fbfcSSam Bobroff }
2154fa98fbfcSSam Bobroff }
2155fa98fbfcSSam Bobroff
2156fa98fbfcSSam Bobroff
2157fcf5ef2aSThomas Huth #ifdef TARGET_PPC64
kvmppc_vrma_limit(unsigned int hash_shift)21586a84737cSDavid Gibson uint64_t kvmppc_vrma_limit(unsigned int hash_shift)
2159fcf5ef2aSThomas Huth {
2160fcf5ef2aSThomas Huth struct kvm_ppc_smmu_info info;
2161fcf5ef2aSThomas Huth long rampagesize, best_page_shift;
2162fcf5ef2aSThomas Huth int i;
2163fcf5ef2aSThomas Huth
2164c995e942SDavid Gibson /*
2165c995e942SDavid Gibson * Find the largest hardware supported page size that's less than
2166c995e942SDavid Gibson * or equal to the (logical) backing page size of guest RAM
2167c995e942SDavid Gibson */
2168ab256960SGreg Kurz kvm_get_smmu_info(&info, &error_fatal);
2169905b7ee4SDavid Hildenbrand rampagesize = qemu_minrampagesize();
2170fcf5ef2aSThomas Huth best_page_shift = 0;
2171fcf5ef2aSThomas Huth
2172fcf5ef2aSThomas Huth for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
2173fcf5ef2aSThomas Huth struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
2174fcf5ef2aSThomas Huth
2175fcf5ef2aSThomas Huth if (!sps->page_shift) {
2176fcf5ef2aSThomas Huth continue;
2177fcf5ef2aSThomas Huth }
2178fcf5ef2aSThomas Huth
2179fcf5ef2aSThomas Huth if ((sps->page_shift > best_page_shift)
2180fcf5ef2aSThomas Huth && ((1UL << sps->page_shift) <= rampagesize)) {
2181fcf5ef2aSThomas Huth best_page_shift = sps->page_shift;
2182fcf5ef2aSThomas Huth }
2183fcf5ef2aSThomas Huth }
2184fcf5ef2aSThomas Huth
21856a84737cSDavid Gibson return 1ULL << (best_page_shift + hash_shift - 7);
2186fcf5ef2aSThomas Huth }
2187fcf5ef2aSThomas Huth #endif
2188fcf5ef2aSThomas Huth
/* Accessor for the cached cap_spapr_multitce KVM capability flag. */
bool kvmppc_spapr_use_multitce(void)
{
    return cap_spapr_multitce;
}
2193fcf5ef2aSThomas Huth
kvmppc_spapr_enable_inkernel_multitce(void)21943dc410aeSAlexey Kardashevskiy int kvmppc_spapr_enable_inkernel_multitce(void)
21953dc410aeSAlexey Kardashevskiy {
21963dc410aeSAlexey Kardashevskiy int ret;
21973dc410aeSAlexey Kardashevskiy
21983dc410aeSAlexey Kardashevskiy ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
21993dc410aeSAlexey Kardashevskiy H_PUT_TCE_INDIRECT, 1);
22003dc410aeSAlexey Kardashevskiy if (!ret) {
22013dc410aeSAlexey Kardashevskiy ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
22023dc410aeSAlexey Kardashevskiy H_STUFF_TCE, 1);
22033dc410aeSAlexey Kardashevskiy }
22043dc410aeSAlexey Kardashevskiy
22053dc410aeSAlexey Kardashevskiy return ret;
22063dc410aeSAlexey Kardashevskiy }
22073dc410aeSAlexey Kardashevskiy
kvmppc_create_spapr_tce(uint32_t liobn,uint32_t page_shift,uint64_t bus_offset,uint32_t nb_table,int * pfd,bool need_vfio)2208d6ee2a7cSAlexey Kardashevskiy void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
2209d6ee2a7cSAlexey Kardashevskiy uint64_t bus_offset, uint32_t nb_table,
2210d6ee2a7cSAlexey Kardashevskiy int *pfd, bool need_vfio)
2211fcf5ef2aSThomas Huth {
2212fcf5ef2aSThomas Huth long len;
2213fcf5ef2aSThomas Huth int fd;
2214fcf5ef2aSThomas Huth void *table;
2215fcf5ef2aSThomas Huth
2216c995e942SDavid Gibson /*
2217c995e942SDavid Gibson * Must set fd to -1 so we don't try to munmap when called for
2218fcf5ef2aSThomas Huth * destroying the table, which the upper layers -will- do
2219fcf5ef2aSThomas Huth */
2220fcf5ef2aSThomas Huth *pfd = -1;
2221fcf5ef2aSThomas Huth if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
2222fcf5ef2aSThomas Huth return NULL;
2223fcf5ef2aSThomas Huth }
2224fcf5ef2aSThomas Huth
2225d6ee2a7cSAlexey Kardashevskiy if (cap_spapr_tce_64) {
2226d6ee2a7cSAlexey Kardashevskiy struct kvm_create_spapr_tce_64 args = {
2227d6ee2a7cSAlexey Kardashevskiy .liobn = liobn,
2228d6ee2a7cSAlexey Kardashevskiy .page_shift = page_shift,
2229d6ee2a7cSAlexey Kardashevskiy .offset = bus_offset >> page_shift,
2230d6ee2a7cSAlexey Kardashevskiy .size = nb_table,
2231d6ee2a7cSAlexey Kardashevskiy .flags = 0
2232d6ee2a7cSAlexey Kardashevskiy };
2233d6ee2a7cSAlexey Kardashevskiy fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE_64, &args);
2234d6ee2a7cSAlexey Kardashevskiy if (fd < 0) {
2235d6ee2a7cSAlexey Kardashevskiy fprintf(stderr,
2236d6ee2a7cSAlexey Kardashevskiy "KVM: Failed to create TCE64 table for liobn 0x%x\n",
2237d6ee2a7cSAlexey Kardashevskiy liobn);
2238d6ee2a7cSAlexey Kardashevskiy return NULL;
2239d6ee2a7cSAlexey Kardashevskiy }
2240d6ee2a7cSAlexey Kardashevskiy } else if (cap_spapr_tce) {
2241d6ee2a7cSAlexey Kardashevskiy uint64_t window_size = (uint64_t) nb_table << page_shift;
2242d6ee2a7cSAlexey Kardashevskiy struct kvm_create_spapr_tce args = {
2243d6ee2a7cSAlexey Kardashevskiy .liobn = liobn,
2244d6ee2a7cSAlexey Kardashevskiy .window_size = window_size,
2245d6ee2a7cSAlexey Kardashevskiy };
2246d6ee2a7cSAlexey Kardashevskiy if ((window_size != args.window_size) || bus_offset) {
2247d6ee2a7cSAlexey Kardashevskiy return NULL;
2248d6ee2a7cSAlexey Kardashevskiy }
2249fcf5ef2aSThomas Huth fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
2250fcf5ef2aSThomas Huth if (fd < 0) {
2251fcf5ef2aSThomas Huth fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
2252fcf5ef2aSThomas Huth liobn);
2253fcf5ef2aSThomas Huth return NULL;
2254fcf5ef2aSThomas Huth }
2255d6ee2a7cSAlexey Kardashevskiy } else {
2256d6ee2a7cSAlexey Kardashevskiy return NULL;
2257d6ee2a7cSAlexey Kardashevskiy }
2258fcf5ef2aSThomas Huth
2259d6ee2a7cSAlexey Kardashevskiy len = nb_table * sizeof(uint64_t);
2260fcf5ef2aSThomas Huth /* FIXME: round this up to page size */
2261fcf5ef2aSThomas Huth
2262fcf5ef2aSThomas Huth table = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
2263fcf5ef2aSThomas Huth if (table == MAP_FAILED) {
2264fcf5ef2aSThomas Huth fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
2265fcf5ef2aSThomas Huth liobn);
2266fcf5ef2aSThomas Huth close(fd);
2267fcf5ef2aSThomas Huth return NULL;
2268fcf5ef2aSThomas Huth }
2269fcf5ef2aSThomas Huth
2270fcf5ef2aSThomas Huth *pfd = fd;
2271fcf5ef2aSThomas Huth return table;
2272fcf5ef2aSThomas Huth }
2273fcf5ef2aSThomas Huth
/*
 * Tear down a TCE table previously created by
 * kvmppc_create_spapr_tce(): unmap it and close the backing fd.
 * Returns -1 when fd is invalid (table was never kernel-backed),
 * otherwise 0 -- even if unmapping fails, in which case the table is
 * deliberately leaked after logging.
 */
int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
{
    long len = nb_table * sizeof(uint64_t);

    if (fd < 0) {
        return -1;
    }

    if (munmap(table, len) < 0 || close(fd) < 0) {
        fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
                strerror(errno));
        /* Leak the table */
    }

    return 0;
}
2292fcf5ef2aSThomas Huth
kvmppc_reset_htab(int shift_hint)2293fcf5ef2aSThomas Huth int kvmppc_reset_htab(int shift_hint)
2294fcf5ef2aSThomas Huth {
2295fcf5ef2aSThomas Huth uint32_t shift = shift_hint;
2296fcf5ef2aSThomas Huth
2297fcf5ef2aSThomas Huth if (!kvm_enabled()) {
2298fcf5ef2aSThomas Huth /* Full emulation, tell caller to allocate htab itself */
2299fcf5ef2aSThomas Huth return 0;
2300fcf5ef2aSThomas Huth }
23016977afdaSGreg Kurz if (kvm_vm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
2302fcf5ef2aSThomas Huth int ret;
2303fcf5ef2aSThomas Huth ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
2304fcf5ef2aSThomas Huth if (ret == -ENOTTY) {
2305c995e942SDavid Gibson /*
2306c995e942SDavid Gibson * At least some versions of PR KVM advertise the
2307fcf5ef2aSThomas Huth * capability, but don't implement the ioctl(). Oops.
2308fcf5ef2aSThomas Huth * Return 0 so that we allocate the htab in qemu, as is
2309c995e942SDavid Gibson * correct for PR.
2310c995e942SDavid Gibson */
2311fcf5ef2aSThomas Huth return 0;
2312fcf5ef2aSThomas Huth } else if (ret < 0) {
2313fcf5ef2aSThomas Huth return ret;
2314fcf5ef2aSThomas Huth }
2315fcf5ef2aSThomas Huth return shift;
2316fcf5ef2aSThomas Huth }
2317fcf5ef2aSThomas Huth
2318c995e942SDavid Gibson /*
2319c995e942SDavid Gibson * We have a kernel that predates the htab reset calls. For PR
2320fcf5ef2aSThomas Huth * KVM, we need to allocate the htab ourselves, for an HV KVM of
2321c995e942SDavid Gibson * this era, it has allocated a 16MB fixed size hash table
2322c995e942SDavid Gibson * already.
2323c995e942SDavid Gibson */
2324fcf5ef2aSThomas Huth if (kvmppc_is_pr(kvm_state)) {
2325fcf5ef2aSThomas Huth /* PR - tell caller to allocate htab */
2326fcf5ef2aSThomas Huth return 0;
2327fcf5ef2aSThomas Huth } else {
2328fcf5ef2aSThomas Huth /* HV - assume 16MB kernel allocated htab */
2329fcf5ef2aSThomas Huth return 24;
2330fcf5ef2aSThomas Huth }
2331fcf5ef2aSThomas Huth }
2332fcf5ef2aSThomas Huth
/* Read the host's Processor Version Register via the mfpvr instruction. */
static inline uint32_t mfpvr(void)
{
    uint32_t pvr;

    asm ("mfpvr %0"
         : "=r"(pvr));
    return pvr;
}
2341fcf5ef2aSThomas Huth
/* Set (on == true) or clear (on == false) the given flag bits in *word. */
static void alter_insns(uint64_t *word, uint64_t flags, bool on)
{
    *word = on ? (*word | flags) : (*word & ~flags);
}
2350fcf5ef2aSThomas Huth
kvmppc_cpu_realize(CPUState * cs,Error ** errp)2351cfb52d07SHarsh Prateek Bora static bool kvmppc_cpu_realize(CPUState *cs, Error **errp)
2352cfb52d07SHarsh Prateek Bora {
2353cfb52d07SHarsh Prateek Bora int ret;
2354cfb52d07SHarsh Prateek Bora const char *vcpu_str = (cs->parent_obj.hotplugged == true) ?
2355cfb52d07SHarsh Prateek Bora "hotplug" : "create";
2356cfb52d07SHarsh Prateek Bora cs->cpu_index = cpu_get_free_index();
2357cfb52d07SHarsh Prateek Bora
2358cfb52d07SHarsh Prateek Bora POWERPC_CPU(cs)->vcpu_id = cs->cpu_index;
2359cfb52d07SHarsh Prateek Bora
2360cfb52d07SHarsh Prateek Bora /* create and park to fail gracefully in case vcpu hotplug fails */
2361cfb52d07SHarsh Prateek Bora ret = kvm_create_and_park_vcpu(cs);
2362cfb52d07SHarsh Prateek Bora if (ret) {
2363cfb52d07SHarsh Prateek Bora /*
2364cfb52d07SHarsh Prateek Bora * This causes QEMU to terminate if initial CPU creation
2365cfb52d07SHarsh Prateek Bora * fails, and only CPU hotplug failure if the error happens
2366cfb52d07SHarsh Prateek Bora * there.
2367cfb52d07SHarsh Prateek Bora */
2368cfb52d07SHarsh Prateek Bora error_setg(errp, "%s: vcpu %s failed with %d",
2369cfb52d07SHarsh Prateek Bora __func__, vcpu_str, ret);
2370cfb52d07SHarsh Prateek Bora return false;
2371cfb52d07SHarsh Prateek Bora }
2372cfb52d07SHarsh Prateek Bora return true;
2373cfb52d07SHarsh Prateek Bora }
2374cfb52d07SHarsh Prateek Bora
/*
 * Fill in the "host" CPU class with values queried from the real
 * hardware: PVR, AltiVec/VSX/DFP availability from AT_HWCAP, cache
 * sizes from the device tree, and (on ppc64) radix page info.
 */
static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
    uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
    uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);

    /* Now fix up the class with information we can query from the host */
    pcc->pvr = mfpvr();

    alter_insns(&pcc->insns_flags, PPC_ALTIVEC,
                hwcap & PPC_FEATURE_HAS_ALTIVEC);
    alter_insns(&pcc->insns_flags2, PPC2_VSX,
                hwcap & PPC_FEATURE_HAS_VSX);
    alter_insns(&pcc->insns_flags2, PPC2_DFP,
                hwcap & PPC_FEATURE_HAS_DFP);

    /* A failed device-tree read yields (uint32_t)-1; keep the default then */
    if (dcache_size != -1) {
        pcc->l1_dcache_size = dcache_size;
    }
    if (icache_size != -1) {
        pcc->l1_icache_size = icache_size;
    }

#if defined(TARGET_PPC64)
    pcc->radix_page_info = kvmppc_get_radix_page_info();
#endif /* defined(TARGET_PPC64) */
}
2403fcf5ef2aSThomas Huth
/* Accessor for the cached cap_epr KVM capability flag. */
bool kvmppc_has_cap_epr(void)
{
    return cap_epr;
}
2408fcf5ef2aSThomas Huth
/* Accessor for the cached cap_fixup_hcalls KVM capability flag. */
bool kvmppc_has_cap_fixup_hcalls(void)
{
    return cap_fixup_hcalls;
}
2413fcf5ef2aSThomas Huth
/* Accessor for the cached cap_htm (transactional memory) capability flag. */
bool kvmppc_has_cap_htm(void)
{
    return cap_htm;
}
2418fcf5ef2aSThomas Huth
/* Accessor for the cached cap_mmu_radix KVM capability flag. */
bool kvmppc_has_cap_mmu_radix(void)
{
    return cap_mmu_radix;
}
2423cf1c4cceSSam Bobroff
/* Accessor for the cached cap_mmu_hash_v3 KVM capability flag. */
bool kvmppc_has_cap_mmu_hash_v3(void)
{
    return cap_mmu_hash_v3;
}
2428cf1c4cceSSam Bobroff
/*
 * True when the host CPU is a POWER8 family chip (POWER8, POWER8E or
 * POWER8NVL), based on the masked host PVR.  Always false on 32-bit
 * targets.
 */
static bool kvmppc_power8_host(void)
{
#ifdef TARGET_PPC64
    uint32_t base_pvr = CPU_POWERPC_POWER_SERVER_MASK & mfpvr();

    switch (base_pvr) {
    case CPU_POWERPC_POWER8E_BASE:
    case CPU_POWERPC_POWER8NVL_BASE:
    case CPU_POWERPC_POWER8_BASE:
        return true;
    default:
        return false;
    }
#else
    return false;
#endif /* TARGET_PPC64 */
}
2442072f416aSSuraj Jitindar Singh
/*
 * Translate KVM's cache character/behaviour bits into the spapr cap
 * value: 2 when the mask says no L1D flush is required on
 * privilege-state transitions, 1 when a flush workaround
 * (ori30/trig2, plus thread-priv where required) is available,
 * 0 otherwise.
 */
static int parse_cap_ppc_safe_cache(struct kvm_ppc_cpu_char c)
{
    /* kvmppc_power8_host(): POWER8 doesn't need the THREAD_PRIV bit */
    bool l1d_thread_priv_req = !kvmppc_power8_host();

    if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_L1D_FLUSH_PR) {
        return 2;
    } else if ((!l1d_thread_priv_req ||
                c.character & c.character_mask & H_CPU_CHAR_L1D_THREAD_PRIV) &&
               (c.character & c.character_mask
                & (H_CPU_CHAR_L1D_FLUSH_ORI30 | H_CPU_CHAR_L1D_FLUSH_TRIG2))) {
        return 1;
    }

    return 0;
}
24588fea7044SSuraj Jitindar Singh
/*
 * Translate KVM's bounds-check character/behaviour bits into the
 * spapr cap value: 2 when no speculation barrier is required, 1 when
 * the ori31 barrier workaround is available, 0 otherwise.
 */
static int parse_cap_ppc_safe_bounds_check(struct kvm_ppc_cpu_char c)
{
    if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR) {
        return 2;
    } else if (c.character & c.character_mask & H_CPU_CHAR_SPEC_BAR_ORI31) {
        return 1;
    }

    return 0;
}
24698fea7044SSuraj Jitindar Singh
/*
 * Translate KVM's indirect-branch character/behaviour bits into a
 * SPAPR_CAP_* value.  The checks are ordered from strongest status to
 * weakest: not-applicable, count-cache-flush workaround, fixed via
 * count-cache disable, fixed via serialised bcctrl.
 */
static int parse_cap_ppc_safe_indirect_branch(struct kvm_ppc_cpu_char c)
{
    if ((~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_FLUSH_COUNT_CACHE) &&
        (~c.character & c.character_mask & H_CPU_CHAR_CACHE_COUNT_DIS) &&
        (~c.character & c.character_mask & H_CPU_CHAR_BCCTRL_SERIALISED)) {
        return SPAPR_CAP_FIXED_NA;
    } else if (c.behaviour & c.behaviour_mask & H_CPU_BEHAV_FLUSH_COUNT_CACHE) {
        return SPAPR_CAP_WORKAROUND;
    } else if (c.character & c.character_mask & H_CPU_CHAR_CACHE_COUNT_DIS) {
        return SPAPR_CAP_FIXED_CCD;
    } else if (c.character & c.character_mask & H_CPU_CHAR_BCCTRL_SERIALISED) {
        return SPAPR_CAP_FIXED_IBS;
    }

    return 0;
}
24868fea7044SSuraj Jitindar Singh
parse_cap_ppc_count_cache_flush_assist(struct kvm_ppc_cpu_char c)24878ff43ee4SSuraj Jitindar Singh static int parse_cap_ppc_count_cache_flush_assist(struct kvm_ppc_cpu_char c)
24888ff43ee4SSuraj Jitindar Singh {
24898ff43ee4SSuraj Jitindar Singh if (c.character & c.character_mask & H_CPU_CHAR_BCCTR_FLUSH_ASSIST) {
24908ff43ee4SSuraj Jitindar Singh return 1;
24918ff43ee4SSuraj Jitindar Singh }
24928ff43ee4SSuraj Jitindar Singh return 0;
24938ff43ee4SSuraj Jitindar Singh }
24948ff43ee4SSuraj Jitindar Singh
/* Accessor for the cached cap_xive KVM capability flag. */
bool kvmppc_has_cap_xive(void)
{
    return cap_xive;
}
249938afd772SCédric Le Goater
/*
 * Query KVM for the host CPU character/behaviour flags and cache the
 * parsed results in the cap_ppc_safe_* / count_cache_flush_assist
 * globals.  On any failure the caps stay at 0 ("assume broken").
 */
static void kvmppc_get_cpu_characteristics(KVMState *s)
{
    struct kvm_ppc_cpu_char c;
    int ret;

    /* Assume broken */
    cap_ppc_safe_cache = 0;
    cap_ppc_safe_bounds_check = 0;
    cap_ppc_safe_indirect_branch = 0;

    ret = kvm_vm_check_extension(s, KVM_CAP_PPC_GET_CPU_CHAR);
    if (!ret) {
        return;
    }
    ret = kvm_vm_ioctl(s, KVM_PPC_GET_CPU_CHAR, &c);
    if (ret < 0) {
        return;
    }

    cap_ppc_safe_cache = parse_cap_ppc_safe_cache(c);
    cap_ppc_safe_bounds_check = parse_cap_ppc_safe_bounds_check(c);
    cap_ppc_safe_indirect_branch = parse_cap_ppc_safe_indirect_branch(c);
    cap_ppc_count_cache_flush_assist =
        parse_cap_ppc_count_cache_flush_assist(c);
}
25258acc2ae5SSuraj Jitindar Singh
/* Accessor for the cached cap_ppc_safe_cache value. */
int kvmppc_get_cap_safe_cache(void)
{
    return cap_ppc_safe_cache;
}
25308acc2ae5SSuraj Jitindar Singh
/* Accessor for the cached cap_ppc_safe_bounds_check value. */
int kvmppc_get_cap_safe_bounds_check(void)
{
    return cap_ppc_safe_bounds_check;
}
25358acc2ae5SSuraj Jitindar Singh
/* Accessor for the cached cap_ppc_safe_indirect_branch value. */
int kvmppc_get_cap_safe_indirect_branch(void)
{
    return cap_ppc_safe_indirect_branch;
}
25408acc2ae5SSuraj Jitindar Singh
/* Accessor for the cached cap_ppc_count_cache_flush_assist value. */
int kvmppc_get_cap_count_cache_flush_assist(void)
{
    return cap_ppc_count_cache_flush_assist;
}
25458ff43ee4SSuraj Jitindar Singh
/* Whether the cap_ppc_nested_kvm_hv capability value is non-zero. */
bool kvmppc_has_cap_nested_kvm_hv(void)
{
    return !!cap_ppc_nested_kvm_hv;
}
2550b9a477b7SSuraj Jitindar Singh
/*
 * Ask the kernel to enable (or disable) nested KVM-HV for this VM.
 * Returns the result of kvm_vm_enable_cap() for KVM_CAP_PPC_NESTED_HV.
 */
int kvmppc_set_cap_nested_kvm_hv(int enable)
{
    return kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_NESTED_HV, 0, enable);
}
2555b9a477b7SSuraj Jitindar Singh
kvmppc_has_cap_spapr_vfio(void)25569ded780cSAlexey Kardashevskiy bool kvmppc_has_cap_spapr_vfio(void)
25579ded780cSAlexey Kardashevskiy {
25589ded780cSAlexey Kardashevskiy return cap_spapr_vfio;
25599ded780cSAlexey Kardashevskiy }
25609ded780cSAlexey Kardashevskiy
/* Accessor for the cached large-decrementer capability (cap_large_decr) */
int kvmppc_get_cap_large_decr(void)
{
    return cap_large_decr;
}
25657d050527SSuraj Jitindar Singh
/*
 * Flip the large-decrementer (LPCR_LD) bit in the vCPU's LPCR to match
 * 'enable'.  Returns 0 on success (including when the bit already had
 * the requested value), -1 if the kernel did not accept the change.
 */
int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable)
{
    CPUState *cs = CPU(cpu);
    uint64_t lpcr = 0;

    kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);

    /* Nothing to do if the LD bit already matches the request */
    if (!!(lpcr & LPCR_LD) == !!enable) {
        return 0;
    }

    lpcr = enable ? (lpcr | LPCR_LD) : (lpcr & ~LPCR_LD);
    kvm_set_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);

    /* Read the register back to verify the kernel applied the change */
    kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
    return (!!(lpcr & LPCR_LD) == !!enable) ? 0 : -1;
}
25897d050527SSuraj Jitindar Singh
/* Accessor for the cached radix-page-table-invalidate capability */
int kvmppc_has_cap_rpt_invalidate(void)
{
    return cap_rpt_invalidate;
}
259482123b75SBharata B Rao
kvmppc_supports_ail_3(void)2595ccc5a4c5SNicholas Piggin bool kvmppc_supports_ail_3(void)
2596ccc5a4c5SNicholas Piggin {
2597ccc5a4c5SNicholas Piggin return cap_ail_mode_3;
2598ccc5a4c5SNicholas Piggin }
2599ccc5a4c5SNicholas Piggin
kvm_ppc_get_host_cpu_class(void)2600fcf5ef2aSThomas Huth PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
2601fcf5ef2aSThomas Huth {
2602fcf5ef2aSThomas Huth uint32_t host_pvr = mfpvr();
2603fcf5ef2aSThomas Huth PowerPCCPUClass *pvr_pcc;
2604fcf5ef2aSThomas Huth
2605fcf5ef2aSThomas Huth pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
2606fcf5ef2aSThomas Huth if (pvr_pcc == NULL) {
2607fcf5ef2aSThomas Huth pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
2608fcf5ef2aSThomas Huth }
2609fcf5ef2aSThomas Huth
2610fcf5ef2aSThomas Huth return pvr_pcc;
2611fcf5ef2aSThomas Huth }
2612fcf5ef2aSThomas Huth
/* Point a pseries machine class's default CPU model at the "host" type */
static void pseries_machine_class_fixup(ObjectClass *oc, void *opaque)
{
    MACHINE_CLASS(oc)->default_cpu_type = TYPE_HOST_POWERPC_CPU;
}
2619165dc3edSDavid Gibson
/*
 * Register the "host" CPU type, derived from the CPU class matching the
 * host's PVR, and rewire related class data (machine default CPU and
 * family alias).  Returns 0 on success, -1 if no class matches the PVR.
 */
static int kvm_ppc_register_host_cpu_type(void)
{
    TypeInfo type_info = {
        .name = TYPE_HOST_POWERPC_CPU,
        .class_init = kvmppc_host_cpu_class_init,
    };
    PowerPCCPUClass *pvr_pcc;
    ObjectClass *oc;
    DeviceClass *dc;
    int i;

    pvr_pcc = kvm_ppc_get_host_cpu_class();
    if (pvr_pcc == NULL) {
        return -1;
    }
    /* The "host" type inherits from the class matching the host PVR */
    type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
    type_register(&type_info);
    /* override TCG default cpu type with 'host' cpu model */
    object_class_foreach(pseries_machine_class_fixup, TYPE_SPAPR_MACHINE,
                         false, NULL);

    oc = object_class_by_name(type_info.name);
    g_assert(oc);

    /*
     * Update generic CPU family class alias (e.g. on a POWER8NVL host,
     * we want "POWER8" to be a "family" alias that points to the current
     * host CPU type, too)
     */
    dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
    for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
        if (strcasecmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) {
            char *suffix;

            /* Point the family alias at the newly registered host type,
             * dropping the POWERPC_CPU_TYPE_SUFFIX if present */
            ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc));
            suffix = strstr(ppc_cpu_aliases[i].model, POWERPC_CPU_TYPE_SUFFIX);
            if (suffix) {
                *suffix = 0;
            }
            break;
        }
    }

    return 0;
}
2665fcf5ef2aSThomas Huth
/*
 * Tell the kernel to handle the named RTAS call in-kernel using the
 * given token.  Returns -ENOENT when the kernel lacks KVM_CAP_PPC_RTAS,
 * otherwise the KVM_PPC_RTAS_DEFINE_TOKEN ioctl result.
 */
int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
{
    struct kvm_rtas_token_args args = {
        .token = token,
    };

    if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
        return -ENOENT;
    }

    /*
     * The designated initializer above zeroed args.name, so copying at
     * most sizeof(args.name) - 1 bytes always leaves it NUL-terminated.
     */
    strncpy(args.name, function, sizeof(args.name) - 1);

    return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
}
2680fcf5ef2aSThomas Huth
/*
 * Open a kernel file descriptor for reading or writing the guest hash
 * page table, positioned at HPTE 'index'.
 *
 * Returns the fd on success.  On failure sets 'errp' and returns
 * -ENOTSUP (kernel lacks the HTAB fd capability) or -errno from the
 * KVM_PPC_GET_HTAB_FD ioctl.
 */
int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp)
{
    struct kvm_get_htab_fd s = {
        .flags = write ? KVM_GET_HTAB_WRITE : 0,
        .start_index = index,
    };
    int ret;

    if (!cap_htab_fd) {
        error_setg(errp, "KVM version doesn't support %s the HPT",
                   write ? "writing" : "reading");
        return -ENOTSUP;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
    if (ret < 0) {
        error_setg(errp, "Unable to open fd for %s HPT %s KVM: %s",
                   write ? "writing" : "reading", write ? "to" : "from",
                   strerror(errno));
        return -errno;
    }

    return ret;
}
2705fcf5ef2aSThomas Huth
/*
 * Stream HPT chunks read from the kernel HTAB fd into the migration
 * stream 'f'.  Stops when the kernel reports no more data (read
 * returns 0) or, if max_ns >= 0, when max_ns nanoseconds have elapsed.
 *
 * Returns 1 when the whole table has been sent, 0 when interrupted by
 * the deadline, or a negative value on read error.
 */
int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
{
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    g_autofree uint8_t *buf = g_malloc(bufsize);
    ssize_t rc;

    do {
        rc = read(fd, buf, bufsize);
        if (rc < 0) {
            fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
                    strerror(errno));
            return rc;
        } else if (rc) {
            uint8_t *buffer = buf;
            ssize_t n = rc;
            /* One read may return several header+PTE chunks; walk them all */
            while (n) {
                struct kvm_get_htab_header *head =
                    (struct kvm_get_htab_header *) buffer;
                size_t chunksize = sizeof(*head) +
                    HASH_PTE_SIZE_64 * head->n_valid;

                /* Forward the header fields, then the valid PTEs only;
                 * invalid entries are encoded by n_invalid alone */
                qemu_put_be32(f, head->index);
                qemu_put_be16(f, head->n_valid);
                qemu_put_be16(f, head->n_invalid);
                qemu_put_buffer(f, (void *)(head + 1),
                                HASH_PTE_SIZE_64 * head->n_valid);

                buffer += chunksize;
                n -= chunksize;
            }
        }
    } while ((rc != 0)
             && ((max_ns < 0) ||
                 ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));

    return (rc == 0) ? 1 : 0;
}
2743fcf5ef2aSThomas Huth
/*
 * Rebuild one header+PTE chunk from the migration stream 'f' and write
 * it into the kernel HTAB fd.  Returns 0 on success; on failure sets
 * 'errp' and returns -errno (write error) or -ENOSPC (short write).
 */
int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
                           uint16_t n_valid, uint16_t n_invalid, Error **errp)
{
    struct kvm_get_htab_header *buf;
    size_t chunksize = sizeof(*buf) + n_valid * HASH_PTE_SIZE_64;
    ssize_t rc;

    /* NOTE(review): alloca assumes the chunk stays small enough for the
     * stack (n_valid is a uint16_t) — confirm against the sender side */
    buf = alloca(chunksize);
    buf->index = index;
    buf->n_valid = n_valid;
    buf->n_invalid = n_invalid;

    /* Only valid PTEs carry payload in the stream */
    qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64 * n_valid);

    rc = write(fd, buf, chunksize);
    if (rc < 0) {
        error_setg_errno(errp, errno, "Error writing the KVM hash table");
        return -errno;
    }
    if (rc != chunksize) {
        /* We should never get a short write on a single chunk */
        error_setg(errp, "Short write while restoring the KVM hash table");
        return -ENOSPC;
    }
    return 0;
}
2770fcf5ef2aSThomas Huth
/* Always report true: on PPC an emulation failure should stop the vCPU */
bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}
2775fcf5ef2aSThomas Huth
void kvm_arch_init_irq_routing(KVMState *s)
{
    /* No PPC-specific IRQ routing setup is required */
}
2779fcf5ef2aSThomas Huth
/*
 * Read 'n' HPTEs starting at 'ptex' from the kernel HTAB fd into
 * 'hptes'.  Entries the kernel reports as invalid are returned zeroed.
 * Aborts via hw_error() on read failure or an unexpected entry index.
 */
void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n)
{
    int fd, rc;
    int i;

    /* Failing to open the fd is fatal here (error_abort) */
    fd = kvmppc_get_htab_fd(false, ptex, &error_abort);

    i = 0;
    while (i < n) {
        struct kvm_get_htab_header *hdr;
        int m = n < HPTES_PER_GROUP ? n : HPTES_PER_GROUP;
        /* Large enough for one header plus a full group of HPTEs */
        char buf[sizeof(*hdr) + HPTES_PER_GROUP * HASH_PTE_SIZE_64];

        rc = read(fd, buf, sizeof(*hdr) + m * HASH_PTE_SIZE_64);
        if (rc < 0) {
            hw_error("kvmppc_read_hptes: Unable to read HPTEs");
        }

        /* One read may return several header+PTE chunks; walk them all */
        hdr = (struct kvm_get_htab_header *)buf;
        while ((i < n) && ((char *)hdr < (buf + rc))) {
            int invalid = hdr->n_invalid, valid = hdr->n_valid;

            if (hdr->index != (ptex + i)) {
                hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32
                         " != (%"HWADDR_PRIu" + %d", hdr->index, ptex, i);
            }

            /* Clamp to the number of entries the caller asked for */
            if (n - i < valid) {
                valid = n - i;
            }
            memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * valid);
            i += valid;

            if ((n - i) < invalid) {
                invalid = n - i;
            }
            /* Invalid entries become all-zero HPTEs for the caller */
            memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64);
            i += invalid;

            /* Advance past this header and its n_valid PTE payload */
            hdr = (struct kvm_get_htab_header *)
                ((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid);
        }
    }

    close(fd);
}
28261ad9f0a4SDavid Gibson
/*
 * Write a single HPTE (ptex, pte0, pte1) into the guest hash table via
 * the kernel HTAB fd.  Aborts via hw_error() if the update fails.
 */
void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
{
    struct {
        struct kvm_get_htab_header hdr;
        uint64_t pte0;
        uint64_t pte1;
    } buf = {
        .hdr = {
            .index = ptex,
            .n_valid = 1,
            .n_invalid = 0,
        },
        /* The kernel expects big-endian PTE values */
        .pte0 = cpu_to_be64(pte0),
        .pte1 = cpu_to_be64(pte1),
    };
    int fd = kvmppc_get_htab_fd(true, 0 /* Ignored */, &error_abort);
    ssize_t rc = write(fd, &buf, sizeof(buf));

    if (rc != sizeof(buf)) {
        hw_error("kvmppc_write_hpte: Unable to update KVM HPT");
    }
    close(fd);
}
2850fcf5ef2aSThomas Huth
/* PPC needs no MSI route fixups: leave the route untouched, report success */
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}
2856fcf5ef2aSThomas Huth
/* No PPC-specific work after adding an MSI route; always succeeds */
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}
2862fcf5ef2aSThomas Huth
/* No PPC-specific work after releasing a virq; always succeeds */
int kvm_arch_release_virq_post(int virq)
{
    return 0;
}
2867fcf5ef2aSThomas Huth
/* The GSI is carried in the low 16 bits of the MSI data word */
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    return (uint16_t)data;
}
2872fcf5ef2aSThomas Huth
2873566abdb4SPaolo Bonzini #if defined(CONFIG_PSERIES)
/*
 * Forward a machine check reported by KVM to the sPAPR error handling
 * code, noting whether the kernel marked it fully recovered.
 */
int kvm_handle_nmi(PowerPCCPU *cpu, struct kvm_run *run)
{
    /* Extract the NMI disposition before touching any other state */
    uint16_t disp = run->flags & KVM_RUN_PPC_NMI_DISP_MASK;
    bool recovered;

    cpu_synchronize_state(CPU(cpu));

    recovered = (disp == KVM_RUN_PPC_NMI_DISP_FULLY_RECOV);
    spapr_mce_req_event(cpu, recovered);

    return 0;
}
28849ac703acSAravinda Prasad #endif
28859ac703acSAravinda Prasad
kvmppc_enable_hwrng(void)2886fcf5ef2aSThomas Huth int kvmppc_enable_hwrng(void)
2887fcf5ef2aSThomas Huth {
2888fcf5ef2aSThomas Huth if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
2889fcf5ef2aSThomas Huth return -1;
2890fcf5ef2aSThomas Huth }
2891fcf5ef2aSThomas Huth
2892fcf5ef2aSThomas Huth return kvmppc_enable_hcall(kvm_state, H_RANDOM);
2893fcf5ef2aSThomas Huth }
289430f4b05bSDavid Gibson
kvmppc_check_papr_resize_hpt(Error ** errp)289530f4b05bSDavid Gibson void kvmppc_check_papr_resize_hpt(Error **errp)
289630f4b05bSDavid Gibson {
289730f4b05bSDavid Gibson if (!kvm_enabled()) {
2898b55d295eSDavid Gibson return; /* No KVM, we're good */
2899b55d295eSDavid Gibson }
2900b55d295eSDavid Gibson
2901b55d295eSDavid Gibson if (cap_resize_hpt) {
2902b55d295eSDavid Gibson return; /* Kernel has explicit support, we're good */
2903b55d295eSDavid Gibson }
2904b55d295eSDavid Gibson
2905b55d295eSDavid Gibson /* Otherwise fallback on looking for PR KVM */
2906b55d295eSDavid Gibson if (kvmppc_is_pr(kvm_state)) {
290730f4b05bSDavid Gibson return;
290830f4b05bSDavid Gibson }
290930f4b05bSDavid Gibson
291030f4b05bSDavid Gibson error_setg(errp,
291130f4b05bSDavid Gibson "Hash page table resizing not available with this KVM version");
291230f4b05bSDavid Gibson }
2913b55d295eSDavid Gibson
/*
 * Ask the kernel to prepare an HPT resize to 'shift'.  Returns -ENOSYS
 * without the capability, otherwise the ioctl result.
 */
int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift)
{
    struct kvm_ppc_resize_hpt rhpt = { .flags = flags, .shift = shift };

    if (!cap_resize_hpt) {
        return -ENOSYS;
    }

    return kvm_vm_ioctl(CPU(cpu)->kvm_state, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
}
2928b55d295eSDavid Gibson
/*
 * Ask the kernel to commit a previously prepared HPT resize to 'shift'.
 * Returns -ENOSYS without the capability, otherwise the ioctl result.
 */
int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift)
{
    struct kvm_ppc_resize_hpt rhpt = { .flags = flags, .shift = shift };

    if (!cap_resize_hpt) {
        return -ENOSYS;
    }

    return kvm_vm_ioctl(CPU(cpu)->kvm_state, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
}
2943b55d295eSDavid Gibson
2944c363a37aSDaniel Henrique Barboza /*
2945c363a37aSDaniel Henrique Barboza * This is a helper function to detect a post migration scenario
2946c363a37aSDaniel Henrique Barboza * in which a guest, running as KVM-HV, freezes in cpu_post_load because
2947c363a37aSDaniel Henrique Barboza * the guest kernel can't handle a PVR value other than the actual host
2948c363a37aSDaniel Henrique Barboza * PVR in KVM_SET_SREGS, even if pvr_match() returns true.
2949c363a37aSDaniel Henrique Barboza *
2950c363a37aSDaniel Henrique Barboza * If we don't have cap_ppc_pvr_compat and we're not running in PR
2951c363a37aSDaniel Henrique Barboza * (so, we're HV), return true. The workaround itself is done in
2952c363a37aSDaniel Henrique Barboza * cpu_post_load.
2953c363a37aSDaniel Henrique Barboza *
2954c363a37aSDaniel Henrique Barboza * The order here is important: we'll only check for KVM PR as a
2955c363a37aSDaniel Henrique Barboza * fallback if the guest kernel can't handle the situation itself.
2956c363a37aSDaniel Henrique Barboza * We need to avoid as much as possible querying the running KVM type
2957c363a37aSDaniel Henrique Barboza * in QEMU level.
2958c363a37aSDaniel Henrique Barboza */
kvmppc_pvr_workaround_required(PowerPCCPU * cpu)2959c363a37aSDaniel Henrique Barboza bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
2960c363a37aSDaniel Henrique Barboza {
2961c363a37aSDaniel Henrique Barboza CPUState *cs = CPU(cpu);
2962c363a37aSDaniel Henrique Barboza
2963c363a37aSDaniel Henrique Barboza if (!kvm_enabled()) {
2964c363a37aSDaniel Henrique Barboza return false;
2965c363a37aSDaniel Henrique Barboza }
2966c363a37aSDaniel Henrique Barboza
2967c363a37aSDaniel Henrique Barboza if (cap_ppc_pvr_compat) {
2968c363a37aSDaniel Henrique Barboza return false;
2969c363a37aSDaniel Henrique Barboza }
2970c363a37aSDaniel Henrique Barboza
2971c363a37aSDaniel Henrique Barboza return !kvmppc_is_pr(cs->kvm_state);
2972c363a37aSDaniel Henrique Barboza }
2973a84f7179SNikunj A Dadhania
/* Push the vCPU's online state into the KVM_REG_PPC_ONLINE register */
void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online)
{
    if (!kvm_enabled()) {
        return;
    }

    kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ONLINE, &online);
}
29829723295aSGreg Kurz
/* Push a timebase offset into the KVM_REG_PPC_TB_OFFSET register */
void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset)
{
    if (!kvm_enabled()) {
        return;
    }

    kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_TB_OFFSET, &tb_offset);
}
299192a5199bSTom Lendacky
void kvm_arch_accel_class_init(ObjectClass *oc)
{
    /* No PPC-specific accelerator class properties to register */
}
2995cfb52d07SHarsh Prateek Bora
/* Hook the KVM-specific vCPU realize step into the accel CPU class */
static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data)
{
    ACCEL_CPU_CLASS(oc)->cpu_target_realize = kvmppc_cpu_realize;
}
3002cfb52d07SHarsh Prateek Bora
/* Abstract accel-cpu type carrying the KVM vCPU realize hook */
static const TypeInfo kvm_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("kvm"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = kvm_cpu_accel_class_init,
    .abstract = true,
};
/* Register the KVM accel-cpu type with QOM at startup */
static void kvm_cpu_accel_register_types(void)
{
    type_register_static(&kvm_cpu_accel_type_info);
}
type_init(kvm_cpu_accel_register_types);
3015