// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, <yu.liu@freescale.com>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>

#include "../mm/mmu_decl.h"
#include "booke.h"
#include "e500.h"

struct id {
	unsigned long val;
	struct id **pentry;
};

#define NUM_TIDS 256

/*
 * This table provides mappings from:
 * (guestAS,guestTID,guestPR) --> ID of physical cpu
 * guestAS	[0..1]
 * guestTID	[0..255]
 * guestPR	[0..1]
 * ID		[1..255]
 * Each vcpu keeps one vcpu_id_table.
 */
struct vcpu_id_table {
	struct id id[2][NUM_TIDS][2];
};
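
/*
 * For illustration: a guest TLB entry with TS=1 and TID=5 that is touched
 * in user mode (PR=1) is tracked through idt->id[1][5][1].  With
 * NUM_TIDS = 256 the table holds 2 * 256 * 2 = 1024 struct id entries per
 * vcpu (roughly 8 KiB on a 32-bit host, assuming 8-byte entries).
 */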

/*
 * This table provides the reverse mapping of vcpu_id_table:
 * ID --> address of vcpu_id_table item.
 * Each physical core has one pcpu_id_table.
 */
struct pcpu_id_table {
	struct id *entry[NUM_TIDS];
};
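
/*
 * Sketch of the invariant tying the two tables together (see
 * local_sid_lookup() below): a mapping for shadow id N is valid on this
 * core only if the vcpu's struct id has val == N, this core's
 * pcpu_sids.entry[N] points back at that struct id, and its pentry field
 * points at that per-cpu slot.  Tearing down either side is enough to
 * invalidate the mapping.
 */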

static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);

/* This variable keeps the last used shadow ID on the local core.
 * The valid range of a shadow ID is [1..255] */
static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);

/*
 * Allocate a free shadow id and set up a valid sid mapping in the given entry.
 * A mapping is only valid when the vcpu_id_table and pcpu_id_table entries
 * match.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_setup_one(struct id *entry)
{
	unsigned long sid;
	int ret = -1;

	sid = __this_cpu_inc_return(pcpu_last_used_sid);
	if (sid < NUM_TIDS) {
		__this_cpu_write(pcpu_sids.entry[sid], entry);
		entry->val = sid;
		entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]);
		ret = sid;
	}

	/*
	 * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
	 * the caller will invalidate everything and start over.
	 *
	 * sid > NUM_TIDS indicates a race, which we disable preemption to
	 * avoid.
	 */
	WARN_ON(sid > NUM_TIDS);

	return ret;
}

/*
 * Check if the given entry contains a valid shadow id mapping.
 * An ID mapping is considered valid only if
 * both the vcpu and the pcpu know about this mapping.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_lookup(struct id *entry)
{
	if (entry && entry->val != 0 &&
	    __this_cpu_read(pcpu_sids.entry[entry->val]) == entry &&
	    entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val]))
		return entry->val;
	return -1;
}

/* Invalidate all id mappings on the local core -- call with preempt disabled */
static inline void local_sid_destroy_all(void)
{
	__this_cpu_write(pcpu_last_used_sid, 0);
	memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids));
}

static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
	return vcpu_e500->idt;
}

static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->idt);
	vcpu_e500->idt = NULL;
}

/* Map guest pid to shadow.
 * We use PID to keep the shadow of the current guest non-zero PID,
 * and use PID1 to keep the shadow of the guest zero PID,
 * so that a guest tlbe with TID=0 can be accessed at any time. */
static void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	preempt_disable();
	vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu),
			get_cur_pid(&vcpu_e500->vcpu),
			get_cur_pr(&vcpu_e500->vcpu), 1);
	vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu), 0,
			get_cur_pr(&vcpu_e500->vcpu), 1);
	preempt_enable();
}
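
/*
 * Example, assuming the guest is running in supervisor mode (PR=0) in AS0
 * with PID=4: shadow_pid above gets the shadow id for (AS=0, TID=4, PR=0)
 * and shadow_pid1 gets the one for (AS=0, TID=0, PR=0), so host TLB entries
 * created for global (TID=0) guest mappings remain reachable while the
 * guest's current PID is non-zero.
 */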

/* Invalidate all mappings on the vcpu */
static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/* Invalidate one ID mapping on the vcpu */
static inline void kvmppc_e500_id_table_reset_one(
			       struct kvmppc_vcpu_e500 *vcpu_e500,
			       int as, int pid, int pr)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;

	BUG_ON(as >= 2);
	BUG_ON(pid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	idt->id[as][pid][pr].val = 0;
	idt->id[as][pid][pr].pentry = NULL;

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/*
 * Map a guest (vcpu,AS,ID,PR) tuple to a physical core shadow id.
 * This function first checks whether a valid mapping exists;
 * if not, it creates a new one.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
				 unsigned int as, unsigned int gid,
				 unsigned int pr, int avoid_recursion)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;
	int sid;

	BUG_ON(as >= 2);
	BUG_ON(gid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	sid = local_sid_lookup(&idt->id[as][gid][pr]);

	while (sid <= 0) {
		/* No mapping yet */
		sid = local_sid_setup_one(&idt->id[as][gid][pr]);
		if (sid <= 0) {
			_tlbil_all();
			local_sid_destroy_all();
		}

		/* Update shadow pid when mappings are changed */
		if (!avoid_recursion)
			kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}

	return sid;
}
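
/*
 * Note on the retry loop above: if local_sid_setup_one() fails because all
 * 255 shadow ids on this core are in use, the local TLB and sid state are
 * thrown away (_tlbil_all() plus local_sid_destroy_all()) and the
 * allocation is retried, which then succeeds.  The avoid_recursion flag
 * exists because kvmppc_e500_recalc_shadow_pid() itself calls back into
 * this function.
 */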

unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
				      struct kvm_book3e_206_tlb_entry *gtlbe)
{
	return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe),
				   get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0);
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	if (vcpu->arch.pid != pid) {
		vcpu_e500->pid[0] = vcpu->arch.pid = pid;
		kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}
}

/* gtlbe must not be mapped by more than one host tlbe */
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
			   struct kvm_book3e_206_tlb_entry *gtlbe)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;
	unsigned int pr, tid, ts;
	int pid;
	u32 val, eaddr;
	unsigned long flags;

	ts = get_tlb_ts(gtlbe);
	tid = get_tlb_tid(gtlbe);

	preempt_disable();

	/* One guest ID may be mapped to two shadow IDs */
	for (pr = 0; pr < 2; pr++) {
		/*
		 * The shadow PID can have a valid mapping on at most one
		 * host CPU.  In the common case, it will be valid on this
		 * CPU, in which case we do a local invalidation of the
		 * specific address.
		 *
		 * If the shadow PID is not valid on the current host CPU,
		 * we invalidate the entire shadow PID.
		 */
		pid = local_sid_lookup(&idt->id[ts][tid][pr]);
		if (pid <= 0) {
			kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
			continue;
		}

		/*
		 * The guest is invalidating a 4K entry which is in a PID
		 * that has a valid shadow mapping on this host CPU.  We
		 * search the host TLB to invalidate its shadow TLB entry,
		 * similar to __tlbil_va except that we need to look in AS1.
		 */
		val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
		eaddr = get_tlb_eaddr(gtlbe);

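		/*
		 * Keep interrupts off across the tlbsx/tlbwe pair below,
		 * presumably because a TLB miss taken in between would
		 * clobber the MAS registers we depend on.
		 */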
		local_irq_save(flags);

		mtspr(SPRN_MAS6, val);
		asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
		val = mfspr(SPRN_MAS1);
		if (val & MAS1_VALID) {
			mtspr(SPRN_MAS1, val & ~MAS1_VALID);
			asm volatile("tlbwe");
		}

		local_irq_restore(flags);
	}

	preempt_enable();
}

void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kvmppc_e500_id_table_reset_all(vcpu_e500);
}

void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
	/* Recalc shadow pid since the MSR changed */
	kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}

static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
{
	kvmppc_booke_vcpu_load(vcpu, cpu);

	/* The shadow PID may have expired on the local core */
	kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}

static void kvmppc_core_vcpu_put_e500(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_SPE
	if (vcpu->arch.shadow_msr & MSR_SPE)
		kvmppc_vcpu_disable_spe(vcpu);
#endif

	kvmppc_booke_vcpu_put(vcpu);
}

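/*
 * Note: only the original 32-bit e500v2 core is handled by this module;
 * e500mc and later cores are presumably covered by the separate e500mc
 * implementation.
 */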
static int kvmppc_e500_check_processor_compat(void)
{
	int r;

	if (strcmp(cur_cpu_spec->cpu_name, "e500v2") == 0)
		r = 0;
	else
		r = -ENOTSUPP;

	return r;
}

static void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct kvm_book3e_206_tlb_entry *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = get_entry(vcpu_e500, 1, 0);
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;

	/* 4K map for serial output. Used by kernel wrapper. */
	tlbe = get_entry(vcpu_e500, 1, 1);
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
}
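
/*
 * The hard-coded 0xe0004500 address above is assumed to correspond to the
 * DUART in the CCSR space of a Freescale reference board, i.e. the "serial
 * output" mapping that the guest kernel's boot wrapper expects.
 */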

int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	kvmppc_e500_tlb_setup(vcpu_e500);

	/* Registers init */
	vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu_e500->svr = mfspr(SPRN_SVR);

	vcpu->arch.cpu_type = KVM_CPU_E500V2;

	return 0;
}

static int kvmppc_core_get_sregs_e500(struct kvm_vcpu *vcpu,
				      struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_SPE |
			       KVM_SREGS_E_PM;
	sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;

	sregs->u.e.impl.fsl.features = 0;
	sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
	sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
	sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;

	sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
	sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
	sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
	sregs->u.e.ivor_high[3] =
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];

	kvmppc_get_sregs_ivor(vcpu, sregs);
	kvmppc_get_sregs_e500_tlb(vcpu, sregs);
	return 0;
}

static int kvmppc_core_set_sregs_e500(struct kvm_vcpu *vcpu,
				      struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int ret;

	if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
		vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
		vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
		vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
	}

	ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
	if (ret < 0)
		return ret;

	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	if (sregs->u.e.features & KVM_SREGS_E_SPE) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] =
			sregs->u.e.ivor_high[0];
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] =
			sregs->u.e.ivor_high[1];
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] =
			sregs->u.e.ivor_high[2];
	}

	if (sregs->u.e.features & KVM_SREGS_E_PM) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
			sregs->u.e.ivor_high[3];
	}

	return kvmppc_set_sregs_ivor(vcpu, sregs);
}

static int kvmppc_get_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
				   union kvmppc_one_reg *val)
{
	int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
	return r;
}

static int kvmppc_set_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
				   union kvmppc_one_reg *val)
{
	int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
	return r;
}

static int kvmppc_core_vcpu_create_e500(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500;
	int err;

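	/*
	 * The BUILD_BUG_ON below documents an assumption: generic KVM
	 * allocates each vcpu from a cache sized for struct kvmppc_vcpu_e500
	 * (see the kvm_init() call in kvmppc_e500_init()), so the embedded
	 * struct kvm_vcpu must sit at offset 0 for the vcpu pointer handed
	 * to us and the surrounding kvmppc_vcpu_e500 to refer to the same
	 * allocation.
	 */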
	BUILD_BUG_ON(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0);
	vcpu_e500 = to_e500(vcpu);

	if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
		return -ENOMEM;

	err = kvmppc_e500_tlb_init(vcpu_e500);
	if (err)
		goto uninit_id;

	vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!vcpu->arch.shared) {
		err = -ENOMEM;
		goto uninit_tlb;
	}

	return 0;

uninit_tlb:
	kvmppc_e500_tlb_uninit(vcpu_e500);
uninit_id:
	kvmppc_e500_id_table_free(vcpu_e500);
	return err;
}

static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	free_page((unsigned long)vcpu->arch.shared);
	kvmppc_e500_tlb_uninit(vcpu_e500);
	kvmppc_e500_id_table_free(vcpu_e500);
}

static int kvmppc_core_init_vm_e500(struct kvm *kvm)
{
	return 0;
}

static void kvmppc_core_destroy_vm_e500(struct kvm *kvm)
{
}

static struct kvmppc_ops kvm_ops_e500 = {
	.get_sregs = kvmppc_core_get_sregs_e500,
	.set_sregs = kvmppc_core_set_sregs_e500,
	.get_one_reg = kvmppc_get_one_reg_e500,
	.set_one_reg = kvmppc_set_one_reg_e500,
	.vcpu_load   = kvmppc_core_vcpu_load_e500,
	.vcpu_put    = kvmppc_core_vcpu_put_e500,
	.vcpu_create = kvmppc_core_vcpu_create_e500,
	.vcpu_free   = kvmppc_core_vcpu_free_e500,
	.init_vm = kvmppc_core_init_vm_e500,
	.destroy_vm = kvmppc_core_destroy_vm_e500,
	.emulate_op = kvmppc_core_emulate_op_e500,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
	.create_vcpu_debugfs = kvmppc_create_vcpu_debugfs_e500,
};

static int __init kvmppc_e500_init(void)
{
	int r, i;
	unsigned long ivor[3];
	/* Process remaining handlers above the generic first 16 */
	unsigned long *handler = &kvmppc_booke_handler_addr[16];
	unsigned long handler_len;
	unsigned long max_ivor = 0;

	r = kvmppc_e500_check_processor_compat();
	if (r)
		goto err_out;

	r = kvmppc_booke_init();
	if (r)
		goto err_out;

	/* copy extra E500 exception handlers */
	ivor[0] = mfspr(SPRN_IVOR32);
	ivor[1] = mfspr(SPRN_IVOR33);
	ivor[2] = mfspr(SPRN_IVOR34);
	for (i = 0; i < 3; i++) {
		if (ivor[i] > ivor[max_ivor])
			max_ivor = i;

		handler_len = handler[i + 1] - handler[i];
		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       (void *)handler[i], handler_len);
	}
	handler_len = handler[max_ivor + 1] - handler[max_ivor];
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   ivor[max_ivor] + handler_len);

	r = kvm_init(sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
	if (r)
		goto err_out;
	kvm_ops_e500.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_e500;

err_out:
	return r;
}

static void __exit kvmppc_e500_exit(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_booke_exit();
}

module_init(kvmppc_e500_init);
module_exit(kvmppc_e500_exit);
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");