/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
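
/*
 * MMU support for Book3S hypervisor-mode (HV) guests: hashed page
 * table (HPT) allocation, VRMA mapping and LPID management.
 */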

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>

/* For now use fixed-size 16MB page table */
#define HPT_ORDER	24
#define HPT_NPTEG	(1ul << (HPT_ORDER - 7))	/* 128B per pteg */
#define HPT_HASH_MASK	(HPT_NPTEG - 1)

/* Pages in the VRMA are 16MB pages */
#define VRMA_PAGE_ORDER	24
#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */

/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970	63
#define NR_LPIDS	(LPID_RSVD + 1)
unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)];

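/*
 * Allocate the guest's hashed page table and claim a free logical
 * partition ID (LPID) for it.
 */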
long kvmppc_alloc_hpt(struct kvm *kvm)
{
	unsigned long hpt;
	unsigned long lpid;

	/* Allocate a physically contiguous, zeroed 16MB HPT */
	hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|__GFP_NOWARN,
			       HPT_ORDER - PAGE_SHIFT);
	if (!hpt) {
		pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
		return -ENOMEM;
	}
	kvm->arch.hpt_virt = hpt;

	/* Claim a free LPID, retrying if another allocation races with us */
	do {
		lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS);
		if (lpid >= NR_LPIDS) {
			pr_err("kvm_alloc_hpt: No LPIDs free\n");
			free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	/* SDR1 format: real address of the HPT | HTABSIZE (log2(size) - 18) */
	kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
	kvm->arch.lpid = lpid;

	pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
	return 0;
}

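/* Release the guest's LPID and free its hashed page table */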
void kvmppc_free_hpt(struct kvm *kvm)
{
	clear_bit(kvm->arch.lpid, lpid_inuse);
	free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
}

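/*
 * Set up the virtual real mode area (VRMA) for the guest: create one
 * bolted 16MB HPTE per page of guest RAM, using the reserved 1TB VSID,
 * so that guest real-mode accesses have translations to use.
 */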
void kvmppc_map_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem)
{
	unsigned long i;
	unsigned long npages = kvm->arch.ram_npages;
	unsigned long pfn;
	unsigned long *hpte;
	unsigned long hash;
	struct kvmppc_pginfo *pginfo = kvm->arch.ram_pginfo;

	if (!pginfo)
		return;

	/* VRMA can't be > 1TB */
	if (npages > 1ul << (40 - kvm->arch.ram_porder))
		npages = 1ul << (40 - kvm->arch.ram_porder);
	/* Can't use more than 1 HPTE per HPTEG */
	if (npages > HPT_NPTEG)
		npages = HPT_NPTEG;

	for (i = 0; i < npages; ++i) {
		pfn = pginfo[i].pfn;
		if (!pfn)
			break;
		/* can't use hpt_hash since va > 64 bits */
		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK;
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage.  Since we create
		 * at most one HPTE per HPTEG, we just assume entry 7
		 * is available and use it.
		 */
		hpte = (unsigned long *) (kvm->arch.hpt_virt + (hash << 7));
		hpte += 7 * 2;	/* slot 7 of the HPTEG; 2 dwords per HPTE */
		/* HPTE low word - RPN, protection, etc. */
		hpte[1] = (pfn << PAGE_SHIFT) | HPTE_R_R | HPTE_R_C |
			HPTE_R_M | PP_RWXX;
		/* order the low word before setting the valid bit below */
		wmb();
		hpte[0] = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
			(i << (VRMA_PAGE_ORDER - 16)) | HPTE_V_BOLTED |
			HPTE_V_LARGE | HPTE_V_VALID;
	}
}

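/*
 * One-time setup of the LPID allocator: mark the LPID the host is
 * already using and the LPID reserved for partition switching as in
 * use, so that no guest is ever given either of them.
 */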
int kvmppc_mmu_hv_init(void)
{
	unsigned long host_lpid, rsvd_lpid;

	/* HV KVM needs the CPU to be running in hypervisor mode */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -EINVAL;

	memset(lpid_inuse, 0, sizeof(lpid_inuse));

	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		host_lpid = mfspr(SPRN_LPID);	/* POWER7 */
		rsvd_lpid = LPID_RSVD;
	} else {
		host_lpid = 0;			/* PPC970 */
		rsvd_lpid = MAX_LPID_970;
	}

	set_bit(host_lpid, lpid_inuse);
	/* rsvd_lpid is reserved for use in partition switching */
	set_bit(rsvd_lpid, lpid_inuse);

	return 0;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	/* No per-vcpu MMU state to tear down for HV guests */
}

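/*
 * MSR value a vcpu gets when an interrupt is delivered to the guest:
 * 64-bit mode (SF) with machine check enabled (ME).
 */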
static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
	kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
}

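/*
 * Translation of guest addresses is not implemented for HV guests
 * here; report that no mapping was found.
 */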
static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
				struct kvmppc_pte *gpte, bool data)
{
	return -ENOENT;
}

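/*
 * Per-vcpu MMU setup: pick the SLB size for the host CPU type and
 * hook up the HV variants of the MMU callbacks.
 */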
void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	if (cpu_has_feature(CPU_FTR_ARCH_206))
		vcpu->arch.slb_nr = 32;		/* POWER7 */
	else
		vcpu->arch.slb_nr = 64;		/* PPC970 */

	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}