/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include "trace.h"

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#define PTE_SIZE	12

/* #define DEBUG_MMU */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while (0)
#endif

static struct kmem_cache *hpte_cache;

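/*
 * Each cached shadow PTE is kept on four hash lists at once so that
 * every flush path has a short list to walk: hashed by effective
 * address page, by effective address "long" region (0x0ffff000
 * granularity), by virtual page number and by virtual page group.
 */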
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
	return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
		       HPTEG_HASH_BITS_PTE_LONG);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	return hash_64((vpage & 0xffffff000ULL) >> 12,
		       HPTEG_HASH_BITS_VPTE_LONG);
}

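/*
 * Link a newly created shadow PTE into all four lookup hash tables,
 * serialized by the vcpu's mmu_lock.
 */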
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;

	trace_kvm_book3s_mmu_map(pte);

	spin_lock(&vcpu->arch.mmu_lock);

	/* Add to ePTE list */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);

	/* Add to ePTE_long list */
	index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte_long,
			   &vcpu->arch.hpte_hash_pte_long[index]);

	/* Add to vPTE list */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);

	/* Add to vPTE_long list */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_long,
			   &vcpu->arch.hpte_hash_vpte_long[index]);

	spin_unlock(&vcpu->arch.mmu_lock);
}

static void free_pte_rcu(struct rcu_head *head)
{
	struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
	kmem_cache_free(hpte_cache, pte);
}

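/*
 * Unhash a shadow PTE, drop the host mapping and the page reference it
 * holds, and free the entry once an RCU grace period has passed.
 */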
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	/* pte already invalidated? */
	if (hlist_unhashed(&pte->list_pte))
		return;

	trace_kvm_book3s_mmu_invalidate(pte);

	/* Different for 32 and 64 bit */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	spin_lock(&vcpu->arch.mmu_lock);

	hlist_del_init_rcu(&pte->list_pte);
	hlist_del_init_rcu(&pte->list_pte_long);
	hlist_del_init_rcu(&pte->list_vpte);
	hlist_del_init_rcu(&pte->list_vpte_long);

	spin_unlock(&vcpu->arch.mmu_lock);

	if (pte->pte.may_write)
		kvm_release_pfn_dirty(pte->pfn);
	else
		kvm_release_pfn_clean(pte->pfn);

	vcpu->arch.hpte_cache_count--;
	call_rcu(&pte->rcu_head, free_pte_rcu);
}

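/*
 * Invalidate every cached shadow PTE.  Walking the vPTE_long hash is
 * enough because every entry is linked into it.
 */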
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
	struct hpte_cache *pte;
	struct hlist_node *node;
	int i;

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
			invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

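/* Invalidate all shadow PTEs that map the given guest effective page. */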
static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

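/*
 * Invalidate all shadow PTEs whose effective address matches guest_ea
 * under the 0x0ffff000 mask.
 */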
static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu->arch.hpte_hash_pte_long[
			kvmppc_mmu_hash_pte_long(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_pte_long)
		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

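/*
 * Flush shadow PTEs by guest effective address.  Only the masks the
 * callers actually pass are handled: a single page, the 0x0ffff000
 * region, or a complete flush (mask 0).
 */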
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
		    vcpu->arch.hpte_cache_count, guest_ea, ea_mask);

	guest_ea &= ea_mask;

	switch (ea_mask) {
	case ~0xfffUL:
		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
		break;
	case 0x0ffff000:
		kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
		break;
	case 0:
		/* Doing a complete flush -> start from scratch */
		kvmppc_mmu_pte_flush_all(vcpu);
		break;
	default:
		WARN_ON(1);
		break;
	}
}

/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;
	u64 vp_mask = 0xfffffffffULL;

	list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_vpte)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffff000ULL;

	list = &vcpu->arch.hpte_hash_vpte_long[
		kvmppc_mmu_hash_vpte_long(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

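/*
 * Flush shadow PTEs by guest virtual page.  As above, only the two
 * masks used by the callers are supported.
 */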
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
		    vcpu->arch.hpte_cache_count, guest_vp, vp_mask);
	guest_vp &= vp_mask;

	switch (vp_mask) {
	case 0xfffffffffULL:
		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
		break;
	case 0xffffff000ULL:
		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
		break;
	default:
		WARN_ON(1);
		return;
	}
}

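/*
 * Flush all shadow PTEs whose guest real address falls into the range
 * [pa_start, pa_end).
 */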
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	struct hlist_node *node;
	struct hpte_cache *pte;
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n",
		    vcpu->arch.hpte_cache_count, pa_start, pa_end);

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
			if ((pte->pte.raddr >= pa_start) &&
			    (pte->pte.raddr < pa_end))
				invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

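/*
 * Allocate a new shadow PTE cache entry.  Once the cache reaches
 * HPTEG_CACHE_NUM entries, everything is flushed to keep its size
 * bounded.
 */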
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	struct hpte_cache *pte;

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
	vcpu->arch.hpte_cache_count++;

	if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush_all(vcpu);

	return pte;
}

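/* Drop all cached shadow PTEs when the vcpu's MMU state is torn down. */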
void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
	int i;

	for (i = 0; i < len; i++)
		INIT_HLIST_HEAD(&hash_list[i]);
}

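/* Initialize the per-vcpu shadow PTE hash tables and their lock. */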
int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
	/* init hpte lookup hashes */
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_pte));
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte_long,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_pte_long));
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte));
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long));

	spin_lock_init(&vcpu->arch.mmu_lock);

	return 0;
}

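/* Create the global slab cache backing all shadow PTE entries. */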
int kvmppc_mmu_hpte_sysinit(void)
{
	/* init hpte slab cache */
	hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
				       sizeof(struct hpte_cache), 0, NULL);
	if (!hpte_cache)
		return -ENOMEM;

	return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
	kmem_cache_destroy(hpte_cache);
}