/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#include "trace_pr.h"

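/* log2 of the 4 KiB page size: the low 12 bits of an EA are the page offset */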
#define PTE_SIZE	12

static struct kmem_cache *hpte_cache;

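/*
 * Every cached shadow PTE sits on several hash chains at once, each keyed
 * by different address bits, so a flush can find it quickly whether it
 * comes in by effective address, effective-address range, or guest
 * virtual page.  For example, a 4 KiB-page EA is hashed by its page
 * number: an eaddr of 0x12345678 hashes as hash_64(0x12345, ...).
 */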
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
	return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
		       HPTEG_HASH_BITS_PTE_LONG);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	return hash_64((vpage & 0xffffff000ULL) >> 12,
		       HPTEG_HASH_BITS_VPTE_LONG);
}

#ifdef CONFIG_PPC_BOOK3S_64
static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)
{
	return hash_64((vpage & 0xffffffff0ULL) >> 4,
		       HPTEG_HASH_BITS_VPTE_64K);
}
#endif

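/*
 * Add a newly created shadow PTE to every lookup hash list.  The caller
 * must have filled in pte->pte; the lists are protected by mmu_lock for
 * writers and by RCU for lockless readers.
 */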
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_map(pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* Add to ePTE list */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);

	/* Add to ePTE_long list */
	index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte_long,
			   &vcpu3s->hpte_hash_pte_long[index]);

	/* Add to vPTE list */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);

	/* Add to vPTE_long list */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_long,
			   &vcpu3s->hpte_hash_vpte_long[index]);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Add to vPTE_64k list */
	index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_64k,
			   &vcpu3s->hpte_hash_vpte_64k[index]);
#endif

	vcpu3s->hpte_cache_count++;

	spin_unlock(&vcpu3s->mmu_lock);
}

static void free_pte_rcu(struct rcu_head *head)
{
	struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
	kmem_cache_free(hpte_cache, pte);
}

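/*
 * Tear down a shadow PTE: drop the host-side mapping, unhash the entry
 * from all lists under mmu_lock, then defer the actual free via call_rcu
 * so that concurrent lockless list walkers never touch freed memory.
 */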
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_invalidate(pte);

	/* Different for 32 and 64 bit */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* pte already invalidated in between? */
	if (hlist_unhashed(&pte->list_pte)) {
		spin_unlock(&vcpu3s->mmu_lock);
		return;
	}

	hlist_del_init_rcu(&pte->list_pte);
	hlist_del_init_rcu(&pte->list_pte_long);
	hlist_del_init_rcu(&pte->list_vpte);
	hlist_del_init_rcu(&pte->list_vpte_long);
#ifdef CONFIG_PPC_BOOK3S_64
	hlist_del_init_rcu(&pte->list_vpte_64k);
#endif
	vcpu3s->hpte_cache_count--;

	spin_unlock(&vcpu3s->mmu_lock);

	call_rcu(&pte->rcu_head, free_pte_rcu);
}

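/*
 * Every entry sits on exactly one vPTE_long chain, so walking those
 * buckets visits (and invalidates) the entire cache.
 */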
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	int i;

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
			invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

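/* Flush all cached PTEs for a single 4 KiB guest effective page. */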
static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

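/*
 * Flush all cached PTEs whose page index within a 256 MiB segment (EA
 * bits 12-27) matches guest_ea, i.e. the same page in every segment.
 */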
static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte_long[
			kvmppc_mmu_hash_pte_long(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_pte_long)
		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

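/*
 * Flush by guest effective address.  ea_mask selects the granularity:
 * ~0xfff for a single page, 0x0ffff000 for a within-segment page index,
 * and 0 for a complete flush.
 */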
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask);
	guest_ea &= ea_mask;

	switch (ea_mask) {
	case ~0xfffUL:
		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
		break;
	case 0x0ffff000:
		kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
		break;
	case 0:
		/* Doing a complete flush -> start from scratch */
		kvmppc_mmu_pte_flush_all(vcpu);
		break;
	default:
		WARN_ON(1);
		break;
	}
}

/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xfffffffffULL;

	list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

#ifdef CONFIG_PPC_BOOK3S_64
/* Flush with mask 0xffffffff0 */
static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffffff0ULL;

	list = &vcpu3s->hpte_hash_vpte_64k[
		kvmppc_mmu_hash_vpte_64k(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte_64k)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}
#endif

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffff000ULL;

	list = &vcpu3s->hpte_hash_vpte_long[
		kvmppc_mmu_hash_vpte_long(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte_long)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

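/*
 * Flush by guest virtual page number.  A vp_mask of 0xfffffffff matches a
 * single 4 KiB virtual page, 0xffffffff0 a 64 KiB page (Book3S-64 only),
 * and 0xffffff000 a block of 4096 consecutive virtual pages.
 */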
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask);
	guest_vp &= vp_mask;

	switch (vp_mask) {
	case 0xfffffffffULL:
		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case 0xffffffff0ULL:
		kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp);
		break;
#endif
	case 0xffffff000ULL:
		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
		break;
	default:
		WARN_ON(1);
		return;
	}
}

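/*
 * Flush by guest physical address range.  raddr is not hashed anywhere,
 * so this has to scan every vPTE_long bucket.
 */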
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	int i;

	trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
			if ((pte->pte.raddr >= pa_start) &&
			    (pte->pte.raddr < pa_end))
				invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

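/*
 * Allocate a cache entry for the next mapping.  If the cache is full,
 * drop everything and start over: a crude but cheap eviction policy.
 * May return NULL; the caller must check.
 */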
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;

	if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush_all(vcpu);

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);

	return pte;
}

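/*
 * Free an entry directly; this is only safe for entries that were never
 * added to the hash lists.  Hashed entries must go through
 * invalidate_pte() so they are freed after an RCU grace period.
 */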
void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte)
{
	kmem_cache_free(hpte_cache, pte);
}

void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
	int i;

	for (i = 0; i < len; i++)
		INIT_HLIST_HEAD(&hash_list[i]);
}

int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	/* init hpte lookup hashes */
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
#ifdef CONFIG_PPC_BOOK3S_64
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_64k,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_64k));
#endif

	spin_lock_init(&vcpu3s->mmu_lock);

	return 0;
}

int kvmppc_mmu_hpte_sysinit(void)
{
	/* init hpte slab cache; bail out cleanly if allocation fails */
	hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
				       sizeof(struct hpte_cache), 0, NULL);
	if (!hpte_cache)
		return -ENOMEM;

	return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
	kmem_cache_destroy(hpte_cache);
}