/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#include "trace_pr.h"

/* Shadow PTEs always map 4k (2^12 byte) pages. */
#define PTE_SIZE	12

/* Slab cache that all hpte_cache entries are allocated from. */
static struct kmem_cache *hpte_cache;

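/*
 * Hash functions for the shadow PTE lookup lists. Every cached HPTE is
 * linked into four lists (five on Book3S-64), hashed by effective
 * address, by effective address within segment, and by virtual page
 * number at several granularities, so a flush keyed on any of these
 * only has to walk a short chain.
 */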
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
	return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
		       HPTEG_HASH_BITS_PTE_LONG);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	return hash_64((vpage & 0xffffff000ULL) >> 12,
		       HPTEG_HASH_BITS_VPTE_LONG);
}

#ifdef CONFIG_PPC_BOOK3S_64
static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)
{
	return hash_64((vpage & 0xffffffff0ULL) >> 4,
		       HPTEG_HASH_BITS_VPTE_64K);
}
#endif

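/*
 * Link a newly mapped shadow PTE into all lookup lists. Writers are
 * serialized by mmu_lock; lookups walk the lists under RCU.
 */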
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_map(pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* Add to ePTE list */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);

	/* Add to ePTE_long list */
	index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte_long,
			   &vcpu3s->hpte_hash_pte_long[index]);

	/* Add to vPTE list */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);

	/* Add to vPTE_long list */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_long,
			   &vcpu3s->hpte_hash_vpte_long[index]);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Add to vPTE_64k list */
	index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_64k,
			   &vcpu3s->hpte_hash_vpte_64k[index]);
#endif

	vcpu3s->hpte_cache_count++;

	spin_unlock(&vcpu3s->mmu_lock);
}

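/* RCU callback: free an entry once all lockless readers are done with it. */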
static void free_pte_rcu(struct rcu_head *head)
{
	struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
	kmem_cache_free(hpte_cache, pte);
}

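/*
 * Drop a shadow PTE from the host hash table and unlink it from all
 * lookup lists. Two CPUs can race to invalidate the same entry; the
 * hlist_unhashed() check under mmu_lock makes sure only one of them
 * unlinks it and schedules the RCU free.
 */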
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_invalidate(pte);

	/*
	 * Remove the HPTE from the host HTAB; 32-bit and 64-bit hosts
	 * implement this differently.
	 */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* pte already invalidated in between? */
	if (hlist_unhashed(&pte->list_pte)) {
		spin_unlock(&vcpu3s->mmu_lock);
		return;
	}

	hlist_del_init_rcu(&pte->list_pte);
	hlist_del_init_rcu(&pte->list_pte_long);
	hlist_del_init_rcu(&pte->list_vpte);
	hlist_del_init_rcu(&pte->list_vpte_long);
#ifdef CONFIG_PPC_BOOK3S_64
	hlist_del_init_rcu(&pte->list_vpte_64k);
#endif
	vcpu3s->hpte_cache_count--;

	spin_unlock(&vcpu3s->mmu_lock);

	call_rcu(&pte->rcu_head, free_pte_rcu);
}

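/*
 * Invalidate every cached entry. Walking the vPTE_long table alone is
 * sufficient, because every entry is linked into each of the tables.
 */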
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	int i;

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
			invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

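/* Invalidate all entries mapping one 4k page of guest effective address space. */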
static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

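/*
 * Invalidate all entries whose effective address matches @guest_ea in
 * the low 28 bits, i.e. at the same offset in any 256MB segment (used,
 * e.g., for tlbie emulation, where only the low address bits are known).
 */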
static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte_long[
			kvmppc_mmu_hash_pte_long(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_pte_long)
		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

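/*
 * Flush shadow PTEs by effective address. @ea_mask selects the
 * granularity: ~0xfffUL flushes one 4k page, 0x0ffff000 flushes by
 * offset within segment, and 0 flushes everything.
 *
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 *
 *	kvmppc_mmu_pte_flush(vcpu, ea, ~0xfffUL);	// one page
 *	kvmppc_mmu_pte_flush(vcpu, 0, 0);		// drop the whole cache
 */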
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask);
	guest_ea &= ea_mask;

	switch (ea_mask) {
	case ~0xfffUL:
		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
		break;
	case 0x0ffff000:
		kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
		break;
	case 0:
		/* Doing a complete flush -> start from scratch */
		kvmppc_mmu_pte_flush_all(vcpu);
		break;
	default:
		WARN_ON(1);
		break;
	}
}

/* Flush all entries matching a full 4k virtual page number (mask 0xfffffffff) */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xfffffffffULL;

	list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

#ifdef CONFIG_PPC_BOOK3S_64
/* Flush all 4k entries belonging to one 64k virtual page (mask 0xffffffff0) */
static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffffff0ULL;

	list = &vcpu3s->hpte_hash_vpte_64k[
		kvmppc_mmu_hash_vpte_64k(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte_64k)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}
#endif

/* Flush all entries matching the upper virtual page bits (mask 0xffffff000) */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffff000ULL;

	list = &vcpu3s->hpte_hash_vpte_long[
		kvmppc_mmu_hash_vpte_long(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte_long)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

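/*
 * Flush shadow PTEs by virtual page number. As with kvmppc_mmu_pte_flush(),
 * @vp_mask selects the granularity and thus which hash table is searched.
 */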
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask);
	guest_vp &= vp_mask;

	switch (vp_mask) {
	case 0xfffffffffULL:
		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case 0xffffffff0ULL:
		kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp);
		break;
#endif
	case 0xffffff000ULL:
		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
		break;
	default:
		WARN_ON(1);
		return;
	}
}

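/*
 * Invalidate all entries whose guest physical address falls into
 * [pa_start, pa_end).
 */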
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	int i;

	trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
			if ((pte->pte.raddr >= pa_start) &&
			    (pte->pte.raddr < pa_end))
				invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

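/*
 * Allocate a cache entry for a new mapping. If the cache is full, all
 * entries are flushed first to make room. May return NULL if the slab
 * allocation fails.
 */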
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;

	if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush_all(vcpu);

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);

	return pte;
}

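/*
 * Free an entry that was never linked into the lookup lists (e.g. when
 * mapping failed), so no RCU grace period is needed.
 */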
void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte)
{
	kmem_cache_free(hpte_cache, pte);
}

void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
	/* A flush with mask 0 tears down the entire cache */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
	int i;

	for (i = 0; i < len; i++)
		INIT_HLIST_HEAD(&hash_list[i]);
}

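/* Set up the per-vCPU lookup hash tables and the lock protecting them. */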
int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	/* init hpte lookup hashes */
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
#ifdef CONFIG_PPC_BOOK3S_64
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_64k,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_64k));
#endif

	spin_lock_init(&vcpu3s->mmu_lock);

	return 0;
}

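/* Module-wide setup/teardown of the slab cache backing all entries. */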
int kvmppc_mmu_hpte_sysinit(void)
{
	/* init hpte slab cache */
	hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
				       sizeof(struct hpte_cache), 0, NULL);
	if (!hpte_cache)
		return -ENOMEM;

	return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
	kmem_cache_destroy(hpte_cache);
}