/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#include "trace.h"

/* Shadow PTEs are hashed at 4k page granularity (1 << 12) */
#define PTE_SIZE	12

static struct kmem_cache *hpte_cache;

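/*
 * Hash functions for the shadow PTE lookup tables.  Every cached PTE
 * is linked into one bucket of each table, so it can be found by
 * effective address, by virtual page, or in coarser groups for the
 * flush operations below.
 */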
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

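/* Hash only the low 28 bits of the EA, i.e. the page within a 256MB segment */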
static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
	return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
		       HPTEG_HASH_BITS_PTE_LONG);
}

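/* Hash the low 36 bits of the guest virtual page number */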
static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	return hash_64((vpage & 0xffffff000ULL) >> 12,
		       HPTEG_HASH_BITS_VPTE_LONG);
}

#ifdef CONFIG_PPC_BOOK3S_64
static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)
{
	return hash_64((vpage & 0xffffffff0ULL) >> 4,
		       HPTEG_HASH_BITS_VPTE_64K);
}
#endif

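/*
 * Link a newly mapped shadow PTE into all lookup hashes.  Insertions
 * are serialized by mmu_lock; lookups walk the lists under RCU.
 */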
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_map(pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* Add to ePTE list */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);

	/* Add to ePTE_long list */
	index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte_long,
			   &vcpu3s->hpte_hash_pte_long[index]);

	/* Add to vPTE list */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);

	/* Add to vPTE_long list */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_long,
			   &vcpu3s->hpte_hash_vpte_long[index]);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Add to vPTE_64k list */
	index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_64k,
			   &vcpu3s->hpte_hash_vpte_64k[index]);
#endif

	spin_unlock(&vcpu3s->mmu_lock);
}

static void free_pte_rcu(struct rcu_head *head)
{
	struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
	kmem_cache_free(hpte_cache, pte);
}

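/*
 * Tear down a shadow PTE: drop it from the host hash table, unlink it
 * from every lookup list and free it after an RCU grace period.  The
 * hlist_unhashed() check makes concurrent invalidations idempotent.
 */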
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_invalidate(pte);

	/* Different for 32 and 64 bit */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* pte already invalidated in between? */
	if (hlist_unhashed(&pte->list_pte)) {
		spin_unlock(&vcpu3s->mmu_lock);
		return;
	}

	hlist_del_init_rcu(&pte->list_pte);
	hlist_del_init_rcu(&pte->list_pte_long);
	hlist_del_init_rcu(&pte->list_vpte);
	hlist_del_init_rcu(&pte->list_vpte_long);
#ifdef CONFIG_PPC_BOOK3S_64
	hlist_del_init_rcu(&pte->list_vpte_64k);
#endif
	/* keep the counter in sync with the lists it describes */
	vcpu3s->hpte_cache_count--;

	spin_unlock(&vcpu3s->mmu_lock);

	call_rcu(&pte->rcu_head, free_pte_rcu);
}

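/*
 * Invalidate every cached shadow PTE.  Walking the vpte_long table is
 * enough, since each PTE is linked into all of the hash tables.
 */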
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	int i;

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
			invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte_long[
			kvmppc_mmu_hash_pte_long(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_pte_long)
		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

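/*
 * Flush shadow PTEs by effective address.  The mask picks the
 * granularity: a single 4k page (~0xfff), the segment-relative match
 * (0x0ffff000), or a complete flush (0).
 */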
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask);
	guest_ea &= ea_mask;

	switch (ea_mask) {
	case ~0xfffUL:
		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
		break;
	case 0x0ffff000:
		kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
		break;
	case 0:
		/* Doing a complete flush -> start from scratch */
		kvmppc_mmu_pte_flush_all(vcpu);
		break;
	default:
		WARN_ON(1);
		break;
	}
}

/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xfffffffffULL;

	list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

#ifdef CONFIG_PPC_BOOK3S_64
/* Flush with mask 0xffffffff0 */
static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffffff0ULL;

	list = &vcpu3s->hpte_hash_vpte_64k[
		kvmppc_mmu_hash_vpte_64k(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte_64k)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}
#endif

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffff000ULL;

	list = &vcpu3s->hpte_hash_vpte_long[
		kvmppc_mmu_hash_vpte_long(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte_long)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

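/*
 * Flush shadow PTEs by virtual page number, dispatching on the mask
 * to the hash table with the matching granularity.
 */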
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask);
	guest_vp &= vp_mask;

	switch (vp_mask) {
	case 0xfffffffffULL:
		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case 0xffffffff0ULL:
		kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp);
		break;
#endif
	case 0xffffff000ULL:
		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
		break;
	default:
		WARN_ON(1);
		return;
	}
}

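/*
 * Flush all shadow PTEs whose guest real address lies in
 * [pa_start, pa_end).  As in the full flush, walking the vpte_long
 * table reaches every cached entry.
 */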
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	int i;

	trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
			if ((pte->pte.raddr >= pa_start) &&
			    (pte->pte.raddr < pa_end))
				invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

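/*
 * Allocate a shadow PTE cache entry.  Once the cache hits its limit,
 * flush everything so the guest mappings are faulted back in on
 * demand.
 */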
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
	if (!pte)
		return NULL;

	vcpu3s->hpte_cache_count++;
	if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush_all(vcpu);

	return pte;
}

void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
	int i;

	for (i = 0; i < len; i++)
		INIT_HLIST_HEAD(&hash_list[i]);
}

int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	/* init hpte lookup hashes */
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
#ifdef CONFIG_PPC_BOOK3S_64
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_64k,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_64k));
#endif

	spin_lock_init(&vcpu3s->mmu_lock);

	return 0;
}

int kvmppc_mmu_hpte_sysinit(void)
{
	/* init hpte slab cache */
	hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
				       sizeof(struct hpte_cache), 0, NULL);
	if (!hpte_cache)
		return -ENOMEM;

	return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
	kmem_cache_destroy(hpte_cache);
}