/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#include "trace.h"

#define PTE_SIZE	12

static struct kmem_cache *hpte_cache;

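/*
 * Each shadow PTE lives on four hash lists at once so it can be looked
 * up (and invalidated) by effective address, by effective address at a
 * coarser "long" granularity, by virtual page and by virtual page at a
 * coarser "long" granularity.  The helpers below compute the bucket
 * index for each of those hashes.
 */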
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
	return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
		       HPTEG_HASH_BITS_PTE_LONG);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	return hash_64((vpage & 0xffffff000ULL) >> 12,
		       HPTEG_HASH_BITS_VPTE_LONG);
}

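/* Link a newly mapped shadow PTE into all four lookup hashes */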
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_map(pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* Add to ePTE list */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);

	/* Add to ePTE_long list */
	index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte_long,
			   &vcpu3s->hpte_hash_pte_long[index]);

	/* Add to vPTE list */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);

	/* Add to vPTE_long list */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_long,
			   &vcpu3s->hpte_hash_vpte_long[index]);

	spin_unlock(&vcpu3s->mmu_lock);
}

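/* RCU callback: free the entry once all lockless hash walkers are done */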
static void free_pte_rcu(struct rcu_head *head)
{
	struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
	kmem_cache_free(hpte_cache, pte);
}

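/*
 * Drop a shadow PTE: remove the host mapping, unlink the entry from all
 * four lookup hashes under mmu_lock and free it after an RCU grace
 * period so concurrent hash walkers never touch freed memory.
 */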
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_invalidate(pte);

	/* Different for 32 and 64 bit */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* pte already invalidated in between? */
	if (hlist_unhashed(&pte->list_pte)) {
		spin_unlock(&vcpu3s->mmu_lock);
		return;
	}

	hlist_del_init_rcu(&pte->list_pte);
	hlist_del_init_rcu(&pte->list_pte_long);
	hlist_del_init_rcu(&pte->list_vpte);
	hlist_del_init_rcu(&pte->list_vpte_long);

	spin_unlock(&vcpu3s->mmu_lock);

	vcpu3s->hpte_cache_count--;
	call_rcu(&pte->rcu_head, free_pte_rcu);
}

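/* Flush every shadow PTE; each entry is reachable via its vpte_long list */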
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	struct hlist_node *node;
	int i;

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
			invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

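/* Flush with mask ~0xfffUL: all shadow PTEs for one effective address page */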
static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

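/* Flush with mask 0x0ffff000: match the page index within the segment */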
static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte_long[
			kvmppc_mmu_hash_pte_long(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_pte_long)
		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

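/* Flush shadow PTEs by effective address, dispatching on the mask granularity */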
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask);
	guest_ea &= ea_mask;

	switch (ea_mask) {
	case ~0xfffUL:
		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
		break;
	case 0x0ffff000:
		kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
		break;
	case 0:
		/* Doing a complete flush -> start from scratch */
		kvmppc_mmu_pte_flush_all(vcpu);
		break;
	default:
		WARN_ON(1);
		break;
	}
}

/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;
	u64 vp_mask = 0xfffffffffULL;

	list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_vpte)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffff000ULL;

	list = &vcpu3s->hpte_hash_vpte_long[
		kvmppc_mmu_hash_vpte_long(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

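/* Flush shadow PTEs by virtual page, dispatching on the mask granularity */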
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask);
	guest_vp &= vp_mask;

	switch (vp_mask) {
	case 0xfffffffffULL:
		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
		break;
	case 0xffffff000ULL:
		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
		break;
	default:
		WARN_ON(1);
		return;
	}
}

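/* Flush all shadow PTEs whose guest real address falls into [pa_start, pa_end) */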
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_node *node;
	struct hpte_cache *pte;
	int i;

	trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
			if ((pte->pte.raddr >= pa_start) &&
			    (pte->pte.raddr < pa_end))
				invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

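/*
 * Allocate a new shadow PTE entry; once the cache grows to
 * HPTEG_CACHE_NUM entries, flush everything and start over.
 */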
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
	if (!pte)
		return NULL;
	vcpu3s->hpte_cache_count++;

	if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush_all(vcpu);

	return pte;
}

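/* A flush with mask 0 drops every cached shadow PTE for this vcpu */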
void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

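/* Initialize every bucket of a lookup hash to an empty list */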
static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
	int i;

	for (i = 0; i < len; i++)
		INIT_HLIST_HEAD(&hash_list[i]);
}

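/* Per-vcpu setup: empty the four lookup hashes and init the lock guarding them */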
int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	/* init hpte lookup hashes */
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));

	spin_lock_init(&vcpu3s->mmu_lock);

	return 0;
}

int kvmppc_mmu_hpte_sysinit(void)
{
	/* init hpte slab cache */
	hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
				       sizeof(struct hpte_cache), 0, NULL);
	if (!hpte_cache)
		return -ENOMEM;

	return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
	kmem_cache_destroy(hpte_cache);
}