/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "trace.h"

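/* Shadow pages are always 4K, so hash with a page shift of log2(4K) = 12 */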
#define PTE_SIZE 12

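/* Drop a single shadow HPTE: invalidate its host hash table entry */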
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
			       MMU_PAGE_4K, MMU_SEGSIZE_256M,
			       false);
}

/*
 * We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping
 */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}

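/* Look up a guest VSID in the map, checking both possible hash slots */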
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (vcpu->arch.shared->msr & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

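	/* The mapping may sit in the mirrored slot; see create_sid_map() */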
	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	trace_kvm_book3s_slb_fail(sid_map_mask, gvsid);
	return NULL;
}

int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
	unsigned long vpn;
	pfn_t hpaddr;
	ulong hash, hpteg;
	u64 vsid;
	int ret;
	int rflags = 0x192;
	int vflags = 0;
	int attempt = 0;
	struct kvmppc_sid_map *map;
	int r = 0;

	/* Get host physical address for gpa */
	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
	if (is_error_noslot_pfn(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
		       orig_pte->raddr >> PAGE_SHIFT);
		r = -EINVAL;
		goto out;
	}
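	/*
	 * Turn the pfn into a host physical address; on hosts with pages
	 * larger than 4K, also keep the guest's 4K sub-page offset.
	 */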
	hpaddr <<= PAGE_SHIFT;
	hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
		WARN_ON(ret < 0);
		map = find_sid_vsid(vcpu, vsid);
	}
	if (!map) {
		printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
				vsid, orig_pte->eaddr);
		WARN_ON(true);
		r = -EINVAL;
		goto out;
	}

	vsid = map->host_vsid;
	vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);

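	/*
	 * The default rflags (0x192: R | C | M | PP=0b10) permit writing;
	 * ORing in HPTE_R_PP flips PP to 0b11, the read-only encoding.
	 */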
	if (!orig_pte->may_write)
		rflags |= HPTE_R_PP;
	else
		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);

	if (!orig_pte->may_execute)
		rflags |= HPTE_R_N;
	else
		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);

	hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M);

map_again:
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	/* In case we tried normal mapping already, let's nuke old entries */
	if (attempt > 1)
		if (ppc_md.hpte_remove(hpteg) < 0) {
			r = -1;
			goto out;
		}

	ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
				 MMU_PAGE_4K, MMU_SEGSIZE_256M);

	if (ret < 0) {
		/* If we couldn't map a primary PTE, try a secondary */
		hash = ~hash;
		vflags ^= HPTE_V_SECONDARY;
		attempt++;
		goto map_again;
	} else {
		struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);

		trace_kvm_book3s_64_mmu_map(rflags, hpteg,
					    vpn, hpaddr, orig_pte);

		/*
		 * The ppc_md code may give us a secondary entry even though
		 * we asked for a primary. Fix up.
		 */
		if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
			hash = ~hash;
			hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
		}

		pte->slot = hpteg + (ret & 7);
		pte->host_vpn = vpn;
		pte->pte = *orig_pte;
		pte->pfn = hpaddr >> PAGE_SHIFT;

		kvmppc_mmu_hpte_cache_map(vcpu, pte);
	}
	kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);

out:
	return r;
}

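/* Install a new guest->host VSID mapping, recycling the map when it fills up */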
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (vcpu->arch.shared->msr & MSR_PR)
		gvsid |= VSID_PR;

	/*
	 * Colliding gvsids can hash to the same slot; alternate between the
	 * forward and the mirrored slot so they don't keep evicting each
	 * other.
	 */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
		vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
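	/* Scramble the next proto-VSID into a real host VSID (256M segments) */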
	map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M);

	map->guest_vsid = gvsid;
	map->valid = true;

	trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);

	return map;
}

static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int i;
	int max_slb_size = 64;
	int found_inval = -1;
	int r;

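	/* Shadow SLB slot 0 is never handed out, so the search starts at 1 */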
	if (!svcpu->slb_max)
		svcpu->slb_max = 1;

	/* Are we overwriting? */
	for (i = 1; i < svcpu->slb_max; i++) {
		if (!(svcpu->slb[i].esid & SLB_ESID_V))
			found_inval = i;
		else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
			r = i;
			goto out;
		}
	}

	/* Found a spare entry that was invalidated before */
	if (found_inval > 0) {
		r = found_inval;
		goto out;
	}

	/* No spare invalid entry, so create one */

	if (mmu_slb_size < 64)
		max_slb_size = mmu_slb_size;

	/* Overflowing -> purge */
	if ((svcpu->slb_max) == max_slb_size)
		kvmppc_mmu_flush_segments(vcpu);

	r = svcpu->slb_max;
	svcpu->slb_max++;

out:
	svcpu_put(svcpu);
	return r;
}

int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u64 esid = eaddr >> SID_SHIFT;
	u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
	u64 slb_vsid = SLB_VSID_USER;
	u64 gvsid;
	int slb_index;
	struct kvmppc_sid_map *map;
	int r = 0;

	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->slb[slb_index].esid = 0;
		r = -ENOENT;
		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;

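	/* Assemble the shadow SLB entry: host VSID, user protection, Kp cleared */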
	slb_vsid |= (map->host_vsid << SLB_VSID_SHIFT);
	slb_vsid &= ~SLB_VSID_KP;
	slb_esid |= slb_index;

	svcpu->slb[slb_index].esid = slb_esid;
	svcpu->slb[slb_index].vsid = slb_vsid;

	trace_kvm_book3s_slbmte(slb_vsid, slb_esid);

out:
	svcpu_put(svcpu);
	return r;
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->slb_max = 1;
	svcpu->slb[0].esid = 0;
	svcpu_put(svcpu);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_hpte_destroy(vcpu);
	__destroy_context(to_book3s(vcpu)->context_id[0]);
}

int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;

	err = __init_new_context();
	if (err < 0)
		return -1;
	vcpu3s->context_id[0] = err;

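	/* Carve this vcpu's private proto-VSID range out of the fresh context */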
	vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
				  << USER_ESID_BITS) - 1;
	vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
	vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;

	kvmppc_mmu_hpte_init(vcpu);

	return 0;
}