/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash32.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

/* #define DEBUG_MMU */
/* #define DEBUG_SR */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while(0)
#endif

#ifdef DEBUG_SR
#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_sr(a, ...) do { } while(0)
#endif

#if PAGE_SHIFT != 12
#error Unknown page size
#endif

#ifdef CONFIG_SMP
#error XXX need to grab mmu_hash_lock
#endif

#ifdef CONFIG_PTE_64BIT
#error Only 32 bit pages are supported for now
#endif

static ulong htab;
static u32 htabmask;

void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	volatile u32 *pteg;

	/* Remove from host HTAB */
	pteg = (u32*)pte->slot;
	pteg[0] = 0;

	/* And make sure it's gone from the TLB too */
	asm volatile ("sync");
	asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
	asm volatile ("sync");
	asm volatile ("tlbsync");
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}


static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (vcpu->arch.shared->msr & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
			   gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
			   gvsid, map->host_vsid);
		return map;
	}

	dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
	return NULL;
}

static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
				bool primary)
{
	u32 page, hash;
	ulong pteg = htab;

	page = (eaddr & ~ESID_MASK) >> 12;

	hash = ((vsid ^ page) << 6);
	if (!primary)
		hash = ~hash;

	hash &= htabmask;

	pteg |= hash;

	dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
		    htab, hash, htabmask, pteg);

	return (u32*)pteg;
}

extern char etext[];

int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
	pfn_t hpaddr;
	u64 va;
	u64 vsid;
	struct kvmppc_sid_map *map;
	volatile u32 *pteg;
	u32 eaddr = orig_pte->eaddr;
	u32 pteg0, pteg1;
	register int rr = 0;
	bool primary = false;
	bool evict = false;
	struct hpte_cache *pte;
	int r = 0;

	/* Get host physical address for gpa */
	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
	if (is_error_pfn(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
				 orig_pte->raddr >> PAGE_SHIFT);
		r = -EINVAL;
		goto out;
	}
	hpaddr <<= PAGE_SHIFT;

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		kvmppc_mmu_map_segment(vcpu, eaddr);
		map = find_sid_vsid(vcpu, vsid);
	}
	BUG_ON(!map);

	vsid = map->host_vsid;
	va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK);

next_pteg:
	/* rr walks the eight PTE slots (two words each) of the current PTEG.
	 * Once all 16 words are in use, switch to the other hash group and
	 * allow eviction of an existing entry. */
	if (rr == 16) {
		primary = !primary;
		evict = true;
		rr = 0;
	}

	pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);

	/* not evicting yet */
	if (!evict && (pteg[rr] & PTE_V)) {
		rr += 2;
		goto next_pteg;
	}

	dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

	/* Word 0: valid bit, VSID, hash function selector and API;
	 * word 1: real page number plus coherence, referenced, changed
	 * and protection bits. */
	pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
		(primary ? 0 : PTE_SEC);
	pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;

	if (orig_pte->may_write) {
		pteg1 |= PP_RWRW;
		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
	} else {
		pteg1 |= PP_RWRX;
	}

	if (orig_pte->may_execute)
		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);

	local_irq_disable();

	if (pteg[rr]) {
		pteg[rr] = 0;
		asm volatile ("sync");
	}
	pteg[rr + 1] = pteg1;
	pteg[rr] = pteg0;
	asm volatile ("sync");

	local_irq_enable();

	dprintk_mmu("KVM: new PTEG: %p\n", pteg);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);


	/* Now tell our Shadow PTE code about the new page */

	pte = kvmppc_mmu_hpte_cache_next(vcpu);

	dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
		    orig_pte->may_write ? 'w' : '-',
		    orig_pte->may_execute ? 'x' : '-',
		    orig_pte->eaddr, (ulong)pteg, va,
		    orig_pte->vpage, hpaddr);

	pte->slot = (ulong)&pteg[rr];
	pte->host_va = va;
	pte->pte = *orig_pte;
	pte->pfn = hpaddr >> PAGE_SHIFT;

	kvmppc_mmu_hpte_cache_map(vcpu, pte);

out:
	return r;
}

static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (vcpu->arch.shared->msr & MSR_PR)
		gvsid |= VSID_PR;

	/* We might get collisions that trap in preceding order, so let's
	   map them differently */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) {
		vcpu_book3s->vsid_next = 0;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next];
	vcpu_book3s->vsid_next++;

	map->guest_vsid = gvsid;
	map->valid = true;

	return map;
}

int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	u32 esid = eaddr >> SID_SHIFT;
	u64 gvsid;
	u32 sr;
	struct kvmppc_sid_map *map;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int r = 0;

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->sr[esid] = SR_INVALID;
		r = -ENOENT;
		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;
	sr = map->host_vsid | SR_KP;
	svcpu->sr[esid] = sr;

	dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);

out:
	svcpu_put(svcpu);
	return r;
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
	for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
		svcpu->sr[i] = SR_INVALID;

	svcpu_put(svcpu);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	int i;

	kvmppc_mmu_hpte_destroy(vcpu);
	preempt_disable();
	for (i = 0; i < SID_CONTEXTS; i++)
		__destroy_context(to_book3s(vcpu)->context_id[i]);
	preempt_enable();
}

/* From mm/mmu_context_hash32.c */
#define CTX_TO_VSID(c, id)	((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)

int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;
	ulong sdr1;
	int i;
	int j;

	for (i = 0; i < SID_CONTEXTS; i++) {
		err = __init_new_context();
		if (err < 0)
			goto init_fail;
		vcpu3s->context_id[i] = err;

		/* Remember context id for this combination */
		for (j = 0; j < 16; j++)
			vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j);
	}

	vcpu3s->vsid_next = 0;

	/* Remember where the HTAB is */
	asm ( "mfsdr1 %0" : "=r"(sdr1) );
	htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
	htab = (ulong)__va(sdr1 & 0xffff0000);

	kvmppc_mmu_hpte_init(vcpu);

	return 0;

init_fail:
	for (j = 0; j < i; j++) {
		if (!vcpu3s->context_id[j])
			continue;

		__destroy_context(to_book3s(vcpu)->context_id[j]);
	}

	return -1;
}