/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu-44x.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

#define KVMPPC_MSR_MASK (MSR_CE|MSR_EE|MSR_PR|MSR_DE|MSR_ME|MSR_IS|MSR_DS)

#define VCPU_GPR(n)	(VCPU_GPRS + (n * 4))

/* The host stack layout: */
#define HOST_R1		0	/* Implied by stwu. */
#define HOST_CALLEE_LR	4
#define HOST_RUN	8
/* r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option. */
#define HOST_R2		12
#define HOST_NV_GPRS	16
#define HOST_NV_GPR(n)	(HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR	(HOST_STACK_SIZE + 4) /* In caller stack frame. */

#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS))

#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS))

#define NEED_ESR_MASK  ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_PROGRAM) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS))

.macro KVM_HANDLER ivor_nr
_GLOBAL(kvmppc_handler_\ivor_nr)
	/* Get pointer to vcpu and record exit number. */
	mtspr	SPRN_SPRG0, r4
	mfspr	r4, SPRN_SPRG1
	stw	r5, VCPU_GPR(r5)(r4)
	stw	r6, VCPU_GPR(r6)(r4)
	mfctr	r5
	lis	r6, kvmppc_resume_host@h
	stw	r5, VCPU_CTR(r4)
	li	r5, \ivor_nr
	ori	r6, r6, kvmppc_resume_host@l
	mtctr	r6
	bctr
.endm

_GLOBAL(kvmppc_handlers_start)
KVM_HANDLER BOOKE_INTERRUPT_CRITICAL
KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK
KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE
KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE
KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL
KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT
KVM_HANDLER BOOKE_INTERRUPT_PROGRAM
KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL
KVM_HANDLER BOOKE_INTERRUPT_SYSCALL
KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL
KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER
KVM_HANDLER BOOKE_INTERRUPT_FIT
KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG
KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS
KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS
KVM_HANDLER BOOKE_INTERRUPT_DEBUG

_GLOBAL(kvmppc_handler_len)
	.long kvmppc_handler_1 - kvmppc_handler_0


/* Registers:
 *  SPRG0: guest r4
 *  r4: vcpu pointer
 *  r5: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
	stw	r3, VCPU_GPR(r3)(r4)
	mfcr	r3
	stw	r3, VCPU_CR(r4)
	stw	r7, VCPU_GPR(r7)(r4)
	stw	r8, VCPU_GPR(r8)(r4)
	stw	r9, VCPU_GPR(r9)(r4)

	li	r6, 1
	slw	r6, r6, r5

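	/* r6 now holds a single set bit selected by the exit number; the
	 * NEED_*_MASK tests below use it to copy only the fault state (last
	 * instruction, DEAR, ESR) that this exit type actually needs. */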
	/* Save the faulting instruction and all GPRs for emulation. */
	andi.	r7, r6, NEED_INST_MASK
	beq	..skip_inst_copy
	mfspr	r9, SPRN_SRR0
	mfmsr	r8
	/* The guest runs with MSR[IS] and MSR[DS] set (see KVMPPC_MSR_MASK),
	 * so set MSR[DS] here to read the faulting instruction through the
	 * guest's own TLB mappings. */
	ori	r7, r8, MSR_DS
	mtmsr	r7
	isync
	lwz	r9, 0(r9)
	mtmsr	r8
	isync
	stw	r9, VCPU_LAST_INST(r4)

	stw	r15, VCPU_GPR(r15)(r4)
	stw	r16, VCPU_GPR(r16)(r4)
	stw	r17, VCPU_GPR(r17)(r4)
	stw	r18, VCPU_GPR(r18)(r4)
	stw	r19, VCPU_GPR(r19)(r4)
	stw	r20, VCPU_GPR(r20)(r4)
	stw	r21, VCPU_GPR(r21)(r4)
	stw	r22, VCPU_GPR(r22)(r4)
	stw	r23, VCPU_GPR(r23)(r4)
	stw	r24, VCPU_GPR(r24)(r4)
	stw	r25, VCPU_GPR(r25)(r4)
	stw	r26, VCPU_GPR(r26)(r4)
	stw	r27, VCPU_GPR(r27)(r4)
	stw	r28, VCPU_GPR(r28)(r4)
	stw	r29, VCPU_GPR(r29)(r4)
	stw	r30, VCPU_GPR(r30)(r4)
	stw	r31, VCPU_GPR(r31)(r4)
..skip_inst_copy:

	/* Also grab DEAR and ESR before the host can clobber them. */

	andi.	r7, r6, NEED_DEAR_MASK
	beq	..skip_dear
	mfspr	r9, SPRN_DEAR
	stw	r9, VCPU_FAULT_DEAR(r4)
..skip_dear:

	andi.	r7, r6, NEED_ESR_MASK
	beq	..skip_esr
	mfspr	r9, SPRN_ESR
	stw	r9, VCPU_FAULT_ESR(r4)
..skip_esr:

	/* Save remaining volatile guest register state to vcpu. */
	stw	r0, VCPU_GPR(r0)(r4)
	stw	r1, VCPU_GPR(r1)(r4)
	stw	r2, VCPU_GPR(r2)(r4)
	stw	r10, VCPU_GPR(r10)(r4)
	stw	r11, VCPU_GPR(r11)(r4)
	stw	r12, VCPU_GPR(r12)(r4)
	stw	r13, VCPU_GPR(r13)(r4)
	stw	r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */
	mflr	r3
	stw	r3, VCPU_LR(r4)
	mfxer	r3
	stw	r3, VCPU_XER(r4)
	mfspr	r3, SPRN_SPRG0
	stw	r3, VCPU_GPR(r4)(r4)
	mfspr	r3, SPRN_SRR0
	stw	r3, VCPU_PC(r4)

	/* Restore host stack pointer and PID before IVPR, since the host
	 * exception handlers use them. */
	lwz	r1, VCPU_HOST_STACK(r4)
	lwz	r3, VCPU_HOST_PID(r4)
	mtspr	SPRN_PID, r3

	/* Restore host IVPR before re-enabling interrupts. We cheat and know
	 * that Linux IVPR is always 0xc0000000. */
	lis	r3, 0xc000
	mtspr	SPRN_IVPR, r3

	/* Switch to kernel stack and jump to handler. */
	LOAD_REG_ADDR(r3, kvmppc_handle_exit)
	mtctr	r3
	lwz	r3, HOST_RUN(r1)
	lwz	r2, HOST_R2(r1)
	mr	r14, r4 /* Save vcpu pointer. */

	bctrl	/* kvmppc_handle_exit() */

	/* Restore vcpu pointer and the nonvolatiles we used. */
	mr	r4, r14
	lwz	r14, VCPU_GPR(r14)(r4)

	/* Sometimes instruction emulation must restore complete GPR state. */
	andi.	r5, r3, RESUME_FLAG_NV
	beq	..skip_nv_load
	lwz	r15, VCPU_GPR(r15)(r4)
	lwz	r16, VCPU_GPR(r16)(r4)
	lwz	r17, VCPU_GPR(r17)(r4)
	lwz	r18, VCPU_GPR(r18)(r4)
	lwz	r19, VCPU_GPR(r19)(r4)
	lwz	r20, VCPU_GPR(r20)(r4)
	lwz	r21, VCPU_GPR(r21)(r4)
	lwz	r22, VCPU_GPR(r22)(r4)
	lwz	r23, VCPU_GPR(r23)(r4)
	lwz	r24, VCPU_GPR(r24)(r4)
	lwz	r25, VCPU_GPR(r25)(r4)
	lwz	r26, VCPU_GPR(r26)(r4)
	lwz	r27, VCPU_GPR(r27)(r4)
	lwz	r28, VCPU_GPR(r28)(r4)
	lwz	r29, VCPU_GPR(r29)(r4)
	lwz	r30, VCPU_GPR(r30)(r4)
	lwz	r31, VCPU_GPR(r31)(r4)
..skip_nv_load:

	/* Should we return to the guest? */
	andi.	r5, r3, RESUME_FLAG_HOST
	beq	lightweight_exit

	srawi	r3, r3, 2 /* Shift -ERR back down. */

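/*
 * Heavyweight exit: unwind back to the C caller of __kvmppc_vcpu_run. The
 * guest's non-volatile GPRs are saved to the vcpu and the host's are
 * reloaded from the stack frame before returning.
 */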
heavyweight_exit:
	/* Not returning to guest. */

	/* We already saved guest volatile register state; now save the
	 * non-volatiles. */
	stw	r15, VCPU_GPR(r15)(r4)
	stw	r16, VCPU_GPR(r16)(r4)
	stw	r17, VCPU_GPR(r17)(r4)
	stw	r18, VCPU_GPR(r18)(r4)
	stw	r19, VCPU_GPR(r19)(r4)
	stw	r20, VCPU_GPR(r20)(r4)
	stw	r21, VCPU_GPR(r21)(r4)
	stw	r22, VCPU_GPR(r22)(r4)
	stw	r23, VCPU_GPR(r23)(r4)
	stw	r24, VCPU_GPR(r24)(r4)
	stw	r25, VCPU_GPR(r25)(r4)
	stw	r26, VCPU_GPR(r26)(r4)
	stw	r27, VCPU_GPR(r27)(r4)
	stw	r28, VCPU_GPR(r28)(r4)
	stw	r29, VCPU_GPR(r29)(r4)
	stw	r30, VCPU_GPR(r30)(r4)
	stw	r31, VCPU_GPR(r31)(r4)

	/* Load host non-volatile register state from host stack. */
	lwz	r14, HOST_NV_GPR(r14)(r1)
	lwz	r15, HOST_NV_GPR(r15)(r1)
	lwz	r16, HOST_NV_GPR(r16)(r1)
	lwz	r17, HOST_NV_GPR(r17)(r1)
	lwz	r18, HOST_NV_GPR(r18)(r1)
	lwz	r19, HOST_NV_GPR(r19)(r1)
	lwz	r20, HOST_NV_GPR(r20)(r1)
	lwz	r21, HOST_NV_GPR(r21)(r1)
	lwz	r22, HOST_NV_GPR(r22)(r1)
	lwz	r23, HOST_NV_GPR(r23)(r1)
	lwz	r24, HOST_NV_GPR(r24)(r1)
	lwz	r25, HOST_NV_GPR(r25)(r1)
	lwz	r26, HOST_NV_GPR(r26)(r1)
	lwz	r27, HOST_NV_GPR(r27)(r1)
	lwz	r28, HOST_NV_GPR(r28)(r1)
	lwz	r29, HOST_NV_GPR(r29)(r1)
	lwz	r30, HOST_NV_GPR(r30)(r1)
	lwz	r31, HOST_NV_GPR(r31)(r1)

	/* Return to kvm_vcpu_run(). */
	lwz	r4, HOST_STACK_LR(r1)
	addi	r1, r1, HOST_STACK_SIZE
	mtlr	r4
	/* r3 still contains the return code from kvmppc_handle_exit(). */
	blr


/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)
	stwu	r1, -HOST_STACK_SIZE(r1)
	stw	r1, VCPU_HOST_STACK(r4)	/* Save stack pointer to vcpu. */

	/* Save host state to stack. */
	stw	r3, HOST_RUN(r1)
	mflr	r3
	stw	r3, HOST_STACK_LR(r1)

	/* Save host non-volatile register state to stack. */
	stw	r14, HOST_NV_GPR(r14)(r1)
	stw	r15, HOST_NV_GPR(r15)(r1)
	stw	r16, HOST_NV_GPR(r16)(r1)
	stw	r17, HOST_NV_GPR(r17)(r1)
	stw	r18, HOST_NV_GPR(r18)(r1)
	stw	r19, HOST_NV_GPR(r19)(r1)
	stw	r20, HOST_NV_GPR(r20)(r1)
	stw	r21, HOST_NV_GPR(r21)(r1)
	stw	r22, HOST_NV_GPR(r22)(r1)
	stw	r23, HOST_NV_GPR(r23)(r1)
	stw	r24, HOST_NV_GPR(r24)(r1)
	stw	r25, HOST_NV_GPR(r25)(r1)
	stw	r26, HOST_NV_GPR(r26)(r1)
	stw	r27, HOST_NV_GPR(r27)(r1)
	stw	r28, HOST_NV_GPR(r28)(r1)
	stw	r29, HOST_NV_GPR(r29)(r1)
	stw	r30, HOST_NV_GPR(r30)(r1)
	stw	r31, HOST_NV_GPR(r31)(r1)

	/* Load guest non-volatiles. */
	lwz	r14, VCPU_GPR(r14)(r4)
	lwz	r15, VCPU_GPR(r15)(r4)
	lwz	r16, VCPU_GPR(r16)(r4)
	lwz	r17, VCPU_GPR(r17)(r4)
	lwz	r18, VCPU_GPR(r18)(r4)
	lwz	r19, VCPU_GPR(r19)(r4)
	lwz	r20, VCPU_GPR(r20)(r4)
	lwz	r21, VCPU_GPR(r21)(r4)
	lwz	r22, VCPU_GPR(r22)(r4)
	lwz	r23, VCPU_GPR(r23)(r4)
	lwz	r24, VCPU_GPR(r24)(r4)
	lwz	r25, VCPU_GPR(r25)(r4)
	lwz	r26, VCPU_GPR(r26)(r4)
	lwz	r27, VCPU_GPR(r27)(r4)
	lwz	r28, VCPU_GPR(r28)(r4)
	lwz	r29, VCPU_GPR(r29)(r4)
	lwz	r30, VCPU_GPR(r30)(r4)
	lwz	r31, VCPU_GPR(r31)(r4)

lightweight_exit:
	stw	r2, HOST_R2(r1)

	mfspr	r3, SPRN_PID
	stw	r3, VCPU_HOST_PID(r4)
	lwz	r3, VCPU_PID(r4)
	mtspr	SPRN_PID, r3

	/* Prevent all TLB updates. */
	mfmsr	r5
	lis	r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h
	ori	r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
	andc	r6, r5, r6
	mtmsr	r6

	/* Save the host's non-pinned TLB mappings, and load the guest mappings
	 * over them. Leave the host's "pinned" kernel mappings in place. */
	/* XXX optimization: use generation count to avoid swapping unmodified
	 * entries. */
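	/* Each saved entry is four words: the MMUCR value, then the PAGEID,
	 * XLAT and ATTRIB words, matching the stwu/lwzu sequence below. The
	 * loop stops at tlb_44x_hwater, so the pinned host entries above it
	 * are never touched. */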
	mfspr	r10, SPRN_MMUCR		/* Save host MMUCR. */
	lis	r8, tlb_44x_hwater@ha
	lwz	r8, tlb_44x_hwater@l(r8)
	addi	r3, r4, VCPU_HOST_TLB - 4
	addi	r9, r4, VCPU_SHADOW_TLB - 4
	li	r6, 0
1:
	/* Save host entry. */
	tlbre	r7, r6, PPC44x_TLB_PAGEID
	mfspr	r5, SPRN_MMUCR
	stwu	r5, 4(r3)
	stwu	r7, 4(r3)
	tlbre	r7, r6, PPC44x_TLB_XLAT
	stwu	r7, 4(r3)
	tlbre	r7, r6, PPC44x_TLB_ATTRIB
	stwu	r7, 4(r3)
	/* Load guest entry. */
	lwzu	r7, 4(r9)
	mtspr	SPRN_MMUCR, r7
	lwzu	r7, 4(r9)
	tlbwe	r7, r6, PPC44x_TLB_PAGEID
	lwzu	r7, 4(r9)
	tlbwe	r7, r6, PPC44x_TLB_XLAT
	lwzu	r7, 4(r9)
	tlbwe	r7, r6, PPC44x_TLB_ATTRIB
	/* Increment index. */
	addi	r6, r6, 1
	cmpw	r6, r8
	blt	1b
	mtspr	SPRN_MMUCR, r10		/* Restore host MMUCR. */

	iccci	0, 0 /* XXX hack */

	/* Load some guest volatiles. */
	lwz	r0, VCPU_GPR(r0)(r4)
	lwz	r2, VCPU_GPR(r2)(r4)
	lwz	r9, VCPU_GPR(r9)(r4)
	lwz	r10, VCPU_GPR(r10)(r4)
	lwz	r11, VCPU_GPR(r11)(r4)
	lwz	r12, VCPU_GPR(r12)(r4)
	lwz	r13, VCPU_GPR(r13)(r4)
	lwz	r3, VCPU_LR(r4)
	mtlr	r3
	lwz	r3, VCPU_XER(r4)
	mtxer	r3

	/* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
	 * so how do we make sure vcpu won't fault? */
	lis	r8, kvmppc_booke_handlers@ha
	lwz	r8, kvmppc_booke_handlers@l(r8)
	mtspr	SPRN_IVPR, r8

	/* Save vcpu pointer for the exception handlers. */
	mtspr	SPRN_SPRG1, r4

	/* Can't switch the stack pointer until after IVPR is switched,
	 * because host interrupt handlers would get confused. */
	lwz	r1, VCPU_GPR(r1)(r4)

	/* XXX handle USPRG0 */
	/* Host interrupt handlers may have clobbered these guest-readable
	 * SPRGs, so we need to reload them here with the guest's values. */
	lwz	r3, VCPU_SPRG4(r4)
	mtspr	SPRN_SPRG4, r3
	lwz	r3, VCPU_SPRG5(r4)
	mtspr	SPRN_SPRG5, r3
	lwz	r3, VCPU_SPRG6(r4)
	mtspr	SPRN_SPRG6, r3
	lwz	r3, VCPU_SPRG7(r4)
	mtspr	SPRN_SPRG7, r3

	/* Finish loading guest volatiles and jump to guest. */
	lwz	r3, VCPU_CTR(r4)
	mtctr	r3
	lwz	r3, VCPU_CR(r4)
	mtcr	r3
	lwz	r5, VCPU_GPR(r5)(r4)
	lwz	r6, VCPU_GPR(r6)(r4)
	lwz	r7, VCPU_GPR(r7)(r4)
	lwz	r8, VCPU_GPR(r8)(r4)
	lwz	r3, VCPU_PC(r4)
	mtsrr0	r3
	lwz	r3, VCPU_MSR(r4)
	oris	r3, r3, KVMPPC_MSR_MASK@h
	ori	r3, r3, KVMPPC_MSR_MASK@l
	mtsrr1	r3
	lwz	r3, VCPU_GPR(r3)(r4)
	lwz	r4, VCPU_GPR(r4)(r4)
	rfi