/*
 * in-kernel handling for sie intercepts
 *
 * Copyright IBM Corp. 2008, 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>

#include <asm/kvm_host.h>
#include <asm/asm-offsets.h>
#include <asm/irq.h>

#include "kvm-s390.h"
#include "gaccess.h"
#include "trace.h"
#include "trace-s390.h"


static const intercept_handler_t instruction_handlers[256] = {
        [0x01] = kvm_s390_handle_01,
        [0x82] = kvm_s390_handle_lpsw,
        [0x83] = kvm_s390_handle_diag,
        [0xae] = kvm_s390_handle_sigp,
        [0xb2] = kvm_s390_handle_b2,
        [0xb6] = kvm_s390_handle_stctl,
        [0xb7] = kvm_s390_handle_lctl,
        [0xb9] = kvm_s390_handle_b9,
        [0xe5] = kvm_s390_handle_e5,
        [0xeb] = kvm_s390_handle_eb,
};

static int handle_noop(struct kvm_vcpu *vcpu)
{
        switch (vcpu->arch.sie_block->icptcode) {
        case 0x0:
                vcpu->stat.exit_null++;
                break;
        case 0x10:
                vcpu->stat.exit_external_request++;
                break;
        default:
                break; /* nothing */
        }
        return 0;
}

static int handle_stop(struct kvm_vcpu *vcpu)
{
        int rc = 0;
        unsigned int action_bits;

        vcpu->stat.exit_stop_request++;
        trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);

        action_bits = vcpu->arch.local_int.action_bits;

        if (!(action_bits & ACTION_STOP_ON_STOP))
                return 0;

        if (action_bits & ACTION_STORE_ON_STOP) {
                rc = kvm_s390_vcpu_store_status(vcpu,
                                                KVM_S390_STORE_STATUS_NOADDR);
                if (rc)
                        return rc;
        }

        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
                kvm_s390_vcpu_stop(vcpu);
        return -EOPNOTSUPP;
}

static int handle_validity(struct kvm_vcpu *vcpu)
{
        int viwhy = vcpu->arch.sie_block->ipb >> 16;

        vcpu->stat.exit_validity++;
        trace_kvm_s390_intercept_validity(vcpu, viwhy);
        WARN_ONCE(true, "kvm: unhandled validity intercept 0x%x\n", viwhy);
        return -EOPNOTSUPP;
}

static int handle_instruction(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        vcpu->stat.exit_instruction++;
        trace_kvm_s390_intercept_instruction(vcpu,
                                             vcpu->arch.sie_block->ipa,
                                             vcpu->arch.sie_block->ipb);
        handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}

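/*
 * Copy the program-interruption parameters from the SIE control block into
 * pgm_info for later injection. Which fields are valid depends on the
 * interruption code; PER information is added whenever the PER bit is set.
 */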
static void __extract_prog_irq(struct kvm_vcpu *vcpu,
                               struct kvm_s390_pgm_info *pgm_info)
{
        memset(pgm_info, 0, sizeof(struct kvm_s390_pgm_info));
        pgm_info->code = vcpu->arch.sie_block->iprcc;

        switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
        case PGM_AFX_TRANSLATION:
        case PGM_ASX_TRANSLATION:
        case PGM_EX_TRANSLATION:
        case PGM_LFX_TRANSLATION:
        case PGM_LSTE_SEQUENCE:
        case PGM_LSX_TRANSLATION:
        case PGM_LX_TRANSLATION:
        case PGM_PRIMARY_AUTHORITY:
        case PGM_SECONDARY_AUTHORITY:
        case PGM_SPACE_SWITCH:
                pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc;
                break;
        case PGM_ALEN_TRANSLATION:
        case PGM_ALE_SEQUENCE:
        case PGM_ASTE_INSTANCE:
        case PGM_ASTE_SEQUENCE:
        case PGM_ASTE_VALIDITY:
        case PGM_EXTENDED_AUTHORITY:
                pgm_info->exc_access_id = vcpu->arch.sie_block->eai;
                break;
        case PGM_ASCE_TYPE:
        case PGM_PAGE_TRANSLATION:
        case PGM_REGION_FIRST_TRANS:
        case PGM_REGION_SECOND_TRANS:
        case PGM_REGION_THIRD_TRANS:
        case PGM_SEGMENT_TRANSLATION:
                pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc;
                pgm_info->exc_access_id = vcpu->arch.sie_block->eai;
                pgm_info->op_access_id = vcpu->arch.sie_block->oai;
                break;
        case PGM_MONITOR:
                pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn;
                pgm_info->mon_code = vcpu->arch.sie_block->tecmc;
                break;
        case PGM_DATA:
                pgm_info->data_exc_code = vcpu->arch.sie_block->dxc;
                break;
        case PGM_PROTECTION:
                pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc;
                pgm_info->exc_access_id = vcpu->arch.sie_block->eai;
                break;
        default:
                break;
        }

        if (vcpu->arch.sie_block->iprcc & PGM_PER) {
                pgm_info->per_code = vcpu->arch.sie_block->perc;
                pgm_info->per_atmid = vcpu->arch.sie_block->peratmid;
                pgm_info->per_address = vcpu->arch.sie_block->peraddr;
                pgm_info->per_access_id = vcpu->arch.sie_block->peraid;
        }
}

/*
 * restore ITDB to program-interruption TDB in guest lowcore
 * and set TX abort indication if required
 */
static int handle_itdb(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_itdb *itdb;
        int rc;

        if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
                return 0;
        if (current->thread.per_flags & PER_FLAG_NO_TE)
                return 0;
        itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba;
        rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
        if (rc)
                return rc;
        memset(itdb, 0, sizeof(*itdb));

        return 0;
}

#define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)

static int handle_prog(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_pgm_info pgm_info;
        psw_t psw;
        int rc;

        vcpu->stat.exit_program_interruption++;

        if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
                kvm_s390_handle_per_event(vcpu);
                /* the interrupt might have been filtered out completely */
                if (vcpu->arch.sie_block->iprcc == 0)
                        return 0;
        }

        trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
        if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
                rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
                if (rc)
                        return rc;
                /* Avoid endless loops of specification exceptions */
                if (!is_valid_psw(&psw))
                        return -EOPNOTSUPP;
        }
        rc = handle_itdb(vcpu);
        if (rc)
                return rc;

        __extract_prog_irq(vcpu, &pgm_info);
        return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

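/*
 * Intercept code 0x0c: an instruction and a program interruption were
 * recognized at the same time. Both are handled here; if the instruction
 * could not be emulated in the kernel, the intercept code is rewritten to
 * 0x04 so that userspace sees a plain instruction intercept.
 */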
static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
{
        int rc, rc2;

        vcpu->stat.exit_instr_and_program++;
        rc = handle_instruction(vcpu);
        rc2 = handle_prog(vcpu);

        if (rc == -EOPNOTSUPP)
                vcpu->arch.sie_block->icptcode = 0x04;
        if (rc)
                return rc;
        return rc2;
}

/**
 * handle_external_interrupt - used for external interruption interceptions
 *
 * This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if
 * the new PSW has external interrupts enabled. In the first case, we have
 * to deliver the interrupt manually, and in the second case, we drop to
 * userspace to handle the situation there.
 */
static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
        u16 eic = vcpu->arch.sie_block->eic;
        struct kvm_s390_interrupt irq;
        psw_t newpsw;
        int rc;

        vcpu->stat.exit_external_interrupt++;

        rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
        if (rc)
                return rc;
        /*
         * A clock comparator or CPU timer interrupt while the external new
         * PSW has external interrupts enabled would cause an interrupt loop,
         * so drop to userspace instead.
         */
        if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
            (newpsw.mask & PSW_MASK_EXT))
                return -EOPNOTSUPP;

        switch (eic) {
        case EXT_IRQ_CLK_COMP:
                irq.type = KVM_S390_INT_CLOCK_COMP;
                break;
        case EXT_IRQ_CPU_TIMER:
                irq.type = KVM_S390_INT_CPU_TIMER;
                break;
        case EXT_IRQ_EXTERNAL_CALL:
                if (kvm_s390_si_ext_call_pending(vcpu))
                        return 0;
                irq.type = KVM_S390_INT_EXTERNAL_CALL;
                irq.parm = vcpu->arch.sie_block->extcpuaddr;
                break;
        default:
                return -EOPNOTSUPP;
        }

        return kvm_s390_inject_vcpu(vcpu, &irq);
}

/**
 * Handle MOVE PAGE partial execution interception.
 *
 * This interception can only happen for guests with DAT disabled and
 * addresses that are currently not mapped in the host. Thus we try to
 * set up the mappings for the corresponding user pages here (or throw
 * addressing exceptions in case of illegal guest addresses).
 */
static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
{
        psw_t *psw = &vcpu->arch.sie_block->gpsw;
        unsigned long srcaddr, dstaddr;
        int reg1, reg2, rc;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        /* Make sure that the source is paged-in */
        srcaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg2]);
        if (kvm_is_error_gpa(vcpu->kvm, srcaddr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
        if (rc != 0)
                return rc;

        /* Make sure that the destination is paged-in */
        dstaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg1]);
        if (kvm_is_error_gpa(vcpu->kvm, dstaddr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
        if (rc != 0)
                return rc;

        psw->addr = __rewind_psw(*psw, 4);

        return 0;
}

static int handle_partial_execution(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.sie_block->ipa == 0xb254)        /* MVPG */
                return handle_mvpg_pei(vcpu);
        if (vcpu->arch.sie_block->ipa >> 8 == 0xae)     /* SIGP */
                return kvm_s390_handle_sigp_pei(vcpu);

        return -EOPNOTSUPP;
}

static const intercept_handler_t intercept_funcs[] = {
        [0x00 >> 2] = handle_noop,
        [0x04 >> 2] = handle_instruction,
        [0x08 >> 2] = handle_prog,
        [0x0C >> 2] = handle_instruction_and_prog,
        [0x10 >> 2] = handle_noop,
        [0x14 >> 2] = handle_external_interrupt,
        [0x18 >> 2] = handle_noop,
        [0x1C >> 2] = kvm_s390_handle_wait,
        [0x20 >> 2] = handle_validity,
        [0x28 >> 2] = handle_stop,
        [0x38 >> 2] = handle_partial_execution,
};

int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
        intercept_handler_t func;
        u8 code = vcpu->arch.sie_block->icptcode;

        if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
                return -EOPNOTSUPP;
        func = intercept_funcs[code >> 2];
        if (func)
                return func(vcpu);
        return -EOPNOTSUPP;
}