/*
 * handling diagnose instructions
 *
 * Copyright IBM Corp. 2008, 2011
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/pgalloc.h>
#include <asm/virtio-ccw.h>
#include "kvm-s390.h"
#include "trace.h"
#include "trace-s390.h"
#include "gaccess.h"

static int diag_release_pages(struct kvm_vcpu *vcpu)
{
	unsigned long start, end;
	unsigned long prefix = kvm_s390_get_prefix(vcpu);

	start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
	end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;
	vcpu->stat.diagnose_10++;

	if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
	    || start < 2 * PAGE_SIZE)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);

	/*
	 * We checked for start >= end above, so let's check for the
	 * fast path (no prefix swap page involved).
	 */
	if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) {
		gmap_discard(vcpu->arch.gmap, start, end);
	} else {
		/*
		 * This is the slow path. gmap_discard will check for
		 * start > end itself, so let's split this into before
		 * prefix, prefix and after prefix, and let gmap_discard
		 * make some of these calls NOPs.
		 */
		gmap_discard(vcpu->arch.gmap, start, prefix);
		if (start <= prefix)
			gmap_discard(vcpu->arch.gmap, 0, 4096);
		if (end > prefix + 4096)
			gmap_discard(vcpu->arch.gmap, 4096, 8192);
		gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end);
	}
	return 0;
}

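/*
 * DIAGNOSE 0x258: page-reference services (pfault handshake).
 * The rx register encoded in the instruction points to a parameter
 * block in guest memory, laid out as struct prs_parm below. Subcode 0
 * (TOKEN) establishes the pfault token, subcode 1 (CANCEL) tears the
 * handshake down again; the return codes placed in ry follow SC24-6084.
 */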
static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
{
	struct prs_parm {
		u16 code;
		u16 subcode;
		u16 parm_len;
		u16 parm_version;
		u64 token_addr;
		u64 select_mask;
		u64 compare_mask;
		u64 zarch;
	};
	struct prs_parm parm;
	int rc;
	u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
	u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);

	VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx",
		   vcpu->run->s.regs.gprs[rx]);
	vcpu->stat.diagnose_258++;
	if (vcpu->run->s.regs.gprs[rx] & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (parm.subcode) {
	case 0: /* TOKEN */
		VCPU_EVENT(vcpu, 3, "pageref token addr 0x%llx "
			   "select mask 0x%llx compare mask 0x%llx",
			   parm.token_addr, parm.select_mask, parm.compare_mask);
		if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
			/*
			 * If the pagefault handshake is already activated,
			 * the token must not be changed. We have to return
			 * decimal 8 instead, as mandated in SC24-6084.
			 */
			vcpu->run->s.regs.gprs[ry] = 8;
			return 0;
		}

		if ((parm.compare_mask & parm.select_mask) != parm.compare_mask ||
		    parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		vcpu->arch.pfault_token = parm.token_addr;
		vcpu->arch.pfault_select = parm.select_mask;
		vcpu->arch.pfault_compare = parm.compare_mask;
		vcpu->run->s.regs.gprs[ry] = 0;
		rc = 0;
		break;
	case 1: /*
		 * CANCEL
		 * The specification allows already pending tokens to
		 * survive the cancel, so to reduce code complexity we
		 * assume all outstanding tokens are already pending.
		 */
		VCPU_EVENT(vcpu, 3, "pageref cancel addr 0x%llx", parm.token_addr);
		if (parm.token_addr || parm.select_mask ||
		    parm.compare_mask || parm.zarch)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		vcpu->run->s.regs.gprs[ry] = 0;
		/*
		 * If pfault handling was not established or is already
		 * canceled, SC24-6084 requests us to return decimal 4.
		 */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			vcpu->run->s.regs.gprs[ry] = 4;
		else
			vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;

		rc = 0;
		break;
	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}

static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
	vcpu->stat.diagnose_44++;
	kvm_vcpu_on_spin(vcpu);
	return 0;
}

static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tcpu;
	int tid;
	int i;

	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
	vcpu->stat.diagnose_9c++;
	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);

	/* yielding to ourselves would be pointless */
	if (tid == vcpu->vcpu_id)
		return 0;

	kvm_for_each_vcpu(i, tcpu, kvm)
		if (tcpu->vcpu_id == tid) {
			kvm_vcpu_yield_to(tcpu);
			break;
		}

	return 0;
}

static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
{
	unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
	unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;

	VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode);
	vcpu->stat.diagnose_308++;
	switch (subcode) {
	case 3:
		vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
		break;
	case 4:
		vcpu->run->s390_reset_flags = 0;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
	vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
	VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
		   vcpu->run->s390_reset_flags);
	trace_kvm_s390_request_resets(vcpu->run->s390_reset_flags);
	return -EREMOTE;
}

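/*
 * DIAGNOSE 0x500: KVM virtio-ccw hypercall. Only the notification
 * function (gpr 1 == KVM_S390_VIRTIO_CCW_NOTIFY) is handled in the
 * kernel; everything else is passed to userspace via -EOPNOTSUPP.
 */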
static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
{
	int ret;

	vcpu->stat.diagnose_500++;
	/* No virtio-ccw notification? Get out quickly. */
	if (!vcpu->kvm->arch.css_support ||
	    (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
		return -EOPNOTSUPP;

	/*
	 * The layout is as follows:
	 * - gpr 2 contains the subchannel id (passed as addr)
	 * - gpr 3 contains the virtqueue index (passed as datamatch)
	 * - gpr 4 contains the index on the bus (optionally)
	 */
	ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS,
				      vcpu->run->s.regs.gprs[2] & 0xffffffff,
				      8, &vcpu->run->s.regs.gprs[3],
				      vcpu->run->s.regs.gprs[4]);

	/*
	 * Return cookie in gpr 2, but don't overwrite the register if the
	 * diagnose will be handled by userspace.
	 */
	if (ret != -EOPNOTSUPP)
		vcpu->run->s.regs.gprs[2] = ret;
	/* kvm_io_bus_write_cookie returns -EOPNOTSUPP if it found no match */
	return ret < 0 ? ret : 0;
}

int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
	int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	trace_kvm_s390_handle_diag(vcpu, code);
	switch (code) {
	case 0x10:
		return diag_release_pages(vcpu);
	case 0x44:
		return __diag_time_slice_end(vcpu);
	case 0x9c:
		return __diag_time_slice_end_directed(vcpu);
	case 0x258:
		return __diag_page_ref_service(vcpu);
	case 0x308:
		return __diag_ipl_functions(vcpu);
	case 0x500:
		return __diag_virtio_hypercall(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}