/*
 * handling diagnose instructions
 *
 * Copyright IBM Corp. 2008, 2011
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/pgalloc.h>
#include <asm/virtio-ccw.h>
#include "kvm-s390.h"
#include "trace.h"
#include "trace-s390.h"
#include "gaccess.h"

/*
 * DIAGNOSE 0x10: the guest releases a range of pages it no longer needs.
 * The range is taken from the two general purpose registers named by the
 * instruction nibbles in ipa; the pages are dropped via gmap_discard().
 * The two-page prefix area needs special treatment, since it is remapped.
 * Returns 0 on success or the result of injecting a specification
 * exception for invalid (unaligned / reversed / low-address) ranges.
 */
static int diag_release_pages(struct kvm_vcpu *vcpu)
{
	unsigned long start, end;
	unsigned long prefix = kvm_s390_get_prefix(vcpu);

	/* rx names the first page; ry names the last page, so the
	 * (exclusive) end is that register's value plus one page. */
	start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
	end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;
	vcpu->stat.diagnose_10++;

	/* both bounds must be page aligned, non-reversed, and the range
	 * must not include the first two (low-core) pages */
	if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
	    || start < 2 * PAGE_SIZE)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);

	/*
	 * We checked for start >= end above, so lets check for the
	 * fast path (no prefix swap page involved)
	 */
	if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) {
		gmap_discard(vcpu->arch.gmap, start, end);
	} else {
		/*
		 * This is the slow path: the range overlaps the two-page
		 * prefix area.  Split the discard into the part before the
		 * prefix, the (remapped) prefix pages themselves, and the
		 * part after the prefix; gmap_discard turns any empty
		 * sub-range into a NOP.
		 */
		gmap_discard(vcpu->arch.gmap, start, prefix);
		if (start <= prefix)
			gmap_discard(vcpu->arch.gmap, 0, 4096);
		if (end > prefix + 4096)
			gmap_discard(vcpu->arch.gmap, 4096, 8192);
		gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end);
	}
	return 0;
}

/*
 * DIAGNOSE 0x258: page-reference services.  Subcode 0 (TOKEN) establishes
 * the pfault handshake between guest and host, subcode 1 (CANCEL) tears it
 * down.  Status is reported to the guest in the ry register; the return
 * codes follow SC24-6084.  Returns 0, a program-interrupt injection result,
 * or -EOPNOTSUPP for unknown subcodes.
 */
static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
{
	/* layout of the guest-supplied parameter block */
	struct prs_parm {
		u16 code;
		u16 subcode;
		u16 parm_len;
		u16 parm_version;
		u64 token_addr;
		u64 select_mask;
		u64 compare_mask;
		u64 zarch;
	};
	struct prs_parm parm;
	int rc;
	u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
	u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);

	VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx",
		   vcpu->run->s.regs.gprs[rx]);
	vcpu->stat.diagnose_258++;
	/* the parameter block must be doubleword aligned */
	if (vcpu->run->s.regs.gprs[rx] & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	/* validate the block header before trusting any of its fields */
	if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (parm.subcode) {
	case 0: /* TOKEN */
		VCPU_EVENT(vcpu, 3, "pageref token addr 0x%llx "
			   "select mask 0x%llx compare mask 0x%llx",
			   parm.token_addr, parm.select_mask, parm.compare_mask);
		if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
			/*
			 * If the pagefault handshake is already activated,
			 * the token must not be changed.  We have to return
			 * decimal 8 instead, as mandated in SC24-6084.
			 */
			vcpu->run->s.regs.gprs[ry] = 8;
			return 0;
		}

		/* compare bits must be a subset of the select bits, the
		 * token must be doubleword aligned, and the reserved zarch
		 * field must carry exactly its architected value */
		if ((parm.compare_mask & parm.select_mask) != parm.compare_mask ||
		    parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		vcpu->arch.pfault_token = parm.token_addr;
		vcpu->arch.pfault_select = parm.select_mask;
		vcpu->arch.pfault_compare = parm.compare_mask;
		vcpu->run->s.regs.gprs[ry] = 0;
		rc = 0;
		break;
	case 1: /*
		 * CANCEL
		 * Specification allows to let already pending tokens survive
		 * the cancel, therefore to reduce code complexity, we assume
		 * all outstanding tokens are already pending.
		 */
		VCPU_EVENT(vcpu, 3, "pageref cancel addr 0x%llx", parm.token_addr);
		/* all other fields must be zero for CANCEL */
		if (parm.token_addr || parm.select_mask ||
		    parm.compare_mask || parm.zarch)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		vcpu->run->s.regs.gprs[ry] = 0;
		/*
		 * If the pfault handling was not established or is already
		 * canceled SC24-6084 requests to return decimal 4.
		 */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			vcpu->run->s.regs.gprs[ry] = 4;
		else
			vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;

		rc = 0;
		break;
	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}

/*
 * DIAGNOSE 0x44: voluntary time slice end.  Let other vcpus run by
 * spinning/yielding in kvm_vcpu_on_spin().  Always succeeds.
 */
static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
	vcpu->stat.diagnose_44++;
	kvm_vcpu_on_spin(vcpu);
	return 0;
}

/*
 * DIAGNOSE 0x9c: directed time slice end.  The register named by the
 * instruction holds the id of the vcpu the guest wants to run instead;
 * yield to it when it exists and is not the calling vcpu itself.
 */
static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *tcpu;
	int tid;

	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
	vcpu->stat.diagnose_9c++;
	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);

	/* yielding to ourselves would be pointless */
	if (tid == vcpu->vcpu_id)
		return 0;

	tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
	if (tcpu)
		kvm_vcpu_yield_to(tcpu);
	return 0;
}

/*
 * DIAGNOSE 0x308: IPL functions.  Only subcode 3 (clear reset) and
 * subcode 4 (reset without clear) are handled; the actual reset work is
 * delegated to userspace via a KVM_EXIT_S390_RESET exit with the
 * appropriate s390_reset_flags set.  Returns -EREMOTE to force the exit
 * to userspace, or -EOPNOTSUPP for unknown subcodes.
 */
static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
{
	unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
	unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;

	VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode);
	vcpu->stat.diagnose_308++;
	switch (subcode) {
	case 3:
		vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
		break;
	case 4:
		vcpu->run->s390_reset_flags = 0;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* stop the vcpu, unless userspace manages the cpu state itself */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
	vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
	VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
		   vcpu->run->s390_reset_flags);
	trace_kvm_s390_request_resets(vcpu->run->s390_reset_flags);
	return -EREMOTE;
}

/*
 * DIAGNOSE 0x500: virtio-ccw notification hypercall.  Forwards the
 * notification to the matching device on the virtio-ccw notify bus,
 * or returns -EOPNOTSUPP so userspace can handle it when no in-kernel
 * match exists.
 */
static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
{
	int ret;

	vcpu->stat.diagnose_500++;
	/* No virtio-ccw notification? Get out quickly. */
	if (!vcpu->kvm->arch.css_support ||
	    (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
		return -EOPNOTSUPP;

	/*
	 * The layout is as follows:
	 * - gpr 2 contains the subchannel id (passed as addr)
	 * - gpr 3 contains the virtqueue index (passed as datamatch)
	 * - gpr 4 contains the index on the bus (optionally)
	 */
	ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS,
				      vcpu->run->s.regs.gprs[2] & 0xffffffff,
				      8, &vcpu->run->s.regs.gprs[3],
				      vcpu->run->s.regs.gprs[4]);

	/*
	 * Return cookie in gpr 2, but don't overwrite the register if the
	 * diagnose will be handled by userspace.
	 */
	if (ret != -EOPNOTSUPP)
		vcpu->run->s.regs.gprs[2] = ret;
	/* kvm_io_bus_write_cookie returns -EOPNOTSUPP if it found no match. */
	return ret < 0 ? ret : 0;
}

/*
 * Top-level dispatcher for intercepted DIAGNOSE instructions.  DIAGNOSE
 * is privileged, so a guest running in problem state gets a privileged-
 * operation exception injected instead.  Unknown function codes yield
 * -EOPNOTSUPP so they can be reported/handled further up the stack.
 */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
	int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	trace_kvm_s390_handle_diag(vcpu, code);
	switch (code) {
	case 0x10:
		return diag_release_pages(vcpu);
	case 0x44:
		return __diag_time_slice_end(vcpu);
	case 0x9c:
		return __diag_time_slice_end_directed(vcpu);
	case 0x258:
		return __diag_page_ref_service(vcpu);
	case 0x308:
		return __diag_ipl_functions(vcpu);
	case 0x500:
		return __diag_virtio_hypercall(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}