interrupt.c (879f99ef2c4c05d9a7f0a67a05f1415663119825) interrupt.c (4953919feedaeb6d0161ecea920c35d1d1f639d3)
1/*
2 * handling kvm guest interrupts
3 *
4 * Copyright IBM Corp. 2008,2014
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.

--- 13 unchanged lines hidden (view full) ---

22#include "gaccess.h"
23#include "trace-s390.h"
24
25#define IOINT_SCHID_MASK 0x0000ffff
26#define IOINT_SSID_MASK 0x00030000
27#define IOINT_CSSID_MASK 0x03fc0000
28#define IOINT_AI_MASK 0x04000000
29
1/*
2 * handling kvm guest interrupts
3 *
4 * Copyright IBM Corp. 2008,2014
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.

--- 13 unchanged lines hidden (view full) ---

22#include "gaccess.h"
23#include "trace-s390.h"
24
25#define IOINT_SCHID_MASK 0x0000ffff
26#define IOINT_SSID_MASK 0x00030000
27#define IOINT_CSSID_MASK 0x03fc0000
28#define IOINT_AI_MASK 0x04000000
29
30static void deliver_ckc_interrupt(struct kvm_vcpu *vcpu);
31
30static int is_ioint(u64 type)
31{
32 return ((type & 0xfffe0000u) != 0xfffe0000u);
33}
34
35int psw_extint_disabled(struct kvm_vcpu *vcpu)
36{
37 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);

--- 13 unchanged lines hidden (view full) ---

51{
52 if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
53 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
54 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
55 return 0;
56 return 1;
57}
58
32static int is_ioint(u64 type)
33{
34 return ((type & 0xfffe0000u) != 0xfffe0000u);
35}
36
37int psw_extint_disabled(struct kvm_vcpu *vcpu)
38{
39 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);

--- 13 unchanged lines hidden (view full) ---

53{
54 if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
55 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
56 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
57 return 0;
58 return 1;
59}
60
61static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
62{
63 if (psw_extint_disabled(vcpu) ||
64 !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
65 return 0;
66 if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
67 /* No timer interrupts when single stepping */
68 return 0;
69 return 1;
70}
71
59static u64 int_word_to_isc_bits(u32 int_word)
60{
61 u8 isc = (int_word & 0x38000000) >> 27;
62
63 return (0x80 >> isc) << 24;
64}
65
66static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,

--- 6 unchanged lines hidden (view full) ---

73 if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
74 return 1;
75 case KVM_S390_INT_EMERGENCY:
76 if (psw_extint_disabled(vcpu))
77 return 0;
78 if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
79 return 1;
80 return 0;
72static u64 int_word_to_isc_bits(u32 int_word)
73{
74 u8 isc = (int_word & 0x38000000) >> 27;
75
76 return (0x80 >> isc) << 24;
77}
78
79static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,

--- 6 unchanged lines hidden (view full) ---

86 if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
87 return 1;
88 case KVM_S390_INT_EMERGENCY:
89 if (psw_extint_disabled(vcpu))
90 return 0;
91 if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
92 return 1;
93 return 0;
94 case KVM_S390_INT_CLOCK_COMP:
95 return ckc_interrupts_enabled(vcpu);
96 case KVM_S390_INT_CPU_TIMER:
97 if (psw_extint_disabled(vcpu))
98 return 0;
99 if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
100 return 1;
101 return 0;
81 case KVM_S390_INT_SERVICE:
82 case KVM_S390_INT_PFAULT_INIT:
83 case KVM_S390_INT_PFAULT_DONE:
84 case KVM_S390_INT_VIRTIO:
85 if (psw_extint_disabled(vcpu))
86 return 0;
87 if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
88 return 1;

--- 33 unchanged lines hidden (view full) ---

122static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
123{
124 atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
125 clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
126}
127
128static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
129{
102 case KVM_S390_INT_SERVICE:
103 case KVM_S390_INT_PFAULT_INIT:
104 case KVM_S390_INT_PFAULT_DONE:
105 case KVM_S390_INT_VIRTIO:
106 if (psw_extint_disabled(vcpu))
107 return 0;
108 if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
109 return 1;

--- 33 unchanged lines hidden (view full) ---

143static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
144{
145 atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
146 clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
147}
148
149static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
150{
130 atomic_clear_mask(CPUSTAT_ECALL_PEND |
131 CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
132 &vcpu->arch.sie_block->cpuflags);
151 atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
152 &vcpu->arch.sie_block->cpuflags);
133 vcpu->arch.sie_block->lctl = 0x0000;
153 vcpu->arch.sie_block->lctl = 0x0000;
134 vcpu->arch.sie_block->ictl &= ~ICTL_LPSW;
154 vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
155
156 if (guestdbg_enabled(vcpu)) {
157 vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
158 LCTL_CR10 | LCTL_CR11);
159 vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
160 }
135}
136
137static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
138{
139 atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
140}
141
142static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
143 struct kvm_s390_interrupt_info *inti)
144{
145 switch (inti->type) {
146 case KVM_S390_INT_EXTERNAL_CALL:
147 case KVM_S390_INT_EMERGENCY:
148 case KVM_S390_INT_SERVICE:
149 case KVM_S390_INT_PFAULT_INIT:
150 case KVM_S390_INT_PFAULT_DONE:
151 case KVM_S390_INT_VIRTIO:
161}
162
163static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
164{
165 atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
166}
167
168static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
169 struct kvm_s390_interrupt_info *inti)
170{
171 switch (inti->type) {
172 case KVM_S390_INT_EXTERNAL_CALL:
173 case KVM_S390_INT_EMERGENCY:
174 case KVM_S390_INT_SERVICE:
175 case KVM_S390_INT_PFAULT_INIT:
176 case KVM_S390_INT_PFAULT_DONE:
177 case KVM_S390_INT_VIRTIO:
178 case KVM_S390_INT_CLOCK_COMP:
179 case KVM_S390_INT_CPU_TIMER:
152 if (psw_extint_disabled(vcpu))
153 __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
154 else
155 vcpu->arch.sie_block->lctl |= LCTL_CR0;
156 break;
157 case KVM_S390_SIGP_STOP:
158 __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
159 break;

--- 9 unchanged lines hidden (view full) ---

169 else
170 vcpu->arch.sie_block->lctl |= LCTL_CR6;
171 break;
172 default:
173 BUG();
174 }
175}
176
180 if (psw_extint_disabled(vcpu))
181 __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
182 else
183 vcpu->arch.sie_block->lctl |= LCTL_CR0;
184 break;
185 case KVM_S390_SIGP_STOP:
186 __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
187 break;

--- 9 unchanged lines hidden (view full) ---

197 else
198 vcpu->arch.sie_block->lctl |= LCTL_CR6;
199 break;
200 default:
201 BUG();
202 }
203}
204
205static int __deliver_prog_irq(struct kvm_vcpu *vcpu,
206 struct kvm_s390_pgm_info *pgm_info)
207{
208 const unsigned short table[] = { 2, 4, 4, 6 };
209 int rc = 0;
210
211 switch (pgm_info->code & ~PGM_PER) {
212 case PGM_AFX_TRANSLATION:
213 case PGM_ASX_TRANSLATION:
214 case PGM_EX_TRANSLATION:
215 case PGM_LFX_TRANSLATION:
216 case PGM_LSTE_SEQUENCE:
217 case PGM_LSX_TRANSLATION:
218 case PGM_LX_TRANSLATION:
219 case PGM_PRIMARY_AUTHORITY:
220 case PGM_SECONDARY_AUTHORITY:
221 case PGM_SPACE_SWITCH:
222 rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
223 (u64 *)__LC_TRANS_EXC_CODE);
224 break;
225 case PGM_ALEN_TRANSLATION:
226 case PGM_ALE_SEQUENCE:
227 case PGM_ASTE_INSTANCE:
228 case PGM_ASTE_SEQUENCE:
229 case PGM_ASTE_VALIDITY:
230 case PGM_EXTENDED_AUTHORITY:
231 rc = put_guest_lc(vcpu, pgm_info->exc_access_id,
232 (u8 *)__LC_EXC_ACCESS_ID);
233 break;
234 case PGM_ASCE_TYPE:
235 case PGM_PAGE_TRANSLATION:
236 case PGM_REGION_FIRST_TRANS:
237 case PGM_REGION_SECOND_TRANS:
238 case PGM_REGION_THIRD_TRANS:
239 case PGM_SEGMENT_TRANSLATION:
240 rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
241 (u64 *)__LC_TRANS_EXC_CODE);
242 rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
243 (u8 *)__LC_EXC_ACCESS_ID);
244 rc |= put_guest_lc(vcpu, pgm_info->op_access_id,
245 (u8 *)__LC_OP_ACCESS_ID);
246 break;
247 case PGM_MONITOR:
248 rc = put_guest_lc(vcpu, pgm_info->mon_class_nr,
249 (u64 *)__LC_MON_CLASS_NR);
250 rc |= put_guest_lc(vcpu, pgm_info->mon_code,
251 (u64 *)__LC_MON_CODE);
252 break;
253 case PGM_DATA:
254 rc = put_guest_lc(vcpu, pgm_info->data_exc_code,
255 (u32 *)__LC_DATA_EXC_CODE);
256 break;
257 case PGM_PROTECTION:
258 rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
259 (u64 *)__LC_TRANS_EXC_CODE);
260 rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
261 (u8 *)__LC_EXC_ACCESS_ID);
262 break;
263 }
264
265 if (pgm_info->code & PGM_PER) {
266 rc |= put_guest_lc(vcpu, pgm_info->per_code,
267 (u8 *) __LC_PER_CODE);
268 rc |= put_guest_lc(vcpu, pgm_info->per_atmid,
269 (u8 *)__LC_PER_ATMID);
270 rc |= put_guest_lc(vcpu, pgm_info->per_address,
271 (u64 *) __LC_PER_ADDRESS);
272 rc |= put_guest_lc(vcpu, pgm_info->per_access_id,
273 (u8 *) __LC_PER_ACCESS_ID);
274 }
275
276 switch (vcpu->arch.sie_block->icptcode) {
277 case ICPT_INST:
278 case ICPT_INSTPROGI:
279 case ICPT_OPEREXC:
280 case ICPT_PARTEXEC:
281 case ICPT_IOINST:
282 /* last instruction only stored for these icptcodes */
283 rc |= put_guest_lc(vcpu, table[vcpu->arch.sie_block->ipa >> 14],
284 (u16 *) __LC_PGM_ILC);
285 break;
286 case ICPT_PROGI:
287 rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->pgmilc,
288 (u16 *) __LC_PGM_ILC);
289 break;
290 default:
291 rc |= put_guest_lc(vcpu, 0,
292 (u16 *) __LC_PGM_ILC);
293 }
294
295 rc |= put_guest_lc(vcpu, pgm_info->code,
296 (u16 *)__LC_PGM_INT_CODE);
297 rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
298 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
299 rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
300 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
301
302 return rc;
303}
304
177static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
178 struct kvm_s390_interrupt_info *inti)
179{
180 const unsigned short table[] = { 2, 4, 4, 6 };
181 int rc = 0;
182
183 switch (inti->type) {
184 case KVM_S390_INT_EMERGENCY:
185 VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
186 vcpu->stat.deliver_emergency_signal++;
187 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
188 inti->emerg.code, 0);
305static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
306 struct kvm_s390_interrupt_info *inti)
307{
308 const unsigned short table[] = { 2, 4, 4, 6 };
309 int rc = 0;
310
311 switch (inti->type) {
312 case KVM_S390_INT_EMERGENCY:
313 VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
314 vcpu->stat.deliver_emergency_signal++;
315 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
316 inti->emerg.code, 0);
189 rc = put_guest(vcpu, 0x1201, (u16 __user *)__LC_EXT_INT_CODE);
190 rc |= put_guest(vcpu, inti->emerg.code,
191 (u16 __user *)__LC_EXT_CPU_ADDR);
192 rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
317 rc = put_guest_lc(vcpu, 0x1201, (u16 *)__LC_EXT_INT_CODE);
318 rc |= put_guest_lc(vcpu, inti->emerg.code,
319 (u16 *)__LC_EXT_CPU_ADDR);
320 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
321 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
322 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
193 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
323 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
194 rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
195 __LC_EXT_NEW_PSW, sizeof(psw_t));
196 break;
197 case KVM_S390_INT_EXTERNAL_CALL:
198 VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
199 vcpu->stat.deliver_external_call++;
200 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
201 inti->extcall.code, 0);
324 break;
325 case KVM_S390_INT_EXTERNAL_CALL:
326 VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
327 vcpu->stat.deliver_external_call++;
328 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
329 inti->extcall.code, 0);
202 rc = put_guest(vcpu, 0x1202, (u16 __user *)__LC_EXT_INT_CODE);
203 rc |= put_guest(vcpu, inti->extcall.code,
204 (u16 __user *)__LC_EXT_CPU_ADDR);
205 rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
330 rc = put_guest_lc(vcpu, 0x1202, (u16 *)__LC_EXT_INT_CODE);
331 rc |= put_guest_lc(vcpu, inti->extcall.code,
332 (u16 *)__LC_EXT_CPU_ADDR);
333 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
334 &vcpu->arch.sie_block->gpsw,
335 sizeof(psw_t));
336 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
337 &vcpu->arch.sie_block->gpsw,
338 sizeof(psw_t));
339 break;
340 case KVM_S390_INT_CLOCK_COMP:
341 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
342 inti->ext.ext_params, 0);
343 deliver_ckc_interrupt(vcpu);
344 break;
345 case KVM_S390_INT_CPU_TIMER:
346 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
347 inti->ext.ext_params, 0);
348 rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
349 (u16 *)__LC_EXT_INT_CODE);
350 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
351 &vcpu->arch.sie_block->gpsw,
352 sizeof(psw_t));
353 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
206 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
354 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
207 rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
208 __LC_EXT_NEW_PSW, sizeof(psw_t));
355 rc |= put_guest_lc(vcpu, inti->ext.ext_params,
356 (u32 *)__LC_EXT_PARAMS);
209 break;
210 case KVM_S390_INT_SERVICE:
211 VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
212 inti->ext.ext_params);
213 vcpu->stat.deliver_service_signal++;
214 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
215 inti->ext.ext_params, 0);
357 break;
358 case KVM_S390_INT_SERVICE:
359 VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
360 inti->ext.ext_params);
361 vcpu->stat.deliver_service_signal++;
362 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
363 inti->ext.ext_params, 0);
216 rc = put_guest(vcpu, 0x2401, (u16 __user *)__LC_EXT_INT_CODE);
217 rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
364 rc = put_guest_lc(vcpu, 0x2401, (u16 *)__LC_EXT_INT_CODE);
365 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
366 &vcpu->arch.sie_block->gpsw,
367 sizeof(psw_t));
368 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
218 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
369 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
219 rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
220 __LC_EXT_NEW_PSW, sizeof(psw_t));
221 rc |= put_guest(vcpu, inti->ext.ext_params,
222 (u32 __user *)__LC_EXT_PARAMS);
370 rc |= put_guest_lc(vcpu, inti->ext.ext_params,
371 (u32 *)__LC_EXT_PARAMS);
223 break;
224 case KVM_S390_INT_PFAULT_INIT:
225 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
226 inti->ext.ext_params2);
372 break;
373 case KVM_S390_INT_PFAULT_INIT:
374 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
375 inti->ext.ext_params2);
227 rc = put_guest(vcpu, 0x2603, (u16 __user *) __LC_EXT_INT_CODE);
228 rc |= put_guest(vcpu, 0x0600, (u16 __user *) __LC_EXT_CPU_ADDR);
229 rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
376 rc = put_guest_lc(vcpu, 0x2603, (u16 *) __LC_EXT_INT_CODE);
377 rc |= put_guest_lc(vcpu, 0x0600, (u16 *) __LC_EXT_CPU_ADDR);
378 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
379 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
380 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
230 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
381 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
231 rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
232 __LC_EXT_NEW_PSW, sizeof(psw_t));
233 rc |= put_guest(vcpu, inti->ext.ext_params2,
234 (u64 __user *) __LC_EXT_PARAMS2);
382 rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
383 (u64 *) __LC_EXT_PARAMS2);
235 break;
236 case KVM_S390_INT_PFAULT_DONE:
237 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
238 inti->ext.ext_params2);
384 break;
385 case KVM_S390_INT_PFAULT_DONE:
386 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
387 inti->ext.ext_params2);
239 rc = put_guest(vcpu, 0x2603, (u16 __user *) __LC_EXT_INT_CODE);
240 rc |= put_guest(vcpu, 0x0680, (u16 __user *) __LC_EXT_CPU_ADDR);
241 rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
388 rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE);
389 rc |= put_guest_lc(vcpu, 0x0680, (u16 *)__LC_EXT_CPU_ADDR);
390 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
391 &vcpu->arch.sie_block->gpsw,
392 sizeof(psw_t));
393 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
242 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
394 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
243 rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
244 __LC_EXT_NEW_PSW, sizeof(psw_t));
245 rc |= put_guest(vcpu, inti->ext.ext_params2,
246 (u64 __user *) __LC_EXT_PARAMS2);
395 rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
396 (u64 *)__LC_EXT_PARAMS2);
247 break;
248 case KVM_S390_INT_VIRTIO:
249 VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
250 inti->ext.ext_params, inti->ext.ext_params2);
251 vcpu->stat.deliver_virtio_interrupt++;
252 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
253 inti->ext.ext_params,
254 inti->ext.ext_params2);
397 break;
398 case KVM_S390_INT_VIRTIO:
399 VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
400 inti->ext.ext_params, inti->ext.ext_params2);
401 vcpu->stat.deliver_virtio_interrupt++;
402 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
403 inti->ext.ext_params,
404 inti->ext.ext_params2);
255 rc = put_guest(vcpu, 0x2603, (u16 __user *)__LC_EXT_INT_CODE);
256 rc |= put_guest(vcpu, 0x0d00, (u16 __user *)__LC_EXT_CPU_ADDR);
257 rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
405 rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE);
406 rc |= put_guest_lc(vcpu, 0x0d00, (u16 *)__LC_EXT_CPU_ADDR);
407 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
408 &vcpu->arch.sie_block->gpsw,
409 sizeof(psw_t));
410 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
258 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
411 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
259 rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
260 __LC_EXT_NEW_PSW, sizeof(psw_t));
261 rc |= put_guest(vcpu, inti->ext.ext_params,
262 (u32 __user *)__LC_EXT_PARAMS);
263 rc |= put_guest(vcpu, inti->ext.ext_params2,
264 (u64 __user *)__LC_EXT_PARAMS2);
412 rc |= put_guest_lc(vcpu, inti->ext.ext_params,
413 (u32 *)__LC_EXT_PARAMS);
414 rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
415 (u64 *)__LC_EXT_PARAMS2);
265 break;
266 case KVM_S390_SIGP_STOP:
267 VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
268 vcpu->stat.deliver_stop_signal++;
269 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
270 0, 0);
271 __set_intercept_indicator(vcpu, inti);
272 break;

--- 7 unchanged lines hidden (view full) ---

280 kvm_s390_set_prefix(vcpu, inti->prefix.address);
281 break;
282
283 case KVM_S390_RESTART:
284 VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
285 vcpu->stat.deliver_restart_signal++;
286 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
287 0, 0);
416 break;
417 case KVM_S390_SIGP_STOP:
418 VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
419 vcpu->stat.deliver_stop_signal++;
420 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
421 0, 0);
422 __set_intercept_indicator(vcpu, inti);
423 break;

--- 7 unchanged lines hidden (view full) ---

431 kvm_s390_set_prefix(vcpu, inti->prefix.address);
432 break;
433
434 case KVM_S390_RESTART:
435 VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
436 vcpu->stat.deliver_restart_signal++;
437 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
438 0, 0);
288 rc = copy_to_guest(vcpu,
289 offsetof(struct _lowcore, restart_old_psw),
290 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
291 rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
292 offsetof(struct _lowcore, restart_psw),
293 sizeof(psw_t));
294 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
439 rc = write_guest_lc(vcpu,
440 offsetof(struct _lowcore, restart_old_psw),
441 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
442 rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
443 &vcpu->arch.sie_block->gpsw,
444 sizeof(psw_t));
445 kvm_s390_vcpu_start(vcpu);
295 break;
296 case KVM_S390_PROGRAM_INT:
297 VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
298 inti->pgm.code,
299 table[vcpu->arch.sie_block->ipa >> 14]);
300 vcpu->stat.deliver_program_int++;
301 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
302 inti->pgm.code, 0);
446 break;
447 case KVM_S390_PROGRAM_INT:
448 VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
449 inti->pgm.code,
450 table[vcpu->arch.sie_block->ipa >> 14]);
451 vcpu->stat.deliver_program_int++;
452 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
453 inti->pgm.code, 0);
303 rc = put_guest(vcpu, inti->pgm.code, (u16 __user *)__LC_PGM_INT_CODE);
304 rc |= put_guest(vcpu, table[vcpu->arch.sie_block->ipa >> 14],
305 (u16 __user *)__LC_PGM_ILC);
306 rc |= copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
307 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
308 rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
309 __LC_PGM_NEW_PSW, sizeof(psw_t));
454 rc = __deliver_prog_irq(vcpu, &inti->pgm);
310 break;
311
312 case KVM_S390_MCHK:
313 VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
314 inti->mchk.mcic);
315 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
316 inti->mchk.cr14,
317 inti->mchk.mcic);
318 rc = kvm_s390_vcpu_store_status(vcpu,
319 KVM_S390_STORE_STATUS_PREFIXED);
455 break;
456
457 case KVM_S390_MCHK:
458 VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
459 inti->mchk.mcic);
460 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
461 inti->mchk.cr14,
462 inti->mchk.mcic);
463 rc = kvm_s390_vcpu_store_status(vcpu,
464 KVM_S390_STORE_STATUS_PREFIXED);
320 rc |= put_guest(vcpu, inti->mchk.mcic, (u64 __user *) __LC_MCCK_CODE);
321 rc |= copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
465 rc |= put_guest_lc(vcpu, inti->mchk.mcic, (u64 *)__LC_MCCK_CODE);
466 rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
467 &vcpu->arch.sie_block->gpsw,
468 sizeof(psw_t));
469 rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
322 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
470 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
323 rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
324 __LC_MCK_NEW_PSW, sizeof(psw_t));
325 break;
326
327 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
328 {
329 __u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
330 inti->io.subchannel_nr;
331 __u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
332 inti->io.io_int_word;
333 VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
334 vcpu->stat.deliver_io_int++;
335 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
336 param0, param1);
471 break;
472
473 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
474 {
475 __u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
476 inti->io.subchannel_nr;
477 __u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
478 inti->io.io_int_word;
479 VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
480 vcpu->stat.deliver_io_int++;
481 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
482 param0, param1);
337 rc = put_guest(vcpu, inti->io.subchannel_id,
338 (u16 __user *) __LC_SUBCHANNEL_ID);
339 rc |= put_guest(vcpu, inti->io.subchannel_nr,
340 (u16 __user *) __LC_SUBCHANNEL_NR);
341 rc |= put_guest(vcpu, inti->io.io_int_parm,
342 (u32 __user *) __LC_IO_INT_PARM);
343 rc |= put_guest(vcpu, inti->io.io_int_word,
344 (u32 __user *) __LC_IO_INT_WORD);
345 rc |= copy_to_guest(vcpu, __LC_IO_OLD_PSW,
346 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
347 rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
348 __LC_IO_NEW_PSW, sizeof(psw_t));
483 rc = put_guest_lc(vcpu, inti->io.subchannel_id,
484 (u16 *)__LC_SUBCHANNEL_ID);
485 rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
486 (u16 *)__LC_SUBCHANNEL_NR);
487 rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
488 (u32 *)__LC_IO_INT_PARM);
489 rc |= put_guest_lc(vcpu, inti->io.io_int_word,
490 (u32 *)__LC_IO_INT_WORD);
491 rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
492 &vcpu->arch.sie_block->gpsw,
493 sizeof(psw_t));
494 rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
495 &vcpu->arch.sie_block->gpsw,
496 sizeof(psw_t));
349 break;
350 }
351 default:
352 BUG();
353 }
354 if (rc) {
355 printk("kvm: The guest lowcore is not mapped during interrupt "
356 "delivery, killing userspace\n");
357 do_exit(SIGKILL);
358 }
359}
360
497 break;
498 }
499 default:
500 BUG();
501 }
502 if (rc) {
503 printk("kvm: The guest lowcore is not mapped during interrupt "
504 "delivery, killing userspace\n");
505 do_exit(SIGKILL);
506 }
507}
508
361static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
509static void deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
362{
363 int rc;
364
510{
511 int rc;
512
365 if (psw_extint_disabled(vcpu))
366 return 0;
367 if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
368 return 0;
369 rc = put_guest(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE);
370 rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
371 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
372 rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
373 __LC_EXT_NEW_PSW, sizeof(psw_t));
513 rc = put_guest_lc(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE);
514 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
515 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
516 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
517 &vcpu->arch.sie_block->gpsw,
518 sizeof(psw_t));
374 if (rc) {
375 printk("kvm: The guest lowcore is not mapped during interrupt "
376 "delivery, killing userspace\n");
377 do_exit(SIGKILL);
378 }
519 if (rc) {
520 printk("kvm: The guest lowcore is not mapped during interrupt "
521 "delivery, killing userspace\n");
522 do_exit(SIGKILL);
523 }
379 return 1;
380}
381
524}
525
526/* Check whether SIGP interpretation facility has an external call pending */
527int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
528{
529 atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl;
530
531 if (!psw_extint_disabled(vcpu) &&
532 (vcpu->arch.sie_block->gcr[0] & 0x2000ul) &&
533 (atomic_read(sigp_ctrl) & SIGP_CTRL_C) &&
534 (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
535 return 1;
536
537 return 0;
538}
539
382int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
383{
384 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
385 struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
386 struct kvm_s390_interrupt_info *inti;
387 int rc = 0;
388
389 if (atomic_read(&li->active)) {

--- 11 unchanged lines hidden (view full) ---

401 list_for_each_entry(inti, &fi->list, list)
402 if (__interrupt_is_deliverable(vcpu, inti)) {
403 rc = 1;
404 break;
405 }
406 spin_unlock(&fi->lock);
407 }
408
540int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
541{
542 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
543 struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
544 struct kvm_s390_interrupt_info *inti;
545 int rc = 0;
546
547 if (atomic_read(&li->active)) {

--- 11 unchanged lines hidden (view full) ---

559 list_for_each_entry(inti, &fi->list, list)
560 if (__interrupt_is_deliverable(vcpu, inti)) {
561 rc = 1;
562 break;
563 }
564 spin_unlock(&fi->lock);
565 }
566
409 if ((!rc) && (vcpu->arch.sie_block->ckc <
410 get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
411 if ((!psw_extint_disabled(vcpu)) &&
412 (vcpu->arch.sie_block->gcr[0] & 0x800ul))
413 rc = 1;
414 }
567 if (!rc && kvm_cpu_has_pending_timer(vcpu))
568 rc = 1;
415
569
570 if (!rc && kvm_s390_si_ext_call_pending(vcpu))
571 rc = 1;
572
416 return rc;
417}
418
419int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
420{
573 return rc;
574}
575
576int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
577{
421 return 0;
578 if (!(vcpu->arch.sie_block->ckc <
579 get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
580 return 0;
581 if (!ckc_interrupts_enabled(vcpu))
582 return 0;
583 return 1;
422}
423
424int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
425{
426 u64 now, sltime;
427 DECLARE_WAITQUEUE(wait, current);
428
429 vcpu->stat.exit_wait_state++;

--- 6 unchanged lines hidden (view full) ---

436 spin_unlock_bh(&vcpu->arch.local_int.lock);
437
438 if (psw_interrupts_disabled(vcpu)) {
439 VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
440 __unset_cpu_idle(vcpu);
441 return -EOPNOTSUPP; /* disabled wait */
442 }
443
584}
585
586int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
587{
588 u64 now, sltime;
589 DECLARE_WAITQUEUE(wait, current);
590
591 vcpu->stat.exit_wait_state++;

--- 6 unchanged lines hidden (view full) ---

598 spin_unlock_bh(&vcpu->arch.local_int.lock);
599
600 if (psw_interrupts_disabled(vcpu)) {
601 VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
602 __unset_cpu_idle(vcpu);
603 return -EOPNOTSUPP; /* disabled wait */
604 }
605
444 if (psw_extint_disabled(vcpu) ||
445 (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
606 if (!ckc_interrupts_enabled(vcpu)) {
446 VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
447 goto no_timer;
448 }
449
450 now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
451 if (vcpu->arch.sie_block->ckc < now) {
452 __unset_cpu_idle(vcpu);
453 return 0;

--- 6 unchanged lines hidden (view full) ---

460no_timer:
461 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
462 spin_lock(&vcpu->arch.local_int.float_int->lock);
463 spin_lock_bh(&vcpu->arch.local_int.lock);
464 add_wait_queue(&vcpu->wq, &wait);
465 while (list_empty(&vcpu->arch.local_int.list) &&
466 list_empty(&vcpu->arch.local_int.float_int->list) &&
467 (!vcpu->arch.local_int.timer_due) &&
607 VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
608 goto no_timer;
609 }
610
611 now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
612 if (vcpu->arch.sie_block->ckc < now) {
613 __unset_cpu_idle(vcpu);
614 return 0;

--- 6 unchanged lines hidden (view full) ---

621no_timer:
622 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
623 spin_lock(&vcpu->arch.local_int.float_int->lock);
624 spin_lock_bh(&vcpu->arch.local_int.lock);
625 add_wait_queue(&vcpu->wq, &wait);
626 while (list_empty(&vcpu->arch.local_int.list) &&
627 list_empty(&vcpu->arch.local_int.float_int->list) &&
628 (!vcpu->arch.local_int.timer_due) &&
468 !signal_pending(current)) {
629 !signal_pending(current) &&
630 !kvm_s390_si_ext_call_pending(vcpu)) {
469 set_current_state(TASK_INTERRUPTIBLE);
470 spin_unlock_bh(&vcpu->arch.local_int.lock);
471 spin_unlock(&vcpu->arch.local_int.float_int->lock);
472 schedule();
473 spin_lock(&vcpu->arch.local_int.float_int->lock);
474 spin_lock_bh(&vcpu->arch.local_int.lock);
475 }
476 __unset_cpu_idle(vcpu);

--- 40 unchanged lines hidden (view full) ---

517
518 spin_lock_bh(&li->lock);
519 list_for_each_entry_safe(inti, n, &li->list, list) {
520 list_del(&inti->list);
521 kfree(inti);
522 }
523 atomic_set(&li->active, 0);
524 spin_unlock_bh(&li->lock);
631 set_current_state(TASK_INTERRUPTIBLE);
632 spin_unlock_bh(&vcpu->arch.local_int.lock);
633 spin_unlock(&vcpu->arch.local_int.float_int->lock);
634 schedule();
635 spin_lock(&vcpu->arch.local_int.float_int->lock);
636 spin_lock_bh(&vcpu->arch.local_int.lock);
637 }
638 __unset_cpu_idle(vcpu);

--- 40 unchanged lines hidden (view full) ---

679
680 spin_lock_bh(&li->lock);
681 list_for_each_entry_safe(inti, n, &li->list, list) {
682 list_del(&inti->list);
683 kfree(inti);
684 }
685 atomic_set(&li->active, 0);
686 spin_unlock_bh(&li->lock);
687
688 /* clear pending external calls set by sigp interpretation facility */
689 atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
690 atomic_clear_mask(SIGP_CTRL_C,
691 &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
525}
526
527void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
528{
529 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
530 struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
531 struct kvm_s390_interrupt_info *n, *inti = NULL;
532 int deliver;

--- 16 unchanged lines hidden (view full) ---

549 spin_unlock_bh(&li->lock);
550 if (deliver) {
551 __do_deliver_interrupt(vcpu, inti);
552 kfree(inti);
553 }
554 } while (deliver);
555 }
556
692}
693
694void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
695{
696 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
697 struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
698 struct kvm_s390_interrupt_info *n, *inti = NULL;
699 int deliver;

--- 16 unchanged lines hidden (view full) ---

716 spin_unlock_bh(&li->lock);
717 if (deliver) {
718 __do_deliver_interrupt(vcpu, inti);
719 kfree(inti);
720 }
721 } while (deliver);
722 }
723
557 if ((vcpu->arch.sie_block->ckc <
558 get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
559 __try_deliver_ckc_interrupt(vcpu);
724 if (kvm_cpu_has_pending_timer(vcpu))
725 deliver_ckc_interrupt(vcpu);
560
561 if (atomic_read(&fi->active)) {
562 do {
563 deliver = 0;
564 spin_lock(&fi->lock);
565 list_for_each_entry_safe(inti, n, &fi->list, list) {
566 if (__interrupt_is_deliverable(vcpu, inti)) {
567 list_del(&inti->list);

--- 87 unchanged lines hidden (view full) ---

655 spin_lock_bh(&li->lock);
656 list_add(&inti->list, &li->list);
657 atomic_set(&li->active, 1);
658 BUG_ON(waitqueue_active(li->wq));
659 spin_unlock_bh(&li->lock);
660 return 0;
661}
662
726
727 if (atomic_read(&fi->active)) {
728 do {
729 deliver = 0;
730 spin_lock(&fi->lock);
731 list_for_each_entry_safe(inti, n, &fi->list, list) {
732 if (__interrupt_is_deliverable(vcpu, inti)) {
733 list_del(&inti->list);

--- 87 unchanged lines hidden (view full) ---

821 spin_lock_bh(&li->lock);
822 list_add(&inti->list, &li->list);
823 atomic_set(&li->active, 1);
824 BUG_ON(waitqueue_active(li->wq));
825 spin_unlock_bh(&li->lock);
826 return 0;
827}
828
829int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
830 struct kvm_s390_pgm_info *pgm_info)
831{
832 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
833 struct kvm_s390_interrupt_info *inti;
834
835 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
836 if (!inti)
837 return -ENOMEM;
838
839 VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
840 pgm_info->code);
841 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
842 pgm_info->code, 0, 1);
843
844 inti->type = KVM_S390_PROGRAM_INT;
845 memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
846 spin_lock_bh(&li->lock);
847 list_add(&inti->list, &li->list);
848 atomic_set(&li->active, 1);
849 BUG_ON(waitqueue_active(li->wq));
850 spin_unlock_bh(&li->lock);
851 return 0;
852}
853
663struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
664 u64 cr6, u64 schid)
665{
666 struct kvm_s390_float_interrupt *fi;
667 struct kvm_s390_interrupt_info *inti, *iter;
668
669 if ((!schid && !cr6) || (schid && cr6))
670 return NULL;

--- 134 unchanged lines hidden (view full) ---

805 return -EINVAL;
806 }
807 trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
808 2);
809
810 return __inject_vm(kvm, inti);
811}
812
854struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
855 u64 cr6, u64 schid)
856{
857 struct kvm_s390_float_interrupt *fi;
858 struct kvm_s390_interrupt_info *inti, *iter;
859
860 if ((!schid && !cr6) || (schid && cr6))
861 return NULL;

--- 134 unchanged lines hidden (view full) ---

996 return -EINVAL;
997 }
998 trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
999 2);
1000
1001 return __inject_vm(kvm, inti);
1002}
1003
/*
 * Re-queue an I/O interrupt that was previously dequeued (e.g. by
 * kvm_s390_get_io_int()) back into the VM's floating interrupt list.
 * Thin wrapper around __inject_vm(); presumably ownership of @inti
 * returns to the floating list on success — __inject_vm() is defined
 * elsewhere in this file, so confirm against its error paths.
 */
void kvm_s390_reinject_io_int(struct kvm *kvm,
			      struct kvm_s390_interrupt_info *inti)
{
	__inject_vm(kvm, inti);
}
1009
813int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
814 struct kvm_s390_interrupt *s390int)
815{
816 struct kvm_s390_local_interrupt *li;
817 struct kvm_s390_interrupt_info *inti;
818
819 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
820 if (!inti)

--- 13 unchanged lines hidden (view full) ---

834 case KVM_S390_SIGP_SET_PREFIX:
835 inti->prefix.address = s390int->parm;
836 inti->type = s390int->type;
837 VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
838 s390int->parm);
839 break;
840 case KVM_S390_SIGP_STOP:
841 case KVM_S390_RESTART:
1010int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
1011 struct kvm_s390_interrupt *s390int)
1012{
1013 struct kvm_s390_local_interrupt *li;
1014 struct kvm_s390_interrupt_info *inti;
1015
1016 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1017 if (!inti)

--- 13 unchanged lines hidden (view full) ---

1031 case KVM_S390_SIGP_SET_PREFIX:
1032 inti->prefix.address = s390int->parm;
1033 inti->type = s390int->type;
1034 VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
1035 s390int->parm);
1036 break;
1037 case KVM_S390_SIGP_STOP:
1038 case KVM_S390_RESTART:
1039 case KVM_S390_INT_CLOCK_COMP:
1040 case KVM_S390_INT_CPU_TIMER:
842 VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
843 inti->type = s390int->type;
844 break;
845 case KVM_S390_INT_EXTERNAL_CALL:
846 if (s390int->parm & 0xffff0000) {
847 kfree(inti);
848 return -EINVAL;
849 }

--- 45 unchanged lines hidden (view full) ---

895 if (waitqueue_active(&vcpu->wq))
896 wake_up_interruptible(&vcpu->wq);
897 vcpu->preempted = true;
898 spin_unlock_bh(&li->lock);
899 mutex_unlock(&vcpu->kvm->lock);
900 return 0;
901}
902
1041 VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
1042 inti->type = s390int->type;
1043 break;
1044 case KVM_S390_INT_EXTERNAL_CALL:
1045 if (s390int->parm & 0xffff0000) {
1046 kfree(inti);
1047 return -EINVAL;
1048 }

--- 45 unchanged lines hidden (view full) ---

1094 if (waitqueue_active(&vcpu->wq))
1095 wake_up_interruptible(&vcpu->wq);
1096 vcpu->preempted = true;
1097 spin_unlock_bh(&li->lock);
1098 mutex_unlock(&vcpu->kvm->lock);
1099 return 0;
1100}
1101
903static void clear_floating_interrupts(struct kvm *kvm)
1102void kvm_s390_clear_float_irqs(struct kvm *kvm)
904{
905 struct kvm_s390_float_interrupt *fi;
906 struct kvm_s390_interrupt_info *n, *inti = NULL;
907
908 mutex_lock(&kvm->lock);
909 fi = &kvm->arch.float_int;
910 spin_lock(&fi->lock);
911 list_for_each_entry_safe(inti, n, &fi->list, list) {

--- 329 unchanged lines hidden (view full) ---

1241 struct kvm_vcpu *vcpu;
1242
1243 switch (attr->group) {
1244 case KVM_DEV_FLIC_ENQUEUE:
1245 r = enqueue_floating_irq(dev, attr);
1246 break;
1247 case KVM_DEV_FLIC_CLEAR_IRQS:
1248 r = 0;
1103{
1104 struct kvm_s390_float_interrupt *fi;
1105 struct kvm_s390_interrupt_info *n, *inti = NULL;
1106
1107 mutex_lock(&kvm->lock);
1108 fi = &kvm->arch.float_int;
1109 spin_lock(&fi->lock);
1110 list_for_each_entry_safe(inti, n, &fi->list, list) {

--- 329 unchanged lines hidden (view full) ---

1440 struct kvm_vcpu *vcpu;
1441
1442 switch (attr->group) {
1443 case KVM_DEV_FLIC_ENQUEUE:
1444 r = enqueue_floating_irq(dev, attr);
1445 break;
1446 case KVM_DEV_FLIC_CLEAR_IRQS:
1447 r = 0;
1249 clear_floating_interrupts(dev->kvm);
1448 kvm_s390_clear_float_irqs(dev->kvm);
1250 break;
1251 case KVM_DEV_FLIC_APF_ENABLE:
1252 dev->kvm->arch.gmap->pfault_enabled = 1;
1253 break;
1254 case KVM_DEV_FLIC_APF_DISABLE_WAIT:
1255 dev->kvm->arch.gmap->pfault_enabled = 0;
1256 /*
1257 * Make sure no async faults are in transition when

--- 164 unchanged lines hidden ---
1449 break;
1450 case KVM_DEV_FLIC_APF_ENABLE:
1451 dev->kvm->arch.gmap->pfault_enabled = 1;
1452 break;
1453 case KVM_DEV_FLIC_APF_DISABLE_WAIT:
1454 dev->kvm->arch.gmap->pfault_enabled = 0;
1455 /*
1456 * Make sure no async faults are in transition when

--- 164 unchanged lines hidden ---