xref: /openbmc/linux/arch/s390/kvm/sigp.c (revision 63dc02bd)
1 /*
2  * sigp.c - handling interprocessor communication
3  *
4  * Copyright IBM Corp. 2008,2009
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License (version 2 only)
8  * as published by the Free Software Foundation.
9  *
10  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11  *               Christian Borntraeger <borntraeger@de.ibm.com>
12  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
13  */
14 
15 #include <linux/kvm.h>
16 #include <linux/kvm_host.h>
17 #include <linux/slab.h>
18 #include "gaccess.h"
19 #include "kvm-s390.h"
20 
/* sigp order codes (dispatched on in kvm_s390_handle_sigp) */
#define SIGP_SENSE             0x01
#define SIGP_EXTERNAL_CALL     0x02
#define SIGP_EMERGENCY         0x03
#define SIGP_START             0x04
#define SIGP_STOP              0x05
#define SIGP_RESTART           0x06
#define SIGP_STOP_STORE_STATUS 0x09
#define SIGP_INITIAL_CPU_RESET 0x0b
#define SIGP_CPU_RESET         0x0c
#define SIGP_SET_PREFIX        0x0d
#define SIGP_STORE_STATUS_ADDR 0x0e
#define SIGP_SET_ARCH          0x12
#define SIGP_SENSE_RUNNING     0x15

/*
 * cpu status bits: or'ed into the low word of the caller's status
 * register by the individual order handlers below
 */
#define SIGP_STAT_EQUIPMENT_CHECK   0x80000000UL
#define SIGP_STAT_NOT_RUNNING	    0x00000400UL
#define SIGP_STAT_INCORRECT_STATE   0x00000200UL
#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
#define SIGP_STAT_EXT_CALL_PENDING  0x00000080UL
#define SIGP_STAT_STOPPED           0x00000040UL
#define SIGP_STAT_OPERATOR_INTERV   0x00000020UL
#define SIGP_STAT_CHECK_STOP        0x00000010UL
#define SIGP_STAT_INOPERATIVE       0x00000004UL
#define SIGP_STAT_INVALID_ORDER     0x00000002UL
#define SIGP_STAT_RECEIVER_CHECK    0x00000001UL
48 
49 
50 static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
51 			u64 *reg)
52 {
53 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
54 	int rc;
55 
56 	if (cpu_addr >= KVM_MAX_VCPUS)
57 		return 3; /* not operational */
58 
59 	spin_lock(&fi->lock);
60 	if (fi->local_int[cpu_addr] == NULL)
61 		rc = 3; /* not operational */
62 	else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
63 		  & CPUSTAT_STOPPED)) {
64 		*reg &= 0xffffffff00000000UL;
65 		rc = 1; /* status stored */
66 	} else {
67 		*reg &= 0xffffffff00000000UL;
68 		*reg |= SIGP_STAT_STOPPED;
69 		rc = 1; /* status stored */
70 	}
71 	spin_unlock(&fi->lock);
72 
73 	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
74 	return rc;
75 }
76 
77 static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
78 {
79 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
80 	struct kvm_s390_local_interrupt *li;
81 	struct kvm_s390_interrupt_info *inti;
82 	int rc;
83 
84 	if (cpu_addr >= KVM_MAX_VCPUS)
85 		return 3; /* not operational */
86 
87 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
88 	if (!inti)
89 		return -ENOMEM;
90 
91 	inti->type = KVM_S390_INT_EMERGENCY;
92 	inti->emerg.code = vcpu->vcpu_id;
93 
94 	spin_lock(&fi->lock);
95 	li = fi->local_int[cpu_addr];
96 	if (li == NULL) {
97 		rc = 3; /* not operational */
98 		kfree(inti);
99 		goto unlock;
100 	}
101 	spin_lock_bh(&li->lock);
102 	list_add_tail(&inti->list, &li->list);
103 	atomic_set(&li->active, 1);
104 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
105 	if (waitqueue_active(&li->wq))
106 		wake_up_interruptible(&li->wq);
107 	spin_unlock_bh(&li->lock);
108 	rc = 0; /* order accepted */
109 	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
110 unlock:
111 	spin_unlock(&fi->lock);
112 	return rc;
113 }
114 
115 static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
116 {
117 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
118 	struct kvm_s390_local_interrupt *li;
119 	struct kvm_s390_interrupt_info *inti;
120 	int rc;
121 
122 	if (cpu_addr >= KVM_MAX_VCPUS)
123 		return 3; /* not operational */
124 
125 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
126 	if (!inti)
127 		return -ENOMEM;
128 
129 	inti->type = KVM_S390_INT_EXTERNAL_CALL;
130 	inti->extcall.code = vcpu->vcpu_id;
131 
132 	spin_lock(&fi->lock);
133 	li = fi->local_int[cpu_addr];
134 	if (li == NULL) {
135 		rc = 3; /* not operational */
136 		kfree(inti);
137 		goto unlock;
138 	}
139 	spin_lock_bh(&li->lock);
140 	list_add_tail(&inti->list, &li->list);
141 	atomic_set(&li->active, 1);
142 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
143 	if (waitqueue_active(&li->wq))
144 		wake_up_interruptible(&li->wq);
145 	spin_unlock_bh(&li->lock);
146 	rc = 0; /* order accepted */
147 	VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
148 unlock:
149 	spin_unlock(&fi->lock);
150 	return rc;
151 }
152 
153 static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
154 {
155 	struct kvm_s390_interrupt_info *inti;
156 
157 	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
158 	if (!inti)
159 		return -ENOMEM;
160 	inti->type = KVM_S390_SIGP_STOP;
161 
162 	spin_lock_bh(&li->lock);
163 	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED))
164 		goto out;
165 	list_add_tail(&inti->list, &li->list);
166 	atomic_set(&li->active, 1);
167 	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
168 	li->action_bits |= action;
169 	if (waitqueue_active(&li->wq))
170 		wake_up_interruptible(&li->wq);
171 out:
172 	spin_unlock_bh(&li->lock);
173 
174 	return 0; /* order accepted */
175 }
176 
177 static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
178 {
179 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
180 	struct kvm_s390_local_interrupt *li;
181 	int rc;
182 
183 	if (cpu_addr >= KVM_MAX_VCPUS)
184 		return 3; /* not operational */
185 
186 	spin_lock(&fi->lock);
187 	li = fi->local_int[cpu_addr];
188 	if (li == NULL) {
189 		rc = 3; /* not operational */
190 		goto unlock;
191 	}
192 
193 	rc = __inject_sigp_stop(li, action);
194 
195 unlock:
196 	spin_unlock(&fi->lock);
197 	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
198 	return rc;
199 }
200 
201 int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action)
202 {
203 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
204 	return __inject_sigp_stop(li, action);
205 }
206 
207 static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
208 {
209 	int rc;
210 
211 	switch (parameter & 0xff) {
212 	case 0:
213 		rc = 3; /* not operational */
214 		break;
215 	case 1:
216 	case 2:
217 		rc = 0; /* order accepted */
218 		break;
219 	default:
220 		rc = -EOPNOTSUPP;
221 	}
222 	return rc;
223 }
224 
225 static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
226 			     u64 *reg)
227 {
228 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
229 	struct kvm_s390_local_interrupt *li = NULL;
230 	struct kvm_s390_interrupt_info *inti;
231 	int rc;
232 	u8 tmp;
233 
234 	/* make sure that the new value is valid memory */
235 	address = address & 0x7fffe000u;
236 	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
237 	   copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
238 		*reg |= SIGP_STAT_INVALID_PARAMETER;
239 		return 1; /* invalid parameter */
240 	}
241 
242 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
243 	if (!inti)
244 		return 2; /* busy */
245 
246 	spin_lock(&fi->lock);
247 	if (cpu_addr < KVM_MAX_VCPUS)
248 		li = fi->local_int[cpu_addr];
249 
250 	if (li == NULL) {
251 		rc = 1; /* incorrect state */
252 		*reg &= SIGP_STAT_INCORRECT_STATE;
253 		kfree(inti);
254 		goto out_fi;
255 	}
256 
257 	spin_lock_bh(&li->lock);
258 	/* cpu must be in stopped state */
259 	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
260 		rc = 1; /* incorrect state */
261 		*reg &= SIGP_STAT_INCORRECT_STATE;
262 		kfree(inti);
263 		goto out_li;
264 	}
265 
266 	inti->type = KVM_S390_SIGP_SET_PREFIX;
267 	inti->prefix.address = address;
268 
269 	list_add_tail(&inti->list, &li->list);
270 	atomic_set(&li->active, 1);
271 	if (waitqueue_active(&li->wq))
272 		wake_up_interruptible(&li->wq);
273 	rc = 0; /* order accepted */
274 
275 	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
276 out_li:
277 	spin_unlock_bh(&li->lock);
278 out_fi:
279 	spin_unlock(&fi->lock);
280 	return rc;
281 }
282 
283 static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
284 				u64 *reg)
285 {
286 	int rc;
287 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
288 
289 	if (cpu_addr >= KVM_MAX_VCPUS)
290 		return 3; /* not operational */
291 
292 	spin_lock(&fi->lock);
293 	if (fi->local_int[cpu_addr] == NULL)
294 		rc = 3; /* not operational */
295 	else {
296 		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
297 		    & CPUSTAT_RUNNING) {
298 			/* running */
299 			rc = 1;
300 		} else {
301 			/* not running */
302 			*reg &= 0xffffffff00000000UL;
303 			*reg |= SIGP_STAT_NOT_RUNNING;
304 			rc = 0;
305 		}
306 	}
307 	spin_unlock(&fi->lock);
308 
309 	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
310 		   rc);
311 
312 	return rc;
313 }
314 
315 static int __sigp_restart(struct kvm_vcpu *vcpu, u16 cpu_addr)
316 {
317 	int rc = 0;
318 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
319 	struct kvm_s390_local_interrupt *li;
320 
321 	if (cpu_addr >= KVM_MAX_VCPUS)
322 		return 3; /* not operational */
323 
324 	spin_lock(&fi->lock);
325 	li = fi->local_int[cpu_addr];
326 	if (li == NULL) {
327 		rc = 3; /* not operational */
328 		goto out;
329 	}
330 
331 	spin_lock_bh(&li->lock);
332 	if (li->action_bits & ACTION_STOP_ON_STOP)
333 		rc = 2; /* busy */
334 	else
335 		VCPU_EVENT(vcpu, 4, "sigp restart %x to handle userspace",
336 			cpu_addr);
337 	spin_unlock_bh(&li->lock);
338 out:
339 	spin_unlock(&fi->lock);
340 	return rc;
341 }
342 
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	/*
	 * Intercept handler for the SIGP instruction: decode the order
	 * code, CPU address and parameter from the instruction text,
	 * dispatch to the individual order handlers, and store the
	 * resulting condition code in the guest PSW.  Returns 0 on
	 * success, a negative error code, or -EOPNOTSUPP for orders that
	 * must be completed in userspace.
	 */
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	/* the order code is the second-operand address: base + displacement */
	order_code = disp2;
	if (base2)
		order_code += vcpu->run->s.regs.gprs[base2];

	/* the parameter is taken from the odd register of the r1 pair */
	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
	case SIGP_EMERGENCY:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
		break;
	case SIGP_STOP_STORE_STATUS:
		/* counted together with plain stop */
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
						 ACTION_STOP_ON_STOP);
		break;
	case SIGP_SET_ARCH:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
					  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = __sigp_restart(vcpu, cpu_addr);
		if (rc == 2) /* busy */
			break;
		/* user space must know about restart */
		/* fallthrough: hand unhandled restart to userspace */
	default:
		return -EOPNOTSUPP;
	}

	if (rc < 0)
		return rc;

	/* store the condition code in PSW bits 18-19 (mask bits 44-45) */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (rc & 3ul) << 44;
	return 0;
}
422