xref: /openbmc/linux/drivers/s390/cio/vfio_ccw_fsm.c (revision 3b73c45e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Finite state machine for vfio-ccw device handling
4  *
5  * Copyright IBM Corp. 2017
6  * Copyright Red Hat, Inc. 2019
7  *
8  * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
9  *            Cornelia Huck <cohuck@redhat.com>
10  */
11 
12 #include <linux/vfio.h>
13 
14 #include <asm/isc.h>
15 
16 #include "ioasm.h"
17 #include "vfio_ccw_private.h"
18 
/*
 * Issue a "Start Subchannel" (ssch) for the channel program previously
 * built in private->cp, and translate the condition code into an
 * errno-style return value.
 *
 * Returns 0 on success (and moves the FSM to VFIO_CCW_STATE_CP_PENDING),
 * a negative errno on failure, or the raw (positive) condition code for
 * unexpected values.
 */
static int fsm_io_helper(struct vfio_ccw_private *private)
{
	struct subchannel *sch;
	union orb *orb;
	int ccode;
	__u8 lpm;
	unsigned long flags;
	int ret;

	sch = private->sch;

	/* The subchannel lock serializes access to the schib and ssch. */
	spin_lock_irqsave(sch->lock, flags);

	/* Use the subchannel address as the interruption parameter. */
	orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
	if (!orb) {
		ret = -EIO;
		goto out;
	}

	VFIO_CCW_TRACE_EVENT(5, "stIO");
	VFIO_CCW_TRACE_EVENT(5, dev_name(&sch->dev));

	/* Issue "Start Subchannel" */
	ccode = ssch(sch->schid, orb);

	VFIO_CCW_HEX_EVENT(5, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		/*
		 * Initialize device status information
		 */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
		ret = 0;
		/* I/O accepted; wait for the interrupt in CP_PENDING. */
		private->state = VFIO_CCW_STATE_CP_PENDING;
		break;
	case 1:		/* Status pending */
	case 2:		/* Busy */
		ret = -EBUSY;
		break;
	case 3:		/* Device/path not operational */
	{
		/*
		 * Remove the path(s) the start was attempted on; if no
		 * specific path mask was used, no path is usable.
		 */
		lpm = orb->cmd.lpm;
		if (lpm != 0)
			sch->lpm &= ~lpm;
		else
			sch->lpm = 0;

		if (cio_update_schib(sch))
			ret = -ENODEV;
		else
			/* Other paths remain: access error; else gone. */
			ret = sch->lpm ? -EACCES : -ENODEV;
		break;
	}
	default:
		/* Pass the unexpected condition code through as-is. */
		ret = ccode;
	}
out:
	spin_unlock_irqrestore(sch->lock, flags);
	return ret;
}
80 
/*
 * Issue a "Halt Subchannel" (hsch) for the device and translate the
 * condition code into an errno-style return value.
 *
 * Returns 0 if the halt was accepted, -EBUSY if status is pending or
 * the subchannel is busy, -ENODEV if the device is not operational, or
 * the raw (positive) condition code for unexpected values.
 */
static int fsm_do_halt(struct vfio_ccw_private *private)
{
	struct subchannel *sch;
	unsigned long flags;
	int ccode;
	int ret;

	sch = private->sch;

	spin_lock_irqsave(sch->lock, flags);

	VFIO_CCW_TRACE_EVENT(2, "haltIO");
	VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));

	/* Issue "Halt Subchannel" */
	ccode = hsch(sch->schid);

	VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		/*
		 * Initialize device status information
		 */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
		ret = 0;
		break;
	case 1:		/* Status pending */
	case 2:		/* Busy */
		ret = -EBUSY;
		break;
	case 3:		/* Device not operational */
		ret = -ENODEV;
		break;
	default:
		/* Pass the unexpected condition code through as-is. */
		ret = ccode;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	return ret;
}
121 
/*
 * Issue a "Clear Subchannel" (csch) for the device and translate the
 * condition code into an errno-style return value.
 *
 * Note that csch only produces condition codes 0 and 3, hence no
 * busy/status-pending cases here.
 */
static int fsm_do_clear(struct vfio_ccw_private *private)
{
	struct subchannel *sch;
	unsigned long flags;
	int ccode;
	int ret;

	sch = private->sch;

	spin_lock_irqsave(sch->lock, flags);

	VFIO_CCW_TRACE_EVENT(2, "clearIO");
	VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));

	/* Issue "Clear Subchannel" */
	ccode = csch(sch->schid);

	VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		/*
		 * Initialize device status information
		 */
		sch->schib.scsw.cmd.actl = SCSW_ACTL_CLEAR_PEND;
		/* TODO: check what else we might need to clear */
		ret = 0;
		break;
	case 3:		/* Device not operational */
		ret = -ENODEV;
		break;
	default:
		/* Pass the unexpected condition code through as-is. */
		ret = ccode;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	return ret;
}
159 
/*
 * The device has become not operational: schedule unregistration of the
 * subchannel, move the FSM to NOT_OPER and release the channel program
 * resources.
 */
static void fsm_notoper(struct vfio_ccw_private *private,
			enum vfio_ccw_event event)
{
	struct subchannel *sch = private->sch;

	VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: notoper event %x state %x\n",
			   sch->schid.cssid,
			   sch->schid.ssid,
			   sch->schid.sch_no,
			   event,
			   private->state);

	/*
	 * TODO:
	 * Probably we should send the machine check to the guest.
	 */
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	private->state = VFIO_CCW_STATE_NOT_OPER;

	/* This is usually handled during CLOSE event */
	cp_free(&private->cp);
}
182 
/*
 * No operation action.  Used for (state, event) combinations that are
 * valid but require no work.
 */
static void fsm_nop(struct vfio_ccw_private *private,
		    enum vfio_ccw_event event)
{
}
190 
191 static void fsm_io_error(struct vfio_ccw_private *private,
192 			 enum vfio_ccw_event event)
193 {
194 	pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
195 	private->io_region->ret_code = -EIO;
196 }
197 
198 static void fsm_io_busy(struct vfio_ccw_private *private,
199 			enum vfio_ccw_event event)
200 {
201 	private->io_region->ret_code = -EBUSY;
202 }
203 
204 static void fsm_io_retry(struct vfio_ccw_private *private,
205 			 enum vfio_ccw_event event)
206 {
207 	private->io_region->ret_code = -EAGAIN;
208 }
209 
210 static void fsm_async_error(struct vfio_ccw_private *private,
211 			    enum vfio_ccw_event event)
212 {
213 	struct ccw_cmd_region *cmd_region = private->cmd_region;
214 
215 	pr_err("vfio-ccw: FSM: %s request from state:%d\n",
216 	       cmd_region->command == VFIO_CCW_ASYNC_CMD_HSCH ? "halt" :
217 	       cmd_region->command == VFIO_CCW_ASYNC_CMD_CSCH ? "clear" :
218 	       "<unknown>", private->state);
219 	cmd_region->ret_code = -EIO;
220 }
221 
222 static void fsm_async_retry(struct vfio_ccw_private *private,
223 			    enum vfio_ccw_event event)
224 {
225 	private->cmd_region->ret_code = -EAGAIN;
226 }
227 
/*
 * Interrupt received while the subchannel is supposed to be disabled;
 * retry the disable.
 */
static void fsm_disabled_irq(struct vfio_ccw_private *private,
			     enum vfio_ccw_event event)
{
	struct subchannel *sch = private->sch;

	/*
	 * An interrupt in a disabled state means a previous disable was not
	 * successful - should not happen, but we try to disable again.
	 */
	cio_disable_subchannel(sch);
}
/* Convenience accessor for the subchannel ID (used for trace events). */
inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
{
	return p->sch->schid;
}
243 
/*
 * Deal with the ccw command request from the userspace.
 *
 * Only the start function is handled here; halt and clear must go
 * through the async cmd region and are rejected with -EOPNOTSUPP.
 * On a successful start the FSM has already moved to CP_PENDING (via
 * fsm_io_helper()); every error path returns the FSM to IDLE.
 */
static void fsm_io_request(struct vfio_ccw_private *private,
			   enum vfio_ccw_event event)
{
	union orb *orb;
	union scsw *scsw = &private->scsw;
	struct ccw_io_region *io_region = private->io_region;
	char *errstr = "request";
	struct subchannel_id schid = get_schid(private);

	private->state = VFIO_CCW_STATE_CP_PROCESSING;
	/* Snapshot the scsw userspace placed in the I/O region. */
	memcpy(scsw, io_region->scsw_area, sizeof(*scsw));

	if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
		orb = (union orb *)io_region->orb_area;

		/* Don't try to build a cp if transport mode is specified. */
		if (orb->tm.b) {
			io_region->ret_code = -EOPNOTSUPP;
			VFIO_CCW_MSG_EVENT(2,
					   "sch %x.%x.%04x: transport mode\n",
					   schid.cssid,
					   schid.ssid, schid.sch_no);
			errstr = "transport mode";
			goto err_out;
		}
		/* Build the channel program from the guest's ORB. */
		io_region->ret_code = cp_init(&private->cp, orb);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "sch %x.%x.%04x: cp_init=%d\n",
					   schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp init";
			goto err_out;
		}

		io_region->ret_code = cp_prefetch(&private->cp);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "sch %x.%x.%04x: cp_prefetch=%d\n",
					   schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp prefetch";
			/* cp_init() succeeded, so free what it built. */
			cp_free(&private->cp);
			goto err_out;
		}

		/* Start channel program and wait for I/O interrupt. */
		io_region->ret_code = fsm_io_helper(private);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "sch %x.%x.%04x: fsm_io_helper=%d\n",
					   schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp fsm_io_helper";
			cp_free(&private->cp);
			goto err_out;
		}
		/* Success: stay in CP_PENDING (set by fsm_io_helper()). */
		return;
	} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		VFIO_CCW_MSG_EVENT(2,
				   "sch %x.%x.%04x: halt on io_region\n",
				   schid.cssid,
				   schid.ssid, schid.sch_no);
		/* halt is handled via the async cmd region */
		io_region->ret_code = -EOPNOTSUPP;
		goto err_out;
	} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		VFIO_CCW_MSG_EVENT(2,
				   "sch %x.%x.%04x: clear on io_region\n",
				   schid.cssid,
				   schid.ssid, schid.sch_no);
		/* clear is handled via the async cmd region */
		io_region->ret_code = -EOPNOTSUPP;
		goto err_out;
	}

	/*
	 * NOTE(review): if no function bit is set we fall through here
	 * without touching io_region->ret_code, leaving whatever value
	 * userspace wrote — presumably intentional, but worth confirming.
	 */
err_out:
	private->state = VFIO_CCW_STATE_IDLE;
	trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid,
				      io_region->ret_code, errstr);
}
331 
/*
 * Deal with an async request from userspace.
 *
 * Dispatches the halt/clear command from the cmd region and stores the
 * result in cmd_region->ret_code.
 */
static void fsm_async_request(struct vfio_ccw_private *private,
			      enum vfio_ccw_event event)
{
	struct ccw_cmd_region *cmd_region = private->cmd_region;

	switch (cmd_region->command) {
	case VFIO_CCW_ASYNC_CMD_HSCH:
		cmd_region->ret_code = fsm_do_halt(private);
		break;
	case VFIO_CCW_ASYNC_CMD_CSCH:
		cmd_region->ret_code = fsm_do_clear(private);
		break;
	default:
		/* should not happen? */
		cmd_region->ret_code = -EINVAL;
	}

	trace_vfio_ccw_fsm_async_request(get_schid(private),
					 cmd_region->command,
					 cmd_region->ret_code);
}
356 
/*
 * Got an interrupt for a normal io (state busy).
 *
 * Copies the per-cpu irb into the private area, queues the bottom-half
 * work that forwards it to userspace, and wakes up anybody waiting on
 * private->completion (e.g. a quiesce in progress).
 */
static void fsm_irq(struct vfio_ccw_private *private,
		    enum vfio_ccw_event event)
{
	struct irb *irb = this_cpu_ptr(&cio_irb);

	VFIO_CCW_TRACE_EVENT(6, "IRQ");
	VFIO_CCW_TRACE_EVENT(6, dev_name(&private->sch->dev));

	memcpy(&private->irb, irb, sizeof(*irb));

	queue_work(vfio_ccw_work_q, &private->io_work);

	if (private->completion)
		complete(private->completion);
}
375 
/*
 * Enable the subchannel on the vfio-ccw ISC and move the FSM to IDLE.
 * On failure, raise a NOT_OPER event instead.
 */
static void fsm_open(struct vfio_ccw_private *private,
		     enum vfio_ccw_event event)
{
	struct subchannel *sch = private->sch;
	int ret;

	spin_lock_irq(sch->lock);
	sch->isc = VFIO_CCW_ISC;
	/* The subchannel address doubles as the interruption parameter. */
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (ret)
		goto err_unlock;

	private->state = VFIO_CCW_STATE_IDLE;
	spin_unlock_irq(sch->lock);
	return;

err_unlock:
	spin_unlock_irq(sch->lock);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}
396 
/*
 * Disable the subchannel (quiescing it first if it is busy), move the
 * FSM to STANDBY and free the channel program.  On failure, raise a
 * NOT_OPER event instead.
 */
static void fsm_close(struct vfio_ccw_private *private,
		      enum vfio_ccw_event event)
{
	struct subchannel *sch = private->sch;
	int ret;

	spin_lock_irq(sch->lock);

	/* Nothing to do if the subchannel is already disabled. */
	if (!sch->schib.pmcw.ena)
		goto err_unlock;

	ret = cio_disable_subchannel(sch);
	if (ret == -EBUSY)
		/* I/O still in flight: quiesce before disabling. */
		ret = vfio_ccw_sch_quiesce(sch);
	if (ret)
		goto err_unlock;

	private->state = VFIO_CCW_STATE_STANDBY;
	spin_unlock_irq(sch->lock);
	/* Release channel program resources outside the lock. */
	cp_free(&private->cp);
	return;

err_unlock:
	spin_unlock_irq(sch->lock);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}
423 
/*
 * Device statemachine
 *
 * Maps each (state, event) pair to its handler function.
 */
fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
	/* Device gone or never initialized: reject everything. */
	[VFIO_CCW_STATE_NOT_OPER] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_nop,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_disabled_irq,
		[VFIO_CCW_EVENT_OPEN]		= fsm_nop,
		[VFIO_CCW_EVENT_CLOSE]		= fsm_nop,
	},
	/* Subchannel disabled; OPEN enables it and moves to IDLE. */
	[VFIO_CCW_STATE_STANDBY] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_disabled_irq,
		[VFIO_CCW_EVENT_OPEN]		= fsm_open,
		[VFIO_CCW_EVENT_CLOSE]		= fsm_notoper,
	},
	/* Enabled and idle: ready to accept I/O and async requests. */
	[VFIO_CCW_STATE_IDLE] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_request,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_request,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
		[VFIO_CCW_EVENT_OPEN]		= fsm_notoper,
		[VFIO_CCW_EVENT_CLOSE]		= fsm_close,
	},
	/* Channel program being built/started: ask callers to retry. */
	[VFIO_CCW_STATE_CP_PROCESSING] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_retry,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_retry,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
		[VFIO_CCW_EVENT_OPEN]		= fsm_notoper,
		[VFIO_CCW_EVENT_CLOSE]		= fsm_close,
	},
	/* I/O in flight: new I/O is busy, but halt/clear are allowed. */
	[VFIO_CCW_STATE_CP_PENDING] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_busy,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_request,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
		[VFIO_CCW_EVENT_OPEN]		= fsm_notoper,
		[VFIO_CCW_EVENT_CLOSE]		= fsm_close,
	},
};
469