xref: /openbmc/linux/drivers/s390/cio/vfio_ccw_fsm.c (revision 68198dca)
// SPDX-License-Identifier: GPL-2.0
/*
 * Finite state machine for vfio-ccw device handling
 *
 * Copyright IBM Corp. 2017
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 */

#include <linux/vfio.h>
#include <linux/mdev.h>

#include "ioasm.h"
#include "vfio_ccw_private.h"

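/*
 * Issue the translated channel program on the real subchannel and map
 * the ssch() condition code to an errno for userspace: cc 0 means the
 * start function was accepted, cc 1 and cc 2 mean status pending or
 * busy, and cc 3 means the device or path is not operational.
 */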
static int fsm_io_helper(struct vfio_ccw_private *private)
{
	struct subchannel *sch;
	union orb *orb;
	int ccode;
	__u8 lpm;
	unsigned long flags;

	sch = private->sch;

	spin_lock_irqsave(sch->lock, flags);
	private->state = VFIO_CCW_STATE_BUSY;
	spin_unlock_irqrestore(sch->lock, flags);

	orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);

	/* Issue "Start Subchannel" */
	ccode = ssch(sch->schid, orb);

	switch (ccode) {
	case 0:
		/*
		 * Initialize device status information
		 */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
		return 0;
	case 1:		/* Status pending */
	case 2:		/* Busy */
		return -EBUSY;
	case 3:		/* Device/path not operational */
	{
		lpm = orb->cmd.lpm;
		if (lpm != 0)
			sch->lpm &= ~lpm;
		else
			sch->lpm = 0;

		if (cio_update_schib(sch))
			return -ENODEV;

		return sch->lpm ? -EACCES : -ENODEV;
	}
	default:
		return ccode;
	}
}

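/*
 * The subchannel became not operational: have the css driver
 * unregister it and remember the state so that further requests are
 * rejected.
 */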
static void fsm_notoper(struct vfio_ccw_private *private,
			enum vfio_ccw_event event)
{
	struct subchannel *sch = private->sch;

	/*
	 * TODO:
	 * Probably we should send the machine check to the guest.
	 */
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	private->state = VFIO_CCW_STATE_NOT_OPER;
}

/*
 * No operation action.
 */
static void fsm_nop(struct vfio_ccw_private *private,
		    enum vfio_ccw_event event)
{
}

static void fsm_io_error(struct vfio_ccw_private *private,
			 enum vfio_ccw_event event)
{
	pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
	private->io_region.ret_code = -EIO;
}

static void fsm_io_busy(struct vfio_ccw_private *private,
			enum vfio_ccw_event event)
{
	private->io_region.ret_code = -EBUSY;
}

static void fsm_disabled_irq(struct vfio_ccw_private *private,
			     enum vfio_ccw_event event)
{
	struct subchannel *sch = private->sch;

	/*
	 * An interrupt in a disabled state means a previous disable was not
	 * successful - should not happen, but we try to disable again.
	 */
	cio_disable_subchannel(sch);
}

/*
 * Deal with the CCW command request from userspace.
 */
static void fsm_io_request(struct vfio_ccw_private *private,
			   enum vfio_ccw_event event)
{
	union orb *orb;
	union scsw *scsw = &private->scsw;
	struct ccw_io_region *io_region = &private->io_region;
	struct mdev_device *mdev = private->mdev;

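	/*
	 * Leave the idle state right away: while this request is being
	 * processed, any further I/O request is answered with -EBUSY
	 * by fsm_io_busy().
	 */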
	private->state = VFIO_CCW_STATE_BOXED;

	memcpy(scsw, io_region->scsw_area, sizeof(*scsw));

	if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
		orb = (union orb *)io_region->orb_area;

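		/*
		 * Copy the channel program from the guest and translate
		 * its addresses so that it can be started on the real
		 * subchannel.
		 */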
		io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
					      orb);
		if (io_region->ret_code)
			goto err_out;

		io_region->ret_code = cp_prefetch(&private->cp);
		if (io_region->ret_code) {
			cp_free(&private->cp);
			goto err_out;
		}

		/* Start channel program and wait for I/O interrupt. */
		io_region->ret_code = fsm_io_helper(private);
		if (io_region->ret_code) {
			cp_free(&private->cp);
			goto err_out;
		}
		return;
	} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		/* XXX: Handle halt. */
		io_region->ret_code = -EOPNOTSUPP;
		goto err_out;
	} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		/* XXX: Handle clear. */
		io_region->ret_code = -EOPNOTSUPP;
		goto err_out;
	}

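	/*
	 * Anything other than a successfully started I/O lands here:
	 * go back to the idle state so that userspace may submit a new
	 * request.
	 */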
err_out:
	private->state = VFIO_CCW_STATE_IDLE;
}

/*
 * Got an interrupt for a normal I/O (state busy).
 */
static void fsm_irq(struct vfio_ccw_private *private,
		    enum vfio_ccw_event event)
{
	struct irb *irb = this_cpu_ptr(&cio_irb);

	memcpy(&private->irb, irb, sizeof(*irb));

	queue_work(vfio_ccw_work_q, &private->io_work);

	if (private->completion)
		complete(private->completion);
}

/*
 * Device state machine: one action per state/event combination,
 * dispatched through vfio_ccw_fsm_event().
 */
fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
	[VFIO_CCW_STATE_NOT_OPER] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_nop,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_disabled_irq,
	},
	[VFIO_CCW_STATE_STANDBY] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
	[VFIO_CCW_STATE_IDLE] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_request,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
	[VFIO_CCW_STATE_BOXED] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_busy,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
	[VFIO_CCW_STATE_BUSY] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_busy,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
};