// SPDX-License-Identifier: GPL-2.0
/*
 * Finite state machine for vfio-ccw device handling
 *
 * Copyright IBM Corp. 2017
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 */

#include <linux/vfio.h>
#include <linux/mdev.h>

#include "ioasm.h"
#include "vfio_ccw_private.h"

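/*
 * Issue the translated channel program on the real subchannel and map
 * the ssch() condition code to an errno value.
 */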
static int fsm_io_helper(struct vfio_ccw_private *private)
{
	struct subchannel *sch;
	union orb *orb;
	int ccode;
	__u8 lpm;
	unsigned long flags;
	int ret;

	sch = private->sch;

	spin_lock_irqsave(sch->lock, flags);
	private->state = VFIO_CCW_STATE_BUSY;

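	/*
	 * Build the ORB from the translated channel program; the subchannel
	 * address doubles as the interrupt parameter.
	 */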
	orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);

	/* Issue "Start Subchannel" */
	ccode = ssch(sch->schid, orb);

	switch (ccode) {
	case 0:
		/* Initialize device status information */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
		ret = 0;
		break;
	case 1:		/* Status pending */
	case 2:		/* Busy */
		ret = -EBUSY;
		break;
	case 3:		/* Device/path not operational */
	{
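		/*
		 * Remove the path(s) used for this start from the mask of
		 * operational paths; if no path mask was specified, treat
		 * all paths as gone.
		 */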
		lpm = orb->cmd.lpm;
		if (lpm != 0)
			sch->lpm &= ~lpm;
		else
			sch->lpm = 0;

		if (cio_update_schib(sch))
			ret = -ENODEV;
		else
			ret = sch->lpm ? -EACCES : -ENODEV;
		break;
	}
	default:
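		/* Should not happen: ssch() only returns condition codes 0-3. */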
		ret = ccode;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	return ret;
}

static void fsm_notoper(struct vfio_ccw_private *private,
			enum vfio_ccw_event event)
{
	struct subchannel *sch = private->sch;

	/*
	 * TODO:
	 * Probably we should send the machine check to the guest.
	 */
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	private->state = VFIO_CCW_STATE_NOT_OPER;
}

/*
 * No operation action.
 */
static void fsm_nop(struct vfio_ccw_private *private,
		    enum vfio_ccw_event event)
{
}

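/*
 * An I/O request was received in a state that cannot accept one.
 */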
static void fsm_io_error(struct vfio_ccw_private *private,
			 enum vfio_ccw_event event)
{
	pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
	private->io_region.ret_code = -EIO;
}

static void fsm_io_busy(struct vfio_ccw_private *private,
			enum vfio_ccw_event event)
{
	private->io_region.ret_code = -EBUSY;
}

static void fsm_disabled_irq(struct vfio_ccw_private *private,
			     enum vfio_ccw_event event)
{
	struct subchannel *sch = private->sch;

	/*
	 * An interrupt in a disabled state means a previous disable was not
	 * successful - should not happen, but we try to disable again.
	 */
	cio_disable_subchannel(sch);
}

/*
 * Deal with the ccw command request from userspace.
 */
static void fsm_io_request(struct vfio_ccw_private *private,
			   enum vfio_ccw_event event)
{
	union orb *orb;
	union scsw *scsw = &private->scsw;
	struct ccw_io_region *io_region = &private->io_region;
	struct mdev_device *mdev = private->mdev;

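	/* Reject further I/O requests with -EBUSY while this one runs. */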
	private->state = VFIO_CCW_STATE_BOXED;

	memcpy(scsw, io_region->scsw_area, sizeof(*scsw));

	if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
		orb = (union orb *)io_region->orb_area;

		/* Don't try to build a cp if transport mode is specified. */
		if (orb->tm.b) {
			io_region->ret_code = -EOPNOTSUPP;
			goto err_out;
		}
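		/*
		 * Copy the channel program from guest memory and translate
		 * it for execution, pinning the guest pages it references.
		 */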
		io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
					      orb);
		if (io_region->ret_code)
			goto err_out;

		io_region->ret_code = cp_prefetch(&private->cp);
		if (io_region->ret_code) {
			cp_free(&private->cp);
			goto err_out;
		}

		/* Start channel program and wait for I/O interrupt. */
		io_region->ret_code = fsm_io_helper(private);
		if (io_region->ret_code) {
			cp_free(&private->cp);
			goto err_out;
		}
		return;
	} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		/* XXX: Handle halt. */
		io_region->ret_code = -EOPNOTSUPP;
		goto err_out;
	} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		/* XXX: Handle clear. */
		io_region->ret_code = -EOPNOTSUPP;
		goto err_out;
	}

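	/*
	 * Reached on error, or directly when no function control bit was
	 * set; in the latter case ret_code is left as userspace wrote it.
	 */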
err_out:
	private->state = VFIO_CCW_STATE_IDLE;
}

/*
 * Got an interrupt for a normal I/O (state busy).
 */
static void fsm_irq(struct vfio_ccw_private *private,
		    enum vfio_ccw_event event)
{
	struct irb *irb = this_cpu_ptr(&cio_irb);

	memcpy(&private->irb, irb, sizeof(*irb));

	queue_work(vfio_ccw_work_q, &private->io_work);

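	/* Anybody waiting on this interrupt (e.g. a quiesce) gets woken up. */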
	if (private->completion)
		complete(private->completion);
}

/*
 * Device state machine: one handler per (state, event) pair.
 */
fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
	[VFIO_CCW_STATE_NOT_OPER] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_nop,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_disabled_irq,
	},
	[VFIO_CCW_STATE_STANDBY] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
	[VFIO_CCW_STATE_IDLE] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_request,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
	[VFIO_CCW_STATE_BOXED] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_busy,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
	[VFIO_CCW_STATE_BUSY] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_busy,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
};