xref: /openbmc/linux/drivers/s390/cio/vfio_ccw_fsm.c (revision 981ab3f1)
1 /*
2  * Finite state machine for vfio-ccw device handling
3  *
4  * Copyright IBM Corp. 2017
5  *
6  * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
7  */
8 
9 #include <linux/vfio.h>
10 #include <linux/mdev.h>
11 
12 #include "ioasm.h"
13 #include "vfio_ccw_private.h"
14 
/*
 * fsm_io_helper() - issue a "Start Subchannel" for the translated
 * channel program held in private->cp.
 *
 * Marks the device BUSY, fetches the ORB built by the cp_* translation
 * layer and issues ssch(). Returns 0 on successful start (an interrupt
 * will follow), a negative errno on failure, or — see the note at the
 * default case — the raw positive condition code for unexpected values.
 */
static int fsm_io_helper(struct vfio_ccw_private *private)
{
	struct subchannel *sch;
	union orb *orb;
	int ccode;
	__u8 lpm;
	unsigned long flags;

	sch = private->sch;

	/* Flip to BUSY under sch->lock so it is seen consistently. */
	spin_lock_irqsave(sch->lock, flags);
	private->state = VFIO_CCW_STATE_BUSY;
	spin_unlock_irqrestore(sch->lock, flags);
	/*
	 * NOTE(review): the lock is dropped before ssch() is issued below,
	 * so state update and start are not atomic with respect to the
	 * interrupt path — confirm this window is benign.
	 */

	/* Interrupt parameter is the subchannel address (32-bit). */
	orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);

	/* Issue "Start Subchannel" */
	ccode = ssch(sch->schid, orb);

	switch (ccode) {
	case 0:
		/*
		 * Initialize device status information
		 */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
		return 0;
	case 1:		/* Status pending */
	case 2:		/* Busy */
		return -EBUSY;
	case 3:		/* Device/path not operational */
	{
		/* Knock out the path(s) that just failed, or all of them. */
		lpm = orb->cmd.lpm;
		if (lpm != 0)
			sch->lpm &= ~lpm;
		else
			sch->lpm = 0;

		if (cio_update_schib(sch))
			return -ENODEV;

		/* Paths remain -> access problem; none left -> device gone. */
		return sch->lpm ? -EACCES : -ENODEV;
	}
	default:
		/*
		 * NOTE(review): this returns the positive condition code,
		 * unlike every other exit which is 0 or -errno — callers
		 * only test for non-zero, but verify nothing relies on the
		 * sign.
		 */
		return ccode;
	}
}
61 
62 static void fsm_notoper(struct vfio_ccw_private *private,
63 			enum vfio_ccw_event event)
64 {
65 	struct subchannel *sch = private->sch;
66 
67 	/*
68 	 * TODO:
69 	 * Probably we should send the machine check to the guest.
70 	 */
71 	css_sched_sch_todo(sch, SCH_TODO_UNREG);
72 	private->state = VFIO_CCW_STATE_NOT_OPER;
73 }
74 
/*
 * No operation action. Used for state/event combinations that are
 * deliberately ignored.
 */
static void fsm_nop(struct vfio_ccw_private *private,
		    enum vfio_ccw_event event)
{
}
82 
83 static void fsm_io_error(struct vfio_ccw_private *private,
84 			 enum vfio_ccw_event event)
85 {
86 	pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
87 	private->io_region.ret_code = -EIO;
88 }
89 
90 static void fsm_io_busy(struct vfio_ccw_private *private,
91 			enum vfio_ccw_event event)
92 {
93 	private->io_region.ret_code = -EBUSY;
94 }
95 
96 static void fsm_disabled_irq(struct vfio_ccw_private *private,
97 			     enum vfio_ccw_event event)
98 {
99 	struct subchannel *sch = private->sch;
100 
101 	/*
102 	 * An interrupt in a disabled state means a previous disable was not
103 	 * successful - should not happen, but we try to disable again.
104 	 */
105 	cio_disable_subchannel(sch);
106 }
107 
108 /*
109  * Deal with the ccw command request from the userspace.
110  */
111 static void fsm_io_request(struct vfio_ccw_private *private,
112 			   enum vfio_ccw_event event)
113 {
114 	union orb *orb;
115 	union scsw *scsw = &private->scsw;
116 	struct ccw_io_region *io_region = &private->io_region;
117 	struct mdev_device *mdev = private->mdev;
118 
119 	private->state = VFIO_CCW_STATE_BOXED;
120 
121 	memcpy(scsw, io_region->scsw_area, sizeof(*scsw));
122 
123 	if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
124 		orb = (union orb *)io_region->orb_area;
125 
126 		io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
127 					      orb);
128 		if (io_region->ret_code)
129 			goto err_out;
130 
131 		io_region->ret_code = cp_prefetch(&private->cp);
132 		if (io_region->ret_code) {
133 			cp_free(&private->cp);
134 			goto err_out;
135 		}
136 
137 		/* Start channel program and wait for I/O interrupt. */
138 		io_region->ret_code = fsm_io_helper(private);
139 		if (io_region->ret_code) {
140 			cp_free(&private->cp);
141 			goto err_out;
142 		}
143 		return;
144 	} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
145 		/* XXX: Handle halt. */
146 		io_region->ret_code = -EOPNOTSUPP;
147 		goto err_out;
148 	} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
149 		/* XXX: Handle clear. */
150 		io_region->ret_code = -EOPNOTSUPP;
151 		goto err_out;
152 	}
153 
154 err_out:
155 	private->state = VFIO_CCW_STATE_IDLE;
156 }
157 
/*
 * Got an interrupt for a normal io (state busy).
 *
 * Runs in interrupt context: snapshot this CPU's IRB into the private
 * structure, then defer the actual processing to vfio_ccw_work_q. The
 * IRB copy must happen before queue_work() so the worker sees the data.
 */
static void fsm_irq(struct vfio_ccw_private *private,
		    enum vfio_ccw_event event)
{
	struct irb *irb = this_cpu_ptr(&cio_irb);

	memcpy(&private->irb, irb, sizeof(*irb));

	queue_work(vfio_ccw_work_q, &private->io_work);

	/* Someone (e.g. teardown) may be waiting for this interrupt. */
	if (private->completion)
		complete(private->completion);
}
173 
/*
 * Device statemachine
 *
 * Indexed by [current state][event]; every entry must be populated so
 * dispatch never hits a NULL function pointer.
 */
fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
	/* Subchannel not operational: drop everything, re-disable on IRQ. */
	[VFIO_CCW_STATE_NOT_OPER] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_nop,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_disabled_irq,
	},
	/* Device known but not ready for I/O requests yet. */
	[VFIO_CCW_STATE_STANDBY] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
	/* Idle: the only state that accepts a new I/O request. */
	[VFIO_CCW_STATE_IDLE] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_request,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
	/* Request being set up (translation in progress): callers must retry. */
	[VFIO_CCW_STATE_BOXED] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_busy,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
	/* I/O in flight on the hardware: callers must retry. */
	[VFIO_CCW_STATE_BUSY] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_busy,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
};
204