// SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"

struct workqueue_struct *vfio_ccw_work_q;
static struct kmem_cache *vfio_ccw_io_region;

/*
 * Helpers
 */
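/*
 * vfio_ccw_sch_quiesce() - disable a subchannel, terminating outstanding I/O
 *
 * Try to disable the subchannel; while that fails with -EBUSY, issue
 * cancel/halt/clear, drop the subchannel lock and wait for the resulting
 * interrupts before retrying. Give up with -EIO once the cancel/halt/clear
 * retries are exhausted.
 */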
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	DECLARE_COMPLETION_ONSTACK(completion);
	int iretry, ret = 0;

	spin_lock_irq(sch->lock);
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;

	iretry = 255;
	do {
		ret = cio_cancel_halt_clear(sch, &iretry);

		if (ret == -EIO) {
			pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
			       sch->schid.ssid, sch->schid.sch_no);
			break;
		}

		/*
		 * Flush all I/O and wait for
		 * cancel/halt/clear completion.
		 */
		private->completion = &completion;
		spin_unlock_irq(sch->lock);

		if (ret == -EBUSY)
			wait_for_completion_timeout(&completion, 3*HZ);

		private->completion = NULL;
		/* flush_workqueue() may sleep; call it without the lock held. */
		flush_workqueue(vfio_ccw_work_q);
		spin_lock_irq(sch->lock);
		ret = cio_disable_subchannel(sch);
	} while (ret == -EBUSY);
out_unlock:
	private->state = VFIO_CCW_STATE_NOT_OPER;
	spin_unlock_irq(sch->lock);
	return ret;
}

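/*
 * vfio_ccw_sch_io_todo() - deferred interrupt handling
 *
 * Runs on vfio_ccw_work_q: update the channel program with the status
 * from the IRB, copy the IRB into the I/O region for userspace, and
 * signal the eventfd so userspace knows new interrupt status is pending.
 */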
static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
	struct vfio_ccw_private *private;
	struct irb *irb;
	bool is_final;

	private = container_of(work, struct vfio_ccw_private, io_work);
	irb = &private->irb;

	is_final = !(scsw_actl(&irb->scsw) &
		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
	if (scsw_is_solicited(&irb->scsw)) {
		cp_update_scsw(&private->cp, &irb->scsw);
		if (is_final)
			cp_free(&private->cp);
	}
	memcpy(private->io_region->irb_area, irb, sizeof(*irb));

	if (private->io_trigger)
		eventfd_signal(private->io_trigger, 1);

	if (private->mdev && is_final)
		private->state = VFIO_CCW_STATE_IDLE;
}

/*
 * Css driver callbacks
 */
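/* Interrupt handler: account the interrupt and feed it into the FSM. */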
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	inc_irq_stat(IRQIO_CIO);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}

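/*
 * vfio_ccw_sch_probe() - bind to an I/O subchannel
 *
 * Allocate the private state and the I/O region, enable the subchannel
 * for the vfio-ccw ISC, and register the mediated device support.
 */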
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
	struct pmcw *pmcw = &sch->schib.pmcw;
	struct vfio_ccw_private *private;
	int ret;

	if (pmcw->qf) {
		dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
			 dev_name(&sch->dev));
		return -ENODEV;
	}

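	/*
	 * GFP_DMA on s390 limits the allocation to storage below 2 GB,
	 * keeping the structures the channel subsystem references
	 * 31-bit addressable.
	 */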
	private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
	if (!private)
		return -ENOMEM;

	private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
					       GFP_KERNEL | GFP_DMA);
	if (!private->io_region) {
		kfree(private);
		return -ENOMEM;
	}

	private->sch = sch;
	dev_set_drvdata(&sch->dev, private);

	spin_lock_irq(sch->lock);
	private->state = VFIO_CCW_STATE_NOT_OPER;
	sch->isc = VFIO_CCW_ISC;
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	spin_unlock_irq(sch->lock);
	if (ret)
		goto out_free;

	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
	atomic_set(&private->avail, 1);
	private->state = VFIO_CCW_STATE_STANDBY;

	ret = vfio_ccw_mdev_reg(sch);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	cio_disable_subchannel(sch);
out_free:
	dev_set_drvdata(&sch->dev, NULL);
	kmem_cache_free(vfio_ccw_io_region, private->io_region);
	kfree(private);
	return ret;
}

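/*
 * vfio_ccw_sch_remove() - unbind from the subchannel
 *
 * Quiesce the subchannel, unregister the mediated device support and
 * free the private state.
 */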
static int vfio_ccw_sch_remove(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	vfio_ccw_sch_quiesce(sch);

	vfio_ccw_mdev_unreg(sch);

	dev_set_drvdata(&sch->dev, NULL);

	kmem_cache_free(vfio_ccw_io_region, private->io_region);
	kfree(private);

	return 0;
}

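/* Quiesce outstanding I/O on shutdown so the machine can go down cleanly. */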
static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
	vfio_ccw_sch_quiesce(sch);
}

/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	unsigned long flags;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;

	if (work_pending(&sch->todo_work))
		goto out_unlock;

	if (cio_update_schib(sch)) {
		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
		rc = 0;
		goto out_unlock;
	}

	if (private->state == VFIO_CCW_STATE_NOT_OPER) {
		private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
				 VFIO_CCW_STATE_STANDBY;
	}
	rc = 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);

	return rc;
}

static struct css_device_id vfio_ccw_sch_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);

static struct css_driver vfio_ccw_sch_driver = {
	.drv = {
		.name = "vfio_ccw",
		.owner = THIS_MODULE,
	},
	.subchannel_type = vfio_ccw_sch_ids,
	.irq = vfio_ccw_sch_irq,
	.probe = vfio_ccw_sch_probe,
	.remove = vfio_ccw_sch_remove,
	.shutdown = vfio_ccw_sch_shutdown,
	.sch_event = vfio_ccw_sch_event,
};

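/*
 * vfio_ccw_sch_init() - module initialization
 *
 * Set up the workqueue and the usercopy-whitelisted slab cache for the
 * I/O region, then register for the vfio-ccw ISC and with the css bus.
 */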
static int __init vfio_ccw_sch_init(void)
{
	int ret;

	vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
	if (!vfio_ccw_work_q)
		return -ENOMEM;

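	/*
	 * The whole ccw_io_region is whitelisted for copy_to_user() /
	 * copy_from_user(), since userspace reads and writes it through
	 * the mdev region interface.
	 */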
	vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
					sizeof(struct ccw_io_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_io_region), NULL);
	if (!vfio_ccw_io_region) {
		destroy_workqueue(vfio_ccw_work_q);
		return -ENOMEM;
	}

	isc_register(VFIO_CCW_ISC);
	ret = css_driver_register(&vfio_ccw_sch_driver);
	if (ret) {
		isc_unregister(VFIO_CCW_ISC);
		kmem_cache_destroy(vfio_ccw_io_region);
		destroy_workqueue(vfio_ccw_work_q);
	}

	return ret;
}

static void __exit vfio_ccw_sch_exit(void)
{
	css_driver_unregister(&vfio_ccw_sch_driver);
	isc_unregister(VFIO_CCW_ISC);
	kmem_cache_destroy(vfio_ccw_io_region);
	destroy_workqueue(vfio_ccw_work_q);
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");