1 /*
2 * vfio based subchannel assignment support
3 *
4 * Copyright 2017 IBM Corp.
5 * Copyright 2019 Red Hat, Inc.
6 *
7 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
8 * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
9 * Pierre Morel <pmorel@linux.vnet.ibm.com>
10 * Cornelia Huck <cohuck@redhat.com>
11 *
12 * This work is licensed under the terms of the GNU GPL, version 2 or (at
13 * your option) any later version. See the COPYING file in the top-level
14 * directory.
15 */
16
17 #include "qemu/osdep.h"
18 #include <linux/vfio.h>
19 #include <linux/vfio_ccw.h>
20 #include <sys/ioctl.h>
21
22 #include "qapi/error.h"
23 #include "hw/vfio/vfio-common.h"
24 #include "hw/s390x/s390-ccw.h"
25 #include "hw/s390x/vfio-ccw.h"
26 #include "hw/qdev-properties.h"
27 #include "hw/s390x/ccw-device.h"
28 #include "exec/address-spaces.h"
29 #include "qemu/error-report.h"
30 #include "qemu/main-loop.h"
31 #include "qemu/module.h"
32
/* State of one vfio-ccw passthrough subchannel device. */
struct VFIOCCWDevice {
    S390CCWDevice cdev;              /* parent: emulated s390 CCW device */
    VFIODevice vdev;                 /* common vfio device state (fd, name, ...) */
    /* Mandatory I/O region: carries ORB/SCSW down and IRB back up. */
    uint64_t io_region_size;
    uint64_t io_region_offset;
    struct ccw_io_region *io_region;
    /* Optional async command region: csch/hsch forwarding. */
    uint64_t async_cmd_region_size;
    uint64_t async_cmd_region_offset;
    struct ccw_cmd_region *async_cmd_region;
    /* Optional SCHIB region: path state for store subchannel. */
    uint64_t schib_region_size;
    uint64_t schib_region_offset;
    struct ccw_schib_region *schib_region;
    /* Optional CRW region: channel report words for the guest. */
    uint64_t crw_region_size;
    uint64_t crw_region_offset;
    struct ccw_crw_region *crw_region;
    EventNotifier io_notifier;       /* I/O completion interrupt */
    EventNotifier crw_notifier;      /* CRW available interrupt */
    EventNotifier req_notifier;      /* host device-release request */
    bool force_orb_pfch;             /* "force-orb-pfch" property */
    bool warned_orb_pfch;            /* pfch warning printed already? */
};
54
/*
 * Warn about ORB prefetch handling, at most once per device.
 *
 * The warned_orb_pfch flag inside @vcdev is used by
 * warn_report_once_cond() to suppress repeats.
 */
static inline void warn_once_pfch(VFIOCCWDevice *vcdev, SubchDev *sch,
                                  const char *reason)
{
    warn_report_once_cond(&vcdev->warned_orb_pfch,
                          "vfio-ccw (devno %x.%x.%04x): %s",
                          sch->cssid, sch->ssid, sch->devno, reason);
}
62
/*
 * VFIODeviceOps callback: vfio-ccw devices never require a reset on
 * behalf of the vfio core, so simply report "no reset needed".
 */
static void vfio_ccw_compute_needs_reset(VFIODevice *vdev)
{
    vdev->needs_reset = false;
}
67
/*
 * We don't need vfio_hot_reset_multi and vfio_eoi operations for
 * vfio_ccw device now.
 */
struct VFIODeviceOps vfio_ccw_ops = {
    .vfio_compute_needs_reset = vfio_ccw_compute_needs_reset,
};
75
/*
 * Forward a START SUBCHANNEL request (the guest's ORB and SCSW) to the
 * kernel vfio-ccw driver via the I/O region, and translate the outcome
 * into a condition code for the I/O instruction.
 */
static IOInstEnding vfio_ccw_handle_request(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    struct ccw_io_region *region = vcdev->io_region;
    int ret;

    /*
     * Optionally force the prefetch bit: the kernel driver may only
     * support prefetching channel programs.
     */
    if (!(sch->orb.ctrl0 & ORB_CTRL0_MASK_PFCH) && vcdev->force_orb_pfch) {
        sch->orb.ctrl0 |= ORB_CTRL0_MASK_PFCH;
        warn_once_pfch(vcdev, sch, "PFCH flag forced");
    }

    /* The region layout must match the QEMU-side structures exactly. */
    QEMU_BUILD_BUG_ON(sizeof(region->orb_area) != sizeof(ORB));
    QEMU_BUILD_BUG_ON(sizeof(region->scsw_area) != sizeof(SCSW));
    QEMU_BUILD_BUG_ON(sizeof(region->irb_area) != sizeof(IRB));

    memset(region, 0, sizeof(*region));

    memcpy(region->orb_area, &sch->orb, sizeof(ORB));
    memcpy(region->scsw_area, &sch->curr_status.scsw, sizeof(SCSW));

again:
    /* Writing the region submits the request to the kernel driver. */
    ret = pwrite(vcdev->vdev.fd, region,
                 vcdev->io_region_size, vcdev->io_region_offset);
    if (ret != vcdev->io_region_size) {
        if (errno == EAGAIN) {
            goto again;
        }
        error_report("vfio-ccw: write I/O region failed with errno=%d", errno);
        ret = errno ? -errno : -EFAULT;
    } else {
        ret = 0;
    }
    /* Map the write result onto an I/O instruction ending. */
    switch (ret) {
    case 0:
        return IOINST_CC_EXPECTED;
    case -EBUSY:
        return IOINST_CC_BUSY;
    case -ENODEV:
    case -EACCES:
        return IOINST_CC_NOT_OPERATIONAL;
    case -EFAULT:
    default:
        /* Anything else is surfaced to the guest as a unit exception. */
        sch_gen_unit_exception(sch);
        css_inject_io_interrupt(sch);
        return IOINST_CC_EXPECTED;
    }
}
123
/*
 * Handle STORE SUBCHANNEL: refresh the path-related bits of the
 * guest-visible SCHIB from the kernel's schib region.
 *
 * Always returns IOINST_CC_EXPECTED: the architecture defines no
 * nonzero condition code for this scenario, so failures are only
 * logged.
 */
static IOInstEnding vfio_ccw_handle_store(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    SCHIB *schib = &sch->curr_status;
    struct ccw_schib_region *region = vcdev->schib_region;
    SCHIB *s;
    int ret;

    /* schib region not available so nothing else to do */
    if (!region) {
        return IOINST_CC_EXPECTED;
    }

    memset(region, 0, sizeof(*region));
    /* Reading the region asks the kernel for a current SCHIB snapshot. */
    ret = pread(vcdev->vdev.fd, region, vcdev->schib_region_size,
                vcdev->schib_region_offset);

    if (ret == -1) {
        /*
         * Device is probably damaged, but store subchannel does not
         * have a nonzero cc defined for this scenario. Log an error,
         * and presume things are otherwise fine.
         */
        error_report("vfio-ccw: store region read failed with errno=%d", errno);
        return IOINST_CC_EXPECTED;
    }

    /*
     * Selectively copy path-related bits of the SCHIB,
     * rather than copying the entire struct.
     */
    s = (SCHIB *)region->schib_area;
    schib->pmcw.pnom = s->pmcw.pnom;
    schib->pmcw.lpum = s->pmcw.lpum;
    schib->pmcw.pam = s->pmcw.pam;
    schib->pmcw.pom = s->pmcw.pom;

    /* Propagate a pending path-not-operational condition, if any. */
    if (s->scsw.flags & SCSW_FLAGS_MASK_PNO) {
        schib->scsw.flags |= SCSW_FLAGS_MASK_PNO;
    }

    return IOINST_CC_EXPECTED;
}
167
/*
 * Handle CLEAR SUBCHANNEL by forwarding a CSCH request through the
 * async command region.
 *
 * Returns 0 on success, -ENOSYS when the region is absent (caller
 * falls back to emulation), or a negative errno from the kernel.
 */
static int vfio_ccw_handle_clear(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    struct ccw_cmd_region *region = vcdev->async_cmd_region;
    int ret;

    if (!vcdev->async_cmd_region) {
        /* Async command region not available, fall back to emulation */
        return -ENOSYS;
    }

    memset(region, 0, sizeof(*region));
    region->command = VFIO_CCW_ASYNC_CMD_CSCH;

again:
    /* Writing the region submits the async command to the kernel. */
    ret = pwrite(vcdev->vdev.fd, region,
                 vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset);
    if (ret != vcdev->async_cmd_region_size) {
        if (errno == EAGAIN) {
            goto again;
        }
        error_report("vfio-ccw: write cmd region failed with errno=%d", errno);
        ret = errno ? -errno : -EFAULT;
    } else {
        ret = 0;
    }
    switch (ret) {
    case 0:
    case -ENODEV:
    case -EACCES:
        return ret;
    case -EFAULT:
    default:
        /* Other errors become a unit exception for the guest. */
        sch_gen_unit_exception(sch);
        css_inject_io_interrupt(sch);
        return 0;
    }
}
206
/*
 * Handle HALT SUBCHANNEL by forwarding an HSCH request through the
 * async command region.
 *
 * Returns 0 on success, -ENOSYS when the region is absent (caller
 * falls back to emulation), or a negative errno from the kernel.
 */
static int vfio_ccw_handle_halt(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    struct ccw_cmd_region *region = vcdev->async_cmd_region;
    int ret;

    if (!vcdev->async_cmd_region) {
        /* Async command region not available, fall back to emulation */
        return -ENOSYS;
    }

    memset(region, 0, sizeof(*region));
    region->command = VFIO_CCW_ASYNC_CMD_HSCH;

again:
    /* Writing the region submits the async command to the kernel. */
    ret = pwrite(vcdev->vdev.fd, region,
                 vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset);
    if (ret != vcdev->async_cmd_region_size) {
        if (errno == EAGAIN) {
            goto again;
        }
        error_report("vfio-ccw: write cmd region failed with errno=%d", errno);
        ret = errno ? -errno : -EFAULT;
    } else {
        ret = 0;
    }
    switch (ret) {
    case 0:
    case -EBUSY:
    case -ENODEV:
    case -EACCES:
        return ret;
    case -EFAULT:
    default:
        /* Other errors become a unit exception for the guest. */
        sch_gen_unit_exception(sch);
        css_inject_io_interrupt(sch);
        return 0;
    }
}
246
/* DeviceClass reset callback: ask the kernel driver to reset the device. */
static void vfio_ccw_reset(DeviceState *dev)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(dev);

    ioctl(vcdev->vdev.fd, VFIO_DEVICE_RESET);
}
253
/*
 * Drain all pending channel report words (CRWs) from the kernel's CRW
 * region and queue them for delivery to the guest.
 *
 * Stops when the kernel returns a zero CRW (queue empty) or on a read
 * error, which is only logged.
 */
static void vfio_ccw_crw_read(VFIOCCWDevice *vcdev)
{
    struct ccw_crw_region *region = vcdev->crw_region;
    CRW crw;
    int size;

    /* Keep reading CRWs as long as data is returned */
    do {
        memset(region, 0, sizeof(*region));
        size = pread(vcdev->vdev.fd, region, vcdev->crw_region_size,
                     vcdev->crw_region_offset);

        if (size == -1) {
            error_report("vfio-ccw: Read crw region failed with errno=%d",
                         errno);
            break;
        }

        if (region->crw == 0) {
            /* No more CRWs to queue */
            break;
        }

        /* Fixed mis-encoded '&region' (was mojibaked as a '®' glyph). */
        memcpy(&crw, &region->crw, sizeof(CRW));

        css_crw_add_to_queue(crw);
    } while (1);
}
282
vfio_ccw_req_notifier_handler(void * opaque)283 static void vfio_ccw_req_notifier_handler(void *opaque)
284 {
285 VFIOCCWDevice *vcdev = opaque;
286 Error *err = NULL;
287
288 if (!event_notifier_test_and_clear(&vcdev->req_notifier)) {
289 return;
290 }
291
292 qdev_unplug(DEVICE(vcdev), &err);
293 if (err) {
294 warn_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name);
295 }
296 }
297
vfio_ccw_crw_notifier_handler(void * opaque)298 static void vfio_ccw_crw_notifier_handler(void *opaque)
299 {
300 VFIOCCWDevice *vcdev = opaque;
301
302 while (event_notifier_test_and_clear(&vcdev->crw_notifier)) {
303 vfio_ccw_crw_read(vcdev);
304 }
305 }
306
/*
 * I/O notifier fired: read the result (IRB) of a channel program from
 * the I/O region, fold it into the guest-visible subchannel status and
 * inject an I/O interrupt.
 *
 * Read failures are mapped onto architected error conditions in the
 * SCSW before the interrupt is injected.
 */
static void vfio_ccw_io_notifier_handler(void *opaque)
{
    VFIOCCWDevice *vcdev = opaque;
    struct ccw_io_region *region = vcdev->io_region;
    CcwDevice *ccw_dev = CCW_DEVICE(vcdev);
    SubchDev *sch = ccw_dev->sch;
    SCHIB *schib = &sch->curr_status;
    SCSW s;
    IRB irb;
    ESW esw;
    int size;

    if (!event_notifier_test_and_clear(&vcdev->io_notifier)) {
        return;
    }

    size = pread(vcdev->vdev.fd, region, vcdev->io_region_size,
                 vcdev->io_region_offset);
    if (size == -1) {
        switch (errno) {
        case ENODEV:
            /* Generate a deferred cc 3 condition. */
            schib->scsw.flags |= SCSW_FLAGS_MASK_CC;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
            goto read_err;
        case EFAULT:
            /* Memory problem, generate channel data check. */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.cstat = SCSW_CSTAT_DATA_CHECK;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                       SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            goto read_err;
        default:
            /* Error, generate channel program check. */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.cstat = SCSW_CSTAT_PROG_CHECK;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                       SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            goto read_err;
        }
    } else if (size != vcdev->io_region_size) {
        /* Information transfer error, generate channel-control check. */
        schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
        schib->scsw.cstat = SCSW_CSTAT_CHN_CTRL_CHK;
        schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
        schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                   SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
        goto read_err;
    }

    memcpy(&irb, region->irb_area, sizeof(IRB));

    /* Update control block via irb. */
    s = schib->scsw;
    copy_scsw_to_guest(&s, &irb.scsw);
    schib->scsw = s;

    /* Keep the extended-status word for a later TEST SUBCHANNEL. */
    copy_esw_to_guest(&esw, &irb.esw);
    sch->esw = esw;

    /* If a unit check is pending, copy sense data. */
    if ((schib->scsw.dstat & SCSW_DSTAT_UNIT_CHECK) &&
        (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE)) {
        memcpy(sch->sense_data, irb.ecw, sizeof(irb.ecw));
    }

read_err:
    css_inject_io_interrupt(sch);
}
379
vfio_ccw_register_irq_notifier(VFIOCCWDevice * vcdev,unsigned int irq,Error ** errp)380 static void vfio_ccw_register_irq_notifier(VFIOCCWDevice *vcdev,
381 unsigned int irq,
382 Error **errp)
383 {
384 VFIODevice *vdev = &vcdev->vdev;
385 struct vfio_irq_info *irq_info;
386 size_t argsz;
387 int fd;
388 EventNotifier *notifier;
389 IOHandler *fd_read;
390
391 switch (irq) {
392 case VFIO_CCW_IO_IRQ_INDEX:
393 notifier = &vcdev->io_notifier;
394 fd_read = vfio_ccw_io_notifier_handler;
395 break;
396 case VFIO_CCW_CRW_IRQ_INDEX:
397 notifier = &vcdev->crw_notifier;
398 fd_read = vfio_ccw_crw_notifier_handler;
399 break;
400 case VFIO_CCW_REQ_IRQ_INDEX:
401 notifier = &vcdev->req_notifier;
402 fd_read = vfio_ccw_req_notifier_handler;
403 break;
404 default:
405 error_setg(errp, "vfio: Unsupported device irq(%d)", irq);
406 return;
407 }
408
409 if (vdev->num_irqs < irq + 1) {
410 error_setg(errp, "vfio: IRQ %u not available (number of irqs %u)",
411 irq, vdev->num_irqs);
412 return;
413 }
414
415 argsz = sizeof(*irq_info);
416 irq_info = g_malloc0(argsz);
417 irq_info->index = irq;
418 irq_info->argsz = argsz;
419 if (ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO,
420 irq_info) < 0 || irq_info->count < 1) {
421 error_setg_errno(errp, errno, "vfio: Error getting irq info");
422 goto out_free_info;
423 }
424
425 if (event_notifier_init(notifier, 0)) {
426 error_setg_errno(errp, errno,
427 "vfio: Unable to init event notifier for irq (%d)",
428 irq);
429 goto out_free_info;
430 }
431
432 fd = event_notifier_get_fd(notifier);
433 qemu_set_fd_handler(fd, fd_read, NULL, vcdev);
434
435 if (vfio_set_irq_signaling(vdev, irq, 0,
436 VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
437 qemu_set_fd_handler(fd, NULL, NULL, vcdev);
438 event_notifier_cleanup(notifier);
439 }
440
441 out_free_info:
442 g_free(irq_info);
443 }
444
/*
 * Tear down the notifier for one irq index: detach the eventfd from
 * the kernel, drop the fd handler, and clean up the notifier.
 */
static void vfio_ccw_unregister_irq_notifier(VFIOCCWDevice *vcdev,
                                             unsigned int irq)
{
    EventNotifier *notifier;
    Error *local_err = NULL;

    switch (irq) {
    case VFIO_CCW_IO_IRQ_INDEX:
        notifier = &vcdev->io_notifier;
        break;
    case VFIO_CCW_CRW_IRQ_INDEX:
        notifier = &vcdev->crw_notifier;
        break;
    case VFIO_CCW_REQ_IRQ_INDEX:
        notifier = &vcdev->req_notifier;
        break;
    default:
        error_report("vfio: Unsupported device irq(%d)", irq);
        return;
    }

    /* Tell the kernel to stop signaling through our eventfd. */
    if (vfio_set_irq_signaling(&vcdev->vdev, irq, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, -1, &local_err)) {
        warn_reportf_err(local_err, VFIO_MSG_PREFIX, vcdev->vdev.name);
    }

    qemu_set_fd_handler(event_notifier_get_fd(notifier),
                        NULL, NULL, vcdev);
    event_notifier_cleanup(notifier);
}
475
vfio_ccw_get_region(VFIOCCWDevice * vcdev,Error ** errp)476 static void vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp)
477 {
478 VFIODevice *vdev = &vcdev->vdev;
479 struct vfio_region_info *info;
480 int ret;
481
482 /* Sanity check device */
483 if (!(vdev->flags & VFIO_DEVICE_FLAGS_CCW)) {
484 error_setg(errp, "vfio: Um, this isn't a vfio-ccw device");
485 return;
486 }
487
488 /*
489 * We always expect at least the I/O region to be present. We also
490 * may have a variable number of regions governed by capabilities.
491 */
492 if (vdev->num_regions < VFIO_CCW_CONFIG_REGION_INDEX + 1) {
493 error_setg(errp, "vfio: too few regions (%u), expected at least %u",
494 vdev->num_regions, VFIO_CCW_CONFIG_REGION_INDEX + 1);
495 return;
496 }
497
498 ret = vfio_get_region_info(vdev, VFIO_CCW_CONFIG_REGION_INDEX, &info);
499 if (ret) {
500 error_setg_errno(errp, -ret, "vfio: Error getting config info");
501 return;
502 }
503
504 vcdev->io_region_size = info->size;
505 if (sizeof(*vcdev->io_region) != vcdev->io_region_size) {
506 error_setg(errp, "vfio: Unexpected size of the I/O region");
507 goto out_err;
508 }
509
510 vcdev->io_region_offset = info->offset;
511 vcdev->io_region = g_malloc0(info->size);
512 g_free(info);
513
514 /* check for the optional async command region */
515 ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
516 VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD, &info);
517 if (!ret) {
518 vcdev->async_cmd_region_size = info->size;
519 if (sizeof(*vcdev->async_cmd_region) != vcdev->async_cmd_region_size) {
520 error_setg(errp, "vfio: Unexpected size of the async cmd region");
521 goto out_err;
522 }
523 vcdev->async_cmd_region_offset = info->offset;
524 vcdev->async_cmd_region = g_malloc0(info->size);
525 g_free(info);
526 }
527
528 ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
529 VFIO_REGION_SUBTYPE_CCW_SCHIB, &info);
530 if (!ret) {
531 vcdev->schib_region_size = info->size;
532 if (sizeof(*vcdev->schib_region) != vcdev->schib_region_size) {
533 error_setg(errp, "vfio: Unexpected size of the schib region");
534 goto out_err;
535 }
536 vcdev->schib_region_offset = info->offset;
537 vcdev->schib_region = g_malloc(info->size);
538 g_free(info);
539 }
540
541 ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
542 VFIO_REGION_SUBTYPE_CCW_CRW, &info);
543
544 if (!ret) {
545 vcdev->crw_region_size = info->size;
546 if (sizeof(*vcdev->crw_region) != vcdev->crw_region_size) {
547 error_setg(errp, "vfio: Unexpected size of the CRW region");
548 goto out_err;
549 }
550 vcdev->crw_region_offset = info->offset;
551 vcdev->crw_region = g_malloc(info->size);
552 g_free(info);
553 }
554
555 return;
556
557 out_err:
558 g_free(vcdev->crw_region);
559 g_free(vcdev->schib_region);
560 g_free(vcdev->async_cmd_region);
561 g_free(vcdev->io_region);
562 g_free(info);
563 return;
564 }
565
/*
 * Free the region shadow buffers. Pointers are reset to NULL so a
 * repeated call (or a stale vcdev->schib_region check in
 * vfio_ccw_handle_store()) cannot double-free or use freed memory.
 */
static void vfio_ccw_put_region(VFIOCCWDevice *vcdev)
{
    g_free(vcdev->crw_region);
    g_free(vcdev->schib_region);
    g_free(vcdev->async_cmd_region);
    g_free(vcdev->io_region);
    vcdev->crw_region = NULL;
    vcdev->schib_region = NULL;
    vcdev->async_cmd_region = NULL;
    vcdev->io_region = NULL;
}
573
/*
 * Realize callback: attach the host mdev, map the device regions and
 * register interrupt notifiers. Errors unwind all prior setup steps
 * (goto ladder below) and are propagated through @errp.
 */
static void vfio_ccw_realize(DeviceState *dev, Error **errp)
{
    S390CCWDevice *cdev = S390_CCW_DEVICE(dev);
    VFIOCCWDevice *vcdev = VFIO_CCW(cdev);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);
    VFIODevice *vbasedev = &vcdev->vdev;
    Error *err = NULL;
    int ret;

    /* Call the class init function for subchannel. */
    if (cdc->realize) {
        cdc->realize(cdev, vcdev->vdev.sysfsdev, &err);
        if (err) {
            goto out_err_propagate;
        }
    }

    vbasedev->ops = &vfio_ccw_ops;
    vbasedev->type = VFIO_DEVICE_TYPE_CCW;
    vbasedev->name = g_strdup_printf("%x.%x.%04x", vcdev->cdev.hostid.cssid,
                                     vcdev->cdev.hostid.ssid,
                                     vcdev->cdev.hostid.devid);
    vbasedev->dev = dev;

    /*
     * All vfio-ccw devices are believed to operate in a way compatible with
     * discarding of memory in RAM blocks, ie. pages pinned in the host are
     * in the current working set of the guest driver and therefore never
     * overlap e.g., with pages available to the guest balloon driver. This
     * needs to be set before vfio_get_device() for vfio common to handle
     * ram_block_discard_disable().
     */
    vbasedev->ram_block_discard_allowed = true;

    ret = vfio_attach_device(cdev->mdevid, vbasedev,
                             &address_space_memory, errp);
    if (ret) {
        goto out_attach_dev_err;
    }

    vfio_ccw_get_region(vcdev, &err);
    if (err) {
        goto out_region_err;
    }

    vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX, &err);
    if (err) {
        goto out_io_notifier_err;
    }

    /* The CRW irq only makes sense if the kernel offered a CRW region. */
    if (vcdev->crw_region) {
        vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX, &err);
        if (err) {
            goto out_irq_notifier_err;
        }
    }

    vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX, &err);
    if (err) {
        /*
         * Report this error, but do not make it a failing condition.
         * Lack of this IRQ in the host does not prevent normal operation.
         */
        error_report(err);
    }

    return;

out_irq_notifier_err:
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX);
out_io_notifier_err:
    vfio_ccw_put_region(vcdev);
out_region_err:
    vfio_detach_device(vbasedev);
out_attach_dev_err:
    g_free(vbasedev->name);
    if (cdc->unrealize) {
        cdc->unrealize(cdev);
    }
out_err_propagate:
    error_propagate(errp, err);
}
658
/*
 * Unrealize callback: tear everything down in the reverse order of
 * vfio_ccw_realize() — notifiers, regions, device attachment, name,
 * then the subchannel class teardown.
 */
static void vfio_ccw_unrealize(DeviceState *dev)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(dev);
    S390CCWDevice *cdev = S390_CCW_DEVICE(dev);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);

    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX);
    vfio_ccw_put_region(vcdev);
    vfio_detach_device(&vcdev->vdev);
    g_free(vcdev->vdev.name);

    if (cdc->unrealize) {
        cdc->unrealize(cdev);
    }
}
676
/* qdev properties: host sysfs path and the ORB prefetch override. */
static Property vfio_ccw_properties[] = {
    DEFINE_PROP_STRING("sysfsdev", VFIOCCWDevice, vdev.sysfsdev),
    DEFINE_PROP_BOOL("force-orb-pfch", VFIOCCWDevice, force_orb_pfch, false),
    DEFINE_PROP_END_OF_LIST(),
};
682
/* vfio-ccw devices cannot be migrated. */
static const VMStateDescription vfio_ccw_vmstate = {
    .name = "vfio-ccw",
    .unmigratable = 1,
};
687
/* Class init: wire up qdev callbacks and the subchannel handler ops. */
static void vfio_ccw_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_CLASS(klass);

    /* Generic device class setup. */
    dc->desc = "VFIO-based subchannel assignment";
    dc->vmsd = &vfio_ccw_vmstate;
    dc->realize = vfio_ccw_realize;
    dc->unrealize = vfio_ccw_unrealize;
    dc->reset = vfio_ccw_reset;
    device_class_set_props(dc, vfio_ccw_properties);
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);

    /* Subchannel instruction handlers forwarded to the kernel driver. */
    cdc->handle_request = vfio_ccw_handle_request;
    cdc->handle_halt = vfio_ccw_handle_halt;
    cdc->handle_clear = vfio_ccw_handle_clear;
    cdc->handle_store = vfio_ccw_handle_store;
}
706
/* QOM type registration data for the vfio-ccw device. */
static const TypeInfo vfio_ccw_info = {
    .name = TYPE_VFIO_CCW,
    .parent = TYPE_S390_CCW,
    .instance_size = sizeof(VFIOCCWDevice),
    .class_init = vfio_ccw_class_init,
};
713
/* Register the vfio-ccw type with QOM at module init time. */
static void register_vfio_ccw_type(void)
{
    type_register_static(&vfio_ccw_info);
}

type_init(register_vfio_ccw_type)
720