/*
 * vfio based subchannel assignment support
 *
 * Copyright 2017 IBM Corp.
 * Copyright 2019 Red Hat, Inc.
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Pierre Morel <pmorel@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include CONFIG_DEVICES /* CONFIG_IOMMUFD */
#include <linux/vfio.h>
#include <linux/vfio_ccw.h>
#include <sys/ioctl.h>

#include "qapi/error.h"
#include "hw/vfio/vfio-common.h"
#include "system/iommufd.h"
#include "hw/s390x/s390-ccw.h"
#include "hw/s390x/vfio-ccw.h"
#include "hw/qdev-properties.h"
#include "hw/s390x/ccw-device.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"

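/*
 * Per-device state for a passed-through subchannel: the embedded
 * S390CCWDevice/VFIODevice plus the offsets, sizes and shadow buffers of
 * the vfio-ccw regions (I/O, async command, schib, crw) and the event
 * notifiers used for the I/O, CRW and device-request IRQs.
 */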
struct VFIOCCWDevice {
    S390CCWDevice cdev;
    VFIODevice vdev;
    uint64_t io_region_size;
    uint64_t io_region_offset;
    struct ccw_io_region *io_region;
    uint64_t async_cmd_region_size;
    uint64_t async_cmd_region_offset;
    struct ccw_cmd_region *async_cmd_region;
    uint64_t schib_region_size;
    uint64_t schib_region_offset;
    struct ccw_schib_region *schib_region;
    uint64_t crw_region_size;
    uint64_t crw_region_offset;
    struct ccw_crw_region *crw_region;
    EventNotifier io_notifier;
    EventNotifier crw_notifier;
    EventNotifier req_notifier;
    bool force_orb_pfch;
};

static void vfio_ccw_compute_needs_reset(VFIODevice *vdev)
{
    vdev->needs_reset = false;
}

/*
 * We don't need vfio_hot_reset_multi and vfio_eoi operations for
 * vfio_ccw device now.
 */
struct VFIODeviceOps vfio_ccw_ops = {
    .vfio_compute_needs_reset = vfio_ccw_compute_needs_reset,
};

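/*
 * Forward an SSCH request to the host: copy the guest's ORB and SCSW into
 * the shadow I/O region, write the region to the vfio device fd, and map
 * the result (0 or -errno) onto the condition code returned to the guest.
 */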
static IOInstEnding vfio_ccw_handle_request(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    struct ccw_io_region *region = vcdev->io_region;
    int ret;

    if (!(sch->orb.ctrl0 & ORB_CTRL0_MASK_PFCH) && vcdev->force_orb_pfch) {
        sch->orb.ctrl0 |= ORB_CTRL0_MASK_PFCH;
        warn_report_once("vfio-ccw (devno %x.%x.%04x): PFCH flag forced",
                         sch->cssid, sch->ssid, sch->devno);
    }

    QEMU_BUILD_BUG_ON(sizeof(region->orb_area) != sizeof(ORB));
    QEMU_BUILD_BUG_ON(sizeof(region->scsw_area) != sizeof(SCSW));
    QEMU_BUILD_BUG_ON(sizeof(region->irb_area) != sizeof(IRB));

    memset(region, 0, sizeof(*region));

    memcpy(region->orb_area, &sch->orb, sizeof(ORB));
    memcpy(region->scsw_area, &sch->curr_status.scsw, sizeof(SCSW));

again:
    ret = pwrite(vcdev->vdev.fd, region,
                 vcdev->io_region_size, vcdev->io_region_offset);
    if (ret != vcdev->io_region_size) {
        if (errno == EAGAIN) {
            goto again;
        }
        error_report("vfio-ccw: write I/O region failed with errno=%d", errno);
        ret = errno ? -errno : -EFAULT;
    } else {
        ret = 0;
    }
    switch (ret) {
    case 0:
        return IOINST_CC_EXPECTED;
    case -EBUSY:
        return IOINST_CC_BUSY;
    case -ENODEV:
    case -EACCES:
        return IOINST_CC_NOT_OPERATIONAL;
    case -EFAULT:
    default:
        sch_gen_unit_exception(sch);
        css_inject_io_interrupt(sch);
        return IOINST_CC_EXPECTED;
    }
}

static IOInstEnding vfio_ccw_handle_store(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    SCHIB *schib = &sch->curr_status;
    struct ccw_schib_region *region = vcdev->schib_region;
    SCHIB *s;
    int ret;

    /* schib region not available so nothing else to do */
    if (!region) {
        return IOINST_CC_EXPECTED;
    }

    memset(region, 0, sizeof(*region));
    ret = pread(vcdev->vdev.fd, region, vcdev->schib_region_size,
                vcdev->schib_region_offset);

    if (ret == -1) {
        /*
         * Device is probably damaged, but store subchannel does not
         * have a nonzero cc defined for this scenario. Log an error,
         * and presume things are otherwise fine.
         */
        error_report("vfio-ccw: store region read failed with errno=%d", errno);
        return IOINST_CC_EXPECTED;
    }

    /*
     * Selectively copy path-related bits of the SCHIB,
     * rather than copying the entire struct.
     */
    s = (SCHIB *)region->schib_area;
    schib->pmcw.pnom = s->pmcw.pnom;
    schib->pmcw.lpum = s->pmcw.lpum;
    schib->pmcw.pam = s->pmcw.pam;
    schib->pmcw.pom = s->pmcw.pom;

    if (s->scsw.flags & SCSW_FLAGS_MASK_PNO) {
        schib->scsw.flags |= SCSW_FLAGS_MASK_PNO;
    }

    return IOINST_CC_EXPECTED;
}

static int vfio_ccw_handle_clear(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    struct ccw_cmd_region *region = vcdev->async_cmd_region;
    int ret;

    if (!vcdev->async_cmd_region) {
        /* Async command region not available, fall back to emulation */
        return -ENOSYS;
    }

    memset(region, 0, sizeof(*region));
    region->command = VFIO_CCW_ASYNC_CMD_CSCH;

again:
    ret = pwrite(vcdev->vdev.fd, region,
                 vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset);
    if (ret != vcdev->async_cmd_region_size) {
        if (errno == EAGAIN) {
            goto again;
        }
        error_report("vfio-ccw: write cmd region failed with errno=%d", errno);
        ret = errno ? -errno : -EFAULT;
    } else {
        ret = 0;
    }
    switch (ret) {
    case 0:
    case -ENODEV:
    case -EACCES:
        return ret;
    case -EFAULT:
    default:
        sch_gen_unit_exception(sch);
        css_inject_io_interrupt(sch);
        return 0;
    }
}

static int vfio_ccw_handle_halt(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    struct ccw_cmd_region *region = vcdev->async_cmd_region;
    int ret;

    if (!vcdev->async_cmd_region) {
        /* Async command region not available, fall back to emulation */
        return -ENOSYS;
    }

    memset(region, 0, sizeof(*region));
    region->command = VFIO_CCW_ASYNC_CMD_HSCH;

again:
    ret = pwrite(vcdev->vdev.fd, region,
                 vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset);
    if (ret != vcdev->async_cmd_region_size) {
        if (errno == EAGAIN) {
            goto again;
        }
        error_report("vfio-ccw: write cmd region failed with errno=%d", errno);
        ret = errno ? -errno : -EFAULT;
    } else {
        ret = 0;
    }
    switch (ret) {
    case 0:
    case -EBUSY:
    case -ENODEV:
    case -EACCES:
        return ret;
    case -EFAULT:
    default:
        sch_gen_unit_exception(sch);
        css_inject_io_interrupt(sch);
        return 0;
    }
}

static void vfio_ccw_reset(DeviceState *dev)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(dev);

    ioctl(vcdev->vdev.fd, VFIO_DEVICE_RESET);
}

static void vfio_ccw_crw_read(VFIOCCWDevice *vcdev)
{
    struct ccw_crw_region *region = vcdev->crw_region;
    CRW crw;
    int size;

    /* Keep reading CRWs as long as data is returned */
    do {
        memset(region, 0, sizeof(*region));
        size = pread(vcdev->vdev.fd, region, vcdev->crw_region_size,
                     vcdev->crw_region_offset);

        if (size == -1) {
            error_report("vfio-ccw: Read crw region failed with errno=%d",
                         errno);
            break;
        }

        if (region->crw == 0) {
            /* No more CRWs to queue */
            break;
        }

        memcpy(&crw, &region->crw, sizeof(CRW));

        css_crw_add_to_queue(crw);
    } while (1);
}

static void vfio_ccw_req_notifier_handler(void *opaque)
{
    VFIOCCWDevice *vcdev = opaque;
    Error *err = NULL;

    if (!event_notifier_test_and_clear(&vcdev->req_notifier)) {
        return;
    }

    qdev_unplug(DEVICE(vcdev), &err);
    if (err) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name);
    }
}

static void vfio_ccw_crw_notifier_handler(void *opaque)
{
    VFIOCCWDevice *vcdev = opaque;

    while (event_notifier_test_and_clear(&vcdev->crw_notifier)) {
        vfio_ccw_crw_read(vcdev);
    }
}

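/*
 * I/O interrupt: read the IRB back from the I/O region, fold its SCSW/ESW
 * (and sense data on unit check) into the emulated subchannel, and inject
 * an I/O interrupt for the guest. Read errors are turned into the closest
 * matching channel condition instead.
 */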
static void vfio_ccw_io_notifier_handler(void *opaque)
{
    VFIOCCWDevice *vcdev = opaque;
    struct ccw_io_region *region = vcdev->io_region;
    CcwDevice *ccw_dev = CCW_DEVICE(vcdev);
    SubchDev *sch = ccw_dev->sch;
    SCHIB *schib = &sch->curr_status;
    SCSW s;
    IRB irb;
    ESW esw;
    int size;

    if (!event_notifier_test_and_clear(&vcdev->io_notifier)) {
        return;
    }

    size = pread(vcdev->vdev.fd, region, vcdev->io_region_size,
                 vcdev->io_region_offset);
    if (size == -1) {
        switch (errno) {
        case ENODEV:
            /* Generate a deferred cc 3 condition. */
            schib->scsw.flags |= SCSW_FLAGS_MASK_CC;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
            goto read_err;
        case EFAULT:
            /* Memory problem, generate channel data check. */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.cstat = SCSW_CSTAT_DATA_CHECK;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                                SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            goto read_err;
        default:
            /* Error, generate channel program check. */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.cstat = SCSW_CSTAT_PROG_CHECK;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                                SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            goto read_err;
        }
    } else if (size != vcdev->io_region_size) {
        /* Information transfer error, generate channel-control check. */
        schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
        schib->scsw.cstat = SCSW_CSTAT_CHN_CTRL_CHK;
        schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
        schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                            SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
        goto read_err;
    }

    memcpy(&irb, region->irb_area, sizeof(IRB));

    /* Update control block via irb. */
    s = schib->scsw;
    copy_scsw_to_guest(&s, &irb.scsw);
    schib->scsw = s;

    copy_esw_to_guest(&esw, &irb.esw);
    sch->esw = esw;

    /* If a unit check is pending, copy sense data. */
    if ((schib->scsw.dstat & SCSW_DSTAT_UNIT_CHECK) &&
        (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE)) {
        memcpy(sch->sense_data, irb.ecw, sizeof(irb.ecw));
    }

read_err:
    css_inject_io_interrupt(sch);
}

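/*
 * Wire up one of the vfio-ccw IRQs (I/O, CRW or device request) to an
 * eventfd: query the IRQ with VFIO_DEVICE_GET_IRQ_INFO, create an
 * EventNotifier, install the matching handler on its fd, and ask the host
 * driver to signal it via VFIO_IRQ_SET_ACTION_TRIGGER.
 */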
static bool vfio_ccw_register_irq_notifier(VFIOCCWDevice *vcdev,
                                           unsigned int irq,
                                           Error **errp)
{
    VFIODevice *vdev = &vcdev->vdev;
    g_autofree struct vfio_irq_info *irq_info = NULL;
    size_t argsz;
    int fd;
    EventNotifier *notifier;
    IOHandler *fd_read;

    switch (irq) {
    case VFIO_CCW_IO_IRQ_INDEX:
        notifier = &vcdev->io_notifier;
        fd_read = vfio_ccw_io_notifier_handler;
        break;
    case VFIO_CCW_CRW_IRQ_INDEX:
        notifier = &vcdev->crw_notifier;
        fd_read = vfio_ccw_crw_notifier_handler;
        break;
    case VFIO_CCW_REQ_IRQ_INDEX:
        notifier = &vcdev->req_notifier;
        fd_read = vfio_ccw_req_notifier_handler;
        break;
    default:
        error_setg(errp, "vfio: Unsupported device irq(%d)", irq);
        return false;
    }

    if (vdev->num_irqs < irq + 1) {
        error_setg(errp, "vfio: IRQ %u not available (number of irqs %u)",
                   irq, vdev->num_irqs);
        return false;
    }

    argsz = sizeof(*irq_info);
    irq_info = g_malloc0(argsz);
    irq_info->index = irq;
    irq_info->argsz = argsz;
    if (ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO,
              irq_info) < 0 || irq_info->count < 1) {
        error_setg_errno(errp, errno, "vfio: Error getting irq info");
        return false;
    }

    if (event_notifier_init(notifier, 0)) {
        error_setg_errno(errp, errno,
                         "vfio: Unable to init event notifier for irq (%d)",
                         irq);
        return false;
    }

    fd = event_notifier_get_fd(notifier);
    qemu_set_fd_handler(fd, fd_read, NULL, vcdev);

    if (!vfio_set_irq_signaling(vdev, irq, 0,
                                VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
        qemu_set_fd_handler(fd, NULL, NULL, vcdev);
        event_notifier_cleanup(notifier);
    }

    return true;
}

static void vfio_ccw_unregister_irq_notifier(VFIOCCWDevice *vcdev,
                                             unsigned int irq)
{
    Error *err = NULL;
    EventNotifier *notifier;

    switch (irq) {
    case VFIO_CCW_IO_IRQ_INDEX:
        notifier = &vcdev->io_notifier;
        break;
    case VFIO_CCW_CRW_IRQ_INDEX:
        notifier = &vcdev->crw_notifier;
        break;
    case VFIO_CCW_REQ_IRQ_INDEX:
        notifier = &vcdev->req_notifier;
        break;
    default:
        error_report("vfio: Unsupported device irq(%d)", irq);
        return;
    }

    if (!vfio_set_irq_signaling(&vcdev->vdev, irq, 0,
                                VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name);
    }

    qemu_set_fd_handler(event_notifier_get_fd(notifier),
                        NULL, NULL, vcdev);
    event_notifier_cleanup(notifier);
}

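/*
 * Discover the vfio-ccw regions exposed by the host driver and allocate
 * shadow buffers for them. The I/O (config) region is mandatory; the
 * async command, schib and crw regions are optional capabilities and are
 * simply left unset when the host does not provide them.
 */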
static bool vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp)
{
    VFIODevice *vdev = &vcdev->vdev;
    struct vfio_region_info *info;
    int ret;

    /* Sanity check device */
    if (!(vdev->flags & VFIO_DEVICE_FLAGS_CCW)) {
        error_setg(errp, "vfio: Um, this isn't a vfio-ccw device");
        return false;
    }

    /*
     * We always expect at least the I/O region to be present. We also
     * may have a variable number of regions governed by capabilities.
     */
    if (vdev->num_regions < VFIO_CCW_CONFIG_REGION_INDEX + 1) {
        error_setg(errp, "vfio: too few regions (%u), expected at least %u",
                   vdev->num_regions, VFIO_CCW_CONFIG_REGION_INDEX + 1);
        return false;
    }

    ret = vfio_get_region_info(vdev, VFIO_CCW_CONFIG_REGION_INDEX, &info);
    if (ret) {
        error_setg_errno(errp, -ret, "vfio: Error getting config info");
        return false;
    }

    vcdev->io_region_size = info->size;
    if (sizeof(*vcdev->io_region) != vcdev->io_region_size) {
        error_setg(errp, "vfio: Unexpected size of the I/O region");
        goto out_err;
    }

    vcdev->io_region_offset = info->offset;
    vcdev->io_region = g_malloc0(info->size);
    g_free(info);

    /* check for the optional async command region */
    ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
                                   VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD, &info);
    if (!ret) {
        vcdev->async_cmd_region_size = info->size;
        if (sizeof(*vcdev->async_cmd_region) != vcdev->async_cmd_region_size) {
            error_setg(errp, "vfio: Unexpected size of the async cmd region");
            goto out_err;
        }
        vcdev->async_cmd_region_offset = info->offset;
        vcdev->async_cmd_region = g_malloc0(info->size);
        g_free(info);
    }

    ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
                                   VFIO_REGION_SUBTYPE_CCW_SCHIB, &info);
    if (!ret) {
        vcdev->schib_region_size = info->size;
        if (sizeof(*vcdev->schib_region) != vcdev->schib_region_size) {
            error_setg(errp, "vfio: Unexpected size of the schib region");
            goto out_err;
        }
        vcdev->schib_region_offset = info->offset;
        vcdev->schib_region = g_malloc(info->size);
        g_free(info);
    }

    ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
                                   VFIO_REGION_SUBTYPE_CCW_CRW, &info);

    if (!ret) {
        vcdev->crw_region_size = info->size;
        if (sizeof(*vcdev->crw_region) != vcdev->crw_region_size) {
            error_setg(errp, "vfio: Unexpected size of the CRW region");
            goto out_err;
        }
        vcdev->crw_region_offset = info->offset;
        vcdev->crw_region = g_malloc(info->size);
        g_free(info);
    }

    return true;

out_err:
    g_free(vcdev->crw_region);
    g_free(vcdev->schib_region);
    g_free(vcdev->async_cmd_region);
    g_free(vcdev->io_region);
    g_free(info);
    return false;
}

static void vfio_ccw_put_region(VFIOCCWDevice *vcdev)
{
    g_free(vcdev->crw_region);
    g_free(vcdev->schib_region);
    g_free(vcdev->async_cmd_region);
    g_free(vcdev->io_region);
}

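/*
 * Realize: set up the subchannel via the parent class, attach the vfio
 * (mdev) device, map the regions and register the IRQ notifiers. Failure
 * of the optional device-request IRQ is only warned about, since the
 * device can operate without it.
 */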
static void vfio_ccw_realize(DeviceState *dev, Error **errp)
{
    S390CCWDevice *cdev = S390_CCW_DEVICE(dev);
    VFIOCCWDevice *vcdev = VFIO_CCW(cdev);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);
    VFIODevice *vbasedev = &vcdev->vdev;
    Error *err = NULL;

    /* Call the class init function for subchannel. */
    if (cdc->realize) {
        if (!cdc->realize(cdev, vcdev->vdev.sysfsdev, errp)) {
            return;
        }
    }

    if (!vfio_device_get_name(vbasedev, errp)) {
        goto out_unrealize;
    }

    if (!vfio_attach_device(cdev->mdevid, vbasedev,
                            &address_space_memory, errp)) {
        goto out_attach_dev_err;
    }

    if (!vfio_ccw_get_region(vcdev, errp)) {
        goto out_region_err;
    }

    if (!vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX, errp)) {
        goto out_io_notifier_err;
    }

    if (vcdev->crw_region) {
        if (!vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX,
                                            errp)) {
            goto out_irq_notifier_err;
        }
    }

    if (!vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX, &err)) {
        /*
         * Report this error, but do not make it a failing condition.
         * Lack of this IRQ in the host does not prevent normal operation.
         */
        warn_report_err(err);
    }

    return;

out_irq_notifier_err:
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX);
out_io_notifier_err:
    vfio_ccw_put_region(vcdev);
out_region_err:
    vfio_detach_device(vbasedev);
out_attach_dev_err:
    g_free(vbasedev->name);
out_unrealize:
    if (cdc->unrealize) {
        cdc->unrealize(cdev);
    }
}

static void vfio_ccw_unrealize(DeviceState *dev)
{
    S390CCWDevice *cdev = S390_CCW_DEVICE(dev);
    VFIOCCWDevice *vcdev = VFIO_CCW(cdev);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);

    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX);
    vfio_ccw_put_region(vcdev);
    vfio_detach_device(&vcdev->vdev);
    g_free(vcdev->vdev.name);

    if (cdc->unrealize) {
        cdc->unrealize(cdev);
    }
}

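/*
 * User-creatable device properties. A minimal command-line sketch (the
 * mdev UUID and devno below are purely illustrative):
 *
 *   -device vfio-ccw,sysfsdev=/sys/bus/mdev/devices/<uuid>,devno=fe.0.0001
 *
 * "devno" comes from the CcwDevice parent; "iommufd" and the "fd" class
 * property are only available when QEMU is built with CONFIG_IOMMUFD.
 */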
static const Property vfio_ccw_properties[] = {
    DEFINE_PROP_STRING("sysfsdev", VFIOCCWDevice, vdev.sysfsdev),
    DEFINE_PROP_BOOL("force-orb-pfch", VFIOCCWDevice, force_orb_pfch, false),
#ifdef CONFIG_IOMMUFD
    DEFINE_PROP_LINK("iommufd", VFIOCCWDevice, vdev.iommufd,
                     TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
#endif
    DEFINE_PROP_CCW_LOADPARM("loadparm", CcwDevice, loadparm),
};

static const VMStateDescription vfio_ccw_vmstate = {
    .name = "vfio-ccw",
    .unmigratable = 1,
};

static void vfio_ccw_instance_init(Object *obj)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(obj);
    VFIODevice *vbasedev = &vcdev->vdev;

    /* CCW devices are mdev-type vfio devices */
    vbasedev->mdev = true;

    /*
     * All vfio-ccw devices are believed to operate in a way compatible with
     * discarding of memory in RAM blocks, i.e. pages pinned in the host are
     * in the current working set of the guest driver and therefore never
     * overlap e.g., with pages available to the guest balloon driver. This
     * needs to be set before vfio_get_device() for vfio common to handle
     * ram_block_discard_disable().
     */
    vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_CCW, &vfio_ccw_ops,
                     DEVICE(vcdev), true);
}

#ifdef CONFIG_IOMMUFD
static void vfio_ccw_set_fd(Object *obj, const char *str, Error **errp)
{
    vfio_device_set_fd(&VFIO_CCW(obj)->vdev, str, errp);
}
#endif

static void vfio_ccw_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_CLASS(klass);

    device_class_set_props(dc, vfio_ccw_properties);
#ifdef CONFIG_IOMMUFD
    object_class_property_add_str(klass, "fd", NULL, vfio_ccw_set_fd);
#endif
    dc->vmsd = &vfio_ccw_vmstate;
    dc->desc = "VFIO-based subchannel assignment";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->realize = vfio_ccw_realize;
    dc->unrealize = vfio_ccw_unrealize;
    device_class_set_legacy_reset(dc, vfio_ccw_reset);

    cdc->handle_request = vfio_ccw_handle_request;
    cdc->handle_halt = vfio_ccw_handle_halt;
    cdc->handle_clear = vfio_ccw_handle_clear;
    cdc->handle_store = vfio_ccw_handle_store;

    object_class_property_set_description(klass, /* 2.10 */
                                          "sysfsdev",
                                          "Host sysfs path of assigned device");
    object_class_property_set_description(klass, /* 3.0 */
                                          "force-orb-pfch",
                                          "Force unlimited prefetch");
#ifdef CONFIG_IOMMUFD
    object_class_property_set_description(klass, /* 9.0 */
                                          "iommufd",
                                          "Set host IOMMUFD backend device");
#endif
    object_class_property_set_description(klass, /* 9.2 */
                                          "loadparm",
                                          "Define which devices can be used for booting");
}

static const TypeInfo vfio_ccw_info = {
    .name = TYPE_VFIO_CCW,
    .parent = TYPE_S390_CCW,
    .instance_size = sizeof(VFIOCCWDevice),
    .instance_init = vfio_ccw_instance_init,
    .class_init = vfio_ccw_class_init,
};

static void register_vfio_ccw_type(void)
{
    type_register_static(&vfio_ccw_info);
}

type_init(register_vfio_ccw_type)