xref: /openbmc/qemu/hw/s390x/virtio-ccw.c (revision 4d87fcdd)
1 /*
2  * virtio ccw target implementation
3  *
4  * Copyright 2012,2015 IBM Corp.
5  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6  *            Pierre Morel <pmorel@linux.vnet.ibm.com>
7  *
8  * This work is licensed under the terms of the GNU GPL, version 2 or (at
9  * your option) any later version. See the COPYING file in the top-level
10  * directory.
11  */
12 
13 #include "qemu/osdep.h"
14 #include "qapi/error.h"
15 #include "sysemu/kvm.h"
16 #include "net/net.h"
17 #include "hw/virtio/virtio.h"
18 #include "migration/qemu-file-types.h"
19 #include "hw/virtio/virtio-net.h"
20 #include "qemu/bitops.h"
21 #include "qemu/error-report.h"
22 #include "qemu/module.h"
23 #include "hw/virtio/virtio-access.h"
24 #include "hw/virtio/virtio-bus.h"
25 #include "hw/s390x/adapter.h"
26 #include "hw/s390x/s390_flic.h"
27 
28 #include "hw/s390x/ioinst.h"
29 #include "hw/s390x/css.h"
30 #include "virtio-ccw.h"
31 #include "trace.h"
32 #include "hw/s390x/css-bridge.h"
33 #include "hw/s390x/s390-virtio-ccw.h"
34 
/*
 * Classic (non-thinint) indicators are a single 64-bit word, so at most
 * 64 virtqueues can be signalled without adapter interrupts (enforced in
 * the CCW_CMD_SET_IND handler below).
 */
#define NR_CLASSIC_INDICATOR_BITS 64

/* Presumably cleared by machine setups without virtio-ccw — confirm at users. */
bool have_virtio_ccw = true;
38 
/*
 * Migration post_load hook: re-establish runtime links that are not part
 * of the migrated state itself.
 */
static int virtio_ccw_dev_post_load(void *opaque, int version_id)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(opaque);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);

    /* The subchannel's back-pointer to us is runtime-only state. */
    ccw_dev->sch->driver_data = dev;
    if (ccw_dev->sch->thinint_active) {
        /* Recompute the adapter id from the current configuration. */
        dev->routes.adapter.adapter_id = css_get_adapter_id(
                                         CSS_IO_ADAPTER_VIRTIO,
                                         dev->thinint_isc);
    }
    /* Re-fill subch_id after loading the subchannel states.*/
    if (ck->refill_ids) {
        ck->refill_ids(ccw_dev);
    }
    return 0;
}
57 
/*
 * Scratch structure used via VMSTATE_WITH_TMP to migrate the config
 * vector of the VirtIODevice sitting on our bus.
 */
typedef struct VirtioCcwDeviceTmp {
    VirtioCcwDevice *parent;   /* filled in by the VMSTATE_WITH_TMP machinery */
    uint16_t config_vector;    /* copy of vdev->config_vector for the wire */
} VirtioCcwDeviceTmp;
62 
63 static int virtio_ccw_dev_tmp_pre_save(void *opaque)
64 {
65     VirtioCcwDeviceTmp *tmp = opaque;
66     VirtioCcwDevice *dev = tmp->parent;
67     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
68 
69     tmp->config_vector = vdev->config_vector;
70 
71     return 0;
72 }
73 
74 static int virtio_ccw_dev_tmp_post_load(void *opaque, int version_id)
75 {
76     VirtioCcwDeviceTmp *tmp = opaque;
77     VirtioCcwDevice *dev = tmp->parent;
78     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
79 
80     vdev->config_vector = tmp->config_vector;
81     return 0;
82 }
83 
/*
 * Migrates only the config vector of the VirtIODevice on our bus;
 * pre_save/post_load copy it between the device and the tmp scratch area.
 */
const VMStateDescription vmstate_virtio_ccw_dev_tmp = {
    .name = "s390_virtio_ccw_dev_tmp",
    .pre_save = virtio_ccw_dev_tmp_pre_save,
    .post_load = virtio_ccw_dev_tmp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(config_vector, VirtioCcwDeviceTmp),
        VMSTATE_END_OF_LIST()
    }
};
93 
/* Migration state of the virtio-ccw proxy device itself. */
const VMStateDescription vmstate_virtio_ccw_dev = {
    .name = "s390_virtio_ccw_dev",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = virtio_ccw_dev_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_CCW_DEVICE(parent_obj, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators2, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(summary_indicator, VirtioCcwDevice),
        /*
         * Ugly hack because VirtIODevice does not migrate itself.
         * This also makes legacy via vmstate_save_state possible.
         */
        VMSTATE_WITH_TMP(VirtioCcwDevice, VirtioCcwDeviceTmp,
                         vmstate_virtio_ccw_dev_tmp),
        VMSTATE_STRUCT(routes, VirtioCcwDevice, 1, vmstate_adapter_routes,
                       AdapterRoutes),
        VMSTATE_UINT8(thinint_isc, VirtioCcwDevice),
        VMSTATE_INT32(revision, VirtioCcwDevice),
        VMSTATE_END_OF_LIST()
    }
};
117 
/* Forward declaration; defined later in this file. */
static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtioCcwDevice *dev);
120 
121 VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch)
122 {
123     VirtIODevice *vdev = NULL;
124     VirtioCcwDevice *dev = sch->driver_data;
125 
126     if (dev) {
127         vdev = virtio_bus_get_device(&dev->bus);
128     }
129     return vdev;
130 }
131 
/* Thin wrapper: delegate ioeventfd setup to the generic virtio bus code. */
static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_start_ioeventfd(&dev->bus);
}
136 
/* Thin wrapper: delegate ioeventfd teardown to the generic virtio bus code. */
static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_stop_ioeventfd(&dev->bus);
}
141 
142 static bool virtio_ccw_ioeventfd_enabled(DeviceState *d)
143 {
144     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
145 
146     return (dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) != 0;
147 }
148 
/*
 * (Un)assign an ioeventfd notifier for queue @n of this device's
 * subchannel.
 */
static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    SubchDev *sch = ccw_dev->sch;
    /* Subchannel identifier word in the upper half, schid in the lower. */
    uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;

    return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
}
159 
/* Communication blocks used by several channel commands. */

/* CCW_CMD_SET_VQ payload, legacy (pre-virtio-1) layout. */
typedef struct VqInfoBlockLegacy {
    uint64_t queue;     /* guest address of the contiguous ring */
    uint32_t align;     /* ring alignment; only 4096 is accepted */
    uint16_t index;     /* queue index */
    uint16_t num;       /* number of ring entries */
} QEMU_PACKED VqInfoBlockLegacy;

/* CCW_CMD_SET_VQ payload, virtio-1 layout with split ring areas. */
typedef struct VqInfoBlock {
    uint64_t desc;      /* descriptor area */
    uint32_t res0;      /* reserved */
    uint16_t index;     /* queue index */
    uint16_t num;       /* number of ring entries */
    uint64_t avail;     /* driver (avail) area */
    uint64_t used;      /* device (used) area */
} QEMU_PACKED VqInfoBlock;

/* CCW_CMD_READ_VQ_CONF: guest supplies index, we fill in num_max. */
typedef struct VqConfigBlock {
    uint16_t index;
    uint16_t num_max;
} QEMU_PACKED VqConfigBlock;

/* CCW_CMD_READ_FEAT / CCW_CMD_WRITE_FEAT: one 32-bit slice of features. */
typedef struct VirtioFeatDesc {
    uint32_t features;
    uint8_t index;      /* 0 = bits 0-31, 1 = bits 32-63 */
} QEMU_PACKED VirtioFeatDesc;

/* CCW_CMD_SET_IND_ADAPTER payload (thinint setup). */
typedef struct VirtioThinintInfo {
    hwaddr summary_indicator;
    hwaddr device_indicator;
    uint64_t ind_bit;   /* start bit of this device's indicators */
    uint8_t isc;        /* interruption subclass to use */
} QEMU_PACKED VirtioThinintInfo;

/* CCW_CMD_SET_VIRTIO_REV payload. */
typedef struct VirtioRevInfo {
    uint16_t revision;
    uint16_t length;    /* length of revision-specific data that follows */
    uint8_t data[];
} QEMU_PACKED VirtioRevInfo;
199 
/* Specify where the virtqueues for the subchannel are in guest memory. */
static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info,
                              VqInfoBlockLegacy *linfo)
{
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    /* Exactly one of info (virtio-1) or linfo (legacy) is provided. */
    uint16_t index = info ? info->index : linfo->index;
    uint16_t num = info ? info->num : linfo->num;
    uint64_t desc = info ? info->desc : linfo->queue;

    if (index >= VIRTIO_QUEUE_MAX) {
        return -EINVAL;
    }

    /* Current code in virtio.c relies on 4K alignment. */
    if (linfo && desc && (linfo->align != 4096)) {
        return -EINVAL;
    }

    if (!vdev) {
        return -EINVAL;
    }

    if (info) {
        /* virtio-1: descriptor, avail and used areas are set individually. */
        virtio_queue_set_rings(vdev, index, desc, info->avail, info->used);
    } else {
        /* Legacy: one contiguous ring starting at desc. */
        virtio_queue_set_addr(vdev, index, desc);
    }
    if (!desc) {
        /* No ring registered: disable notifications for this queue. */
        virtio_queue_set_vector(vdev, index, VIRTIO_NO_VECTOR);
    } else {
        if (info) {
            /* virtio-1 allows changing the ring size. */
            if (virtio_queue_get_max_num(vdev, index) < num) {
                /* Fail if we exceed the maximum number. */
                return -EINVAL;
            }
            virtio_queue_set_num(vdev, index, num);
        } else if (virtio_queue_get_num(vdev, index) > num) {
            /* Fail if we don't have a big enough queue. */
            return -EINVAL;
        }
        /* We ignore possible increased num for legacy for compatibility. */
        virtio_queue_set_vector(vdev, index, index);
    }
    /* tell notify handler in case of config change */
    vdev->config_vector = VIRTIO_QUEUE_MAX;
    return 0;
}
248 
249 static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev, VirtIODevice *vdev)
250 {
251     CcwDevice *ccw_dev = CCW_DEVICE(dev);
252 
253     virtio_ccw_stop_ioeventfd(dev);
254     virtio_reset(vdev);
255     if (dev->indicators) {
256         release_indicator(&dev->routes.adapter, dev->indicators);
257         dev->indicators = NULL;
258     }
259     if (dev->indicators2) {
260         release_indicator(&dev->routes.adapter, dev->indicators2);
261         dev->indicators2 = NULL;
262     }
263     if (dev->summary_indicator) {
264         release_indicator(&dev->routes.adapter, dev->summary_indicator);
265         dev->summary_indicator = NULL;
266     }
267     ccw_dev->sch->thinint_active = false;
268 }
269 
/*
 * Handle CCW_CMD_SET_VQ: read a legacy or virtio-1 VQ info block from
 * the channel data stream, byte-swap it and apply it via
 * virtio_ccw_set_vqs().
 */
static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len,
                                    bool is_legacy)
{
    int ret;
    VqInfoBlock info;
    VqInfoBlockLegacy linfo;
    size_t info_len = is_legacy ? sizeof(linfo) : sizeof(info);

    if (check_len) {
        if (ccw.count != info_len) {
            return -EINVAL;
        }
    } else if (ccw.count < info_len) {
        /* Can't execute command. */
        return -EINVAL;
    }
    if (!ccw.cda) {
        return -EFAULT;
    }
    if (is_legacy) {
        ret = ccw_dstream_read(&sch->cds, linfo);
        if (ret) {
            return ret;
        }
        /* All payload fields are transmitted big endian. */
        linfo.queue = be64_to_cpu(linfo.queue);
        linfo.align = be32_to_cpu(linfo.align);
        linfo.index = be16_to_cpu(linfo.index);
        linfo.num = be16_to_cpu(linfo.num);
        ret = virtio_ccw_set_vqs(sch, NULL, &linfo);
    } else {
        ret = ccw_dstream_read(&sch->cds, info);
        if (ret) {
            return ret;
        }
        info.desc = be64_to_cpu(info.desc);
        info.index = be16_to_cpu(info.index);
        info.num = be16_to_cpu(info.num);
        info.avail = be64_to_cpu(info.avail);
        info.used = be64_to_cpu(info.used);
        ret = virtio_ccw_set_vqs(sch, &info, NULL);
    }
    /* Whole payload consumed: no residual count. */
    sch->curr_status.scsw.count = 0;
    return ret;
}
314 
315 static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
316 {
317     int ret;
318     VirtioRevInfo revinfo;
319     uint8_t status;
320     VirtioFeatDesc features;
321     hwaddr indicators;
322     VqConfigBlock vq_config;
323     VirtioCcwDevice *dev = sch->driver_data;
324     VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
325     bool check_len;
326     int len;
327     VirtioThinintInfo thinint;
328 
329     if (!dev) {
330         return -EINVAL;
331     }
332 
333     trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
334                                    ccw.cmd_code);
335     check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));
336 
337     if (dev->revision < 0 && ccw.cmd_code != CCW_CMD_SET_VIRTIO_REV) {
338         if (dev->force_revision_1) {
339             /*
340              * virtio-1 drivers must start with negotiating to a revision >= 1,
341              * so post a command reject for all other commands
342              */
343             return -ENOSYS;
344         } else {
345             /*
346              * If the driver issues any command that is not SET_VIRTIO_REV,
347              * we'll have to operate the device in legacy mode.
348              */
349             dev->revision = 0;
350         }
351     }
352 
353     /* Look at the command. */
354     switch (ccw.cmd_code) {
355     case CCW_CMD_SET_VQ:
356         ret = virtio_ccw_handle_set_vq(sch, ccw, check_len, dev->revision < 1);
357         break;
358     case CCW_CMD_VDEV_RESET:
359         virtio_ccw_reset_virtio(dev, vdev);
360         ret = 0;
361         break;
362     case CCW_CMD_READ_FEAT:
363         if (check_len) {
364             if (ccw.count != sizeof(features)) {
365                 ret = -EINVAL;
366                 break;
367             }
368         } else if (ccw.count < sizeof(features)) {
369             /* Can't execute command. */
370             ret = -EINVAL;
371             break;
372         }
373         if (!ccw.cda) {
374             ret = -EFAULT;
375         } else {
376             VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
377 
378             ccw_dstream_advance(&sch->cds, sizeof(features.features));
379             ret = ccw_dstream_read(&sch->cds, features.index);
380             if (ret) {
381                 break;
382             }
383             if (features.index == 0) {
384                 if (dev->revision >= 1) {
385                     /* Don't offer legacy features for modern devices. */
386                     features.features = (uint32_t)
387                         (vdev->host_features & ~vdc->legacy_features);
388                 } else {
389                     features.features = (uint32_t)vdev->host_features;
390                 }
391             } else if ((features.index == 1) && (dev->revision >= 1)) {
392                 /*
393                  * Only offer feature bits beyond 31 if the guest has
394                  * negotiated at least revision 1.
395                  */
396                 features.features = (uint32_t)(vdev->host_features >> 32);
397             } else {
398                 /* Return zeroes if the guest supports more feature bits. */
399                 features.features = 0;
400             }
401             ccw_dstream_rewind(&sch->cds);
402             features.features = cpu_to_le32(features.features);
403             ret = ccw_dstream_write(&sch->cds, features.features);
404             if (!ret) {
405                 sch->curr_status.scsw.count = ccw.count - sizeof(features);
406             }
407         }
408         break;
409     case CCW_CMD_WRITE_FEAT:
410         if (check_len) {
411             if (ccw.count != sizeof(features)) {
412                 ret = -EINVAL;
413                 break;
414             }
415         } else if (ccw.count < sizeof(features)) {
416             /* Can't execute command. */
417             ret = -EINVAL;
418             break;
419         }
420         if (!ccw.cda) {
421             ret = -EFAULT;
422         } else {
423             ret = ccw_dstream_read(&sch->cds, features);
424             if (ret) {
425                 break;
426             }
427             features.features = le32_to_cpu(features.features);
428             if (features.index == 0) {
429                 virtio_set_features(vdev,
430                                     (vdev->guest_features & 0xffffffff00000000ULL) |
431                                     features.features);
432             } else if ((features.index == 1) && (dev->revision >= 1)) {
433                 /*
434                  * If the guest did not negotiate at least revision 1,
435                  * we did not offer it any feature bits beyond 31. Such a
436                  * guest passing us any bit here is therefore buggy.
437                  */
438                 virtio_set_features(vdev,
439                                     (vdev->guest_features & 0x00000000ffffffffULL) |
440                                     ((uint64_t)features.features << 32));
441             } else {
442                 /*
443                  * If the guest supports more feature bits, assert that it
444                  * passes us zeroes for those we don't support.
445                  */
446                 if (features.features) {
447                     qemu_log_mask(LOG_GUEST_ERROR,
448                                   "Guest bug: features[%i]=%x (expected 0)",
449                                   features.index, features.features);
450                     /* XXX: do a unit check here? */
451                 }
452             }
453             sch->curr_status.scsw.count = ccw.count - sizeof(features);
454             ret = 0;
455         }
456         break;
457     case CCW_CMD_READ_CONF:
458         if (check_len) {
459             if (ccw.count > vdev->config_len) {
460                 ret = -EINVAL;
461                 break;
462             }
463         }
464         len = MIN(ccw.count, vdev->config_len);
465         if (!ccw.cda) {
466             ret = -EFAULT;
467         } else {
468             virtio_bus_get_vdev_config(&dev->bus, vdev->config);
469             ret = ccw_dstream_write_buf(&sch->cds, vdev->config, len);
470             if (ret) {
471                 sch->curr_status.scsw.count = ccw.count - len;
472             }
473         }
474         break;
475     case CCW_CMD_WRITE_CONF:
476         if (check_len) {
477             if (ccw.count > vdev->config_len) {
478                 ret = -EINVAL;
479                 break;
480             }
481         }
482         len = MIN(ccw.count, vdev->config_len);
483         if (!ccw.cda) {
484             ret = -EFAULT;
485         } else {
486             ret = ccw_dstream_read_buf(&sch->cds, vdev->config, len);
487             if (!ret) {
488                 virtio_bus_set_vdev_config(&dev->bus, vdev->config);
489                 sch->curr_status.scsw.count = ccw.count - len;
490             }
491         }
492         break;
493     case CCW_CMD_READ_STATUS:
494         if (check_len) {
495             if (ccw.count != sizeof(status)) {
496                 ret = -EINVAL;
497                 break;
498             }
499         } else if (ccw.count < sizeof(status)) {
500             /* Can't execute command. */
501             ret = -EINVAL;
502             break;
503         }
504         if (!ccw.cda) {
505             ret = -EFAULT;
506         } else {
507             address_space_stb(&address_space_memory, ccw.cda, vdev->status,
508                                         MEMTXATTRS_UNSPECIFIED, NULL);
509             sch->curr_status.scsw.count = ccw.count - sizeof(vdev->status);
510             ret = 0;
511         }
512         break;
513     case CCW_CMD_WRITE_STATUS:
514         if (check_len) {
515             if (ccw.count != sizeof(status)) {
516                 ret = -EINVAL;
517                 break;
518             }
519         } else if (ccw.count < sizeof(status)) {
520             /* Can't execute command. */
521             ret = -EINVAL;
522             break;
523         }
524         if (!ccw.cda) {
525             ret = -EFAULT;
526         } else {
527             ret = ccw_dstream_read(&sch->cds, status);
528             if (ret) {
529                 break;
530             }
531             if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
532                 virtio_ccw_stop_ioeventfd(dev);
533             }
534             if (virtio_set_status(vdev, status) == 0) {
535                 if (vdev->status == 0) {
536                     virtio_ccw_reset_virtio(dev, vdev);
537                 }
538                 if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
539                     virtio_ccw_start_ioeventfd(dev);
540                 }
541                 sch->curr_status.scsw.count = ccw.count - sizeof(status);
542                 ret = 0;
543             } else {
544                 /* Trigger a command reject. */
545                 ret = -ENOSYS;
546             }
547         }
548         break;
549     case CCW_CMD_SET_IND:
550         if (check_len) {
551             if (ccw.count != sizeof(indicators)) {
552                 ret = -EINVAL;
553                 break;
554             }
555         } else if (ccw.count < sizeof(indicators)) {
556             /* Can't execute command. */
557             ret = -EINVAL;
558             break;
559         }
560         if (sch->thinint_active) {
561             /* Trigger a command reject. */
562             ret = -ENOSYS;
563             break;
564         }
565         if (virtio_get_num_queues(vdev) > NR_CLASSIC_INDICATOR_BITS) {
566             /* More queues than indicator bits --> trigger a reject */
567             ret = -ENOSYS;
568             break;
569         }
570         if (!ccw.cda) {
571             ret = -EFAULT;
572         } else {
573             ret = ccw_dstream_read(&sch->cds, indicators);
574             if (ret) {
575                 break;
576             }
577             indicators = be64_to_cpu(indicators);
578             dev->indicators = get_indicator(indicators, sizeof(uint64_t));
579             sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
580             ret = 0;
581         }
582         break;
583     case CCW_CMD_SET_CONF_IND:
584         if (check_len) {
585             if (ccw.count != sizeof(indicators)) {
586                 ret = -EINVAL;
587                 break;
588             }
589         } else if (ccw.count < sizeof(indicators)) {
590             /* Can't execute command. */
591             ret = -EINVAL;
592             break;
593         }
594         if (!ccw.cda) {
595             ret = -EFAULT;
596         } else {
597             ret = ccw_dstream_read(&sch->cds, indicators);
598             if (ret) {
599                 break;
600             }
601             indicators = be64_to_cpu(indicators);
602             dev->indicators2 = get_indicator(indicators, sizeof(uint64_t));
603             sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
604             ret = 0;
605         }
606         break;
607     case CCW_CMD_READ_VQ_CONF:
608         if (check_len) {
609             if (ccw.count != sizeof(vq_config)) {
610                 ret = -EINVAL;
611                 break;
612             }
613         } else if (ccw.count < sizeof(vq_config)) {
614             /* Can't execute command. */
615             ret = -EINVAL;
616             break;
617         }
618         if (!ccw.cda) {
619             ret = -EFAULT;
620         } else {
621             ret = ccw_dstream_read(&sch->cds, vq_config.index);
622             if (ret) {
623                 break;
624             }
625             vq_config.index = be16_to_cpu(vq_config.index);
626             if (vq_config.index >= VIRTIO_QUEUE_MAX) {
627                 ret = -EINVAL;
628                 break;
629             }
630             vq_config.num_max = virtio_queue_get_num(vdev,
631                                                      vq_config.index);
632             vq_config.num_max = cpu_to_be16(vq_config.num_max);
633             ret = ccw_dstream_write(&sch->cds, vq_config.num_max);
634             if (!ret) {
635                 sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
636             }
637         }
638         break;
639     case CCW_CMD_SET_IND_ADAPTER:
640         if (check_len) {
641             if (ccw.count != sizeof(thinint)) {
642                 ret = -EINVAL;
643                 break;
644             }
645         } else if (ccw.count < sizeof(thinint)) {
646             /* Can't execute command. */
647             ret = -EINVAL;
648             break;
649         }
650         if (!ccw.cda) {
651             ret = -EFAULT;
652         } else if (dev->indicators && !sch->thinint_active) {
653             /* Trigger a command reject. */
654             ret = -ENOSYS;
655         } else {
656             if (ccw_dstream_read(&sch->cds, thinint)) {
657                 ret = -EFAULT;
658             } else {
659                 thinint.ind_bit = be64_to_cpu(thinint.ind_bit);
660                 thinint.summary_indicator =
661                     be64_to_cpu(thinint.summary_indicator);
662                 thinint.device_indicator =
663                     be64_to_cpu(thinint.device_indicator);
664 
665                 dev->summary_indicator =
666                     get_indicator(thinint.summary_indicator, sizeof(uint8_t));
667                 dev->indicators =
668                     get_indicator(thinint.device_indicator,
669                                   thinint.ind_bit / 8 + 1);
670                 dev->thinint_isc = thinint.isc;
671                 dev->routes.adapter.ind_offset = thinint.ind_bit;
672                 dev->routes.adapter.summary_offset = 7;
673                 dev->routes.adapter.adapter_id = css_get_adapter_id(
674                                                  CSS_IO_ADAPTER_VIRTIO,
675                                                  dev->thinint_isc);
676                 sch->thinint_active = ((dev->indicators != NULL) &&
677                                        (dev->summary_indicator != NULL));
678                 sch->curr_status.scsw.count = ccw.count - sizeof(thinint);
679                 ret = 0;
680             }
681         }
682         break;
683     case CCW_CMD_SET_VIRTIO_REV:
684         len = sizeof(revinfo);
685         if (ccw.count < len) {
686             ret = -EINVAL;
687             break;
688         }
689         if (!ccw.cda) {
690             ret = -EFAULT;
691             break;
692         }
693         ret = ccw_dstream_read_buf(&sch->cds, &revinfo, 4);
694         if (ret < 0) {
695             break;
696         }
697         revinfo.revision = be16_to_cpu(revinfo.revision);
698         revinfo.length = be16_to_cpu(revinfo.length);
699         if (ccw.count < len + revinfo.length ||
700             (check_len && ccw.count > len + revinfo.length)) {
701             ret = -EINVAL;
702             break;
703         }
704         /*
705          * Once we start to support revisions with additional data, we'll
706          * need to fetch it here. Nothing to do for now, though.
707          */
708         if (dev->revision >= 0 ||
709             revinfo.revision > virtio_ccw_rev_max(dev) ||
710             (dev->force_revision_1 && !revinfo.revision)) {
711             ret = -ENOSYS;
712             break;
713         }
714         ret = 0;
715         dev->revision = revinfo.revision;
716         break;
717     default:
718         ret = -ENOSYS;
719         break;
720     }
721     return ret;
722 }
723 
724 static void virtio_sch_disable_cb(SubchDev *sch)
725 {
726     VirtioCcwDevice *dev = sch->driver_data;
727 
728     dev->revision = -1;
729 }
730 
/*
 * Realize the virtio-ccw proxy: create and wire up the virtual
 * subchannel, then run the device-specific and ccw-level realize
 * callbacks.  On failure the subchannel is unassigned and freed again.
 */
static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
{
    VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);
    SubchDev *sch;
    Error *err = NULL;
    int i;

    sch = css_create_sch(ccw_dev->devno, errp);
    if (!sch) {
        return;
    }
    if (!virtio_ccw_rev_max(dev) && dev->force_revision_1) {
        /* force_revision_1 makes no sense if revision 1 is not allowed. */
        error_setg(&err, "Invalid value of property max_rev "
                   "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
        goto out_err;
    }

    /* Wire the subchannel to this device and its ccw handlers. */
    sch->driver_data = dev;
    sch->ccw_cb = virtio_ccw_cb;
    sch->disable_cb = virtio_sch_disable_cb;
    sch->id.reserved = 0xff;
    sch->id.cu_type = VIRTIO_CCW_CU_TYPE;
    sch->do_subchannel_work = do_subchannel_work_virtual;
    ccw_dev->sch = sch;
    dev->indicators = NULL;
    /* Revision gets negotiated by the guest via CCW_CMD_SET_VIRTIO_REV. */
    dev->revision = -1;
    for (i = 0; i < ADAPTER_ROUTES_MAX_GSI; i++) {
        dev->routes.gsi[i] = -1;
    }
    css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE);

    trace_virtio_ccw_new_device(
        sch->cssid, sch->ssid, sch->schid, sch->devno,
        ccw_dev->devno.valid ? "user-configured" : "auto-configured");

    /* ioeventfd requires kernel eventfd support under KVM. */
    if (kvm_enabled() && !kvm_eventfds_enabled()) {
        dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
    }

    if (k->realize) {
        k->realize(dev, &err);
        if (err) {
            goto out_err;
        }
    }

    ck->realize(ccw_dev, &err);
    if (err) {
        goto out_err;
    }

    return;

out_err:
    error_propagate(errp, err);
    css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
    ccw_dev->sch = NULL;
    g_free(sch);
}
792 
793 static void virtio_ccw_device_unrealize(VirtioCcwDevice *dev)
794 {
795     VirtIOCCWDeviceClass *dc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
796     CcwDevice *ccw_dev = CCW_DEVICE(dev);
797     SubchDev *sch = ccw_dev->sch;
798 
799     if (dc->unrealize) {
800         dc->unrealize(dev);
801     }
802 
803     if (sch) {
804         css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
805         g_free(sch);
806         ccw_dev->sch = NULL;
807     }
808     if (dev->indicators) {
809         release_indicator(&dev->routes.adapter, dev->indicators);
810         dev->indicators = NULL;
811     }
812 }
813 
/* DeviceState to VirtioCcwDevice. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtioCcwDevice *to_virtio_ccw_dev_fast(DeviceState *d)
{
    /* Presumably a cheap unchecked cast — see to_ccw_dev_fast. */
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);

    return container_of(ccw_dev, VirtioCcwDevice, parent_obj);
}
823 
/*
 * Atomically OR @to_be_set into the guest indicator byte at @ind_loc.
 *
 * Returns the previous value of the byte, or -1 (i.e. 0xff, given the
 * uint8_t return type) if the indicator location could not be mapped.
 * NOTE(review): a legitimate prior value of 0xff is indistinguishable
 * from the error case for callers testing the return value.
 */
static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc,
                                     uint8_t to_be_set)
{
    uint8_t expected, actual;
    hwaddr len = 1;
    /* map the byte once and keep the pointer to avoid multiple fetches */
    uint8_t volatile *ind_addr;

    ind_addr = cpu_physical_memory_map(ind_loc, &len, true);
    if (!ind_addr) {
        error_report("%s(%x.%x.%04x): unable to access indicator",
                     __func__, sch->cssid, sch->ssid, sch->schid);
        return -1;
    }
    actual = *ind_addr;
    do {
        /* Retry until no concurrent writer raced with our update. */
        expected = actual;
        actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set);
    } while (actual != expected);
    trace_virtio_ccw_set_ind(ind_loc, actual, actual | to_be_set);
    cpu_physical_memory_unmap((void *)ind_addr, len, 1, len);

    return actual;
}
848 
/*
 * Signal the guest for queue @vector (or a configuration change when
 * vector == VIRTIO_QUEUE_MAX), using either adapter (thinint)
 * interrupts or classic subchannel indicators.  Datapath function.
 */
static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
{
    VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d);
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);
    SubchDev *sch = ccw_dev->sch;
    uint64_t indicators;

    if (vector == VIRTIO_NO_VECTOR) {
        return;
    }
    /*
     * vector < VIRTIO_QUEUE_MAX: notification for a virtqueue
     * vector == VIRTIO_QUEUE_MAX: configuration change notification
     * bits beyond that are unused and should never be notified for
     */
    assert(vector <= VIRTIO_QUEUE_MAX);

    if (vector < VIRTIO_QUEUE_MAX) {
        if (!dev->indicators) {
            return;
        }
        if (sch->thinint_active) {
            /*
             * In the adapter interrupt case, indicators points to a
             * memory area that may be (way) larger than 64 bit and
             * ind_bit indicates the start of the indicators in a big
             * endian notation.
             */
            uint64_t ind_bit = dev->routes.adapter.ind_offset;

            virtio_set_ind_atomic(sch, dev->indicators->addr +
                                  (ind_bit + vector) / 8,
                                  0x80 >> ((ind_bit + vector) % 8));
            /* Only raise the interrupt if the summary was not set yet. */
            if (!virtio_set_ind_atomic(sch, dev->summary_indicator->addr,
                                       0x01)) {
                css_adapter_interrupt(CSS_IO_ADAPTER_VIRTIO, dev->thinint_isc);
            }
        } else {
            /* Classic: one bit per queue in a single 64-bit word. */
            assert(vector < NR_CLASSIC_INDICATOR_BITS);
            indicators = address_space_ldq(&address_space_memory,
                                           dev->indicators->addr,
                                           MEMTXATTRS_UNSPECIFIED,
                                           NULL);
            indicators |= 1ULL << vector;
            address_space_stq(&address_space_memory, dev->indicators->addr,
                              indicators, MEMTXATTRS_UNSPECIFIED, NULL);
            css_conditional_io_interrupt(sch);
        }
    } else {
        /* Configuration change: set bit 0 of the secondary indicators. */
        if (!dev->indicators2) {
            return;
        }
        indicators = address_space_ldq(&address_space_memory,
                                       dev->indicators2->addr,
                                       MEMTXATTRS_UNSPECIFIED,
                                       NULL);
        indicators |= 1ULL;
        address_space_stq(&address_space_memory, dev->indicators2->addr,
                          indicators, MEMTXATTRS_UNSPECIFIED, NULL);
        css_conditional_io_interrupt(sch);
    }
}
911 
912 static void virtio_ccw_reset(DeviceState *d)
913 {
914     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
915     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
916     VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
917 
918     virtio_ccw_reset_virtio(dev, vdev);
919     if (vdc->parent_reset) {
920         vdc->parent_reset(d);
921     }
922 }
923 
924 static void virtio_ccw_vmstate_change(DeviceState *d, bool running)
925 {
926     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
927 
928     if (running) {
929         virtio_ccw_start_ioeventfd(dev);
930     } else {
931         virtio_ccw_stop_ioeventfd(dev);
932     }
933 }
934 
935 static bool virtio_ccw_query_guest_notifiers(DeviceState *d)
936 {
937     CcwDevice *dev = CCW_DEVICE(d);
938 
939     return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA);
940 }
941 
942 static int virtio_ccw_get_mappings(VirtioCcwDevice *dev)
943 {
944     int r;
945     CcwDevice *ccw_dev = CCW_DEVICE(dev);
946 
947     if (!ccw_dev->sch->thinint_active) {
948         return -EINVAL;
949     }
950 
951     r = map_indicator(&dev->routes.adapter, dev->summary_indicator);
952     if (r) {
953         return r;
954     }
955     r = map_indicator(&dev->routes.adapter, dev->indicators);
956     if (r) {
957         return r;
958     }
959     dev->routes.adapter.summary_addr = dev->summary_indicator->map;
960     dev->routes.adapter.ind_addr = dev->indicators->map;
961 
962     return 0;
963 }
964 
965 static int virtio_ccw_setup_irqroutes(VirtioCcwDevice *dev, int nvqs)
966 {
967     int i;
968     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
969     int ret;
970     S390FLICState *fs = s390_get_flic();
971     S390FLICStateClass *fsc = s390_get_flic_class(fs);
972 
973     ret = virtio_ccw_get_mappings(dev);
974     if (ret) {
975         return ret;
976     }
977     for (i = 0; i < nvqs; i++) {
978         if (!virtio_queue_get_num(vdev, i)) {
979             break;
980         }
981     }
982     dev->routes.num_routes = i;
983     return fsc->add_adapter_routes(fs, &dev->routes);
984 }
985 
986 static void virtio_ccw_release_irqroutes(VirtioCcwDevice *dev, int nvqs)
987 {
988     S390FLICState *fs = s390_get_flic();
989     S390FLICStateClass *fsc = s390_get_flic_class(fs);
990 
991     fsc->release_adapter_routes(fs, &dev->routes);
992 }
993 
994 static int virtio_ccw_add_irqfd(VirtioCcwDevice *dev, int n)
995 {
996     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
997     VirtQueue *vq = virtio_get_queue(vdev, n);
998     EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
999 
1000     return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, notifier, NULL,
1001                                               dev->routes.gsi[n]);
1002 }
1003 
1004 static void virtio_ccw_remove_irqfd(VirtioCcwDevice *dev, int n)
1005 {
1006     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1007     VirtQueue *vq = virtio_get_queue(vdev, n);
1008     EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
1009     int ret;
1010 
1011     ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, notifier,
1012                                                 dev->routes.gsi[n]);
1013     assert(ret == 0);
1014 }
1015 
1016 static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
1017                                          bool assign, bool with_irqfd)
1018 {
1019     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1020     VirtQueue *vq = virtio_get_queue(vdev, n);
1021     EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
1022     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1023 
1024     if (assign) {
1025         int r = event_notifier_init(notifier, 0);
1026 
1027         if (r < 0) {
1028             return r;
1029         }
1030         virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
1031         if (with_irqfd) {
1032             r = virtio_ccw_add_irqfd(dev, n);
1033             if (r) {
1034                 virtio_queue_set_guest_notifier_fd_handler(vq, false,
1035                                                            with_irqfd);
1036                 return r;
1037             }
1038         }
1039         /*
1040          * We do not support individual masking for channel devices, so we
1041          * need to manually trigger any guest masking callbacks here.
1042          */
1043         if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
1044             k->guest_notifier_mask(vdev, n, false);
1045         }
1046         /* get lost events and re-inject */
1047         if (k->guest_notifier_pending &&
1048             k->guest_notifier_pending(vdev, n)) {
1049             event_notifier_set(notifier);
1050         }
1051     } else {
1052         if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
1053             k->guest_notifier_mask(vdev, n, true);
1054         }
1055         if (with_irqfd) {
1056             virtio_ccw_remove_irqfd(dev, n);
1057         }
1058         virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
1059         event_notifier_cleanup(notifier);
1060     }
1061     return 0;
1062 }
1063 
/*
 * virtio-bus callback: (de)assign guest notifiers for the first @nvqs
 * virtqueues. When adapter interrupts and irqfds are available, irq
 * routes are set up before assignment and released after deassignment.
 * Returns 0 on success or a negative error.
 */
static int virtio_ccw_set_guest_notifiers(DeviceState *d, int nvqs,
                                          bool assigned)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    CcwDevice *ccw_dev = CCW_DEVICE(d);
    bool with_irqfd = ccw_dev->sch->thinint_active && kvm_irqfds_enabled();
    int r, n;

    if (with_irqfd && assigned) {
        /* irq routes need to be set up before assigning irqfds */
        r = virtio_ccw_setup_irqroutes(dev, nvqs);
        if (r < 0) {
            goto irqroute_error;
        }
    }
    for (n = 0; n < nvqs; n++) {
        /* Stop at the first unconfigured queue. */
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }
        r = virtio_ccw_set_guest_notifier(dev, n, assigned, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }
    if (with_irqfd && !assigned) {
        /* release irq routes after irqfds have been released */
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return 0;

assign_error:
    /*
     * Undo the queues processed so far.
     * NOTE(review): the rollback passes with_irqfd=false even when
     * irqfds were attached for earlier queues - confirm intended.
     */
    while (--n >= 0) {
        virtio_ccw_set_guest_notifier(dev, n, !assigned, false);
    }
irqroute_error:
    if (with_irqfd && assigned) {
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return r;
}
1105 
1106 static void virtio_ccw_save_queue(DeviceState *d, int n, QEMUFile *f)
1107 {
1108     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1109     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1110 
1111     qemu_put_be16(f, virtio_queue_vector(vdev, n));
1112 }
1113 
1114 static int virtio_ccw_load_queue(DeviceState *d, int n, QEMUFile *f)
1115 {
1116     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1117     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1118     uint16_t vector;
1119 
1120     qemu_get_be16s(f, &vector);
1121     virtio_queue_set_vector(vdev, n , vector);
1122 
1123     return 0;
1124 }
1125 
1126 static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f)
1127 {
1128     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1129     vmstate_save_state(f, &vmstate_virtio_ccw_dev, dev, NULL);
1130 }
1131 
1132 static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f)
1133 {
1134     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1135     return vmstate_load_state(f, &vmstate_virtio_ccw_dev, dev, 1);
1136 }
1137 
1138 static void virtio_ccw_pre_plugged(DeviceState *d, Error **errp)
1139 {
1140    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1141    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1142 
1143     if (dev->max_rev >= 1) {
1144         virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
1145     }
1146 }
1147 
1148 /* This is called by virtio-bus just after the device is plugged. */
1149 static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
1150 {
1151     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1152     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1153     CcwDevice *ccw_dev = CCW_DEVICE(d);
1154     SubchDev *sch = ccw_dev->sch;
1155     int n = virtio_get_num_queues(vdev);
1156     S390FLICState *flic = s390_get_flic();
1157 
1158     if (!virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
1159         dev->max_rev = 0;
1160     }
1161 
1162     if (!virtio_ccw_rev_max(dev) && !virtio_legacy_allowed(vdev)) {
1163         /*
1164          * To avoid migration issues, we allow legacy mode when legacy
1165          * check is disabled in the old machine types (< 5.1).
1166          */
1167         if (virtio_legacy_check_disabled(vdev)) {
1168             warn_report("device requires revision >= 1, but for backward "
1169                         "compatibility max_revision=0 is allowed");
1170         } else {
1171             error_setg(errp, "Invalid value of property max_rev "
1172                        "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
1173             return;
1174         }
1175     }
1176 
1177     if (virtio_get_num_queues(vdev) > VIRTIO_QUEUE_MAX) {
1178         error_setg(errp, "The number of virtqueues %d "
1179                    "exceeds virtio limit %d", n,
1180                    VIRTIO_QUEUE_MAX);
1181         return;
1182     }
1183     if (virtio_get_num_queues(vdev) > flic->adapter_routes_max_batch) {
1184         error_setg(errp, "The number of virtqueues %d "
1185                    "exceeds flic adapter route limit %d", n,
1186                    flic->adapter_routes_max_batch);
1187         return;
1188     }
1189 
1190     sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus);
1191 
1192 
1193     css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,
1194                           d->hotplugged, 1);
1195 }
1196 
1197 static void virtio_ccw_device_unplugged(DeviceState *d)
1198 {
1199     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1200 
1201     virtio_ccw_stop_ioeventfd(dev);
1202 }
1203 /**************** Virtio-ccw Bus Device Descriptions *******************/
1204 
1205 static void virtio_ccw_busdev_realize(DeviceState *dev, Error **errp)
1206 {
1207     VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;
1208 
1209     virtio_ccw_bus_new(&_dev->bus, sizeof(_dev->bus), _dev);
1210     virtio_ccw_device_realize(_dev, errp);
1211 }
1212 
1213 static void virtio_ccw_busdev_unrealize(DeviceState *dev)
1214 {
1215     VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;
1216 
1217     virtio_ccw_device_unrealize(_dev);
1218 }
1219 
1220 static void virtio_ccw_busdev_unplug(HotplugHandler *hotplug_dev,
1221                                      DeviceState *dev, Error **errp)
1222 {
1223     VirtioCcwDevice *_dev = to_virtio_ccw_dev_fast(dev);
1224 
1225     virtio_ccw_stop_ioeventfd(_dev);
1226 }
1227 
1228 static void virtio_ccw_device_class_init(ObjectClass *klass, void *data)
1229 {
1230     DeviceClass *dc = DEVICE_CLASS(klass);
1231     CCWDeviceClass *k = CCW_DEVICE_CLASS(dc);
1232     VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_CLASS(klass);
1233 
1234     k->unplug = virtio_ccw_busdev_unplug;
1235     dc->realize = virtio_ccw_busdev_realize;
1236     dc->unrealize = virtio_ccw_busdev_unrealize;
1237     dc->bus_type = TYPE_VIRTUAL_CSS_BUS;
1238     device_class_set_parent_reset(dc, virtio_ccw_reset, &vdc->parent_reset);
1239 }
1240 
/*
 * Abstract base type for virtio-ccw proxy devices; concrete devices
 * are presumably derived from this elsewhere (.abstract = true).
 */
static const TypeInfo virtio_ccw_device_info = {
    .name = TYPE_VIRTIO_CCW_DEVICE,
    .parent = TYPE_CCW_DEVICE,
    .instance_size = sizeof(VirtioCcwDevice),
    .class_init = virtio_ccw_device_class_init,
    .class_size = sizeof(VirtIOCCWDeviceClass),
    .abstract = true,
};
1249 
1250 /* virtio-ccw-bus */
1251 
1252 static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
1253                                VirtioCcwDevice *dev)
1254 {
1255     DeviceState *qdev = DEVICE(dev);
1256     char virtio_bus_name[] = "virtio-bus";
1257 
1258     qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_CCW_BUS,
1259                         qdev, virtio_bus_name);
1260 }
1261 
1262 static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
1263 {
1264     VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
1265     BusClass *bus_class = BUS_CLASS(klass);
1266 
1267     bus_class->max_dev = 1;
1268     k->notify = virtio_ccw_notify;
1269     k->vmstate_change = virtio_ccw_vmstate_change;
1270     k->query_guest_notifiers = virtio_ccw_query_guest_notifiers;
1271     k->set_guest_notifiers = virtio_ccw_set_guest_notifiers;
1272     k->save_queue = virtio_ccw_save_queue;
1273     k->load_queue = virtio_ccw_load_queue;
1274     k->save_config = virtio_ccw_save_config;
1275     k->load_config = virtio_ccw_load_config;
1276     k->pre_plugged = virtio_ccw_pre_plugged;
1277     k->device_plugged = virtio_ccw_device_plugged;
1278     k->device_unplugged = virtio_ccw_device_unplugged;
1279     k->ioeventfd_enabled = virtio_ccw_ioeventfd_enabled;
1280     k->ioeventfd_assign = virtio_ccw_ioeventfd_assign;
1281 }
1282 
/* Bus type that connects a virtio device to its ccw proxy device. */
static const TypeInfo virtio_ccw_bus_info = {
    .name = TYPE_VIRTIO_CCW_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioCcwBusState),
    .class_size = sizeof(VirtioCcwBusClass),
    .class_init = virtio_ccw_bus_class_init,
};
1290 
1291 static void virtio_ccw_register(void)
1292 {
1293     type_register_static(&virtio_ccw_bus_info);
1294     type_register_static(&virtio_ccw_device_info);
1295 }
1296 
1297 type_init(virtio_ccw_register)
1298