xref: /openbmc/qemu/hw/s390x/virtio-ccw.c (revision b9b59a36)
1 /*
2  * virtio ccw target implementation
3  *
4  * Copyright 2012,2015 IBM Corp.
5  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6  *            Pierre Morel <pmorel@linux.vnet.ibm.com>
7  *
8  * This work is licensed under the terms of the GNU GPL, version 2 or (at
9  * your option) any later version. See the COPYING file in the top-level
10  * directory.
11  */
12 
13 #include "qemu/osdep.h"
14 #include "qapi/error.h"
15 #include "exec/address-spaces.h"
16 #include "sysemu/kvm.h"
17 #include "net/net.h"
18 #include "hw/virtio/virtio.h"
19 #include "migration/qemu-file-types.h"
20 #include "hw/virtio/virtio-net.h"
21 #include "qemu/bitops.h"
22 #include "qemu/error-report.h"
23 #include "qemu/log.h"
24 #include "qemu/module.h"
25 #include "hw/virtio/virtio-bus.h"
26 #include "hw/s390x/adapter.h"
27 #include "hw/s390x/s390_flic.h"
28 
29 #include "hw/s390x/ioinst.h"
30 #include "hw/s390x/css.h"
31 #include "virtio-ccw.h"
32 #include "trace.h"
33 #include "hw/s390x/css-bridge.h"
34 #include "hw/s390x/s390-virtio-ccw.h"
35 #include "sysemu/replay.h"
36 
37 #define NR_CLASSIC_INDICATOR_BITS 64
38 
39 bool have_virtio_ccw = true;
40 
virtio_ccw_dev_post_load(void * opaque,int version_id)41 static int virtio_ccw_dev_post_load(void *opaque, int version_id)
42 {
43     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(opaque);
44     CcwDevice *ccw_dev = CCW_DEVICE(dev);
45     CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);
46 
47     ccw_dev->sch->driver_data = dev;
48     if (ccw_dev->sch->thinint_active) {
49         dev->routes.adapter.adapter_id = css_get_adapter_id(
50                                          CSS_IO_ADAPTER_VIRTIO,
51                                          dev->thinint_isc);
52     }
53     /* Re-fill subch_id after loading the subchannel states.*/
54     if (ck->refill_ids) {
55         ck->refill_ids(ccw_dev);
56     }
57     return 0;
58 }
59 
60 typedef struct VirtioCcwDeviceTmp {
61     VirtioCcwDevice *parent;
62     uint16_t config_vector;
63 } VirtioCcwDeviceTmp;
64 
virtio_ccw_dev_tmp_pre_save(void * opaque)65 static int virtio_ccw_dev_tmp_pre_save(void *opaque)
66 {
67     VirtioCcwDeviceTmp *tmp = opaque;
68     VirtioCcwDevice *dev = tmp->parent;
69     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
70 
71     tmp->config_vector = vdev->config_vector;
72 
73     return 0;
74 }
75 
virtio_ccw_dev_tmp_post_load(void * opaque,int version_id)76 static int virtio_ccw_dev_tmp_post_load(void *opaque, int version_id)
77 {
78     VirtioCcwDeviceTmp *tmp = opaque;
79     VirtioCcwDevice *dev = tmp->parent;
80     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
81 
82     vdev->config_vector = tmp->config_vector;
83     return 0;
84 }
85 
86 const VMStateDescription vmstate_virtio_ccw_dev_tmp = {
87     .name = "s390_virtio_ccw_dev_tmp",
88     .pre_save = virtio_ccw_dev_tmp_pre_save,
89     .post_load = virtio_ccw_dev_tmp_post_load,
90     .fields = (const VMStateField[]) {
91         VMSTATE_UINT16(config_vector, VirtioCcwDeviceTmp),
92         VMSTATE_END_OF_LIST()
93     }
94 };
95 
96 const VMStateDescription vmstate_virtio_ccw_dev = {
97     .name = "s390_virtio_ccw_dev",
98     .version_id = 1,
99     .minimum_version_id = 1,
100     .post_load = virtio_ccw_dev_post_load,
101     .fields = (const VMStateField[]) {
102         VMSTATE_CCW_DEVICE(parent_obj, VirtioCcwDevice),
103         VMSTATE_PTR_TO_IND_ADDR(indicators, VirtioCcwDevice),
104         VMSTATE_PTR_TO_IND_ADDR(indicators2, VirtioCcwDevice),
105         VMSTATE_PTR_TO_IND_ADDR(summary_indicator, VirtioCcwDevice),
106         /*
107          * Ugly hack because VirtIODevice does not migrate itself.
108          * This also makes legacy via vmstate_save_state possible.
109          */
110         VMSTATE_WITH_TMP(VirtioCcwDevice, VirtioCcwDeviceTmp,
111                          vmstate_virtio_ccw_dev_tmp),
112         VMSTATE_STRUCT(routes, VirtioCcwDevice, 1, vmstate_adapter_routes,
113                        AdapterRoutes),
114         VMSTATE_UINT8(thinint_isc, VirtioCcwDevice),
115         VMSTATE_INT32(revision, VirtioCcwDevice),
116         VMSTATE_END_OF_LIST()
117     }
118 };
119 
120 static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
121                                VirtioCcwDevice *dev);
122 
virtio_ccw_get_vdev(SubchDev * sch)123 VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch)
124 {
125     VirtIODevice *vdev = NULL;
126     VirtioCcwDevice *dev = sch->driver_data;
127 
128     if (dev) {
129         vdev = virtio_bus_get_device(&dev->bus);
130     }
131     return vdev;
132 }
133 
virtio_ccw_start_ioeventfd(VirtioCcwDevice * dev)134 static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
135 {
136     virtio_bus_start_ioeventfd(&dev->bus);
137 }
138 
virtio_ccw_stop_ioeventfd(VirtioCcwDevice * dev)139 static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
140 {
141     virtio_bus_stop_ioeventfd(&dev->bus);
142 }
143 
virtio_ccw_ioeventfd_enabled(DeviceState * d)144 static bool virtio_ccw_ioeventfd_enabled(DeviceState *d)
145 {
146     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
147 
148     return (dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) != 0;
149 }
150 
virtio_ccw_ioeventfd_assign(DeviceState * d,EventNotifier * notifier,int n,bool assign)151 static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
152                                        int n, bool assign)
153 {
154     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
155     CcwDevice *ccw_dev = CCW_DEVICE(dev);
156     SubchDev *sch = ccw_dev->sch;
157     uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;
158 
159     return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
160 }
161 
162 /* Communication blocks used by several channel commands. */
163 typedef struct VqInfoBlockLegacy {
164     uint64_t queue;
165     uint32_t align;
166     uint16_t index;
167     uint16_t num;
168 } QEMU_PACKED VqInfoBlockLegacy;
169 
170 typedef struct VqInfoBlock {
171     uint64_t desc;
172     uint32_t res0;
173     uint16_t index;
174     uint16_t num;
175     uint64_t avail;
176     uint64_t used;
177 } QEMU_PACKED VqInfoBlock;
178 
179 typedef struct VqConfigBlock {
180     uint16_t index;
181     uint16_t num_max;
182 } QEMU_PACKED VqConfigBlock;
183 
184 typedef struct VirtioFeatDesc {
185     uint32_t features;
186     uint8_t index;
187 } QEMU_PACKED VirtioFeatDesc;
188 
189 typedef struct VirtioThinintInfo {
190     hwaddr summary_indicator;
191     hwaddr device_indicator;
192     uint64_t ind_bit;
193     uint8_t isc;
194 } QEMU_PACKED VirtioThinintInfo;
195 
196 typedef struct VirtioRevInfo {
197     uint16_t revision;
198     uint16_t length;
199     uint8_t data[];
200 } QEMU_PACKED VirtioRevInfo;
201 
202 /* Specify where the virtqueues for the subchannel are in guest memory. */
virtio_ccw_set_vqs(SubchDev * sch,VqInfoBlock * info,VqInfoBlockLegacy * linfo)203 static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info,
204                               VqInfoBlockLegacy *linfo)
205 {
206     VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
207     uint16_t index = info ? info->index : linfo->index;
208     uint16_t num = info ? info->num : linfo->num;
209     uint64_t desc = info ? info->desc : linfo->queue;
210 
211     if (index >= VIRTIO_QUEUE_MAX) {
212         return -EINVAL;
213     }
214 
215     /* Current code in virtio.c relies on 4K alignment. */
216     if (linfo && desc && (linfo->align != 4096)) {
217         return -EINVAL;
218     }
219 
220     if (!vdev) {
221         return -EINVAL;
222     }
223 
224     if (info) {
225         virtio_queue_set_rings(vdev, index, desc, info->avail, info->used);
226     } else {
227         virtio_queue_set_addr(vdev, index, desc);
228     }
229     if (!desc) {
230         virtio_queue_set_vector(vdev, index, VIRTIO_NO_VECTOR);
231     } else {
232         if (info) {
233             /* virtio-1 allows changing the ring size. */
234             if (virtio_queue_get_max_num(vdev, index) < num) {
235                 /* Fail if we exceed the maximum number. */
236                 return -EINVAL;
237             }
238             virtio_queue_set_num(vdev, index, num);
239             virtio_init_region_cache(vdev, index);
240         } else if (virtio_queue_get_num(vdev, index) > num) {
241             /* Fail if we don't have a big enough queue. */
242             return -EINVAL;
243         }
244         /* We ignore possible increased num for legacy for compatibility. */
245         virtio_queue_set_vector(vdev, index, index);
246     }
247     /* tell notify handler in case of config change */
248     vdev->config_vector = VIRTIO_QUEUE_MAX;
249     return 0;
250 }
251 
virtio_ccw_reset_virtio(VirtioCcwDevice * dev)252 static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev)
253 {
254     CcwDevice *ccw_dev = CCW_DEVICE(dev);
255 
256     virtio_bus_reset(&dev->bus);
257     if (dev->indicators) {
258         release_indicator(&dev->routes.adapter, dev->indicators);
259         dev->indicators = NULL;
260     }
261     if (dev->indicators2) {
262         release_indicator(&dev->routes.adapter, dev->indicators2);
263         dev->indicators2 = NULL;
264     }
265     if (dev->summary_indicator) {
266         release_indicator(&dev->routes.adapter, dev->summary_indicator);
267         dev->summary_indicator = NULL;
268     }
269     ccw_dev->sch->thinint_active = false;
270 }
271 
virtio_ccw_handle_set_vq(SubchDev * sch,CCW1 ccw,bool check_len,bool is_legacy)272 static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len,
273                                     bool is_legacy)
274 {
275     int ret;
276     VqInfoBlock info;
277     VqInfoBlockLegacy linfo;
278     size_t info_len = is_legacy ? sizeof(linfo) : sizeof(info);
279 
280     if (check_len) {
281         if (ccw.count != info_len) {
282             return -EINVAL;
283         }
284     } else if (ccw.count < info_len) {
285         /* Can't execute command. */
286         return -EINVAL;
287     }
288     if (!ccw.cda) {
289         return -EFAULT;
290     }
291     if (is_legacy) {
292         ret = ccw_dstream_read(&sch->cds, linfo);
293         if (ret) {
294             return ret;
295         }
296         linfo.queue = be64_to_cpu(linfo.queue);
297         linfo.align = be32_to_cpu(linfo.align);
298         linfo.index = be16_to_cpu(linfo.index);
299         linfo.num = be16_to_cpu(linfo.num);
300         ret = virtio_ccw_set_vqs(sch, NULL, &linfo);
301     } else {
302         ret = ccw_dstream_read(&sch->cds, info);
303         if (ret) {
304             return ret;
305         }
306         info.desc = be64_to_cpu(info.desc);
307         info.index = be16_to_cpu(info.index);
308         info.num = be16_to_cpu(info.num);
309         info.avail = be64_to_cpu(info.avail);
310         info.used = be64_to_cpu(info.used);
311         ret = virtio_ccw_set_vqs(sch, &info, NULL);
312     }
313     sch->curr_status.scsw.count = 0;
314     return ret;
315 }
316 
virtio_ccw_cb(SubchDev * sch,CCW1 ccw)317 static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
318 {
319     int ret;
320     VirtioRevInfo revinfo;
321     uint8_t status;
322     VirtioFeatDesc features;
323     hwaddr indicators;
324     VqConfigBlock vq_config;
325     VirtioCcwDevice *dev = sch->driver_data;
326     VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
327     bool check_len;
328     int len;
329     VirtioThinintInfo thinint;
330 
331     if (!dev) {
332         return -EINVAL;
333     }
334 
335     trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
336                                    ccw.cmd_code);
337     check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));
338 
339     if (dev->revision < 0 && ccw.cmd_code != CCW_CMD_SET_VIRTIO_REV) {
340         if (dev->force_revision_1) {
341             /*
342              * virtio-1 drivers must start with negotiating to a revision >= 1,
343              * so post a command reject for all other commands
344              */
345             return -ENOSYS;
346         } else {
347             /*
348              * If the driver issues any command that is not SET_VIRTIO_REV,
349              * we'll have to operate the device in legacy mode.
350              */
351             dev->revision = 0;
352         }
353     }
354 
355     /* Look at the command. */
356     switch (ccw.cmd_code) {
357     case CCW_CMD_SET_VQ:
358         ret = virtio_ccw_handle_set_vq(sch, ccw, check_len, dev->revision < 1);
359         break;
360     case CCW_CMD_VDEV_RESET:
361         virtio_ccw_reset_virtio(dev);
362         ret = 0;
363         break;
364     case CCW_CMD_READ_FEAT:
365         if (check_len) {
366             if (ccw.count != sizeof(features)) {
367                 ret = -EINVAL;
368                 break;
369             }
370         } else if (ccw.count < sizeof(features)) {
371             /* Can't execute command. */
372             ret = -EINVAL;
373             break;
374         }
375         if (!ccw.cda) {
376             ret = -EFAULT;
377         } else {
378             VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
379 
380             ccw_dstream_advance(&sch->cds, sizeof(features.features));
381             ret = ccw_dstream_read(&sch->cds, features.index);
382             if (ret) {
383                 break;
384             }
385             if (features.index == 0) {
386                 if (dev->revision >= 1) {
387                     /* Don't offer legacy features for modern devices. */
388                     features.features = (uint32_t)
389                         (vdev->host_features & ~vdc->legacy_features);
390                 } else {
391                     features.features = (uint32_t)vdev->host_features;
392                 }
393             } else if ((features.index == 1) && (dev->revision >= 1)) {
394                 /*
395                  * Only offer feature bits beyond 31 if the guest has
396                  * negotiated at least revision 1.
397                  */
398                 features.features = (uint32_t)(vdev->host_features >> 32);
399             } else {
400                 /* Return zeroes if the guest supports more feature bits. */
401                 features.features = 0;
402             }
403             ccw_dstream_rewind(&sch->cds);
404             features.features = cpu_to_le32(features.features);
405             ret = ccw_dstream_write(&sch->cds, features.features);
406             if (!ret) {
407                 sch->curr_status.scsw.count = ccw.count - sizeof(features);
408             }
409         }
410         break;
411     case CCW_CMD_WRITE_FEAT:
412         if (check_len) {
413             if (ccw.count != sizeof(features)) {
414                 ret = -EINVAL;
415                 break;
416             }
417         } else if (ccw.count < sizeof(features)) {
418             /* Can't execute command. */
419             ret = -EINVAL;
420             break;
421         }
422         if (!ccw.cda) {
423             ret = -EFAULT;
424         } else {
425             ret = ccw_dstream_read(&sch->cds, features);
426             if (ret) {
427                 break;
428             }
429             features.features = le32_to_cpu(features.features);
430             if (features.index == 0) {
431                 virtio_set_features(vdev,
432                                     (vdev->guest_features & 0xffffffff00000000ULL) |
433                                     features.features);
434             } else if ((features.index == 1) && (dev->revision >= 1)) {
435                 /*
436                  * If the guest did not negotiate at least revision 1,
437                  * we did not offer it any feature bits beyond 31. Such a
438                  * guest passing us any bit here is therefore buggy.
439                  */
440                 virtio_set_features(vdev,
441                                     (vdev->guest_features & 0x00000000ffffffffULL) |
442                                     ((uint64_t)features.features << 32));
443             } else {
444                 /*
445                  * If the guest supports more feature bits, assert that it
446                  * passes us zeroes for those we don't support.
447                  */
448                 if (features.features) {
449                     qemu_log_mask(LOG_GUEST_ERROR,
450                                   "Guest bug: features[%i]=%x (expected 0)",
451                                   features.index, features.features);
452                     /* XXX: do a unit check here? */
453                 }
454             }
455             sch->curr_status.scsw.count = ccw.count - sizeof(features);
456             ret = 0;
457         }
458         break;
459     case CCW_CMD_READ_CONF:
460         if (check_len) {
461             if (ccw.count > vdev->config_len) {
462                 ret = -EINVAL;
463                 break;
464             }
465         }
466         len = MIN(ccw.count, vdev->config_len);
467         if (!ccw.cda) {
468             ret = -EFAULT;
469         } else {
470             virtio_bus_get_vdev_config(&dev->bus, vdev->config);
471             ret = ccw_dstream_write_buf(&sch->cds, vdev->config, len);
472             if (ret) {
473                 sch->curr_status.scsw.count = ccw.count - len;
474             }
475         }
476         break;
477     case CCW_CMD_WRITE_CONF:
478         if (check_len) {
479             if (ccw.count > vdev->config_len) {
480                 ret = -EINVAL;
481                 break;
482             }
483         }
484         len = MIN(ccw.count, vdev->config_len);
485         if (!ccw.cda) {
486             ret = -EFAULT;
487         } else {
488             ret = ccw_dstream_read_buf(&sch->cds, vdev->config, len);
489             if (!ret) {
490                 virtio_bus_set_vdev_config(&dev->bus, vdev->config);
491                 sch->curr_status.scsw.count = ccw.count - len;
492             }
493         }
494         break;
495     case CCW_CMD_READ_STATUS:
496         if (check_len) {
497             if (ccw.count != sizeof(status)) {
498                 ret = -EINVAL;
499                 break;
500             }
501         } else if (ccw.count < sizeof(status)) {
502             /* Can't execute command. */
503             ret = -EINVAL;
504             break;
505         }
506         if (!ccw.cda) {
507             ret = -EFAULT;
508         } else {
509             address_space_stb(&address_space_memory, ccw.cda, vdev->status,
510                                         MEMTXATTRS_UNSPECIFIED, NULL);
511             sch->curr_status.scsw.count = ccw.count - sizeof(vdev->status);
512             ret = 0;
513         }
514         break;
515     case CCW_CMD_WRITE_STATUS:
516         if (check_len) {
517             if (ccw.count != sizeof(status)) {
518                 ret = -EINVAL;
519                 break;
520             }
521         } else if (ccw.count < sizeof(status)) {
522             /* Can't execute command. */
523             ret = -EINVAL;
524             break;
525         }
526         if (!ccw.cda) {
527             ret = -EFAULT;
528         } else {
529             ret = ccw_dstream_read(&sch->cds, status);
530             if (ret) {
531                 break;
532             }
533             if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
534                 virtio_ccw_stop_ioeventfd(dev);
535             }
536             if (virtio_set_status(vdev, status) == 0) {
537                 if (vdev->status == 0) {
538                     virtio_ccw_reset_virtio(dev);
539                 }
540                 if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
541                     virtio_ccw_start_ioeventfd(dev);
542                 }
543                 sch->curr_status.scsw.count = ccw.count - sizeof(status);
544                 ret = 0;
545             } else {
546                 /* Trigger a command reject. */
547                 ret = -ENOSYS;
548             }
549         }
550         break;
551     case CCW_CMD_SET_IND:
552         if (check_len) {
553             if (ccw.count != sizeof(indicators)) {
554                 ret = -EINVAL;
555                 break;
556             }
557         } else if (ccw.count < sizeof(indicators)) {
558             /* Can't execute command. */
559             ret = -EINVAL;
560             break;
561         }
562         if (sch->thinint_active) {
563             /* Trigger a command reject. */
564             ret = -ENOSYS;
565             break;
566         }
567         if (virtio_get_num_queues(vdev) > NR_CLASSIC_INDICATOR_BITS) {
568             /* More queues than indicator bits --> trigger a reject */
569             ret = -ENOSYS;
570             break;
571         }
572         if (!ccw.cda) {
573             ret = -EFAULT;
574         } else {
575             ret = ccw_dstream_read(&sch->cds, indicators);
576             if (ret) {
577                 break;
578             }
579             indicators = be64_to_cpu(indicators);
580             dev->indicators = get_indicator(indicators, sizeof(uint64_t));
581             sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
582             ret = 0;
583         }
584         break;
585     case CCW_CMD_SET_CONF_IND:
586         if (check_len) {
587             if (ccw.count != sizeof(indicators)) {
588                 ret = -EINVAL;
589                 break;
590             }
591         } else if (ccw.count < sizeof(indicators)) {
592             /* Can't execute command. */
593             ret = -EINVAL;
594             break;
595         }
596         if (!ccw.cda) {
597             ret = -EFAULT;
598         } else {
599             ret = ccw_dstream_read(&sch->cds, indicators);
600             if (ret) {
601                 break;
602             }
603             indicators = be64_to_cpu(indicators);
604             dev->indicators2 = get_indicator(indicators, sizeof(uint64_t));
605             sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
606             ret = 0;
607         }
608         break;
609     case CCW_CMD_READ_VQ_CONF:
610         if (check_len) {
611             if (ccw.count != sizeof(vq_config)) {
612                 ret = -EINVAL;
613                 break;
614             }
615         } else if (ccw.count < sizeof(vq_config)) {
616             /* Can't execute command. */
617             ret = -EINVAL;
618             break;
619         }
620         if (!ccw.cda) {
621             ret = -EFAULT;
622         } else {
623             ret = ccw_dstream_read(&sch->cds, vq_config.index);
624             if (ret) {
625                 break;
626             }
627             vq_config.index = be16_to_cpu(vq_config.index);
628             if (vq_config.index >= VIRTIO_QUEUE_MAX) {
629                 ret = -EINVAL;
630                 break;
631             }
632             vq_config.num_max = virtio_queue_get_num(vdev,
633                                                      vq_config.index);
634             vq_config.num_max = cpu_to_be16(vq_config.num_max);
635             ret = ccw_dstream_write(&sch->cds, vq_config.num_max);
636             if (!ret) {
637                 sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
638             }
639         }
640         break;
641     case CCW_CMD_SET_IND_ADAPTER:
642         if (check_len) {
643             if (ccw.count != sizeof(thinint)) {
644                 ret = -EINVAL;
645                 break;
646             }
647         } else if (ccw.count < sizeof(thinint)) {
648             /* Can't execute command. */
649             ret = -EINVAL;
650             break;
651         }
652         if (!ccw.cda) {
653             ret = -EFAULT;
654         } else if (dev->indicators && !sch->thinint_active) {
655             /* Trigger a command reject. */
656             ret = -ENOSYS;
657         } else {
658             if (ccw_dstream_read(&sch->cds, thinint)) {
659                 ret = -EFAULT;
660             } else {
661                 thinint.ind_bit = be64_to_cpu(thinint.ind_bit);
662                 thinint.summary_indicator =
663                     be64_to_cpu(thinint.summary_indicator);
664                 thinint.device_indicator =
665                     be64_to_cpu(thinint.device_indicator);
666 
667                 dev->summary_indicator =
668                     get_indicator(thinint.summary_indicator, sizeof(uint8_t));
669                 dev->indicators =
670                     get_indicator(thinint.device_indicator,
671                                   thinint.ind_bit / 8 + 1);
672                 dev->thinint_isc = thinint.isc;
673                 dev->routes.adapter.ind_offset = thinint.ind_bit;
674                 dev->routes.adapter.summary_offset = 7;
675                 dev->routes.adapter.adapter_id = css_get_adapter_id(
676                                                  CSS_IO_ADAPTER_VIRTIO,
677                                                  dev->thinint_isc);
678                 sch->thinint_active = ((dev->indicators != NULL) &&
679                                        (dev->summary_indicator != NULL));
680                 sch->curr_status.scsw.count = ccw.count - sizeof(thinint);
681                 ret = 0;
682             }
683         }
684         break;
685     case CCW_CMD_SET_VIRTIO_REV:
686         len = sizeof(revinfo);
687         if (ccw.count < len) {
688             ret = -EINVAL;
689             break;
690         }
691         if (!ccw.cda) {
692             ret = -EFAULT;
693             break;
694         }
695         ret = ccw_dstream_read_buf(&sch->cds, &revinfo, 4);
696         if (ret < 0) {
697             break;
698         }
699         revinfo.revision = be16_to_cpu(revinfo.revision);
700         revinfo.length = be16_to_cpu(revinfo.length);
701         if (ccw.count < len + revinfo.length ||
702             (check_len && ccw.count > len + revinfo.length)) {
703             ret = -EINVAL;
704             break;
705         }
706         /*
707          * Once we start to support revisions with additional data, we'll
708          * need to fetch it here. Nothing to do for now, though.
709          */
710         if (dev->revision >= 0 ||
711             revinfo.revision > virtio_ccw_rev_max(dev) ||
712             (dev->force_revision_1 && !revinfo.revision)) {
713             ret = -ENOSYS;
714             break;
715         }
716         ret = 0;
717         dev->revision = revinfo.revision;
718         break;
719     default:
720         ret = -ENOSYS;
721         break;
722     }
723     return ret;
724 }
725 
virtio_sch_disable_cb(SubchDev * sch)726 static void virtio_sch_disable_cb(SubchDev *sch)
727 {
728     VirtioCcwDevice *dev = sch->driver_data;
729 
730     dev->revision = -1;
731 }
732 
virtio_ccw_device_realize(VirtioCcwDevice * dev,Error ** errp)733 static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
734 {
735     VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
736     CcwDevice *ccw_dev = CCW_DEVICE(dev);
737     CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);
738     SubchDev *sch;
739     Error *err = NULL;
740     int i;
741 
742     sch = css_create_sch(ccw_dev->devno, errp);
743     if (!sch) {
744         return;
745     }
746     if (!virtio_ccw_rev_max(dev) && dev->force_revision_1) {
747         error_setg(&err, "Invalid value of property max_rev "
748                    "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
749         goto out_err;
750     }
751 
752     sch->driver_data = dev;
753     sch->ccw_cb = virtio_ccw_cb;
754     sch->disable_cb = virtio_sch_disable_cb;
755     sch->id.reserved = 0xff;
756     sch->id.cu_type = VIRTIO_CCW_CU_TYPE;
757     sch->do_subchannel_work = do_subchannel_work_virtual;
758     sch->irb_cb = build_irb_virtual;
759     ccw_dev->sch = sch;
760     dev->indicators = NULL;
761     dev->revision = -1;
762     for (i = 0; i < ADAPTER_ROUTES_MAX_GSI; i++) {
763         dev->routes.gsi[i] = -1;
764     }
765     css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE);
766 
767     trace_virtio_ccw_new_device(
768         sch->cssid, sch->ssid, sch->schid, sch->devno,
769         ccw_dev->devno.valid ? "user-configured" : "auto-configured");
770 
771     /* fd-based ioevents can't be synchronized in record/replay */
772     if (replay_mode != REPLAY_MODE_NONE) {
773         dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
774     }
775 
776     if (k->realize) {
777         k->realize(dev, &err);
778         if (err) {
779             goto out_err;
780         }
781     }
782 
783     ck->realize(ccw_dev, &err);
784     if (err) {
785         goto out_err;
786     }
787 
788     return;
789 
790 out_err:
791     error_propagate(errp, err);
792     css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
793     ccw_dev->sch = NULL;
794     g_free(sch);
795 }
796 
virtio_ccw_device_unrealize(VirtioCcwDevice * dev)797 static void virtio_ccw_device_unrealize(VirtioCcwDevice *dev)
798 {
799     VirtIOCCWDeviceClass *dc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
800     CcwDevice *ccw_dev = CCW_DEVICE(dev);
801     SubchDev *sch = ccw_dev->sch;
802 
803     if (dc->unrealize) {
804         dc->unrealize(dev);
805     }
806 
807     if (sch) {
808         css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
809         g_free(sch);
810         ccw_dev->sch = NULL;
811     }
812     if (dev->indicators) {
813         release_indicator(&dev->routes.adapter, dev->indicators);
814         dev->indicators = NULL;
815     }
816 }
817 
818 /* DeviceState to VirtioCcwDevice. Note: used on datapath,
819  * be careful and test performance if you change this.
820  */
to_virtio_ccw_dev_fast(DeviceState * d)821 static inline VirtioCcwDevice *to_virtio_ccw_dev_fast(DeviceState *d)
822 {
823     CcwDevice *ccw_dev = to_ccw_dev_fast(d);
824 
825     return container_of(ccw_dev, VirtioCcwDevice, parent_obj);
826 }
827 
virtio_set_ind_atomic(SubchDev * sch,uint64_t ind_loc,uint8_t to_be_set)828 static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc,
829                                      uint8_t to_be_set)
830 {
831     uint8_t expected, actual;
832     hwaddr len = 1;
833     /* avoid  multiple fetches */
834     uint8_t volatile *ind_addr;
835 
836     ind_addr = cpu_physical_memory_map(ind_loc, &len, true);
837     if (!ind_addr) {
838         error_report("%s(%x.%x.%04x): unable to access indicator",
839                      __func__, sch->cssid, sch->ssid, sch->schid);
840         return -1;
841     }
842     actual = *ind_addr;
843     do {
844         expected = actual;
845         actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set);
846     } while (actual != expected);
847     trace_virtio_ccw_set_ind(ind_loc, actual, actual | to_be_set);
848     cpu_physical_memory_unmap((void *)ind_addr, len, 1, len);
849 
850     return actual;
851 }
852 
virtio_ccw_notify(DeviceState * d,uint16_t vector)853 static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
854 {
855     VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d);
856     CcwDevice *ccw_dev = to_ccw_dev_fast(d);
857     SubchDev *sch = ccw_dev->sch;
858     uint64_t indicators;
859 
860     if (vector == VIRTIO_NO_VECTOR) {
861         return;
862     }
863     /*
864      * vector < VIRTIO_QUEUE_MAX: notification for a virtqueue
865      * vector == VIRTIO_QUEUE_MAX: configuration change notification
866      * bits beyond that are unused and should never be notified for
867      */
868     assert(vector <= VIRTIO_QUEUE_MAX);
869 
870     if (vector < VIRTIO_QUEUE_MAX) {
871         if (!dev->indicators) {
872             return;
873         }
874         if (sch->thinint_active) {
875             /*
876              * In the adapter interrupt case, indicators points to a
877              * memory area that may be (way) larger than 64 bit and
878              * ind_bit indicates the start of the indicators in a big
879              * endian notation.
880              */
881             uint64_t ind_bit = dev->routes.adapter.ind_offset;
882 
883             virtio_set_ind_atomic(sch, dev->indicators->addr +
884                                   (ind_bit + vector) / 8,
885                                   0x80 >> ((ind_bit + vector) % 8));
886             if (!virtio_set_ind_atomic(sch, dev->summary_indicator->addr,
887                                        0x01)) {
888                 css_adapter_interrupt(CSS_IO_ADAPTER_VIRTIO, dev->thinint_isc);
889             }
890         } else {
891             assert(vector < NR_CLASSIC_INDICATOR_BITS);
892             indicators = address_space_ldq(&address_space_memory,
893                                            dev->indicators->addr,
894                                            MEMTXATTRS_UNSPECIFIED,
895                                            NULL);
896             indicators |= 1ULL << vector;
897             address_space_stq(&address_space_memory, dev->indicators->addr,
898                               indicators, MEMTXATTRS_UNSPECIFIED, NULL);
899             css_conditional_io_interrupt(sch);
900         }
901     } else {
902         if (!dev->indicators2) {
903             return;
904         }
905         indicators = address_space_ldq(&address_space_memory,
906                                        dev->indicators2->addr,
907                                        MEMTXATTRS_UNSPECIFIED,
908                                        NULL);
909         indicators |= 1ULL;
910         address_space_stq(&address_space_memory, dev->indicators2->addr,
911                           indicators, MEMTXATTRS_UNSPECIFIED, NULL);
912         css_conditional_io_interrupt(sch);
913     }
914 }
915 
virtio_ccw_reset(DeviceState * d)916 static void virtio_ccw_reset(DeviceState *d)
917 {
918     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
919     VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
920 
921     virtio_ccw_reset_virtio(dev);
922     if (vdc->parent_reset) {
923         vdc->parent_reset(d);
924     }
925 }
926 
virtio_ccw_vmstate_change(DeviceState * d,bool running)927 static void virtio_ccw_vmstate_change(DeviceState *d, bool running)
928 {
929     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
930 
931     if (running) {
932         virtio_ccw_start_ioeventfd(dev);
933     } else {
934         virtio_ccw_stop_ioeventfd(dev);
935     }
936 }
937 
virtio_ccw_query_guest_notifiers(DeviceState * d)938 static bool virtio_ccw_query_guest_notifiers(DeviceState *d)
939 {
940     CcwDevice *dev = CCW_DEVICE(d);
941 
942     return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA);
943 }
944 
virtio_ccw_get_mappings(VirtioCcwDevice * dev)945 static int virtio_ccw_get_mappings(VirtioCcwDevice *dev)
946 {
947     int r;
948     CcwDevice *ccw_dev = CCW_DEVICE(dev);
949 
950     if (!ccw_dev->sch->thinint_active) {
951         return -EINVAL;
952     }
953 
954     r = map_indicator(&dev->routes.adapter, dev->summary_indicator);
955     if (r) {
956         return r;
957     }
958     r = map_indicator(&dev->routes.adapter, dev->indicators);
959     if (r) {
960         return r;
961     }
962     dev->routes.adapter.summary_addr = dev->summary_indicator->map;
963     dev->routes.adapter.ind_addr = dev->indicators->map;
964 
965     return 0;
966 }
967 
virtio_ccw_setup_irqroutes(VirtioCcwDevice * dev,int nvqs)968 static int virtio_ccw_setup_irqroutes(VirtioCcwDevice *dev, int nvqs)
969 {
970     int i;
971     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
972     int ret;
973     S390FLICState *fs = s390_get_flic();
974     S390FLICStateClass *fsc = s390_get_flic_class(fs);
975 
976     ret = virtio_ccw_get_mappings(dev);
977     if (ret) {
978         return ret;
979     }
980     for (i = 0; i < nvqs; i++) {
981         if (!virtio_queue_get_num(vdev, i)) {
982             break;
983         }
984     }
985     dev->routes.num_routes = i;
986     return fsc->add_adapter_routes(fs, &dev->routes);
987 }
988 
virtio_ccw_release_irqroutes(VirtioCcwDevice * dev,int nvqs)989 static void virtio_ccw_release_irqroutes(VirtioCcwDevice *dev, int nvqs)
990 {
991     S390FLICState *fs = s390_get_flic();
992     S390FLICStateClass *fsc = s390_get_flic_class(fs);
993 
994     fsc->release_adapter_routes(fs, &dev->routes);
995 }
996 
virtio_ccw_add_irqfd(VirtioCcwDevice * dev,int n)997 static int virtio_ccw_add_irqfd(VirtioCcwDevice *dev, int n)
998 {
999     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1000     VirtQueue *vq = virtio_get_queue(vdev, n);
1001     EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
1002 
1003     return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, notifier, NULL,
1004                                               dev->routes.gsi[n]);
1005 }
1006 
virtio_ccw_remove_irqfd(VirtioCcwDevice * dev,int n)1007 static void virtio_ccw_remove_irqfd(VirtioCcwDevice *dev, int n)
1008 {
1009     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1010     VirtQueue *vq = virtio_get_queue(vdev, n);
1011     EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
1012     int ret;
1013 
1014     ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, notifier,
1015                                                 dev->routes.gsi[n]);
1016     assert(ret == 0);
1017 }
1018 
virtio_ccw_set_guest_notifier(VirtioCcwDevice * dev,int n,bool assign,bool with_irqfd)1019 static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
1020                                          bool assign, bool with_irqfd)
1021 {
1022     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1023     VirtQueue *vq = virtio_get_queue(vdev, n);
1024     EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
1025     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1026 
1027     if (assign) {
1028         int r = event_notifier_init(notifier, 0);
1029 
1030         if (r < 0) {
1031             return r;
1032         }
1033         virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
1034         if (with_irqfd) {
1035             r = virtio_ccw_add_irqfd(dev, n);
1036             if (r) {
1037                 virtio_queue_set_guest_notifier_fd_handler(vq, false,
1038                                                            with_irqfd);
1039                 return r;
1040             }
1041         }
1042         /*
1043          * We do not support individual masking for channel devices, so we
1044          * need to manually trigger any guest masking callbacks here.
1045          */
1046         if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
1047             k->guest_notifier_mask(vdev, n, false);
1048         }
1049         /* get lost events and re-inject */
1050         if (k->guest_notifier_pending &&
1051             k->guest_notifier_pending(vdev, n)) {
1052             event_notifier_set(notifier);
1053         }
1054     } else {
1055         if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
1056             k->guest_notifier_mask(vdev, n, true);
1057         }
1058         if (with_irqfd) {
1059             virtio_ccw_remove_irqfd(dev, n);
1060         }
1061         virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
1062         event_notifier_cleanup(notifier);
1063     }
1064     return 0;
1065 }
1066 
virtio_ccw_set_guest_notifiers(DeviceState * d,int nvqs,bool assigned)1067 static int virtio_ccw_set_guest_notifiers(DeviceState *d, int nvqs,
1068                                           bool assigned)
1069 {
1070     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1071     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1072     CcwDevice *ccw_dev = CCW_DEVICE(d);
1073     bool with_irqfd = ccw_dev->sch->thinint_active && kvm_irqfds_enabled();
1074     int r, n;
1075 
1076     if (with_irqfd && assigned) {
1077         /* irq routes need to be set up before assigning irqfds */
1078         r = virtio_ccw_setup_irqroutes(dev, nvqs);
1079         if (r < 0) {
1080             goto irqroute_error;
1081         }
1082     }
1083     for (n = 0; n < nvqs; n++) {
1084         if (!virtio_queue_get_num(vdev, n)) {
1085             break;
1086         }
1087         r = virtio_ccw_set_guest_notifier(dev, n, assigned, with_irqfd);
1088         if (r < 0) {
1089             goto assign_error;
1090         }
1091     }
1092     if (with_irqfd && !assigned) {
1093         /* release irq routes after irqfds have been released */
1094         virtio_ccw_release_irqroutes(dev, nvqs);
1095     }
1096     return 0;
1097 
1098 assign_error:
1099     while (--n >= 0) {
1100         virtio_ccw_set_guest_notifier(dev, n, !assigned, false);
1101     }
1102 irqroute_error:
1103     if (with_irqfd && assigned) {
1104         virtio_ccw_release_irqroutes(dev, nvqs);
1105     }
1106     return r;
1107 }
1108 
virtio_ccw_save_queue(DeviceState * d,int n,QEMUFile * f)1109 static void virtio_ccw_save_queue(DeviceState *d, int n, QEMUFile *f)
1110 {
1111     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1112     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1113 
1114     qemu_put_be16(f, virtio_queue_vector(vdev, n));
1115 }
1116 
virtio_ccw_load_queue(DeviceState * d,int n,QEMUFile * f)1117 static int virtio_ccw_load_queue(DeviceState *d, int n, QEMUFile *f)
1118 {
1119     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1120     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1121     uint16_t vector;
1122 
1123     qemu_get_be16s(f, &vector);
1124     virtio_queue_set_vector(vdev, n , vector);
1125 
1126     return 0;
1127 }
1128 
virtio_ccw_save_config(DeviceState * d,QEMUFile * f)1129 static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f)
1130 {
1131     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1132     vmstate_save_state(f, &vmstate_virtio_ccw_dev, dev, NULL);
1133 }
1134 
virtio_ccw_load_config(DeviceState * d,QEMUFile * f)1135 static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f)
1136 {
1137     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1138     return vmstate_load_state(f, &vmstate_virtio_ccw_dev, dev, 1);
1139 }
1140 
virtio_ccw_pre_plugged(DeviceState * d,Error ** errp)1141 static void virtio_ccw_pre_plugged(DeviceState *d, Error **errp)
1142 {
1143    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1144    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1145 
1146     if (dev->max_rev >= 1) {
1147         virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
1148     }
1149 }
1150 
1151 /* This is called by virtio-bus just after the device is plugged. */
virtio_ccw_device_plugged(DeviceState * d,Error ** errp)1152 static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
1153 {
1154     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1155     VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1156     CcwDevice *ccw_dev = CCW_DEVICE(d);
1157     SubchDev *sch = ccw_dev->sch;
1158     int n = virtio_get_num_queues(vdev);
1159     S390FLICState *flic = s390_get_flic();
1160 
1161     if (!virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
1162         dev->max_rev = 0;
1163     }
1164 
1165     if (!virtio_ccw_rev_max(dev) && !virtio_legacy_allowed(vdev)) {
1166         /*
1167          * To avoid migration issues, we allow legacy mode when legacy
1168          * check is disabled in the old machine types (< 5.1).
1169          */
1170         if (virtio_legacy_check_disabled(vdev)) {
1171             warn_report("device requires revision >= 1, but for backward "
1172                         "compatibility max_revision=0 is allowed");
1173         } else {
1174             error_setg(errp, "Invalid value of property max_rev "
1175                        "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
1176             return;
1177         }
1178     }
1179 
1180     if (virtio_get_num_queues(vdev) > VIRTIO_QUEUE_MAX) {
1181         error_setg(errp, "The number of virtqueues %d "
1182                    "exceeds virtio limit %d", n,
1183                    VIRTIO_QUEUE_MAX);
1184         return;
1185     }
1186     if (virtio_get_num_queues(vdev) > flic->adapter_routes_max_batch) {
1187         error_setg(errp, "The number of virtqueues %d "
1188                    "exceeds flic adapter route limit %d", n,
1189                    flic->adapter_routes_max_batch);
1190         return;
1191     }
1192 
1193     sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus);
1194 
1195 
1196     css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,
1197                           d->hotplugged, 1);
1198 }
1199 
virtio_ccw_device_unplugged(DeviceState * d)1200 static void virtio_ccw_device_unplugged(DeviceState *d)
1201 {
1202     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1203 
1204     virtio_ccw_stop_ioeventfd(dev);
1205 }
1206 /**************** Virtio-ccw Bus Device Descriptions *******************/
1207 
virtio_ccw_busdev_realize(DeviceState * dev,Error ** errp)1208 static void virtio_ccw_busdev_realize(DeviceState *dev, Error **errp)
1209 {
1210     VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;
1211 
1212     virtio_ccw_bus_new(&_dev->bus, sizeof(_dev->bus), _dev);
1213     virtio_ccw_device_realize(_dev, errp);
1214 }
1215 
virtio_ccw_busdev_unrealize(DeviceState * dev)1216 static void virtio_ccw_busdev_unrealize(DeviceState *dev)
1217 {
1218     VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;
1219 
1220     virtio_ccw_device_unrealize(_dev);
1221 }
1222 
virtio_ccw_busdev_unplug(HotplugHandler * hotplug_dev,DeviceState * dev,Error ** errp)1223 static void virtio_ccw_busdev_unplug(HotplugHandler *hotplug_dev,
1224                                      DeviceState *dev, Error **errp)
1225 {
1226     VirtioCcwDevice *_dev = to_virtio_ccw_dev_fast(dev);
1227 
1228     virtio_ccw_stop_ioeventfd(_dev);
1229 }
1230 
virtio_ccw_device_class_init(ObjectClass * klass,void * data)1231 static void virtio_ccw_device_class_init(ObjectClass *klass, void *data)
1232 {
1233     DeviceClass *dc = DEVICE_CLASS(klass);
1234     CCWDeviceClass *k = CCW_DEVICE_CLASS(dc);
1235     VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_CLASS(klass);
1236 
1237     k->unplug = virtio_ccw_busdev_unplug;
1238     dc->realize = virtio_ccw_busdev_realize;
1239     dc->unrealize = virtio_ccw_busdev_unrealize;
1240     device_class_set_parent_reset(dc, virtio_ccw_reset, &vdc->parent_reset);
1241 }
1242 
1243 static const TypeInfo virtio_ccw_device_info = {
1244     .name = TYPE_VIRTIO_CCW_DEVICE,
1245     .parent = TYPE_CCW_DEVICE,
1246     .instance_size = sizeof(VirtioCcwDevice),
1247     .class_init = virtio_ccw_device_class_init,
1248     .class_size = sizeof(VirtIOCCWDeviceClass),
1249     .abstract = true,
1250 };
1251 
1252 /* virtio-ccw-bus */
1253 
virtio_ccw_bus_new(VirtioBusState * bus,size_t bus_size,VirtioCcwDevice * dev)1254 static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
1255                                VirtioCcwDevice *dev)
1256 {
1257     DeviceState *qdev = DEVICE(dev);
1258     char virtio_bus_name[] = "virtio-bus";
1259 
1260     qbus_init(bus, bus_size, TYPE_VIRTIO_CCW_BUS, qdev, virtio_bus_name);
1261 }
1262 
virtio_ccw_bus_class_init(ObjectClass * klass,void * data)1263 static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
1264 {
1265     VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
1266     BusClass *bus_class = BUS_CLASS(klass);
1267 
1268     bus_class->max_dev = 1;
1269     k->notify = virtio_ccw_notify;
1270     k->vmstate_change = virtio_ccw_vmstate_change;
1271     k->query_guest_notifiers = virtio_ccw_query_guest_notifiers;
1272     k->set_guest_notifiers = virtio_ccw_set_guest_notifiers;
1273     k->save_queue = virtio_ccw_save_queue;
1274     k->load_queue = virtio_ccw_load_queue;
1275     k->save_config = virtio_ccw_save_config;
1276     k->load_config = virtio_ccw_load_config;
1277     k->pre_plugged = virtio_ccw_pre_plugged;
1278     k->device_plugged = virtio_ccw_device_plugged;
1279     k->device_unplugged = virtio_ccw_device_unplugged;
1280     k->ioeventfd_enabled = virtio_ccw_ioeventfd_enabled;
1281     k->ioeventfd_assign = virtio_ccw_ioeventfd_assign;
1282 }
1283 
1284 static const TypeInfo virtio_ccw_bus_info = {
1285     .name = TYPE_VIRTIO_CCW_BUS,
1286     .parent = TYPE_VIRTIO_BUS,
1287     .instance_size = sizeof(VirtioCcwBusState),
1288     .class_size = sizeof(VirtioCcwBusClass),
1289     .class_init = virtio_ccw_bus_class_init,
1290 };
1291 
virtio_ccw_register(void)1292 static void virtio_ccw_register(void)
1293 {
1294     type_register_static(&virtio_ccw_bus_info);
1295     type_register_static(&virtio_ccw_device_info);
1296 }
1297 
1298 type_init(virtio_ccw_register)
1299