xref: /openbmc/qemu/hw/s390x/css.c (revision 62dd4eda)
1 /*
2  * Channel subsystem base support.
3  *
4  * Copyright 2012 IBM Corp.
5  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6  *
7  * This work is licensed under the terms of the GNU GPL, version 2 or (at
8  * your option) any later version. See the COPYING file in the top-level
9  * directory.
10  */
11 
12 #include "qemu/osdep.h"
13 #include "qapi/error.h"
14 #include "qapi/visitor.h"
15 #include "hw/qdev.h"
16 #include "qemu/error-report.h"
17 #include "qemu/bitops.h"
18 #include "qemu/error-report.h"
19 #include "exec/address-spaces.h"
20 #include "cpu.h"
21 #include "hw/s390x/ioinst.h"
22 #include "hw/s390x/css.h"
23 #include "trace.h"
24 #include "hw/s390x/s390_flic.h"
25 #include "hw/s390x/s390-virtio-ccw.h"
26 
27 typedef struct CrwContainer {
28     CRW crw;
29     QTAILQ_ENTRY(CrwContainer) sibling;
30 } CrwContainer;
31 
32 static const VMStateDescription vmstate_crw = {
33     .name = "s390_crw",
34     .version_id = 1,
35     .minimum_version_id = 1,
36     .fields = (VMStateField[]) {
37         VMSTATE_UINT16(flags, CRW),
38         VMSTATE_UINT16(rsid, CRW),
39         VMSTATE_END_OF_LIST()
40     },
41 };
42 
43 static const VMStateDescription vmstate_crw_container = {
44     .name = "s390_crw_container",
45     .version_id = 1,
46     .minimum_version_id = 1,
47     .fields = (VMStateField[]) {
48         VMSTATE_STRUCT(crw, CrwContainer, 0, vmstate_crw, CRW),
49         VMSTATE_END_OF_LIST()
50     },
51 };
52 
53 typedef struct ChpInfo {
54     uint8_t in_use;
55     uint8_t type;
56     uint8_t is_virtual;
57 } ChpInfo;
58 
59 static const VMStateDescription vmstate_chp_info = {
60     .name = "s390_chp_info",
61     .version_id = 1,
62     .minimum_version_id = 1,
63     .fields = (VMStateField[]) {
64         VMSTATE_UINT8(in_use, ChpInfo),
65         VMSTATE_UINT8(type, ChpInfo),
66         VMSTATE_UINT8(is_virtual, ChpInfo),
67         VMSTATE_END_OF_LIST()
68     }
69 };
70 
71 typedef struct SubchSet {
72     SubchDev *sch[MAX_SCHID + 1];
73     unsigned long schids_used[BITS_TO_LONGS(MAX_SCHID + 1)];
74     unsigned long devnos_used[BITS_TO_LONGS(MAX_SCHID + 1)];
75 } SubchSet;
76 
77 static const VMStateDescription vmstate_scsw = {
78     .name = "s390_scsw",
79     .version_id = 1,
80     .minimum_version_id = 1,
81     .fields = (VMStateField[]) {
82         VMSTATE_UINT16(flags, SCSW),
83         VMSTATE_UINT16(ctrl, SCSW),
84         VMSTATE_UINT32(cpa, SCSW),
85         VMSTATE_UINT8(dstat, SCSW),
86         VMSTATE_UINT8(cstat, SCSW),
87         VMSTATE_UINT16(count, SCSW),
88         VMSTATE_END_OF_LIST()
89     }
90 };
91 
92 static const VMStateDescription vmstate_pmcw = {
93     .name = "s390_pmcw",
94     .version_id = 1,
95     .minimum_version_id = 1,
96     .fields = (VMStateField[]) {
97         VMSTATE_UINT32(intparm, PMCW),
98         VMSTATE_UINT16(flags, PMCW),
99         VMSTATE_UINT16(devno, PMCW),
100         VMSTATE_UINT8(lpm, PMCW),
101         VMSTATE_UINT8(pnom, PMCW),
102         VMSTATE_UINT8(lpum, PMCW),
103         VMSTATE_UINT8(pim, PMCW),
104         VMSTATE_UINT16(mbi, PMCW),
105         VMSTATE_UINT8(pom, PMCW),
106         VMSTATE_UINT8(pam, PMCW),
107         VMSTATE_UINT8_ARRAY(chpid, PMCW, 8),
108         VMSTATE_UINT32(chars, PMCW),
109         VMSTATE_END_OF_LIST()
110     }
111 };
112 
113 static const VMStateDescription vmstate_schib = {
114     .name = "s390_schib",
115     .version_id = 1,
116     .minimum_version_id = 1,
117     .fields = (VMStateField[]) {
118         VMSTATE_STRUCT(pmcw, SCHIB, 0, vmstate_pmcw, PMCW),
119         VMSTATE_STRUCT(scsw, SCHIB, 0, vmstate_scsw, SCSW),
120         VMSTATE_UINT64(mba, SCHIB),
121         VMSTATE_UINT8_ARRAY(mda, SCHIB, 4),
122         VMSTATE_END_OF_LIST()
123     }
124 };
125 
126 
127 static const VMStateDescription vmstate_ccw1 = {
128     .name = "s390_ccw1",
129     .version_id = 1,
130     .minimum_version_id = 1,
131     .fields = (VMStateField[]) {
132         VMSTATE_UINT8(cmd_code, CCW1),
133         VMSTATE_UINT8(flags, CCW1),
134         VMSTATE_UINT16(count, CCW1),
135         VMSTATE_UINT32(cda, CCW1),
136         VMSTATE_END_OF_LIST()
137     }
138 };
139 
140 static const VMStateDescription vmstate_ciw = {
141     .name = "s390_ciw",
142     .version_id = 1,
143     .minimum_version_id = 1,
144     .fields = (VMStateField[]) {
145         VMSTATE_UINT8(type, CIW),
146         VMSTATE_UINT8(command, CIW),
147         VMSTATE_UINT16(count, CIW),
148         VMSTATE_END_OF_LIST()
149     }
150 };
151 
152 static const VMStateDescription vmstate_sense_id = {
153     .name = "s390_sense_id",
154     .version_id = 1,
155     .minimum_version_id = 1,
156     .fields = (VMStateField[]) {
157         VMSTATE_UINT8(reserved, SenseId),
158         VMSTATE_UINT16(cu_type, SenseId),
159         VMSTATE_UINT8(cu_model, SenseId),
160         VMSTATE_UINT16(dev_type, SenseId),
161         VMSTATE_UINT8(dev_model, SenseId),
162         VMSTATE_UINT8(unused, SenseId),
163         VMSTATE_STRUCT_ARRAY(ciw, SenseId, MAX_CIWS, 0, vmstate_ciw, CIW),
164         VMSTATE_END_OF_LIST()
165     }
166 };
167 
168 static const VMStateDescription vmstate_orb = {
169     .name = "s390_orb",
170     .version_id = 1,
171     .minimum_version_id = 1,
172     .fields = (VMStateField[]) {
173         VMSTATE_UINT32(intparm, ORB),
174         VMSTATE_UINT16(ctrl0, ORB),
175         VMSTATE_UINT8(lpm, ORB),
176         VMSTATE_UINT8(ctrl1, ORB),
177         VMSTATE_UINT32(cpa, ORB),
178         VMSTATE_END_OF_LIST()
179     }
180 };
181 
182 static bool vmstate_schdev_orb_needed(void *opaque)
183 {
184     return css_migration_enabled();
185 }
186 
187 static const VMStateDescription vmstate_schdev_orb = {
188     .name = "s390_subch_dev/orb",
189     .version_id = 1,
190     .minimum_version_id = 1,
191     .needed = vmstate_schdev_orb_needed,
192     .fields = (VMStateField[]) {
193         VMSTATE_STRUCT(orb, SubchDev, 1, vmstate_orb, ORB),
194         VMSTATE_END_OF_LIST()
195     }
196 };
197 
198 static int subch_dev_post_load(void *opaque, int version_id);
199 static int subch_dev_pre_save(void *opaque);
200 
201 const char err_hint_devno[] = "Devno mismatch, tried to load wrong section!"
202     " Likely reason: some sequences of plug and unplug  can break"
203     " migration for machine versions prior to  2.7 (known design flaw).";
204 
205 const VMStateDescription vmstate_subch_dev = {
206     .name = "s390_subch_dev",
207     .version_id = 1,
208     .minimum_version_id = 1,
209     .post_load = subch_dev_post_load,
210     .pre_save = subch_dev_pre_save,
211     .fields = (VMStateField[]) {
212         VMSTATE_UINT8_EQUAL(cssid, SubchDev, "Bug!"),
213         VMSTATE_UINT8_EQUAL(ssid, SubchDev, "Bug!"),
214         VMSTATE_UINT16(migrated_schid, SubchDev),
215         VMSTATE_UINT16_EQUAL(devno, SubchDev, err_hint_devno),
216         VMSTATE_BOOL(thinint_active, SubchDev),
217         VMSTATE_STRUCT(curr_status, SubchDev, 0, vmstate_schib, SCHIB),
218         VMSTATE_UINT8_ARRAY(sense_data, SubchDev, 32),
219         VMSTATE_UINT64(channel_prog, SubchDev),
220         VMSTATE_STRUCT(last_cmd, SubchDev, 0, vmstate_ccw1, CCW1),
221         VMSTATE_BOOL(last_cmd_valid, SubchDev),
222         VMSTATE_STRUCT(id, SubchDev, 0, vmstate_sense_id, SenseId),
223         VMSTATE_BOOL(ccw_fmt_1, SubchDev),
224         VMSTATE_UINT8(ccw_no_data_cnt, SubchDev),
225         VMSTATE_END_OF_LIST()
226     },
227     .subsections = (const VMStateDescription * []) {
228         &vmstate_schdev_orb,
229         NULL
230     }
231 };
232 
233 typedef struct IndAddrPtrTmp {
234     IndAddr **parent;
235     uint64_t addr;
236     int32_t len;
237 } IndAddrPtrTmp;
238 
239 static int post_load_ind_addr(void *opaque, int version_id)
240 {
241     IndAddrPtrTmp *ptmp = opaque;
242     IndAddr **ind_addr = ptmp->parent;
243 
244     if (ptmp->len != 0) {
245         *ind_addr = get_indicator(ptmp->addr, ptmp->len);
246     } else {
247         *ind_addr = NULL;
248     }
249     return 0;
250 }
251 
252 static int pre_save_ind_addr(void *opaque)
253 {
254     IndAddrPtrTmp *ptmp = opaque;
255     IndAddr *ind_addr = *(ptmp->parent);
256 
257     if (ind_addr != NULL) {
258         ptmp->len = ind_addr->len;
259         ptmp->addr = ind_addr->addr;
260     } else {
261         ptmp->len = 0;
262         ptmp->addr = 0L;
263     }
264 
265     return 0;
266 }
267 
268 const VMStateDescription vmstate_ind_addr_tmp = {
269     .name = "s390_ind_addr_tmp",
270     .pre_save = pre_save_ind_addr,
271     .post_load = post_load_ind_addr,
272 
273     .fields = (VMStateField[]) {
274         VMSTATE_INT32(len, IndAddrPtrTmp),
275         VMSTATE_UINT64(addr, IndAddrPtrTmp),
276         VMSTATE_END_OF_LIST()
277     }
278 };
279 
280 const VMStateDescription vmstate_ind_addr = {
281     .name = "s390_ind_addr_tmp",
282     .fields = (VMStateField[]) {
283         VMSTATE_WITH_TMP(IndAddr*, IndAddrPtrTmp, vmstate_ind_addr_tmp),
284         VMSTATE_END_OF_LIST()
285     }
286 };
287 
288 typedef struct CssImage {
289     SubchSet *sch_set[MAX_SSID + 1];
290     ChpInfo chpids[MAX_CHPID + 1];
291 } CssImage;
292 
293 static const VMStateDescription vmstate_css_img = {
294     .name = "s390_css_img",
295     .version_id = 1,
296     .minimum_version_id = 1,
297     .fields = (VMStateField[]) {
298         /* Subchannel sets have no relevant state. */
299         VMSTATE_STRUCT_ARRAY(chpids, CssImage, MAX_CHPID + 1, 0,
300                              vmstate_chp_info, ChpInfo),
301         VMSTATE_END_OF_LIST()
302     }
303 
304 };
305 
306 typedef struct IoAdapter {
307     uint32_t id;
308     uint8_t type;
309     uint8_t isc;
310     uint8_t flags;
311 } IoAdapter;
312 
313 typedef struct ChannelSubSys {
314     QTAILQ_HEAD(, CrwContainer) pending_crws;
315     bool sei_pending;
316     bool do_crw_mchk;
317     bool crws_lost;
318     uint8_t max_cssid;
319     uint8_t max_ssid;
320     bool chnmon_active;
321     uint64_t chnmon_area;
322     CssImage *css[MAX_CSSID + 1];
323     uint8_t default_cssid;
324     /* don't migrate, see css_register_io_adapters */
325     IoAdapter *io_adapters[CSS_IO_ADAPTER_TYPE_NUMS][MAX_ISC + 1];
326     /* don't migrate, see get_indicator and IndAddrPtrTmp */
327     QTAILQ_HEAD(, IndAddr) indicator_addresses;
328 } ChannelSubSys;
329 
330 static const VMStateDescription vmstate_css = {
331     .name = "s390_css",
332     .version_id = 1,
333     .minimum_version_id = 1,
334     .fields = (VMStateField[]) {
335         VMSTATE_QTAILQ_V(pending_crws, ChannelSubSys, 1, vmstate_crw_container,
336                          CrwContainer, sibling),
337         VMSTATE_BOOL(sei_pending, ChannelSubSys),
338         VMSTATE_BOOL(do_crw_mchk, ChannelSubSys),
339         VMSTATE_BOOL(crws_lost, ChannelSubSys),
340         /* These were kind of migrated by virtio */
341         VMSTATE_UINT8(max_cssid, ChannelSubSys),
342         VMSTATE_UINT8(max_ssid, ChannelSubSys),
343         VMSTATE_BOOL(chnmon_active, ChannelSubSys),
344         VMSTATE_UINT64(chnmon_area, ChannelSubSys),
345         VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(css, ChannelSubSys, MAX_CSSID + 1,
346                 0, vmstate_css_img, CssImage),
347         VMSTATE_UINT8(default_cssid, ChannelSubSys),
348         VMSTATE_END_OF_LIST()
349     }
350 };
351 
352 static ChannelSubSys channel_subsys = {
353     .pending_crws = QTAILQ_HEAD_INITIALIZER(channel_subsys.pending_crws),
354     .do_crw_mchk = true,
355     .sei_pending = false,
356     .crws_lost = false,
358     .chnmon_active = false,
359     .indicator_addresses =
360         QTAILQ_HEAD_INITIALIZER(channel_subsys.indicator_addresses),
361 };
362 
363 static int subch_dev_pre_save(void *opaque)
364 {
365     SubchDev *s = opaque;
366 
367     /* Prepare migrated_schid for save */
368     s->migrated_schid = s->schid;
369 
370     return 0;
371 }
372 
373 static int subch_dev_post_load(void *opaque, int version_id)
374 {
375 
376     SubchDev *s = opaque;
377 
378     /* Re-assign the subchannel to migrated_schid if necessary */
379     if (s->migrated_schid != s->schid) {
380         if (css_find_subch(true, s->cssid, s->ssid, s->schid) == s) {
381             /*
382              * Clean up the slot before moving to s->migrated_schid, provided
383              * it still belongs to us, i.e. it was not changed by a previous
384              * invocation of this function.
385              */
386             css_subch_assign(s->cssid, s->ssid, s->schid, s->devno, NULL);
387         }
388         /* It's OK to re-assign without a prior de-assign. */
389         s->schid = s->migrated_schid;
390         css_subch_assign(s->cssid, s->ssid, s->schid, s->devno, s);
391     }
392 
393     if (css_migration_enabled()) {
394         /* No compat voodoo to do ;) */
395         return 0;
396     }
397     /*
398      * Hack alert. If we don't migrate the channel subsystem status,
399      * we still need to find out whether the guest enabled mss/mcss-e.
400      * If the subchannel is enabled, the guest certainly was able to
401      * access it, so adjust max_ssid/max_cssid for the relevant
402      * ssid/cssid values. This is not watertight, but better than nothing.
403      */
404     if (s->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA) {
405         if (s->ssid) {
406             channel_subsys.max_ssid = MAX_SSID;
407         }
408         if (s->cssid != channel_subsys.default_cssid) {
409             channel_subsys.max_cssid = MAX_CSSID;
410         }
411     }
412     return 0;
413 }
414 
415 void css_register_vmstate(void)
416 {
417     vmstate_register(NULL, 0, &vmstate_css, &channel_subsys);
418 }
419 
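/*
 * Look up the indicator for a guest address, creating a new
 * reference-counted entry if none exists yet. Note that reuse is based
 * on the address alone; the recorded length is the one passed when the
 * indicator was first created.
 */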
420 IndAddr *get_indicator(hwaddr ind_addr, int len)
421 {
422     IndAddr *indicator;
423 
424     QTAILQ_FOREACH(indicator, &channel_subsys.indicator_addresses, sibling) {
425         if (indicator->addr == ind_addr) {
426             indicator->refcnt++;
427             return indicator;
428         }
429     }
430     indicator = g_new0(IndAddr, 1);
431     indicator->addr = ind_addr;
432     indicator->len = len;
433     indicator->refcnt = 1;
434     QTAILQ_INSERT_TAIL(&channel_subsys.indicator_addresses,
435                        indicator, sibling);
436     return indicator;
437 }
438 
439 static int s390_io_adapter_map(AdapterInfo *adapter, uint64_t map_addr,
440                                bool do_map)
441 {
442     S390FLICState *fs = s390_get_flic();
443     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
444 
445     return fsc->io_adapter_map(fs, adapter->adapter_id, map_addr, do_map);
446 }
447 
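/*
 * Drop a reference to an indicator. When the last reference is gone, the
 * indicator is removed from the list, unmapped from the adapter if it had
 * been mapped, and freed.
 */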
448 void release_indicator(AdapterInfo *adapter, IndAddr *indicator)
449 {
450     assert(indicator->refcnt > 0);
451     indicator->refcnt--;
452     if (indicator->refcnt > 0) {
453         return;
454     }
455     QTAILQ_REMOVE(&channel_subsys.indicator_addresses, indicator, sibling);
456     if (indicator->map) {
457         s390_io_adapter_map(adapter, indicator->map, false);
458     }
459     g_free(indicator);
460 }
461 
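/*
 * Map an indicator through the flic's io_adapter_map callback. Mapping an
 * already mapped indicator is not an error, and a missing implementation
 * (-ENOSYS) is tolerated.
 */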
462 int map_indicator(AdapterInfo *adapter, IndAddr *indicator)
463 {
464     int ret;
465 
466     if (indicator->map) {
467         return 0; /* already mapped is not an error */
468     }
469     indicator->map = indicator->addr;
470     ret = s390_io_adapter_map(adapter, indicator->map, true);
471     if ((ret != 0) && (ret != -ENOSYS)) {
472         goto out_err;
473     }
474     return 0;
475 
476 out_err:
477     indicator->map = 0;
478     return ret;
479 }
480 
481 int css_create_css_image(uint8_t cssid, bool default_image)
482 {
483     trace_css_new_image(cssid, default_image ? "(default)" : "");
484     /* 255 is reserved */
485     if (cssid == 255) {
486         return -EINVAL;
487     }
488     if (channel_subsys.css[cssid]) {
489         return -EBUSY;
490     }
491     channel_subsys.css[cssid] = g_malloc0(sizeof(CssImage));
492     if (default_image) {
493         channel_subsys.default_cssid = cssid;
494     }
495     return 0;
496 }
497 
498 uint32_t css_get_adapter_id(CssIoAdapterType type, uint8_t isc)
499 {
500     if (type >= CSS_IO_ADAPTER_TYPE_NUMS || isc > MAX_ISC ||
501         !channel_subsys.io_adapters[type][isc]) {
502         return -1;
503     }
504 
505     return channel_subsys.io_adapters[type][isc]->id;
506 }
507 
508 /**
509  * css_register_io_adapters: Register I/O adapters per ISC during init
510  *
511  * @swap: an indication if byte swap is needed.
512  * @maskable: an indication if the adapter is subject to the mask operation.
513  * @flags: further characteristics of the adapter, e.g. suppressible,
514  *         an indication of whether the adapter is subject to AIS.
515  * @errp: location to store error information.
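 *
 * Example (sketch only, hypothetical caller; the adapter type and flags
 * value are illustrative):
 *
 *     Error *local_err = NULL;
 *
 *     css_register_io_adapters(CSS_IO_ADAPTER_VIRTIO, true, false, 0,
 *                              &local_err);
 *     if (local_err) {
 *         error_report_err(local_err);
 *     }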
516  */
517 void css_register_io_adapters(CssIoAdapterType type, bool swap, bool maskable,
518                               uint8_t flags, Error **errp)
519 {
520     uint32_t id;
521     int ret, isc;
522     IoAdapter *adapter;
523     S390FLICState *fs = s390_get_flic();
524     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
525 
526     /*
527      * Disallow multiple registrations for the same device type.
528      * Report an error if registering for an already registered type.
529      */
530     if (channel_subsys.io_adapters[type][0]) {
531         error_setg(errp, "Adapters for type %d already registered", type);
532     }
533 
534     for (isc = 0; isc <= MAX_ISC; isc++) {
535         id = (type << 3) | isc;
536         ret = fsc->register_io_adapter(fs, id, isc, swap, maskable, flags);
537         if (ret == 0) {
538             adapter = g_new0(IoAdapter, 1);
539             adapter->id = id;
540             adapter->isc = isc;
541             adapter->type = type;
542             adapter->flags = flags;
543             channel_subsys.io_adapters[type][isc] = adapter;
544         } else {
545             error_setg_errno(errp, -ret, "Unexpected error %d when "
546                              "registering adapter %d", ret, id);
547             break;
548         }
549     }
550 
551     /*
552      * No need to free registered adapters in kvm: kvm will clean up
553      * when the machine goes away.
554      */
555     if (ret) {
556         for (isc--; isc >= 0; isc--) {
557             g_free(channel_subsys.io_adapters[type][isc]);
558             channel_subsys.io_adapters[type][isc] = NULL;
559         }
560     }
561 
562 }
563 
564 static void css_clear_io_interrupt(uint16_t subchannel_id,
565                                    uint16_t subchannel_nr)
566 {
567     Error *err = NULL;
568     static bool no_clear_irq;
569     S390FLICState *fs = s390_get_flic();
570     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
571     int r;
572 
573     if (unlikely(no_clear_irq)) {
574         return;
575     }
576     r = fsc->clear_io_irq(fs, subchannel_id, subchannel_nr);
577     switch (r) {
578     case 0:
579         break;
580     case -ENOSYS:
581         no_clear_irq = true;
582         /*
583          * Ignore unavailability, as the user can't do anything
584          * about it anyway.
585          */
586         break;
587     default:
588         error_setg_errno(&err, -r, "unexpected error condition");
589         error_propagate(&error_abort, err);
590     }
591 }
592 
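/*
 * Build the subchannel-identification word used when injecting I/O
 * interrupts. The low-order bit is always set to one; if the guest may
 * address multiple channel-subsystem images (max_cssid > 0), the cssid is
 * included as well.
 */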
593 static inline uint16_t css_do_build_subchannel_id(uint8_t cssid, uint8_t ssid)
594 {
595     if (channel_subsys.max_cssid > 0) {
596         return (cssid << 8) | (1 << 3) | (ssid << 1) | 1;
597     }
598     return (ssid << 1) | 1;
599 }
600 
601 uint16_t css_build_subchannel_id(SubchDev *sch)
602 {
603     return css_do_build_subchannel_id(sch->cssid, sch->ssid);
604 }
605 
606 void css_inject_io_interrupt(SubchDev *sch)
607 {
608     uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;
609 
610     trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
611                            sch->curr_status.pmcw.intparm, isc, "");
612     s390_io_interrupt(css_build_subchannel_id(sch),
613                       sch->schid,
614                       sch->curr_status.pmcw.intparm,
615                       isc << 27);
616 }
617 
618 void css_conditional_io_interrupt(SubchDev *sch)
619 {
620     /*
621      * If the subchannel is not currently status pending, make it pending
622      * with alert status.
623      */
624     if (!(sch->curr_status.scsw.ctrl & SCSW_STCTL_STATUS_PEND)) {
625         uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;
626 
627         trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
628                                sch->curr_status.pmcw.intparm, isc,
629                                "(unsolicited)");
630         sch->curr_status.scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
631         sch->curr_status.scsw.ctrl |=
632             SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
633         /* Inject an I/O interrupt. */
634         s390_io_interrupt(css_build_subchannel_id(sch),
635                           sch->schid,
636                           sch->curr_status.pmcw.intparm,
637                           isc << 27);
638     }
639 }
640 
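/*
 * Handle the SIC instruction used to control adapter-interruption
 * suppression: reject problem-state callers and invalid modes, then
 * forward the request to the flic's modify_ais_mode callback.
 */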
641 int css_do_sic(CPUS390XState *env, uint8_t isc, uint16_t mode)
642 {
643     S390FLICState *fs = s390_get_flic();
644     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
645     int r;
646 
647     if (env->psw.mask & PSW_MASK_PSTATE) {
648         r = -PGM_PRIVILEGED;
649         goto out;
650     }
651 
652     trace_css_do_sic(mode, isc);
653     switch (mode) {
654     case SIC_IRQ_MODE_ALL:
655     case SIC_IRQ_MODE_SINGLE:
656         break;
657     default:
658         r = -PGM_OPERAND;
659         goto out;
660     }
661 
662     r = fsc->modify_ais_mode(fs, isc, mode) ? -PGM_OPERATION : 0;
663 out:
664     return r;
665 }
666 
667 void css_adapter_interrupt(CssIoAdapterType type, uint8_t isc)
668 {
669     S390FLICState *fs = s390_get_flic();
670     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
671     uint32_t io_int_word = (isc << 27) | IO_INT_WORD_AI;
672     IoAdapter *adapter = channel_subsys.io_adapters[type][isc];
673 
674     if (!adapter) {
675         return;
676     }
677 
678     trace_css_adapter_interrupt(isc);
679     if (fs->ais_supported) {
680         if (fsc->inject_airq(fs, type, isc, adapter->flags)) {
681             error_report("Failed to inject airq with AIS supported");
682             exit(1);
683         }
684     } else {
685         s390_io_interrupt(0, 0, 0, io_int_word);
686     }
687 }
688 
689 static void sch_handle_clear_func(SubchDev *sch)
690 {
691     PMCW *p = &sch->curr_status.pmcw;
692     SCSW *s = &sch->curr_status.scsw;
693     int path;
694 
695     /* Path management: In our simple css, we always choose the only path. */
696     path = 0x80;
697 
698     /* Reset values prior to 'issuing the clear signal'. */
699     p->lpum = 0;
700     p->pom = 0xff;
701     s->flags &= ~SCSW_FLAGS_MASK_PNO;
702 
703     /* We always 'attempt to issue the clear signal', and we always succeed. */
704     sch->channel_prog = 0x0;
705     sch->last_cmd_valid = false;
706     s->ctrl &= ~SCSW_ACTL_CLEAR_PEND;
707     s->ctrl |= SCSW_STCTL_STATUS_PEND;
708 
709     s->dstat = 0;
710     s->cstat = 0;
711     p->lpum = path;
712 
713 }
714 
715 static void sch_handle_halt_func(SubchDev *sch)
716 {
717 
718     PMCW *p = &sch->curr_status.pmcw;
719     SCSW *s = &sch->curr_status.scsw;
720     hwaddr curr_ccw = sch->channel_prog;
721     int path;
722 
723     /* Path management: In our simple css, we always choose the only path. */
724     path = 0x80;
725 
726     /* We always 'attempt to issue the halt signal', and we always succeed. */
727     sch->channel_prog = 0x0;
728     sch->last_cmd_valid = false;
729     s->ctrl &= ~SCSW_ACTL_HALT_PEND;
730     s->ctrl |= SCSW_STCTL_STATUS_PEND;
731 
732     if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) ||
733         !((s->ctrl & SCSW_ACTL_START_PEND) ||
734           (s->ctrl & SCSW_ACTL_SUSP))) {
735         s->dstat = SCSW_DSTAT_DEVICE_END;
736     }
737     if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) ||
738         (s->ctrl & SCSW_ACTL_SUSP)) {
739         s->cpa = curr_ccw + 8;
740     }
741     s->cstat = 0;
742     p->lpum = path;
743 
744 }
745 
746 static void copy_sense_id_to_guest(SenseId *dest, SenseId *src)
747 {
748     int i;
749 
750     dest->reserved = src->reserved;
751     dest->cu_type = cpu_to_be16(src->cu_type);
752     dest->cu_model = src->cu_model;
753     dest->dev_type = cpu_to_be16(src->dev_type);
754     dest->dev_model = src->dev_model;
755     dest->unused = src->unused;
756     for (i = 0; i < ARRAY_SIZE(dest->ciw); i++) {
757         dest->ciw[i].type = src->ciw[i].type;
758         dest->ciw[i].command = src->ciw[i].command;
759         dest->ciw[i].count = cpu_to_be16(src->ciw[i].count);
760     }
761 }
762 
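/*
 * Fetch a CCW from guest memory and return it normalized to format-1
 * layout in host byte order. For a format-0 TIC only the data address is
 * meaningful, so flags and count are cleared.
 */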
763 static CCW1 copy_ccw_from_guest(hwaddr addr, bool fmt1)
764 {
765     CCW0 tmp0;
766     CCW1 tmp1;
767     CCW1 ret;
768 
769     if (fmt1) {
770         cpu_physical_memory_read(addr, &tmp1, sizeof(tmp1));
771         ret.cmd_code = tmp1.cmd_code;
772         ret.flags = tmp1.flags;
773         ret.count = be16_to_cpu(tmp1.count);
774         ret.cda = be32_to_cpu(tmp1.cda);
775     } else {
776         cpu_physical_memory_read(addr, &tmp0, sizeof(tmp0));
777         if ((tmp0.cmd_code & 0x0f) == CCW_CMD_TIC) {
778             ret.cmd_code = CCW_CMD_TIC;
779             ret.flags = 0;
780             ret.count = 0;
781         } else {
782             ret.cmd_code = tmp0.cmd_code;
783             ret.flags = tmp0.flags;
784             ret.count = be16_to_cpu(tmp0.count);
785         }
786         ret.cda = be16_to_cpu(tmp0.cda1) | (tmp0.cda0 << 16);
787     }
788     return ret;
789 }
790 
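/*
 * Interpret a single CCW of the channel program. Returns 0 on success,
 * -EAGAIN to continue with the next CCW in the chain, -EINPROGRESS if the
 * program is to be suspended, -EINVAL for conditions that translate into
 * a channel-program check, -ENOSYS for an unsupported command, or the
 * value returned by the device's ccw callback.
 */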
791 static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr,
792                              bool suspend_allowed)
793 {
794     int ret;
795     bool check_len;
796     int len;
797     CCW1 ccw;
798 
799     if (!ccw_addr) {
800         return -EINVAL; /* channel-program check */
801     }
802     /* Check doubleword aligned and 31 or 24 (fmt 0) bit addressable. */
803     if (ccw_addr & (sch->ccw_fmt_1 ? 0x80000007 : 0xff000007)) {
804         return -EINVAL;
805     }
806 
807     /* Translate everything to format-1 ccws - the information is the same. */
808     ccw = copy_ccw_from_guest(ccw_addr, sch->ccw_fmt_1);
809 
810     /* Check for invalid command codes. */
811     if ((ccw.cmd_code & 0x0f) == 0) {
812         return -EINVAL;
813     }
814     if (((ccw.cmd_code & 0x0f) == CCW_CMD_TIC) &&
815         ((ccw.cmd_code & 0xf0) != 0)) {
816         return -EINVAL;
817     }
818     if (!sch->ccw_fmt_1 && (ccw.count == 0) &&
819         (ccw.cmd_code != CCW_CMD_TIC)) {
820         return -EINVAL;
821     }
822 
823     /* We don't support MIDA. */
824     if (ccw.flags & CCW_FLAG_MIDA) {
825         return -EINVAL;
826     }
827 
828     if (ccw.flags & CCW_FLAG_SUSPEND) {
829         return suspend_allowed ? -EINPROGRESS : -EINVAL;
830     }
831 
832     check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));
833 
834     if (!ccw.cda) {
835         if (sch->ccw_no_data_cnt == 255) {
836             return -EINVAL;
837         }
838         sch->ccw_no_data_cnt++;
839     }
840 
841     /* Look at the command. */
842     switch (ccw.cmd_code) {
843     case CCW_CMD_NOOP:
844         /* Nothing to do. */
845         ret = 0;
846         break;
847     case CCW_CMD_BASIC_SENSE:
848         if (check_len) {
849             if (ccw.count != sizeof(sch->sense_data)) {
850                 ret = -EINVAL;
851                 break;
852             }
853         }
854         len = MIN(ccw.count, sizeof(sch->sense_data));
855         cpu_physical_memory_write(ccw.cda, sch->sense_data, len);
856         sch->curr_status.scsw.count = ccw.count - len;
857         memset(sch->sense_data, 0, sizeof(sch->sense_data));
858         ret = 0;
859         break;
860     case CCW_CMD_SENSE_ID:
861     {
862         SenseId sense_id;
863 
864         copy_sense_id_to_guest(&sense_id, &sch->id);
865         /* Sense ID information is device specific. */
866         if (check_len) {
867             if (ccw.count != sizeof(sense_id)) {
868                 ret = -EINVAL;
869                 break;
870             }
871         }
872         len = MIN(ccw.count, sizeof(sense_id));
873         /*
874          * Only indicate 0xff in the first sense byte if we actually
875          * have enough space to store at least bytes 0-3.
876          */
877         if (len >= 4) {
878             sense_id.reserved = 0xff;
879         } else {
880             sense_id.reserved = 0;
881         }
882         cpu_physical_memory_write(ccw.cda, &sense_id, len);
883         sch->curr_status.scsw.count = ccw.count - len;
884         ret = 0;
885         break;
886     }
887     case CCW_CMD_TIC:
888         if (sch->last_cmd_valid && (sch->last_cmd.cmd_code == CCW_CMD_TIC)) {
889             ret = -EINVAL;
890             break;
891         }
892         if (ccw.flags || ccw.count) {
893             /* We have already sanitized these if converted from fmt 0. */
894             ret = -EINVAL;
895             break;
896         }
897         sch->channel_prog = ccw.cda;
898         ret = -EAGAIN;
899         break;
900     default:
901         if (sch->ccw_cb) {
902             /* Handle device specific commands. */
903             ret = sch->ccw_cb(sch, ccw);
904         } else {
905             ret = -ENOSYS;
906         }
907         break;
908     }
909     sch->last_cmd = ccw;
910     sch->last_cmd_valid = true;
911     if (ret == 0) {
912         if (ccw.flags & CCW_FLAG_CC) {
913             sch->channel_prog += 8;
914             ret = -EAGAIN;
915         }
916     }
917 
918     return ret;
919 }
920 
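/*
 * Run the start function for a virtual subchannel: set up from the ORB
 * (ssch) or resume a suspended program (rsch), then interpret the channel
 * program CCW by CCW and reflect the outcome in the SCSW (deferred cc 3
 * if the requested path is not available, unit check for unsupported
 * commands, program check for other errors).
 */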
921 static void sch_handle_start_func_virtual(SubchDev *sch)
922 {
923 
924     PMCW *p = &sch->curr_status.pmcw;
925     SCSW *s = &sch->curr_status.scsw;
926     int path;
927     int ret;
928     bool suspend_allowed;
929 
930     /* Path management: In our simple css, we always choose the only path. */
931     path = 0x80;
932 
933     if (!(s->ctrl & SCSW_ACTL_SUSP)) {
934         /* Start Function triggered via ssch, i.e. we have an ORB */
935         ORB *orb = &sch->orb;
936         s->cstat = 0;
937         s->dstat = 0;
938         /* Look at the orb and try to execute the channel program. */
939         p->intparm = orb->intparm;
940         if (!(orb->lpm & path)) {
941             /* Generate a deferred cc 3 condition. */
942             s->flags |= SCSW_FLAGS_MASK_CC;
943             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
944             s->ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
945             return;
946         }
947         sch->ccw_fmt_1 = !!(orb->ctrl0 & ORB_CTRL0_MASK_FMT);
948         s->flags |= (sch->ccw_fmt_1) ? SCSW_FLAGS_MASK_FMT : 0;
949         sch->ccw_no_data_cnt = 0;
950         suspend_allowed = !!(orb->ctrl0 & ORB_CTRL0_MASK_SPND);
951     } else {
952         /* Start Function resumed via rsch */
953         s->ctrl &= ~(SCSW_ACTL_SUSP | SCSW_ACTL_RESUME_PEND);
954         /* The channel program had been suspended before. */
955         suspend_allowed = true;
956     }
957     sch->last_cmd_valid = false;
958     do {
959         ret = css_interpret_ccw(sch, sch->channel_prog, suspend_allowed);
960         switch (ret) {
961         case -EAGAIN:
962             /* ccw chain, continue processing */
963             break;
964         case 0:
965             /* success */
966             s->ctrl &= ~SCSW_ACTL_START_PEND;
967             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
968             s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
969                     SCSW_STCTL_STATUS_PEND;
970             s->dstat = SCSW_DSTAT_CHANNEL_END | SCSW_DSTAT_DEVICE_END;
971             s->cpa = sch->channel_prog + 8;
972             break;
973         case -EIO:
974             /* I/O errors, status depends on specific devices */
975             break;
976         case -ENOSYS:
977             /* unsupported command, generate unit check (command reject) */
978             s->ctrl &= ~SCSW_ACTL_START_PEND;
979             s->dstat = SCSW_DSTAT_UNIT_CHECK;
980             /* Set sense bit 0 in ecw0. */
981             sch->sense_data[0] = 0x80;
982             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
983             s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
984                     SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
985             s->cpa = sch->channel_prog + 8;
986             break;
987         case -EINPROGRESS:
988             /* channel program has been suspended */
989             s->ctrl &= ~SCSW_ACTL_START_PEND;
990             s->ctrl |= SCSW_ACTL_SUSP;
991             break;
992         default:
993             /* error, generate channel program check */
994             s->ctrl &= ~SCSW_ACTL_START_PEND;
995             s->cstat = SCSW_CSTAT_PROG_CHECK;
996             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
997             s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
998                     SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
999             s->cpa = sch->channel_prog + 8;
1000             break;
1001         }
1002     } while (ret == -EAGAIN);
1003 
1004 }
1005 
1006 static int sch_handle_start_func_passthrough(SubchDev *sch)
1007 {
1008 
1009     PMCW *p = &sch->curr_status.pmcw;
1010     SCSW *s = &sch->curr_status.scsw;
1011     int ret;
1012 
1013     ORB *orb = &sch->orb;
1014     if (!(s->ctrl & SCSW_ACTL_SUSP)) {
1015         assert(orb != NULL);
1016         p->intparm = orb->intparm;
1017     }
1018 
1019     /*
1020      * Only support prefetch enable mode.
1021      * Only support 64bit addressing idal.
1022      */
1023     if (!(orb->ctrl0 & ORB_CTRL0_MASK_PFCH) ||
1024         !(orb->ctrl0 & ORB_CTRL0_MASK_C64)) {
1025         return -EINVAL;
1026     }
1027 
1028     ret = s390_ccw_cmd_request(orb, s, sch->driver_data);
1029     switch (ret) {
1030     /* Currently we don't update control block and just return the cc code. */
1031     case 0:
1032         break;
1033     case -EBUSY:
1034         break;
1035     case -ENODEV:
1036         break;
1037     case -EACCES:
1038         /* Let's reflect an inaccessible host device by cc 3. */
1039         ret = -ENODEV;
1040         break;
1041     default:
1042         /*
1043          * All other return codes will trigger a program check,
1044          * or set cc to 1.
1045          */
1046         break;
1047     }
1048 
1049     return ret;
1050 }
1051 
1052 /*
1053  * On real machines, this would run asynchronously to the main vcpus.
1054  * We might want to make some parts of the ssch handling (interpreting
1055  * read/writes) asynchronous later on if we start supporting more than
1056  * our current very simple devices.
1057  */
1058 int do_subchannel_work_virtual(SubchDev *sch)
1059 {
1060 
1061     SCSW *s = &sch->curr_status.scsw;
1062 
1063     if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) {
1064         sch_handle_clear_func(sch);
1065     } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) {
1066         sch_handle_halt_func(sch);
1067     } else if (s->ctrl & SCSW_FCTL_START_FUNC) {
1068         /* Triggered by both ssch and rsch. */
1069         sch_handle_start_func_virtual(sch);
1070     } else {
1071         /* Cannot happen. */
1072         return 0;
1073     }
1074     css_inject_io_interrupt(sch);
1075     return 0;
1076 }
1077 
1078 int do_subchannel_work_passthrough(SubchDev *sch)
1079 {
1080     int ret;
1081     SCSW *s = &sch->curr_status.scsw;
1082 
1083     if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) {
1084         /* TODO: Clear handling */
1085         sch_handle_clear_func(sch);
1086         ret = 0;
1087     } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) {
1088         /* TODO: Halt handling */
1089         sch_handle_halt_func(sch);
1090         ret = 0;
1091     } else if (s->ctrl & SCSW_FCTL_START_FUNC) {
1092         ret = sch_handle_start_func_passthrough(sch);
1093     } else {
1094         /* Cannot happen. */
1095         return -ENODEV;
1096     }
1097 
1098     return ret;
1099 }
1100 
1101 static int do_subchannel_work(SubchDev *sch)
1102 {
1103     if (sch->do_subchannel_work) {
1104         return sch->do_subchannel_work(sch);
1105     } else {
1106         return -EINVAL;
1107     }
1108 }
1109 
1110 static void copy_pmcw_to_guest(PMCW *dest, const PMCW *src)
1111 {
1112     int i;
1113 
1114     dest->intparm = cpu_to_be32(src->intparm);
1115     dest->flags = cpu_to_be16(src->flags);
1116     dest->devno = cpu_to_be16(src->devno);
1117     dest->lpm = src->lpm;
1118     dest->pnom = src->pnom;
1119     dest->lpum = src->lpum;
1120     dest->pim = src->pim;
1121     dest->mbi = cpu_to_be16(src->mbi);
1122     dest->pom = src->pom;
1123     dest->pam = src->pam;
1124     for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
1125         dest->chpid[i] = src->chpid[i];
1126     }
1127     dest->chars = cpu_to_be32(src->chars);
1128 }
1129 
1130 void copy_scsw_to_guest(SCSW *dest, const SCSW *src)
1131 {
1132     dest->flags = cpu_to_be16(src->flags);
1133     dest->ctrl = cpu_to_be16(src->ctrl);
1134     dest->cpa = cpu_to_be32(src->cpa);
1135     dest->dstat = src->dstat;
1136     dest->cstat = src->cstat;
1137     dest->count = cpu_to_be16(src->count);
1138 }
1139 
1140 static void copy_schib_to_guest(SCHIB *dest, const SCHIB *src)
1141 {
1142     int i;
1143 
1144     copy_pmcw_to_guest(&dest->pmcw, &src->pmcw);
1145     copy_scsw_to_guest(&dest->scsw, &src->scsw);
1146     dest->mba = cpu_to_be64(src->mba);
1147     for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
1148         dest->mda[i] = src->mda[i];
1149     }
1150 }
1151 
1152 int css_do_stsch(SubchDev *sch, SCHIB *schib)
1153 {
1154     /* Use current status. */
1155     copy_schib_to_guest(schib, &sch->curr_status);
1156     return 0;
1157 }
1158 
1159 static void copy_pmcw_from_guest(PMCW *dest, const PMCW *src)
1160 {
1161     int i;
1162 
1163     dest->intparm = be32_to_cpu(src->intparm);
1164     dest->flags = be16_to_cpu(src->flags);
1165     dest->devno = be16_to_cpu(src->devno);
1166     dest->lpm = src->lpm;
1167     dest->pnom = src->pnom;
1168     dest->lpum = src->lpum;
1169     dest->pim = src->pim;
1170     dest->mbi = be16_to_cpu(src->mbi);
1171     dest->pom = src->pom;
1172     dest->pam = src->pam;
1173     for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
1174         dest->chpid[i] = src->chpid[i];
1175     }
1176     dest->chars = be32_to_cpu(src->chars);
1177 }
1178 
1179 static void copy_scsw_from_guest(SCSW *dest, const SCSW *src)
1180 {
1181     dest->flags = be16_to_cpu(src->flags);
1182     dest->ctrl = be16_to_cpu(src->ctrl);
1183     dest->cpa = be32_to_cpu(src->cpa);
1184     dest->dstat = src->dstat;
1185     dest->cstat = src->cstat;
1186     dest->count = be16_to_cpu(src->count);
1187 }
1188 
1189 static void copy_schib_from_guest(SCHIB *dest, const SCHIB *src)
1190 {
1191     int i;
1192 
1193     copy_pmcw_from_guest(&dest->pmcw, &src->pmcw);
1194     copy_scsw_from_guest(&dest->scsw, &src->scsw);
1195     dest->mba = be64_to_cpu(src->mba);
1196     for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
1197         dest->mda[i] = src->mda[i];
1198     }
1199 }
1200 
1201 int css_do_msch(SubchDev *sch, const SCHIB *orig_schib)
1202 {
1203     SCSW *s = &sch->curr_status.scsw;
1204     PMCW *p = &sch->curr_status.pmcw;
1205     uint16_t oldflags;
1206     int ret;
1207     SCHIB schib;
1208 
1209     if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_DNV)) {
1210         ret = 0;
1211         goto out;
1212     }
1213 
1214     if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
1215         ret = -EINPROGRESS;
1216         goto out;
1217     }
1218 
1219     if (s->ctrl &
1220         (SCSW_FCTL_START_FUNC|SCSW_FCTL_HALT_FUNC|SCSW_FCTL_CLEAR_FUNC)) {
1221         ret = -EBUSY;
1222         goto out;
1223     }
1224 
1225     copy_schib_from_guest(&schib, orig_schib);
1226     /* Only update the program-modifiable fields. */
1227     p->intparm = schib.pmcw.intparm;
1228     oldflags = p->flags;
1229     p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
1230                   PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
1231                   PMCW_FLAGS_MASK_MP);
1232     p->flags |= schib.pmcw.flags &
1233             (PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
1234              PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
1235              PMCW_FLAGS_MASK_MP);
1236     p->lpm = schib.pmcw.lpm;
1237     p->mbi = schib.pmcw.mbi;
1238     p->pom = schib.pmcw.pom;
1239     p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
1240     p->chars |= schib.pmcw.chars &
1241             (PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
1242     sch->curr_status.mba = schib.mba;
1243 
1244     /* Has the channel been disabled? */
1245     if (sch->disable_cb && (oldflags & PMCW_FLAGS_MASK_ENA) != 0
1246         && (p->flags & PMCW_FLAGS_MASK_ENA) == 0) {
1247         sch->disable_cb(sch);
1248     }
1249 
1250     ret = 0;
1251 
1252 out:
1253     return ret;
1254 }
1255 
1256 int css_do_xsch(SubchDev *sch)
1257 {
1258     SCSW *s = &sch->curr_status.scsw;
1259     PMCW *p = &sch->curr_status.pmcw;
1260     int ret;
1261 
1262     if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
1263         ret = -ENODEV;
1264         goto out;
1265     }
1266 
1267     if (s->ctrl & SCSW_CTRL_MASK_STCTL) {
1268         ret = -EINPROGRESS;
1269         goto out;
1270     }
1271 
1272     if (!(s->ctrl & SCSW_CTRL_MASK_FCTL) ||
1273         ((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
1274         (!(s->ctrl &
1275            (SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_SUSP))) ||
1276         (s->ctrl & SCSW_ACTL_SUBCH_ACTIVE)) {
1277         ret = -EBUSY;
1278         goto out;
1279     }
1280 
1281     /* Cancel the current operation. */
1282     s->ctrl &= ~(SCSW_FCTL_START_FUNC |
1283                  SCSW_ACTL_RESUME_PEND |
1284                  SCSW_ACTL_START_PEND |
1285                  SCSW_ACTL_SUSP);
1286     sch->channel_prog = 0x0;
1287     sch->last_cmd_valid = false;
1288     s->dstat = 0;
1289     s->cstat = 0;
1290     ret = 0;
1291 
1292 out:
1293     return ret;
1294 }
1295 
1296 int css_do_csch(SubchDev *sch)
1297 {
1298     SCSW *s = &sch->curr_status.scsw;
1299     PMCW *p = &sch->curr_status.pmcw;
1300     int ret;
1301 
1302     if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
1303         ret = -ENODEV;
1304         goto out;
1305     }
1306 
1307     /* Trigger the clear function. */
1308     s->ctrl &= ~(SCSW_CTRL_MASK_FCTL | SCSW_CTRL_MASK_ACTL);
1309     s->ctrl |= SCSW_FCTL_CLEAR_FUNC | SCSW_ACTL_CLEAR_PEND;
1310 
1311     do_subchannel_work(sch);
1312     ret = 0;
1313 
1314 out:
1315     return ret;
1316 }
1317 
1318 int css_do_hsch(SubchDev *sch)
1319 {
1320     SCSW *s = &sch->curr_status.scsw;
1321     PMCW *p = &sch->curr_status.pmcw;
1322     int ret;
1323 
1324     if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
1325         ret = -ENODEV;
1326         goto out;
1327     }
1328 
1329     if (((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_STATUS_PEND) ||
1330         (s->ctrl & (SCSW_STCTL_PRIMARY |
1331                     SCSW_STCTL_SECONDARY |
1332                     SCSW_STCTL_ALERT))) {
1333         ret = -EINPROGRESS;
1334         goto out;
1335     }
1336 
1337     if (s->ctrl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
1338         ret = -EBUSY;
1339         goto out;
1340     }
1341 
1342     /* Trigger the halt function. */
1343     s->ctrl |= SCSW_FCTL_HALT_FUNC;
1344     s->ctrl &= ~SCSW_FCTL_START_FUNC;
1345     if (((s->ctrl & SCSW_CTRL_MASK_ACTL) ==
1346          (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) &&
1347         ((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_INTERMEDIATE)) {
1348         s->ctrl &= ~SCSW_STCTL_STATUS_PEND;
1349     }
1350     s->ctrl |= SCSW_ACTL_HALT_PEND;
1351 
1352     do_subchannel_work(sch);
1353     ret = 0;
1354 
1355 out:
1356     return ret;
1357 }
1358 
1359 static void css_update_chnmon(SubchDev *sch)
1360 {
1361     if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_MME)) {
1362         /* Not active. */
1363         return;
1364     }
1365     /* The counter is conveniently located at the beginning of the struct. */
1366     if (sch->curr_status.pmcw.chars & PMCW_CHARS_MASK_MBFC) {
1367         /* Format 1, per-subchannel area. */
1368         uint32_t count;
1369 
1370         count = address_space_ldl(&address_space_memory,
1371                                   sch->curr_status.mba,
1372                                   MEMTXATTRS_UNSPECIFIED,
1373                                   NULL);
1374         count++;
1375         address_space_stl(&address_space_memory, sch->curr_status.mba, count,
1376                           MEMTXATTRS_UNSPECIFIED, NULL);
1377     } else {
1378         /* Format 0, global area. */
1379         uint32_t offset;
1380         uint16_t count;
1381 
1382         offset = sch->curr_status.pmcw.mbi << 5;
1383         count = address_space_lduw(&address_space_memory,
1384                                    channel_subsys.chnmon_area + offset,
1385                                    MEMTXATTRS_UNSPECIFIED,
1386                                    NULL);
1387         count++;
1388         address_space_stw(&address_space_memory,
1389                           channel_subsys.chnmon_area + offset, count,
1390                           MEMTXATTRS_UNSPECIFIED, NULL);
1391     }
1392 }
1393 
1394 int css_do_ssch(SubchDev *sch, ORB *orb)
1395 {
1396     SCSW *s = &sch->curr_status.scsw;
1397     PMCW *p = &sch->curr_status.pmcw;
1398     int ret;
1399 
1400     if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
1401         ret = -ENODEV;
1402         goto out;
1403     }
1404 
1405     if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
1406         ret = -EINPROGRESS;
1407         goto out;
1408     }
1409 
1410     if (s->ctrl & (SCSW_FCTL_START_FUNC |
1411                    SCSW_FCTL_HALT_FUNC |
1412                    SCSW_FCTL_CLEAR_FUNC)) {
1413         ret = -EBUSY;
1414         goto out;
1415     }
1416 
1417     /* If monitoring is active, update counter. */
1418     if (channel_subsys.chnmon_active) {
1419         css_update_chnmon(sch);
1420     }
1421     sch->orb = *orb;
1422     sch->channel_prog = orb->cpa;
1423     /* Trigger the start function. */
1424     s->ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND);
1425     s->flags &= ~SCSW_FLAGS_MASK_PNO;
1426 
1427     ret = do_subchannel_work(sch);
1428 
1429 out:
1430     return ret;
1431 }
1432 
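/*
 * Copy an IRB to guest format. The extended-measurement word is only
 * included (and reflected in *irb_len) if extended measurements are
 * enabled for the subchannel and the reported status makes them
 * applicable.
 */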
1433 static void copy_irb_to_guest(IRB *dest, const IRB *src, PMCW *pmcw,
1434                               int *irb_len)
1435 {
1436     int i;
1437     uint16_t stctl = src->scsw.ctrl & SCSW_CTRL_MASK_STCTL;
1438     uint16_t actl = src->scsw.ctrl & SCSW_CTRL_MASK_ACTL;
1439 
1440     copy_scsw_to_guest(&dest->scsw, &src->scsw);
1441 
1442     for (i = 0; i < ARRAY_SIZE(dest->esw); i++) {
1443         dest->esw[i] = cpu_to_be32(src->esw[i]);
1444     }
1445     for (i = 0; i < ARRAY_SIZE(dest->ecw); i++) {
1446         dest->ecw[i] = cpu_to_be32(src->ecw[i]);
1447     }
1448     *irb_len = sizeof(*dest) - sizeof(dest->emw);
1449 
1450     /* extended measurements enabled? */
1451     if ((src->scsw.flags & SCSW_FLAGS_MASK_ESWF) ||
1452         !(pmcw->flags & PMCW_FLAGS_MASK_TF) ||
1453         !(pmcw->chars & PMCW_CHARS_MASK_XMWME)) {
1454         return;
1455     }
1456     /* extended measurements pending? */
1457     if (!(stctl & SCSW_STCTL_STATUS_PEND)) {
1458         return;
1459     }
1460     if ((stctl & SCSW_STCTL_PRIMARY) ||
1461         (stctl == SCSW_STCTL_SECONDARY) ||
1462         ((stctl & SCSW_STCTL_INTERMEDIATE) && (actl & SCSW_ACTL_SUSP))) {
1463         for (i = 0; i < ARRAY_SIZE(dest->emw); i++) {
1464             dest->emw[i] = cpu_to_be32(src->emw[i]);
1465         }
1466     }
1467     *irb_len = sizeof(*dest);
1468 }
1469 
1470 int css_do_tsch_get_irb(SubchDev *sch, IRB *target_irb, int *irb_len)
1471 {
1472     SCSW *s = &sch->curr_status.scsw;
1473     PMCW *p = &sch->curr_status.pmcw;
1474     uint16_t stctl;
1475     IRB irb;
1476 
1477     if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
1478         return 3;
1479     }
1480 
1481     stctl = s->ctrl & SCSW_CTRL_MASK_STCTL;
1482 
1483     /* Prepare the irb for the guest. */
1484     memset(&irb, 0, sizeof(IRB));
1485 
1486     /* Copy scsw from current status. */
1487     memcpy(&irb.scsw, s, sizeof(SCSW));
1488     if (stctl & SCSW_STCTL_STATUS_PEND) {
1489         if (s->cstat & (SCSW_CSTAT_DATA_CHECK |
1490                         SCSW_CSTAT_CHN_CTRL_CHK |
1491                         SCSW_CSTAT_INTF_CTRL_CHK)) {
1492             irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF;
1493             irb.esw[0] = 0x04804000;
1494         } else {
1495             irb.esw[0] = 0x00800000;
1496         }
1497         /* If a unit check is pending, copy sense data. */
1498         if ((s->dstat & SCSW_DSTAT_UNIT_CHECK) &&
1499             (p->chars & PMCW_CHARS_MASK_CSENSE)) {
1500             int i;
1501 
1502             irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF | SCSW_FLAGS_MASK_ECTL;
1503             /* Attention: sense_data is already BE! */
1504             memcpy(irb.ecw, sch->sense_data, sizeof(sch->sense_data));
1505             for (i = 0; i < ARRAY_SIZE(irb.ecw); i++) {
1506                 irb.ecw[i] = be32_to_cpu(irb.ecw[i]);
1507             }
1508             irb.esw[1] = 0x01000000 | (sizeof(sch->sense_data) << 8);
1509         }
1510     }
1511     /* Store the irb to the guest. */
1512     copy_irb_to_guest(target_irb, &irb, p, irb_len);
1513 
1514     return ((stctl & SCSW_STCTL_STATUS_PEND) == 0);
1515 }
1516 
1517 void css_do_tsch_update_subch(SubchDev *sch)
1518 {
1519     SCSW *s = &sch->curr_status.scsw;
1520     PMCW *p = &sch->curr_status.pmcw;
1521     uint16_t stctl;
1522     uint16_t fctl;
1523     uint16_t actl;
1524 
1525     stctl = s->ctrl & SCSW_CTRL_MASK_STCTL;
1526     fctl = s->ctrl & SCSW_CTRL_MASK_FCTL;
1527     actl = s->ctrl & SCSW_CTRL_MASK_ACTL;
1528 
1529     /* Clear conditions on subchannel, if applicable. */
1530     if (stctl & SCSW_STCTL_STATUS_PEND) {
1531         s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
1532         if ((stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) ||
1533             ((fctl & SCSW_FCTL_HALT_FUNC) &&
1534              (actl & SCSW_ACTL_SUSP))) {
1535             s->ctrl &= ~SCSW_CTRL_MASK_FCTL;
1536         }
1537         if (stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) {
1538             s->flags &= ~SCSW_FLAGS_MASK_PNO;
1539             s->ctrl &= ~(SCSW_ACTL_RESUME_PEND |
1540                          SCSW_ACTL_START_PEND |
1541                          SCSW_ACTL_HALT_PEND |
1542                          SCSW_ACTL_CLEAR_PEND |
1543                          SCSW_ACTL_SUSP);
1544         } else {
1545             if ((actl & SCSW_ACTL_SUSP) &&
1546                 (fctl & SCSW_FCTL_START_FUNC)) {
1547                 s->flags &= ~SCSW_FLAGS_MASK_PNO;
1548                 if (fctl & SCSW_FCTL_HALT_FUNC) {
1549                     s->ctrl &= ~(SCSW_ACTL_RESUME_PEND |
1550                                  SCSW_ACTL_START_PEND |
1551                                  SCSW_ACTL_HALT_PEND |
1552                                  SCSW_ACTL_CLEAR_PEND |
1553                                  SCSW_ACTL_SUSP);
1554                 } else {
1555                     s->ctrl &= ~SCSW_ACTL_RESUME_PEND;
1556                 }
1557             }
1558         }
1559         /* Clear pending sense data. */
1560         if (p->chars & PMCW_CHARS_MASK_CSENSE) {
1561             memset(sch->sense_data, 0, sizeof(sch->sense_data));
1562         }
1563     }
1564 }
1565 
1566 static void copy_crw_to_guest(CRW *dest, const CRW *src)
1567 {
1568     dest->flags = cpu_to_be16(src->flags);
1569     dest->rsid = cpu_to_be16(src->rsid);
1570 }
1571 
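/*
 * Deliver the oldest pending CRW to the guest. Returns 0 if a CRW was
 * stored; returns 1 and a zeroed CRW if none was pending, in which case
 * CRW machine checks are turned back on.
 */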
1572 int css_do_stcrw(CRW *crw)
1573 {
1574     CrwContainer *crw_cont;
1575     int ret;
1576 
1577     crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws);
1578     if (crw_cont) {
1579         QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling);
1580         copy_crw_to_guest(crw, &crw_cont->crw);
1581         g_free(crw_cont);
1582         ret = 0;
1583     } else {
1584         /* List was empty, turn crw machine checks on again. */
1585         memset(crw, 0, sizeof(*crw));
1586         channel_subsys.do_crw_mchk = true;
1587         ret = 1;
1588     }
1589 
1590     return ret;
1591 }
1592 
1593 static void copy_crw_from_guest(CRW *dest, const CRW *src)
1594 {
1595     dest->flags = be16_to_cpu(src->flags);
1596     dest->rsid = be16_to_cpu(src->rsid);
1597 }
1598 
1599 void css_undo_stcrw(CRW *crw)
1600 {
1601     CrwContainer *crw_cont;
1602 
1603     crw_cont = g_try_malloc0(sizeof(CrwContainer));
1604     if (!crw_cont) {
1605         channel_subsys.crws_lost = true;
1606         return;
1607     }
1608     copy_crw_from_guest(&crw_cont->crw, crw);
1609 
1610     QTAILQ_INSERT_HEAD(&channel_subsys.pending_crws, crw_cont, sibling);
1611 }
1612 
1613 int css_do_tpi(IOIntCode *int_code, int lowcore)
1614 {
1615     /* No pending interrupts for !KVM. */
1616     return 0;
1617 }
1618 
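/*
 * Collect channel-path descriptors for the chpids in [f_chpid, l_chpid]
 * into buf, using the 8 byte (rfmt 0) or 32 byte (rfmt 1) format.
 * Returns the number of bytes written.
 */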
1619 int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
1620                          int rfmt, void *buf)
1621 {
1622     int i, desc_size;
1623     uint32_t words[8];
1624     uint32_t chpid_type_word;
1625     CssImage *css;
1626 
1627     if (!m && !cssid) {
1628         css = channel_subsys.css[channel_subsys.default_cssid];
1629     } else {
1630         css = channel_subsys.css[cssid];
1631     }
1632     if (!css) {
1633         return 0;
1634     }
1635     desc_size = 0;
1636     for (i = f_chpid; i <= l_chpid; i++) {
1637         if (css->chpids[i].in_use) {
1638             chpid_type_word = 0x80000000 | (css->chpids[i].type << 8) | i;
1639             if (rfmt == 0) {
1640                 words[0] = cpu_to_be32(chpid_type_word);
1641                 words[1] = 0;
1642                 memcpy(buf + desc_size, words, 8);
1643                 desc_size += 8;
1644             } else if (rfmt == 1) {
1645                 words[0] = cpu_to_be32(chpid_type_word);
1646                 words[1] = 0;
1647                 words[2] = 0;
1648                 words[3] = 0;
1649                 words[4] = 0;
1650                 words[5] = 0;
1651                 words[6] = 0;
1652                 words[7] = 0;
1653                 memcpy(buf + desc_size, words, 32);
1654                 desc_size += 32;
1655             }
1656         }
1657     }
1658     return desc_size;
1659 }
1660 
1661 void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo)
1662 {
1663     /* dct is currently ignored (not really meaningful for our devices) */
1664     /* TODO: Don't ignore mbk. */
1665     if (update && !channel_subsys.chnmon_active) {
1666         /* Enable measuring. */
1667         channel_subsys.chnmon_area = mbo;
1668         channel_subsys.chnmon_active = true;
1669     }
1670     if (!update && channel_subsys.chnmon_active) {
1671         /* Disable measuring. */
1672         channel_subsys.chnmon_area = 0;
1673         channel_subsys.chnmon_active = false;
1674     }
1675 }
1676 
1677 int css_do_rsch(SubchDev *sch)
1678 {
1679     SCSW *s = &sch->curr_status.scsw;
1680     PMCW *p = &sch->curr_status.pmcw;
1681     int ret;
1682 
1683     if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
1684         ret = -ENODEV;
1685         goto out;
1686     }
1687 
1688     if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
1689         ret = -EINPROGRESS;
1690         goto out;
1691     }
1692 
1693     if (((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
1694         (s->ctrl & SCSW_ACTL_RESUME_PEND) ||
1695         (!(s->ctrl & SCSW_ACTL_SUSP))) {
1696         ret = -EINVAL;
1697         goto out;
1698     }
1699 
1700     /* If monitoring is active, update counter. */
1701     if (channel_subsys.chnmon_active) {
1702         css_update_chnmon(sch);
1703     }
1704 
1705     s->ctrl |= SCSW_ACTL_RESUME_PEND;
1706     do_subchannel_work(sch);
1707     ret = 0;
1708 
1709 out:
1710     return ret;
1711 }
1712 
1713 int css_do_rchp(uint8_t cssid, uint8_t chpid)
1714 {
1715     uint8_t real_cssid;
1716 
1717     if (cssid > channel_subsys.max_cssid) {
1718         return -EINVAL;
1719     }
1720     if (channel_subsys.max_cssid == 0) {
1721         real_cssid = channel_subsys.default_cssid;
1722     } else {
1723         real_cssid = cssid;
1724     }
1725     if (!channel_subsys.css[real_cssid]) {
1726         return -EINVAL;
1727     }
1728 
1729     if (!channel_subsys.css[real_cssid]->chpids[chpid].in_use) {
1730         return -ENODEV;
1731     }
1732 
1733     if (!channel_subsys.css[real_cssid]->chpids[chpid].is_virtual) {
1734         fprintf(stderr,
1735                 "rchp unsupported for non-virtual chpid %x.%02x!\n",
1736                 real_cssid, chpid);
1737         return -ENODEV;
1738     }
1739 
1740     /* We don't really use a channel path, so we're done here. */
1741     css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 1,
1742                   channel_subsys.max_cssid > 0 ? 1 : 0, chpid);
1743     if (channel_subsys.max_cssid > 0) {
1744         css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 1, 0, real_cssid << 8);
1745     }
1746     return 0;
1747 }
1748 
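/*
 * Return true if the addressed subchannel set does not exist or @p schid
 * lies beyond the highest subchannel id currently in use in it, i.e.
 * there is nothing left to enumerate.
 */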
1749 bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid)
1750 {
1751     SubchSet *set;
1752     uint8_t real_cssid;
1753 
1754     real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid;
1755     if (ssid > MAX_SSID ||
1756         !channel_subsys.css[real_cssid] ||
1757         !channel_subsys.css[real_cssid]->sch_set[ssid]) {
1758         return true;
1759     }
1760     set = channel_subsys.css[real_cssid]->sch_set[ssid];
1761     /* find_last_bit() takes the bitmap length in bits. */
1762     return schid > find_last_bit(set->schids_used, MAX_SCHID + 1);
1763 }
1764 
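/*
 * Return the first chpid not yet in use in the css image @p cssid,
 * skipping the chpid reserved for virtio-ccw devices, or MAX_CHPID + 1
 * if no free chpid (or no such css image) exists.
 */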
1765 unsigned int css_find_free_chpid(uint8_t cssid)
1766 {
1767     CssImage *css = channel_subsys.css[cssid];
1768     unsigned int chpid;
1769 
1770     if (!css) {
1771         return MAX_CHPID + 1;
1772     }
1773 
1774     for (chpid = 0; chpid <= MAX_CHPID; chpid++) {
1775         /* skip reserved chpid */
1776         if (chpid == VIRTIO_CCW_CHPID) {
1777             continue;
1778         }
1779         if (!css->chpids[chpid].in_use) {
1780             return chpid;
1781         }
1782     }
1783     return MAX_CHPID + 1;
1784 }
1785 
1786 static int css_add_chpid(uint8_t cssid, uint8_t chpid, uint8_t type,
1787                          bool is_virt)
1788 {
1789     CssImage *css;
1790 
1791     trace_css_chpid_add(cssid, chpid, type);
1792     css = channel_subsys.css[cssid];
1793     if (!css) {
1794         return -EINVAL;
1795     }
1796     if (css->chpids[chpid].in_use) {
1797         return -EEXIST;
1798     }
1799     css->chpids[chpid].in_use = 1;
1800     css->chpids[chpid].type = type;
1801     css->chpids[chpid].is_virtual = is_virt;
1802 
1803     css_generate_chp_crws(cssid, chpid);
1804 
1805     return 0;
1806 }
1807 
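/*
 * Initialize the SCHIB of a virtual subchannel: device number valid,
 * a single path through @p chpid, and all status information cleared.
 * The chpid is registered as a virtual channel path if it is not yet
 * in use.
 */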
1808 void css_sch_build_virtual_schib(SubchDev *sch, uint8_t chpid, uint8_t type)
1809 {
1810     PMCW *p = &sch->curr_status.pmcw;
1811     SCSW *s = &sch->curr_status.scsw;
1812     int i;
1813     CssImage *css = channel_subsys.css[sch->cssid];
1814 
1815     assert(css != NULL);
1816     memset(p, 0, sizeof(PMCW));
1817     p->flags |= PMCW_FLAGS_MASK_DNV;
1818     p->devno = sch->devno;
1819     /* single path */
1820     p->pim = 0x80;
1821     p->pom = 0xff;
1822     p->pam = 0x80;
1823     p->chpid[0] = chpid;
1824     if (!css->chpids[chpid].in_use) {
1825         css_add_chpid(sch->cssid, chpid, type, true);
1826     }
1827 
1828     memset(s, 0, sizeof(SCSW));
1829     sch->curr_status.mba = 0;
1830     for (i = 0; i < ARRAY_SIZE(sch->curr_status.mda); i++) {
1831         sch->curr_status.mda[i] = 0;
1832     }
1833 }
1834 
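/*
 * Look up a subchannel by cssid/ssid/schid, with !m mapping cssid 0 to the
 * default css image. Returns NULL if the css image, subchannel set or
 * subchannel does not exist.
 */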
1835 SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid, uint16_t schid)
1836 {
1837     uint8_t real_cssid;
1838 
1839     real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid;
1840 
1841     if (!channel_subsys.css[real_cssid]) {
1842         return NULL;
1843     }
1844 
1845     if (!channel_subsys.css[real_cssid]->sch_set[ssid]) {
1846         return NULL;
1847     }
1848 
1849     return channel_subsys.css[real_cssid]->sch_set[ssid]->sch[schid];
1850 }
1851 
1852 /**
1853  * Return free device number in subchannel set.
1854  *
1855  * Return index of the first free device number in the subchannel set
1856  * identified by @p cssid and @p ssid, beginning the search at @p
1857  * start and wrapping around at MAX_DEVNO. Return a value exceeding
1858  * MAX_DEVNO if there are no free device numbers in the subchannel
1859  * set.
1860  */
1861 static uint32_t css_find_free_devno(uint8_t cssid, uint8_t ssid,
1862                                     uint16_t start)
1863 {
1864     uint32_t round;
1865 
1866     for (round = 0; round <= MAX_DEVNO; round++) {
1867         uint16_t devno = (start + round) % MAX_DEVNO;
1868 
1869         if (!css_devno_used(cssid, ssid, devno)) {
1870             return devno;
1871         }
1872     }
1873     return MAX_DEVNO + 1;
1874 }
1875 
1876 /**
1877  * Return first free subchannel (id) in subchannel set.
1878  *
1879  * Return index of the first free subchannel in the subchannel set
1880  * identified by @p cssid and @p ssid, if there is any. Return a value
1881  * exceeding MAX_SCHID if there are no free subchannels in the
1882  * subchannel set.
1883  */
1884 static uint32_t css_find_free_subch(uint8_t cssid, uint8_t ssid)
1885 {
1886     uint32_t schid;
1887 
1888     for (schid = 0; schid <= MAX_SCHID; schid++) {
1889         if (!css_find_subch(1, cssid, ssid, schid)) {
1890             return schid;
1891         }
1892     }
1893     return MAX_SCHID + 1;
1894 }
1895 
1896 /**
1897  * Find the first free subchannel (id) in a subchannel set for a device number
1898  *
1899  * Verify the device number @p devno is not used yet in the subchannel
1900  * set identified by @p cssid and @p ssid. Set @p schid to the index
1901  * of the first free subchannel in the subchannel set, if there is
1902  * any. Return true if everything succeeded and false otherwise.
1903  */
1904 static bool css_find_free_subch_for_devno(uint8_t cssid, uint8_t ssid,
1905                                           uint16_t devno, uint16_t *schid,
1906                                           Error **errp)
1907 {
1908     uint32_t free_schid;
1909 
1910     assert(schid);
1911     if (css_devno_used(cssid, ssid, devno)) {
1912         error_setg(errp, "Device %x.%x.%04x already exists",
1913                    cssid, ssid, devno);
1914         return false;
1915     }
1916     free_schid = css_find_free_subch(cssid, ssid);
1917     if (free_schid > MAX_SCHID) {
1918         error_setg(errp, "No free subchannel found for %x.%x.%04x",
1919                    cssid, ssid, devno);
1920         return false;
1921     }
1922     *schid = free_schid;
1923     return true;
1924 }
1925 
1926 /**
1927  * Find the first free subchannel (id) and device number
1928  *
1929  * Locate the first free subchannel and first free device number in
1930  * any of the subchannel sets of the channel subsystem identified by
1931  * @p cssid. Return false if no free subchannel / device number could
1932  * be found. Otherwise set @p ssid, @p devno and @p schid to identify
1933  * the available subchannel and device number and return true.
1934  *
1935  * May modify @p ssid, @p devno and / or @p schid even if no free
1936  * subchannel / device number could be found.
1937  */
1938 static bool css_find_free_subch_and_devno(uint8_t cssid, uint8_t *ssid,
1939                                           uint16_t *devno, uint16_t *schid,
1940                                           Error **errp)
1941 {
1942     uint32_t free_schid, free_devno;
1943 
1944     assert(ssid && devno && schid);
1945     for (*ssid = 0; *ssid <= MAX_SSID; (*ssid)++) {
1946         free_schid = css_find_free_subch(cssid, *ssid);
1947         if (free_schid > MAX_SCHID) {
1948             continue;
1949         }
1950         free_devno = css_find_free_devno(cssid, *ssid, free_schid);
1951         if (free_devno > MAX_DEVNO) {
1952             continue;
1953         }
1954         *schid = free_schid;
1955         *devno = free_devno;
1956         return true;
1957     }
1958     error_setg(errp, "Virtual channel subsystem is full!");
1959     return false;
1960 }
1961 
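/*
 * A subchannel is visible to the guest if its ssid is within the enabled
 * range and it either lives in the default css or MCSS-E has been enabled
 * (max_cssid > 0).
 */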
1962 bool css_subch_visible(SubchDev *sch)
1963 {
1964     if (sch->ssid > channel_subsys.max_ssid) {
1965         return false;
1966     }
1967 
1968     if (sch->cssid != channel_subsys.default_cssid) {
1969         return (channel_subsys.max_cssid > 0);
1970     }
1971 
1972     return true;
1973 }
1974 
1975 bool css_present(uint8_t cssid)
1976 {
1977     return (channel_subsys.css[cssid] != NULL);
1978 }
1979 
1980 bool css_devno_used(uint8_t cssid, uint8_t ssid, uint16_t devno)
1981 {
1982     if (!channel_subsys.css[cssid]) {
1983         return false;
1984     }
1985     if (!channel_subsys.css[cssid]->sch_set[ssid]) {
1986         return false;
1987     }
1988 
1989     return !!test_bit(devno,
1990                       channel_subsys.css[cssid]->sch_set[ssid]->devnos_used);
1991 }
1992 
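/*
 * Register @p sch under cssid/ssid/schid, or deregister the slot if
 * @p sch is NULL, keeping the schid and devno usage bitmaps of the
 * subchannel set in sync. The subchannel set is allocated on first use.
 */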
1993 void css_subch_assign(uint8_t cssid, uint8_t ssid, uint16_t schid,
1994                       uint16_t devno, SubchDev *sch)
1995 {
1996     CssImage *css;
1997     SubchSet *s_set;
1998 
1999     trace_css_assign_subch(sch ? "assign" : "deassign", cssid, ssid, schid,
2000                            devno);
2001     if (!channel_subsys.css[cssid]) {
2002         fprintf(stderr,
2003                 "Suspicious call to %s (%x.%x.%04x) for non-existing css!\n",
2004                 __func__, cssid, ssid, schid);
2005         return;
2006     }
2007     css = channel_subsys.css[cssid];
2008 
2009     if (!css->sch_set[ssid]) {
2010         css->sch_set[ssid] = g_malloc0(sizeof(SubchSet));
2011     }
2012     s_set = css->sch_set[ssid];
2013 
2014     s_set->sch[schid] = sch;
2015     if (sch) {
2016         set_bit(schid, s_set->schids_used);
2017         set_bit(devno, s_set->devnos_used);
2018     } else {
2019         clear_bit(schid, s_set->schids_used);
2020         clear_bit(devno, s_set->devnos_used);
2021     }
2022 }
2023 
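/*
 * Queue a channel report word and, if channel_subsys.do_crw_mchk is set,
 * inject a CRW-pending machine check. If no memory is available for the
 * container, the report is dropped and the next successfully queued CRW
 * carries the overflow flag (CRW_FLAGS_MASK_R) instead.
 * Example: css_do_rchp() above queues a solicited channel path report via
 * css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 1, 0, chpid) when only the
 * default css is in use.
 */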
2024 void css_queue_crw(uint8_t rsc, uint8_t erc, int solicited,
2025                    int chain, uint16_t rsid)
2026 {
2027     CrwContainer *crw_cont;
2028 
2029     trace_css_crw(rsc, erc, rsid, chain ? "(chained)" : "");
2030     /* TODO: Maybe use a static crw pool? */
2031     crw_cont = g_try_malloc0(sizeof(CrwContainer));
2032     if (!crw_cont) {
2033         channel_subsys.crws_lost = true;
2034         return;
2035     }
2036     crw_cont->crw.flags = (rsc << 8) | erc;
2037     if (solicited) {
2038         crw_cont->crw.flags |= CRW_FLAGS_MASK_S;
2039     }
2040     if (chain) {
2041         crw_cont->crw.flags |= CRW_FLAGS_MASK_C;
2042     }
2043     crw_cont->crw.rsid = rsid;
2044     if (channel_subsys.crws_lost) {
2045         crw_cont->crw.flags |= CRW_FLAGS_MASK_R;
2046         channel_subsys.crws_lost = false;
2047     }
2048 
2049     QTAILQ_INSERT_TAIL(&channel_subsys.pending_crws, crw_cont, sibling);
2050 
2051     if (channel_subsys.do_crw_mchk) {
2052         channel_subsys.do_crw_mchk = false;
2053         /* Inject crw pending machine check. */
2054         s390_crw_mchk();
2055     }
2056 }
2057 
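/*
 * Queue the CRW(s) announcing a hotplugged or unplugged subchannel.
 * Nothing is reported for cold-plugged devices, or for subchannel sets /
 * channel subsystems the guest has not enabled. If MSS or MCSS-E is
 * active, a second, chained CRW carrying the guest-visible cssid and
 * ssid is queued as well.
 */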
2058 void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid,
2059                            int hotplugged, int add)
2060 {
2061     uint8_t guest_cssid;
2062     bool chain_crw;
2063 
2064     if (add && !hotplugged) {
2065         return;
2066     }
2067     if (channel_subsys.max_cssid == 0) {
2068         /* Default cssid shows up as 0. */
2069         guest_cssid = (cssid == channel_subsys.default_cssid) ? 0 : cssid;
2070     } else {
2071         /* Show real cssid to the guest. */
2072         guest_cssid = cssid;
2073     }
2074     /*
2075      * Only notify for higher subchannel sets/channel subsystems if the
2076      * guest has enabled it.
2077      */
2078     if ((ssid > channel_subsys.max_ssid) ||
2079         (guest_cssid > channel_subsys.max_cssid) ||
2080         ((channel_subsys.max_cssid == 0) &&
2081          (cssid != channel_subsys.default_cssid))) {
2082         return;
2083     }
2084     chain_crw = (channel_subsys.max_ssid > 0) ||
2085             (channel_subsys.max_cssid > 0);
2086     css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0, chain_crw ? 1 : 0, schid);
2087     if (chain_crw) {
2088         css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0, 0,
2089                       (guest_cssid << 8) | (ssid << 4));
2090     }
2091     /* CRW_ERC_IPI --> clear pending interrupts */
2092     css_clear_io_interrupt(css_do_build_subchannel_id(cssid, ssid), schid);
2093 }
2094 
2095 void css_generate_chp_crws(uint8_t cssid, uint8_t chpid)
2096 {
2097     /* TODO */
2098 }
2099 
2100 void css_generate_css_crws(uint8_t cssid)
2101 {
2102     if (!channel_subsys.sei_pending) {
2103         css_queue_crw(CRW_RSC_CSS, CRW_ERC_EVENT, 0, 0, cssid);
2104     }
2105     channel_subsys.sei_pending = true;
2106 }
2107 
2108 void css_clear_sei_pending(void)
2109 {
2110     channel_subsys.sei_pending = false;
2111 }
2112 
2113 int css_enable_mcsse(void)
2114 {
2115     trace_css_enable_facility("mcsse");
2116     channel_subsys.max_cssid = MAX_CSSID;
2117     return 0;
2118 }
2119 
2120 int css_enable_mss(void)
2121 {
2122     trace_css_enable_facility("mss");
2123     channel_subsys.max_ssid = MAX_SSID;
2124     return 0;
2125 }
2126 
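/*
 * Return a subchannel to its initial, disabled state, invoking the
 * device's disable callback if the subchannel was enabled.
 */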
2127 void css_reset_sch(SubchDev *sch)
2128 {
2129     PMCW *p = &sch->curr_status.pmcw;
2130 
2131     if ((p->flags & PMCW_FLAGS_MASK_ENA) != 0 && sch->disable_cb) {
2132         sch->disable_cb(sch);
2133     }
2134 
2135     p->intparm = 0;
2136     p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
2137                   PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
2138                   PMCW_FLAGS_MASK_MP | PMCW_FLAGS_MASK_TF);
2139     p->flags |= PMCW_FLAGS_MASK_DNV;
2140     p->devno = sch->devno;
2141     p->pim = 0x80;
2142     p->lpm = p->pim;
2143     p->pnom = 0;
2144     p->lpum = 0;
2145     p->mbi = 0;
2146     p->pom = 0xff;
2147     p->pam = 0x80;
2148     p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_XMWME |
2149                   PMCW_CHARS_MASK_CSENSE);
2150 
2151     memset(&sch->curr_status.scsw, 0, sizeof(sch->curr_status.scsw));
2152     sch->curr_status.mba = 0;
2153 
2154     sch->channel_prog = 0x0;
2155     sch->last_cmd_valid = false;
2156     sch->thinint_active = false;
2157 }
2158 
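/*
 * Reset the channel subsystem: stop channel measurement, drop all pending
 * channel reports and disable MSS/MCSS-E again.
 */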
2159 void css_reset(void)
2160 {
2161     CrwContainer *crw_cont;
2162 
2163     /* Clean up monitoring. */
2164     channel_subsys.chnmon_active = false;
2165     channel_subsys.chnmon_area = 0;
2166 
2167     /* Clear pending CRWs. */
2168     while ((crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws))) {
2169         QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling);
2170         g_free(crw_cont);
2171     }
2172     channel_subsys.sei_pending = false;
2173     channel_subsys.do_crw_mchk = true;
2174     channel_subsys.crws_lost = false;
2175 
2176     /* Reset maximum ids. */
2177     channel_subsys.max_cssid = 0;
2178     channel_subsys.max_ssid = 0;
2179 }
2180 
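/*
 * Property getter: format the device id as cssid.ssid.devid (e.g.
 * "fe.1.23ab"), or "<unset>" if no id has been configured yet.
 */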
2181 static void get_css_devid(Object *obj, Visitor *v, const char *name,
2182                           void *opaque, Error **errp)
2183 {
2184     DeviceState *dev = DEVICE(obj);
2185     Property *prop = opaque;
2186     CssDevId *dev_id = qdev_get_prop_ptr(dev, prop);
2187     char buffer[] = "xx.x.xxxx";
2188     char *p = buffer;
2189     int r;
2190 
2191     if (dev_id->valid) {
2193         r = snprintf(buffer, sizeof(buffer), "%02x.%1x.%04x", dev_id->cssid,
2194                      dev_id->ssid, dev_id->devid);
2195         assert(r == sizeof(buffer) - 1);
2196 
2197         /* drop leading zero */
2198         if (dev_id->cssid <= 0xf) {
2199             p++;
2200         }
2201     } else {
2202         snprintf(buffer, sizeof(buffer), "<unset>");
2203     }
2204 
2205     visit_type_str(v, name, &p, errp);
2206 }
2207 
2208 /*
2209  * Parse <cssid>.<ssid>.<devid> and check that cssid/ssid are within range.
2210  */
2211 static void set_css_devid(Object *obj, Visitor *v, const char *name,
2212                           void *opaque, Error **errp)
2213 {
2214     DeviceState *dev = DEVICE(obj);
2215     Property *prop = opaque;
2216     CssDevId *dev_id = qdev_get_prop_ptr(dev, prop);
2217     Error *local_err = NULL;
2218     char *str;
2219     int num, n1, n2;
2220     unsigned int cssid, ssid, devid;
2221 
2222     if (dev->realized) {
2223         qdev_prop_set_after_realize(dev, name, errp);
2224         return;
2225     }
2226 
2227     visit_type_str(v, name, &str, &local_err);
2228     if (local_err) {
2229         error_propagate(errp, local_err);
2230         return;
2231     }
2232 
2233     num = sscanf(str, "%2x.%1x%n.%4x%n", &cssid, &ssid, &n1, &devid, &n2);
2234     if (num != 3 || (n2 - n1) != 5 || strlen(str) != n2) {
2235         error_set_from_qdev_prop_error(errp, EINVAL, dev, prop, str);
2236         goto out;
2237     }
2238     if ((cssid > MAX_CSSID) || (ssid > MAX_SSID)) {
2239         error_setg(errp, "Invalid cssid or ssid: cssid %x, ssid %x",
2240                    cssid, ssid);
2241         goto out;
2242     }
2243 
2244     dev_id->cssid = cssid;
2245     dev_id->ssid = ssid;
2246     dev_id->devid = devid;
2247     dev_id->valid = true;
2248 
2249 out:
2250     g_free(str);
2251 }
2252 
2253 const PropertyInfo css_devid_propinfo = {
2254     .name = "str",
2255     .description = "Identifier of an I/O device in the channel "
2256                    "subsystem, example: fe.1.23ab",
2257     .get = get_css_devid,
2258     .set = set_css_devid,
2259 };
2260 
2261 const PropertyInfo css_devid_ro_propinfo = {
2262     .name = "str",
2263     .description = "Read-only identifier of an I/O device in the channel "
2264                    "subsystem, example: fe.1.23ab",
2265     .get = get_css_devid,
2266 };
2267 
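/*
 * Allocate and register a subchannel for a new device. A valid @p bus_id
 * is used as-is (with its cssid folded into the default css when
 * @p squash_mcss is set); otherwise a free ssid/devno/schid is picked in
 * the default css for virtual devices, or in the first non-virtual css
 * image with free space for non-virtual devices.
 */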
2268 SubchDev *css_create_sch(CssDevId bus_id, bool is_virtual, bool squash_mcss,
2269                          Error **errp)
2270 {
2271     uint16_t schid = 0;
2272     SubchDev *sch;
2273 
2274     if (bus_id.valid) {
2275         if (is_virtual != (bus_id.cssid == VIRTUAL_CSSID)) {
2276             error_setg(errp, "cssid %hhx not valid for %s devices",
2277                        bus_id.cssid,
2278                        (is_virtual ? "virtual" : "non-virtual"));
2279             return NULL;
2280         }
2281     }
2282 
2283     if (bus_id.valid) {
2284         if (squash_mcss) {
2285             bus_id.cssid = channel_subsys.default_cssid;
2286         } else if (!channel_subsys.css[bus_id.cssid]) {
2287             css_create_css_image(bus_id.cssid, false);
2288         }
2289 
2290         if (!css_find_free_subch_for_devno(bus_id.cssid, bus_id.ssid,
2291                                            bus_id.devid, &schid, errp)) {
2292             return NULL;
2293         }
2294     } else if (squash_mcss || is_virtual) {
2295         bus_id.cssid = channel_subsys.default_cssid;
2296 
2297         if (!css_find_free_subch_and_devno(bus_id.cssid, &bus_id.ssid,
2298                                            &bus_id.devid, &schid, errp)) {
2299             return NULL;
2300         }
2301     } else {
2302         for (bus_id.cssid = 0; bus_id.cssid < MAX_CSSID; ++bus_id.cssid) {
2303             if (bus_id.cssid == VIRTUAL_CSSID) {
2304                 continue;
2305             }
2306 
2307             if (!channel_subsys.css[bus_id.cssid]) {
2308                 css_create_css_image(bus_id.cssid, false);
2309             }
2310 
2311             if (css_find_free_subch_and_devno(bus_id.cssid, &bus_id.ssid,
2312                                               &bus_id.devid, &schid,
2313                                               NULL)) {
2314                 break;
2315             }
2316         }
2317         if (bus_id.cssid == MAX_CSSID) {
2318             error_setg(errp, "Channel subsystem is full!");
2319             return NULL;
2320         }
2321     }
2322 
2323     sch = g_malloc0(sizeof(*sch));
2324     sch->cssid = bus_id.cssid;
2325     sch->ssid = bus_id.ssid;
2326     sch->devno = bus_id.devid;
2327     sch->schid = schid;
2328     css_subch_assign(sch->cssid, sch->ssid, schid, sch->devno, sch);
2329     return sch;
2330 }
2331 
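/*
 * Read the channel path ids of the real device from
 * /sys/bus/css/devices/<cssid>.<ssid>.<devid>/chpids into the PMCW.
 */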
2332 static int css_sch_get_chpids(SubchDev *sch, CssDevId *dev_id)
2333 {
2334     char *fid_path;
2335     FILE *fd;
2336     uint32_t chpid[8];
2337     int i;
2338     PMCW *p = &sch->curr_status.pmcw;
2339 
2340     fid_path = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/chpids",
2341                                dev_id->cssid, dev_id->ssid, dev_id->devid);
2342     fd = fopen(fid_path, "r");
2343     if (fd == NULL) {
2344         error_report("%s: open %s failed", __func__, fid_path);
2345         g_free(fid_path);
2346         return -EINVAL;
2347     }
2348 
2349     if (fscanf(fd, "%x %x %x %x %x %x %x %x",
2350         &chpid[0], &chpid[1], &chpid[2], &chpid[3],
2351         &chpid[4], &chpid[5], &chpid[6], &chpid[7]) != 8) {
2352         fclose(fd);
2353         g_free(fid_path);
2354         return -EINVAL;
2355     }
2356 
2357     for (i = 0; i < ARRAY_SIZE(p->chpid); i++) {
2358         p->chpid[i] = chpid[i];
2359     }
2360 
2361     fclose(fd);
2362     g_free(fid_path);
2363 
2364     return 0;
2365 }
2366 
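/*
 * Read the path masks (pim/pam/pom) of the real device from the sysfs
 * "pimpampom" attribute into the PMCW.
 */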
2367 static int css_sch_get_path_masks(SubchDev *sch, CssDevId *dev_id)
2368 {
2369     char *fid_path;
2370     FILE *fd;
2371     uint32_t pim, pam, pom;
2372     PMCW *p = &sch->curr_status.pmcw;
2373 
2374     fid_path = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/pimpampom",
2375                                dev_id->cssid, dev_id->ssid, dev_id->devid);
2376     fd = fopen(fid_path, "r");
2377     if (fd == NULL) {
2378         error_report("%s: open %s failed", __func__, fid_path);
2379         g_free(fid_path);
2380         return -EINVAL;
2381     }
2382 
2383     if (fscanf(fd, "%x %x %x", &pim, &pam, &pom) != 3) {
2384         fclose(fd);
2385         g_free(fid_path);
2386         return -EINVAL;
2387     }
2388 
2389     p->pim = pim;
2390     p->pam = pam;
2391     p->pom = pom;
2392     fclose(fd);
2393     g_free(fid_path);
2394 
2395     return 0;
2396 }
2397 
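/*
 * Read the type of channel path @p chpid from sysfs
 * (/sys/devices/css<cssid>/chp0.<chpid>/type).
 */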
2398 static int css_sch_get_chpid_type(uint8_t chpid, uint32_t *type,
2399                                   CssDevId *dev_id)
2400 {
2401     char *fid_path;
2402     FILE *fd;
2403 
2404     fid_path = g_strdup_printf("/sys/devices/css%x/chp0.%02x/type",
2405                                dev_id->cssid, chpid);
2406     fd = fopen(fid_path, "r");
2407     if (fd == NULL) {
2408         error_report("%s: open %s failed", __func__, fid_path);
2409         g_free(fid_path);
2410         return -EINVAL;
2411     }
2412 
2413     if (fscanf(fd, "%x", type) != 1) {
2414         fclose(fd);
2415         g_free(fid_path);
2416         return -EINVAL;
2417     }
2418 
2419     fclose(fd);
2420     g_free(fid_path);
2421 
2422     return 0;
2423 }
2424 
2425 /*
2426  * We currently retrieve the real device information from sysfs to build the
2427  * guest subchannel information block, without considering migration.
2428  * This needs to be revisited when migration support is added.
2429  */
2430 int css_sch_build_schib(SubchDev *sch, CssDevId *dev_id)
2431 {
2432     CssImage *css = channel_subsys.css[sch->cssid];
2433     PMCW *p = &sch->curr_status.pmcw;
2434     SCSW *s = &sch->curr_status.scsw;
2435     uint32_t type;
2436     int i, ret;
2437 
2438     assert(css != NULL);
2439     memset(p, 0, sizeof(PMCW));
2440     p->flags |= PMCW_FLAGS_MASK_DNV;
2441     /* We are dealing with I/O subchannels only. */
2442     p->devno = sch->devno;
2443 
2444     /* Grab path mask from sysfs. */
2445     ret = css_sch_get_path_masks(sch, dev_id);
2446     if (ret) {
2447         return ret;
2448     }
2449 
2450     /* Grab chpids from sysfs. */
2451     ret = css_sch_get_chpids(sch, dev_id);
2452     if (ret) {
2453         return ret;
2454     }
2455 
2456     /* Look up the chpid types and register new chpids. */
2457     for (i = 0; i < ARRAY_SIZE(p->chpid); i++) {
2458         if (p->chpid[i] && !css->chpids[p->chpid[i]].in_use) {
2459             ret = css_sch_get_chpid_type(p->chpid[i], &type, dev_id);
2460             if (ret) {
2461                 return ret;
2462             }
2463             css_add_chpid(sch->cssid, p->chpid[i], type, false);
2464         }
2465     }
2466 
2467     memset(s, 0, sizeof(SCSW));
2468     sch->curr_status.mba = 0;
2469     for (i = 0; i < ARRAY_SIZE(sch->curr_status.mda); i++) {
2470         sch->curr_status.mda[i] = 0;
2471     }
2472 
2473     return 0;
2474 }
2475