xref: /openbmc/qemu/hw/s390x/css.c (revision 0b1183e3)
1 /*
2  * Channel subsystem base support.
3  *
4  * Copyright 2012 IBM Corp.
5  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6  *
7  * This work is licensed under the terms of the GNU GPL, version 2 or (at
8  * your option) any later version. See the COPYING file in the top-level
9  * directory.
10  */
11 
12 #include "qemu/osdep.h"
13 #include "qapi/error.h"
14 #include "qapi/visitor.h"
15 #include "hw/qdev.h"
16 #include "qemu/error-report.h"
17 #include "qemu/bitops.h"
18 #include "qemu/error-report.h"
19 #include "exec/address-spaces.h"
20 #include "cpu.h"
21 #include "hw/s390x/ioinst.h"
22 #include "hw/s390x/css.h"
23 #include "trace.h"
24 #include "hw/s390x/s390_flic.h"
25 #include "hw/s390x/s390-virtio-ccw.h"
26 
27 typedef struct CrwContainer {
28     CRW crw;
29     QTAILQ_ENTRY(CrwContainer) sibling;
30 } CrwContainer;
31 
32 static const VMStateDescription vmstate_crw = {
33     .name = "s390_crw",
34     .version_id = 1,
35     .minimum_version_id = 1,
36     .fields = (VMStateField[]) {
37         VMSTATE_UINT16(flags, CRW),
38         VMSTATE_UINT16(rsid, CRW),
39         VMSTATE_END_OF_LIST()
40     },
41 };
42 
43 static const VMStateDescription vmstate_crw_container = {
44     .name = "s390_crw_container",
45     .version_id = 1,
46     .minimum_version_id = 1,
47     .fields = (VMStateField[]) {
48         VMSTATE_STRUCT(crw, CrwContainer, 0, vmstate_crw, CRW),
49         VMSTATE_END_OF_LIST()
50     },
51 };
52 
53 typedef struct ChpInfo {
54     uint8_t in_use;
55     uint8_t type;
56     uint8_t is_virtual;
57 } ChpInfo;
58 
59 static const VMStateDescription vmstate_chp_info = {
60     .name = "s390_chp_info",
61     .version_id = 1,
62     .minimum_version_id = 1,
63     .fields = (VMStateField[]) {
64         VMSTATE_UINT8(in_use, ChpInfo),
65         VMSTATE_UINT8(type, ChpInfo),
66         VMSTATE_UINT8(is_virtual, ChpInfo),
67         VMSTATE_END_OF_LIST()
68     }
69 };
70 
71 typedef struct SubchSet {
72     SubchDev *sch[MAX_SCHID + 1];
73     unsigned long schids_used[BITS_TO_LONGS(MAX_SCHID + 1)];
74     unsigned long devnos_used[BITS_TO_LONGS(MAX_SCHID + 1)];
75 } SubchSet;
76 
77 static const VMStateDescription vmstate_scsw = {
78     .name = "s390_scsw",
79     .version_id = 1,
80     .minimum_version_id = 1,
81     .fields = (VMStateField[]) {
82         VMSTATE_UINT16(flags, SCSW),
83         VMSTATE_UINT16(ctrl, SCSW),
84         VMSTATE_UINT32(cpa, SCSW),
85         VMSTATE_UINT8(dstat, SCSW),
86         VMSTATE_UINT8(cstat, SCSW),
87         VMSTATE_UINT16(count, SCSW),
88         VMSTATE_END_OF_LIST()
89     }
90 };
91 
92 static const VMStateDescription vmstate_pmcw = {
93     .name = "s390_pmcw",
94     .version_id = 1,
95     .minimum_version_id = 1,
96     .fields = (VMStateField[]) {
97         VMSTATE_UINT32(intparm, PMCW),
98         VMSTATE_UINT16(flags, PMCW),
99         VMSTATE_UINT16(devno, PMCW),
100         VMSTATE_UINT8(lpm, PMCW),
101         VMSTATE_UINT8(pnom, PMCW),
102         VMSTATE_UINT8(lpum, PMCW),
103         VMSTATE_UINT8(pim, PMCW),
104         VMSTATE_UINT16(mbi, PMCW),
105         VMSTATE_UINT8(pom, PMCW),
106         VMSTATE_UINT8(pam, PMCW),
107         VMSTATE_UINT8_ARRAY(chpid, PMCW, 8),
108         VMSTATE_UINT32(chars, PMCW),
109         VMSTATE_END_OF_LIST()
110     }
111 };
112 
113 static const VMStateDescription vmstate_schib = {
114     .name = "s390_schib",
115     .version_id = 1,
116     .minimum_version_id = 1,
117     .fields = (VMStateField[]) {
118         VMSTATE_STRUCT(pmcw, SCHIB, 0, vmstate_pmcw, PMCW),
119         VMSTATE_STRUCT(scsw, SCHIB, 0, vmstate_scsw, SCSW),
120         VMSTATE_UINT64(mba, SCHIB),
121         VMSTATE_UINT8_ARRAY(mda, SCHIB, 4),
122         VMSTATE_END_OF_LIST()
123     }
124 };
125 
126 
127 static const VMStateDescription vmstate_ccw1 = {
128     .name = "s390_ccw1",
129     .version_id = 1,
130     .minimum_version_id = 1,
131     .fields = (VMStateField[]) {
132         VMSTATE_UINT8(cmd_code, CCW1),
133         VMSTATE_UINT8(flags, CCW1),
134         VMSTATE_UINT16(count, CCW1),
135         VMSTATE_UINT32(cda, CCW1),
136         VMSTATE_END_OF_LIST()
137     }
138 };
139 
140 static const VMStateDescription vmstate_ciw = {
141     .name = "s390_ciw",
142     .version_id = 1,
143     .minimum_version_id = 1,
144     .fields = (VMStateField[]) {
145         VMSTATE_UINT8(type, CIW),
146         VMSTATE_UINT8(command, CIW),
147         VMSTATE_UINT16(count, CIW),
148         VMSTATE_END_OF_LIST()
149     }
150 };
151 
152 static const VMStateDescription vmstate_sense_id = {
153     .name = "s390_sense_id",
154     .version_id = 1,
155     .minimum_version_id = 1,
156     .fields = (VMStateField[]) {
157         VMSTATE_UINT8(reserved, SenseId),
158         VMSTATE_UINT16(cu_type, SenseId),
159         VMSTATE_UINT8(cu_model, SenseId),
160         VMSTATE_UINT16(dev_type, SenseId),
161         VMSTATE_UINT8(dev_model, SenseId),
162         VMSTATE_UINT8(unused, SenseId),
163         VMSTATE_STRUCT_ARRAY(ciw, SenseId, MAX_CIWS, 0, vmstate_ciw, CIW),
164         VMSTATE_END_OF_LIST()
165     }
166 };
167 
168 static const VMStateDescription vmstate_orb = {
169     .name = "s390_orb",
170     .version_id = 1,
171     .minimum_version_id = 1,
172     .fields = (VMStateField[]) {
173         VMSTATE_UINT32(intparm, ORB),
174         VMSTATE_UINT16(ctrl0, ORB),
175         VMSTATE_UINT8(lpm, ORB),
176         VMSTATE_UINT8(ctrl1, ORB),
177         VMSTATE_UINT32(cpa, ORB),
178         VMSTATE_END_OF_LIST()
179     }
180 };
181 
182 static bool vmstate_schdev_orb_needed(void *opaque)
183 {
184     return css_migration_enabled();
185 }
186 
187 static const VMStateDescription vmstate_schdev_orb = {
188     .name = "s390_subch_dev/orb",
189     .version_id = 1,
190     .minimum_version_id = 1,
191     .needed = vmstate_schdev_orb_needed,
192     .fields = (VMStateField[]) {
193         VMSTATE_STRUCT(orb, SubchDev, 1, vmstate_orb, ORB),
194         VMSTATE_END_OF_LIST()
195     }
196 };
197 
198 static int subch_dev_post_load(void *opaque, int version_id);
199 static void subch_dev_pre_save(void *opaque);
200 
201 const char err_hint_devno[] = "Devno mismatch, tried to load wrong section!"
202     " Likely reason: some sequences of plug and unplug  can break"
203     " migration for machine versions prior to  2.7 (known design flaw).";
204 
205 const VMStateDescription vmstate_subch_dev = {
206     .name = "s390_subch_dev",
207     .version_id = 1,
208     .minimum_version_id = 1,
209     .post_load = subch_dev_post_load,
210     .pre_save = subch_dev_pre_save,
211     .fields = (VMStateField[]) {
212         VMSTATE_UINT8_EQUAL(cssid, SubchDev, "Bug!"),
213         VMSTATE_UINT8_EQUAL(ssid, SubchDev, "Bug!"),
214         VMSTATE_UINT16(migrated_schid, SubchDev),
215         VMSTATE_UINT16_EQUAL(devno, SubchDev, err_hint_devno),
216         VMSTATE_BOOL(thinint_active, SubchDev),
217         VMSTATE_STRUCT(curr_status, SubchDev, 0, vmstate_schib, SCHIB),
218         VMSTATE_UINT8_ARRAY(sense_data, SubchDev, 32),
219         VMSTATE_UINT64(channel_prog, SubchDev),
220         VMSTATE_STRUCT(last_cmd, SubchDev, 0, vmstate_ccw1, CCW1),
221         VMSTATE_BOOL(last_cmd_valid, SubchDev),
222         VMSTATE_STRUCT(id, SubchDev, 0, vmstate_sense_id, SenseId),
223         VMSTATE_BOOL(ccw_fmt_1, SubchDev),
224         VMSTATE_UINT8(ccw_no_data_cnt, SubchDev),
225         VMSTATE_END_OF_LIST()
226     },
227     .subsections = (const VMStateDescription * []) {
228         &vmstate_schdev_orb,
229         NULL
230     }
231 };
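
/*
 * Note: the "orb" subsection above only travels in the migration stream when
 * vmstate_schdev_orb_needed() returns true, i.e. when css migration is
 * enabled (a machine-type dependent switch); older machine types neither
 * transmit nor expect it.
 */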
232 
233 typedef struct IndAddrPtrTmp {
234     IndAddr **parent;
235     uint64_t addr;
236     int32_t len;
237 } IndAddrPtrTmp;
238 
239 static int post_load_ind_addr(void *opaque, int version_id)
240 {
241     IndAddrPtrTmp *ptmp = opaque;
242     IndAddr **ind_addr = ptmp->parent;
243 
244     if (ptmp->len != 0) {
245         *ind_addr = get_indicator(ptmp->addr, ptmp->len);
246     } else {
247         *ind_addr = NULL;
248     }
249     return 0;
250 }
251 
252 static void pre_save_ind_addr(void *opaque)
253 {
254     IndAddrPtrTmp *ptmp = opaque;
255     IndAddr *ind_addr = *(ptmp->parent);
256 
257     if (ind_addr != NULL) {
258         ptmp->len = ind_addr->len;
259         ptmp->addr = ind_addr->addr;
260     } else {
261         ptmp->len = 0;
262         ptmp->addr = 0L;
263     }
264 }
265 
266 const VMStateDescription vmstate_ind_addr_tmp = {
267     .name = "s390_ind_addr_tmp",
268     .pre_save = pre_save_ind_addr,
269     .post_load = post_load_ind_addr,
270 
271     .fields = (VMStateField[]) {
272         VMSTATE_INT32(len, IndAddrPtrTmp),
273         VMSTATE_UINT64(addr, IndAddrPtrTmp),
274         VMSTATE_END_OF_LIST()
275     }
276 };
277 
278 const VMStateDescription vmstate_ind_addr = {
279     .name = "s390_ind_addr_tmp",
280     .fields = (VMStateField[]) {
281         VMSTATE_WITH_TMP(IndAddr*, IndAddrPtrTmp, vmstate_ind_addr_tmp),
282         VMSTATE_END_OF_LIST()
283     }
284 };
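
/*
 * How the pointer round-trips: on save, pre_save_ind_addr() flattens the
 * IndAddr pointer into the temporary {addr, len} pair; on load,
 * post_load_ind_addr() re-acquires an equivalent indicator via
 * get_indicator(addr, len), so the pointer value itself is never migrated.
 * A device would embed this with something along the lines of
 *
 *     VMSTATE_STRUCT(ind_field, MyCcwDevice, 1, vmstate_ind_addr, IndAddr *)
 *
 * where ind_field/MyCcwDevice are placeholder names for an IndAddr * member
 * of the device state.
 */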
285 
286 typedef struct CssImage {
287     SubchSet *sch_set[MAX_SSID + 1];
288     ChpInfo chpids[MAX_CHPID + 1];
289 } CssImage;
290 
291 static const VMStateDescription vmstate_css_img = {
292     .name = "s390_css_img",
293     .version_id = 1,
294     .minimum_version_id = 1,
295     .fields = (VMStateField[]) {
296         /* Subchannel sets have no relevant state. */
297         VMSTATE_STRUCT_ARRAY(chpids, CssImage, MAX_CHPID + 1, 0,
298                              vmstate_chp_info, ChpInfo),
299         VMSTATE_END_OF_LIST()
300     }
301 
302 };
303 
304 typedef struct IoAdapter {
305     uint32_t id;
306     uint8_t type;
307     uint8_t isc;
308     uint8_t flags;
309 } IoAdapter;
310 
311 typedef struct ChannelSubSys {
312     QTAILQ_HEAD(, CrwContainer) pending_crws;
313     bool sei_pending;
314     bool do_crw_mchk;
315     bool crws_lost;
316     uint8_t max_cssid;
317     uint8_t max_ssid;
318     bool chnmon_active;
319     uint64_t chnmon_area;
320     CssImage *css[MAX_CSSID + 1];
321     uint8_t default_cssid;
322     /* don't migrate, see css_register_io_adapters */
323     IoAdapter *io_adapters[CSS_IO_ADAPTER_TYPE_NUMS][MAX_ISC + 1];
324     /* don't migrate, see get_indicator and IndAddrPtrTmp */
325     QTAILQ_HEAD(, IndAddr) indicator_addresses;
326 } ChannelSubSys;
327 
328 static const VMStateDescription vmstate_css = {
329     .name = "s390_css",
330     .version_id = 1,
331     .minimum_version_id = 1,
332     .fields = (VMStateField[]) {
333         VMSTATE_QTAILQ_V(pending_crws, ChannelSubSys, 1, vmstate_crw_container,
334                          CrwContainer, sibling),
335         VMSTATE_BOOL(sei_pending, ChannelSubSys),
336         VMSTATE_BOOL(do_crw_mchk, ChannelSubSys),
337         VMSTATE_BOOL(crws_lost, ChannelSubSys),
338         /* These were kind of migrated by virtio */
339         VMSTATE_UINT8(max_cssid, ChannelSubSys),
340         VMSTATE_UINT8(max_ssid, ChannelSubSys),
341         VMSTATE_BOOL(chnmon_active, ChannelSubSys),
342         VMSTATE_UINT64(chnmon_area, ChannelSubSys),
343         VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(css, ChannelSubSys, MAX_CSSID + 1,
344                 0, vmstate_css_img, CssImage),
345         VMSTATE_UINT8(default_cssid, ChannelSubSys),
346         VMSTATE_END_OF_LIST()
347     }
348 };
349 
350 static ChannelSubSys channel_subsys = {
351     .pending_crws = QTAILQ_HEAD_INITIALIZER(channel_subsys.pending_crws),
352     .do_crw_mchk = true,
353     .sei_pending = false,
355     .crws_lost = false,
356     .chnmon_active = false,
357     .indicator_addresses =
358         QTAILQ_HEAD_INITIALIZER(channel_subsys.indicator_addresses),
359 };
360 
361 static void subch_dev_pre_save(void *opaque)
362 {
363     SubchDev *s = opaque;
364 
365     /* Prepare migrated_schid for save */
366     s->migrated_schid = s->schid;
367 }
368 
369 static int subch_dev_post_load(void *opaque, int version_id)
370 {
371 
372     SubchDev *s = opaque;
373 
374     /* Re-assign the subchannel to migrated_schid if necessary */
375     if (s->migrated_schid != s->schid) {
376         if (css_find_subch(true, s->cssid, s->ssid, s->schid) == s) {
377             /*
378              * Cleanup the slot before moving to s->migrated_schid provided
379              * it still belongs to us, i.e. it was not changed by previous
380              * invocation of this function.
381              */
382             css_subch_assign(s->cssid, s->ssid, s->schid, s->devno, NULL);
383         }
384         /* It's OK to re-assign without a prior de-assign. */
385         s->schid = s->migrated_schid;
386         css_subch_assign(s->cssid, s->ssid, s->schid, s->devno, s);
387     }
388 
389     if (css_migration_enabled()) {
390         /* No compat voodoo to do ;) */
391         return 0;
392     }
393     /*
394      * Hack alert. If we don't migrate the channel subsystem status
395      * we still need to find out if the guest enabled mss/mcss-e.
396      * If the subchannel is enabled, it certainly was able to access it,
397      * so adjust the max_ssid/max_cssid values for relevant ssid/cssid
398      * values. This is not watertight, but better than nothing.
399      */
400     if (s->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA) {
401         if (s->ssid) {
402             channel_subsys.max_ssid = MAX_SSID;
403         }
404         if (s->cssid != channel_subsys.default_cssid) {
405             channel_subsys.max_cssid = MAX_CSSID;
406         }
407     }
408     return 0;
409 }
410 
411 void css_register_vmstate(void)
412 {
413     vmstate_register(NULL, 0, &vmstate_css, &channel_subsys);
414 }
415 
416 IndAddr *get_indicator(hwaddr ind_addr, int len)
417 {
418     IndAddr *indicator;
419 
420     QTAILQ_FOREACH(indicator, &channel_subsys.indicator_addresses, sibling) {
421         if (indicator->addr == ind_addr) {
422             indicator->refcnt++;
423             return indicator;
424         }
425     }
426     indicator = g_new0(IndAddr, 1);
427     indicator->addr = ind_addr;
428     indicator->len = len;
429     indicator->refcnt = 1;
430     QTAILQ_INSERT_TAIL(&channel_subsys.indicator_addresses,
431                        indicator, sibling);
432     return indicator;
433 }
434 
435 static int s390_io_adapter_map(AdapterInfo *adapter, uint64_t map_addr,
436                                bool do_map)
437 {
438     S390FLICState *fs = s390_get_flic();
439     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
440 
441     return fsc->io_adapter_map(fs, adapter->adapter_id, map_addr, do_map);
442 }
443 
444 void release_indicator(AdapterInfo *adapter, IndAddr *indicator)
445 {
446     assert(indicator->refcnt > 0);
447     indicator->refcnt--;
448     if (indicator->refcnt > 0) {
449         return;
450     }
451     QTAILQ_REMOVE(&channel_subsys.indicator_addresses, indicator, sibling);
452     if (indicator->map) {
453         s390_io_adapter_map(adapter, indicator->map, false);
454     }
455     g_free(indicator);
456 }
457 
458 int map_indicator(AdapterInfo *adapter, IndAddr *indicator)
459 {
460     int ret;
461 
462     if (indicator->map) {
463         return 0; /* already mapped is not an error */
464     }
465     indicator->map = indicator->addr;
466     ret = s390_io_adapter_map(adapter, indicator->map, true);
467     if ((ret != 0) && (ret != -ENOSYS)) {
468         goto out_err;
469     }
470     return 0;
471 
472 out_err:
473     indicator->map = 0;
474     return ret;
475 }
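
/*
 * Usage sketch (hypothetical caller, illustrative only): indicators are
 * reference counted per guest address, so every successful get_indicator()
 * is expected to be balanced by a release_indicator() on the same adapter:
 *
 *     IndAddr *ind = get_indicator(ind_addr, 8);
 *     if (map_indicator(adapter, ind)) {
 *         release_indicator(adapter, ind);
 *         return -EIO;
 *     }
 *     ...
 *     release_indicator(adapter, ind);
 */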
476 
477 int css_create_css_image(uint8_t cssid, bool default_image)
478 {
479     trace_css_new_image(cssid, default_image ? "(default)" : "");
480     /* 255 is reserved */
481     if (cssid == 255) {
482         return -EINVAL;
483     }
484     if (channel_subsys.css[cssid]) {
485         return -EBUSY;
486     }
487     channel_subsys.css[cssid] = g_malloc0(sizeof(CssImage));
488     if (default_image) {
489         channel_subsys.default_cssid = cssid;
490     }
491     return 0;
492 }
493 
494 uint32_t css_get_adapter_id(CssIoAdapterType type, uint8_t isc)
495 {
496     if (type >= CSS_IO_ADAPTER_TYPE_NUMS || isc > MAX_ISC ||
497         !channel_subsys.io_adapters[type][isc]) {
498         return -1;
499     }
500 
501     return channel_subsys.io_adapters[type][isc]->id;
502 }
503 
504 /**
505  * css_register_io_adapters: Register I/O adapters per ISC during init
506  *
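 * @type: the type (CssIoAdapterType) to register adapters for.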
507  * @swap: an indication if byte swap is needed.
508  * @maskable: an indication if the adapter is subject to the mask operation.
509  * @flags: further characteristics of the adapter.
510  *         e.g. suppressible, an indication if the adapter is subject to AIS.
511  * @errp: location to store error information.
512  */
513 void css_register_io_adapters(CssIoAdapterType type, bool swap, bool maskable,
514                               uint8_t flags, Error **errp)
515 {
516     uint32_t id;
517     int ret, isc;
518     IoAdapter *adapter;
519     S390FLICState *fs = s390_get_flic();
520     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
521 
522     /*
523      * Disallow multiple registrations for the same device type.
524      * Report an error if registering for an already registered type.
525      */
526     if (channel_subsys.io_adapters[type][0]) {
527         error_setg(errp, "Adapters for type %d already registered", type);
            return;
528     }
529 
530     for (isc = 0; isc <= MAX_ISC; isc++) {
531         id = (type << 3) | isc;
532         ret = fsc->register_io_adapter(fs, id, isc, swap, maskable, flags);
533         if (ret == 0) {
534             adapter = g_new0(IoAdapter, 1);
535             adapter->id = id;
536             adapter->isc = isc;
537             adapter->type = type;
538             adapter->flags = flags;
539             channel_subsys.io_adapters[type][isc] = adapter;
540         } else {
541             error_setg_errno(errp, -ret, "Unexpected error %d when "
542                              "registering adapter %d", ret, id);
543             break;
544         }
545     }
546 
547     /*
548      * No need to free registered adapters in kvm: kvm will clean up
549      * when the machine goes away.
550      */
551     if (ret) {
552         for (isc--; isc >= 0; isc--) {
553             g_free(channel_subsys.io_adapters[type][isc]);
554             channel_subsys.io_adapters[type][isc] = NULL;
555         }
556     }
557 
558 }
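
/*
 * Call sketch (illustrative; the adapter type and argument values are only
 * examples): a caller registers all ISCs for its adapter type once during
 * machine init and later looks the ids up per ISC:
 *
 *     css_register_io_adapters(CSS_IO_ADAPTER_VIRTIO, true, false, 0, &err);
 *     ...
 *     id = css_get_adapter_id(CSS_IO_ADAPTER_VIRTIO, isc);
 *
 * Since id = (type << 3) | isc, type 0 / isc 2 gives id 2, and type 1 /
 * isc 2 gives id 10.
 */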
559 
560 static void css_clear_io_interrupt(uint16_t subchannel_id,
561                                    uint16_t subchannel_nr)
562 {
563     Error *err = NULL;
564     static bool no_clear_irq;
565     S390FLICState *fs = s390_get_flic();
566     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
567     int r;
568 
569     if (unlikely(no_clear_irq)) {
570         return;
571     }
572     r = fsc->clear_io_irq(fs, subchannel_id, subchannel_nr);
573     switch (r) {
574     case 0:
575         break;
576     case -ENOSYS:
577         no_clear_irq = true;
578         /*
579          * Ignore unavailability, as the user can't do anything
580          * about it anyway.
581          */
582         break;
583     default:
584         error_setg_errno(&err, -r, "unexpected error condition");
585         error_propagate(&error_abort, err);
586     }
587 }
588 
589 static inline uint16_t css_do_build_subchannel_id(uint8_t cssid, uint8_t ssid)
590 {
591     if (channel_subsys.max_cssid > 0) {
592         return (cssid << 8) | (1 << 3) | (ssid << 1) | 1;
593     }
594     return (ssid << 1) | 1;
595 }
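
/*
 * Worked example (values chosen for illustration): with multiple channel
 * subsystems announced (max_cssid > 0), cssid 0xfe and ssid 1 encode as
 * (0xfe << 8) | (1 << 3) | (1 << 1) | 1 = 0xfe0b; otherwise the same ssid
 * encodes simply as (1 << 1) | 1 = 0x0003.
 */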
596 
597 uint16_t css_build_subchannel_id(SubchDev *sch)
598 {
599     return css_do_build_subchannel_id(sch->cssid, sch->ssid);
600 }
601 
602 void css_inject_io_interrupt(SubchDev *sch)
603 {
604     uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;
605 
606     trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
607                            sch->curr_status.pmcw.intparm, isc, "");
608     s390_io_interrupt(css_build_subchannel_id(sch),
609                       sch->schid,
610                       sch->curr_status.pmcw.intparm,
611                       isc << 27);
612 }
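
/*
 * Note (illustrative): the ISC is a three-bit field within pmcw.flags, so
 * the shift by 11 extracts a value of 0-7, and isc << 27 places it where
 * the I/O interruption word expects it, e.g. isc 3 becomes 0x18000000.
 */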
613 
614 void css_conditional_io_interrupt(SubchDev *sch)
615 {
616     /*
617      * If the subchannel is not currently status pending, make it pending
618      * with alert status.
619      */
620     if (!(sch->curr_status.scsw.ctrl & SCSW_STCTL_STATUS_PEND)) {
621         uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;
622 
623         trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
624                                sch->curr_status.pmcw.intparm, isc,
625                                "(unsolicited)");
626         sch->curr_status.scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
627         sch->curr_status.scsw.ctrl |=
628             SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
629         /* Inject an I/O interrupt. */
630         s390_io_interrupt(css_build_subchannel_id(sch),
631                           sch->schid,
632                           sch->curr_status.pmcw.intparm,
633                           isc << 27);
634     }
635 }
636 
637 int css_do_sic(CPUS390XState *env, uint8_t isc, uint16_t mode)
638 {
639     S390FLICState *fs = s390_get_flic();
640     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
641     int r;
642 
643     if (env->psw.mask & PSW_MASK_PSTATE) {
644         r = -PGM_PRIVILEGED;
645         goto out;
646     }
647 
648     trace_css_do_sic(mode, isc);
649     switch (mode) {
650     case SIC_IRQ_MODE_ALL:
651     case SIC_IRQ_MODE_SINGLE:
652         break;
653     default:
654         r = -PGM_OPERAND;
655         goto out;
656     }
657 
658     r = fsc->modify_ais_mode(fs, isc, mode) ? -PGM_OPERATION : 0;
659 out:
660     return r;
661 }
662 
663 void css_adapter_interrupt(CssIoAdapterType type, uint8_t isc)
664 {
665     S390FLICState *fs = s390_get_flic();
666     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
667     uint32_t io_int_word = (isc << 27) | IO_INT_WORD_AI;
668     IoAdapter *adapter = channel_subsys.io_adapters[type][isc];
669 
670     if (!adapter) {
671         return;
672     }
673 
674     trace_css_adapter_interrupt(isc);
675     if (fs->ais_supported) {
676         if (fsc->inject_airq(fs, type, isc, adapter->flags)) {
677             error_report("Failed to inject airq with AIS supported");
678             exit(1);
679         }
680     } else {
681         s390_io_interrupt(0, 0, 0, io_int_word);
682     }
683 }
684 
685 static void sch_handle_clear_func(SubchDev *sch)
686 {
687     PMCW *p = &sch->curr_status.pmcw;
688     SCSW *s = &sch->curr_status.scsw;
689     int path;
690 
691     /* Path management: In our simple css, we always choose the only path. */
692     path = 0x80;
693 
694     /* Reset values prior to 'issuing the clear signal'. */
695     p->lpum = 0;
696     p->pom = 0xff;
697     s->flags &= ~SCSW_FLAGS_MASK_PNO;
698 
699     /* We always 'attempt to issue the clear signal', and we always succeed. */
700     sch->channel_prog = 0x0;
701     sch->last_cmd_valid = false;
702     s->ctrl &= ~SCSW_ACTL_CLEAR_PEND;
703     s->ctrl |= SCSW_STCTL_STATUS_PEND;
704 
705     s->dstat = 0;
706     s->cstat = 0;
707     p->lpum = path;
708 
709 }
710 
711 static void sch_handle_halt_func(SubchDev *sch)
712 {
713 
714     PMCW *p = &sch->curr_status.pmcw;
715     SCSW *s = &sch->curr_status.scsw;
716     hwaddr curr_ccw = sch->channel_prog;
717     int path;
718 
719     /* Path management: In our simple css, we always choose the only path. */
720     path = 0x80;
721 
722     /* We always 'attempt to issue the halt signal', and we always succeed. */
723     sch->channel_prog = 0x0;
724     sch->last_cmd_valid = false;
725     s->ctrl &= ~SCSW_ACTL_HALT_PEND;
726     s->ctrl |= SCSW_STCTL_STATUS_PEND;
727 
728     if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) ||
729         !((s->ctrl & SCSW_ACTL_START_PEND) ||
730           (s->ctrl & SCSW_ACTL_SUSP))) {
731         s->dstat = SCSW_DSTAT_DEVICE_END;
732     }
733     if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) ||
734         (s->ctrl & SCSW_ACTL_SUSP)) {
735         s->cpa = curr_ccw + 8;
736     }
737     s->cstat = 0;
738     p->lpum = path;
739 
740 }
741 
742 static void copy_sense_id_to_guest(SenseId *dest, SenseId *src)
743 {
744     int i;
745 
746     dest->reserved = src->reserved;
747     dest->cu_type = cpu_to_be16(src->cu_type);
748     dest->cu_model = src->cu_model;
749     dest->dev_type = cpu_to_be16(src->dev_type);
750     dest->dev_model = src->dev_model;
751     dest->unused = src->unused;
752     for (i = 0; i < ARRAY_SIZE(dest->ciw); i++) {
753         dest->ciw[i].type = src->ciw[i].type;
754         dest->ciw[i].command = src->ciw[i].command;
755         dest->ciw[i].count = cpu_to_be16(src->ciw[i].count);
756     }
757 }
758 
759 static CCW1 copy_ccw_from_guest(hwaddr addr, bool fmt1)
760 {
761     CCW0 tmp0;
762     CCW1 tmp1;
763     CCW1 ret;
764 
765     if (fmt1) {
766         cpu_physical_memory_read(addr, &tmp1, sizeof(tmp1));
767         ret.cmd_code = tmp1.cmd_code;
768         ret.flags = tmp1.flags;
769         ret.count = be16_to_cpu(tmp1.count);
770         ret.cda = be32_to_cpu(tmp1.cda);
771     } else {
772         cpu_physical_memory_read(addr, &tmp0, sizeof(tmp0));
773         if ((tmp0.cmd_code & 0x0f) == CCW_CMD_TIC) {
774             ret.cmd_code = CCW_CMD_TIC;
775             ret.flags = 0;
776             ret.count = 0;
777         } else {
778             ret.cmd_code = tmp0.cmd_code;
779             ret.flags = tmp0.flags;
780             ret.count = be16_to_cpu(tmp0.count);
781         }
782         ret.cda = be16_to_cpu(tmp0.cda1) | (tmp0.cda0 << 16);
783     }
784     return ret;
785 }
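
/*
 * Worked example (illustrative values): a format-0 CCW with cmd_code 0x02,
 * cda0 0x01, cda1 holding 0x2345 (big-endian) and count 0x0010 is returned
 * as a format-1 CCW with cmd_code 0x02, count 0x10 and the 24-bit data
 * address reassembled as 0x012345; TIC commands deliberately lose their
 * flags and count, as handled above.
 */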
786 
787 static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr,
788                              bool suspend_allowed)
789 {
790     int ret;
791     bool check_len;
792     int len;
793     CCW1 ccw;
794 
795     if (!ccw_addr) {
796         return -EIO;
797     }
798 
799     /* Translate everything to format-1 ccws - the information is the same. */
800     ccw = copy_ccw_from_guest(ccw_addr, sch->ccw_fmt_1);
801 
802     /* Check for invalid command codes. */
803     if ((ccw.cmd_code & 0x0f) == 0) {
804         return -EINVAL;
805     }
806     if (((ccw.cmd_code & 0x0f) == CCW_CMD_TIC) &&
807         ((ccw.cmd_code & 0xf0) != 0)) {
808         return -EINVAL;
809     }
810     if (!sch->ccw_fmt_1 && (ccw.count == 0) &&
811         (ccw.cmd_code != CCW_CMD_TIC)) {
812         return -EINVAL;
813     }
814 
815     /* We don't support MIDA. */
816     if (ccw.flags & CCW_FLAG_MIDA) {
817         return -EINVAL;
818     }
819 
820     if (ccw.flags & CCW_FLAG_SUSPEND) {
821         return suspend_allowed ? -EINPROGRESS : -EINVAL;
822     }
823 
824     check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));
825 
826     if (!ccw.cda) {
827         if (sch->ccw_no_data_cnt == 255) {
828             return -EINVAL;
829         }
830         sch->ccw_no_data_cnt++;
831     }
832 
833     /* Look at the command. */
834     switch (ccw.cmd_code) {
835     case CCW_CMD_NOOP:
836         /* Nothing to do. */
837         ret = 0;
838         break;
839     case CCW_CMD_BASIC_SENSE:
840         if (check_len) {
841             if (ccw.count != sizeof(sch->sense_data)) {
842                 ret = -EINVAL;
843                 break;
844             }
845         }
846         len = MIN(ccw.count, sizeof(sch->sense_data));
847         cpu_physical_memory_write(ccw.cda, sch->sense_data, len);
848         sch->curr_status.scsw.count = ccw.count - len;
849         memset(sch->sense_data, 0, sizeof(sch->sense_data));
850         ret = 0;
851         break;
852     case CCW_CMD_SENSE_ID:
853     {
854         SenseId sense_id;
855 
856         copy_sense_id_to_guest(&sense_id, &sch->id);
857         /* Sense ID information is device specific. */
858         if (check_len) {
859             if (ccw.count != sizeof(sense_id)) {
860                 ret = -EINVAL;
861                 break;
862             }
863         }
864         len = MIN(ccw.count, sizeof(sense_id));
865         /*
866          * Only indicate 0xff in the first sense byte if we actually
867          * have enough place to store at least bytes 0-3.
868          */
869         if (len >= 4) {
870             sense_id.reserved = 0xff;
871         } else {
872             sense_id.reserved = 0;
873         }
874         cpu_physical_memory_write(ccw.cda, &sense_id, len);
875         sch->curr_status.scsw.count = ccw.count - len;
876         ret = 0;
877         break;
878     }
879     case CCW_CMD_TIC:
880         if (sch->last_cmd_valid && (sch->last_cmd.cmd_code == CCW_CMD_TIC)) {
881             ret = -EINVAL;
882             break;
883         }
884         if (ccw.flags & (CCW_FLAG_CC | CCW_FLAG_DC)) {
885             ret = -EINVAL;
886             break;
887         }
888         sch->channel_prog = ccw.cda;
889         ret = -EAGAIN;
890         break;
891     default:
892         if (sch->ccw_cb) {
893             /* Handle device specific commands. */
894             ret = sch->ccw_cb(sch, ccw);
895         } else {
896             ret = -ENOSYS;
897         }
898         break;
899     }
900     sch->last_cmd = ccw;
901     sch->last_cmd_valid = true;
902     if (ret == 0) {
903         if (ccw.flags & CCW_FLAG_CC) {
904             sch->channel_prog += 8;
905             ret = -EAGAIN;
906         }
907     }
908 
909     return ret;
910 }
911 
912 static void sch_handle_start_func_virtual(SubchDev *sch)
913 {
914 
915     PMCW *p = &sch->curr_status.pmcw;
916     SCSW *s = &sch->curr_status.scsw;
917     int path;
918     int ret;
919     bool suspend_allowed;
920 
921     /* Path management: In our simple css, we always choose the only path. */
922     path = 0x80;
923 
924     if (!(s->ctrl & SCSW_ACTL_SUSP)) {
925         /* Start Function triggered via ssch, i.e. we have an ORB */
926         ORB *orb = &sch->orb;
927         s->cstat = 0;
928         s->dstat = 0;
929         /* Look at the orb and try to execute the channel program. */
930         p->intparm = orb->intparm;
931         if (!(orb->lpm & path)) {
932             /* Generate a deferred cc 3 condition. */
933             s->flags |= SCSW_FLAGS_MASK_CC;
934             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
935             s->ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
936             return;
937         }
938         sch->ccw_fmt_1 = !!(orb->ctrl0 & ORB_CTRL0_MASK_FMT);
939         s->flags |= (sch->ccw_fmt_1) ? SCSW_FLAGS_MASK_FMT : 0;
940         sch->ccw_no_data_cnt = 0;
941         suspend_allowed = !!(orb->ctrl0 & ORB_CTRL0_MASK_SPND);
942     } else {
943         /* Start Function resumed via rsch */
944         s->ctrl &= ~(SCSW_ACTL_SUSP | SCSW_ACTL_RESUME_PEND);
945         /* The channel program had been suspended before. */
946         suspend_allowed = true;
947     }
948     sch->last_cmd_valid = false;
949     do {
950         ret = css_interpret_ccw(sch, sch->channel_prog, suspend_allowed);
951         switch (ret) {
952         case -EAGAIN:
953             /* ccw chain, continue processing */
954             break;
955         case 0:
956             /* success */
957             s->ctrl &= ~SCSW_ACTL_START_PEND;
958             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
959             s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
960                     SCSW_STCTL_STATUS_PEND;
961             s->dstat = SCSW_DSTAT_CHANNEL_END | SCSW_DSTAT_DEVICE_END;
962             s->cpa = sch->channel_prog + 8;
963             break;
964         case -EIO:
965             /* I/O errors, status depends on specific devices */
966             break;
967         case -ENOSYS:
968             /* unsupported command, generate unit check (command reject) */
969             s->ctrl &= ~SCSW_ACTL_START_PEND;
970             s->dstat = SCSW_DSTAT_UNIT_CHECK;
971             /* Set sense bit 0 in ecw0. */
972             sch->sense_data[0] = 0x80;
973             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
974             s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
975                     SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
976             s->cpa = sch->channel_prog + 8;
977             break;
978         case -EFAULT:
979             /* memory problem, generate channel data check */
980             s->ctrl &= ~SCSW_ACTL_START_PEND;
981             s->cstat = SCSW_CSTAT_DATA_CHECK;
982             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
983             s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
984                     SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
985             s->cpa = sch->channel_prog + 8;
986             break;
987         case -EBUSY:
988             /* subchannel busy, generate deferred cc 1 */
989             s->flags &= ~SCSW_FLAGS_MASK_CC;
990             s->flags |= (1 << 8);
991             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
992             s->ctrl |= SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
993             break;
994         case -EINPROGRESS:
995             /* channel program has been suspended */
996             s->ctrl &= ~SCSW_ACTL_START_PEND;
997             s->ctrl |= SCSW_ACTL_SUSP;
998             break;
999         default:
1000             /* error, generate channel program check */
1001             s->ctrl &= ~SCSW_ACTL_START_PEND;
1002             s->cstat = SCSW_CSTAT_PROG_CHECK;
1003             s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
1004             s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
1005                     SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
1006             s->cpa = sch->channel_prog + 8;
1007             break;
1008         }
1009     } while (ret == -EAGAIN);
1010 
1011 }
1012 
1013 static int sch_handle_start_func_passthrough(SubchDev *sch)
1014 {
1015 
1016     PMCW *p = &sch->curr_status.pmcw;
1017     SCSW *s = &sch->curr_status.scsw;
1018     int ret;
1019 
1020     ORB *orb = &sch->orb;
1021     if (!(s->ctrl & SCSW_ACTL_SUSP)) {
1022         assert(orb != NULL);
1023         p->intparm = orb->intparm;
1024     }
1025 
1026     /*
1027      * Only support prefetch enable mode.
1028      * Only support 64bit addressing idal.
1029      */
1030     if (!(orb->ctrl0 & ORB_CTRL0_MASK_PFCH) ||
1031         !(orb->ctrl0 & ORB_CTRL0_MASK_C64)) {
1032         return -EINVAL;
1033     }
1034 
1035     ret = s390_ccw_cmd_request(orb, s, sch->driver_data);
1036     switch (ret) {
1037     /* Currently we don't update control block and just return the cc code. */
1038     case 0:
1039         break;
1040     case -EBUSY:
1041         break;
1042     case -ENODEV:
1043         break;
1044     case -EACCES:
1045         /* Let's reflect an inaccessible host device by cc 3. */
1046         ret = -ENODEV;
1047         break;
1048     default:
1049        /*
1050         * All other return codes will trigger a program check,
1051         * or set cc to 1.
1052         */
1053        break;
1054     }
1055 
1056     return ret;
1057 }
1058 
1059 /*
1060  * On real machines, this would run asynchronously to the main vcpus.
1061  * We might want to make some parts of the ssch handling (interpreting
1062  * read/writes) asynchronous later on if we start supporting more than
1063  * our current very simple devices.
1064  */
1065 int do_subchannel_work_virtual(SubchDev *sch)
1066 {
1067 
1068     SCSW *s = &sch->curr_status.scsw;
1069 
1070     if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) {
1071         sch_handle_clear_func(sch);
1072     } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) {
1073         sch_handle_halt_func(sch);
1074     } else if (s->ctrl & SCSW_FCTL_START_FUNC) {
1075         /* Triggered by both ssch and rsch. */
1076         sch_handle_start_func_virtual(sch);
1077     } else {
1078         /* Cannot happen. */
1079         return 0;
1080     }
1081     css_inject_io_interrupt(sch);
1082     return 0;
1083 }
1084 
1085 int do_subchannel_work_passthrough(SubchDev *sch)
1086 {
1087     int ret;
1088     SCSW *s = &sch->curr_status.scsw;
1089 
1090     if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) {
1091         /* TODO: Clear handling */
1092         sch_handle_clear_func(sch);
1093         ret = 0;
1094     } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) {
1095         /* TODO: Halt handling */
1096         sch_handle_halt_func(sch);
1097         ret = 0;
1098     } else if (s->ctrl & SCSW_FCTL_START_FUNC) {
1099         ret = sch_handle_start_func_passthrough(sch);
1100     } else {
1101         /* Cannot happen. */
1102         return -ENODEV;
1103     }
1104 
1105     return ret;
1106 }
1107 
1108 static int do_subchannel_work(SubchDev *sch)
1109 {
1110     if (sch->do_subchannel_work) {
1111         return sch->do_subchannel_work(sch);
1112     } else {
1113         return -EINVAL;
1114     }
1115 }
1116 
1117 static void copy_pmcw_to_guest(PMCW *dest, const PMCW *src)
1118 {
1119     int i;
1120 
1121     dest->intparm = cpu_to_be32(src->intparm);
1122     dest->flags = cpu_to_be16(src->flags);
1123     dest->devno = cpu_to_be16(src->devno);
1124     dest->lpm = src->lpm;
1125     dest->pnom = src->pnom;
1126     dest->lpum = src->lpum;
1127     dest->pim = src->pim;
1128     dest->mbi = cpu_to_be16(src->mbi);
1129     dest->pom = src->pom;
1130     dest->pam = src->pam;
1131     for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
1132         dest->chpid[i] = src->chpid[i];
1133     }
1134     dest->chars = cpu_to_be32(src->chars);
1135 }
1136 
1137 void copy_scsw_to_guest(SCSW *dest, const SCSW *src)
1138 {
1139     dest->flags = cpu_to_be16(src->flags);
1140     dest->ctrl = cpu_to_be16(src->ctrl);
1141     dest->cpa = cpu_to_be32(src->cpa);
1142     dest->dstat = src->dstat;
1143     dest->cstat = src->cstat;
1144     dest->count = cpu_to_be16(src->count);
1145 }
1146 
1147 static void copy_schib_to_guest(SCHIB *dest, const SCHIB *src)
1148 {
1149     int i;
1150 
1151     copy_pmcw_to_guest(&dest->pmcw, &src->pmcw);
1152     copy_scsw_to_guest(&dest->scsw, &src->scsw);
1153     dest->mba = cpu_to_be64(src->mba);
1154     for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
1155         dest->mda[i] = src->mda[i];
1156     }
1157 }
1158 
1159 int css_do_stsch(SubchDev *sch, SCHIB *schib)
1160 {
1161     /* Use current status. */
1162     copy_schib_to_guest(schib, &sch->curr_status);
1163     return 0;
1164 }
1165 
1166 static void copy_pmcw_from_guest(PMCW *dest, const PMCW *src)
1167 {
1168     int i;
1169 
1170     dest->intparm = be32_to_cpu(src->intparm);
1171     dest->flags = be16_to_cpu(src->flags);
1172     dest->devno = be16_to_cpu(src->devno);
1173     dest->lpm = src->lpm;
1174     dest->pnom = src->pnom;
1175     dest->lpum = src->lpum;
1176     dest->pim = src->pim;
1177     dest->mbi = be16_to_cpu(src->mbi);
1178     dest->pom = src->pom;
1179     dest->pam = src->pam;
1180     for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
1181         dest->chpid[i] = src->chpid[i];
1182     }
1183     dest->chars = be32_to_cpu(src->chars);
1184 }
1185 
1186 static void copy_scsw_from_guest(SCSW *dest, const SCSW *src)
1187 {
1188     dest->flags = be16_to_cpu(src->flags);
1189     dest->ctrl = be16_to_cpu(src->ctrl);
1190     dest->cpa = be32_to_cpu(src->cpa);
1191     dest->dstat = src->dstat;
1192     dest->cstat = src->cstat;
1193     dest->count = be16_to_cpu(src->count);
1194 }
1195 
1196 static void copy_schib_from_guest(SCHIB *dest, const SCHIB *src)
1197 {
1198     int i;
1199 
1200     copy_pmcw_from_guest(&dest->pmcw, &src->pmcw);
1201     copy_scsw_from_guest(&dest->scsw, &src->scsw);
1202     dest->mba = be64_to_cpu(src->mba);
1203     for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
1204         dest->mda[i] = src->mda[i];
1205     }
1206 }
1207 
1208 int css_do_msch(SubchDev *sch, const SCHIB *orig_schib)
1209 {
1210     SCSW *s = &sch->curr_status.scsw;
1211     PMCW *p = &sch->curr_status.pmcw;
1212     uint16_t oldflags;
1213     int ret;
1214     SCHIB schib;
1215 
1216     if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_DNV)) {
1217         ret = 0;
1218         goto out;
1219     }
1220 
1221     if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
1222         ret = -EINPROGRESS;
1223         goto out;
1224     }
1225 
1226     if (s->ctrl &
1227         (SCSW_FCTL_START_FUNC|SCSW_FCTL_HALT_FUNC|SCSW_FCTL_CLEAR_FUNC)) {
1228         ret = -EBUSY;
1229         goto out;
1230     }
1231 
1232     copy_schib_from_guest(&schib, orig_schib);
1233     /* Only update the program-modifiable fields. */
1234     p->intparm = schib.pmcw.intparm;
1235     oldflags = p->flags;
1236     p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
1237                   PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
1238                   PMCW_FLAGS_MASK_MP);
1239     p->flags |= schib.pmcw.flags &
1240             (PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
1241              PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
1242              PMCW_FLAGS_MASK_MP);
1243     p->lpm = schib.pmcw.lpm;
1244     p->mbi = schib.pmcw.mbi;
1245     p->pom = schib.pmcw.pom;
1246     p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
1247     p->chars |= schib.pmcw.chars &
1248             (PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
1249     sch->curr_status.mba = schib.mba;
1250 
1251     /* Has the channel been disabled? */
1252     if (sch->disable_cb && (oldflags & PMCW_FLAGS_MASK_ENA) != 0
1253         && (p->flags & PMCW_FLAGS_MASK_ENA) == 0) {
1254         sch->disable_cb(sch);
1255     }
1256 
1257     ret = 0;
1258 
1259 out:
1260     return ret;
1261 }
1262 
1263 int css_do_xsch(SubchDev *sch)
1264 {
1265     SCSW *s = &sch->curr_status.scsw;
1266     PMCW *p = &sch->curr_status.pmcw;
1267     int ret;
1268 
1269     if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
1270         ret = -ENODEV;
1271         goto out;
1272     }
1273 
1274     if (!(s->ctrl & SCSW_CTRL_MASK_FCTL) ||
1275         ((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
1276         (!(s->ctrl &
1277            (SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_SUSP))) ||
1278         (s->ctrl & SCSW_ACTL_SUBCH_ACTIVE)) {
1279         ret = -EINPROGRESS;
1280         goto out;
1281     }
1282 
1283     if (s->ctrl & SCSW_CTRL_MASK_STCTL) {
1284         ret = -EBUSY;
1285         goto out;
1286     }
1287 
1288     /* Cancel the current operation. */
1289     s->ctrl &= ~(SCSW_FCTL_START_FUNC |
1290                  SCSW_ACTL_RESUME_PEND |
1291                  SCSW_ACTL_START_PEND |
1292                  SCSW_ACTL_SUSP);
1293     sch->channel_prog = 0x0;
1294     sch->last_cmd_valid = false;
1295     s->dstat = 0;
1296     s->cstat = 0;
1297     ret = 0;
1298 
1299 out:
1300     return ret;
1301 }
1302 
1303 int css_do_csch(SubchDev *sch)
1304 {
1305     SCSW *s = &sch->curr_status.scsw;
1306     PMCW *p = &sch->curr_status.pmcw;
1307     int ret;
1308 
1309     if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
1310         ret = -ENODEV;
1311         goto out;
1312     }
1313 
1314     /* Trigger the clear function. */
1315     s->ctrl &= ~(SCSW_CTRL_MASK_FCTL | SCSW_CTRL_MASK_ACTL);
1316     s->ctrl |= SCSW_FCTL_CLEAR_FUNC | SCSW_ACTL_CLEAR_PEND;
1317 
1318     do_subchannel_work(sch);
1319     ret = 0;
1320 
1321 out:
1322     return ret;
1323 }
1324 
1325 int css_do_hsch(SubchDev *sch)
1326 {
1327     SCSW *s = &sch->curr_status.scsw;
1328     PMCW *p = &sch->curr_status.pmcw;
1329     int ret;
1330 
1331     if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
1332         ret = -ENODEV;
1333         goto out;
1334     }
1335 
1336     if (((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_STATUS_PEND) ||
1337         (s->ctrl & (SCSW_STCTL_PRIMARY |
1338                     SCSW_STCTL_SECONDARY |
1339                     SCSW_STCTL_ALERT))) {
1340         ret = -EINPROGRESS;
1341         goto out;
1342     }
1343 
1344     if (s->ctrl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
1345         ret = -EBUSY;
1346         goto out;
1347     }
1348 
1349     /* Trigger the halt function. */
1350     s->ctrl |= SCSW_FCTL_HALT_FUNC;
1351     s->ctrl &= ~SCSW_FCTL_START_FUNC;
1352     if (((s->ctrl & SCSW_CTRL_MASK_ACTL) ==
1353          (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) &&
1354         ((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_INTERMEDIATE)) {
1355         s->ctrl &= ~SCSW_STCTL_STATUS_PEND;
1356     }
1357     s->ctrl |= SCSW_ACTL_HALT_PEND;
1358 
1359     do_subchannel_work(sch);
1360     ret = 0;
1361 
1362 out:
1363     return ret;
1364 }
1365 
1366 static void css_update_chnmon(SubchDev *sch)
1367 {
1368     if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_MME)) {
1369         /* Not active. */
1370         return;
1371     }
1372     /* The counter is conveniently located at the beginning of the struct. */
1373     if (sch->curr_status.pmcw.chars & PMCW_CHARS_MASK_MBFC) {
1374         /* Format 1, per-subchannel area. */
1375         uint32_t count;
1376 
1377         count = address_space_ldl(&address_space_memory,
1378                                   sch->curr_status.mba,
1379                                   MEMTXATTRS_UNSPECIFIED,
1380                                   NULL);
1381         count++;
1382         address_space_stl(&address_space_memory, sch->curr_status.mba, count,
1383                           MEMTXATTRS_UNSPECIFIED, NULL);
1384     } else {
1385         /* Format 0, global area. */
1386         uint32_t offset;
1387         uint16_t count;
1388 
1389         offset = sch->curr_status.pmcw.mbi << 5;
1390         count = address_space_lduw(&address_space_memory,
1391                                    channel_subsys.chnmon_area + offset,
1392                                    MEMTXATTRS_UNSPECIFIED,
1393                                    NULL);
1394         count++;
1395         address_space_stw(&address_space_memory,
1396                           channel_subsys.chnmon_area + offset, count,
1397                           MEMTXATTRS_UNSPECIFIED, NULL);
1398     }
1399 }
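
/*
 * Worked example (illustrative): in format-0 mode each measurement block is
 * 32 bytes, so a subchannel with pmcw.mbi == 3 increments the halfword
 * counter at chnmon_area + (3 << 5) = chnmon_area + 96; in format-1 mode
 * the 32-bit counter sits at the start of the per-subchannel area
 * addressed by mba.
 */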
1400 
1401 int css_do_ssch(SubchDev *sch, ORB *orb)
1402 {
1403     SCSW *s = &sch->curr_status.scsw;
1404     PMCW *p = &sch->curr_status.pmcw;
1405     int ret;
1406 
1407     if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
1408         ret = -ENODEV;
1409         goto out;
1410     }
1411 
1412     if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
1413         ret = -EINPROGRESS;
1414         goto out;
1415     }
1416 
1417     if (s->ctrl & (SCSW_FCTL_START_FUNC |
1418                    SCSW_FCTL_HALT_FUNC |
1419                    SCSW_FCTL_CLEAR_FUNC)) {
1420         ret = -EBUSY;
1421         goto out;
1422     }
1423 
1424     /* If monitoring is active, update counter. */
1425     if (channel_subsys.chnmon_active) {
1426         css_update_chnmon(sch);
1427     }
1428     sch->orb = *orb;
1429     sch->channel_prog = orb->cpa;
1430     /* Trigger the start function. */
1431     s->ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND);
1432     s->flags &= ~SCSW_FLAGS_MASK_PNO;
1433 
1434     ret = do_subchannel_work(sch);
1435 
1436 out:
1437     return ret;
1438 }
1439 
1440 static void copy_irb_to_guest(IRB *dest, const IRB *src, PMCW *pmcw,
1441                               int *irb_len)
1442 {
1443     int i;
1444     uint16_t stctl = src->scsw.ctrl & SCSW_CTRL_MASK_STCTL;
1445     uint16_t actl = src->scsw.ctrl & SCSW_CTRL_MASK_ACTL;
1446 
1447     copy_scsw_to_guest(&dest->scsw, &src->scsw);
1448 
1449     for (i = 0; i < ARRAY_SIZE(dest->esw); i++) {
1450         dest->esw[i] = cpu_to_be32(src->esw[i]);
1451     }
1452     for (i = 0; i < ARRAY_SIZE(dest->ecw); i++) {
1453         dest->ecw[i] = cpu_to_be32(src->ecw[i]);
1454     }
1455     *irb_len = sizeof(*dest) - sizeof(dest->emw);
1456 
1457     /* extended measurements enabled? */
1458     if ((src->scsw.flags & SCSW_FLAGS_MASK_ESWF) ||
1459         !(pmcw->flags & PMCW_FLAGS_MASK_TF) ||
1460         !(pmcw->chars & PMCW_CHARS_MASK_XMWME)) {
1461         return;
1462     }
1463     /* extended measurements pending? */
1464     if (!(stctl & SCSW_STCTL_STATUS_PEND)) {
1465         return;
1466     }
1467     if ((stctl & SCSW_STCTL_PRIMARY) ||
1468         (stctl == SCSW_STCTL_SECONDARY) ||
1469         ((stctl & SCSW_STCTL_INTERMEDIATE) && (actl & SCSW_ACTL_SUSP))) {
1470         for (i = 0; i < ARRAY_SIZE(dest->emw); i++) {
1471             dest->emw[i] = cpu_to_be32(src->emw[i]);
1472         }
1473     }
1474     *irb_len = sizeof(*dest);
1475 }
1476 
1477 int css_do_tsch_get_irb(SubchDev *sch, IRB *target_irb, int *irb_len)
1478 {
1479     SCSW *s = &sch->curr_status.scsw;
1480     PMCW *p = &sch->curr_status.pmcw;
1481     uint16_t stctl;
1482     IRB irb;
1483 
1484     if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
1485         return 3;
1486     }
1487 
1488     stctl = s->ctrl & SCSW_CTRL_MASK_STCTL;
1489 
1490     /* Prepare the irb for the guest. */
1491     memset(&irb, 0, sizeof(IRB));
1492 
1493     /* Copy scsw from current status. */
1494     memcpy(&irb.scsw, s, sizeof(SCSW));
1495     if (stctl & SCSW_STCTL_STATUS_PEND) {
1496         if (s->cstat & (SCSW_CSTAT_DATA_CHECK |
1497                         SCSW_CSTAT_CHN_CTRL_CHK |
1498                         SCSW_CSTAT_INTF_CTRL_CHK)) {
1499             irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF;
1500             irb.esw[0] = 0x04804000;
1501         } else {
1502             irb.esw[0] = 0x00800000;
1503         }
1504         /* If a unit check is pending, copy sense data. */
1505         if ((s->dstat & SCSW_DSTAT_UNIT_CHECK) &&
1506             (p->chars & PMCW_CHARS_MASK_CSENSE)) {
1507             int i;
1508 
1509             irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF | SCSW_FLAGS_MASK_ECTL;
1510             /* Attention: sense_data is already BE! The be32_to_cpu below
                  * undoes the cpu_to_be32 applied later in copy_irb_to_guest. */
1511             memcpy(irb.ecw, sch->sense_data, sizeof(sch->sense_data));
1512             for (i = 0; i < ARRAY_SIZE(irb.ecw); i++) {
1513                 irb.ecw[i] = be32_to_cpu(irb.ecw[i]);
1514             }
1515             irb.esw[1] = 0x01000000 | (sizeof(sch->sense_data) << 8);
1516         }
1517     }
1518     /* Store the irb to the guest. */
1519     copy_irb_to_guest(target_irb, &irb, p, irb_len);
1520 
1521     return ((stctl & SCSW_STCTL_STATUS_PEND) == 0);
1522 }
1523 
1524 void css_do_tsch_update_subch(SubchDev *sch)
1525 {
1526     SCSW *s = &sch->curr_status.scsw;
1527     PMCW *p = &sch->curr_status.pmcw;
1528     uint16_t stctl;
1529     uint16_t fctl;
1530     uint16_t actl;
1531 
1532     stctl = s->ctrl & SCSW_CTRL_MASK_STCTL;
1533     fctl = s->ctrl & SCSW_CTRL_MASK_FCTL;
1534     actl = s->ctrl & SCSW_CTRL_MASK_ACTL;
1535 
1536     /* Clear conditions on subchannel, if applicable. */
1537     if (stctl & SCSW_STCTL_STATUS_PEND) {
1538         s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
1539         if ((stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) ||
1540             ((fctl & SCSW_FCTL_HALT_FUNC) &&
1541              (actl & SCSW_ACTL_SUSP))) {
1542             s->ctrl &= ~SCSW_CTRL_MASK_FCTL;
1543         }
1544         if (stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) {
1545             s->flags &= ~SCSW_FLAGS_MASK_PNO;
1546             s->ctrl &= ~(SCSW_ACTL_RESUME_PEND |
1547                          SCSW_ACTL_START_PEND |
1548                          SCSW_ACTL_HALT_PEND |
1549                          SCSW_ACTL_CLEAR_PEND |
1550                          SCSW_ACTL_SUSP);
1551         } else {
1552             if ((actl & SCSW_ACTL_SUSP) &&
1553                 (fctl & SCSW_FCTL_START_FUNC)) {
1554                 s->flags &= ~SCSW_FLAGS_MASK_PNO;
1555                 if (fctl & SCSW_FCTL_HALT_FUNC) {
1556                     s->ctrl &= ~(SCSW_ACTL_RESUME_PEND |
1557                                  SCSW_ACTL_START_PEND |
1558                                  SCSW_ACTL_HALT_PEND |
1559                                  SCSW_ACTL_CLEAR_PEND |
1560                                  SCSW_ACTL_SUSP);
1561                 } else {
1562                     s->ctrl &= ~SCSW_ACTL_RESUME_PEND;
1563                 }
1564             }
1565         }
1566         /* Clear pending sense data. */
1567         if (p->chars & PMCW_CHARS_MASK_CSENSE) {
1568             memset(sch->sense_data, 0 , sizeof(sch->sense_data));
1569         }
1570     }
1571 }
1572 
1573 static void copy_crw_to_guest(CRW *dest, const CRW *src)
1574 {
1575     dest->flags = cpu_to_be16(src->flags);
1576     dest->rsid = cpu_to_be16(src->rsid);
1577 }
1578 
1579 int css_do_stcrw(CRW *crw)
1580 {
1581     CrwContainer *crw_cont;
1582     int ret;
1583 
1584     crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws);
1585     if (crw_cont) {
1586         QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling);
1587         copy_crw_to_guest(crw, &crw_cont->crw);
1588         g_free(crw_cont);
1589         ret = 0;
1590     } else {
1591         /* List was empty, turn crw machine checks on again. */
1592         memset(crw, 0, sizeof(*crw));
1593         channel_subsys.do_crw_mchk = true;
1594         ret = 1;
1595     }
1596 
1597     return ret;
1598 }
1599 
1600 static void copy_crw_from_guest(CRW *dest, const CRW *src)
1601 {
1602     dest->flags = be16_to_cpu(src->flags);
1603     dest->rsid = be16_to_cpu(src->rsid);
1604 }
1605 
1606 void css_undo_stcrw(CRW *crw)
1607 {
1608     CrwContainer *crw_cont;
1609 
1610     crw_cont = g_try_malloc0(sizeof(CrwContainer));
1611     if (!crw_cont) {
1612         channel_subsys.crws_lost = true;
1613         return;
1614     }
1615     copy_crw_from_guest(&crw_cont->crw, crw);
1616 
1617     QTAILQ_INSERT_HEAD(&channel_subsys.pending_crws, crw_cont, sibling);
1618 }
1619 
1620 int css_do_tpi(IOIntCode *int_code, int lowcore)
1621 {
1622     /* No pending interrupts for !KVM. */
1623     return 0;
1624 }
1625 
1626 int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
1627                          int rfmt, void *buf)
1628 {
1629     int i, desc_size;
1630     uint32_t words[8];
1631     uint32_t chpid_type_word;
1632     CssImage *css;
1633 
1634     if (!m && !cssid) {
1635         css = channel_subsys.css[channel_subsys.default_cssid];
1636     } else {
1637         css = channel_subsys.css[cssid];
1638     }
1639     if (!css) {
1640         return 0;
1641     }
1642     desc_size = 0;
1643     for (i = f_chpid; i <= l_chpid; i++) {
1644         if (css->chpids[i].in_use) {
1645             chpid_type_word = 0x80000000 | (css->chpids[i].type << 8) | i;
1646             if (rfmt == 0) {
1647                 words[0] = cpu_to_be32(chpid_type_word);
1648                 words[1] = 0;
1649                 memcpy(buf + desc_size, words, 8);
1650                 desc_size += 8;
1651             } else if (rfmt == 1) {
1652                 words[0] = cpu_to_be32(chpid_type_word);
1653                 words[1] = 0;
1654                 words[2] = 0;
1655                 words[3] = 0;
1656                 words[4] = 0;
1657                 words[5] = 0;
1658                 words[6] = 0;
1659                 words[7] = 0;
1660                 memcpy(buf + desc_size, words, 32);
1661                 desc_size += 32;
1662             }
1663         }
1664     }
1665     return desc_size;
1666 }
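
/*
 * Worked example (illustrative values): for an in-use chpid 0x05 of type
 * 0x32 the descriptor word becomes 0x80000000 | (0x32 << 8) | 0x05 =
 * 0x80003205, stored big-endian; rfmt 0 produces 8-byte entries and rfmt 1
 * produces 32-byte entries, which is what desc_size accumulates.
 */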
1667 
1668 void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo)
1669 {
1670     /* dct is currently ignored (not really meaningful for our devices) */
1671     /* TODO: Don't ignore mbk. */
1672     if (update && !channel_subsys.chnmon_active) {
1673         /* Enable measuring. */
1674         channel_subsys.chnmon_area = mbo;
1675         channel_subsys.chnmon_active = true;
1676     }
1677     if (!update && channel_subsys.chnmon_active) {
1678         /* Disable measuring. */
1679         channel_subsys.chnmon_area = 0;
1680         channel_subsys.chnmon_active = false;
1681     }
1682 }
1683 
1684 int css_do_rsch(SubchDev *sch)
1685 {
1686     SCSW *s = &sch->curr_status.scsw;
1687     PMCW *p = &sch->curr_status.pmcw;
1688     int ret;
1689 
1690     if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
1691         ret = -ENODEV;
1692         goto out;
1693     }
1694 
1695     if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
1696         ret = -EINPROGRESS;
1697         goto out;
1698     }
1699 
1700     if (((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
1701         (s->ctrl & SCSW_ACTL_RESUME_PEND) ||
1702         (!(s->ctrl & SCSW_ACTL_SUSP))) {
1703         ret = -EINVAL;
1704         goto out;
1705     }
1706 
1707     /* If monitoring is active, update counter. */
1708     if (channel_subsys.chnmon_active) {
1709         css_update_chnmon(sch);
1710     }
1711 
1712     s->ctrl |= SCSW_ACTL_RESUME_PEND;
1713     do_subchannel_work(sch);
1714     ret = 0;
1715 
1716 out:
1717     return ret;
1718 }
1719 
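/*
 * Reset channel path: only implemented for virtual channel paths. Since
 * there is nothing to actually reset, this merely queues the
 * initialization CRW for the channel path (and, with multiple channel
 * subsystems enabled, the chained cssid CRW).
 */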
1720 int css_do_rchp(uint8_t cssid, uint8_t chpid)
1721 {
1722     uint8_t real_cssid;
1723 
1724     if (cssid > channel_subsys.max_cssid) {
1725         return -EINVAL;
1726     }
1727     if (channel_subsys.max_cssid == 0) {
1728         real_cssid = channel_subsys.default_cssid;
1729     } else {
1730         real_cssid = cssid;
1731     }
1732     if (!channel_subsys.css[real_cssid]) {
1733         return -EINVAL;
1734     }
1735 
1736     if (!channel_subsys.css[real_cssid]->chpids[chpid].in_use) {
1737         return -ENODEV;
1738     }
1739 
1740     if (!channel_subsys.css[real_cssid]->chpids[chpid].is_virtual) {
1741         fprintf(stderr,
1742                 "rchp unsupported for non-virtual chpid %x.%02x!\n",
1743                 real_cssid, chpid);
1744         return -ENODEV;
1745     }
1746 
1747     /* We don't really use a channel path, so we're done here. */
1748     css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT,
1749                   channel_subsys.max_cssid > 0 ? 1 : 0, chpid);
1750     if (channel_subsys.max_cssid > 0) {
1751         css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 0, real_cssid << 8);
1752     }
1753     return 0;
1754 }
1755 
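/*
 * Return true if @schid lies beyond the highest subchannel id currently
 * in use in the addressed subchannel set (or if that set does not exist
 * at all), i.e. no further subchannels need to be considered.
 */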
1756 bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid)
1757 {
1758     SubchSet *set;
1759     uint8_t real_cssid;
1760 
1761     real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid;
1762     if (ssid > MAX_SSID ||
1763         !channel_subsys.css[real_cssid] ||
1764         !channel_subsys.css[real_cssid]->sch_set[ssid]) {
1765         return true;
1766     }
1767     set = channel_subsys.css[real_cssid]->sch_set[ssid];
1768     /* find_last_bit() expects the bitmap size in bits. */
1769     return schid > find_last_bit(set->schids_used, MAX_SCHID + 1);
1770 }
1771 
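/*
 * Return the first unused chpid in the css image identified by @cssid,
 * skipping the reserved VIRTIO_CCW_CHPID. Returns MAX_CHPID + 1 if the
 * image does not exist or no chpid is free.
 */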
1772 unsigned int css_find_free_chpid(uint8_t cssid)
1773 {
1774     CssImage *css = channel_subsys.css[cssid];
1775     unsigned int chpid;
1776 
1777     if (!css) {
1778         return MAX_CHPID + 1;
1779     }
1780 
1781     for (chpid = 0; chpid <= MAX_CHPID; chpid++) {
1782         /* skip reserved chpid */
1783         if (chpid == VIRTIO_CCW_CHPID) {
1784             continue;
1785         }
1786         if (!css->chpids[chpid].in_use) {
1787             return chpid;
1788         }
1789     }
1790     return MAX_CHPID + 1;
1791 }
1792 
1793 static int css_add_chpid(uint8_t cssid, uint8_t chpid, uint8_t type,
1794                          bool is_virt)
1795 {
1796     CssImage *css;
1797 
1798     trace_css_chpid_add(cssid, chpid, type);
1799     css = channel_subsys.css[cssid];
1800     if (!css) {
1801         return -EINVAL;
1802     }
1803     if (css->chpids[chpid].in_use) {
1804         return -EEXIST;
1805     }
1806     css->chpids[chpid].in_use = 1;
1807     css->chpids[chpid].type = type;
1808     css->chpids[chpid].is_virtual = is_virt;
1809 
1810     css_generate_chp_crws(cssid, chpid);
1811 
1812     return 0;
1813 }
1814 
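/*
 * Initialize the subchannel information block of a virtual subchannel:
 * device number valid, a single path via @chpid, status and measurement
 * data cleared. The chpid is registered as a virtual channel path of the
 * given @type if it is not yet in use.
 */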
1815 void css_sch_build_virtual_schib(SubchDev *sch, uint8_t chpid, uint8_t type)
1816 {
1817     PMCW *p = &sch->curr_status.pmcw;
1818     SCSW *s = &sch->curr_status.scsw;
1819     int i;
1820     CssImage *css = channel_subsys.css[sch->cssid];
1821 
1822     assert(css != NULL);
1823     memset(p, 0, sizeof(PMCW));
1824     p->flags |= PMCW_FLAGS_MASK_DNV;
1825     p->devno = sch->devno;
1826     /* single path */
1827     p->pim = 0x80;
1828     p->pom = 0xff;
1829     p->pam = 0x80;
1830     p->chpid[0] = chpid;
1831     if (!css->chpids[chpid].in_use) {
1832         css_add_chpid(sch->cssid, chpid, type, true);
1833     }
1834 
1835     memset(s, 0, sizeof(SCSW));
1836     sch->curr_status.mba = 0;
1837     for (i = 0; i < ARRAY_SIZE(sch->curr_status.mda); i++) {
1838         sch->curr_status.mda[i] = 0;
1839     }
1840 }
1841 
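/*
 * Look up a subchannel by cssid/ssid/schid, falling back to the default
 * cssid when neither m nor cssid is set. Returns NULL if the css image or
 * the subchannel set does not exist.
 */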
1842 SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid, uint16_t schid)
1843 {
1844     uint8_t real_cssid;
1845 
1846     real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid;
1847 
1848     if (!channel_subsys.css[real_cssid]) {
1849         return NULL;
1850     }
1851 
1852     if (!channel_subsys.css[real_cssid]->sch_set[ssid]) {
1853         return NULL;
1854     }
1855 
1856     return channel_subsys.css[real_cssid]->sch_set[ssid]->sch[schid];
1857 }
1858 
1859 /**
1860  * Return free device number in subchannel set.
1861  *
1862  * Return index of the first free device number in the subchannel set
1863  * identified by @p cssid and @p ssid, beginning the search at @p
1864  * start and wrapping around at MAX_DEVNO. Return a value exceeding
1865  * MAX_DEVNO if there are no free device numbers in the subchannel
1866  * set.
1867  */
1868 static uint32_t css_find_free_devno(uint8_t cssid, uint8_t ssid,
1869                                     uint16_t start)
1870 {
1871     uint32_t round;
1872 
1873     for (round = 0; round <= MAX_DEVNO; round++) {
1874         uint16_t devno = (start + round) % (MAX_DEVNO + 1);
1875 
1876         if (!css_devno_used(cssid, ssid, devno)) {
1877             return devno;
1878         }
1879     }
1880     return MAX_DEVNO + 1;
1881 }
1882 
1883 /**
1884  * Return first free subchannel (id) in subchannel set.
1885  *
1886  * Return index of the first free subchannel in the subchannel set
1887  * identified by @p cssid and @p ssid, if there is any. Return a value
1888  * exceeding MAX_SCHID if there are no free subchannels in the
1889  * subchannel set.
1890  */
1891 static uint32_t css_find_free_subch(uint8_t cssid, uint8_t ssid)
1892 {
1893     uint32_t schid;
1894 
1895     for (schid = 0; schid <= MAX_SCHID; schid++) {
1896         if (!css_find_subch(1, cssid, ssid, schid)) {
1897             return schid;
1898         }
1899     }
1900     return MAX_SCHID + 1;
1901 }
1902 
1903 /**
1904  * Return first free subchannel (id) in subchannel set for a device number
1905  *
1906  * Verify the device number @p devno is not used yet in the subchannel
1907  * set identified by @p cssid and @p ssid. Set @p schid to the index
1908  * of the first free subchannel in the subchannel set, if there is
1909  * any. Return true if everything succeeded and false otherwise.
1910  */
1911 static bool css_find_free_subch_for_devno(uint8_t cssid, uint8_t ssid,
1912                                           uint16_t devno, uint16_t *schid,
1913                                           Error **errp)
1914 {
1915     uint32_t free_schid;
1916 
1917     assert(schid);
1918     if (css_devno_used(cssid, ssid, devno)) {
1919         error_setg(errp, "Device %x.%x.%04x already exists",
1920                    cssid, ssid, devno);
1921         return false;
1922     }
1923     free_schid = css_find_free_subch(cssid, ssid);
1924     if (free_schid > MAX_SCHID) {
1925         error_setg(errp, "No free subchannel found for %x.%x.%04x",
1926                    cssid, ssid, devno);
1927         return false;
1928     }
1929     *schid = free_schid;
1930     return true;
1931 }
1932 
1933 /**
1934  * Return first free subchannel (id) and device number
1935  *
1936  * Locate the first free subchannel and first free device number in
1937  * any of the subchannel sets of the channel subsystem identified by
1938  * @p cssid. Return false if no free subchannel / device number could
1939  * be found. Otherwise set @p ssid, @p devno and @p schid to identify
1940  * the available subchannel and device number and return true.
1941  *
1942  * May modify @p ssid, @p devno and / or @p schid even if no free
1943  * subchannel / device number could be found.
1944  */
1945 static bool css_find_free_subch_and_devno(uint8_t cssid, uint8_t *ssid,
1946                                           uint16_t *devno, uint16_t *schid,
1947                                           Error **errp)
1948 {
1949     uint32_t free_schid, free_devno;
1950 
1951     assert(ssid && devno && schid);
1952     for (*ssid = 0; *ssid <= MAX_SSID; (*ssid)++) {
1953         free_schid = css_find_free_subch(cssid, *ssid);
1954         if (free_schid > MAX_SCHID) {
1955             continue;
1956         }
1957         free_devno = css_find_free_devno(cssid, *ssid, free_schid);
1958         if (free_devno > MAX_DEVNO) {
1959             continue;
1960         }
1961         *schid = free_schid;
1962         *devno = free_devno;
1963         return true;
1964     }
1965     error_setg(errp, "Virtual channel subsystem is full!");
1966     return false;
1967 }
1968 
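/*
 * A subchannel is visible to the guest only if its ssid does not exceed
 * the currently enabled maximum and, for images other than the default
 * one, only if multiple channel subsystems have been enabled.
 */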
1969 bool css_subch_visible(SubchDev *sch)
1970 {
1971     if (sch->ssid > channel_subsys.max_ssid) {
1972         return false;
1973     }
1974 
1975     if (sch->cssid != channel_subsys.default_cssid) {
1976         return (channel_subsys.max_cssid > 0);
1977     }
1978 
1979     return true;
1980 }
1981 
1982 bool css_present(uint8_t cssid)
1983 {
1984     return (channel_subsys.css[cssid] != NULL);
1985 }
1986 
1987 bool css_devno_used(uint8_t cssid, uint8_t ssid, uint16_t devno)
1988 {
1989     if (!channel_subsys.css[cssid]) {
1990         return false;
1991     }
1992     if (!channel_subsys.css[cssid]->sch_set[ssid]) {
1993         return false;
1994     }
1995 
1996     return !!test_bit(devno,
1997                       channel_subsys.css[cssid]->sch_set[ssid]->devnos_used);
1998 }
1999 
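/*
 * (De)register a subchannel with its subchannel set: a non-NULL @sch
 * assigns the subchannel and marks schid/devno as used, a NULL @sch
 * clears the slot and the corresponding bits.
 */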
2000 void css_subch_assign(uint8_t cssid, uint8_t ssid, uint16_t schid,
2001                       uint16_t devno, SubchDev *sch)
2002 {
2003     CssImage *css;
2004     SubchSet *s_set;
2005 
2006     trace_css_assign_subch(sch ? "assign" : "deassign", cssid, ssid, schid,
2007                            devno);
2008     if (!channel_subsys.css[cssid]) {
2009         fprintf(stderr,
2010                 "Suspicious call to %s (%x.%x.%04x) for non-existing css!\n",
2011                 __func__, cssid, ssid, schid);
2012         return;
2013     }
2014     css = channel_subsys.css[cssid];
2015 
2016     if (!css->sch_set[ssid]) {
2017         css->sch_set[ssid] = g_malloc0(sizeof(SubchSet));
2018     }
2019     s_set = css->sch_set[ssid];
2020 
2021     s_set->sch[schid] = sch;
2022     if (sch) {
2023         set_bit(schid, s_set->schids_used);
2024         set_bit(devno, s_set->devnos_used);
2025     } else {
2026         clear_bit(schid, s_set->schids_used);
2027         clear_bit(devno, s_set->devnos_used);
2028     }
2029 }
2030 
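/*
 * Queue a channel report word built from @rsc/@erc/@rsid, flagging it as
 * chained if requested and as overflowed if earlier CRWs had to be
 * dropped. A CRW pending machine check is injected if one is currently
 * allowed.
 */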
2031 void css_queue_crw(uint8_t rsc, uint8_t erc, int chain, uint16_t rsid)
2032 {
2033     CrwContainer *crw_cont;
2034 
2035     trace_css_crw(rsc, erc, rsid, chain ? "(chained)" : "");
2036     /* TODO: Maybe use a static crw pool? */
2037     crw_cont = g_try_malloc0(sizeof(CrwContainer));
2038     if (!crw_cont) {
2039         channel_subsys.crws_lost = true;
2040         return;
2041     }
2042     crw_cont->crw.flags = (rsc << 8) | erc;
2043     if (chain) {
2044         crw_cont->crw.flags |= CRW_FLAGS_MASK_C;
2045     }
2046     crw_cont->crw.rsid = rsid;
2047     if (channel_subsys.crws_lost) {
2048         crw_cont->crw.flags |= CRW_FLAGS_MASK_R;
2049         channel_subsys.crws_lost = false;
2050     }
2051 
2052     QTAILQ_INSERT_TAIL(&channel_subsys.pending_crws, crw_cont, sibling);
2053 
2054     if (channel_subsys.do_crw_mchk) {
2055         channel_subsys.do_crw_mchk = false;
2056         /* Inject crw pending machine check. */
2057         s390_crw_mchk();
2058     }
2059 }
2060 
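/*
 * Queue the CRW(s) announcing a hotplugged or removed subchannel. A
 * second CRW carrying cssid/ssid is chained in when higher subchannel
 * sets or channel subsystems are enabled. Coldplugged additions and ids
 * the guest has not enabled are not reported.
 */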
2061 void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid,
2062                            int hotplugged, int add)
2063 {
2064     uint8_t guest_cssid;
2065     bool chain_crw;
2066 
2067     if (add && !hotplugged) {
2068         return;
2069     }
2070     if (channel_subsys.max_cssid == 0) {
2071         /* Default cssid shows up as 0. */
2072         guest_cssid = (cssid == channel_subsys.default_cssid) ? 0 : cssid;
2073     } else {
2074         /* Show real cssid to the guest. */
2075         guest_cssid = cssid;
2076     }
2077     /*
2078      * Only notify for higher subchannel sets/channel subsystems if the
2079      * guest has enabled it.
2080      */
2081     if ((ssid > channel_subsys.max_ssid) ||
2082         (guest_cssid > channel_subsys.max_cssid) ||
2083         ((channel_subsys.max_cssid == 0) &&
2084          (cssid != channel_subsys.default_cssid))) {
2085         return;
2086     }
2087     chain_crw = (channel_subsys.max_ssid > 0) ||
2088             (channel_subsys.max_cssid > 0);
2089     css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, chain_crw ? 1 : 0, schid);
2090     if (chain_crw) {
2091         css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0,
2092                       (guest_cssid << 8) | (ssid << 4));
2093     }
2094     /* RW_ERC_IPI --> clear pending interrupts */
2095     css_clear_io_interrupt(css_do_build_subchannel_id(cssid, ssid), schid);
2096 }
2097 
2098 void css_generate_chp_crws(uint8_t cssid, uint8_t chpid)
2099 {
2100     /* TODO */
2101 }
2102 
2103 void css_generate_css_crws(uint8_t cssid)
2104 {
2105     if (!channel_subsys.sei_pending) {
2106         css_queue_crw(CRW_RSC_CSS, 0, 0, cssid);
2107     }
2108     channel_subsys.sei_pending = true;
2109 }
2110 
2111 void css_clear_sei_pending(void)
2112 {
2113     channel_subsys.sei_pending = false;
2114 }
2115 
2116 int css_enable_mcsse(void)
2117 {
2118     trace_css_enable_facility("mcsse");
2119     channel_subsys.max_cssid = MAX_CSSID;
2120     return 0;
2121 }
2122 
2123 int css_enable_mss(void)
2124 {
2125     trace_css_enable_facility("mss");
2126     channel_subsys.max_ssid = MAX_SSID;
2127     return 0;
2128 }
2129 
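/*
 * Bring a subchannel back to its initial state: run the disable callback
 * if the subchannel was enabled, then clear the configurable PMCW bits,
 * the status word and the channel program state.
 */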
2130 void css_reset_sch(SubchDev *sch)
2131 {
2132     PMCW *p = &sch->curr_status.pmcw;
2133 
2134     if ((p->flags & PMCW_FLAGS_MASK_ENA) != 0 && sch->disable_cb) {
2135         sch->disable_cb(sch);
2136     }
2137 
2138     p->intparm = 0;
2139     p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
2140                   PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
2141                   PMCW_FLAGS_MASK_MP | PMCW_FLAGS_MASK_TF);
2142     p->flags |= PMCW_FLAGS_MASK_DNV;
2143     p->devno = sch->devno;
2144     p->pim = 0x80;
2145     p->lpm = p->pim;
2146     p->pnom = 0;
2147     p->lpum = 0;
2148     p->mbi = 0;
2149     p->pom = 0xff;
2150     p->pam = 0x80;
2151     p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_XMWME |
2152                   PMCW_CHARS_MASK_CSENSE);
2153 
2154     memset(&sch->curr_status.scsw, 0, sizeof(sch->curr_status.scsw));
2155     sch->curr_status.mba = 0;
2156 
2157     sch->channel_prog = 0x0;
2158     sch->last_cmd_valid = false;
2159     sch->thinint_active = false;
2160 }
2161 
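/*
 * Reset the channel subsystem: stop channel monitoring, drop all pending
 * CRWs, re-arm the CRW machine check and reset the maximum cssid/ssid.
 */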
2162 void css_reset(void)
2163 {
2164     CrwContainer *crw_cont;
2165 
2166     /* Clean up monitoring. */
2167     channel_subsys.chnmon_active = false;
2168     channel_subsys.chnmon_area = 0;
2169 
2170     /* Clear pending CRWs. */
2171     while ((crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws))) {
2172         QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling);
2173         g_free(crw_cont);
2174     }
2175     channel_subsys.sei_pending = false;
2176     channel_subsys.do_crw_mchk = true;
2177     channel_subsys.crws_lost = false;
2178 
2179     /* Reset maximum ids. */
2180     channel_subsys.max_cssid = 0;
2181     channel_subsys.max_ssid = 0;
2182 }
2183 
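/*
 * qdev property getter: format the bus id as "<cssid>.<ssid>.<devid>"
 * (with a leading zero of the cssid dropped), or "<unset>" if no id has
 * been set yet.
 */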
2184 static void get_css_devid(Object *obj, Visitor *v, const char *name,
2185                           void *opaque, Error **errp)
2186 {
2187     DeviceState *dev = DEVICE(obj);
2188     Property *prop = opaque;
2189     CssDevId *dev_id = qdev_get_prop_ptr(dev, prop);
2190     char buffer[] = "xx.x.xxxx";
2191     char *p = buffer;
2192     int r;
2193 
2194     if (dev_id->valid) {
2196         r = snprintf(buffer, sizeof(buffer), "%02x.%1x.%04x", dev_id->cssid,
2197                      dev_id->ssid, dev_id->devid);
2198         assert(r == sizeof(buffer) - 1);
2199 
2200         /* drop leading zero */
2201         if (dev_id->cssid <= 0xf) {
2202             p++;
2203         }
2204     } else {
2205         snprintf(buffer, sizeof(buffer), "<unset>");
2206     }
2207 
2208     visit_type_str(v, name, &p, errp);
2209 }
2210 
2211 /*
2212  * parse <cssid>.<ssid>.<devid> and assert valid range for cssid/ssid
2213  */
2214 static void set_css_devid(Object *obj, Visitor *v, const char *name,
2215                           void *opaque, Error **errp)
2216 {
2217     DeviceState *dev = DEVICE(obj);
2218     Property *prop = opaque;
2219     CssDevId *dev_id = qdev_get_prop_ptr(dev, prop);
2220     Error *local_err = NULL;
2221     char *str;
2222     int num, n1, n2;
2223     unsigned int cssid, ssid, devid;
2224 
2225     if (dev->realized) {
2226         qdev_prop_set_after_realize(dev, name, errp);
2227         return;
2228     }
2229 
2230     visit_type_str(v, name, &str, &local_err);
2231     if (local_err) {
2232         error_propagate(errp, local_err);
2233         return;
2234     }
2235 
2236     num = sscanf(str, "%2x.%1x%n.%4x%n", &cssid, &ssid, &n1, &devid, &n2);
2237     if (num != 3 || (n2 - n1) != 5 || strlen(str) != n2) {
2238         error_set_from_qdev_prop_error(errp, EINVAL, dev, prop, str);
2239         goto out;
2240     }
2241     if ((cssid > MAX_CSSID) || (ssid > MAX_SSID)) {
2242         error_setg(errp, "Invalid cssid or ssid: cssid %x, ssid %x",
2243                    cssid, ssid);
2244         goto out;
2245     }
2246 
2247     dev_id->cssid = cssid;
2248     dev_id->ssid = ssid;
2249     dev_id->devid = devid;
2250     dev_id->valid = true;
2251 
2252 out:
2253     g_free(str);
2254 }
2255 
2256 const PropertyInfo css_devid_propinfo = {
2257     .name = "str",
2258     .description = "Identifier of an I/O device in the channel "
2259                    "subsystem, example: fe.1.23ab",
2260     .get = get_css_devid,
2261     .set = set_css_devid,
2262 };
2263 
2264 const PropertyInfo css_devid_ro_propinfo = {
2265     .name = "str",
2266     .description = "Read-only identifier of an I/O device in the channel "
2267                    "subsystem, example: fe.1.23ab",
2268     .get = get_css_devid,
2269 };
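/*
 * A minimal usage sketch (assuming a CCW device that exposes this
 * property as "devno" on the command line, as the virtio-ccw devices do):
 *
 *     -device virtio-blk-ccw,devno=fe.1.23ab
 *
 * get_css_devid() would then report the id back as "fe.1.23ab".
 */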
2270 
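/*
 * Allocate a subchannel for a new device: honour a user-specified bus id
 * if one was given (creating the css image on demand), otherwise pick the
 * first free subchannel/device number, either in the default (virtual)
 * css image or in the first non-virtual one with room left.
 */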
2271 SubchDev *css_create_sch(CssDevId bus_id, bool is_virtual, bool squash_mcss,
2272                          Error **errp)
2273 {
2274     uint16_t schid = 0;
2275     SubchDev *sch;
2276 
2277     if (bus_id.valid) {
2278         if (is_virtual != (bus_id.cssid == VIRTUAL_CSSID)) {
2279             error_setg(errp, "cssid %hhx not valid for %s devices",
2280                        bus_id.cssid,
2281                        (is_virtual ? "virtual" : "non-virtual"));
2282             return NULL;
2283         }
2284     }
2285 
2286     if (bus_id.valid) {
2287         if (squash_mcss) {
2288             bus_id.cssid = channel_subsys.default_cssid;
2289         } else if (!channel_subsys.css[bus_id.cssid]) {
2290             css_create_css_image(bus_id.cssid, false);
2291         }
2292 
2293         if (!css_find_free_subch_for_devno(bus_id.cssid, bus_id.ssid,
2294                                            bus_id.devid, &schid, errp)) {
2295             return NULL;
2296         }
2297     } else if (squash_mcss || is_virtual) {
2298         bus_id.cssid = channel_subsys.default_cssid;
2299 
2300         if (!css_find_free_subch_and_devno(bus_id.cssid, &bus_id.ssid,
2301                                            &bus_id.devid, &schid, errp)) {
2302             return NULL;
2303         }
2304     } else {
2305         for (bus_id.cssid = 0; bus_id.cssid < MAX_CSSID; ++bus_id.cssid) {
2306             if (bus_id.cssid == VIRTUAL_CSSID) {
2307                 continue;
2308             }
2309 
2310             if (!channel_subsys.css[bus_id.cssid]) {
2311                 css_create_css_image(bus_id.cssid, false);
2312             }
2313 
2314             if (css_find_free_subch_and_devno(bus_id.cssid, &bus_id.ssid,
2315                                               &bus_id.devid, &schid,
2316                                               NULL)) {
2317                 break;
2318             }
2319         }
2320         if (bus_id.cssid == MAX_CSSID) {
2321             error_setg(errp, "Virtual channel subsystem is full!");
2322             return NULL;
2323         }
2324     }
2325 
2326     sch = g_malloc0(sizeof(*sch));
2327     sch->cssid = bus_id.cssid;
2328     sch->ssid = bus_id.ssid;
2329     sch->devno = bus_id.devid;
2330     sch->schid = schid;
2331     css_subch_assign(sch->cssid, sch->ssid, schid, sch->devno, sch);
2332     return sch;
2333 }
2334 
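/*
 * Read the channel path ids of a passed-through subchannel from its
 * sysfs "chpids" attribute and copy them into the guest PMCW.
 */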
2335 static int css_sch_get_chpids(SubchDev *sch, CssDevId *dev_id)
2336 {
2337     char *fid_path;
2338     FILE *fd;
2339     uint32_t chpid[8];
2340     int i;
2341     PMCW *p = &sch->curr_status.pmcw;
2342 
2343     fid_path = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/chpids",
2344                                dev_id->cssid, dev_id->ssid, dev_id->devid);
2345     fd = fopen(fid_path, "r");
2346     if (fd == NULL) {
2347         error_report("%s: open %s failed", __func__, fid_path);
2348         g_free(fid_path);
2349         return -EINVAL;
2350     }
2351 
2352     if (fscanf(fd, "%x %x %x %x %x %x %x %x",
2353         &chpid[0], &chpid[1], &chpid[2], &chpid[3],
2354         &chpid[4], &chpid[5], &chpid[6], &chpid[7]) != 8) {
2355         fclose(fd);
2356         g_free(fid_path);
2357         return -EINVAL;
2358     }
2359 
2360     for (i = 0; i < ARRAY_SIZE(p->chpid); i++) {
2361         p->chpid[i] = chpid[i];
2362     }
2363 
2364     fclose(fd);
2365     g_free(fid_path);
2366 
2367     return 0;
2368 }
2369 
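/*
 * Read the pim/pam/pom path masks of a passed-through subchannel from its
 * sysfs "pimpampom" attribute into the guest PMCW.
 */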
2370 static int css_sch_get_path_masks(SubchDev *sch, CssDevId *dev_id)
2371 {
2372     char *fid_path;
2373     FILE *fd;
2374     uint32_t pim, pam, pom;
2375     PMCW *p = &sch->curr_status.pmcw;
2376 
2377     fid_path = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/pimpampom",
2378                                dev_id->cssid, dev_id->ssid, dev_id->devid);
2379     fd = fopen(fid_path, "r");
2380     if (fd == NULL) {
2381         error_report("%s: open %s failed", __func__, fid_path);
2382         g_free(fid_path);
2383         return -EINVAL;
2384     }
2385 
2386     if (fscanf(fd, "%x %x %x", &pim, &pam, &pom) != 3) {
2387         fclose(fd);
2388         g_free(fid_path);
2389         return -EINVAL;
2390     }
2391 
2392     p->pim = pim;
2393     p->pam = pam;
2394     p->pom = pom;
2395     fclose(fd);
2396     g_free(fid_path);
2397 
2398     return 0;
2399 }
2400 
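/*
 * Read the type of channel path @chpid from the host's sysfs
 * (/sys/devices/css<cssid>/chp0.<chpid>/type).
 */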
2401 static int css_sch_get_chpid_type(uint8_t chpid, uint32_t *type,
2402                                   CssDevId *dev_id)
2403 {
2404     char *fid_path;
2405     FILE *fd;
2406 
2407     fid_path = g_strdup_printf("/sys/devices/css%x/chp0.%02x/type",
2408                                dev_id->cssid, chpid);
2409     fd = fopen(fid_path, "r");
2410     if (fd == NULL) {
2411         error_report("%s: open %s failed", __func__, fid_path);
2412         g_free(fid_path);
2413         return -EINVAL;
2414     }
2415 
2416     if (fscanf(fd, "%x", type) != 1) {
2417         fclose(fd);
2418         g_free(fid_path);
2419         return -EINVAL;
2420     }
2421 
2422     fclose(fd);
2423     g_free(fid_path);
2424 
2425     return 0;
2426 }
2427 
2428 /*
2429  * We currently retrieve the real device information from sysfs to build the
2430  * guest subchannel information block; migration is not yet taken into
2431  * account. We need to revisit this when we add migration support.
2432  */
2433 int css_sch_build_schib(SubchDev *sch, CssDevId *dev_id)
2434 {
2435     CssImage *css = channel_subsys.css[sch->cssid];
2436     PMCW *p = &sch->curr_status.pmcw;
2437     SCSW *s = &sch->curr_status.scsw;
2438     uint32_t type;
2439     int i, ret;
2440 
2441     assert(css != NULL);
2442     memset(p, 0, sizeof(PMCW));
2443     p->flags |= PMCW_FLAGS_MASK_DNV;
2444     /* We are dealing with I/O subchannels only. */
2445     p->devno = sch->devno;
2446 
2447     /* Grab path mask from sysfs. */
2448     ret = css_sch_get_path_masks(sch, dev_id);
2449     if (ret) {
2450         return ret;
2451     }
2452 
2453     /* Grab chpids from sysfs. */
2454     ret = css_sch_get_chpids(sch, dev_id);
2455     if (ret) {
2456         return ret;
2457     }
2458 
2459     /* Build chpid type. */
2460     for (i = 0; i < ARRAY_SIZE(p->chpid); i++) {
2461         if (p->chpid[i] && !css->chpids[p->chpid[i]].in_use) {
2462             ret = css_sch_get_chpid_type(p->chpid[i], &type, dev_id);
2463             if (ret) {
2464                 return ret;
2465             }
2466             css_add_chpid(sch->cssid, p->chpid[i], type, false);
2467         }
2468     }
2469 
2470     memset(s, 0, sizeof(SCSW));
2471     sch->curr_status.mba = 0;
2472     for (i = 0; i < ARRAY_SIZE(sch->curr_status.mda); i++) {
2473         sch->curr_status.mda[i] = 0;
2474     }
2475 
2476     return 0;
2477 }
2478