/*
 * Channel subsystem structures and definitions.
 *
 * Copyright 2012 IBM Corp.
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#ifndef CSS_H
#define CSS_H

#include "hw/s390x/adapter.h"
#include "hw/s390x/s390_flic.h"
#include "hw/s390x/ioinst.h"
#include "sysemu/kvm.h"
#include "target/s390x/cpu-qom.h"

/* Channel subsystem constants. */
#define MAX_DEVNO 65535
#define MAX_SCHID 65535
#define MAX_SSID 3
#define MAX_CSSID 255
#define MAX_CHPID 255

#define MAX_ISC 7

#define MAX_CIWS 62

#define VIRTUAL_CSSID 0xfe
#define VIRTIO_CCW_CHPID 0 /* used by convention */

typedef struct CIW {
    uint8_t type;
    uint8_t command;
    uint16_t count;
} QEMU_PACKED CIW;

typedef struct SenseId {
    /* common part */
    uint8_t reserved;   /* always 0x'FF' */
    uint16_t cu_type;   /* control unit type */
    uint8_t cu_model;   /* control unit model */
    uint16_t dev_type;  /* device type */
    uint8_t dev_model;  /* device model */
    uint8_t unused;     /* padding byte */
    /* extended part */
    CIW ciw[MAX_CIWS];  /* variable # of CIWs */
} SenseId;              /* Note: No QEMU_PACKED due to unaligned members */
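
/*
 * Illustrative sketch only (not part of this header's API): a virtual
 * transport would typically fill in a SenseId before building its SCHIB.
 * The values below are placeholders, not authoritative; real control unit
 * and device types depend on the emulated device.
 *
 *     SenseId id = {
 *         .reserved  = 0xff,     // always 0xff, per the layout above
 *         .cu_type   = 0x1234,   // placeholder control unit type
 *         .cu_model  = 0,
 *         .dev_type  = 0,        // 0: no device-type information
 *         .dev_model = 0,
 *     };
 *     sch->id = id;              // "sch" is an assumed SubchDev pointer
 */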

/* Channel measurements, from linux/drivers/s390/cio/cmf.c. */
typedef struct CMB {
    uint16_t ssch_rsch_count;
    uint16_t sample_count;
    uint32_t device_connect_time;
    uint32_t function_pending_time;
    uint32_t device_disconnect_time;
    uint32_t control_unit_queuing_time;
    uint32_t device_active_only_time;
    uint32_t reserved[2];
} QEMU_PACKED CMB;

typedef struct CMBE {
    uint32_t ssch_rsch_count;
    uint32_t sample_count;
    uint32_t device_connect_time;
    uint32_t function_pending_time;
    uint32_t device_disconnect_time;
    uint32_t control_unit_queuing_time;
    uint32_t device_active_only_time;
    uint32_t device_busy_time;
    uint32_t initial_command_response_time;
    uint32_t reserved[7];
} QEMU_PACKED CMBE;

typedef enum CcwDataStreamOp {
    CDS_OP_R = 0, /* read, false when used as is_write */
    CDS_OP_W = 1, /* write, true when used as is_write */
    CDS_OP_A = 2  /* advance, should not be used as is_write */
} CcwDataStreamOp;

/* Normal usage is via SubchDev.cds instead of instantiating directly. */
typedef struct CcwDataStream {
#define CDS_F_IDA   0x01
#define CDS_F_MIDA  0x02
#define CDS_F_I2K   0x04
#define CDS_F_C64   0x08
#define CDS_F_FMT   0x10 /* CCW format-1 */
#define CDS_F_STREAM_BROKEN  0x80
    uint8_t flags;
    uint8_t at_idaw;
    uint16_t at_byte;
    uint16_t count;
    uint32_t cda_orig;
    int (*op_handler)(struct CcwDataStream *cds, void *buff, int len,
                      CcwDataStreamOp op);
    hwaddr cda;
    bool do_skip;
} CcwDataStream;

/*
 * I/O instructions conclude according to this enum. Currently only
 * condition codes (cc) are represented. Valid values are 0, 1, 2 and 3;
 * the generic meaning for I/O instructions is given briefly below. For
 * details, consult the PoP.
 */
typedef enum IOInstEnding {
    /* produced expected result */
    IOINST_CC_EXPECTED = 0,
    /* status conditions were present or produced alternate result */
    IOINST_CC_STATUS_PRESENT = 1,
    /* inst. ineffective because busy with previously initiated function */
    IOINST_CC_BUSY = 2,
    /* inst. ineffective because not operational */
    IOINST_CC_NOT_OPERATIONAL = 3
} IOInstEnding;
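
/*
 * The enum values map directly onto the condition code an I/O instruction
 * handler sets. A hedged sketch of how a caller might use it (setcc() and
 * the surrounding handler context are assumptions modelled on the
 * instruction handlers, not definitions from this header):
 *
 *     IOInstEnding ret = css_do_ssch(sch, &orb);   // declared below
 *     setcc(cpu, ret);                             // cc = 0, 1, 2 or 3
 */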

typedef struct SubchDev SubchDev;
struct SubchDev {
    /* channel-subsystem related things: */
    SCHIB curr_status;       /* Needs alignment and thus must come first */
    ORB orb;
    uint8_t cssid;
    uint8_t ssid;
    uint16_t schid;
    uint16_t devno;
    uint8_t sense_data[32];
    hwaddr channel_prog;
    CCW1 last_cmd;
    bool last_cmd_valid;
    bool ccw_fmt_1;
    bool thinint_active;
    uint8_t ccw_no_data_cnt;
    uint16_t migrated_schid; /* used for mismatch detection */
    CcwDataStream cds;
    /* transport-provided data: */
    int (*ccw_cb) (SubchDev *, CCW1);
    void (*disable_cb)(SubchDev *);
    IOInstEnding (*do_subchannel_work) (SubchDev *);
    void (*irb_cb)(SubchDev *, IRB *);
    SenseId id;
    void *driver_data;
    ESW esw;
};

static inline void sch_gen_unit_exception(SubchDev *sch)
{
    sch->curr_status.scsw.ctrl &= ~(SCSW_ACTL_DEVICE_ACTIVE |
                                    SCSW_ACTL_SUBCH_ACTIVE);
    sch->curr_status.scsw.ctrl |= SCSW_STCTL_PRIMARY |
                                  SCSW_STCTL_SECONDARY |
                                  SCSW_STCTL_ALERT |
                                  SCSW_STCTL_STATUS_PEND;
    sch->curr_status.scsw.cpa = sch->channel_prog + 8;
    sch->curr_status.scsw.dstat = SCSW_DSTAT_UNIT_EXCEP;
}

extern const VMStateDescription vmstate_subch_dev;

/*
 * Identify a device within the channel subsystem.
 * Note that this can be used to identify either the subchannel or
 * the attached I/O device, as there's always one I/O device per
 * subchannel.
 */
typedef struct CssDevId {
    uint8_t cssid;
    uint8_t ssid;
    uint16_t devid;
    bool valid;
} CssDevId;

extern const PropertyInfo css_devid_propinfo;

#define DEFINE_PROP_CSS_DEV_ID(_n, _s, _f) \
    DEFINE_PROP(_n, _s, _f, css_devid_propinfo, CssDevId)
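
/*
 * Usage sketch: a ccw device exposes its bus id as a QOM property via this
 * macro, e.g. in its property list. "MyCcwDevice" and its CssDevId field
 * "devno" are hypothetical names used for illustration only.
 *
 *     static Property my_ccw_dev_properties[] = {
 *         DEFINE_PROP_CSS_DEV_ID("devno", MyCcwDevice, devno),
 *         DEFINE_PROP_END_OF_LIST(),
 *     };
 */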

typedef struct IndAddr {
    hwaddr addr;
    uint64_t map;
    unsigned long refcnt;
    int32_t len;
    QTAILQ_ENTRY(IndAddr) sibling;
} IndAddr;

extern const VMStateDescription vmstate_ind_addr;

#define VMSTATE_PTR_TO_IND_ADDR(_f, _s) \
    VMSTATE_STRUCT(_f, _s, 1, vmstate_ind_addr, IndAddr*)

IndAddr *get_indicator(hwaddr ind_addr, int len);
void release_indicator(AdapterInfo *adapter, IndAddr *indicator);
int map_indicator(AdapterInfo *adapter, IndAddr *indicator);
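
/*
 * Sketch of the expected indicator lifecycle (caller-side flow under assumed
 * names, not a definitive sequence): obtain a refcounted IndAddr for a guest
 * address, map it for adapter interrupts where needed, and release it on
 * teardown. "ind_loc" and "adapter" are assumed to be provided by the caller.
 *
 *     IndAddr *ind = get_indicator(ind_loc, sizeof(uint64_t));
 *     if (map_indicator(&adapter, ind) < 0) {
 *         // ...mapping failed, handle the error...
 *     }
 *     // ...use the indicator...
 *     release_indicator(&adapter, ind);
 */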

typedef SubchDev *(*css_subch_cb_func)(uint8_t m, uint8_t cssid, uint8_t ssid,
                                       uint16_t schid);
int css_create_css_image(uint8_t cssid, bool default_image);
bool css_devno_used(uint8_t cssid, uint8_t ssid, uint16_t devno);
void css_subch_assign(uint8_t cssid, uint8_t ssid, uint16_t schid,
                      uint16_t devno, SubchDev *sch);
void css_sch_build_virtual_schib(SubchDev *sch, uint8_t chpid, uint8_t type);
int css_sch_build_schib(SubchDev *sch, CssDevId *dev_id);
unsigned int css_find_free_chpid(uint8_t cssid);
uint16_t css_build_subchannel_id(SubchDev *sch);
void copy_scsw_to_guest(SCSW *dest, const SCSW *src);
void copy_esw_to_guest(ESW *dest, const ESW *src);
void css_inject_io_interrupt(SubchDev *sch);
void css_reset(void);
void css_reset_sch(SubchDev *sch);
void css_crw_add_to_queue(CRW crw);
void css_queue_crw(uint8_t rsc, uint8_t erc, int solicited,
                   int chain, uint16_t rsid);
void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid,
                           int hotplugged, int add);
void css_generate_chp_crws(uint8_t cssid, uint8_t chpid);
void css_generate_css_crws(uint8_t cssid);
void css_clear_sei_pending(void);
IOInstEnding s390_ccw_cmd_request(SubchDev *sch);
IOInstEnding do_subchannel_work_virtual(SubchDev *sub);
IOInstEnding do_subchannel_work_passthrough(SubchDev *sub);
void build_irb_passthrough(SubchDev *sch, IRB *irb);
void build_irb_virtual(SubchDev *sch, IRB *irb);

int s390_ccw_halt(SubchDev *sch);
int s390_ccw_clear(SubchDev *sch);
IOInstEnding s390_ccw_store(SubchDev *sch);

typedef enum {
    CSS_IO_ADAPTER_VIRTIO = 0,
    CSS_IO_ADAPTER_PCI = 1,
    CSS_IO_ADAPTER_TYPE_NUMS,
} CssIoAdapterType;

void css_adapter_interrupt(CssIoAdapterType type, uint8_t isc);
int css_do_sic(S390CPU *cpu, uint8_t isc, uint16_t mode);
uint32_t css_get_adapter_id(CssIoAdapterType type, uint8_t isc);
void css_register_io_adapters(CssIoAdapterType type, bool swap, bool maskable,
                              uint8_t flags, Error **errp);
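
/*
 * Illustrative registration call (the swap/maskable/flags choices below are
 * placeholders, not a recommendation): a transport registers its adapter
 * type once so that adapter routes and indicators can be set up for it.
 *
 *     Error *local_err = NULL;
 *
 *     css_register_io_adapters(CSS_IO_ADAPTER_VIRTIO, true, false, 0,
 *                              &local_err);
 *     if (local_err) {
 *         // ...propagate or report the error...
 *     }
 */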

#ifndef CONFIG_USER_ONLY
SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid,
                         uint16_t schid);
bool css_subch_visible(SubchDev *sch);
void css_conditional_io_interrupt(SubchDev *sch);
IOInstEnding css_do_stsch(SubchDev *sch, SCHIB *schib);
bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid);
IOInstEnding css_do_msch(SubchDev *sch, const SCHIB *schib);
IOInstEnding css_do_xsch(SubchDev *sch);
IOInstEnding css_do_csch(SubchDev *sch);
IOInstEnding css_do_hsch(SubchDev *sch);
IOInstEnding css_do_ssch(SubchDev *sch, ORB *orb);
int css_do_tsch_get_irb(SubchDev *sch, IRB *irb, int *irb_len);
void css_do_tsch_update_subch(SubchDev *sch);
int css_do_stcrw(CRW *crw);
void css_undo_stcrw(CRW *crw);
int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
                         int rfmt, void *buf);
void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo);
int css_enable_mcsse(void);
int css_enable_mss(void);
IOInstEnding css_do_rsch(SubchDev *sch);
int css_do_rchp(uint8_t cssid, uint8_t chpid);
bool css_present(uint8_t cssid);
#endif

extern const PropertyInfo css_devid_ro_propinfo;

#define DEFINE_PROP_CSS_DEV_ID_RO(_n, _s, _f) \
    DEFINE_PROP(_n, _s, _f, css_devid_ro_propinfo, CssDevId)

/**
 * Create a subchannel for the given bus id.
 *
 * If @p bus_id is valid, verify that it is not already in use and find a
 * free devno for it.
 * If @p bus_id is not valid, find a free subchannel id and device number
 * across all subchannel sets and all css images, starting from the default
 * css image.
 *
 * If either of these actions succeeds, allocate a subchannel structure,
 * initialise it with the bus id, subchannel id and device number, register
 * it with the CSS and return it. Otherwise return NULL.
 *
 * The caller becomes the owner of the returned subchannel structure and
 * is responsible for unregistering and freeing it.
 */
SubchDev *css_create_sch(CssDevId bus_id, Error **errp);
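
/*
 * Caller-side sketch (hypothetical, error handling shortened): create the
 * subchannel for a user-supplied or wildcard bus id and attach the
 * transport's per-device state. "my_dev" is an assumed pointer to that state.
 *
 *     SubchDev *sch = css_create_sch(bus_id, errp);
 *     if (!sch) {
 *         return;                    // creation failed, errp describes why
 *     }
 *     sch->driver_data = my_dev;
 *     // ...set ccw_cb/do_subchannel_work, then build the SCHIB...
 */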

/** Turn on css migration */
void css_register_vmstate(void);

void ccw_dstream_init(CcwDataStream *cds, CCW1 const *ccw, ORB const *orb);

static inline void ccw_dstream_rewind(CcwDataStream *cds)
{
    cds->at_byte = 0;
    cds->at_idaw = 0;
    cds->cda = cds->cda_orig;
}

static inline bool ccw_dstream_good(CcwDataStream *cds)
{
    return !(cds->flags & CDS_F_STREAM_BROKEN);
}

static inline uint16_t ccw_dstream_residual_count(CcwDataStream *cds)
{
    return cds->count - cds->at_byte;
}

static inline uint16_t ccw_dstream_avail(CcwDataStream *cds)
{
    return ccw_dstream_good(cds) ? ccw_dstream_residual_count(cds) : 0;
}

static inline int ccw_dstream_advance(CcwDataStream *cds, int len)
{
    return cds->op_handler(cds, NULL, len, CDS_OP_A);
}

static inline int ccw_dstream_write_buf(CcwDataStream *cds, void *buff, int len)
{
    return cds->op_handler(cds, buff, len, CDS_OP_W);
}

static inline int ccw_dstream_read_buf(CcwDataStream *cds, void *buff, int len)
{
    return cds->op_handler(cds, buff, len, CDS_OP_R);
}

#define ccw_dstream_read(cds, v) ccw_dstream_read_buf((cds), &(v), sizeof(v))
#define ccw_dstream_write(cds, v) ccw_dstream_write_buf((cds), &(v), sizeof(v))
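
/*
 * Typical read-path sketch (assuming a CCW handler context where "ccw" and
 * "sch" are in scope; error handling abbreviated): initialise the stream from
 * the CCW and ORB, check the available byte count, then pull data with the
 * typed helper.
 *
 *     uint16_t vdev_id;
 *
 *     ccw_dstream_init(&sch->cds, &ccw, &sch->orb);
 *     if (ccw_dstream_avail(&sch->cds) < sizeof(vdev_id)) {
 *         // ...too little data, fail the command...
 *     }
 *     ccw_dstream_read(&sch->cds, vdev_id);     // reads sizeof(vdev_id) bytes
 *     if (!ccw_dstream_good(&sch->cds)) {
 *         // ...the stream is broken, abort the command...
 *     }
 */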

/**
 * true if (vmstate based) migration of the channel subsystem
 * is enabled, false if it is disabled.
 */
extern bool css_migration_enabled;

#endif