/*
 * SCLP
 *    Event Facility
 *       handles SCLP event types
 *          - Signal Quiesce - system power down
 *          - ASCII Console Data - VT220 read and write
 *
 * Copyright IBM, Corp. 2012
 *
 * Authors:
 *  Heinz Graalfs <graalfs@de.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"

#include "hw/s390x/sclp.h"
#include "migration/vmstate.h"
#include "hw/s390x/event-facility.h"

typedef struct SCLPEventsBus {
    BusState qbus;
} SCLPEventsBus;

/* we need to save 32 bit chunks for compatibility */
#if HOST_BIG_ENDIAN
#define RECV_MASK_LOWER 1
#define RECV_MASK_UPPER 0
#else /* little endian host */
#define RECV_MASK_LOWER 0
#define RECV_MASK_UPPER 1
#endif

struct SCLPEventFacility {
    SysBusDevice parent_obj;
    SCLPEventsBus sbus;
    SCLPEvent quiesce, cpu_hotplug;
    /* guest's receive mask */
    union {
        uint32_t receive_mask_pieces[2];
        sccb_mask_t receive_mask;
    };
    /*
     * when false, we keep the same broken, backwards compatible behaviour as
     * before, allowing only masks of size exactly 4; when true, we implement
     * the architecture correctly, allowing all valid mask sizes. Needed for
     * migration toward older versions.
     */
    bool allow_all_mask_sizes;
    /* length of the receive mask */
    uint16_t mask_length;
};

/* return true if any child has event pending set */
static bool event_pending(SCLPEventFacility *ef)
{
    BusChild *kid;
    SCLPEvent *event;
    SCLPEventClass *event_class;

    QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
        event = SCLP_EVENT(kid->child);
        event_class = SCLP_EVENT_GET_CLASS(event);
        if (event->event_pending &&
            event_class->get_send_mask() & ef->receive_mask) {
            return true;
        }
    }
    return false;
}

static sccb_mask_t get_host_send_mask(SCLPEventFacility *ef)
{
    sccb_mask_t mask;
    BusChild *kid;
    SCLPEventClass *child;

    mask = 0;

    QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        child = SCLP_EVENT_GET_CLASS((SCLPEvent *) qdev);
        mask |= child->get_send_mask();
    }
    return mask;
}

static sccb_mask_t get_host_receive_mask(SCLPEventFacility *ef)
{
    sccb_mask_t mask;
    BusChild *kid;
    SCLPEventClass *child;

    mask = 0;

    QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        child = SCLP_EVENT_GET_CLASS((SCLPEvent *) qdev);
        mask |= child->get_receive_mask();
    }
    return mask;
}

static uint16_t write_event_length_check(SCCB *sccb)
{
    int slen;
    unsigned elen = 0;
    EventBufferHeader *event;
    WriteEventData *wed = (WriteEventData *) sccb;

    event = (EventBufferHeader *) &wed->ebh;
    for (slen = sccb_data_len(sccb); slen > 0; slen -= elen) {
        elen = be16_to_cpu(event->length);
        if (elen < sizeof(*event) || elen > slen) {
            return SCLP_RC_EVENT_BUFFER_SYNTAX_ERROR;
        }
        event = (void *) event + elen;
    }
    if (slen) {
        return SCLP_RC_INCONSISTENT_LENGTHS;
    }
    return SCLP_RC_NORMAL_COMPLETION;
}

static uint16_t handle_write_event_buf(SCLPEventFacility *ef,
                                       EventBufferHeader *event_buf, SCCB *sccb)
{
    uint16_t rc;
    BusChild *kid;
    SCLPEvent *event;
    SCLPEventClass *ec;

    rc = SCLP_RC_INVALID_FUNCTION;

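    /*
     * Hand the event buffer to the first child on the SCLP events bus that
     * implements write_event_data and can handle this event type.
     */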
    QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        event = (SCLPEvent *) qdev;
        ec = SCLP_EVENT_GET_CLASS(event);

        if (ec->write_event_data &&
            ec->can_handle_event(event_buf->type)) {
            rc = ec->write_event_data(event, event_buf);
            break;
        }
    }
    return rc;
}

static uint16_t handle_sccb_write_events(SCLPEventFacility *ef, SCCB *sccb)
{
    uint16_t rc;
    int slen;
    unsigned elen = 0;
    EventBufferHeader *event_buf;
    WriteEventData *wed = (WriteEventData *) sccb;

    event_buf = &wed->ebh;
    rc = SCLP_RC_NORMAL_COMPLETION;

    /* loop over all contained event buffers */
    for (slen = sccb_data_len(sccb); slen > 0; slen -= elen) {
        elen = be16_to_cpu(event_buf->length);

        /* in case of a previous error mark all trailing buffers
         * as not accepted */
        if (rc != SCLP_RC_NORMAL_COMPLETION) {
            event_buf->flags &= ~(SCLP_EVENT_BUFFER_ACCEPTED);
        } else {
            rc = handle_write_event_buf(ef, event_buf, sccb);
        }
        event_buf = (void *) event_buf + elen;
    }
    return rc;
}

static void write_event_data(SCLPEventFacility *ef, SCCB *sccb)
{
    if (sccb->h.function_code != SCLP_FC_NORMAL_WRITE) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION);
        return;
    }
    if (be16_to_cpu(sccb->h.length) < 8) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH);
        return;
    }
    /* first do a sanity check of the write events */
    sccb->h.response_code = cpu_to_be16(write_event_length_check(sccb));

    /* if no early error, then execute */
    if (sccb->h.response_code == be16_to_cpu(SCLP_RC_NORMAL_COMPLETION)) {
        sccb->h.response_code =
            cpu_to_be16(handle_sccb_write_events(ef, sccb));
    }
}

static uint16_t handle_sccb_read_events(SCLPEventFacility *ef, SCCB *sccb,
                                        sccb_mask_t mask)
{
    uint16_t rc;
    int slen;
    unsigned elen;
    BusChild *kid;
    SCLPEvent *event;
    SCLPEventClass *ec;
    EventBufferHeader *event_buf;
    ReadEventData *red = (ReadEventData *) sccb;

    event_buf = &red->ebh;
    event_buf->length = 0;
    slen = sccb_data_len(sccb);

    rc = SCLP_RC_NO_EVENT_BUFFERS_STORED;

    QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        event = (SCLPEvent *) qdev;
        ec = SCLP_EVENT_GET_CLASS(event);

        if (mask & ec->get_send_mask()) {
            if (ec->read_event_data(event, event_buf, &slen)) {
                elen = be16_to_cpu(event_buf->length);
                event_buf = (EventBufferHeader *) ((char *)event_buf + elen);
                rc = SCLP_RC_NORMAL_COMPLETION;
            }
        }
    }

    if (sccb->h.control_mask[2] & SCLP_VARIABLE_LENGTH_RESPONSE) {
        /* architecture suggests to reset variable-length-response bit */
        sccb->h.control_mask[2] &= ~SCLP_VARIABLE_LENGTH_RESPONSE;
        /* with a new length value */
        sccb->h.length = cpu_to_be16(SCCB_SIZE - slen);
    }
    return rc;
}

/* copy up to src_len bytes and fill the rest of dst with zeroes */
static void copy_mask(uint8_t *dst, uint8_t *src, uint16_t dst_len,
                      uint16_t src_len)
{
    int i;

    for (i = 0; i < dst_len; i++) {
        dst[i] = i < src_len ? src[i] : 0;
    }
}

static void read_event_data(SCLPEventFacility *ef, SCCB *sccb)
{
    sccb_mask_t sclp_active_selection_mask;
    sccb_mask_t sclp_cp_receive_mask;

    ReadEventData *red = (ReadEventData *) sccb;

    if (be16_to_cpu(sccb->h.length) != SCCB_SIZE) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH);
        return;
    }

    switch (sccb->h.function_code) {
    case SCLP_UNCONDITIONAL_READ:
        sccb->h.response_code = cpu_to_be16(
            handle_sccb_read_events(ef, sccb, ef->receive_mask));
        break;
    case SCLP_SELECTIVE_READ:
        /* get active selection mask */
        sclp_cp_receive_mask = ef->receive_mask;

        copy_mask((uint8_t *)&sclp_active_selection_mask, (uint8_t *)&red->mask,
                  sizeof(sclp_active_selection_mask), ef->mask_length);
        sclp_active_selection_mask = be64_to_cpu(sclp_active_selection_mask);
        if (!sclp_cp_receive_mask ||
            (sclp_active_selection_mask & ~sclp_cp_receive_mask)) {
            sccb->h.response_code =
                cpu_to_be16(SCLP_RC_INVALID_SELECTION_MASK);
        } else {
            sccb->h.response_code = cpu_to_be16(
                handle_sccb_read_events(ef, sccb, sclp_active_selection_mask));
        }
        break;
    default:
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION);
    }
}

static void write_event_mask(SCLPEventFacility *ef, SCCB *sccb)
{
    WriteEventMask *we_mask = (WriteEventMask *) sccb;
    uint16_t mask_length = be16_to_cpu(we_mask->mask_length);
    sccb_mask_t tmp_mask;

    if (!mask_length || (mask_length > SCLP_EVENT_MASK_LEN_MAX) ||
        ((mask_length != 4) && !ef->allow_all_mask_sizes)) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_MASK_LENGTH);
        return;
    }

    /*
     * Note: We currently only support masks up to 8 byte length;
     *       the remainder is filled up with zeroes. Older Linux
     *       kernels use a 4 byte mask length, newer ones can use both
     *       8 or 4 depending on what is available on the host.
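     *
     *       The guest's mask bytes arrive big endian and are zero padded
     *       before the be64_to_cpu() conversion below, so a 4 byte mask
     *       ends up in the upper half of ef->receive_mask.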
308 */ 309 310 /* keep track of the guest's capability masks */ 311 copy_mask((uint8_t *)&tmp_mask, WEM_CP_RECEIVE_MASK(we_mask, mask_length), 312 sizeof(tmp_mask), mask_length); 313 ef->receive_mask = be64_to_cpu(tmp_mask); 314 315 /* return the SCLP's capability masks to the guest */ 316 tmp_mask = cpu_to_be64(get_host_receive_mask(ef)); 317 copy_mask(WEM_RECEIVE_MASK(we_mask, mask_length), (uint8_t *)&tmp_mask, 318 mask_length, sizeof(tmp_mask)); 319 tmp_mask = cpu_to_be64(get_host_send_mask(ef)); 320 copy_mask(WEM_SEND_MASK(we_mask, mask_length), (uint8_t *)&tmp_mask, 321 mask_length, sizeof(tmp_mask)); 322 323 sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION); 324 ef->mask_length = mask_length; 325 } 326 327 /* qemu object creation and initialization functions */ 328 329 #define TYPE_SCLP_EVENTS_BUS "s390-sclp-events-bus" 330 331 static const TypeInfo sclp_events_bus_info = { 332 .name = TYPE_SCLP_EVENTS_BUS, 333 .parent = TYPE_BUS, 334 }; 335 336 static void command_handler(SCLPEventFacility *ef, SCCB *sccb, uint64_t code) 337 { 338 switch (code & SCLP_CMD_CODE_MASK) { 339 case SCLP_CMD_READ_EVENT_DATA: 340 read_event_data(ef, sccb); 341 break; 342 case SCLP_CMD_WRITE_EVENT_DATA: 343 write_event_data(ef, sccb); 344 break; 345 case SCLP_CMD_WRITE_EVENT_MASK: 346 write_event_mask(ef, sccb); 347 break; 348 } 349 } 350 351 static bool vmstate_event_facility_mask64_needed(void *opaque) 352 { 353 SCLPEventFacility *ef = opaque; 354 355 return (ef->receive_mask & 0xFFFFFFFF) != 0; 356 } 357 358 static bool vmstate_event_facility_mask_length_needed(void *opaque) 359 { 360 SCLPEventFacility *ef = opaque; 361 362 return ef->allow_all_mask_sizes; 363 } 364 365 static const VMStateDescription vmstate_event_facility_mask64 = { 366 .name = "vmstate-event-facility/mask64", 367 .version_id = 0, 368 .minimum_version_id = 0, 369 .needed = vmstate_event_facility_mask64_needed, 370 .fields = (const VMStateField[]) { 371 VMSTATE_UINT32(receive_mask_pieces[RECV_MASK_LOWER], SCLPEventFacility), 372 VMSTATE_END_OF_LIST() 373 } 374 }; 375 376 static const VMStateDescription vmstate_event_facility_mask_length = { 377 .name = "vmstate-event-facility/mask_length", 378 .version_id = 0, 379 .minimum_version_id = 0, 380 .needed = vmstate_event_facility_mask_length_needed, 381 .fields = (const VMStateField[]) { 382 VMSTATE_UINT16(mask_length, SCLPEventFacility), 383 VMSTATE_END_OF_LIST() 384 } 385 }; 386 387 static const VMStateDescription vmstate_event_facility = { 388 .name = "vmstate-event-facility", 389 .version_id = 0, 390 .minimum_version_id = 0, 391 .fields = (const VMStateField[]) { 392 VMSTATE_UINT32(receive_mask_pieces[RECV_MASK_UPPER], SCLPEventFacility), 393 VMSTATE_END_OF_LIST() 394 }, 395 .subsections = (const VMStateDescription * const []) { 396 &vmstate_event_facility_mask64, 397 &vmstate_event_facility_mask_length, 398 NULL 399 } 400 }; 401 402 static void sclp_event_set_allow_all_mask_sizes(Object *obj, bool value, 403 Error **errp) 404 { 405 SCLPEventFacility *ef = (SCLPEventFacility *)obj; 406 407 ef->allow_all_mask_sizes = value; 408 } 409 410 static bool sclp_event_get_allow_all_mask_sizes(Object *obj, Error **errp) 411 { 412 SCLPEventFacility *ef = (SCLPEventFacility *)obj; 413 414 return ef->allow_all_mask_sizes; 415 } 416 417 static void init_event_facility(Object *obj) 418 { 419 SCLPEventFacility *event_facility = EVENT_FACILITY(obj); 420 DeviceState *sdev = DEVICE(obj); 421 422 event_facility->mask_length = 4; 423 event_facility->allow_all_mask_sizes = true; 424 
    object_property_add_bool(obj, "allow_all_mask_sizes",
                             sclp_event_get_allow_all_mask_sizes,
                             sclp_event_set_allow_all_mask_sizes);

    /* Spawn a new bus for SCLP events */
    qbus_init(&event_facility->sbus, sizeof(event_facility->sbus),
              TYPE_SCLP_EVENTS_BUS, sdev, NULL);

    object_initialize_child(obj, TYPE_SCLP_QUIESCE,
                            &event_facility->quiesce,
                            TYPE_SCLP_QUIESCE);

    object_initialize_child(obj, TYPE_SCLP_CPU_HOTPLUG,
                            &event_facility->cpu_hotplug,
                            TYPE_SCLP_CPU_HOTPLUG);
}

static void realize_event_facility(DeviceState *dev, Error **errp)
{
    SCLPEventFacility *event_facility = EVENT_FACILITY(dev);

    if (!qdev_realize(DEVICE(&event_facility->quiesce),
                      BUS(&event_facility->sbus), errp)) {
        return;
    }
    if (!qdev_realize(DEVICE(&event_facility->cpu_hotplug),
                      BUS(&event_facility->sbus), errp)) {
        qdev_unrealize(DEVICE(&event_facility->quiesce));
        return;
    }
}

static void reset_event_facility(DeviceState *dev)
{
    SCLPEventFacility *sdev = EVENT_FACILITY(dev);

    sdev->receive_mask = 0;
}

static void init_event_facility_class(ObjectClass *klass, void *data)
{
    SysBusDeviceClass *sbdc = SYS_BUS_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(sbdc);
    SCLPEventFacilityClass *k = EVENT_FACILITY_CLASS(dc);

    dc->realize = realize_event_facility;
    dc->reset = reset_event_facility;
    dc->vmsd = &vmstate_event_facility;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    k->command_handler = command_handler;
    k->event_pending = event_pending;
}

static const TypeInfo sclp_event_facility_info = {
    .name = TYPE_SCLP_EVENT_FACILITY,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = init_event_facility,
    .instance_size = sizeof(SCLPEventFacility),
    .class_init = init_event_facility_class,
    .class_size = sizeof(SCLPEventFacilityClass),
};

static void event_realize(DeviceState *qdev, Error **errp)
{
    SCLPEvent *event = SCLP_EVENT(qdev);
    SCLPEventClass *child = SCLP_EVENT_GET_CLASS(event);

    if (child->init) {
        int rc = child->init(event);
        if (rc < 0) {
            error_setg(errp, "SCLP event initialization failed.");
            return;
        }
    }
}

static void event_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->bus_type = TYPE_SCLP_EVENTS_BUS;
    dc->realize = event_realize;
}

static const TypeInfo sclp_event_type_info = {
    .name = TYPE_SCLP_EVENT,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SCLPEvent),
    .class_init = event_class_init,
    .class_size = sizeof(SCLPEventClass),
    .abstract = true,
};

static void register_types(void)
{
    type_register_static(&sclp_events_bus_info);
    type_register_static(&sclp_event_facility_info);
    type_register_static(&sclp_event_type_info);
}

type_init(register_types)

BusState *sclp_get_event_facility_bus(SCLPEventFacility *ef)
{
    return BUS(&ef->sbus);
}