1 /* 2 * SCLP 3 * Event Facility 4 * handles SCLP event types 5 * - Signal Quiesce - system power down 6 * - ASCII Console Data - VT220 read and write 7 * 8 * Copyright IBM, Corp. 2012 9 * 10 * Authors: 11 * Heinz Graalfs <graalfs@de.ibm.com> 12 * 13 * This work is licensed under the terms of the GNU GPL, version 2 or (at your 14 * option) any later version. See the COPYING file in the top-level directory. 15 * 16 */ 17 18 #include "qemu/osdep.h" 19 #include "qapi/error.h" 20 #include "sysemu/sysemu.h" 21 22 #include "hw/s390x/sclp.h" 23 #include "hw/s390x/event-facility.h" 24 25 typedef struct SCLPEventsBus { 26 BusState qbus; 27 } SCLPEventsBus; 28 29 /* we need to save 32 bit chunks for compatibility */ 30 #ifdef HOST_WORDS_BIGENDIAN 31 #define RECV_MASK_LOWER 1 32 #define RECV_MASK_UPPER 0 33 #else /* little endian host */ 34 #define RECV_MASK_LOWER 0 35 #define RECV_MASK_UPPER 1 36 #endif 37 38 struct SCLPEventFacility { 39 SysBusDevice parent_obj; 40 SCLPEventsBus sbus; 41 /* guest's receive mask */ 42 union { 43 uint32_t receive_mask_pieces[2]; 44 sccb_mask_t receive_mask; 45 }; 46 /* 47 * when false, we keep the same broken, backwards compatible behaviour as 48 * before, allowing only masks of size exactly 4; when true, we implement 49 * the architecture correctly, allowing all valid mask sizes. Needed for 50 * migration toward older versions. 51 */ 52 bool allow_all_mask_sizes; 53 /* length of the receive mask */ 54 uint16_t mask_length; 55 }; 56 57 /* return true if any child has event pending set */ 58 static bool event_pending(SCLPEventFacility *ef) 59 { 60 BusChild *kid; 61 SCLPEvent *event; 62 SCLPEventClass *event_class; 63 64 QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) { 65 DeviceState *qdev = kid->child; 66 event = DO_UPCAST(SCLPEvent, qdev, qdev); 67 event_class = SCLP_EVENT_GET_CLASS(event); 68 if (event->event_pending && 69 event_class->get_send_mask() & ef->receive_mask) { 70 return true; 71 } 72 } 73 return false; 74 } 75 76 static sccb_mask_t get_host_send_mask(SCLPEventFacility *ef) 77 { 78 sccb_mask_t mask; 79 BusChild *kid; 80 SCLPEventClass *child; 81 82 mask = 0; 83 84 QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) { 85 DeviceState *qdev = kid->child; 86 child = SCLP_EVENT_GET_CLASS((SCLPEvent *) qdev); 87 mask |= child->get_send_mask(); 88 } 89 return mask; 90 } 91 92 static sccb_mask_t get_host_receive_mask(SCLPEventFacility *ef) 93 { 94 sccb_mask_t mask; 95 BusChild *kid; 96 SCLPEventClass *child; 97 98 mask = 0; 99 100 QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) { 101 DeviceState *qdev = kid->child; 102 child = SCLP_EVENT_GET_CLASS((SCLPEvent *) qdev); 103 mask |= child->get_receive_mask(); 104 } 105 return mask; 106 } 107 108 static uint16_t write_event_length_check(SCCB *sccb) 109 { 110 int slen; 111 unsigned elen = 0; 112 EventBufferHeader *event; 113 WriteEventData *wed = (WriteEventData *) sccb; 114 115 event = (EventBufferHeader *) &wed->ebh; 116 for (slen = sccb_data_len(sccb); slen > 0; slen -= elen) { 117 elen = be16_to_cpu(event->length); 118 if (elen < sizeof(*event) || elen > slen) { 119 return SCLP_RC_EVENT_BUFFER_SYNTAX_ERROR; 120 } 121 event = (void *) event + elen; 122 } 123 if (slen) { 124 return SCLP_RC_INCONSISTENT_LENGTHS; 125 } 126 return SCLP_RC_NORMAL_COMPLETION; 127 } 128 129 static uint16_t handle_write_event_buf(SCLPEventFacility *ef, 130 EventBufferHeader *event_buf, SCCB *sccb) 131 { 132 uint16_t rc; 133 BusChild *kid; 134 SCLPEvent *event; 135 SCLPEventClass *ec; 136 137 rc = SCLP_RC_INVALID_FUNCTION; 138 139 QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) { 140 DeviceState *qdev = kid->child; 141 event = (SCLPEvent *) qdev; 142 ec = SCLP_EVENT_GET_CLASS(event); 143 144 if (ec->write_event_data && 145 ec->can_handle_event(event_buf->type)) { 146 rc = ec->write_event_data(event, event_buf); 147 break; 148 } 149 } 150 return rc; 151 } 152 153 static uint16_t handle_sccb_write_events(SCLPEventFacility *ef, SCCB *sccb) 154 { 155 uint16_t rc; 156 int slen; 157 unsigned elen = 0; 158 EventBufferHeader *event_buf; 159 WriteEventData *wed = (WriteEventData *) sccb; 160 161 event_buf = &wed->ebh; 162 rc = SCLP_RC_NORMAL_COMPLETION; 163 164 /* loop over all contained event buffers */ 165 for (slen = sccb_data_len(sccb); slen > 0; slen -= elen) { 166 elen = be16_to_cpu(event_buf->length); 167 168 /* in case of a previous error mark all trailing buffers 169 * as not accepted */ 170 if (rc != SCLP_RC_NORMAL_COMPLETION) { 171 event_buf->flags &= ~(SCLP_EVENT_BUFFER_ACCEPTED); 172 } else { 173 rc = handle_write_event_buf(ef, event_buf, sccb); 174 } 175 event_buf = (void *) event_buf + elen; 176 } 177 return rc; 178 } 179 180 static void write_event_data(SCLPEventFacility *ef, SCCB *sccb) 181 { 182 if (sccb->h.function_code != SCLP_FC_NORMAL_WRITE) { 183 sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION); 184 goto out; 185 } 186 if (be16_to_cpu(sccb->h.length) < 8) { 187 sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH); 188 goto out; 189 } 190 /* first do a sanity check of the write events */ 191 sccb->h.response_code = cpu_to_be16(write_event_length_check(sccb)); 192 193 /* if no early error, then execute */ 194 if (sccb->h.response_code == be16_to_cpu(SCLP_RC_NORMAL_COMPLETION)) { 195 sccb->h.response_code = 196 cpu_to_be16(handle_sccb_write_events(ef, sccb)); 197 } 198 199 out: 200 return; 201 } 202 203 static uint16_t handle_sccb_read_events(SCLPEventFacility *ef, SCCB *sccb, 204 sccb_mask_t mask) 205 { 206 uint16_t rc; 207 int slen; 208 unsigned elen; 209 BusChild *kid; 210 SCLPEvent *event; 211 SCLPEventClass *ec; 212 EventBufferHeader *event_buf; 213 ReadEventData *red = (ReadEventData *) sccb; 214 215 event_buf = &red->ebh; 216 event_buf->length = 0; 217 slen = sizeof(sccb->data); 218 219 rc = SCLP_RC_NO_EVENT_BUFFERS_STORED; 220 221 QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) { 222 DeviceState *qdev = kid->child; 223 event = (SCLPEvent *) qdev; 224 ec = SCLP_EVENT_GET_CLASS(event); 225 226 if (mask & ec->get_send_mask()) { 227 if (ec->read_event_data(event, event_buf, &slen)) { 228 elen = be16_to_cpu(event_buf->length); 229 event_buf = (EventBufferHeader *) ((char *)event_buf + elen); 230 rc = SCLP_RC_NORMAL_COMPLETION; 231 } 232 } 233 } 234 235 if (sccb->h.control_mask[2] & SCLP_VARIABLE_LENGTH_RESPONSE) { 236 /* architecture suggests to reset variable-length-response bit */ 237 sccb->h.control_mask[2] &= ~SCLP_VARIABLE_LENGTH_RESPONSE; 238 /* with a new length value */ 239 sccb->h.length = cpu_to_be16(SCCB_SIZE - slen); 240 } 241 return rc; 242 } 243 244 /* copy up to src_len bytes and fill the rest of dst with zeroes */ 245 static void copy_mask(uint8_t *dst, uint8_t *src, uint16_t dst_len, 246 uint16_t src_len) 247 { 248 int i; 249 250 for (i = 0; i < dst_len; i++) { 251 dst[i] = i < src_len ? src[i] : 0; 252 } 253 } 254 255 static void read_event_data(SCLPEventFacility *ef, SCCB *sccb) 256 { 257 sccb_mask_t sclp_active_selection_mask; 258 sccb_mask_t sclp_cp_receive_mask; 259 260 ReadEventData *red = (ReadEventData *) sccb; 261 262 if (be16_to_cpu(sccb->h.length) != SCCB_SIZE) { 263 sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH); 264 goto out; 265 } 266 267 sclp_cp_receive_mask = ef->receive_mask; 268 269 /* get active selection mask */ 270 switch (sccb->h.function_code) { 271 case SCLP_UNCONDITIONAL_READ: 272 sclp_active_selection_mask = sclp_cp_receive_mask; 273 break; 274 case SCLP_SELECTIVE_READ: 275 copy_mask((uint8_t *)&sclp_active_selection_mask, (uint8_t *)&red->mask, 276 sizeof(sclp_active_selection_mask), ef->mask_length); 277 sclp_active_selection_mask = be64_to_cpu(sclp_active_selection_mask); 278 if (!sclp_cp_receive_mask || 279 (sclp_active_selection_mask & ~sclp_cp_receive_mask)) { 280 sccb->h.response_code = 281 cpu_to_be16(SCLP_RC_INVALID_SELECTION_MASK); 282 goto out; 283 } 284 break; 285 default: 286 sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION); 287 goto out; 288 } 289 sccb->h.response_code = cpu_to_be16( 290 handle_sccb_read_events(ef, sccb, sclp_active_selection_mask)); 291 292 out: 293 return; 294 } 295 296 static void write_event_mask(SCLPEventFacility *ef, SCCB *sccb) 297 { 298 WriteEventMask *we_mask = (WriteEventMask *) sccb; 299 uint16_t mask_length = be16_to_cpu(we_mask->mask_length); 300 sccb_mask_t tmp_mask; 301 302 if (!mask_length || (mask_length > SCLP_EVENT_MASK_LEN_MAX) || 303 ((mask_length != 4) && !ef->allow_all_mask_sizes)) { 304 sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_MASK_LENGTH); 305 goto out; 306 } 307 308 /* 309 * Note: We currently only support masks up to 8 byte length; 310 * the remainder is filled up with zeroes. Older Linux 311 * kernels use a 4 byte mask length, newer ones can use both 312 * 8 or 4 depending on what is available on the host. 313 */ 314 315 /* keep track of the guest's capability masks */ 316 copy_mask((uint8_t *)&tmp_mask, WEM_CP_RECEIVE_MASK(we_mask, mask_length), 317 sizeof(tmp_mask), mask_length); 318 ef->receive_mask = be64_to_cpu(tmp_mask); 319 320 /* return the SCLP's capability masks to the guest */ 321 tmp_mask = cpu_to_be64(get_host_receive_mask(ef)); 322 copy_mask(WEM_RECEIVE_MASK(we_mask, mask_length), (uint8_t *)&tmp_mask, 323 mask_length, sizeof(tmp_mask)); 324 tmp_mask = cpu_to_be64(get_host_send_mask(ef)); 325 copy_mask(WEM_SEND_MASK(we_mask, mask_length), (uint8_t *)&tmp_mask, 326 mask_length, sizeof(tmp_mask)); 327 328 sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION); 329 ef->mask_length = mask_length; 330 331 out: 332 return; 333 } 334 335 /* qemu object creation and initialization functions */ 336 337 #define TYPE_SCLP_EVENTS_BUS "s390-sclp-events-bus" 338 339 static void sclp_events_bus_realize(BusState *bus, Error **errp) 340 { 341 BusChild *kid; 342 343 /* TODO: recursive realization has to be done in common code */ 344 QTAILQ_FOREACH(kid, &bus->children, sibling) { 345 DeviceState *dev = kid->child; 346 347 object_property_set_bool(OBJECT(dev), true, "realized", errp); 348 if (*errp) { 349 return; 350 } 351 } 352 } 353 354 static void sclp_events_bus_class_init(ObjectClass *klass, void *data) 355 { 356 BusClass *bc = BUS_CLASS(klass); 357 358 bc->realize = sclp_events_bus_realize; 359 } 360 361 static const TypeInfo sclp_events_bus_info = { 362 .name = TYPE_SCLP_EVENTS_BUS, 363 .parent = TYPE_BUS, 364 .class_init = sclp_events_bus_class_init, 365 }; 366 367 static void command_handler(SCLPEventFacility *ef, SCCB *sccb, uint64_t code) 368 { 369 switch (code & SCLP_CMD_CODE_MASK) { 370 case SCLP_CMD_READ_EVENT_DATA: 371 read_event_data(ef, sccb); 372 break; 373 case SCLP_CMD_WRITE_EVENT_DATA: 374 write_event_data(ef, sccb); 375 break; 376 case SCLP_CMD_WRITE_EVENT_MASK: 377 write_event_mask(ef, sccb); 378 break; 379 default: 380 sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND); 381 break; 382 } 383 } 384 385 static bool vmstate_event_facility_mask64_needed(void *opaque) 386 { 387 SCLPEventFacility *ef = opaque; 388 389 return (ef->receive_mask & 0xFFFFFFFF) != 0; 390 } 391 392 static bool vmstate_event_facility_mask_length_needed(void *opaque) 393 { 394 SCLPEventFacility *ef = opaque; 395 396 return ef->allow_all_mask_sizes; 397 } 398 399 static const VMStateDescription vmstate_event_facility_mask64 = { 400 .name = "vmstate-event-facility/mask64", 401 .version_id = 0, 402 .minimum_version_id = 0, 403 .needed = vmstate_event_facility_mask64_needed, 404 .fields = (VMStateField[]) { 405 VMSTATE_UINT32(receive_mask_pieces[RECV_MASK_LOWER], SCLPEventFacility), 406 VMSTATE_END_OF_LIST() 407 } 408 }; 409 410 static const VMStateDescription vmstate_event_facility_mask_length = { 411 .name = "vmstate-event-facility/mask_length", 412 .version_id = 0, 413 .minimum_version_id = 0, 414 .needed = vmstate_event_facility_mask_length_needed, 415 .fields = (VMStateField[]) { 416 VMSTATE_UINT16(mask_length, SCLPEventFacility), 417 VMSTATE_END_OF_LIST() 418 } 419 }; 420 421 static const VMStateDescription vmstate_event_facility = { 422 .name = "vmstate-event-facility", 423 .version_id = 0, 424 .minimum_version_id = 0, 425 .fields = (VMStateField[]) { 426 VMSTATE_UINT32(receive_mask_pieces[RECV_MASK_UPPER], SCLPEventFacility), 427 VMSTATE_END_OF_LIST() 428 }, 429 .subsections = (const VMStateDescription * []) { 430 &vmstate_event_facility_mask64, 431 &vmstate_event_facility_mask_length, 432 NULL 433 } 434 }; 435 436 static void sclp_event_set_allow_all_mask_sizes(Object *obj, bool value, 437 Error **errp) 438 { 439 SCLPEventFacility *ef = (SCLPEventFacility *)obj; 440 441 ef->allow_all_mask_sizes = value; 442 } 443 444 static bool sclp_event_get_allow_all_mask_sizes(Object *obj, Error **e) 445 { 446 SCLPEventFacility *ef = (SCLPEventFacility *)obj; 447 448 return ef->allow_all_mask_sizes; 449 } 450 451 static void init_event_facility(Object *obj) 452 { 453 SCLPEventFacility *event_facility = EVENT_FACILITY(obj); 454 DeviceState *sdev = DEVICE(obj); 455 Object *new; 456 457 event_facility->mask_length = 4; 458 event_facility->allow_all_mask_sizes = true; 459 object_property_add_bool(obj, "allow_all_mask_sizes", 460 sclp_event_get_allow_all_mask_sizes, 461 sclp_event_set_allow_all_mask_sizes, NULL); 462 /* Spawn a new bus for SCLP events */ 463 qbus_create_inplace(&event_facility->sbus, sizeof(event_facility->sbus), 464 TYPE_SCLP_EVENTS_BUS, sdev, NULL); 465 466 new = object_new(TYPE_SCLP_QUIESCE); 467 object_property_add_child(obj, TYPE_SCLP_QUIESCE, new, NULL); 468 object_unref(new); 469 qdev_set_parent_bus(DEVICE(new), &event_facility->sbus.qbus); 470 471 new = object_new(TYPE_SCLP_CPU_HOTPLUG); 472 object_property_add_child(obj, TYPE_SCLP_CPU_HOTPLUG, new, NULL); 473 object_unref(new); 474 qdev_set_parent_bus(DEVICE(new), &event_facility->sbus.qbus); 475 /* the facility will automatically realize the devices via the bus */ 476 } 477 478 static void reset_event_facility(DeviceState *dev) 479 { 480 SCLPEventFacility *sdev = EVENT_FACILITY(dev); 481 482 sdev->receive_mask = 0; 483 } 484 485 static void init_event_facility_class(ObjectClass *klass, void *data) 486 { 487 SysBusDeviceClass *sbdc = SYS_BUS_DEVICE_CLASS(klass); 488 DeviceClass *dc = DEVICE_CLASS(sbdc); 489 SCLPEventFacilityClass *k = EVENT_FACILITY_CLASS(dc); 490 491 dc->reset = reset_event_facility; 492 dc->vmsd = &vmstate_event_facility; 493 set_bit(DEVICE_CATEGORY_MISC, dc->categories); 494 k->command_handler = command_handler; 495 k->event_pending = event_pending; 496 } 497 498 static const TypeInfo sclp_event_facility_info = { 499 .name = TYPE_SCLP_EVENT_FACILITY, 500 .parent = TYPE_SYS_BUS_DEVICE, 501 .instance_init = init_event_facility, 502 .instance_size = sizeof(SCLPEventFacility), 503 .class_init = init_event_facility_class, 504 .class_size = sizeof(SCLPEventFacilityClass), 505 }; 506 507 static void event_realize(DeviceState *qdev, Error **errp) 508 { 509 SCLPEvent *event = SCLP_EVENT(qdev); 510 SCLPEventClass *child = SCLP_EVENT_GET_CLASS(event); 511 512 if (child->init) { 513 int rc = child->init(event); 514 if (rc < 0) { 515 error_setg(errp, "SCLP event initialization failed."); 516 return; 517 } 518 } 519 } 520 521 static void event_class_init(ObjectClass *klass, void *data) 522 { 523 DeviceClass *dc = DEVICE_CLASS(klass); 524 525 dc->bus_type = TYPE_SCLP_EVENTS_BUS; 526 dc->realize = event_realize; 527 } 528 529 static const TypeInfo sclp_event_type_info = { 530 .name = TYPE_SCLP_EVENT, 531 .parent = TYPE_DEVICE, 532 .instance_size = sizeof(SCLPEvent), 533 .class_init = event_class_init, 534 .class_size = sizeof(SCLPEventClass), 535 .abstract = true, 536 }; 537 538 static void register_types(void) 539 { 540 type_register_static(&sclp_events_bus_info); 541 type_register_static(&sclp_event_facility_info); 542 type_register_static(&sclp_event_type_info); 543 } 544 545 type_init(register_types) 546 547 BusState *sclp_get_event_facility_bus(void) 548 { 549 Object *busobj; 550 SCLPEventsBus *sbus; 551 552 busobj = object_resolve_path_type("", TYPE_SCLP_EVENTS_BUS, NULL); 553 sbus = OBJECT_CHECK(SCLPEventsBus, busobj, TYPE_SCLP_EVENTS_BUS); 554 if (!sbus) { 555 return NULL; 556 } 557 558 return &sbus->qbus; 559 } 560