/*
 * SCLP
 *    Event Facility
 *       handles SCLP event types
 *          - Signal Quiesce - system power down
 *          - ASCII Console Data - VT220 read and write
 *
 * Copyright IBM, Corp. 2012
 *
 * Authors:
 *  Heinz Graalfs <graalfs@de.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "sysemu/sysemu.h"

#include "hw/s390x/sclp.h"
#include "hw/s390x/event-facility.h"

typedef struct SCLPEventsBus {
    BusState qbus;
} SCLPEventsBus;

/* we need to save 32 bit chunks for compatibility */
#ifdef HOST_WORDS_BIGENDIAN
#define RECV_MASK_LOWER 1
#define RECV_MASK_UPPER 0
#else /* little endian host */
#define RECV_MASK_LOWER 0
#define RECV_MASK_UPPER 1
#endif

struct SCLPEventFacility {
    SysBusDevice parent_obj;
    SCLPEventsBus sbus;
    /* guest's receive mask */
    union {
        uint32_t receive_mask_pieces[2];
        sccb_mask_t receive_mask;
    };
    /*
     * when false, we keep the same broken, backwards compatible behaviour as
     * before, allowing only masks of size exactly 4; when true, we implement
     * the architecture correctly, allowing all valid mask sizes. Needed for
     * migration toward older versions.
     */
    bool allow_all_mask_sizes;
    /* length of the receive mask */
    uint16_t mask_length;
};
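
/*
 * For illustration: sccb_mask_t is a 64-bit type here, so the union above
 * lets the same storage be viewed either as one 64-bit receive mask or as
 * two 32-bit pieces for migration.  On a little-endian host the low half
 * of the mask sits in receive_mask_pieces[0], on a big-endian host in
 * receive_mask_pieces[1]; RECV_MASK_LOWER/RECV_MASK_UPPER hide that
 * difference, e.g.:
 *
 *     ef->receive_mask = 0x1234567890abcdefULL;
 *     ef->receive_mask_pieces[RECV_MASK_LOWER]   == 0x90abcdef
 *     ef->receive_mask_pieces[RECV_MASK_UPPER]   == 0x12345678
 *
 * The vmstate descriptions at the end of this file migrate the two pieces
 * separately, which keeps the stream compatible with versions that only
 * knew a 32-bit receive mask.
 */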

/* return true if any child has event pending set */
static bool event_pending(SCLPEventFacility *ef)
{
    BusChild *kid;
    SCLPEvent *event;
    SCLPEventClass *event_class;

    QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        event = DO_UPCAST(SCLPEvent, qdev, qdev);
        event_class = SCLP_EVENT_GET_CLASS(event);
        if (event->event_pending &&
            event_class->get_send_mask() & ef->receive_mask) {
            return true;
        }
    }
    return false;
}

static sccb_mask_t get_host_send_mask(SCLPEventFacility *ef)
{
    sccb_mask_t mask;
    BusChild *kid;
    SCLPEventClass *child;

    mask = 0;

    QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        child = SCLP_EVENT_GET_CLASS((SCLPEvent *) qdev);
        mask |= child->get_send_mask();
    }
    return mask;
}

static sccb_mask_t get_host_receive_mask(SCLPEventFacility *ef)
{
    sccb_mask_t mask;
    BusChild *kid;
    SCLPEventClass *child;

    mask = 0;

    QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        child = SCLP_EVENT_GET_CLASS((SCLPEvent *) qdev);
        mask |= child->get_receive_mask();
    }
    return mask;
}

static uint16_t write_event_length_check(SCCB *sccb)
{
    int slen;
    unsigned elen = 0;
    EventBufferHeader *event;
    WriteEventData *wed = (WriteEventData *) sccb;

    event = (EventBufferHeader *) &wed->ebh;
    for (slen = sccb_data_len(sccb); slen > 0; slen -= elen) {
        elen = be16_to_cpu(event->length);
        if (elen < sizeof(*event) || elen > slen) {
            return SCLP_RC_EVENT_BUFFER_SYNTAX_ERROR;
        }
        event = (void *) event + elen;
    }
    if (slen) {
        return SCLP_RC_INCONSISTENT_LENGTHS;
    }
    return SCLP_RC_NORMAL_COMPLETION;
}

static uint16_t handle_write_event_buf(SCLPEventFacility *ef,
                                       EventBufferHeader *event_buf, SCCB *sccb)
{
    uint16_t rc;
    BusChild *kid;
    SCLPEvent *event;
    SCLPEventClass *ec;

    rc = SCLP_RC_INVALID_FUNCTION;

    QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        event = (SCLPEvent *) qdev;
        ec = SCLP_EVENT_GET_CLASS(event);

        if (ec->write_event_data &&
            ec->can_handle_event(event_buf->type)) {
            rc = ec->write_event_data(event, event_buf);
            break;
        }
    }
    return rc;
}

static uint16_t handle_sccb_write_events(SCLPEventFacility *ef, SCCB *sccb)
{
    uint16_t rc;
    int slen;
    unsigned elen = 0;
    EventBufferHeader *event_buf;
    WriteEventData *wed = (WriteEventData *) sccb;

    event_buf = &wed->ebh;
    rc = SCLP_RC_NORMAL_COMPLETION;

    /* loop over all contained event buffers */
    for (slen = sccb_data_len(sccb); slen > 0; slen -= elen) {
        elen = be16_to_cpu(event_buf->length);

        /* in case of a previous error mark all trailing buffers
         * as not accepted */
        if (rc != SCLP_RC_NORMAL_COMPLETION) {
            event_buf->flags &= ~(SCLP_EVENT_BUFFER_ACCEPTED);
        } else {
            rc = handle_write_event_buf(ef, event_buf, sccb);
        }
        event_buf = (void *) event_buf + elen;
    }
    return rc;
}

static void write_event_data(SCLPEventFacility *ef, SCCB *sccb)
{
    if (sccb->h.function_code != SCLP_FC_NORMAL_WRITE) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION);
        goto out;
    }
    if (be16_to_cpu(sccb->h.length) < 8) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH);
        goto out;
    }
    /* first do a sanity check of the write events */
    sccb->h.response_code = cpu_to_be16(write_event_length_check(sccb));

    /* if no early error, then execute */
    if (sccb->h.response_code == be16_to_cpu(SCLP_RC_NORMAL_COMPLETION)) {
        sccb->h.response_code =
            cpu_to_be16(handle_sccb_write_events(ef, sccb));
    }

out:
    return;
}
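
/*
 * For illustration, the Write Event Data SCCB processed above carries a
 * chain of event buffers after the SCCB header, each introduced by an
 * EventBufferHeader whose 16-bit length (stored big-endian) covers header
 * plus payload:
 *
 *     +-------------+--------------------+--------------------+----
 *     | SCCB header | EventBufferHeader  | EventBufferHeader  | ...
 *     |             | + event data       | + event data       |
 *     +-------------+--------------------+--------------------+----
 *
 * write_event_length_check() only verifies that the chained lengths add
 * up to sccb_data_len(); handle_sccb_write_events() then walks the same
 * chain and offers each buffer to the first child device whose class
 * reports can_handle_event() for the buffer's type.
 */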

static uint16_t handle_sccb_read_events(SCLPEventFacility *ef, SCCB *sccb,
                                        sccb_mask_t mask)
{
    uint16_t rc;
    int slen;
    unsigned elen;
    BusChild *kid;
    SCLPEvent *event;
    SCLPEventClass *ec;
    EventBufferHeader *event_buf;
    ReadEventData *red = (ReadEventData *) sccb;

    event_buf = &red->ebh;
    event_buf->length = 0;
    slen = sizeof(sccb->data);

    rc = SCLP_RC_NO_EVENT_BUFFERS_STORED;

    QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        event = (SCLPEvent *) qdev;
        ec = SCLP_EVENT_GET_CLASS(event);

        if (mask & ec->get_send_mask()) {
            if (ec->read_event_data(event, event_buf, &slen)) {
                elen = be16_to_cpu(event_buf->length);
                event_buf = (EventBufferHeader *) ((char *)event_buf + elen);
                rc = SCLP_RC_NORMAL_COMPLETION;
            }
        }
    }

    if (sccb->h.control_mask[2] & SCLP_VARIABLE_LENGTH_RESPONSE) {
        /* architecture suggests to reset variable-length-response bit */
        sccb->h.control_mask[2] &= ~SCLP_VARIABLE_LENGTH_RESPONSE;
        /* with a new length value */
        sccb->h.length = cpu_to_be16(SCCB_SIZE - slen);
    }
    return rc;
}

/* copy up to src_len bytes and fill the rest of dst with zeroes */
static void copy_mask(uint8_t *dst, uint8_t *src, uint16_t dst_len,
                      uint16_t src_len)
{
    int i;

    for (i = 0; i < dst_len; i++) {
        dst[i] = i < src_len ? src[i] : 0;
    }
}

static void read_event_data(SCLPEventFacility *ef, SCCB *sccb)
{
    sccb_mask_t sclp_active_selection_mask;
    sccb_mask_t sclp_cp_receive_mask;

    ReadEventData *red = (ReadEventData *) sccb;

    if (be16_to_cpu(sccb->h.length) != SCCB_SIZE) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH);
        goto out;
    }

    sclp_cp_receive_mask = ef->receive_mask;

    /* get active selection mask */
    switch (sccb->h.function_code) {
    case SCLP_UNCONDITIONAL_READ:
        sclp_active_selection_mask = sclp_cp_receive_mask;
        break;
    case SCLP_SELECTIVE_READ:
        copy_mask((uint8_t *)&sclp_active_selection_mask, (uint8_t *)&red->mask,
                  sizeof(sclp_active_selection_mask), ef->mask_length);
        sclp_active_selection_mask = be64_to_cpu(sclp_active_selection_mask);
        if (!sclp_cp_receive_mask ||
            (sclp_active_selection_mask & ~sclp_cp_receive_mask)) {
            sccb->h.response_code =
                cpu_to_be16(SCLP_RC_INVALID_SELECTION_MASK);
            goto out;
        }
        break;
    default:
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION);
        goto out;
    }
    sccb->h.response_code = cpu_to_be16(
            handle_sccb_read_events(ef, sccb, sclp_active_selection_mask));

out:
    return;
}

static void write_event_mask(SCLPEventFacility *ef, SCCB *sccb)
{
    WriteEventMask *we_mask = (WriteEventMask *) sccb;
    uint16_t mask_length = be16_to_cpu(we_mask->mask_length);
    sccb_mask_t tmp_mask;

    if (!mask_length || (mask_length > SCLP_EVENT_MASK_LEN_MAX) ||
        ((mask_length != 4) && !ef->allow_all_mask_sizes)) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_MASK_LENGTH);
        goto out;
    }

    /*
     * Note: We currently only support masks up to 8 byte length;
     *       the remainder is filled up with zeroes. Older Linux
     *       kernels use a 4 byte mask length, newer ones can use both
     *       8 or 4 depending on what is available on the host.
     */

    /* keep track of the guest's capability masks */
    copy_mask((uint8_t *)&tmp_mask, WEM_CP_RECEIVE_MASK(we_mask, mask_length),
              sizeof(tmp_mask), mask_length);
    ef->receive_mask = be64_to_cpu(tmp_mask);

    /* return the SCLP's capability masks to the guest */
    tmp_mask = cpu_to_be64(get_host_receive_mask(ef));
    copy_mask(WEM_RECEIVE_MASK(we_mask, mask_length), (uint8_t *)&tmp_mask,
              mask_length, sizeof(tmp_mask));
    tmp_mask = cpu_to_be64(get_host_send_mask(ef));
    copy_mask(WEM_SEND_MASK(we_mask, mask_length), (uint8_t *)&tmp_mask,
              mask_length, sizeof(tmp_mask));

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
    ef->mask_length = mask_length;

out:
    return;
}

/* qemu object creation and initialization functions */

#define TYPE_SCLP_EVENTS_BUS "s390-sclp-events-bus"

static void sclp_events_bus_realize(BusState *bus, Error **errp)
{
    BusChild *kid;

    /* TODO: recursive realization has to be done in common code */
    QTAILQ_FOREACH(kid, &bus->children, sibling) {
        DeviceState *dev = kid->child;

        object_property_set_bool(OBJECT(dev), true, "realized", errp);
        if (*errp) {
            return;
        }
    }
}

static void sclp_events_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bc = BUS_CLASS(klass);

    bc->realize = sclp_events_bus_realize;
}

static const TypeInfo sclp_events_bus_info = {
    .name = TYPE_SCLP_EVENTS_BUS,
    .parent = TYPE_BUS,
    .class_init = sclp_events_bus_class_init,
};

static void command_handler(SCLPEventFacility *ef, SCCB *sccb, uint64_t code)
{
    switch (code & SCLP_CMD_CODE_MASK) {
    case SCLP_CMD_READ_EVENT_DATA:
        read_event_data(ef, sccb);
        break;
    case SCLP_CMD_WRITE_EVENT_DATA:
        write_event_data(ef, sccb);
        break;
    case SCLP_CMD_WRITE_EVENT_MASK:
        write_event_mask(ef, sccb);
        break;
    default:
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        break;
    }
}

static bool vmstate_event_facility_mask64_needed(void *opaque)
{
    SCLPEventFacility *ef = opaque;

    return (ef->receive_mask & 0xFFFFFFFF) != 0;
}

static bool vmstate_event_facility_mask_length_needed(void *opaque)
{
    SCLPEventFacility *ef = opaque;

    return ef->allow_all_mask_sizes;
}

static const VMStateDescription vmstate_event_facility_mask64 = {
    .name = "vmstate-event-facility/mask64",
    .version_id = 0,
    .minimum_version_id = 0,
    .needed = vmstate_event_facility_mask64_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(receive_mask_pieces[RECV_MASK_LOWER], SCLPEventFacility),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_event_facility_mask_length = {
    .name = "vmstate-event-facility/mask_length",
    .version_id = 0,
    .minimum_version_id = 0,
    .needed = vmstate_event_facility_mask_length_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(mask_length, SCLPEventFacility),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_event_facility = {
    .name = "vmstate-event-facility",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(receive_mask_pieces[RECV_MASK_UPPER], SCLPEventFacility),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_event_facility_mask64,
        &vmstate_event_facility_mask_length,
        NULL
    }
};
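
/*
 * Migration layout, for orientation: the base "vmstate-event-facility"
 * section carries only the upper 32-bit piece of the receive mask, which
 * is what older QEMU versions migrated as the whole mask.  The subsections
 * extend this without breaking migration to such versions: "mask64" adds
 * the lower 32 bits and is only sent when they are non-zero, and
 * "mask_length" adds the guest's chosen mask length and is only sent when
 * allow_all_mask_sizes is set (compat machine types can clear it via the
 * "allow_all_mask_sizes" property below).
 */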

static void sclp_event_set_allow_all_mask_sizes(Object *obj, bool value,
                                                Error **errp)
{
    SCLPEventFacility *ef = (SCLPEventFacility *)obj;

    ef->allow_all_mask_sizes = value;
}

static bool sclp_event_get_allow_all_mask_sizes(Object *obj, Error **e)
{
    SCLPEventFacility *ef = (SCLPEventFacility *)obj;

    return ef->allow_all_mask_sizes;
}

static void init_event_facility(Object *obj)
{
    SCLPEventFacility *event_facility = EVENT_FACILITY(obj);
    DeviceState *sdev = DEVICE(obj);
    Object *new;

    event_facility->mask_length = 4;
    event_facility->allow_all_mask_sizes = true;
    object_property_add_bool(obj, "allow_all_mask_sizes",
                             sclp_event_get_allow_all_mask_sizes,
                             sclp_event_set_allow_all_mask_sizes, NULL);
    /* Spawn a new bus for SCLP events */
    qbus_create_inplace(&event_facility->sbus, sizeof(event_facility->sbus),
                        TYPE_SCLP_EVENTS_BUS, sdev, NULL);

    new = object_new(TYPE_SCLP_QUIESCE);
    object_property_add_child(obj, TYPE_SCLP_QUIESCE, new, NULL);
    object_unref(new);
    qdev_set_parent_bus(DEVICE(new), BUS(&event_facility->sbus));

    new = object_new(TYPE_SCLP_CPU_HOTPLUG);
    object_property_add_child(obj, TYPE_SCLP_CPU_HOTPLUG, new, NULL);
    object_unref(new);
    qdev_set_parent_bus(DEVICE(new), BUS(&event_facility->sbus));
    /* the facility will automatically realize the devices via the bus */
}

static void reset_event_facility(DeviceState *dev)
{
    SCLPEventFacility *sdev = EVENT_FACILITY(dev);

    sdev->receive_mask = 0;
}

static void init_event_facility_class(ObjectClass *klass, void *data)
{
    SysBusDeviceClass *sbdc = SYS_BUS_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(sbdc);
    SCLPEventFacilityClass *k = EVENT_FACILITY_CLASS(dc);

    dc->reset = reset_event_facility;
    dc->vmsd = &vmstate_event_facility;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    k->command_handler = command_handler;
    k->event_pending = event_pending;
}

static const TypeInfo sclp_event_facility_info = {
    .name = TYPE_SCLP_EVENT_FACILITY,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = init_event_facility,
    .instance_size = sizeof(SCLPEventFacility),
    .class_init = init_event_facility_class,
    .class_size = sizeof(SCLPEventFacilityClass),
};

static void event_realize(DeviceState *qdev, Error **errp)
{
    SCLPEvent *event = SCLP_EVENT(qdev);
    SCLPEventClass *child = SCLP_EVENT_GET_CLASS(event);

    if (child->init) {
        int rc = child->init(event);
        if (rc < 0) {
            error_setg(errp, "SCLP event initialization failed.");
            return;
        }
    }
}

static void event_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->bus_type = TYPE_SCLP_EVENTS_BUS;
    dc->realize = event_realize;
}

static const TypeInfo sclp_event_type_info = {
    .name = TYPE_SCLP_EVENT,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SCLPEvent),
    .class_init = event_class_init,
    .class_size = sizeof(SCLPEventClass),
    .abstract = true,
};

static void register_types(void)
{
    type_register_static(&sclp_events_bus_info);
    type_register_static(&sclp_event_facility_info);
    type_register_static(&sclp_event_type_info);
}

type_init(register_types)
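
/*
 * init_event_facility() above creates one SCLP events bus; the helper
 * below resolves it by QOM type so that other code (for example the
 * machine setup that creates the ASCII console event device) can attach
 * further SCLPEvent devices to it.
 */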

BusState *sclp_get_event_facility_bus(void)
{
    Object *busobj;
    SCLPEventsBus *sbus;

    busobj = object_resolve_path_type("", TYPE_SCLP_EVENTS_BUS, NULL);
    sbus = OBJECT_CHECK(SCLPEventsBus, busobj, TYPE_SCLP_EVENTS_BUS);
    if (!sbus) {
        return NULL;
    }

    return &sbus->qbus;
}
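
/*
 * Rough usage sketch for the helper above, using the qdev APIs of this
 * QEMU version; TYPE_MY_SCLP_EVENT stands for a hypothetical concrete
 * SCLPEvent subtype and is not defined anywhere in this file:
 *
 *     DeviceState *dev;
 *
 *     dev = qdev_create(sclp_get_event_facility_bus(), TYPE_MY_SCLP_EVENT);
 *     qdev_init_nofail(dev);
 */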