/*
 * SCLP Support
 *
 * Copyright IBM, Corp. 2012
 *
 * Authors:
 *  Christian Borntraeger <borntraeger@de.ibm.com>
 *  Heinz Graalfs <graalfs@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/memory.h"
#include "sysemu/sysemu.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/event-facility.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"

static inline SCLPDevice *get_sclp_device(void)
{
    static SCLPDevice *sclp;

    if (!sclp) {
        sclp = SCLP(object_resolve_path_type("", TYPE_SCLP, NULL));
    }
    return sclp;
}

static void prepare_cpu_entries(SCLPDevice *sclp, CPUEntry *entry, int count)
{
    uint8_t features[SCCB_CPU_FEATURE_LEN] = { 0 };
    int i;

    s390_get_feat_block(S390_FEAT_TYPE_SCLP_CPU, features);
    for (i = 0; i < count; i++) {
        entry[i].address = i;
        entry[i].type = 0;
        memcpy(entry[i].features, features, sizeof(entry[i].features));
    }
}

/* Provide information about the configuration, CPUs and storage */
static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadInfo *read_info = (ReadInfo *) sccb;
    MachineState *machine = MACHINE(qdev_get_machine());
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    CPUState *cpu;
    int cpu_count = 0;
    int rnsize, rnmax;
    int slots = MIN(machine->ram_slots, s390_get_memslot_count());
    IplParameterBlock *ipib = s390_ipl_get_iplb();

    CPU_FOREACH(cpu) {
        cpu_count++;
    }

    /* CPU information */
    read_info->entries_cpu = cpu_to_be16(cpu_count);
    read_info->offset_cpu = cpu_to_be16(offsetof(ReadInfo, entries));
    read_info->highest_cpu = cpu_to_be16(max_cpus);

    read_info->ibc_val = cpu_to_be32(s390_get_ibc_val());

    /* Configuration Characteristic (Extension) */
    s390_get_feat_block(S390_FEAT_TYPE_SCLP_CONF_CHAR,
                        read_info->conf_char);
    s390_get_feat_block(S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT,
                        read_info->conf_char_ext);

    prepare_cpu_entries(sclp, read_info->entries, cpu_count);

    read_info->facilities = cpu_to_be64(SCLP_HAS_CPU_INFO |
                                        SCLP_HAS_IOA_RECONFIG);

    /* Memory Hotplug is only supported for the ccw machine type */
    if (mhd) {
        mhd->standby_subregion_size = MEM_SECTION_SIZE;
        /* Deduct the memory slot already used for core */
        if (slots > 0) {
            while ((mhd->standby_subregion_size * (slots - 1)
                    < mhd->standby_mem_size)) {
                mhd->standby_subregion_size = mhd->standby_subregion_size << 1;
            }
        }
        /*
         * Initialize mapping of guest standby memory sections indicating which
         * are and are not online. Assume all standby memory begins offline.
         */
        if (mhd->standby_state_map == 0) {
            if (mhd->standby_mem_size % mhd->standby_subregion_size) {
                mhd->standby_state_map = g_malloc0((mhd->standby_mem_size /
                                             mhd->standby_subregion_size + 1) *
                                             (mhd->standby_subregion_size /
                                              MEM_SECTION_SIZE));
            } else {
                mhd->standby_state_map = g_malloc0(mhd->standby_mem_size /
                                                   MEM_SECTION_SIZE);
            }
        }
        mhd->padded_ram_size = ram_size + mhd->pad_size;
        mhd->rzm = 1 << mhd->increment_size;

        read_info->facilities |= cpu_to_be64(SCLP_FC_ASSIGN_ATTACH_READ_STOR);
    }
    read_info->mha_pow = s390_get_mha_pow();
    read_info->hmfai = cpu_to_be32(s390_get_hmfai());
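
    /*
     * Report the storage increment size (in MB) and the number of
     * increments. Increment sizes above 128 MB and increment counts of
     * 64K or more do not fit into the original rnsize/rnmax fields and
     * are reported through rnsize2/rnmax2 instead.
     */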
    rnsize = 1 << (sclp->increment_size - 20);
    if (rnsize <= 128) {
        read_info->rnsize = rnsize;
    } else {
        read_info->rnsize = 0;
        read_info->rnsize2 = cpu_to_be32(rnsize);
    }

    rnmax = machine->maxram_size >> sclp->increment_size;
    if (rnmax < 0x10000) {
        read_info->rnmax = cpu_to_be16(rnmax);
    } else {
        read_info->rnmax = cpu_to_be16(0);
        read_info->rnmax2 = cpu_to_be64(rnmax);
    }

    if (ipib && ipib->flags & DIAG308_FLAGS_LP_VALID) {
        memcpy(&read_info->loadparm, &ipib->loadparm,
               sizeof(read_info->loadparm));
    } else {
        s390_ipl_set_loadparm(read_info->loadparm);
    }

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

static void read_storage_element0_info(SCLPDevice *sclp, SCCB *sccb)
{
    int i, assigned;
    int subincrement_id = SCLP_STARTING_SUBINCREMENT_ID;
    ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if ((ram_size >> mhd->increment_size) >= 0x10000) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
        return;
    }

    /* Return information regarding core memory */
    storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
    assigned = ram_size >> mhd->increment_size;
    storage_info->assigned = cpu_to_be16(assigned);

    for (i = 0; i < assigned; i++) {
        storage_info->entries[i] = cpu_to_be32(subincrement_id);
        subincrement_id += SCLP_INCREMENT_UNIT;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}
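
/* Provide information about storage element 1, which holds the standby memory */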
static void read_storage_element1_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if ((mhd->standby_mem_size >> mhd->increment_size) >= 0x10000) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
        return;
    }

    /* Return information regarding standby memory */
    storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
    storage_info->assigned = cpu_to_be16(mhd->standby_mem_size >>
                                         mhd->increment_size);
    storage_info->standby = cpu_to_be16(mhd->standby_mem_size >>
                                        mhd->increment_size);
    sccb->h.response_code = cpu_to_be16(SCLP_RC_STANDBY_READ_COMPLETION);
}

static void attach_storage_element(SCLPDevice *sclp, SCCB *sccb,
                                   uint16_t element)
{
    int i, assigned, subincrement_id;
    AttachStorageElement *attach_info = (AttachStorageElement *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if (element != 1) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    assigned = mhd->standby_mem_size >> mhd->increment_size;
    attach_info->assigned = cpu_to_be16(assigned);
    subincrement_id = ((ram_size >> mhd->increment_size) << 16)
                      + SCLP_STARTING_SUBINCREMENT_ID;
    for (i = 0; i < assigned; i++) {
        attach_info->entries[i] = cpu_to_be32(subincrement_id);
        subincrement_id += SCLP_INCREMENT_UNIT;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}
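
/*
 * Assign a standby storage increment: create (or reuse) the RAM subregion
 * that backs it and mark its memory section as no longer in standby.
 */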
static void assign_storage(SCLPDevice *sclp, SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    uint64_t this_subregion_size;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    ram_addr_t assign_addr;
    MemoryRegion *sysmem = get_system_memory();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }
    assign_addr = (assign_info->rn - 1) * mhd->rzm;

    if ((assign_addr % MEM_SECTION_SIZE == 0) &&
        (assign_addr >= mhd->padded_ram_size)) {
        /* Re-use existing memory region if found */
        mr = memory_region_find(sysmem, assign_addr, 1).mr;
        memory_region_unref(mr);
        if (!mr) {

            MemoryRegion *standby_ram = g_new(MemoryRegion, 1);

            /* offset to align to standby_subregion_size for allocation */
            ram_addr_t offset = assign_addr -
                                (assign_addr - mhd->padded_ram_size)
                                % mhd->standby_subregion_size;

            /* strlen("standby.ram") + 4 (Max of KVM_MEMORY_SLOTS) + NULL */
            char id[16];
            snprintf(id, 16, "standby.ram%d",
                     (int)((offset - mhd->padded_ram_size) /
                     mhd->standby_subregion_size) + 1);

            /* Allocate a subregion of the calculated standby_subregion_size */
            if (offset + mhd->standby_subregion_size >
                mhd->padded_ram_size + mhd->standby_mem_size) {
                this_subregion_size = mhd->padded_ram_size +
                                      mhd->standby_mem_size - offset;
            } else {
                this_subregion_size = mhd->standby_subregion_size;
            }

            memory_region_init_ram(standby_ram, NULL, id, this_subregion_size,
                                   &error_fatal);
            /* This is a hack to make memory hotunplug work again. Once we have
             * subdevices, we have to unparent them when unassigning memory,
             * instead of doing it via the ref count of the MemoryRegion. */
            object_ref(OBJECT(standby_ram));
            object_unparent(OBJECT(standby_ram));
            memory_region_add_subregion(sysmem, offset, standby_ram);
        }
        /* The specified subregion is no longer in standby */
        mhd->standby_state_map[(assign_addr - mhd->padded_ram_size)
                               / MEM_SECTION_SIZE] = 1;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

static void unassign_storage(SCLPDevice *sclp, SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    ram_addr_t unassign_addr;
    MemoryRegion *sysmem = get_system_memory();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }
    unassign_addr = (assign_info->rn - 1) * mhd->rzm;

    /* if the addr is a multiple of 256 MB */
    if ((unassign_addr % MEM_SECTION_SIZE == 0) &&
        (unassign_addr >= mhd->padded_ram_size)) {
        mhd->standby_state_map[(unassign_addr -
                                mhd->padded_ram_size) / MEM_SECTION_SIZE] = 0;

        /* find the specified memory region and destroy it */
        mr = memory_region_find(sysmem, unassign_addr, 1).mr;
        memory_region_unref(mr);
        if (mr) {
            int i;
            int is_removable = 1;
            ram_addr_t map_offset = (unassign_addr - mhd->padded_ram_size -
                                     (unassign_addr - mhd->padded_ram_size)
                                     % mhd->standby_subregion_size);
            /* Mark all affected subregions as 'standby' once again */
            for (i = 0;
                 i < (mhd->standby_subregion_size / MEM_SECTION_SIZE);
                 i++) {

                if (mhd->standby_state_map[i + map_offset / MEM_SECTION_SIZE]) {
                    is_removable = 0;
                    break;
                }
            }
            if (is_removable) {
                memory_region_del_subregion(sysmem, mr);
                object_unref(OBJECT(mr));
            }
        }
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

/* Provide information about the CPU */
static void sclp_read_cpu_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadCpuInfo *cpu_info = (ReadCpuInfo *) sccb;
    CPUState *cpu;
    int cpu_count = 0;

    CPU_FOREACH(cpu) {
        cpu_count++;
    }

    cpu_info->nr_configured = cpu_to_be16(cpu_count);
    cpu_info->offset_configured = cpu_to_be16(offsetof(ReadCpuInfo, entries));
    cpu_info->nr_standby = cpu_to_be16(0);

    /* The standby offset is 16-byte for each CPU */
    cpu_info->offset_standby = cpu_to_be16(cpu_info->offset_configured
        + cpu_info->nr_configured*sizeof(CPUEntry));

    prepare_cpu_entries(sclp, cpu_info->entries, cpu_count);

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

static void sclp_configure_io_adapter(SCLPDevice *sclp, SCCB *sccb,
                                      bool configure)
{
    int rc;

    if (be16_to_cpu(sccb->h.length) < 16) {
        rc = SCLP_RC_INSUFFICIENT_SCCB_LENGTH;
        goto out_err;
    }

    switch (((IoaCfgSccb *)sccb)->atype) {
    case SCLP_RECONFIG_PCI_ATYPE:
        if (s390_has_feat(S390_FEAT_ZPCI)) {
            if (configure) {
                s390_pci_sclp_configure(sccb);
            } else {
                s390_pci_sclp_deconfigure(sccb);
            }
            return;
        }
        /* fallthrough */
    default:
        rc = SCLP_RC_ADAPTER_TYPE_NOT_RECOGNIZED;
    }

out_err:
    sccb->h.response_code = cpu_to_be16(rc);
}
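
/*
 * Dispatch an SCLP command to its handler. For the storage commands, bits
 * 8-15 of the command word select the storage element; unrecognized
 * commands are forwarded to the event facility.
 */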
static void sclp_execute(SCLPDevice *sclp, SCCB *sccb, uint32_t code)
{
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
    SCLPEventFacility *ef = sclp->event_facility;
    SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);

    switch (code & SCLP_CMD_CODE_MASK) {
    case SCLP_CMDW_READ_SCP_INFO:
    case SCLP_CMDW_READ_SCP_INFO_FORCED:
        sclp_c->read_SCP_info(sclp, sccb);
        break;
    case SCLP_CMDW_READ_CPU_INFO:
        sclp_c->read_cpu_info(sclp, sccb);
        break;
    case SCLP_READ_STORAGE_ELEMENT_INFO:
        if (code & 0xff00) {
            sclp_c->read_storage_element1_info(sclp, sccb);
        } else {
            sclp_c->read_storage_element0_info(sclp, sccb);
        }
        break;
    case SCLP_ATTACH_STORAGE_ELEMENT:
        sclp_c->attach_storage_element(sclp, sccb, (code & 0xff00) >> 8);
        break;
    case SCLP_ASSIGN_STORAGE:
        sclp_c->assign_storage(sclp, sccb);
        break;
    case SCLP_UNASSIGN_STORAGE:
        sclp_c->unassign_storage(sclp, sccb);
        break;
    case SCLP_CMDW_CONFIGURE_IOA:
        sclp_configure_io_adapter(sclp, sccb, true);
        break;
    case SCLP_CMDW_DECONFIGURE_IOA:
        sclp_configure_io_adapter(sclp, sccb, false);
        break;
    default:
        efc->command_handler(ef, sccb, code);
        break;
    }
}

int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code)
{
    SCLPDevice *sclp = get_sclp_device();
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
    int r = 0;
    SCCB work_sccb;

    hwaddr sccb_len = sizeof(SCCB);

    /* first some basic checks on program checks */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        r = -PGM_PRIVILEGED;
        goto out;
    }
    if (cpu_physical_memory_is_io(sccb)) {
        r = -PGM_ADDRESSING;
        goto out;
    }
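    /*
     * An SCCB that lies in the lowcore or the current prefix area, is not
     * doubleword aligned, or sits at or above 2 GB is rejected with a
     * specification exception.
     */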
    if ((sccb & ~0x1fffUL) == 0 || (sccb & ~0x1fffUL) == env->psa
        || (sccb & ~0x7ffffff8UL) != 0) {
        r = -PGM_SPECIFICATION;
        goto out;
    }

    /*
     * we want to work on a private copy of the sccb, to prevent guests
     * from playing dirty tricks by modifying the memory content after
     * the host has checked the values
     */
    cpu_physical_memory_read(sccb, &work_sccb, sccb_len);

    /* Valid sccb sizes */
    if (be16_to_cpu(work_sccb.h.length) < sizeof(SCCBHeader) ||
        be16_to_cpu(work_sccb.h.length) > SCCB_SIZE) {
        r = -PGM_SPECIFICATION;
        goto out;
    }

    sclp_c->execute(sclp, &work_sccb, code);

    cpu_physical_memory_write(sccb, &work_sccb,
                              be16_to_cpu(work_sccb.h.length));

    sclp_c->service_interrupt(sclp, sccb);

out:
    return r;
}

static void service_interrupt(SCLPDevice *sclp, uint32_t sccb)
{
    SCLPEventFacility *ef = sclp->event_facility;
    SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);

    uint32_t param = sccb & ~3;

    /* Indicate whether an event is still pending */
    param |= efc->event_pending(ef) ? 1 : 0;

    if (!param) {
        /* No need to send an interrupt, there's nothing to be notified about */
        return;
    }
    s390_sclp_extint(param);
}

void sclp_service_interrupt(uint32_t sccb)
{
    SCLPDevice *sclp = get_sclp_device();
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);

    sclp_c->service_interrupt(sclp, sccb);
}

/* qemu object creation and initialization functions */

void s390_sclp_init(void)
{
    Object *new = object_new(TYPE_SCLP);

    object_property_add_child(qdev_get_machine(), TYPE_SCLP, new,
                              NULL);
    object_unref(OBJECT(new));
    qdev_init_nofail(DEVICE(new));
}

static void sclp_realize(DeviceState *dev, Error **errp)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    SCLPDevice *sclp = SCLP(dev);
    Error *err = NULL;
    uint64_t hw_limit;
    int ret;

    object_property_set_bool(OBJECT(sclp->event_facility), true, "realized",
                             &err);
    if (err) {
        goto out;
    }
    /*
     * qdev_device_add searches the sysbus for TYPE_SCLP_EVENTS_BUS. As long
     * as we can't find a fitting bus via the qom tree, we have to add the
     * event facility to the sysbus, so e.g. a sclp console can be created.
     */
    qdev_set_parent_bus(DEVICE(sclp->event_facility), sysbus_get_default());

    ret = s390_set_memory_limit(machine->maxram_size, &hw_limit);
    if (ret == -E2BIG) {
        error_setg(&err, "host supports a maximum of %" PRIu64 " GB",
                   hw_limit >> 30);
    } else if (ret) {
        error_setg(&err, "setting the guest size failed");
    }

out:
    error_propagate(errp, err);
}
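
/*
 * Determine the storage increment size, align the core and standby memory
 * sizes to it and record any padding needed to reach a MEM_SECTION_SIZE
 * boundary for the memory hotplug device.
 */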
static void sclp_memory_init(SCLPDevice *sclp)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    ram_addr_t initial_mem = machine->ram_size;
    ram_addr_t max_mem = machine->maxram_size;
    ram_addr_t standby_mem = max_mem - initial_mem;
    ram_addr_t pad_mem = 0;
    int increment_size = 20;

    /* The storage increment size is a multiple of 1M and is a power of 2.
     * The number of storage increments must be MAX_STORAGE_INCREMENTS or fewer.
     * The variable 'increment_size' is an exponent of 2 that can be
     * used to calculate the size (in bytes) of an increment. */
    while ((initial_mem >> increment_size) > MAX_STORAGE_INCREMENTS) {
        increment_size++;
    }
    if (machine->ram_slots) {
        while ((standby_mem >> increment_size) > MAX_STORAGE_INCREMENTS) {
            increment_size++;
        }
    }
    sclp->increment_size = increment_size;

    /* The core and standby memory areas need to be aligned with
     * the increment size. In effect, this can cause the
     * user-specified memory size to be rounded down to align
     * with the nearest increment boundary. */
    initial_mem = initial_mem >> increment_size << increment_size;
    standby_mem = standby_mem >> increment_size << increment_size;

    /* If the size of ram is not on a MEM_SECTION_SIZE boundary,
       calculate the pad size necessary to force this boundary. */
    if (machine->ram_slots && standby_mem) {
        sclpMemoryHotplugDev *mhd = init_sclp_memory_hotplug_dev();

        if (initial_mem % MEM_SECTION_SIZE) {
            pad_mem = MEM_SECTION_SIZE - initial_mem % MEM_SECTION_SIZE;
        }
        mhd->increment_size = increment_size;
        mhd->pad_size = pad_mem;
        mhd->standby_mem_size = standby_mem;
    }
    machine->ram_size = initial_mem;
    machine->maxram_size = initial_mem + pad_mem + standby_mem;
    /* let's propagate the changed ram size into the global variable. */
    ram_size = initial_mem;
}

static void sclp_init(Object *obj)
{
    SCLPDevice *sclp = SCLP(obj);
    Object *new;

    new = object_new(TYPE_SCLP_EVENT_FACILITY);
    object_property_add_child(obj, TYPE_SCLP_EVENT_FACILITY, new, NULL);
    object_unref(new);
    sclp->event_facility = EVENT_FACILITY(new);

    sclp_memory_init(sclp);
}

static void sclp_class_init(ObjectClass *oc, void *data)
{
    SCLPDeviceClass *sc = SCLP_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->desc = "SCLP (Service-Call Logical Processor)";
    dc->realize = sclp_realize;
    dc->hotpluggable = false;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);

    sc->read_SCP_info = read_SCP_info;
    sc->read_storage_element0_info = read_storage_element0_info;
    sc->read_storage_element1_info = read_storage_element1_info;
    sc->attach_storage_element = attach_storage_element;
    sc->assign_storage = assign_storage;
    sc->unassign_storage = unassign_storage;
    sc->read_cpu_info = sclp_read_cpu_info;
    sc->execute = sclp_execute;
    sc->service_interrupt = service_interrupt;
}

static TypeInfo sclp_info = {
    .name = TYPE_SCLP,
    .parent = TYPE_DEVICE,
    .instance_init = sclp_init,
    .instance_size = sizeof(SCLPDevice),
    .class_init = sclp_class_init,
    .class_size = sizeof(SCLPDeviceClass),
};

sclpMemoryHotplugDev *init_sclp_memory_hotplug_dev(void)
{
    DeviceState *dev;
    dev = qdev_create(NULL, TYPE_SCLP_MEMORY_HOTPLUG_DEV);
    object_property_add_child(qdev_get_machine(),
                              TYPE_SCLP_MEMORY_HOTPLUG_DEV,
                              OBJECT(dev), NULL);
    qdev_init_nofail(dev);
    return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
                                   TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
}

sclpMemoryHotplugDev *get_sclp_memory_hotplug_dev(void)
{
    return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
                                   TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
}

static void sclp_memory_hotplug_dev_class_init(ObjectClass *klass,
                                               void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}

static TypeInfo sclp_memory_hotplug_dev_info = {
    .name = TYPE_SCLP_MEMORY_HOTPLUG_DEV,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(sclpMemoryHotplugDev),
    .class_init = sclp_memory_hotplug_dev_class_init,
};

static void register_types(void)
{
    type_register_static(&sclp_memory_hotplug_dev_info);
    type_register_static(&sclp_info);
}
type_init(register_types);