/*
 * SCLP Support
 *
 * Copyright IBM, Corp. 2012
 *
 * Authors:
 *  Christian Borntraeger <borntraeger@de.ibm.com>
 *  Heinz Graalfs <graalfs@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version.  See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/memory.h"
#include "sysemu/sysemu.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/event-facility.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"

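/*
 * Resolve the single SCLP device from the QOM composition tree and cache
 * the pointer, so subsequent SCLP calls do not repeat the path lookup.
 */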
static inline SCLPDevice *get_sclp_device(void)
{
    static SCLPDevice *sclp;

    if (!sclp) {
        sclp = SCLP(object_resolve_path_type("", TYPE_SCLP, NULL));
    }
    return sclp;
}

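/*
 * Fill one CPUEntry per existing CPU, exposing the SCLP CPU feature block
 * for each of them.  Possible-but-not-present CPUs are skipped; *count
 * returns the number of entries actually written.
 */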
static void prepare_cpu_entries(SCLPDevice *sclp, CPUEntry *entry, int *count)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    uint8_t features[SCCB_CPU_FEATURE_LEN] = { 0 };
    int i;

    s390_get_feat_block(S390_FEAT_TYPE_SCLP_CPU, features);
    for (i = 0, *count = 0; i < ms->possible_cpus->len; i++) {
        if (!ms->possible_cpus->cpus[i].cpu) {
            continue;
        }
        entry[*count].address = ms->possible_cpus->cpus[i].arch_id;
        entry[*count].type = 0;
        memcpy(entry[*count].features, features, sizeof(features));
        (*count)++;
    }
}

/* Provide information about the configuration, CPUs and storage */
static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadInfo *read_info = (ReadInfo *) sccb;
    MachineState *machine = MACHINE(qdev_get_machine());
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    int cpu_count;
    int rnsize, rnmax;
    int slots = MIN(machine->ram_slots, s390_get_memslot_count());
    IplParameterBlock *ipib = s390_ipl_get_iplb();

    /* CPU information */
    prepare_cpu_entries(sclp, read_info->entries, &cpu_count);
    read_info->entries_cpu = cpu_to_be16(cpu_count);
    read_info->offset_cpu = cpu_to_be16(offsetof(ReadInfo, entries));
    read_info->highest_cpu = cpu_to_be16(max_cpus);

    read_info->ibc_val = cpu_to_be32(s390_get_ibc_val());

    /* Configuration Characteristic (Extension) */
    s390_get_feat_block(S390_FEAT_TYPE_SCLP_CONF_CHAR,
                         read_info->conf_char);
    s390_get_feat_block(S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT,
                         read_info->conf_char_ext);

    read_info->facilities = cpu_to_be64(SCLP_HAS_CPU_INFO |
                                        SCLP_HAS_IOA_RECONFIG);

    /* Memory Hotplug is only supported for the ccw machine type */
    if (mhd) {
        mhd->standby_subregion_size = MEM_SECTION_SIZE;
        /* Deduct the memory slot already used for core */
        if (slots > 0) {
            while ((mhd->standby_subregion_size * (slots - 1)
                    < mhd->standby_mem_size)) {
                mhd->standby_subregion_size = mhd->standby_subregion_size << 1;
            }
        }
        /*
         * Initialize mapping of guest standby memory sections indicating which
         * are and are not online. Assume all standby memory begins offline.
         */
        if (mhd->standby_state_map == 0) {
            if (mhd->standby_mem_size % mhd->standby_subregion_size) {
                mhd->standby_state_map = g_malloc0((mhd->standby_mem_size /
                                             mhd->standby_subregion_size + 1) *
                                             (mhd->standby_subregion_size /
                                             MEM_SECTION_SIZE));
            } else {
                mhd->standby_state_map = g_malloc0(mhd->standby_mem_size /
                                                   MEM_SECTION_SIZE);
            }
        }
        mhd->padded_ram_size = ram_size + mhd->pad_size;
        mhd->rzm = 1 << mhd->increment_size;

        read_info->facilities |= cpu_to_be64(SCLP_FC_ASSIGN_ATTACH_READ_STOR);
    }
    read_info->mha_pow = s390_get_mha_pow();
    read_info->hmfai = cpu_to_be32(s390_get_hmfai());

    rnsize = 1 << (sclp->increment_size - 20);
    if (rnsize <= 128) {
        read_info->rnsize = rnsize;
    } else {
        read_info->rnsize = 0;
        read_info->rnsize2 = cpu_to_be32(rnsize);
    }

    rnmax = machine->maxram_size >> sclp->increment_size;
    if (rnmax < 0x10000) {
        read_info->rnmax = cpu_to_be16(rnmax);
    } else {
        read_info->rnmax = cpu_to_be16(0);
        read_info->rnmax2 = cpu_to_be64(rnmax);
    }

    if (ipib && ipib->flags & DIAG308_FLAGS_LP_VALID) {
        memcpy(&read_info->loadparm, &ipib->loadparm,
               sizeof(read_info->loadparm));
    } else {
        s390_ipl_set_loadparm(read_info->loadparm);
    }

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

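/*
 * Report storage element 0: the increments that make up the initial (core)
 * memory, one subincrement id per assigned increment.
 */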
static void read_storage_element0_info(SCLPDevice *sclp, SCCB *sccb)
{
    int i, assigned;
    int subincrement_id = SCLP_STARTING_SUBINCREMENT_ID;
    ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if ((ram_size >> mhd->increment_size) >= 0x10000) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
        return;
    }

    /* Return information regarding core memory */
    storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
    assigned = ram_size >> mhd->increment_size;
    storage_info->assigned = cpu_to_be16(assigned);

    for (i = 0; i < assigned; i++) {
        storage_info->entries[i] = cpu_to_be32(subincrement_id);
        subincrement_id += SCLP_INCREMENT_UNIT;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

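/* Report storage element 1: the standby memory increments, if any. */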
static void read_storage_element1_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if ((mhd->standby_mem_size >> mhd->increment_size) >= 0x10000) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
        return;
    }

    /* Return information regarding standby memory */
    storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
    storage_info->assigned = cpu_to_be16(mhd->standby_mem_size >>
                                         mhd->increment_size);
    storage_info->standby = cpu_to_be16(mhd->standby_mem_size >>
                                        mhd->increment_size);
    sccb->h.response_code = cpu_to_be16(SCLP_RC_STANDBY_READ_COMPLETION);
}

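/*
 * Attach the standby storage element (only element 1 is supported) and
 * return the subincrement ids that follow the core memory increments.
 */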
static void attach_storage_element(SCLPDevice *sclp, SCCB *sccb,
                                   uint16_t element)
{
    int i, assigned, subincrement_id;
    AttachStorageElement *attach_info = (AttachStorageElement *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if (element != 1) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    assigned = mhd->standby_mem_size >> mhd->increment_size;
    attach_info->assigned = cpu_to_be16(assigned);
    subincrement_id = ((ram_size >> mhd->increment_size) << 16)
                      + SCLP_STARTING_SUBINCREMENT_ID;
    for (i = 0; i < assigned; i++) {
        attach_info->entries[i] = cpu_to_be32(subincrement_id);
        subincrement_id += SCLP_INCREMENT_UNIT;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

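/*
 * Assign a standby storage increment to the configuration: allocate and map
 * the containing standby RAM subregion on first use, and mark the affected
 * memory section as no longer being in standby.
 */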
static void assign_storage(SCLPDevice *sclp, SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    uint64_t this_subregion_size;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    ram_addr_t assign_addr;
    MemoryRegion *sysmem = get_system_memory();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }
    assign_addr = (assign_info->rn - 1) * mhd->rzm;

    if ((assign_addr % MEM_SECTION_SIZE == 0) &&
        (assign_addr >= mhd->padded_ram_size)) {
        /* Re-use existing memory region if found */
        mr = memory_region_find(sysmem, assign_addr, 1).mr;
        memory_region_unref(mr);
        if (!mr) {

            MemoryRegion *standby_ram = g_new(MemoryRegion, 1);

            /* offset to align to standby_subregion_size for allocation */
            ram_addr_t offset = assign_addr -
                                (assign_addr - mhd->padded_ram_size)
                                % mhd->standby_subregion_size;

            /* strlen("standby.ram") + 4 (Max of KVM_MEMORY_SLOTS) + NULL */
            char id[16];
            snprintf(id, 16, "standby.ram%d",
                     (int)((offset - mhd->padded_ram_size) /
                     mhd->standby_subregion_size) + 1);

            /* Allocate a subregion of the calculated standby_subregion_size */
            if (offset + mhd->standby_subregion_size >
                mhd->padded_ram_size + mhd->standby_mem_size) {
                this_subregion_size = mhd->padded_ram_size +
                  mhd->standby_mem_size - offset;
            } else {
                this_subregion_size = mhd->standby_subregion_size;
            }

            memory_region_init_ram(standby_ram, NULL, id, this_subregion_size,
                                   &error_fatal);
            /* This is a hack to make memory hotunplug work again. Once we have
             * subdevices, we have to unparent them when unassigning memory,
             * instead of doing it via the ref count of the MemoryRegion. */
            object_ref(OBJECT(standby_ram));
            object_unparent(OBJECT(standby_ram));
            memory_region_add_subregion(sysmem, offset, standby_ram);
        }
        /* The specified subregion is no longer in standby */
        mhd->standby_state_map[(assign_addr - mhd->padded_ram_size)
                               / MEM_SECTION_SIZE] = 1;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

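/*
 * Return a storage increment to standby: mark its section as offline and
 * remove the containing RAM subregion once none of its sections remain
 * assigned.
 */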
static void unassign_storage(SCLPDevice *sclp, SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    ram_addr_t unassign_addr;
    MemoryRegion *sysmem = get_system_memory();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }
    unassign_addr = (assign_info->rn - 1) * mhd->rzm;

    /* if the addr is a multiple of 256 MB */
    if ((unassign_addr % MEM_SECTION_SIZE == 0) &&
        (unassign_addr >= mhd->padded_ram_size)) {
        mhd->standby_state_map[(unassign_addr -
                           mhd->padded_ram_size) / MEM_SECTION_SIZE] = 0;

        /* find the specified memory region and destroy it */
        mr = memory_region_find(sysmem, unassign_addr, 1).mr;
        memory_region_unref(mr);
        if (mr) {
            int i;
            int is_removable = 1;
            ram_addr_t map_offset = (unassign_addr - mhd->padded_ram_size -
                                     (unassign_addr - mhd->padded_ram_size)
                                     % mhd->standby_subregion_size);
            /* Mark all affected subregions as 'standby' once again */
            for (i = 0;
                 i < (mhd->standby_subregion_size / MEM_SECTION_SIZE);
                 i++) {

                if (mhd->standby_state_map[i + map_offset / MEM_SECTION_SIZE]) {
                    is_removable = 0;
                    break;
                }
            }
            if (is_removable) {
                memory_region_del_subregion(sysmem, mr);
                object_unref(OBJECT(mr));
            }
        }
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

/* Provide information about the CPU */
static void sclp_read_cpu_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadCpuInfo *cpu_info = (ReadCpuInfo *) sccb;
    int cpu_count;

    prepare_cpu_entries(sclp, cpu_info->entries, &cpu_count);
    cpu_info->nr_configured = cpu_to_be16(cpu_count);
    cpu_info->offset_configured = cpu_to_be16(offsetof(ReadCpuInfo, entries));
    cpu_info->nr_standby = cpu_to_be16(0);

    /* The standby entries start right after the configured ones;
     * each CPUEntry is 16 bytes. */
    cpu_info->offset_standby = cpu_to_be16(cpu_info->offset_configured
        + cpu_info->nr_configured*sizeof(CPUEntry));

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

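/*
 * (De)configure an I/O adapter.  Only the PCI adapter type is recognized,
 * and only when the zPCI facility is available to the guest.
 */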
static void sclp_configure_io_adapter(SCLPDevice *sclp, SCCB *sccb,
                                      bool configure)
{
    int rc;

    if (be16_to_cpu(sccb->h.length) < 16) {
        rc = SCLP_RC_INSUFFICIENT_SCCB_LENGTH;
        goto out_err;
    }

    switch (((IoaCfgSccb *)sccb)->atype) {
    case SCLP_RECONFIG_PCI_ATYPE:
        if (s390_has_feat(S390_FEAT_ZPCI)) {
            if (configure) {
                s390_pci_sclp_configure(sccb);
            } else {
                s390_pci_sclp_deconfigure(sccb);
            }
            return;
        }
        /* fallthrough */
    default:
        rc = SCLP_RC_ADAPTER_TYPE_NOT_RECOGNIZED;
    }

 out_err:
    sccb->h.response_code = cpu_to_be16(rc);
}

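/* Dispatch an SCLP command to its handler; unknown commands are passed on
 * to the event facility. */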
static void sclp_execute(SCLPDevice *sclp, SCCB *sccb, uint32_t code)
{
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
    SCLPEventFacility *ef = sclp->event_facility;
    SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);

    switch (code & SCLP_CMD_CODE_MASK) {
    case SCLP_CMDW_READ_SCP_INFO:
    case SCLP_CMDW_READ_SCP_INFO_FORCED:
        sclp_c->read_SCP_info(sclp, sccb);
        break;
    case SCLP_CMDW_READ_CPU_INFO:
        sclp_c->read_cpu_info(sclp, sccb);
        break;
    case SCLP_READ_STORAGE_ELEMENT_INFO:
        if (code & 0xff00) {
            sclp_c->read_storage_element1_info(sclp, sccb);
        } else {
            sclp_c->read_storage_element0_info(sclp, sccb);
        }
        break;
    case SCLP_ATTACH_STORAGE_ELEMENT:
        sclp_c->attach_storage_element(sclp, sccb, (code & 0xff00) >> 8);
        break;
    case SCLP_ASSIGN_STORAGE:
        sclp_c->assign_storage(sclp, sccb);
        break;
    case SCLP_UNASSIGN_STORAGE:
        sclp_c->unassign_storage(sclp, sccb);
        break;
    case SCLP_CMDW_CONFIGURE_IOA:
        sclp_configure_io_adapter(sclp, sccb, true);
        break;
    case SCLP_CMDW_DECONFIGURE_IOA:
        sclp_configure_io_adapter(sclp, sccb, false);
        break;
    default:
        efc->command_handler(ef, sccb, code);
        break;
    }
}

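/*
 * Entry point for the SERVICE CALL instruction: validate the SCCB address,
 * run the command on a private copy of the SCCB, write the result back to
 * guest memory and post the service-signal interrupt.
 */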
int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code)
{
    SCLPDevice *sclp = get_sclp_device();
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
    int r = 0;
    SCCB work_sccb;

    hwaddr sccb_len = sizeof(SCCB);

    /* first the basic checks that can raise a program exception */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        r = -PGM_PRIVILEGED;
        goto out;
    }
    if (cpu_physical_memory_is_io(sccb)) {
        r = -PGM_ADDRESSING;
        goto out;
    }
    if ((sccb & ~0x1fffUL) == 0 || (sccb & ~0x1fffUL) == env->psa
        || (sccb & ~0x7ffffff8UL) != 0) {
        r = -PGM_SPECIFICATION;
        goto out;
    }

    /*
     * we want to work on a private copy of the sccb, to prevent guests
     * from playing dirty tricks by modifying the memory content after
     * the host has checked the values
     */
    cpu_physical_memory_read(sccb, &work_sccb, sccb_len);

    /* Valid sccb sizes */
    if (be16_to_cpu(work_sccb.h.length) < sizeof(SCCBHeader) ||
        be16_to_cpu(work_sccb.h.length) > SCCB_SIZE) {
        r = -PGM_SPECIFICATION;
        goto out;
    }

    sclp_c->execute(sclp, &work_sccb, code);

    cpu_physical_memory_write(sccb, &work_sccb,
                              be16_to_cpu(work_sccb.h.length));

    sclp_c->service_interrupt(sclp, sccb);

out:
    return r;
}

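/*
 * Post a service-signal external interrupt for the given SCCB address,
 * flagging in the parameter whether an event is still pending.
 */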
static void service_interrupt(SCLPDevice *sclp, uint32_t sccb)
{
    SCLPEventFacility *ef = sclp->event_facility;
    SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);

    uint32_t param = sccb & ~3;

    /* Indicate whether an event is still pending */
    param |= efc->event_pending(ef) ? 1 : 0;

    if (!param) {
        /* No need to send an interrupt, there's nothing to be notified about */
        return;
    }
    s390_sclp_extint(param);
}

void sclp_service_interrupt(uint32_t sccb)
{
    SCLPDevice *sclp = get_sclp_device();
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);

    sclp_c->service_interrupt(sclp, sccb);
}

/* qemu object creation and initialization functions */

void s390_sclp_init(void)
{
    Object *new = object_new(TYPE_SCLP);

    object_property_add_child(qdev_get_machine(), TYPE_SCLP, new,
                              NULL);
    object_unref(OBJECT(new));
    qdev_init_nofail(DEVICE(new));
}

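/* Realize the SCLP device: realize the event facility and set the maximum
 * guest memory size based on the machine's maxram_size. */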
static void sclp_realize(DeviceState *dev, Error **errp)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    SCLPDevice *sclp = SCLP(dev);
    Error *err = NULL;
    uint64_t hw_limit;
    int ret;

    object_property_set_bool(OBJECT(sclp->event_facility), true, "realized",
                             &err);
    if (err) {
        goto out;
    }
    /*
     * qdev_device_add searches the sysbus for TYPE_SCLP_EVENTS_BUS. As long
     * as we can't find a fitting bus via the qom tree, we have to add the
     * event facility to the sysbus, so e.g. a sclp console can be created.
     */
    qdev_set_parent_bus(DEVICE(sclp->event_facility), sysbus_get_default());

    ret = s390_set_memory_limit(machine->maxram_size, &hw_limit);
    if (ret == -E2BIG) {
        error_setg(&err, "host supports a maximum of %" PRIu64 " GB",
                   hw_limit >> 30);
    } else if (ret) {
        error_setg(&err, "setting the guest size failed");
    }

out:
    error_propagate(errp, err);
}

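/*
 * Derive the storage increment size from the configured memory sizes,
 * align the core and standby memory to it, and record the standby layout
 * in the memory hotplug device when standby memory is configured.
 */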
static void sclp_memory_init(SCLPDevice *sclp)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    ram_addr_t initial_mem = machine->ram_size;
    ram_addr_t max_mem = machine->maxram_size;
    ram_addr_t standby_mem = max_mem - initial_mem;
    ram_addr_t pad_mem = 0;
    int increment_size = 20;

    /* The storage increment size is a multiple of 1M and is a power of 2.
     * The number of storage increments must be MAX_STORAGE_INCREMENTS or fewer.
     * The variable 'increment_size' is an exponent of 2 that can be
     * used to calculate the size (in bytes) of an increment. */
    while ((initial_mem >> increment_size) > MAX_STORAGE_INCREMENTS) {
        increment_size++;
    }
    if (machine->ram_slots) {
        while ((standby_mem >> increment_size) > MAX_STORAGE_INCREMENTS) {
            increment_size++;
        }
    }
    sclp->increment_size = increment_size;

    /* The core and standby memory areas need to be aligned with
     * the increment size.  In effect, this can cause the
     * user-specified memory size to be rounded down to align
     * with the nearest increment boundary. */
    initial_mem = initial_mem >> increment_size << increment_size;
    standby_mem = standby_mem >> increment_size << increment_size;

    /* If the size of ram is not on a MEM_SECTION_SIZE boundary,
       calculate the pad size necessary to force this boundary. */
    if (machine->ram_slots && standby_mem) {
        sclpMemoryHotplugDev *mhd = init_sclp_memory_hotplug_dev();

        if (initial_mem % MEM_SECTION_SIZE) {
            pad_mem = MEM_SECTION_SIZE - initial_mem % MEM_SECTION_SIZE;
        }
        mhd->increment_size = increment_size;
        mhd->pad_size = pad_mem;
        mhd->standby_mem_size = standby_mem;
    }
    machine->ram_size = initial_mem;
    machine->maxram_size = initial_mem + pad_mem + standby_mem;
    /* let's propagate the changed ram size into the global variable. */
    ram_size = initial_mem;
}

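/* Instance init: create the event facility as a child object and set up
 * the memory layout. */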
static void sclp_init(Object *obj)
{
    SCLPDevice *sclp = SCLP(obj);
    Object *new;

    new = object_new(TYPE_SCLP_EVENT_FACILITY);
    object_property_add_child(obj, TYPE_SCLP_EVENT_FACILITY, new, NULL);
    object_unref(new);
    sclp->event_facility = EVENT_FACILITY(new);

    sclp_memory_init(sclp);
}

static void sclp_class_init(ObjectClass *oc, void *data)
{
    SCLPDeviceClass *sc = SCLP_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->desc = "SCLP (Service-Call Logical Processor)";
    dc->realize = sclp_realize;
    dc->hotpluggable = false;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    /*
     * Reason: Creates TYPE_SCLP_EVENT_FACILITY in sclp_init
     * which is a non-pluggable sysbus device
     */
    dc->user_creatable = false;

    sc->read_SCP_info = read_SCP_info;
    sc->read_storage_element0_info = read_storage_element0_info;
    sc->read_storage_element1_info = read_storage_element1_info;
    sc->attach_storage_element = attach_storage_element;
    sc->assign_storage = assign_storage;
    sc->unassign_storage = unassign_storage;
    sc->read_cpu_info = sclp_read_cpu_info;
    sc->execute = sclp_execute;
    sc->service_interrupt = service_interrupt;
}

static TypeInfo sclp_info = {
    .name = TYPE_SCLP,
    .parent = TYPE_DEVICE,
    .instance_init = sclp_init,
    .instance_size = sizeof(SCLPDevice),
    .class_init = sclp_class_init,
    .class_size = sizeof(SCLPDeviceClass),
};

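/* Create the memory hotplug device, attach it to the machine object and
 * return it. */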
sclpMemoryHotplugDev *init_sclp_memory_hotplug_dev(void)
{
    DeviceState *dev;
    dev = qdev_create(NULL, TYPE_SCLP_MEMORY_HOTPLUG_DEV);
    object_property_add_child(qdev_get_machine(),
                              TYPE_SCLP_MEMORY_HOTPLUG_DEV,
                              OBJECT(dev), NULL);
    qdev_init_nofail(dev);
    return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
                                   TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
}

sclpMemoryHotplugDev *get_sclp_memory_hotplug_dev(void)
{
    return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
                                   TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
}

static void sclp_memory_hotplug_dev_class_init(ObjectClass *klass,
                                               void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}

static TypeInfo sclp_memory_hotplug_dev_info = {
    .name = TYPE_SCLP_MEMORY_HOTPLUG_DEV,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(sclpMemoryHotplugDev),
    .class_init = sclp_memory_hotplug_dev_class_init,
};

static void register_types(void)
{
    type_register_static(&sclp_memory_hotplug_dev_info);
    type_register_static(&sclp_info);
}
type_init(register_types);