xref: /openbmc/qemu/hw/s390x/sclp.c (revision f2cab7f1480b20adf32e13aa79be1fe225ba7790)
1 /*
2  * SCLP Support
3  *
4  * Copyright IBM, Corp. 2012
5  *
6  * Authors:
7  *  Christian Borntraeger <borntraeger@de.ibm.com>
8  *  Heinz Graalfs <graalfs@linux.vnet.ibm.com>
9  *
10  * This work is licensed under the terms of the GNU GPL, version 2 or (at your
11  * option) any later version.  See the COPYING file in the top-level directory.
12  *
13  */
14 
15 #include "qemu/osdep.h"
16 #include "qapi/error.h"
17 #include "cpu.h"
18 #include "sysemu/kvm.h"
19 #include "exec/memory.h"
20 #include "sysemu/sysemu.h"
21 #include "exec/address-spaces.h"
22 #include "hw/boards.h"
23 #include "hw/s390x/sclp.h"
24 #include "hw/s390x/event-facility.h"
25 #include "hw/s390x/s390-pci-bus.h"
26 
27 static inline SCLPDevice *get_sclp_device(void)
28 {
29     static SCLPDevice *sclp;
30 
31     if (!sclp) {
32         sclp = SCLP(object_resolve_path_type("", TYPE_SCLP, NULL));
33     }
34     return sclp;
35 }
36 
/* Provide information about the configuration, CPUs and storage */
static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadInfo *read_info = (ReadInfo *) sccb;
    MachineState *machine = MACHINE(qdev_get_machine());
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    CPUState *cpu;
    int cpu_count = 0;
    int i = 0;
    int rnsize, rnmax;
    /* Never report more memory slots than the host (KVM) can back. */
    int slots = MIN(machine->ram_slots, s390_get_memslot_count(kvm_state));

    CPU_FOREACH(cpu) {
        cpu_count++;
    }

    /* CPU information */
    read_info->entries_cpu = cpu_to_be16(cpu_count);
    read_info->offset_cpu = cpu_to_be16(offsetof(ReadInfo, entries));
    read_info->highest_cpu = cpu_to_be16(max_cpus);

    /* NOTE(review): assumes CPU addresses are contiguous 0..cpu_count-1;
     * confirm this still holds if CPU hotplug can ever leave gaps. */
    for (i = 0; i < cpu_count; i++) {
        read_info->entries[i].address = i;
        read_info->entries[i].type = 0;
    }

    read_info->facilities = cpu_to_be64(SCLP_HAS_CPU_INFO |
                                        SCLP_HAS_PCI_RECONFIG);

    /* Memory Hotplug is only supported for the ccw machine type */
    if (mhd) {
        mhd->standby_subregion_size = MEM_SECTION_SIZE;
        /* Deduct the memory slot already used for core */
        if (slots > 0) {
            /* Double the subregion size until the whole standby area fits
             * into the remaining (slots - 1) memory slots. */
            while ((mhd->standby_subregion_size * (slots - 1)
                    < mhd->standby_mem_size)) {
                mhd->standby_subregion_size = mhd->standby_subregion_size << 1;
            }
        }
        /*
         * Initialize mapping of guest standby memory sections indicating which
         * are and are not online. Assume all standby memory begins offline.
         */
        if (mhd->standby_state_map == 0) {
            if (mhd->standby_mem_size % mhd->standby_subregion_size) {
                /* Round up to a whole number of subregions so the map also
                 * covers the partial subregion at the end. One state byte
                 * per MEM_SECTION_SIZE section. */
                mhd->standby_state_map = g_malloc0((mhd->standby_mem_size /
                                             mhd->standby_subregion_size + 1) *
                                             (mhd->standby_subregion_size /
                                             MEM_SECTION_SIZE));
            } else {
                mhd->standby_state_map = g_malloc0(mhd->standby_mem_size /
                                                   MEM_SECTION_SIZE);
            }
        }
        /* Standby memory begins after core memory plus alignment padding. */
        mhd->padded_ram_size = ram_size + mhd->pad_size;
        mhd->rzm = 1 << mhd->increment_size;

        read_info->facilities |= cpu_to_be64(SCLP_FC_ASSIGN_ATTACH_READ_STOR);
    }

    /* Increment (RN) size in MB: small values go in the 1-byte rnsize
     * field, larger ones in the 4-byte rnsize2 field with rnsize zeroed. */
    rnsize = 1 << (sclp->increment_size - 20);
    if (rnsize <= 128) {
        read_info->rnsize = rnsize;
    } else {
        read_info->rnsize = 0;
        read_info->rnsize2 = cpu_to_be32(rnsize);
    }

    /* Likewise for the increment count: counts that fit in 16 bits use
     * rnmax, larger counts use the 8-byte rnmax2 field. */
    rnmax = machine->maxram_size >> sclp->increment_size;
    if (rnmax < 0x10000) {
        read_info->rnmax = cpu_to_be16(rnmax);
    } else {
        read_info->rnmax = cpu_to_be16(0);
        read_info->rnmax2 = cpu_to_be64(rnmax);
    }

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}
115 
116 static void read_storage_element0_info(SCLPDevice *sclp, SCCB *sccb)
117 {
118     int i, assigned;
119     int subincrement_id = SCLP_STARTING_SUBINCREMENT_ID;
120     ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
121     sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
122 
123     if (!mhd) {
124         sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
125         return;
126     }
127 
128     if ((ram_size >> mhd->increment_size) >= 0x10000) {
129         sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
130         return;
131     }
132 
133     /* Return information regarding core memory */
134     storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
135     assigned = ram_size >> mhd->increment_size;
136     storage_info->assigned = cpu_to_be16(assigned);
137 
138     for (i = 0; i < assigned; i++) {
139         storage_info->entries[i] = cpu_to_be32(subincrement_id);
140         subincrement_id += SCLP_INCREMENT_UNIT;
141     }
142     sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
143 }
144 
145 static void read_storage_element1_info(SCLPDevice *sclp, SCCB *sccb)
146 {
147     ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
148     sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
149 
150     if (!mhd) {
151         sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
152         return;
153     }
154 
155     if ((mhd->standby_mem_size >> mhd->increment_size) >= 0x10000) {
156         sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
157         return;
158     }
159 
160     /* Return information regarding standby memory */
161     storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
162     storage_info->assigned = cpu_to_be16(mhd->standby_mem_size >>
163                                          mhd->increment_size);
164     storage_info->standby = cpu_to_be16(mhd->standby_mem_size >>
165                                         mhd->increment_size);
166     sccb->h.response_code = cpu_to_be16(SCLP_RC_STANDBY_READ_COMPLETION);
167 }
168 
169 static void attach_storage_element(SCLPDevice *sclp, SCCB *sccb,
170                                    uint16_t element)
171 {
172     int i, assigned, subincrement_id;
173     AttachStorageElement *attach_info = (AttachStorageElement *) sccb;
174     sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
175 
176     if (!mhd) {
177         sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
178         return;
179     }
180 
181     if (element != 1) {
182         sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
183         return;
184     }
185 
186     assigned = mhd->standby_mem_size >> mhd->increment_size;
187     attach_info->assigned = cpu_to_be16(assigned);
188     subincrement_id = ((ram_size >> mhd->increment_size) << 16)
189                       + SCLP_STARTING_SUBINCREMENT_ID;
190     for (i = 0; i < assigned; i++) {
191         attach_info->entries[i] = cpu_to_be32(subincrement_id);
192         subincrement_id += SCLP_INCREMENT_UNIT;
193     }
194     sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
195 }
196 
/* Bring a standby storage increment online (SCLP ASSIGN STORAGE),
 * creating the backing RAM subregion on first use. */
static void assign_storage(SCLPDevice *sclp, SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    uint64_t this_subregion_size;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    ram_addr_t assign_addr;
    MemoryRegion *sysmem = get_system_memory();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }
    /* Increment numbers (RNs) are 1-based. */
    assign_addr = (assign_info->rn - 1) * mhd->rzm;

    /* Only act on section-aligned addresses inside the standby area;
     * anything below padded_ram_size is already backed by core memory. */
    if ((assign_addr % MEM_SECTION_SIZE == 0) &&
        (assign_addr >= mhd->padded_ram_size)) {
        /* Re-use existing memory region if found */
        mr = memory_region_find(sysmem, assign_addr, 1).mr;
        memory_region_unref(mr);
        if (!mr) {

            MemoryRegion *standby_ram = g_new(MemoryRegion, 1);

            /* offset to align to standby_subregion_size for allocation */
            ram_addr_t offset = assign_addr -
                                (assign_addr - mhd->padded_ram_size)
                                % mhd->standby_subregion_size;

            /* strlen("standby.ram") + 4 (Max of KVM_MEMORY_SLOTS) +  NULL */
            char id[16];
            snprintf(id, 16, "standby.ram%d",
                     (int)((offset - mhd->padded_ram_size) /
                     mhd->standby_subregion_size) + 1);

            /* Allocate a subregion of the calculated standby_subregion_size */
            if (offset + mhd->standby_subregion_size >
                mhd->padded_ram_size + mhd->standby_mem_size) {
                /* Clamp the final subregion to the end of standby memory. */
                this_subregion_size = mhd->padded_ram_size +
                  mhd->standby_mem_size - offset;
            } else {
                this_subregion_size = mhd->standby_subregion_size;
            }

            memory_region_init_ram(standby_ram, NULL, id, this_subregion_size,
                                   &error_fatal);
            /* This is a hack to make memory hotunplug work again. Once we have
             * subdevices, we have to unparent them when unassigning memory,
             * instead of doing it via the ref count of the MemoryRegion. */
            object_ref(OBJECT(standby_ram));
            object_unparent(OBJECT(standby_ram));
            vmstate_register_ram_global(standby_ram);
            memory_region_add_subregion(sysmem, offset, standby_ram);
        }
        /* The specified subregion is no longer in standby */
        mhd->standby_state_map[(assign_addr - mhd->padded_ram_size)
                               / MEM_SECTION_SIZE] = 1;
    }
    /* Assigning a core-memory or unaligned address is a successful no-op. */
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}
257 
/* Take a standby storage increment offline again (SCLP UNASSIGN STORAGE),
 * destroying the backing subregion once all of its sections are offline. */
static void unassign_storage(SCLPDevice *sclp, SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    ram_addr_t unassign_addr;
    MemoryRegion *sysmem = get_system_memory();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }
    /* Increment numbers (RNs) are 1-based. */
    unassign_addr = (assign_info->rn - 1) * mhd->rzm;

    /* if the addr is a multiple of 256 MB */
    if ((unassign_addr % MEM_SECTION_SIZE == 0) &&
        (unassign_addr >= mhd->padded_ram_size)) {
        /* Mark this section as being in standby (offline) again. */
        mhd->standby_state_map[(unassign_addr -
                           mhd->padded_ram_size) / MEM_SECTION_SIZE] = 0;

        /* find the specified memory region and destroy it */
        mr = memory_region_find(sysmem, unassign_addr, 1).mr;
        memory_region_unref(mr);
        if (mr) {
            int i;
            int is_removable = 1;
            /* Offset (within the standby area) of the start of the
             * subregion that contains unassign_addr. */
            ram_addr_t map_offset = (unassign_addr - mhd->padded_ram_size -
                                     (unassign_addr - mhd->padded_ram_size)
                                     % mhd->standby_subregion_size);
            /* The backing region may only be removed once every section
             * inside it is back in standby. */
            for (i = 0;
                 i < (mhd->standby_subregion_size / MEM_SECTION_SIZE);
                 i++) {

                if (mhd->standby_state_map[i + map_offset / MEM_SECTION_SIZE]) {
                    is_removable = 0;
                    break;
                }
            }
            if (is_removable) {
                memory_region_del_subregion(sysmem, mr);
                object_unref(OBJECT(mr));
            }
        }
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}
305 
306 /* Provide information about the CPU */
307 static void sclp_read_cpu_info(SCLPDevice *sclp, SCCB *sccb)
308 {
309     ReadCpuInfo *cpu_info = (ReadCpuInfo *) sccb;
310     CPUState *cpu;
311     int cpu_count = 0;
312     int i = 0;
313 
314     CPU_FOREACH(cpu) {
315         cpu_count++;
316     }
317 
318     cpu_info->nr_configured = cpu_to_be16(cpu_count);
319     cpu_info->offset_configured = cpu_to_be16(offsetof(ReadCpuInfo, entries));
320     cpu_info->nr_standby = cpu_to_be16(0);
321 
322     /* The standby offset is 16-byte for each CPU */
323     cpu_info->offset_standby = cpu_to_be16(cpu_info->offset_configured
324         + cpu_info->nr_configured*sizeof(CPUEntry));
325 
326     for (i = 0; i < cpu_count; i++) {
327         cpu_info->entries[i].address = i;
328         cpu_info->entries[i].type = 0;
329     }
330 
331     sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
332 }
333 
334 static void sclp_execute(SCLPDevice *sclp, SCCB *sccb, uint32_t code)
335 {
336     SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
337     SCLPEventFacility *ef = sclp->event_facility;
338     SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);
339 
340     switch (code & SCLP_CMD_CODE_MASK) {
341     case SCLP_CMDW_READ_SCP_INFO:
342     case SCLP_CMDW_READ_SCP_INFO_FORCED:
343         sclp_c->read_SCP_info(sclp, sccb);
344         break;
345     case SCLP_CMDW_READ_CPU_INFO:
346         sclp_c->read_cpu_info(sclp, sccb);
347         break;
348     case SCLP_READ_STORAGE_ELEMENT_INFO:
349         if (code & 0xff00) {
350             sclp_c->read_storage_element1_info(sclp, sccb);
351         } else {
352             sclp_c->read_storage_element0_info(sclp, sccb);
353         }
354         break;
355     case SCLP_ATTACH_STORAGE_ELEMENT:
356         sclp_c->attach_storage_element(sclp, sccb, (code & 0xff00) >> 8);
357         break;
358     case SCLP_ASSIGN_STORAGE:
359         sclp_c->assign_storage(sclp, sccb);
360         break;
361     case SCLP_UNASSIGN_STORAGE:
362         sclp_c->unassign_storage(sclp, sccb);
363         break;
364     case SCLP_CMDW_CONFIGURE_PCI:
365         s390_pci_sclp_configure(sccb);
366         break;
367     case SCLP_CMDW_DECONFIGURE_PCI:
368         s390_pci_sclp_deconfigure(sccb);
369         break;
370     default:
371         efc->command_handler(ef, sccb, code);
372         break;
373     }
374 }
375 
376 int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code)
377 {
378     SCLPDevice *sclp = get_sclp_device();
379     SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
380     int r = 0;
381     SCCB work_sccb;
382 
383     hwaddr sccb_len = sizeof(SCCB);
384 
385     /* first some basic checks on program checks */
386     if (env->psw.mask & PSW_MASK_PSTATE) {
387         r = -PGM_PRIVILEGED;
388         goto out;
389     }
390     if (cpu_physical_memory_is_io(sccb)) {
391         r = -PGM_ADDRESSING;
392         goto out;
393     }
394     if ((sccb & ~0x1fffUL) == 0 || (sccb & ~0x1fffUL) == env->psa
395         || (sccb & ~0x7ffffff8UL) != 0) {
396         r = -PGM_SPECIFICATION;
397         goto out;
398     }
399 
400     /*
401      * we want to work on a private copy of the sccb, to prevent guests
402      * from playing dirty tricks by modifying the memory content after
403      * the host has checked the values
404      */
405     cpu_physical_memory_read(sccb, &work_sccb, sccb_len);
406 
407     /* Valid sccb sizes */
408     if (be16_to_cpu(work_sccb.h.length) < sizeof(SCCBHeader) ||
409         be16_to_cpu(work_sccb.h.length) > SCCB_SIZE) {
410         r = -PGM_SPECIFICATION;
411         goto out;
412     }
413 
414     sclp_c->execute(sclp, (SCCB *)&work_sccb, code);
415 
416     cpu_physical_memory_write(sccb, &work_sccb,
417                               be16_to_cpu(work_sccb.h.length));
418 
419     sclp_c->service_interrupt(sclp, sccb);
420 
421 out:
422     return r;
423 }
424 
425 static void service_interrupt(SCLPDevice *sclp, uint32_t sccb)
426 {
427     SCLPEventFacility *ef = sclp->event_facility;
428     SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);
429 
430     uint32_t param = sccb & ~3;
431 
432     /* Indicate whether an event is still pending */
433     param |= efc->event_pending(ef) ? 1 : 0;
434 
435     if (!param) {
436         /* No need to send an interrupt, there's nothing to be notified about */
437         return;
438     }
439     s390_sclp_extint(param);
440 }
441 
442 void sclp_service_interrupt(uint32_t sccb)
443 {
444     SCLPDevice *sclp = get_sclp_device();
445     SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
446 
447     sclp_c->service_interrupt(sclp, sccb);
448 }
449 
450 /* qemu object creation and initialization functions */
451 
452 void s390_sclp_init(void)
453 {
454     Object *new = object_new(TYPE_SCLP);
455 
456     object_property_add_child(qdev_get_machine(), TYPE_SCLP, new,
457                               NULL);
458     object_unref(OBJECT(new));
459     qdev_init_nofail(DEVICE(new));
460 }
461 
462 static void sclp_realize(DeviceState *dev, Error **errp)
463 {
464     MachineState *machine = MACHINE(qdev_get_machine());
465     SCLPDevice *sclp = SCLP(dev);
466     Error *err = NULL;
467     uint64_t hw_limit;
468     int ret;
469 
470     object_property_set_bool(OBJECT(sclp->event_facility), true, "realized",
471                              &err);
472     if (err) {
473         goto out;
474     }
475     /*
476      * qdev_device_add searches the sysbus for TYPE_SCLP_EVENTS_BUS. As long
477      * as we can't find a fitting bus via the qom tree, we have to add the
478      * event facility to the sysbus, so e.g. a sclp console can be created.
479      */
480     qdev_set_parent_bus(DEVICE(sclp->event_facility), sysbus_get_default());
481 
482     ret = s390_set_memory_limit(machine->maxram_size, &hw_limit);
483     if (ret == -E2BIG) {
484         error_setg(&err, "qemu: host supports a maximum of %" PRIu64 " GB",
485                    hw_limit >> 30);
486     } else if (ret) {
487         error_setg(&err, "qemu: setting the guest size failed");
488     }
489 
490 out:
491     error_propagate(errp, err);
492 }
493 
/* Compute the storage increment size, align core/standby memory to it,
 * and (when standby memory exists) set up the memory hotplug device.
 * Updates machine->ram_size/maxram_size and the global ram_size. */
static void sclp_memory_init(SCLPDevice *sclp)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    ram_addr_t initial_mem = machine->ram_size;
    ram_addr_t max_mem = machine->maxram_size;
    ram_addr_t standby_mem = max_mem - initial_mem;
    ram_addr_t pad_mem = 0;
    int increment_size = 20; /* start at 1 MB (2^20) */

    /* The storage increment size is a multiple of 1M and is a power of 2.
     * The number of storage increments must be MAX_STORAGE_INCREMENTS or fewer.
     * The variable 'increment_size' is an exponent of 2 that can be
     * used to calculate the size (in bytes) of an increment. */
    while ((initial_mem >> increment_size) > MAX_STORAGE_INCREMENTS) {
        increment_size++;
    }
    if (machine->ram_slots) {
        /* Standby memory must obey the same increment-count limit. */
        while ((standby_mem >> increment_size) > MAX_STORAGE_INCREMENTS) {
            increment_size++;
        }
    }
    sclp->increment_size = increment_size;

    /* The core and standby memory areas need to be aligned with
     * the increment size.  In effect, this can cause the
     * user-specified memory size to be rounded down to align
     * with the nearest increment boundary. */
    initial_mem = initial_mem >> increment_size << increment_size;
    standby_mem = standby_mem >> increment_size << increment_size;

    /* If the size of ram is not on a MEM_SECTION_SIZE boundary,
       calculate the pad size necessary to force this boundary. */
    if (machine->ram_slots && standby_mem) {
        sclpMemoryHotplugDev *mhd = init_sclp_memory_hotplug_dev();

        if (initial_mem % MEM_SECTION_SIZE) {
            pad_mem = MEM_SECTION_SIZE - initial_mem % MEM_SECTION_SIZE;
        }
        mhd->increment_size = increment_size;
        mhd->pad_size = pad_mem;
        mhd->standby_mem_size = standby_mem;
    }
    machine->ram_size = initial_mem;
    machine->maxram_size = initial_mem + pad_mem + standby_mem;
    /* let's propagate the changed ram size into the global variable. */
    ram_size = initial_mem;
}
541 
542 static void sclp_init(Object *obj)
543 {
544     SCLPDevice *sclp = SCLP(obj);
545     Object *new;
546 
547     new = object_new(TYPE_SCLP_EVENT_FACILITY);
548     object_property_add_child(obj, TYPE_SCLP_EVENT_FACILITY, new, NULL);
549     object_unref(new);
550     sclp->event_facility = EVENT_FACILITY(new);
551 
552     sclp_memory_init(sclp);
553 }
554 
555 static void sclp_class_init(ObjectClass *oc, void *data)
556 {
557     SCLPDeviceClass *sc = SCLP_CLASS(oc);
558     DeviceClass *dc = DEVICE_CLASS(oc);
559 
560     dc->desc = "SCLP (Service-Call Logical Processor)";
561     dc->realize = sclp_realize;
562     dc->hotpluggable = false;
563     set_bit(DEVICE_CATEGORY_MISC, dc->categories);
564 
565     sc->read_SCP_info = read_SCP_info;
566     sc->read_storage_element0_info = read_storage_element0_info;
567     sc->read_storage_element1_info = read_storage_element1_info;
568     sc->attach_storage_element = attach_storage_element;
569     sc->assign_storage = assign_storage;
570     sc->unassign_storage = unassign_storage;
571     sc->read_cpu_info = sclp_read_cpu_info;
572     sc->execute = sclp_execute;
573     sc->service_interrupt = service_interrupt;
574 }
575 
576 static TypeInfo sclp_info = {
577     .name = TYPE_SCLP,
578     .parent = TYPE_DEVICE,
579     .instance_init = sclp_init,
580     .instance_size = sizeof(SCLPDevice),
581     .class_init = sclp_class_init,
582     .class_size = sizeof(SCLPDeviceClass),
583 };
584 
585 sclpMemoryHotplugDev *init_sclp_memory_hotplug_dev(void)
586 {
587     DeviceState *dev;
588     dev = qdev_create(NULL, TYPE_SCLP_MEMORY_HOTPLUG_DEV);
589     object_property_add_child(qdev_get_machine(),
590                               TYPE_SCLP_MEMORY_HOTPLUG_DEV,
591                               OBJECT(dev), NULL);
592     qdev_init_nofail(dev);
593     return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
594                                    TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
595 }
596 
597 sclpMemoryHotplugDev *get_sclp_memory_hotplug_dev(void)
598 {
599     return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
600                                    TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
601 }
602 
603 static void sclp_memory_hotplug_dev_class_init(ObjectClass *klass,
604                                                void *data)
605 {
606     DeviceClass *dc = DEVICE_CLASS(klass);
607 
608     set_bit(DEVICE_CATEGORY_MISC, dc->categories);
609 }
610 
611 static TypeInfo sclp_memory_hotplug_dev_info = {
612     .name = TYPE_SCLP_MEMORY_HOTPLUG_DEV,
613     .parent = TYPE_SYS_BUS_DEVICE,
614     .instance_size = sizeof(sclpMemoryHotplugDev),
615     .class_init = sclp_memory_hotplug_dev_class_init,
616 };
617 
/* Register both SCLP QOM types with the type system at startup. */
static void register_types(void)
{
    type_register_static(&sclp_memory_hotplug_dev_info);
    type_register_static(&sclp_info);
}
type_init(register_types);
624