xref: /openbmc/qemu/hw/s390x/sclp.c (revision 228aa992)
/*
 * SCLP Support
 *
 * Copyright IBM, Corp. 2012
 *
 * Authors:
 *  Christian Borntraeger <borntraeger@de.ibm.com>
 *  Heinz Graalfs <graalfs@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version.  See the COPYING file in the top-level directory.
 *
 */

#include "cpu.h"
#include "sysemu/kvm.h"
#include "exec/memory.h"
#include "sysemu/sysemu.h"
#include "exec/address-spaces.h"
#include "qemu/config-file.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/event-facility.h"

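/*
 * Fetch the SCLP event facility that s390_sclp_init() attached to the
 * machine object, via the child property named after its type.
 */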
static inline SCLPEventFacility *get_event_facility(void)
{
    ObjectProperty *op = object_property_find(qdev_get_machine(),
                                              TYPE_SCLP_EVENT_FACILITY,
                                              NULL);
    assert(op);
    return op->opaque;
}

/* Provide information about the configuration, CPUs and storage */
static void read_SCP_info(SCCB *sccb)
{
    ReadInfo *read_info = (ReadInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    CPUState *cpu;
    int cpu_count = 0;
    int i = 0;
    int increment_size = 20;
    int rnsize, rnmax;
    QemuOpts *opts = qemu_opts_find(qemu_find_opts("memory"), NULL);
    int slots = qemu_opt_get_number(opts, "slots", 0);
    int max_avail_slots = s390_get_memslot_count(kvm_state);

    if (slots > max_avail_slots) {
        slots = max_avail_slots;
    }

    CPU_FOREACH(cpu) {
        cpu_count++;
    }

    /* CPU information */
    read_info->entries_cpu = cpu_to_be16(cpu_count);
    read_info->offset_cpu = cpu_to_be16(offsetof(ReadInfo, entries));
    read_info->highest_cpu = cpu_to_be16(max_cpus);

    for (i = 0; i < cpu_count; i++) {
        read_info->entries[i].address = i;
        read_info->entries[i].type = 0;
    }

    read_info->facilities = cpu_to_be64(SCLP_HAS_CPU_INFO);

    /*
     * The storage increment size is a multiple of 1M and is a power of 2.
     * The number of storage increments must be MAX_STORAGE_INCREMENTS or
     * fewer.
     */
    while ((ram_size >> increment_size) > MAX_STORAGE_INCREMENTS) {
        increment_size++;
    }
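    /*
     * Worked example (assuming MAX_STORAGE_INCREMENTS is 1020, as defined
     * in sclp.h): with 4 GiB of guest RAM, 4 GiB >> 20 == 4096 increments,
     * which exceeds the limit, so increment_size grows to 23 (an 8 MiB
     * increment) and rnmax below becomes 4 GiB >> 23 == 512.
     */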
    rnmax = ram_size >> increment_size;

    /* Memory Hotplug is only supported for the ccw machine type */
    if (mhd) {
        while ((mhd->standby_mem_size >> increment_size) >
               MAX_STORAGE_INCREMENTS) {
            increment_size++;
        }
        assert(increment_size == mhd->increment_size);

        mhd->standby_subregion_size = MEM_SECTION_SIZE;
        /* Deduct one memory slot, which is already in use for core memory */
        if (slots > 0) {
            while ((mhd->standby_subregion_size * (slots - 1)
                    < mhd->standby_mem_size)) {
                mhd->standby_subregion_size = mhd->standby_subregion_size << 1;
            }
        }
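        /*
         * Example (assuming MEM_SECTION_SIZE is 256 MiB, as in sclp.h):
         * with 8 memory slots and 16 GiB of standby memory, the subregion
         * size doubles until 7 subregions cover the whole standby area,
         * i.e. it grows from 256 MiB to 4 GiB.
         */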
        /*
         * Initialize mapping of guest standby memory sections indicating which
         * are and are not online. Assume all standby memory begins offline.
         */
        if (mhd->standby_state_map == 0) {
            if (mhd->standby_mem_size % mhd->standby_subregion_size) {
                mhd->standby_state_map = g_malloc0((mhd->standby_mem_size /
                                             mhd->standby_subregion_size + 1) *
                                             (mhd->standby_subregion_size /
                                             MEM_SECTION_SIZE));
            } else {
                mhd->standby_state_map = g_malloc0(mhd->standby_mem_size /
                                                   MEM_SECTION_SIZE);
            }
        }
        mhd->padded_ram_size = ram_size + mhd->pad_size;
        mhd->rzm = 1 << mhd->increment_size;
        rnmax = ((ram_size + mhd->standby_mem_size + mhd->pad_size)
             >> mhd->increment_size);

        read_info->facilities |= cpu_to_be64(SCLP_FC_ASSIGN_ATTACH_READ_STOR);
    }

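    /*
     * rnsize is the increment size in MiB; sizes above 128 MiB do not fit
     * the one-byte field and are reported via the 32-bit rnsize2 instead.
     * The same scheme applies to rnmax/rnmax2 for the increment count.
     */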
    rnsize = 1 << (increment_size - 20);
    if (rnsize <= 128) {
        read_info->rnsize = rnsize;
    } else {
        read_info->rnsize = 0;
        read_info->rnsize2 = cpu_to_be32(rnsize);
    }

    if (rnmax < 0x10000) {
        read_info->rnmax = cpu_to_be16(rnmax);
    } else {
        read_info->rnmax = cpu_to_be16(0);
        read_info->rnmax2 = cpu_to_be64(rnmax);
    }

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

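/* Provide information about storage element 0, i.e. the core memory */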
static void read_storage_element0_info(SCCB *sccb)
{
    int i, assigned;
    int subincrement_id = SCLP_STARTING_SUBINCREMENT_ID;
    ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    assert(mhd);

    if ((ram_size >> mhd->increment_size) >= 0x10000) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
        return;
    }

    /* Return information regarding core memory */
    storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
    assigned = ram_size >> mhd->increment_size;
    storage_info->assigned = cpu_to_be16(assigned);

    for (i = 0; i < assigned; i++) {
        storage_info->entries[i] = cpu_to_be32(subincrement_id);
        subincrement_id += SCLP_INCREMENT_UNIT;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

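/* Provide information about storage element 1, i.e. the standby memory */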
static void read_storage_element1_info(SCCB *sccb)
{
    ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    assert(mhd);

    if ((mhd->standby_mem_size >> mhd->increment_size) >= 0x10000) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
        return;
    }

    /* Return information regarding standby memory */
    storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
    storage_info->assigned = cpu_to_be16(mhd->standby_mem_size >>
                                         mhd->increment_size);
    storage_info->standby = cpu_to_be16(mhd->standby_mem_size >>
                                        mhd->increment_size);
    sccb->h.response_code = cpu_to_be16(SCLP_RC_STANDBY_READ_COMPLETION);
}

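/*
 * List the subincrement IDs of a storage element; only element 1 (the
 * standby memory) can be attached, any other element is rejected.
 */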
static void attach_storage_element(SCCB *sccb, uint16_t element)
{
    int i, assigned, subincrement_id;
    AttachStorageElement *attach_info = (AttachStorageElement *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    assert(mhd);

    if (element != 1) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    assigned = mhd->standby_mem_size >> mhd->increment_size;
    attach_info->assigned = cpu_to_be16(assigned);
    subincrement_id = ((ram_size >> mhd->increment_size) << 16)
                      + SCLP_STARTING_SUBINCREMENT_ID;
    for (i = 0; i < assigned; i++) {
        attach_info->entries[i] = cpu_to_be32(subincrement_id);
        subincrement_id += SCLP_INCREMENT_UNIT;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

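/*
 * Bring a standby storage increment online. The backing RAM for the
 * whole subregion containing it is allocated lazily the first time an
 * increment inside that subregion is assigned.
 */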
static void assign_storage(SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    uint64_t this_subregion_size;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    assert(mhd);
    ram_addr_t assign_addr = (assign_info->rn - 1) * mhd->rzm;
    MemoryRegion *sysmem = get_system_memory();

    if ((assign_addr % MEM_SECTION_SIZE == 0) &&
        (assign_addr >= mhd->padded_ram_size)) {
        /* Re-use existing memory region if found */
        mr = memory_region_find(sysmem, assign_addr, 1).mr;
        if (!mr) {
            MemoryRegion *standby_ram = g_new(MemoryRegion, 1);

            /* offset to align to standby_subregion_size for allocation */
            ram_addr_t offset = assign_addr -
                                (assign_addr - mhd->padded_ram_size)
                                % mhd->standby_subregion_size;

            /* strlen("standby.ram") + up to 4 slot digits + NUL terminator */
            char id[16];
            snprintf(id, 16, "standby.ram%d",
                     (int)((offset - mhd->padded_ram_size) /
                     mhd->standby_subregion_size) + 1);

            /* Allocate a subregion of the calculated standby_subregion_size */
            if (offset + mhd->standby_subregion_size >
                mhd->padded_ram_size + mhd->standby_mem_size) {
                this_subregion_size = mhd->padded_ram_size +
                  mhd->standby_mem_size - offset;
            } else {
                this_subregion_size = mhd->standby_subregion_size;
            }

            memory_region_init_ram(standby_ram, NULL, id, this_subregion_size,
                                   &error_abort);
            vmstate_register_ram_global(standby_ram);
            memory_region_add_subregion(sysmem, offset, standby_ram);
        }
        /* The specified subregion is no longer in standby */
        mhd->standby_state_map[(assign_addr - mhd->padded_ram_size)
                               / MEM_SECTION_SIZE] = 1;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

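/*
 * Return a storage increment to the standby state. The backing memory
 * region is destroyed only once every section it covers is offline.
 */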
static void unassign_storage(SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    assert(mhd);
    ram_addr_t unassign_addr = (assign_info->rn - 1) * mhd->rzm;
    MemoryRegion *sysmem = get_system_memory();

    /* if the addr is section-aligned and lies within the standby area */
    if ((unassign_addr % MEM_SECTION_SIZE == 0) &&
        (unassign_addr >= mhd->padded_ram_size)) {
        mhd->standby_state_map[(unassign_addr -
                           mhd->padded_ram_size) / MEM_SECTION_SIZE] = 0;

        /* find the specified memory region and destroy it */
        mr = memory_region_find(sysmem, unassign_addr, 1).mr;
        if (mr) {
            int i;
            int is_removable = 1;
            ram_addr_t map_offset = (unassign_addr - mhd->padded_ram_size -
                                     (unassign_addr - mhd->padded_ram_size)
                                     % mhd->standby_subregion_size);
            /* The subregion may go away only if all its sections are offline */
            for (i = 0;
                 i < (mhd->standby_subregion_size / MEM_SECTION_SIZE);
                 i++) {
                if (mhd->standby_state_map[i + map_offset / MEM_SECTION_SIZE]) {
                    is_removable = 0;
                    break;
                }
            }
            if (is_removable) {
                memory_region_del_subregion(sysmem, mr);
                object_unparent(OBJECT(mr));
                g_free(mr);
            }
        }
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

/* Provide information about the CPU */
static void sclp_read_cpu_info(SCCB *sccb)
{
    ReadCpuInfo *cpu_info = (ReadCpuInfo *) sccb;
    CPUState *cpu;
    int cpu_count = 0;
    int i = 0;

    CPU_FOREACH(cpu) {
        cpu_count++;
    }

    cpu_info->nr_configured = cpu_to_be16(cpu_count);
    cpu_info->offset_configured = cpu_to_be16(offsetof(ReadCpuInfo, entries));
    cpu_info->nr_standby = cpu_to_be16(0);

    /* The standby entries begin right after the configured ones, each of
     * which is sizeof(CPUEntry) == 16 bytes */
    cpu_info->offset_standby = cpu_to_be16(cpu_info->offset_configured
        + cpu_info->nr_configured*sizeof(CPUEntry));

    for (i = 0; i < cpu_count; i++) {
        cpu_info->entries[i].address = i;
        cpu_info->entries[i].type = 0;
    }

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

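/*
 * Dispatch an SCLP command to its handler; anything not handled here is
 * forwarded to the event facility.
 */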
static void sclp_execute(SCCB *sccb, uint32_t code)
{
    SCLPEventFacility *ef = get_event_facility();
    SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);

    switch (code & SCLP_CMD_CODE_MASK) {
    case SCLP_CMDW_READ_SCP_INFO:
    case SCLP_CMDW_READ_SCP_INFO_FORCED:
        read_SCP_info(sccb);
        break;
    case SCLP_CMDW_READ_CPU_INFO:
        sclp_read_cpu_info(sccb);
        break;
    case SCLP_READ_STORAGE_ELEMENT_INFO:
        if (code & 0xff00) {
            read_storage_element1_info(sccb);
        } else {
            read_storage_element0_info(sccb);
        }
        break;
    case SCLP_ATTACH_STORAGE_ELEMENT:
        attach_storage_element(sccb, (code & 0xff00) >> 8);
        break;
    case SCLP_ASSIGN_STORAGE:
        assign_storage(sccb);
        break;
    case SCLP_UNASSIGN_STORAGE:
        unassign_storage(sccb);
        break;
    default:
        efc->command_handler(ef, sccb, code);
        break;
    }
}

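/*
 * Handler for the SERVICE CALL instruction: validate the guest-supplied
 * SCCB address, execute the command on a private copy of the SCCB, write
 * the result back to guest memory and raise the service interrupt.
 *
 * A guest would typically issue the call roughly like this (a sketch, not
 * taken from this file):
 *
 *     servc %r1,%r2    # r1 = SCLP command word, r2 = real address of SCCB
 *
 * where the SCCB address must be 8-byte aligned, below 2 GB, and outside
 * both the low-core pages and the prefix area, as the checks below enforce.
 */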
int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code)
{
    int r = 0;
    SCCB work_sccb;

    hwaddr sccb_len = sizeof(SCCB);

    /* first some basic checks; any failure is reported as a program check */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        r = -PGM_PRIVILEGED;
        goto out;
    }
    if (cpu_physical_memory_is_io(sccb)) {
        r = -PGM_ADDRESSING;
        goto out;
    }
    if ((sccb & ~0x1fffUL) == 0 || (sccb & ~0x1fffUL) == env->psa
        || (sccb & ~0x7ffffff8UL) != 0) {
        r = -PGM_SPECIFICATION;
        goto out;
    }

    /*
     * we want to work on a private copy of the sccb, to prevent guests
     * from playing dirty tricks by modifying the memory content after
     * the host has checked the values
     */
    cpu_physical_memory_read(sccb, &work_sccb, sccb_len);

    /* Valid sccb sizes */
    if (be16_to_cpu(work_sccb.h.length) < sizeof(SCCBHeader) ||
        be16_to_cpu(work_sccb.h.length) > SCCB_SIZE) {
        r = -PGM_SPECIFICATION;
        goto out;
    }

    sclp_execute((SCCB *)&work_sccb, code);

    cpu_physical_memory_write(sccb, &work_sccb,
                              be16_to_cpu(work_sccb.h.length));

    sclp_service_interrupt(sccb);

out:
    return r;
}

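/*
 * Raise a service-signal external interrupt. The interrupt parameter is
 * the SCCB address with its low bits reused as flags; bit 0 signals that
 * more events are pending, so a parameter of zero means there is nothing
 * to notify the guest about.
 */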
void sclp_service_interrupt(uint32_t sccb)
{
    SCLPEventFacility *ef = get_event_facility();
    SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);

    uint32_t param = sccb & ~3;

    /* Indicate whether an event is still pending */
    param |= efc->event_pending(ef) ? 1 : 0;

    if (!param) {
        /* No need to send an interrupt, there's nothing to be notified about */
        return;
    }
    s390_sclp_extint(param);
}

/* qemu object creation and initialization functions */

void s390_sclp_init(void)
{
    DeviceState *dev = qdev_create(NULL, TYPE_SCLP_EVENT_FACILITY);

    object_property_add_child(qdev_get_machine(), TYPE_SCLP_EVENT_FACILITY,
                              OBJECT(dev), NULL);
    qdev_init_nofail(dev);
}

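/*
 * Create the single memory hotplug device and attach it to the machine
 * object so that get_sclp_memory_hotplug_dev() can look it up later.
 */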
sclpMemoryHotplugDev *init_sclp_memory_hotplug_dev(void)
{
    DeviceState *dev;
    dev = qdev_create(NULL, TYPE_SCLP_MEMORY_HOTPLUG_DEV);
    object_property_add_child(qdev_get_machine(),
                              TYPE_SCLP_MEMORY_HOTPLUG_DEV,
                              OBJECT(dev), NULL);
    qdev_init_nofail(dev);
    return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
                                   TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
}

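/*
 * Look up the memory hotplug device; yields NULL if
 * init_sclp_memory_hotplug_dev() was never called, which is how callers
 * such as read_SCP_info() detect that memory hotplug is unavailable.
 */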
sclpMemoryHotplugDev *get_sclp_memory_hotplug_dev(void)
{
    return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
                                   TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
}

static const TypeInfo sclp_memory_hotplug_dev_info = {
    .name = TYPE_SCLP_MEMORY_HOTPLUG_DEV,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(sclpMemoryHotplugDev),
};

static void register_types(void)
{
    type_register_static(&sclp_memory_hotplug_dev_info);
}
type_init(register_types);