/*
 * QEMU PowerPC pSeries Logical Partition capabilities handling
 *
 * Copyright (c) 2017 David Gibson, Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/hw_accel.h"
#include "exec/ram_addr.h"
#include "target/ppc/cpu.h"
#include "target/ppc/mmu-hash64.h"
#include "cpu-models.h"
#include "kvm_ppc.h"
#include "migration/vmstate.h"
#include "sysemu/tcg.h"

#include "hw/ppc/spapr.h"

typedef struct SpaprCapPossible {
    int num;            /* size of vals array below */
    const char *help;   /* help text for vals */
    /*
     * Note:
     * - because of the way compatibility is determined vals MUST be ordered
     *   such that later options are a superset of all preceding options.
     * - the order of vals must be preserved, that is their index is important,
     *   however vals may be added to the end of the list so long as the above
     *   point is observed
     */
    const char *vals[];
} SpaprCapPossible;

typedef struct SpaprCapabilityInfo {
    const char *name;
    const char *description;
    int index;

    /* Getter and Setter Function Pointers */
    ObjectPropertyAccessor *get;
    ObjectPropertyAccessor *set;
    const char *type;
    /* Possible values if this is a custom string type */
    SpaprCapPossible *possible;
    /* Make sure the virtual hardware can support this capability */
    void (*apply)(SpaprMachineState *spapr, uint8_t val, Error **errp);
    void (*cpu_apply)(SpaprMachineState *spapr, PowerPCCPU *cpu,
                      uint8_t val, Error **errp);
    bool (*migrate_needed)(void *opaque);
} SpaprCapabilityInfo;

static void spapr_cap_get_bool(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    SpaprCapabilityInfo *cap = opaque;
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);
    bool value = spapr_get_cap(spapr, cap->index) == SPAPR_CAP_ON;

    visit_type_bool(v, name, &value, errp);
}

static void spapr_cap_set_bool(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    SpaprCapabilityInfo *cap = opaque;
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    spapr->cmd_line_caps[cap->index] = true;
    spapr->eff.caps[cap->index] = value ? SPAPR_CAP_ON : SPAPR_CAP_OFF;
}


static void spapr_cap_get_string(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    SpaprCapabilityInfo *cap = opaque;
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);
    g_autofree char *val = NULL;
    uint8_t value = spapr_get_cap(spapr, cap->index);

    if (value >= cap->possible->num) {
        error_setg(errp, "Invalid value (%d) for cap-%s", value, cap->name);
        return;
    }

    val = g_strdup(cap->possible->vals[value]);

    visit_type_str(v, name, &val, errp);
}

static void spapr_cap_set_string(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    SpaprCapabilityInfo *cap = opaque;
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);
    uint8_t i;
    g_autofree char *val = NULL;

    if (!visit_type_str(v, name, &val, errp)) {
        return;
    }

    if (!strcmp(val, "?")) {
        error_setg(errp, "%s", cap->possible->help);
        return;
    }
    for (i = 0; i < cap->possible->num; i++) {
        if (!strcasecmp(val, cap->possible->vals[i])) {
            spapr->cmd_line_caps[cap->index] = true;
            spapr->eff.caps[cap->index] = i;
            return;
        }
    }

    error_setg(errp, "Invalid capability mode \"%s\" for cap-%s", val,
               cap->name);
}

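/*
 * The page-size capability is stored internally as a page shift (log2 of the
 * size in bytes); the QOM property is exposed and parsed as a size in bytes.
 */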
static void spapr_cap_get_pagesize(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    SpaprCapabilityInfo *cap = opaque;
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);
    uint8_t val = spapr_get_cap(spapr, cap->index);
    uint64_t pagesize = (1ULL << val);

    visit_type_size(v, name, &pagesize, errp);
}

static void spapr_cap_set_pagesize(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    SpaprCapabilityInfo *cap = opaque;
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);
    uint64_t pagesize;
    uint8_t val;

    if (!visit_type_size(v, name, &pagesize, errp)) {
        return;
    }

    if (!is_power_of_2(pagesize)) {
        error_setg(errp, "cap-%s must be a power of 2", cap->name);
        return;
    }

    val = ctz64(pagesize);
    spapr->cmd_line_caps[cap->index] = true;
    spapr->eff.caps[cap->index] = val;
}

static void cap_htm_apply(SpaprMachineState *spapr, uint8_t val, Error **errp)
{
    ERRP_GUARD();
    if (!val) {
        /* TODO: We don't support disabling htm yet */
        return;
    }
    if (tcg_enabled()) {
        error_setg(errp, "No Transactional Memory support in TCG");
        error_append_hint(errp, "Try appending -machine cap-htm=off\n");
    } else if (kvm_enabled() && !kvmppc_has_cap_htm()) {
        error_setg(errp,
                   "KVM implementation does not support Transactional Memory");
        error_append_hint(errp, "Try appending -machine cap-htm=off\n");
    }
}

static void cap_vsx_apply(SpaprMachineState *spapr, uint8_t val, Error **errp)
{
    ERRP_GUARD();
    CPUPPCState *env = cpu_env(first_cpu);

    if (!val) {
        /* TODO: We don't support disabling vsx yet */
        return;
    }
    /* Allowable CPUs in spapr_cpu_core.c should already have gotten
     * rid of anything that doesn't do VMX */
    g_assert(env->insns_flags & PPC_ALTIVEC);
    if (!(env->insns_flags2 & PPC2_VSX)) {
        error_setg(errp, "VSX support not available");
        error_append_hint(errp, "Try appending -machine cap-vsx=off\n");
    }
}

static void cap_dfp_apply(SpaprMachineState *spapr, uint8_t val, Error **errp)
{
    ERRP_GUARD();

    if (!val) {
        /* TODO: We don't support disabling dfp yet */
        return;
    }
    if (!(cpu_env(first_cpu)->insns_flags2 & PPC2_DFP)) {
        error_setg(errp, "DFP support not available");
        error_append_hint(errp, "Try appending -machine cap-dfp=off\n");
    }
}

SpaprCapPossible cap_cfpc_possible = {
    .num = 3,
    .vals = {"broken", "workaround", "fixed"},
    .help = "broken - no protection, workaround - workaround available,"
            " fixed - fixed in hardware",
};

static void cap_safe_cache_apply(SpaprMachineState *spapr, uint8_t val,
                                 Error **errp)
{
    ERRP_GUARD();
    uint8_t kvm_val = kvmppc_get_cap_safe_cache();

    if (tcg_enabled() && val) {
        /* TCG only supports broken, allow other values and print a warning */
        warn_report("TCG doesn't support requested feature, cap-cfpc=%s",
                    cap_cfpc_possible.vals[val]);
    } else if (kvm_enabled() && (val > kvm_val)) {
        error_setg(errp,
                   "Requested safe cache capability level not supported by KVM");
        error_append_hint(errp, "Try appending -machine cap-cfpc=%s\n",
                          cap_cfpc_possible.vals[kvm_val]);
    }
}

SpaprCapPossible cap_sbbc_possible = {
    .num = 3,
    .vals = {"broken", "workaround", "fixed"},
    .help = "broken - no protection, workaround - workaround available,"
            " fixed - fixed in hardware",
};

static void cap_safe_bounds_check_apply(SpaprMachineState *spapr, uint8_t val,
                                        Error **errp)
{
    ERRP_GUARD();
    uint8_t kvm_val = kvmppc_get_cap_safe_bounds_check();

    if (tcg_enabled() && val) {
        /* TCG only supports broken, allow other values and print a warning */
        warn_report("TCG doesn't support requested feature, cap-sbbc=%s",
                    cap_sbbc_possible.vals[val]);
    } else if (kvm_enabled() && (val > kvm_val)) {
        error_setg(errp,
                   "Requested safe bounds check capability level not supported by KVM");
        error_append_hint(errp, "Try appending -machine cap-sbbc=%s\n",
                          cap_sbbc_possible.vals[kvm_val]);
    }
}

SpaprCapPossible cap_ibs_possible = {
    .num = 5,
    /* Note workaround only maintained for compatibility */
    .vals = {"broken", "workaround", "fixed-ibs", "fixed-ccd", "fixed-na"},
    .help = "broken - no protection, workaround - count cache flush"
            ", fixed-ibs - indirect branch serialisation,"
            " fixed-ccd - count cache disabled,"
            " fixed-na - fixed in hardware (no longer applicable)",
};

static void cap_safe_indirect_branch_apply(SpaprMachineState *spapr,
                                           uint8_t val, Error **errp)
{
    ERRP_GUARD();
    uint8_t kvm_val = kvmppc_get_cap_safe_indirect_branch();

    if (tcg_enabled() && val) {
        /* TCG only supports broken, allow other values and print a warning */
        warn_report("TCG doesn't support requested feature, cap-ibs=%s",
                    cap_ibs_possible.vals[val]);
    } else if (kvm_enabled() && (val > kvm_val)) {
        error_setg(errp,
                   "Requested safe indirect branch capability level not supported by KVM");
        error_append_hint(errp, "Try appending -machine cap-ibs=%s\n",
                          cap_ibs_possible.vals[kvm_val]);
    }
}

#define VALUE_DESC_TRISTATE     " (broken, workaround, fixed)"

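/*
 * Check that the host page size backing guest RAM is big enough for the
 * configured cap-hpt-max-page-size; this only matters when the KVM HPT
 * implementation needs host-contiguous pages.
 */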
bool spapr_check_pagesize(SpaprMachineState *spapr, hwaddr pagesize,
                          Error **errp)
{
    hwaddr maxpagesize = (1ULL << spapr->eff.caps[SPAPR_CAP_HPT_MAXPAGESIZE]);

    if (!kvmppc_hpt_needs_host_contiguous_pages()) {
        return true;
    }

    if (maxpagesize > pagesize) {
        error_setg(errp,
                   "Can't support %"HWADDR_PRIu" kiB guest pages with %"
                   HWADDR_PRIu" kiB host pages with this KVM implementation",
                   maxpagesize >> 10, pagesize >> 10);
        return false;
    }

    return true;
}

static void cap_hpt_maxpagesize_apply(SpaprMachineState *spapr,
                                      uint8_t val, Error **errp)
{
    if (val < 12) {
        error_setg(errp, "Require at least 4kiB hpt-max-page-size");
        return;
    } else if (val < 16) {
        warn_report("Many guests require at least 64kiB hpt-max-page-size");
    }

    spapr_check_pagesize(spapr, qemu_minrampagesize(), errp);
}

static bool cap_hpt_maxpagesize_migrate_needed(void *opaque)
{
    return !SPAPR_MACHINE_GET_CLASS(opaque)->pre_4_1_migration;
}

static bool spapr_pagesize_cb(void *opaque, uint32_t seg_pshift,
                              uint32_t pshift)
{
    unsigned maxshift = *((unsigned *)opaque);

    assert(pshift >= seg_pshift);

    /* Don't allow the guest to use pages bigger than the configured
     * maximum size */
    if (pshift > maxshift) {
        return false;
    }

    /* For whatever reason, KVM doesn't allow multiple pagesizes
     * within a segment, *except* for the case of 16M pages in a 4k or
     * 64k segment. Always exclude other cases, so that TCG and KVM
     * guests see a consistent environment */
    if ((pshift != seg_pshift) && (pshift != 24)) {
        return false;
    }

    return true;
}

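/*
 * Drop from the CPU's hash MMU page size table any (segment, page) size
 * combination rejected by the callback, compacting the remaining entries.
 * If no cache-inhibited large page size (>= 64kiB) survives, clear the
 * PPC_HASH64_CI_LARGEPAGE flag as well.
 */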
static void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu,
                                        bool (*cb)(void *, uint32_t, uint32_t),
                                        void *opaque)
{
    PPCHash64Options *opts = cpu->hash64_opts;
    int i;
    int n = 0;
    bool ci_largepage = false;

    assert(opts);

    n = 0;
    for (i = 0; i < ARRAY_SIZE(opts->sps); i++) {
        PPCHash64SegmentPageSizes *sps = &opts->sps[i];
        int j;
        int m = 0;

        assert(n <= i);

        if (!sps->page_shift) {
            break;
        }

        for (j = 0; j < ARRAY_SIZE(sps->enc); j++) {
            PPCHash64PageSize *ps = &sps->enc[j];

            assert(m <= j);
            if (!ps->page_shift) {
                break;
            }

            if (cb(opaque, sps->page_shift, ps->page_shift)) {
                if (ps->page_shift >= 16) {
                    ci_largepage = true;
                }
                sps->enc[m++] = *ps;
            }
        }

        /* Clear rest of the row */
        for (j = m; j < ARRAY_SIZE(sps->enc); j++) {
            memset(&sps->enc[j], 0, sizeof(sps->enc[j]));
        }

        if (m) {
            n++;
        }
    }

    /* Clear the rest of the table */
    for (i = n; i < ARRAY_SIZE(opts->sps); i++) {
        memset(&opts->sps[i], 0, sizeof(opts->sps[i]));
    }

    if (!ci_largepage) {
        opts->flags &= ~PPC_HASH64_CI_LARGEPAGE;
    }
}

static void cap_hpt_maxpagesize_cpu_apply(SpaprMachineState *spapr,
                                          PowerPCCPU *cpu,
                                          uint8_t val, Error **errp)
{
    unsigned maxshift = val;

    ppc_hash64_filter_pagesizes(cpu, spapr_pagesize_cb, &maxshift);
}

static void cap_nested_kvm_hv_apply(SpaprMachineState *spapr,
                                    uint8_t val, Error **errp)
{
    ERRP_GUARD();
    PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
    CPUPPCState *env = &cpu->env;

    if (!val) {
        /* capability disabled by default */
        return;
    }

    if (!(env->insns_flags2 & PPC2_ISA300)) {
        error_setg(errp, "Nested-HV only supported on POWER9 and later");
        error_append_hint(errp, "Try appending -machine cap-nested-hv=off\n");
        return;
    }

    if (kvm_enabled()) {
        if (!ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                              spapr->max_compat_pvr)) {
            error_setg(errp, "Nested-HV only supported on POWER9 and later");
            error_append_hint(errp,
                              "Try appending -machine max-cpu-compat=power9\n");
            return;
        }

        if (!kvmppc_has_cap_nested_kvm_hv()) {
            error_setg(errp,
                       "KVM implementation does not support Nested-HV");
            error_append_hint(errp,
                              "Try appending -machine cap-nested-hv=off\n");
        } else if (kvmppc_set_cap_nested_kvm_hv(val) < 0) {
            error_setg(errp, "Error enabling cap-nested-hv with KVM");
            error_append_hint(errp,
                              "Try appending -machine cap-nested-hv=off\n");
        }
    } else if (tcg_enabled()) {
        MachineState *ms = MACHINE(spapr);
        unsigned int smp_threads = ms->smp.threads;

        /*
         * Nested-HV switches the vCPU env state over to the L2 guest, so
         * SMT-shared SPR updates, for example, do not necessarily update the
         * correct SPR value on sibling threads that are in a different
         * guest/host context.
         */
        if (smp_threads > 1) {
            error_setg(errp, "TCG does not support nested-HV with SMT");
            error_append_hint(errp, "Try appending -machine cap-nested-hv=off "
                              "or use threads=1 with -smp\n");
        }
        if (spapr_nested_api(spapr) &&
            spapr_nested_api(spapr) != NESTED_API_KVM_HV) {
            error_setg(errp, "Nested-HV APIs are mutually exclusive");
            error_append_hint(errp, "Please use either cap-nested-hv or "
                              "cap-nested-papr to proceed.\n");
            return;
        } else {
            spapr->nested.api = NESTED_API_KVM_HV;
        }
    }
}

static void cap_nested_papr_apply(SpaprMachineState *spapr,
                                  uint8_t val, Error **errp)
{
    ERRP_GUARD();
    PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
    CPUPPCState *env = &cpu->env;

    if (!val) {
        /* capability disabled by default */
        return;
    }

    if (tcg_enabled()) {
        if (!(env->insns_flags2 & PPC2_ISA300)) {
            error_setg(errp, "Nested-PAPR only supported on POWER9 and later");
            error_append_hint(errp,
                              "Try appending -machine cap-nested-papr=off\n");
            return;
        }
        if (spapr_nested_api(spapr) &&
            spapr_nested_api(spapr) != NESTED_API_PAPR) {
            error_setg(errp, "Nested-HV APIs are mutually exclusive");
            error_append_hint(errp, "Please use either cap-nested-hv or "
                              "cap-nested-papr to proceed.\n");
            return;
        } else {
            spapr->nested.api = NESTED_API_PAPR;
        }
    } else if (kvm_enabled()) {
        error_setg(errp, "KVM implementation does not support Nested-PAPR");
        error_append_hint(errp,
                          "Try appending -machine cap-nested-papr=off\n");
    }
}

static void cap_large_decr_apply(SpaprMachineState *spapr,
                                 uint8_t val, Error **errp)
{
    ERRP_GUARD();
    PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!val) {
        return; /* Disabled by default */
    }

    if (tcg_enabled()) {
        if (!ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                              spapr->max_compat_pvr)) {
            error_setg(errp, "Large decrementer only supported on POWER9");
            error_append_hint(errp, "Try -cpu POWER9\n");
            return;
        }
    } else if (kvm_enabled()) {
        int kvm_nr_bits = kvmppc_get_cap_large_decr();

        if (!kvm_nr_bits) {
            error_setg(errp, "No large decrementer support");
            error_append_hint(errp,
                              "Try appending -machine cap-large-decr=off\n");
        } else if (pcc->lrg_decr_bits != kvm_nr_bits) {
            error_setg(errp,
                       "KVM large decrementer size (%d) differs from model (%d)",
                       kvm_nr_bits, pcc->lrg_decr_bits);
            error_append_hint(errp,
                              "Try appending -machine cap-large-decr=off\n");
        }
    }
}

static void cap_large_decr_cpu_apply(SpaprMachineState *spapr,
                                     PowerPCCPU *cpu,
                                     uint8_t val, Error **errp)
{
    ERRP_GUARD();
    CPUPPCState *env = &cpu->env;
    target_ulong lpcr = env->spr[SPR_LPCR];

    if (kvm_enabled()) {
        if (kvmppc_enable_cap_large_decr(cpu, val)) {
            error_setg(errp, "No large decrementer support");
            error_append_hint(errp,
                              "Try appending -machine cap-large-decr=off\n");
        }
    }

    if (val) {
        lpcr |= LPCR_LD;
    } else {
        lpcr &= ~LPCR_LD;
    }
    ppc_store_lpcr(cpu, lpcr);
}

static void cap_ccf_assist_apply(SpaprMachineState *spapr, uint8_t val,
                                 Error **errp)
{
    ERRP_GUARD();
    uint8_t kvm_val = kvmppc_get_cap_count_cache_flush_assist();

    if (tcg_enabled() && val) {
        /* TCG doesn't implement anything here, but allow with a warning */
        warn_report("TCG doesn't support requested feature, cap-ccf-assist=on");
    } else if (kvm_enabled() && (val > kvm_val)) {
        uint8_t kvm_ibs = kvmppc_get_cap_safe_indirect_branch();

        if (kvm_ibs == SPAPR_CAP_FIXED_CCD) {
            /*
             * If we don't have CCF assist on the host, the assist
             * instruction is a harmless no-op. It won't correctly
             * implement the cache count flush *but* if we have
             * count-cache-disabled in the host, that flush is
             * unnecessary. So, specifically allow this case. This
             * allows us to have better performance on POWER9 DD2.3,
             * while still working on POWER9 DD2.2 and POWER8 host
             * cpus.
             */
            return;
        }
        error_setg(errp,
                   "Requested count cache flush assist capability level not supported by KVM");
        error_append_hint(errp, "Try appending -machine cap-ccf-assist=off\n");
    }
}

static void cap_fwnmi_apply(SpaprMachineState *spapr, uint8_t val,
                            Error **errp)
{
    ERRP_GUARD();
    if (!val) {
        return; /* Disabled by default */
    }

    if (kvm_enabled()) {
        if (!kvmppc_get_fwnmi()) {
            error_setg(errp,
                       "Firmware Assisted Non-Maskable Interrupts(FWNMI) not supported by KVM.");
            error_append_hint(errp, "Try appending -machine cap-fwnmi=off\n");
        }
    }
}

static void cap_rpt_invalidate_apply(SpaprMachineState *spapr,
                                     uint8_t val, Error **errp)
{
    ERRP_GUARD();

    if (!val) {
        /* capability disabled by default */
        return;
    }

    if (tcg_enabled()) {
        error_setg(errp, "No H_RPT_INVALIDATE support in TCG");
        error_append_hint(errp,
                          "Try appending -machine cap-rpt-invalidate=off\n");
    } else if (kvm_enabled()) {
        if (!kvmppc_has_cap_mmu_radix()) {
            error_setg(errp, "H_RPT_INVALIDATE only supported on Radix");
            return;
        }

        if (!kvmppc_has_cap_rpt_invalidate()) {
            error_setg(errp,
                       "KVM implementation does not support H_RPT_INVALIDATE");
            error_append_hint(errp,
                              "Try appending -machine cap-rpt-invalidate=off\n");
        } else {
            kvmppc_enable_h_rpt_invalidate();
        }
    }
}

static void cap_ail_mode_3_apply(SpaprMachineState *spapr,
                                 uint8_t val, Error **errp)
{
    ERRP_GUARD();
    PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!val) {
        return;
    }

    if (tcg_enabled()) {
        /* AIL-3 is only supported on POWER8 and above CPUs. */
        if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
            error_setg(errp, "TCG only supports cap-ail-mode-3 on POWER8 and later CPUs");
            error_append_hint(errp, "Try appending -machine cap-ail-mode-3=off\n");
            return;
        }
    } else if (kvm_enabled()) {
        if (!kvmppc_supports_ail_3()) {
            error_setg(errp, "KVM implementation does not support cap-ail-mode-3");
            error_append_hint(errp, "Try appending -machine cap-ail-mode-3=off\n");
            return;
        }
    }
}

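/*
 * One entry per SPAPR_CAP_* constant: the QOM property name and type, its
 * accessors, any allowed string values, and the apply/cpu_apply hooks used
 * to validate and apply the requested level.
 */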
SpaprCapabilityInfo capability_table[SPAPR_CAP_NUM] = {
    [SPAPR_CAP_HTM] = {
        .name = "htm",
        .description = "Allow Hardware Transactional Memory (HTM)",
        .index = SPAPR_CAP_HTM,
        .get = spapr_cap_get_bool,
        .set = spapr_cap_set_bool,
        .type = "bool",
        .apply = cap_htm_apply,
    },
    [SPAPR_CAP_VSX] = {
        .name = "vsx",
        .description = "Allow Vector Scalar Extensions (VSX)",
        .index = SPAPR_CAP_VSX,
        .get = spapr_cap_get_bool,
        .set = spapr_cap_set_bool,
        .type = "bool",
        .apply = cap_vsx_apply,
    },
    [SPAPR_CAP_DFP] = {
        .name = "dfp",
        .description = "Allow Decimal Floating Point (DFP)",
        .index = SPAPR_CAP_DFP,
        .get = spapr_cap_get_bool,
        .set = spapr_cap_set_bool,
        .type = "bool",
        .apply = cap_dfp_apply,
    },
    [SPAPR_CAP_CFPC] = {
        .name = "cfpc",
        .description = "Cache Flush on Privilege Change" VALUE_DESC_TRISTATE,
        .index = SPAPR_CAP_CFPC,
        .get = spapr_cap_get_string,
        .set = spapr_cap_set_string,
        .type = "string",
        .possible = &cap_cfpc_possible,
        .apply = cap_safe_cache_apply,
    },
    [SPAPR_CAP_SBBC] = {
        .name = "sbbc",
        .description = "Speculation Barrier Bounds Checking" VALUE_DESC_TRISTATE,
        .index = SPAPR_CAP_SBBC,
        .get = spapr_cap_get_string,
        .set = spapr_cap_set_string,
        .type = "string",
        .possible = &cap_sbbc_possible,
        .apply = cap_safe_bounds_check_apply,
    },
    [SPAPR_CAP_IBS] = {
        .name = "ibs",
        .description =
            "Indirect Branch Speculation (broken, workaround, fixed-ibs,"
            " fixed-ccd, fixed-na)",
        .index = SPAPR_CAP_IBS,
        .get = spapr_cap_get_string,
        .set = spapr_cap_set_string,
        .type = "string",
        .possible = &cap_ibs_possible,
        .apply = cap_safe_indirect_branch_apply,
    },
    [SPAPR_CAP_HPT_MAXPAGESIZE] = {
        .name = "hpt-max-page-size",
        .description = "Maximum page size for Hash Page Table guests",
        .index = SPAPR_CAP_HPT_MAXPAGESIZE,
        .get = spapr_cap_get_pagesize,
        .set = spapr_cap_set_pagesize,
        .type = "int",
        .apply = cap_hpt_maxpagesize_apply,
        .cpu_apply = cap_hpt_maxpagesize_cpu_apply,
        .migrate_needed = cap_hpt_maxpagesize_migrate_needed,
    },
    [SPAPR_CAP_NESTED_KVM_HV] = {
        .name = "nested-hv",
        .description = "Allow Nested KVM-HV",
        .index = SPAPR_CAP_NESTED_KVM_HV,
        .get = spapr_cap_get_bool,
        .set = spapr_cap_set_bool,
        .type = "bool",
        .apply = cap_nested_kvm_hv_apply,
    },
    [SPAPR_CAP_NESTED_PAPR] = {
        .name = "nested-papr",
        .description = "Allow Nested HV (PAPR API)",
        .index = SPAPR_CAP_NESTED_PAPR,
        .get = spapr_cap_get_bool,
        .set = spapr_cap_set_bool,
        .type = "bool",
        .apply = cap_nested_papr_apply,
    },
    [SPAPR_CAP_LARGE_DECREMENTER] = {
        .name = "large-decr",
        .description = "Allow Large Decrementer",
        .index = SPAPR_CAP_LARGE_DECREMENTER,
        .get = spapr_cap_get_bool,
        .set = spapr_cap_set_bool,
        .type = "bool",
        .apply = cap_large_decr_apply,
        .cpu_apply = cap_large_decr_cpu_apply,
    },
    [SPAPR_CAP_CCF_ASSIST] = {
        .name = "ccf-assist",
        .description = "Count Cache Flush Assist via HW Instruction",
        .index = SPAPR_CAP_CCF_ASSIST,
        .get = spapr_cap_get_bool,
        .set = spapr_cap_set_bool,
        .type = "bool",
        .apply = cap_ccf_assist_apply,
    },
    [SPAPR_CAP_FWNMI] = {
        .name = "fwnmi",
        .description = "Implements PAPR FWNMI option",
        .index = SPAPR_CAP_FWNMI,
        .get = spapr_cap_get_bool,
        .set = spapr_cap_set_bool,
        .type = "bool",
        .apply = cap_fwnmi_apply,
    },
    [SPAPR_CAP_RPT_INVALIDATE] = {
        .name = "rpt-invalidate",
        .description = "Allow H_RPT_INVALIDATE",
        .index = SPAPR_CAP_RPT_INVALIDATE,
        .get = spapr_cap_get_bool,
        .set = spapr_cap_set_bool,
        .type = "bool",
        .apply = cap_rpt_invalidate_apply,
    },
    [SPAPR_CAP_AIL_MODE_3] = {
        .name = "ail-mode-3",
        .description = "Alternate Interrupt Location (AIL) mode 3 support",
        .index = SPAPR_CAP_AIL_MODE_3,
        .get = spapr_cap_get_bool,
        .set = spapr_cap_set_bool,
        .type = "bool",
        .apply = cap_ail_mode_3_apply,
    },
};

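/*
 * Compute the machine-class default capability set, downgrading any
 * capability that the given CPU type (under the current max-cpu-compat
 * limit) cannot support.
 */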
static SpaprCapabilities default_caps_with_cpu(SpaprMachineState *spapr,
                                               const char *cputype)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    SpaprCapabilities caps;

    caps = smc->default_caps;

    if (!ppc_type_check_compat(cputype, CPU_POWERPC_LOGICAL_3_00,
                               0, spapr->max_compat_pvr)) {
        caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF;
    }

    if (!ppc_type_check_compat(cputype, CPU_POWERPC_LOGICAL_2_07,
                               0, spapr->max_compat_pvr)) {
        caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_OFF;
        caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN;
        caps.caps[SPAPR_CAP_AIL_MODE_3] = SPAPR_CAP_OFF;
    }

    if (!ppc_type_check_compat(cputype, CPU_POWERPC_LOGICAL_2_06_PLUS,
                               0, spapr->max_compat_pvr)) {
        caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN;
    }

    if (!ppc_type_check_compat(cputype, CPU_POWERPC_LOGICAL_2_06,
                               0, spapr->max_compat_pvr)) {
        caps.caps[SPAPR_CAP_VSX] = SPAPR_CAP_OFF;
        caps.caps[SPAPR_CAP_DFP] = SPAPR_CAP_OFF;
        caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN;
    }

    /* This is for pseries-2.12 and older */
    if (smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] == 0) {
        uint8_t mps;

        if (kvmppc_hpt_needs_host_contiguous_pages()) {
            mps = ctz64(qemu_minrampagesize());
        } else {
            mps = 34; /* allow everything up to 16GiB, i.e. everything */
        }

        caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = mps;
    }

    return caps;
}

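/*
 * Migration hooks: spapr->mig is the capability set carried in the migration
 * stream. Reset it to the defaults before loading so we can tell which
 * values actually came from the source, and snapshot the effective caps
 * before saving.
 */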
int spapr_caps_pre_load(void *opaque)
{
    SpaprMachineState *spapr = opaque;

    /* Set to default so we can tell if this came in with the migration */
    spapr->mig = spapr->def;
    return 0;
}

int spapr_caps_pre_save(void *opaque)
{
    SpaprMachineState *spapr = opaque;

    spapr->mig = spapr->eff;
    return 0;
}

/* This has to be called from the top-level spapr post_load, not the
 * caps specific one. Otherwise it wouldn't be called when the source
 * caps are all defaults, which could still conflict with overridden
 * caps on the destination */
int spapr_caps_post_migration(SpaprMachineState *spapr)
{
    int i;
    bool ok = true;
    SpaprCapabilities dstcaps = spapr->eff;
    SpaprCapabilities srccaps;

    srccaps = default_caps_with_cpu(spapr, MACHINE(spapr)->cpu_type);
    for (i = 0; i < SPAPR_CAP_NUM; i++) {
        /* If not default value then assume came in with the migration */
        if (spapr->mig.caps[i] != spapr->def.caps[i]) {
            srccaps.caps[i] = spapr->mig.caps[i];
        }
    }

    for (i = 0; i < SPAPR_CAP_NUM; i++) {
        SpaprCapabilityInfo *info = &capability_table[i];

        if (srccaps.caps[i] > dstcaps.caps[i]) {
            error_report("cap-%s higher level (%d) in incoming stream than on destination (%d)",
                         info->name, srccaps.caps[i], dstcaps.caps[i]);
            ok = false;
        }

        if (srccaps.caps[i] < dstcaps.caps[i]) {
            warn_report("cap-%s lower level (%d) in incoming stream than on destination (%d)",
                        info->name, srccaps.caps[i], dstcaps.caps[i]);
        }
    }

    return ok ? 0 : -EINVAL;
}

/* Used to generate the migration field and needed function for a spapr cap */
#define SPAPR_CAP_MIG_STATE(sname, cap)                 \
static bool spapr_cap_##sname##_needed(void *opaque)    \
{                                                       \
    SpaprMachineState *spapr = opaque;                  \
    bool (*needed)(void *opaque) =                      \
        capability_table[cap].migrate_needed;           \
                                                        \
    return needed ? needed(opaque) : true &&            \
        spapr->cmd_line_caps[cap] &&                    \
        (spapr->eff.caps[cap] !=                        \
         spapr->def.caps[cap]);                         \
}                                                       \
                                                        \
const VMStateDescription vmstate_spapr_cap_##sname = {  \
    .name = "spapr/cap/" #sname,                        \
    .version_id = 1,                                    \
    .minimum_version_id = 1,                            \
    .needed = spapr_cap_##sname##_needed,               \
    .fields = (const VMStateField[]) {                  \
        VMSTATE_UINT8(mig.caps[cap],                    \
                      SpaprMachineState),               \
        VMSTATE_END_OF_LIST()                           \
    },                                                  \
}

SPAPR_CAP_MIG_STATE(htm, SPAPR_CAP_HTM);
SPAPR_CAP_MIG_STATE(vsx, SPAPR_CAP_VSX);
SPAPR_CAP_MIG_STATE(dfp, SPAPR_CAP_DFP);
SPAPR_CAP_MIG_STATE(cfpc, SPAPR_CAP_CFPC);
SPAPR_CAP_MIG_STATE(sbbc, SPAPR_CAP_SBBC);
SPAPR_CAP_MIG_STATE(ibs, SPAPR_CAP_IBS);
SPAPR_CAP_MIG_STATE(hpt_maxpagesize, SPAPR_CAP_HPT_MAXPAGESIZE);
SPAPR_CAP_MIG_STATE(nested_kvm_hv, SPAPR_CAP_NESTED_KVM_HV);
SPAPR_CAP_MIG_STATE(nested_papr, SPAPR_CAP_NESTED_PAPR);
SPAPR_CAP_MIG_STATE(large_decr, SPAPR_CAP_LARGE_DECREMENTER);
SPAPR_CAP_MIG_STATE(ccf_assist, SPAPR_CAP_CCF_ASSIST);
SPAPR_CAP_MIG_STATE(fwnmi, SPAPR_CAP_FWNMI);
SPAPR_CAP_MIG_STATE(rpt_invalidate, SPAPR_CAP_RPT_INVALIDATE);
SPAPR_CAP_MIG_STATE(ail_mode_3, SPAPR_CAP_AIL_MODE_3);

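/*
 * Record the default capability set for this machine and CPU type, and use
 * the defaults for any capability not overridden on the command line.
 */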
void spapr_caps_init(SpaprMachineState *spapr)
{
    SpaprCapabilities default_caps;
    int i;

    /* Compute the actual set of caps we should run with */
    default_caps = default_caps_with_cpu(spapr, MACHINE(spapr)->cpu_type);

    for (i = 0; i < SPAPR_CAP_NUM; i++) {
        /* Store the defaults */
        spapr->def.caps[i] = default_caps.caps[i];
        /* If not set on the command line then apply the default value */
        if (!spapr->cmd_line_caps[i]) {
            spapr->eff.caps[i] = default_caps.caps[i];
        }
    }
}

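/*
 * Run each capability's apply hook against the effective levels; failures
 * are reported via &error_fatal.
 */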
void spapr_caps_apply(SpaprMachineState *spapr)
{
    int i;

    for (i = 0; i < SPAPR_CAP_NUM; i++) {
        SpaprCapabilityInfo *info = &capability_table[i];

        /*
         * If the apply function can't set the desired level, it reports the
         * problem through errp; with &error_fatal that terminates QEMU.
         */
        info->apply(spapr, spapr->eff.caps[i], &error_fatal);
    }
}

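/* Per-vCPU counterpart of spapr_caps_apply(): run each cap's cpu_apply hook. */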
void spapr_caps_cpu_apply(SpaprMachineState *spapr, PowerPCCPU *cpu)
{
    int i;

    for (i = 0; i < SPAPR_CAP_NUM; i++) {
        SpaprCapabilityInfo *info = &capability_table[i];

        /*
         * If the cpu_apply function can't set the desired level, it reports
         * the problem through errp; with &error_fatal that terminates QEMU.
         */
        if (info->cpu_apply) {
            info->cpu_apply(spapr, cpu, spapr->eff.caps[i], &error_fatal);
        }
    }
}

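/*
 * Register one writable "cap-<name>" QOM class property per capability,
 * wired to the per-cap getter/setter from capability_table.
 */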
void spapr_caps_add_properties(SpaprMachineClass *smc)
{
    ObjectClass *klass = OBJECT_CLASS(smc);
    int i;

    for (i = 0; i < ARRAY_SIZE(capability_table); i++) {
        SpaprCapabilityInfo *cap = &capability_table[i];
        g_autofree char *name = g_strdup_printf("cap-%s", cap->name);
        g_autofree char *desc = g_strdup_printf("%s", cap->description);

        object_class_property_add(klass, name, cap->type,
                                  cap->get, cap->set,
                                  NULL, cap);

        object_class_property_set_description(klass, name, desc);
    }
}