xref: /openbmc/qemu/hw/ppc/spapr_hcall.c (revision 9884abee)
1 #include "qemu/osdep.h"
2 #include "sysemu/sysemu.h"
3 #include "cpu.h"
4 #include "helper_regs.h"
5 #include "hw/ppc/spapr.h"
6 #include "mmu-hash64.h"
7 #include "cpu-models.h"
8 #include "trace.h"
9 #include "kvm_ppc.h"
10 
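/*
 * Helper to update an SPR on a (possibly remote) vCPU.  run_on_cpu()
 * executes do_spr_sync() in the context of the target CPU, so the
 * read-modify-write of the SPR does not race with that vCPU's thread;
 * only the bits selected by @mask are changed.
 */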
struct SPRSyncState {
    CPUState *cs;
    int spr;
    target_ulong value;
    target_ulong mask;
};

static void do_spr_sync(void *arg)
{
    struct SPRSyncState *s = arg;
    PowerPCCPU *cpu = POWERPC_CPU(s->cs);
    CPUPPCState *env = &cpu->env;

    cpu_synchronize_state(s->cs);
    env->spr[s->spr] &= ~s->mask;
    env->spr[s->spr] |= s->value;
}

static void set_spr(CPUState *cs, int spr, target_ulong value,
                    target_ulong mask)
{
    struct SPRSyncState s = {
        .cs = cs,
        .spr = spr,
        .value = value,
        .mask = mask
    };
    run_on_cpu(cs, do_spr_sync, &s);
}

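/*
 * Build the RB operand for a tlbie instruction from the two halves of
 * an HPTE (@v, @r) and the entry's index in the hash table.  The low
 * virtual address bits that are not kept in the AVA field are
 * reconstructed from @pte_index, in the format tlbie expects.
 */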
static target_ulong compute_tlbie_rb(target_ulong v, target_ulong r,
                                     target_ulong pte_index)
{
    target_ulong rb, va_low;

    rb = (v & ~0x7fULL) << 16; /* AVA field */
    va_low = pte_index >> 3;
    if (v & HPTE64_V_SECONDARY) {
        va_low = ~va_low;
    }
    /* xor vsid from AVA */
    if (!(v & HPTE64_V_1TB_SEG)) {
        va_low ^= v >> 12;
    } else {
        va_low ^= v >> 24;
    }
    va_low &= 0x7ff;
    if (v & HPTE64_V_LARGE) {
        rb |= 1;                         /* L field */
#if 0 /* Disable that P7 specific bit for now */
        if (r & 0xff000) {
            /* non-16MB large page, must be 64k */
            /* (masks depend on page size) */
            rb |= 0x1000;                /* page encoding in LP field */
            rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
            rb |= (va_low & 0xfe);       /* AVAL field */
        }
#endif
    } else {
        /* 4kB page */
        rb |= (va_low & 0x7ff) << 12;   /* remaining 11b of AVA */
    }
    rb |= (v >> 54) & 0x300;            /* B field */
    return rb;
}

static inline bool valid_pte_index(CPUPPCState *env, target_ulong pte_index)
{
    /*
     * hash value/pteg group index is normalized by htab_mask
     */
    if (((pte_index & ~7ULL) / HPTES_PER_GROUP) & ~env->htab_mask) {
        return false;
    }
    return true;
}

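/*
 * H_ENTER: insert a new entry into the hashed page table.  args[] holds
 * the flags, the PTE group index and the two halves of the HPTE.
 * Unless H_EXACT is given, the first free slot in the group is used and
 * its index is returned in args[0].
 */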
static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    MachineState *machine = MACHINE(spapr);
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    target_ulong page_shift = 12;
    target_ulong raddr;
    target_ulong index;
    uint64_t token;

    /* only handle 4k and 16M pages for now */
    if (pteh & HPTE64_V_LARGE) {
#if 0 /* We don't support 64k pages yet */
        if ((ptel & 0xf000) == 0x1000) {
            /* 64k page */
        } else
#endif
        if ((ptel & 0xff000) == 0) {
            /* 16M page */
            page_shift = 24;
            /* lowest AVA bit must be 0 for 16M pages */
            if (pteh & 0x80) {
                return H_PARAMETER;
            }
        } else {
            return H_PARAMETER;
        }
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << page_shift) - 1);

    if (raddr < machine->ram_size) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        if ((ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M)) != HPTE64_R_I) {
            return H_PARAMETER;
        }
    }

    pteh &= ~0x60ULL;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    index = 0;
    if (likely((flags & H_EXACT) == 0)) {
        pte_index &= ~7ULL;
        token = ppc_hash64_start_access(cpu, pte_index);
        for (; index < 8; index++) {
            if ((ppc_hash64_load_hpte0(env, token, index) & HPTE64_V_VALID) == 0) {
                break;
            }
        }
        ppc_hash64_stop_access(token);
        if (index == 8) {
            return H_PTEG_FULL;
        }
    } else {
        token = ppc_hash64_start_access(cpu, pte_index);
        if (ppc_hash64_load_hpte0(env, token, 0) & HPTE64_V_VALID) {
            ppc_hash64_stop_access(token);
            return H_PTEG_FULL;
        }
        ppc_hash64_stop_access(token);
    }

    ppc_hash64_store_hpte(env, pte_index + index,
                          pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = pte_index + index;
    return H_SUCCESS;
}

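/*
 * remove_hpte() is shared by H_REMOVE and H_BULK_REMOVE.  It checks the
 * H_AVPN/H_ANDCOND conditions, returns the old HPTE contents through
 * @vp/@rp, clears the entry and invalidates the matching TLB entry.
 */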
typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;

static RemoveResult remove_hpte(CPUPPCState *env, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    uint64_t token;
    target_ulong v, r, rb;

    if (!valid_pte_index(env, ptex)) {
        return REMOVE_PARM;
    }

    token = ppc_hash64_start_access(ppc_env_get_cpu(env), ptex);
    v = ppc_hash64_load_hpte0(env, token, 0);
    r = ppc_hash64_load_hpte1(env, token, 0);
    ppc_hash64_stop_access(token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    ppc_hash64_store_hpte(env, ptex, HPTE64_V_HPTE_DIRTY, 0);
    rb = compute_tlbie_rb(v, r, ptex);
    ppc_tlb_invalidate_one(env, rb);
    return REMOVE_SUCCESS;
}

static target_ulong h_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(env, pte_index, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}

#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
#define   H_BULK_REMOVE_END            0xc000000000000000ULL
#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH        4

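/*
 * H_BULK_REMOVE: each of the four request slots is a pair of
 * doublewords, a translation specifier and an AVPN value.  Type, flags
 * and PTEX are packed into the specifier as defined above; the
 * completion code and the reference/change bits are written back into
 * the same doubleword.
 */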
static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(env, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            return H_PARAMETER;

        case REMOVE_HW:
            return H_HARDWARE;
        }
    }

    return H_SUCCESS;
}

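/*
 * H_PROTECT: update the protection bits (pp0/pp, N, keys) of an
 * existing HPTE.  The entry is temporarily invalidated and the TLB
 * entry flushed before the updated second doubleword is written back.
 */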
static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    uint64_t token;
    target_ulong v, r, rb;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    token = ppc_hash64_start_access(cpu, pte_index);
    v = ppc_hash64_load_hpte0(env, token, 0);
    r = ppc_hash64_load_hpte1(env, token, 0);
    ppc_hash64_stop_access(token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    rb = compute_tlbie_rb(v, r, pte_index);
    ppc_hash64_store_hpte(env, pte_index,
                          (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_tlb_invalidate_one(env, rb);
    /* Don't need a memory barrier, due to qemu's global lock */
    ppc_hash64_store_hpte(env, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}

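/*
 * H_READ: return the current contents of one HPTE, or of four
 * consecutive entries when H_READ_4 is set, read directly from the
 * externally managed hash table.
 */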
static target_ulong h_read(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    uint8_t *hpte;
    int i, ridx, n_entries = 1;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        pte_index &= ~(3ULL);
        n_entries = 4;
    }

    hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);

    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ldq_p(hpte);
        args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2));
        hpte += HASH_PTE_SIZE_64;
    }

    return H_SUCCESS;
}

static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    /* FIXME: actually implement this */
    return H_HARDWARE;
}

#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

#define VPA_MIN_SIZE           640
#define VPA_SIZE_OFFSET        0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL    0x2

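/*
 * H_REGISTER_VPA helpers.  A guest registers (or deregisters) three
 * per-vCPU areas: the Virtual Processor Area itself, the SLB shadow
 * buffer and the dispatch trace log.  The VPA must be registered first
 * and cannot be deregistered while either of the other two is still in
 * place.
 */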
static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint16_t size;
    uint8_t tmp;

    if (vpa == 0) {
        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
        return H_HARDWARE;
    }

    if (vpa % env->dcache_line_size) {
        return H_PARAMETER;
    }
    /* FIXME: bounds check the address */

    size = lduw_be_phys(cs->as, vpa + 0x4);

    if (size < VPA_MIN_SIZE) {
        return H_PARAMETER;
    }

    /* VPA is not allowed to cross a page boundary */
    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    env->vpa_addr = vpa;

    tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET);
    tmp |= VPA_SHARED_PROC_VAL;
    stb_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);

    return H_SUCCESS;
}

static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
{
    if (env->slb_shadow_addr) {
        return H_RESOURCE;
    }

    if (env->dtl_addr) {
        return H_RESOURCE;
    }

    env->vpa_addr = 0;
    return H_SUCCESS;
}

static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->slb_shadow_addr = addr;
    env->slb_shadow_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    env->slb_shadow_addr = 0;
    env->slb_shadow_size = 0;
    return H_SUCCESS;
}

static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->dtl_addr = addr;
    env->dtl_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
{
    env->dtl_addr = 0;
    env->dtl_size = 0;

    return H_SUCCESS;
}

static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    CPUPPCState *tenv;
    PowerPCCPU *tcpu;

    tcpu = ppc_get_vcpu_by_dt_id(procno);
    if (!tcpu) {
        return H_PARAMETER;
    }
    tenv = &tcpu->env;

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tenv, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tenv, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tenv, vpa);
        break;
    }

    return ret;
}

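/*
 * H_CEDE: the vCPU gives up the processor until its next interrupt.
 * External interrupts are enabled in the MSR and, if no work is
 * pending, the CPU thread is halted.
 */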
static target_ulong h_cede(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);
    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }
    return H_SUCCESS;
}

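/*
 * KVMPPC_H_RTAS: forward an RTAS call made through the hypervisor.
 * args[0] is the guest real address of the RTAS argument buffer, whose
 * first three words hold the token, the number of inputs and the number
 * of outputs, followed by the input arguments themselves.
 */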
static target_ulong h_rtas(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = rtas_ld(rtas_r3, 0);
    uint32_t nargs = rtas_ld(rtas_r3, 1);
    uint32_t nret = rtas_ld(rtas_r3, 2);

    return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4*nargs);
}

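/*
 * H_LOGICAL_CI_LOAD / H_LOGICAL_CI_STORE (and their CACHE variants):
 * single accesses of 1, 2, 4 or 8 bytes to a guest logical address,
 * performed through the CPU's address space.
 */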
static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(cs->as, addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(cs->as, addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(cs->as, addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(cs->as, addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val  = args[2];

    switch (size) {
    case 1:
        stb_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(cs->as, addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

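/*
 * KVMPPC_H_LOGICAL_MEMOP: copy (or bitwise-invert while copying) a
 * range of guest logical memory, element by element.  When the
 * destination overlaps the source from above, the copy runs backwards
 * so the data is not clobbered mid-transfer.
 */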
static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst   = args[0]; /* Destination address */
    target_ulong src   = args[1]; /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op    = args[4]; /* 0 = copy, 1 = invert */
    uint64_t tmp;
    unsigned int mask = (1 << esize) - 1;
    int step = 1 << esize;

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    if (dst >= src && dst < (src + (count << esize))) {
        dst = dst + ((count - 1) << esize);
        src = src + ((count - 1) << esize);
        step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(cs->as, src);
            break;
        case 1:
            tmp = lduw_phys(cs->as, src);
            break;
        case 2:
            tmp = ldl_phys(cs->as, src);
            break;
        case 3:
            tmp = ldq_phys(cs->as, src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0:
            stb_phys(cs->as, dst, tmp);
            break;
        case 1:
            stw_phys(cs->as, dst, tmp);
            break;
        case 2:
            stl_phys(cs->as, dst, tmp);
            break;
        case 3:
            stq_phys(cs->as, dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}

static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

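/*
 * H_SET_MODE, resource H_SET_MODE_RESOURCE_LE: flip LPCR_ILE on every
 * CPU so interrupts are taken in the requested endianness, and notify
 * the spapr PCI/VGA code of the switch.
 */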
static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
                                           target_ulong mflags,
                                           target_ulong value1,
                                           target_ulong value2)
{
    CPUState *cs;

    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    switch (mflags) {
    case H_SET_MODE_ENDIAN_BIG:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, 0, LPCR_ILE);
        }
        spapr_pci_switch_vga(true);
        return H_SUCCESS;

    case H_SET_MODE_ENDIAN_LITTLE:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE);
        }
        spapr_pci_switch_vga(false);
        return H_SUCCESS;
    }

    return H_UNSUPPORTED_FLAG;
}

static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
                                                        target_ulong mflags,
                                                        target_ulong value1,
                                                        target_ulong value2)
{
    CPUState *cs;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    target_ulong prefix;

    if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
        return H_P2;
    }
    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    switch (mflags) {
    case H_SET_MODE_ADDR_TRANS_NONE:
        prefix = 0;
        break;
    case H_SET_MODE_ADDR_TRANS_0001_8000:
        prefix = 0x18000;
        break;
    case H_SET_MODE_ADDR_TRANS_C000_0000_0000_4000:
        prefix = 0xC000000000004000ULL;
        break;
    default:
        return H_UNSUPPORTED_FLAG;
    }

    CPU_FOREACH(cs) {
        /* Update the iterated CPU, not just the one issuing the hcall */
        CPUPPCState *env = &POWERPC_CPU(cs)->env;

        set_spr(cs, SPR_LPCR, mflags << LPCR_AIL_SHIFT, LPCR_AIL);
        env->excp_prefix = prefix;
    }

    return H_SUCCESS;
}

static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    target_ulong resource = args[1];
    target_ulong ret = H_P2;

    switch (resource) {
    case H_SET_MODE_RESOURCE_LE:
        ret = h_set_mode_resource_le(cpu, args[0], args[2], args[3]);
        break;
    case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
        ret = h_set_mode_resource_addr_trans_mode(cpu, args[0],
                                                  args[2], args[3]);
        break;
    }

    return ret;
}

/*
 * Return the address of the requested option vector @vector in the
 * option vector table @table.
 */
static target_ulong cas_get_option_vector(int vector, target_ulong table)
{
    int i;
    char nr_vectors, nr_entries;

    if (!table) {
        return 0;
    }

    nr_vectors = (ldl_phys(&address_space_memory, table) >> 24) + 1;
    if (!vector || vector > nr_vectors) {
        return 0;
    }
    table++; /* skip nr option vectors */

    for (i = 0; i < vector - 1; i++) {
        nr_entries = ldl_phys(&address_space_memory, table) >> 24;
        table += nr_entries + 2;
    }
    return table;
}

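/*
 * ibm,client-architecture-support (CAS) compatibility handling: each
 * vCPU is switched to the negotiated compatibility mode from its own
 * thread via run_on_cpu(), and get_compat_level() maps a logical PVR
 * to a comparable ISA level number.
 */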
typedef struct {
    PowerPCCPU *cpu;
    uint32_t cpu_version;
    int ret;
} SetCompatState;

static void do_set_compat(void *arg)
{
    SetCompatState *s = arg;

    cpu_synchronize_state(CPU(s->cpu));
    s->ret = ppc_set_compat(s->cpu, s->cpu_version);
}

#define get_compat_level(cpuver) ( \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_05) ? 2050 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06) ? 2060 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06_PLUS) ? 2061 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_07) ? 2070 : 0)

#define OV5_DRCONF_MEMORY 0x20

static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
                                                  sPAPRMachineState *spapr,
                                                  target_ulong opcode,
                                                  target_ulong *args)
{
    target_ulong list = args[0], ov_table;
    PowerPCCPUClass *pcc_ = POWERPC_CPU_GET_CLASS(cpu_);
    CPUState *cs;
    bool cpu_match = false, cpu_update = true, memory_update = false;
    unsigned old_cpu_version = cpu_->cpu_version;
    unsigned compat_lvl = 0, cpu_version = 0;
    unsigned max_lvl = get_compat_level(cpu_->max_compat);
    int counter;
    char ov5_byte2;

    /* Parse PVR list */
    for (counter = 0; counter < 512; ++counter) {
        uint32_t pvr, pvr_mask;

        pvr_mask = rtas_ld(list, 0);
        list += 4;
        pvr = rtas_ld(list, 0);
        list += 4;

        trace_spapr_cas_pvr_try(pvr);
        if (!max_lvl &&
            ((cpu_->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask))) {
            cpu_match = true;
            cpu_version = 0;
        } else if (pvr == cpu_->cpu_version) {
            cpu_match = true;
            cpu_version = cpu_->cpu_version;
        } else if (!cpu_match) {
            /* If it is a logical PVR, try to determine the highest level */
            unsigned lvl = get_compat_level(pvr);
            if (lvl) {
                bool is205 = (pcc_->pcr_mask & PCR_COMPAT_2_05) &&
                     (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_05));
                bool is206 = (pcc_->pcr_mask & PCR_COMPAT_2_06) &&
                    ((lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06)) ||
                    (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06_PLUS)));

                if (is205 || is206) {
                    if (!max_lvl) {
                        /* User did not set the level, choose the highest */
                        if (compat_lvl <= lvl) {
                            compat_lvl = lvl;
                            cpu_version = pvr;
                        }
                    } else if (max_lvl >= lvl) {
                        /* User chose the level, don't set higher than this */
                        compat_lvl = lvl;
                        cpu_version = pvr;
                    }
                }
            }
        }
        /* Terminator record */
        if (~pvr_mask & pvr) {
            break;
        }
    }

    /* Parsing finished */
    trace_spapr_cas_pvr(cpu_->cpu_version, cpu_match,
                        cpu_version, pcc_->pcr_mask);

    /* Update CPUs */
    if (old_cpu_version != cpu_version) {
        CPU_FOREACH(cs) {
            SetCompatState s = {
                .cpu = POWERPC_CPU(cs),
                .cpu_version = cpu_version,
                .ret = 0
            };

            run_on_cpu(cs, do_set_compat, &s);

            if (s.ret < 0) {
                fprintf(stderr, "Unable to set compatibility mode\n");
                return H_HARDWARE;
            }
        }
    }

    if (!cpu_version) {
        cpu_update = false;
    }

    /* For future use: here @ov_table points to the first option vector */
    ov_table = list;

    list = cas_get_option_vector(5, ov_table);
    if (!list) {
        return H_SUCCESS;
    }

    /* @list now points to OV 5 */
    list += 2;
    ov5_byte2 = rtas_ld(list, 0) >> 24;
    if (ov5_byte2 & OV5_DRCONF_MEMORY) {
        memory_update = true;
    }

    if (spapr_h_cas_compose_response(spapr, args[1], args[2],
                                     cpu_update, memory_update)) {
        qemu_system_reset_request();
    }

    return H_SUCCESS;
}

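/*
 * Hypercall dispatch: PAPR hcalls (opcodes that are multiples of 4, up
 * to MAX_HCALL_OPCODE) and QEMU/KVM-PPC private hcalls live in two
 * separate tables indexed by opcode.  spapr_hypercall() looks up the
 * registered handler and returns H_FUNCTION for anything unimplemented.
 */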
static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];

void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{
    spapr_hcall_fn *slot;

    if (opcode <= MAX_HCALL_OPCODE) {
        assert((opcode & 0x3) == 0);

        slot = &papr_hypercall_table[opcode / 4];
    } else {
        assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));

        slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
    }

    assert(!(*slot));
    *slot = fn;
}

target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                             target_ulong *args)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());

    if ((opcode <= MAX_HCALL_OPCODE)
        && ((opcode & 0x3) == 0)) {
        spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= KVMPPC_HCALL_BASE) &&
               (opcode <= KVMPPC_HCALL_MAX)) {
        spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    }

    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx "\n",
                  opcode);
    return H_FUNCTION;
}

static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);

    /* hcall-dabr */
    spapr_register_hypercall(H_SET_DABR, h_set_dabr);

    /* hcall-splpar */
    spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
    spapr_register_hypercall(H_CEDE, h_cede);

    /* "debugger" hcalls (also used by SLOF). Note: we do -not- differentiate
     * here between the "CI" and the "CACHE" variants; they will use whatever
     * mapping attributes qemu is using. When using KVM, the kernel will
     * enforce the attributes more strongly.
     */
    spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
    spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);

    /* qemu/KVM-PPC specific hcalls */
    spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);

    spapr_register_hypercall(H_SET_MODE, h_set_mode);

    /* ibm,client-architecture-support support */
    spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
}

type_init(hypercall_register_types)