/* hw/ppc/spapr_hcall.c (from /openbmc/qemu, revision 52f91c37) */
#include "sysemu/sysemu.h"
#include "cpu.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"

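/*
 * Helper for setting an SPR on an arbitrary vCPU: the update is run via
 * run_on_cpu() so it executes in the target CPU's thread, with that CPU's
 * register state synchronized first.
 */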
struct SPRSyncState {
    CPUState *cs;
    int spr;
    target_ulong value;
    target_ulong mask;
};

static void do_spr_sync(void *arg)
{
    struct SPRSyncState *s = arg;
    PowerPCCPU *cpu = POWERPC_CPU(s->cs);
    CPUPPCState *env = &cpu->env;

    cpu_synchronize_state(s->cs);
    env->spr[s->spr] &= ~s->mask;
    env->spr[s->spr] |= s->value;
}

static void set_spr(CPUState *cs, int spr, target_ulong value,
                    target_ulong mask)
{
    struct SPRSyncState s = {
        .cs = cs,
        .spr = spr,
        .value = value,
        .mask = mask
    };
    run_on_cpu(cs, do_spr_sync, &s);
}

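/*
 * Build the RB operand of a tlbie instruction for the HPTE described by its
 * first (v) and second (r) doublewords and its index in the hash table.
 * The low-order AVA bits not stored in the HPTE are reconstructed from the
 * PTEG index, and the page size (L) and segment size (B) fields are set.
 */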
static target_ulong compute_tlbie_rb(target_ulong v, target_ulong r,
                                     target_ulong pte_index)
{
    target_ulong rb, va_low;

    rb = (v & ~0x7fULL) << 16; /* AVA field */
    va_low = pte_index >> 3;
    if (v & HPTE64_V_SECONDARY) {
        va_low = ~va_low;
    }
    /* xor vsid from AVA */
    if (!(v & HPTE64_V_1TB_SEG)) {
        va_low ^= v >> 12;
    } else {
        va_low ^= v >> 24;
    }
    va_low &= 0x7ff;
    if (v & HPTE64_V_LARGE) {
        rb |= 1;                         /* L field */
#if 0 /* Disable that P7 specific bit for now */
        if (r & 0xff000) {
            /* non-16MB large page, must be 64k */
            /* (masks depend on page size) */
            rb |= 0x1000;                /* page encoding in LP field */
            rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
            rb |= (va_low & 0xfe);       /* AVAL field */
        }
#endif
    } else {
        /* 4kB page */
        rb |= (va_low & 0x7ff) << 12;   /* remaining 11b of AVA */
    }
    rb |= (v >> 54) & 0x300;            /* B field */
    return rb;
}

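/* Check that a guest-supplied PTE index lies within the hash page table */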
static inline bool valid_pte_index(CPUPPCState *env, target_ulong pte_index)
{
    /*
     * hash value/pteg group index is normalized by htab_mask
     */
    if (((pte_index & ~7ULL) / HPTES_PER_GROUP) & ~env->htab_mask) {
        return false;
    }
    return true;
}

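/*
 * H_ENTER: insert an HPTE into the guest's hash page table.  The real
 * address and WIMG bits are sanity checked, then either the first free slot
 * in the PTEG is used or, with H_EXACT, the exact slot the guest asked for.
 */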
static target_ulong h_enter(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                            target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    target_ulong page_shift = 12;
    target_ulong raddr;
    target_ulong index;
    uint64_t token;

    /* only handle 4k and 16M pages for now */
    if (pteh & HPTE64_V_LARGE) {
#if 0 /* We don't support 64k pages yet */
        if ((ptel & 0xf000) == 0x1000) {
            /* 64k page */
        } else
#endif
        if ((ptel & 0xff000) == 0) {
            /* 16M page */
            page_shift = 24;
            /* lowest AVA bit must be 0 for 16M pages */
            if (pteh & 0x80) {
                return H_PARAMETER;
            }
        } else {
            return H_PARAMETER;
        }
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << page_shift) - 1);

    if (raddr < spapr->ram_limit) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        if ((ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M)) != HPTE64_R_I) {
            return H_PARAMETER;
        }
    }

    pteh &= ~0x60ULL;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    index = 0;
    if (likely((flags & H_EXACT) == 0)) {
        pte_index &= ~7ULL;
        token = ppc_hash64_start_access(cpu, pte_index);
        for (; index < 8; index++) {
            if ((ppc_hash64_load_hpte0(env, token, index) & HPTE64_V_VALID) == 0) {
                break;
            }
        }
        ppc_hash64_stop_access(token);
        if (index == 8) {
            return H_PTEG_FULL;
        }
    } else {
        token = ppc_hash64_start_access(cpu, pte_index);
        if (ppc_hash64_load_hpte0(env, token, 0) & HPTE64_V_VALID) {
            ppc_hash64_stop_access(token);
            return H_PTEG_FULL;
        }
        ppc_hash64_stop_access(token);
    }

    ppc_hash64_store_hpte(env, pte_index + index,
                          pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = pte_index + index;
    return H_SUCCESS;
}

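/*
 * Common backend for H_REMOVE and H_BULK_REMOVE: look up an HPTE, check the
 * AVPN/ANDCOND conditions, clear the entry and invalidate its TLB entry.
 * On success the old V and R doublewords are returned through vp and rp.
 */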
typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;

static RemoveResult remove_hpte(CPUPPCState *env, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    uint64_t token;
    target_ulong v, r, rb;

    if (!valid_pte_index(env, ptex)) {
        return REMOVE_PARM;
    }

    token = ppc_hash64_start_access(ppc_env_get_cpu(env), ptex);
    v = ppc_hash64_load_hpte0(env, token, 0);
    r = ppc_hash64_load_hpte1(env, token, 0);
    ppc_hash64_stop_access(token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    ppc_hash64_store_hpte(env, ptex, HPTE64_V_HPTE_DIRTY, 0);
    rb = compute_tlbie_rb(v, r, ptex);
    ppc_tlb_invalidate_one(env, rb);
    return REMOVE_SUCCESS;
}

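/* H_REMOVE: invalidate a single HPTE and return its previous contents */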
static target_ulong h_remove(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(env, pte_index, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}

#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
#define   H_BULK_REMOVE_END            0xc000000000000000ULL
#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH        4

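/*
 * H_BULK_REMOVE: process up to four "translation specifier" pairs from the
 * argument buffer, removing each requested HPTE and writing a response code
 * back into the high bits of the first word of each pair.
 */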
static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(env, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            return H_PARAMETER;

        case REMOVE_HW:
            return H_HARDWARE;
        }
    }

    return H_SUCCESS;
}

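/*
 * H_PROTECT: change the protection, key and no-execute bits of an existing
 * HPTE.  The entry is invalidated and its TLB entry flushed before the
 * updated R word is written back.
 */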
static target_ulong h_protect(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    uint64_t token;
    target_ulong v, r, rb;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    token = ppc_hash64_start_access(cpu, pte_index);
    v = ppc_hash64_load_hpte0(env, token, 0);
    r = ppc_hash64_load_hpte1(env, token, 0);
    ppc_hash64_stop_access(token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    rb = compute_tlbie_rb(v, r, pte_index);
    ppc_hash64_store_hpte(env, pte_index,
                          (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_tlb_invalidate_one(env, rb);
    /* Don't need a memory barrier, due to qemu's global lock */
    ppc_hash64_store_hpte(env, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}

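/*
 * H_READ: return the contents of one HPTE, or of the four HPTEs starting at
 * an index aligned down to a multiple of four when H_READ_4 is set.
 */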
static target_ulong h_read(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    uint8_t *hpte;
    int i, ridx, n_entries = 1;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        pte_index &= ~(3ULL);
        n_entries = 4;
    }

    hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);

    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ldq_p(hpte);
        args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2));
        hpte += HASH_PTE_SIZE_64;
    }

    return H_SUCCESS;
}

static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                               target_ulong opcode, target_ulong *args)
{
    /* FIXME: actually implement this */
    return H_HARDWARE;
}

#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

#define VPA_MIN_SIZE           640
#define VPA_SIZE_OFFSET        0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL    0x2

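/*
 * Helpers for H_REGISTER_VPA.  register_vpa() validates the guest-supplied
 * address of the Virtual Processor Area (alignment, minimum size, page
 * crossing), records it and sets the shared-processor indicator byte in
 * the VPA.
 */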
static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint16_t size;
    uint8_t tmp;

    if (vpa == 0) {
        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
        return H_HARDWARE;
    }

    if (vpa % env->dcache_line_size) {
        return H_PARAMETER;
    }
    /* FIXME: bounds check the address */

    size = lduw_be_phys(cs->as, vpa + 0x4);

    if (size < VPA_MIN_SIZE) {
        return H_PARAMETER;
    }

    /* VPA is not allowed to cross a page boundary */
    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    env->vpa_addr = vpa;

    tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET);
    tmp |= VPA_SHARED_PROC_VAL;
    stb_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);

    return H_SUCCESS;
}

static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
{
    if (env->slb_shadow_addr) {
        return H_RESOURCE;
    }

    if (env->dtl_addr) {
        return H_RESOURCE;
    }

    env->vpa_addr = 0;
    return H_SUCCESS;
}

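/*
 * The SLB shadow buffer and dispatch trace log can only be registered while
 * a VPA is registered, and the VPA cannot be deregistered while either of
 * them is still in place.
 */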
static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->slb_shadow_addr = addr;
    env->slb_shadow_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    env->slb_shadow_addr = 0;
    env->slb_shadow_size = 0;
    return H_SUCCESS;
}

static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->dtl_addr = addr;
    env->dtl_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
{
    env->dtl_addr = 0;
    env->dtl_size = 0;

    return H_SUCCESS;
}

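/*
 * H_REGISTER_VPA: dispatch on the flags argument to register or deregister
 * the VPA, SLB shadow buffer or dispatch trace log of the target vCPU,
 * which is identified by its device tree id.
 */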
static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    CPUPPCState *tenv;
    PowerPCCPU *tcpu;

    tcpu = ppc_get_vcpu_by_dt_id(procno);
    if (!tcpu) {
        return H_PARAMETER;
    }
    tenv = &tcpu->env;

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tenv, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tenv, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tenv, vpa);
        break;
    }

    return ret;
}

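/*
 * H_CEDE: enable external interrupts and, if the vCPU has no pending work,
 * halt it until it is woken by an interrupt.
 */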
static target_ulong h_cede(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);
    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }
    return H_SUCCESS;
}

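/*
 * KVMPPC_H_RTAS: private hypercall through which the RTAS entry point
 * provided by QEMU forwards RTAS calls.  The single argument points to the
 * RTAS argument buffer: token, nargs and nret, followed by the arguments
 * and the space for the return values.
 */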
static target_ulong h_rtas(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = rtas_ld(rtas_r3, 0);
    uint32_t nargs = rtas_ld(rtas_r3, 1);
    uint32_t nret = rtas_ld(rtas_r3, 2);

    return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4*nargs);
}

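/*
 * H_LOGICAL_CI_LOAD / H_LOGICAL_CACHE_LOAD and the corresponding stores:
 * access guest physical memory in 1, 2, 4 or 8 byte units on behalf of the
 * guest (used by SLOF and for debugging).
 */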
static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(cs->as, addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(cs->as, addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(cs->as, addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(cs->as, addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val  = args[2];

    switch (size) {
    case 1:
        stb_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(cs->as, addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

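/*
 * KVMPPC_H_LOGICAL_MEMOP: copy (op 0) or invert-and-copy (op 1) a block of
 * guest physical memory in elements of 1, 2, 4 or 8 bytes, copying
 * backwards when the destination overlaps the source.
 */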
static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst   = args[0]; /* Destination address */
    target_ulong src   = args[1]; /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op    = args[4]; /* 0 = copy, 1 = invert */
    uint64_t tmp;
    unsigned int mask = (1 << esize) - 1;
    int step = 1 << esize;

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    if (dst >= src && dst < (src + (count << esize))) {
        dst = dst + ((count - 1) << esize);
        src = src + ((count - 1) << esize);
        step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(cs->as, src);
            break;
        case 1:
            tmp = lduw_phys(cs->as, src);
            break;
        case 2:
            tmp = ldl_phys(cs->as, src);
            break;
        case 3:
            tmp = ldq_phys(cs->as, src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0:
            stb_phys(cs->as, dst, tmp);
            break;
        case 1:
            stw_phys(cs->as, dst, tmp);
            break;
        case 2:
            stl_phys(cs->as, dst, tmp);
            break;
        case 3:
            stq_phys(cs->as, dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}

static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

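/*
 * H_SET_MODE: only the H_SET_MODE_RESOURCE_LE resource is handled here, by
 * flipping LPCR_ILE on every CPU so that interrupts are taken in the
 * endianness the guest asked for.
 */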
static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                               target_ulong opcode, target_ulong *args)
{
    CPUState *cs;
    target_ulong mflags = args[0];
    target_ulong resource = args[1];
    target_ulong value1 = args[2];
    target_ulong value2 = args[3];
    target_ulong ret = H_P2;

    if (resource == H_SET_MODE_RESOURCE_LE) {
        if (value1) {
            ret = H_P3;
            goto out;
        }
        if (value2) {
            ret = H_P4;
            goto out;
        }
        switch (mflags) {
        case H_SET_MODE_ENDIAN_BIG:
            CPU_FOREACH(cs) {
                set_spr(cs, SPR_LPCR, 0, LPCR_ILE);
            }
            ret = H_SUCCESS;
            break;

        case H_SET_MODE_ENDIAN_LITTLE:
            CPU_FOREACH(cs) {
                set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE);
            }
            ret = H_SUCCESS;
            break;

        default:
            ret = H_UNSUPPORTED_FLAG;
        }
    }

out:
    return ret;
}

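/*
 * Dispatch tables: one for the architected PAPR hypercalls (indexed by
 * opcode / 4) and one for the private KVMPPC_* hypercall range.
 */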
static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];

void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{
    spapr_hcall_fn *slot;

    if (opcode <= MAX_HCALL_OPCODE) {
        assert((opcode & 0x3) == 0);

        slot = &papr_hypercall_table[opcode / 4];
    } else {
        assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));

        slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
    }

    assert(!(*slot));
    *slot = fn;
}

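/*
 * Top-level hypercall dispatcher: route the hcall to the handler registered
 * for its opcode, or fail with H_FUNCTION if none is registered.
 */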
target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                             target_ulong *args)
{
    if ((opcode <= MAX_HCALL_OPCODE)
        && ((opcode & 0x3) == 0)) {
        spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= KVMPPC_HCALL_BASE) &&
               (opcode <= KVMPPC_HCALL_MAX)) {
        spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    }

    hcall_dprintf("Unimplemented hcall 0x" TARGET_FMT_lx "\n", opcode);
    return H_FUNCTION;
}

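/* Register every hypercall implemented by the emulated sPAPR machine */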
static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);

    /* hcall-dabr */
    spapr_register_hypercall(H_SET_DABR, h_set_dabr);

    /* hcall-splpar */
    spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
    spapr_register_hypercall(H_CEDE, h_cede);

    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate
     * here between the "CI" and the "CACHE" variants, they will use whatever
     * mapping attributes qemu is using. When using KVM, the kernel will
     * enforce the attributes more strongly.
     */
    spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
    spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);

    /* qemu/KVM-PPC specific hcalls */
    spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);

    spapr_register_hypercall(H_SET_MODE, h_set_mode);
}

type_init(hypercall_register_types)