xref: /openbmc/qemu/hw/ppc/spapr_softmmu.c (revision cc37d98b)
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include "qemu/error-report.h"
#include "cpu.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"
#include "mmu-book3s-v3.h"


static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex)
{
    /*
     * hash value/pteg group index is normalized by HPT mask
     */
    if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) {
        return false;
    }
    return true;
}

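/*
 * H_ENTER: install one HPTE.  On entry args[] holds the flags, the
 * target PTEG index and the two HPTE doublewords; on success args[0]
 * returns the slot actually used.
 */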
static target_ulong h_enter(PowerPCCPU *cpu, SpaprMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    unsigned apshift;
    target_ulong raddr;
    target_ulong slot;
    const ppc_hash_pte64_t *hptes;

    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel);
    if (!apshift) {
        /* Bad page size encoding */
        return H_PARAMETER;
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);

    if (is_ram_address(spapr, raddr)) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        target_ulong wimg_flags;
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        wimg_flags = (ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M));

        if (wimg_flags != HPTE64_R_I &&
            wimg_flags != (HPTE64_R_I | HPTE64_R_M)) {
            return H_PARAMETER;
        }
    }

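    /*
     * Mask out the software-use bits (0x60) in the first doubleword;
     * QEMU reserves these (e.g. HPTE64_V_HPTE_DIRTY) for its own
     * bookkeeping, so they must not be guest-controlled.
     */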
    pteh &= ~0x60ULL;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    slot = ptex & 7ULL;
    ptex = ptex & ~7ULL;

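    /*
     * Without H_EXACT the low three bits of ptex only name a PTEG and
     * any free slot in the group may be used; with H_EXACT they select
     * one specific slot, which must be empty.
     */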
    if (likely((flags & H_EXACT) == 0)) {
        hptes = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
        for (slot = 0; slot < 8; slot++) {
            if (!(ppc_hash64_hpte0(cpu, hptes, slot) & HPTE64_V_VALID)) {
                break;
            }
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
        if (slot == 8) {
            return H_PTEG_FULL;
        }
    } else {
        hptes = ppc_hash64_map_hptes(cpu, ptex + slot, 1);
        if (ppc_hash64_hpte0(cpu, hptes, 0) & HPTE64_V_VALID) {
            ppc_hash64_unmap_hptes(cpu, hptes, ptex + slot, 1);
            return H_PTEG_FULL;
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex + slot, 1);
    }

    spapr_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = ptex + slot;
    return H_SUCCESS;
}

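/*
 * These values deliberately match the H_BULK_REMOVE_CODE encodings;
 * h_bulk_remove() shifts them straight into bits 60-61 of the response.
 */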
typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;

static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!valid_ptex(cpu, ptex)) {
        return REMOVE_PARM;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    spapr_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    return REMOVE_SUCCESS;
}

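/*
 * H_REMOVE: invalidate one HPTE.  On success the old doublewords are
 * returned in args[0]/args[1] so the guest can recover the R/C bits.
 */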
static target_ulong h_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(cpu, ptex, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        check_tlb_flush(env, true);
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}

#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
#define   H_BULK_REMOVE_END            0xc000000000000000ULL
#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH        4

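/*
 * H_BULK_REMOVE: process up to four (tsh, tsl) pairs from args[].  Each
 * high word selects an HPTE and flags, and is rewritten in place as the
 * response for that entry.
 */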
static target_ulong h_bulk_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong rc = H_SUCCESS;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

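        /*
         * The bulk-remove AVPN/ANDCOND flag bits sit 26 bits above the
         * H_AVPN/H_ANDCOND positions that remove_hpte() expects, hence
         * the shift.
         */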
        ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
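            /* Fold the removed HPTE's R/C bits into the response word */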
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            rc = H_PARAMETER;
            goto exit;

        case REMOVE_HW:
            rc = H_HARDWARE;
            goto exit;
        }
    }
 exit:
    check_tlb_flush(env, true);

    return rc;
}

static target_ulong h_protect(PowerPCCPU *cpu, SpaprMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

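    /* Scatter the pp0/key/pp/n bits from flags into their HPTE fields */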
    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
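    /*
     * Invalidate the entry and flush the TLB first, then store the
     * updated doublewords, so no vCPU can observe a half-modified HPTE.
     */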
    spapr_store_hpte(cpu, ptex,
                     (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    /* Flush the tlb */
    check_tlb_flush(env, true);
    /* Don't need a memory barrier, due to qemu's global lock */
    spapr_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}

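/*
 * H_READ: return the doublewords of one HPTE, or of four consecutive
 * HPTEs starting at a 4-aligned index when H_READ_4 is set.
 */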
static target_ulong h_read(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    int i, ridx, n_entries = 1;
    const ppc_hash_pte64_t *hptes;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        ptex &= ~(3ULL);
        n_entries = 4;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, n_entries);
    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ppc_hash64_hpte0(cpu, hptes, i);
        args[ridx++] = ppc_hash64_hpte1(cpu, hptes, i);
    }
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, n_entries);

    return H_SUCCESS;
}

struct SpaprPendingHpt {
    /* These fields are read-only after initialization */
    int shift;
    QemuThread thread;

    /* These fields are protected by the BQL */
    bool complete;

    /* These fields are private to the preparation thread if
     * !complete, otherwise protected by the BQL */
    int ret;
    void *hpt;
};

static void free_pending_hpt(SpaprPendingHpt *pending)
{
    if (pending->hpt) {
        qemu_vfree(pending->hpt);
    }

    g_free(pending);
}

static void *hpt_prepare_thread(void *opaque)
{
    SpaprPendingHpt *pending = opaque;
    size_t size = 1ULL << pending->shift;

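    /* The hash table must be naturally aligned, i.e. to its own size */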
    pending->hpt = qemu_try_memalign(size, size);
    if (pending->hpt) {
        memset(pending->hpt, 0, size);
        pending->ret = H_SUCCESS;
    } else {
        pending->ret = H_NO_MEM;
    }

    qemu_mutex_lock_iothread();

    if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) {
        /* Ready to go */
        pending->complete = true;
    } else {
        /* We've been cancelled, clean ourselves up */
        free_pending_hpt(pending);
    }

    qemu_mutex_unlock_iothread();
    return NULL;
}

/* Must be called with BQL held */
static void cancel_hpt_prepare(SpaprMachineState *spapr)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;

    /* Let the thread know it's cancelled */
    spapr->pending_hpt = NULL;

    if (!pending) {
        /* Nothing to do */
        return;
    }

    if (!pending->complete) {
        /* thread will clean itself up */
        return;
    }

    free_pending_hpt(pending);
}

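/*
 * H_RESIZE_HPT_PREPARE: start (or poll) allocation of a new hash table.
 * The allocation runs on a detached worker thread; the guest polls by
 * repeating the call with the same shift until it stops returning
 * H_LONG_BUSY_ORDER_100_MSEC.
 */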
target_ulong softmmu_resize_hpt_prepare(PowerPCCPU *cpu,
                                        SpaprMachineState *spapr,
                                        target_ulong shift)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;

    if (pending) {
        /* something already in progress */
        if (pending->shift == shift) {
            /* and it's suitable */
            if (pending->complete) {
                return pending->ret;
            } else {
                return H_LONG_BUSY_ORDER_100_MSEC;
            }
        }

        /* not suitable, cancel and replace */
        cancel_hpt_prepare(spapr);
    }

    if (!shift) {
        /* nothing to do */
        return H_SUCCESS;
    }

    /* start new prepare */

    pending = g_new0(SpaprPendingHpt, 1);
    pending->shift = shift;
    pending->ret = H_HARDWARE;

    qemu_thread_create(&pending->thread, "sPAPR HPT prepare",
                       hpt_prepare_thread, pending, QEMU_THREAD_DETACHED);

    spapr->pending_hpt = pending;

    /* In theory we could estimate the time more accurately based on
     * the new size, but there's not much point */
    return H_LONG_BUSY_ORDER_100_MSEC;
}

static uint64_t new_hpte_load0(void *htab, uint64_t pteg, int slot)
{
    uint8_t *addr = htab;

    addr += pteg * HASH_PTEG_SIZE_64;
    addr += slot * HASH_PTE_SIZE_64;
    return ldq_p(addr);
}

static void new_hpte_store(void *htab, uint64_t pteg, int slot,
                           uint64_t pte0, uint64_t pte1)
{
    uint8_t *addr = htab;

    addr += pteg * HASH_PTEG_SIZE_64;
    addr += slot * HASH_PTE_SIZE_64;

    stq_p(addr, pte0);
    stq_p(addr + HPTE64_DW1, pte1);
}

static int rehash_hpte(PowerPCCPU *cpu,
                       const ppc_hash_pte64_t *hptes,
                       void *old_hpt, uint64_t oldsize,
                       void *new_hpt, uint64_t newsize,
                       uint64_t pteg, int slot)
{
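    /*
     * Each PTEG is 128 bytes (HASH_PTEG_SIZE_64), so size >> 7 counts
     * PTEGs and subtracting one yields the hash mask for that size.
     */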
    uint64_t old_hash_mask = (oldsize >> 7) - 1;
    uint64_t new_hash_mask = (newsize >> 7) - 1;
    target_ulong pte0 = ppc_hash64_hpte0(cpu, hptes, slot);
    target_ulong pte1;
    uint64_t avpn;
    unsigned base_pg_shift;
    uint64_t hash, new_pteg, replace_pte0;

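    /*
     * Only bolted entries are carried across the resize; the guest is
     * expected to re-enter any other mappings afterwards.
     */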
    if (!(pte0 & HPTE64_V_VALID) || !(pte0 & HPTE64_V_BOLTED)) {
        return H_SUCCESS;
    }

    pte1 = ppc_hash64_hpte1(cpu, hptes, slot);

    base_pg_shift = ppc_hash64_hpte_page_shift_noslb(cpu, pte0, pte1);
    assert(base_pg_shift); /* H_ENTER shouldn't allow a bad encoding */
    avpn = HPTE64_V_AVPN_VAL(pte0) & ~(((1ULL << base_pg_shift) - 1) >> 23);

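    /*
     * A secondary-hash entry lives at the complement of its hash value,
     * so undo that before reconstructing the hash below.
     */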
    if (pte0 & HPTE64_V_SECONDARY) {
        pteg = ~pteg;
    }

    if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_256M) {
        uint64_t offset, vsid;

        /* We only have 28 - 23 bits of offset in avpn */
        offset = (avpn & 0x1f) << 23;
        vsid = avpn >> 5;
        /* We can find more bits from the pteg value */
        if (base_pg_shift < 23) {
            offset |= ((vsid ^ pteg) & old_hash_mask) << base_pg_shift;
        }

        hash = vsid ^ (offset >> base_pg_shift);
    } else if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_1T) {
        uint64_t offset, vsid;

        /* We only have 40 - 23 bits of seg_off in avpn */
        offset = (avpn & 0x1ffff) << 23;
        vsid = avpn >> 17;
        if (base_pg_shift < 23) {
            offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask)
                << base_pg_shift;
        }

        hash = vsid ^ (vsid << 25) ^ (offset >> base_pg_shift);
    } else {
        error_report("rehash_hpte: Bad segment size in HPTE");
        return H_HARDWARE;
    }

    new_pteg = hash & new_hash_mask;
    if (pte0 & HPTE64_V_SECONDARY) {
        assert(~pteg == (hash & old_hash_mask));
        new_pteg = ~new_pteg;
    } else {
        assert(pteg == (hash & old_hash_mask));
    }
    assert((oldsize != newsize) || (pteg == new_pteg));
    replace_pte0 = new_hpte_load0(new_hpt, new_pteg, slot);
    /*
     * Strictly speaking, we don't need all these tests, since we only
     * ever rehash bolted HPTEs.  We might in future handle non-bolted
     * HPTEs, though, so make the logic correct for those cases as
     * well.
     */
    if (replace_pte0 & HPTE64_V_VALID) {
        assert(newsize < oldsize);
        if (replace_pte0 & HPTE64_V_BOLTED) {
            if (pte0 & HPTE64_V_BOLTED) {
                /* Bolted collision, nothing we can do */
                return H_PTEG_FULL;
            } else {
                /* Discard this hpte */
                return H_SUCCESS;
            }
        }
    }

    new_hpte_store(new_hpt, new_pteg, slot, pte0, pte1);
    return H_SUCCESS;
}

static int rehash_hpt(PowerPCCPU *cpu,
                      void *old_hpt, uint64_t oldsize,
                      void *new_hpt, uint64_t newsize)
{
    uint64_t n_ptegs = oldsize >> 7;
    uint64_t pteg;
    int slot;
    int rc;

    for (pteg = 0; pteg < n_ptegs; pteg++) {
        hwaddr ptex = pteg * HPTES_PER_GROUP;
        const ppc_hash_pte64_t *hptes
            = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);

        if (!hptes) {
            return H_HARDWARE;
        }

        for (slot = 0; slot < HPTES_PER_GROUP; slot++) {
            rc = rehash_hpte(cpu, hptes, old_hpt, oldsize, new_hpt, newsize,
                             pteg, slot);
            if (rc != H_SUCCESS) {
                ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
                return rc;
            }
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
    }

    return H_SUCCESS;
}

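/*
 * H_RESIZE_HPT_COMMIT: swap in the table built by the prepare thread,
 * after migrating every bolted entry from the old table into it.
 */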
target_ulong softmmu_resize_hpt_commit(PowerPCCPU *cpu,
                                       SpaprMachineState *spapr,
                                       target_ulong flags,
                                       target_ulong shift)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;
    int rc;
    size_t newsize;

    if (flags != 0) {
        return H_PARAMETER;
    }

    if (!pending || (pending->shift != shift)) {
        /* no matching prepare */
        return H_CLOSED;
    }

    if (!pending->complete) {
        /* prepare has not completed */
        return H_BUSY;
    }

    /* Shouldn't have got past PREPARE without an HPT */
    g_assert(spapr->htab_shift);

    newsize = 1ULL << pending->shift;
    rc = rehash_hpt(cpu, spapr->htab, HTAB_SIZE(spapr),
                    pending->hpt, newsize);
    if (rc == H_SUCCESS) {
        qemu_vfree(spapr->htab);
        spapr->htab = pending->hpt;
        spapr->htab_shift = pending->shift;

        push_sregs_to_kvm_pr(spapr);

        pending->hpt = NULL; /* so it's not free()d */
    }

    /* Clean up */
    spapr->pending_hpt = NULL;
    free_pending_hpt(pending);

    return rc;
}

static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);
}

type_init(hypercall_register_types)