xref: /openbmc/qemu/hw/ppc/spapr_pci.c (revision 4b4629d9)
1 /*
2  * QEMU sPAPR PCI host originated from Uninorth PCI host
3  *
4  * Copyright (c) 2011 Alexey Kardashevskiy, IBM Corporation.
5  * Copyright (C) 2011 David Gibson, IBM Corporation.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 #include "qemu/osdep.h"
26 #include "qapi/error.h"
27 #include "qemu-common.h"
28 #include "cpu.h"
29 #include "hw/hw.h"
30 #include "hw/sysbus.h"
31 #include "hw/pci/pci.h"
32 #include "hw/pci/msi.h"
33 #include "hw/pci/msix.h"
34 #include "hw/pci/pci_host.h"
35 #include "hw/ppc/spapr.h"
36 #include "hw/pci-host/spapr.h"
37 #include "exec/address-spaces.h"
38 #include <libfdt.h>
39 #include "trace.h"
40 #include "qemu/error-report.h"
41 #include "qapi/qmp/qerror.h"
42 
43 #include "hw/pci/pci_bridge.h"
44 #include "hw/pci/pci_bus.h"
45 #include "hw/ppc/spapr_drc.h"
46 #include "sysemu/device_tree.h"
47 
48 #include "hw/vfio/vfio.h"
49 
50 /* Copied from the kernel arch/powerpc/platforms/pseries/msi.c */
51 #define RTAS_QUERY_FN           0
52 #define RTAS_CHANGE_FN          1
53 #define RTAS_RESET_FN           2
54 #define RTAS_CHANGE_MSI_FN      3
55 #define RTAS_CHANGE_MSIX_FN     4
56 
57 /* Interrupt types to return on RTAS_CHANGE_* */
58 #define RTAS_TYPE_MSI           1
59 #define RTAS_TYPE_MSIX          2
60 
61 #define FDT_NAME_MAX          128
62 
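/*
 * Wrapper for libfdt calls in the FDT-building helpers below: a negative
 * (error) return value from the wrapped expression makes the enclosing
 * function return that error code immediately, so the macro is only
 * usable inside functions returning int.
 */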
63 #define _FDT(exp) \
64     do { \
65         int ret = (exp);                                           \
66         if (ret < 0) {                                             \
67             return ret;                                            \
68         }                                                          \
69     } while (0)
70 
71 sPAPRPHBState *spapr_pci_find_phb(sPAPRMachineState *spapr, uint64_t buid)
72 {
73     sPAPRPHBState *sphb;
74 
75     QLIST_FOREACH(sphb, &spapr->phbs, list) {
76         if (sphb->buid != buid) {
77             continue;
78         }
79         return sphb;
80     }
81 
82     return NULL;
83 }
84 
85 PCIDevice *spapr_pci_find_dev(sPAPRMachineState *spapr, uint64_t buid,
86                               uint32_t config_addr)
87 {
88     sPAPRPHBState *sphb = spapr_pci_find_phb(spapr, buid);
89     PCIHostState *phb = PCI_HOST_BRIDGE(sphb);
90     int bus_num = (config_addr >> 16) & 0xFF;
91     int devfn = (config_addr >> 8) & 0xFF;
92 
93     if (!phb) {
94         return NULL;
95     }
96 
97     return pci_find_device(phb->bus, bus_num, devfn);
98 }
99 
100 static uint32_t rtas_pci_cfgaddr(uint32_t arg)
101 {
102     /* This handles the encoding of extended config space addresses */
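    /* Bits 28..31 of the RTAS config address carry the upper four bits of
     * the register offset; fold them down to bits 8..11 beside the low
     * eight register bits to form the offset into extended config space. */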
103     return ((arg >> 20) & 0xf00) | (arg & 0xff);
104 }
105 
106 static void finish_read_pci_config(sPAPRMachineState *spapr, uint64_t buid,
107                                    uint32_t addr, uint32_t size,
108                                    target_ulong rets)
109 {
110     PCIDevice *pci_dev;
111     uint32_t val;
112 
113     if ((size != 1) && (size != 2) && (size != 4)) {
114         /* access must be 1, 2 or 4 bytes */
115         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
116         return;
117     }
118 
119     pci_dev = spapr_pci_find_dev(spapr, buid, addr);
120     addr = rtas_pci_cfgaddr(addr);
121 
122     if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
123         /* Access must be to a valid device, within bounds and
124          * naturally aligned */
125         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
126         return;
127     }
128 
129     val = pci_host_config_read_common(pci_dev, addr,
130                                       pci_config_size(pci_dev), size);
131 
132     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
133     rtas_st(rets, 1, val);
134 }
135 
136 static void rtas_ibm_read_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
137                                      uint32_t token, uint32_t nargs,
138                                      target_ulong args,
139                                      uint32_t nret, target_ulong rets)
140 {
141     uint64_t buid;
142     uint32_t size, addr;
143 
144     if ((nargs != 4) || (nret != 2)) {
145         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
146         return;
147     }
148 
149     buid = rtas_ldq(args, 1);
150     size = rtas_ld(args, 3);
151     addr = rtas_ld(args, 0);
152 
153     finish_read_pci_config(spapr, buid, addr, size, rets);
154 }
155 
156 static void rtas_read_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
157                                  uint32_t token, uint32_t nargs,
158                                  target_ulong args,
159                                  uint32_t nret, target_ulong rets)
160 {
161     uint32_t size, addr;
162 
163     if ((nargs != 2) || (nret != 2)) {
164         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
165         return;
166     }
167 
168     size = rtas_ld(args, 1);
169     addr = rtas_ld(args, 0);
170 
171     finish_read_pci_config(spapr, 0, addr, size, rets);
172 }
173 
174 static void finish_write_pci_config(sPAPRMachineState *spapr, uint64_t buid,
175                                     uint32_t addr, uint32_t size,
176                                     uint32_t val, target_ulong rets)
177 {
178     PCIDevice *pci_dev;
179 
180     if ((size != 1) && (size != 2) && (size != 4)) {
181         /* access must be 1, 2 or 4 bytes */
182         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
183         return;
184     }
185 
186     pci_dev = spapr_pci_find_dev(spapr, buid, addr);
187     addr = rtas_pci_cfgaddr(addr);
188 
189     if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
190         /* Access must be to a valid device, within bounds and
191          * naturally aligned */
192         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
193         return;
194     }
195 
196     pci_host_config_write_common(pci_dev, addr, pci_config_size(pci_dev),
197                                  val, size);
198 
199     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
200 }
201 
202 static void rtas_ibm_write_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
203                                       uint32_t token, uint32_t nargs,
204                                       target_ulong args,
205                                       uint32_t nret, target_ulong rets)
206 {
207     uint64_t buid;
208     uint32_t val, size, addr;
209 
210     if ((nargs != 5) || (nret != 1)) {
211         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
212         return;
213     }
214 
215     buid = rtas_ldq(args, 1);
216     val = rtas_ld(args, 4);
217     size = rtas_ld(args, 3);
218     addr = rtas_ld(args, 0);
219 
220     finish_write_pci_config(spapr, buid, addr, size, val, rets);
221 }
222 
223 static void rtas_write_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
224                                   uint32_t token, uint32_t nargs,
225                                   target_ulong args,
226                                   uint32_t nret, target_ulong rets)
227 {
228     uint32_t val, size, addr;
229 
230     if ((nargs != 3) || (nret != 1)) {
231         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
232         return;
233     }
234 
235 
236     val = rtas_ld(args, 2);
237     size = rtas_ld(args, 1);
238     addr = rtas_ld(args, 0);
239 
240     finish_write_pci_config(spapr, 0, addr, size, val, rets);
241 }
242 
243 /*
244  * Set MSI/MSIX message data.
245  * This is required for msi_notify()/msix_notify(), which
246  * will write to these addresses via spapr_msi_write().
247  *
248  * If addr == 0, all entries will have .data == first_irq, i.e. the
249  * table will be reset.
250  */
251 static void spapr_msi_setmsg(PCIDevice *pdev, hwaddr addr, bool msix,
252                              unsigned first_irq, unsigned req_num)
253 {
254     unsigned i;
255     MSIMessage msg = { .address = addr, .data = first_irq };
256 
257     if (!msix) {
258         msi_set_message(pdev, msg);
259         trace_spapr_pci_msi_setup(pdev->name, 0, msg.address);
260         return;
261     }
262 
263     for (i = 0; i < req_num; ++i) {
264         msix_set_message(pdev, i, msg);
265         trace_spapr_pci_msi_setup(pdev->name, i, msg.address);
266         if (addr) {
267             ++msg.data;
268         }
269     }
270 }
271 
272 static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
273                                 uint32_t token, uint32_t nargs,
274                                 target_ulong args, uint32_t nret,
275                                 target_ulong rets)
276 {
277     uint32_t config_addr = rtas_ld(args, 0);
278     uint64_t buid = rtas_ldq(args, 1);
279     unsigned int func = rtas_ld(args, 3);
280     unsigned int req_num = rtas_ld(args, 4); /* 0 == remove all */
281     unsigned int seq_num = rtas_ld(args, 5);
282     unsigned int ret_intr_type;
283     unsigned int irq, max_irqs = 0;
284     sPAPRPHBState *phb = NULL;
285     PCIDevice *pdev = NULL;
286     spapr_pci_msi *msi;
287     int *config_addr_key;
288     Error *err = NULL;
289 
290     switch (func) {
291     case RTAS_CHANGE_MSI_FN:
292     case RTAS_CHANGE_FN:
293         ret_intr_type = RTAS_TYPE_MSI;
294         break;
295     case RTAS_CHANGE_MSIX_FN:
296         ret_intr_type = RTAS_TYPE_MSIX;
297         break;
298     default:
299         error_report("rtas_ibm_change_msi(%u) is not implemented", func);
300         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
301         return;
302     }
303 
304     /* Find sPAPRPHBState */
305     phb = spapr_pci_find_phb(spapr, buid);
306     if (phb) {
307         pdev = spapr_pci_find_dev(spapr, buid, config_addr);
308     }
309     if (!phb || !pdev) {
310         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
311         return;
312     }
313 
314     msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
315 
316     /* Releasing MSIs */
317     if (!req_num) {
318         if (!msi) {
319             trace_spapr_pci_msi("Releasing wrong config", config_addr);
320             rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
321             return;
322         }
323 
324         xics_free(spapr->icp, msi->first_irq, msi->num);
325         if (msi_present(pdev)) {
326             spapr_msi_setmsg(pdev, 0, false, 0, 0);
327         }
328         if (msix_present(pdev)) {
329             spapr_msi_setmsg(pdev, 0, true, 0, 0);
330         }
331         g_hash_table_remove(phb->msi, &config_addr);
332 
333         trace_spapr_pci_msi("Released MSIs", config_addr);
334         rtas_st(rets, 0, RTAS_OUT_SUCCESS);
335         rtas_st(rets, 1, 0);
336         return;
337     }
338 
339     /* Enabling MSI */
340 
341     /* Check if the device supports as many IRQs as requested */
342     if (ret_intr_type == RTAS_TYPE_MSI) {
343         max_irqs = msi_nr_vectors_allocated(pdev);
344     } else if (ret_intr_type == RTAS_TYPE_MSIX) {
345         max_irqs = pdev->msix_entries_nr;
346     }
347     if (!max_irqs) {
348         error_report("Requested interrupt type %d is not enabled for device %x",
349                      ret_intr_type, config_addr);
350         rtas_st(rets, 0, -1); /* Hardware error */
351         return;
352     }
353     /* Correct the number if the guest asked for too many */
354     if (req_num > max_irqs) {
355         trace_spapr_pci_msi_retry(config_addr, req_num, max_irqs);
356         req_num = max_irqs;
357         irq = 0; /* to avoid misleading trace */
358         goto out;
359     }
360 
361     /* Allocate MSIs */
362     irq = xics_alloc_block(spapr->icp, 0, req_num, false,
363                            ret_intr_type == RTAS_TYPE_MSI, &err);
364     if (err) {
365         error_reportf_err(err, "Can't allocate MSIs for device %x: ",
366                           config_addr);
367         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
368         return;
369     }
370 
371     /* Release previous MSIs */
372     if (msi) {
373         xics_free(spapr->icp, msi->first_irq, msi->num);
374         g_hash_table_remove(phb->msi, &config_addr);
375     }
376 
377     /* Setup MSI/MSIX vectors in the device (via cfgspace or MSIX BAR) */
378     spapr_msi_setmsg(pdev, SPAPR_PCI_MSI_WINDOW, ret_intr_type == RTAS_TYPE_MSIX,
379                      irq, req_num);
380 
381     /* Add MSI device to cache */
382     msi = g_new(spapr_pci_msi, 1);
383     msi->first_irq = irq;
384     msi->num = req_num;
385     config_addr_key = g_new(int, 1);
386     *config_addr_key = config_addr;
387     g_hash_table_insert(phb->msi, config_addr_key, msi);
388 
389 out:
390     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
391     rtas_st(rets, 1, req_num);
392     rtas_st(rets, 2, ++seq_num);
393     if (nret > 3) {
394         rtas_st(rets, 3, ret_intr_type);
395     }
396 
397     trace_spapr_pci_rtas_ibm_change_msi(config_addr, func, req_num, irq);
398 }
399 
400 static void rtas_ibm_query_interrupt_source_number(PowerPCCPU *cpu,
401                                                    sPAPRMachineState *spapr,
402                                                    uint32_t token,
403                                                    uint32_t nargs,
404                                                    target_ulong args,
405                                                    uint32_t nret,
406                                                    target_ulong rets)
407 {
408     uint32_t config_addr = rtas_ld(args, 0);
409     uint64_t buid = rtas_ldq(args, 1);
410     unsigned int intr_src_num = -1, ioa_intr_num = rtas_ld(args, 3);
411     sPAPRPHBState *phb = NULL;
412     PCIDevice *pdev = NULL;
413     spapr_pci_msi *msi;
414 
415     /* Find sPAPRPHBState */
416     phb = spapr_pci_find_phb(spapr, buid);
417     if (phb) {
418         pdev = spapr_pci_find_dev(spapr, buid, config_addr);
419     }
420     if (!phb || !pdev) {
421         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
422         return;
423     }
424 
425     /* Find device descriptor and start IRQ */
426     msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
427     if (!msi || !msi->first_irq || !msi->num || (ioa_intr_num >= msi->num)) {
428         trace_spapr_pci_msi("Failed to return vector", config_addr);
429         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
430         return;
431     }
432     intr_src_num = msi->first_irq + ioa_intr_num;
433     trace_spapr_pci_rtas_ibm_query_interrupt_source_number(ioa_intr_num,
434                                                            intr_src_num);
435 
436     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
437     rtas_st(rets, 1, intr_src_num);
438     rtas_st(rets, 2, 1); /* 0 == level; 1 == edge */
439 }
440 
441 static void rtas_ibm_set_eeh_option(PowerPCCPU *cpu,
442                                     sPAPRMachineState *spapr,
443                                     uint32_t token, uint32_t nargs,
444                                     target_ulong args, uint32_t nret,
445                                     target_ulong rets)
446 {
447     sPAPRPHBState *sphb;
448     uint32_t addr, option;
449     uint64_t buid;
450     int ret;
451 
452     if ((nargs != 4) || (nret != 1)) {
453         goto param_error_exit;
454     }
455 
456     buid = rtas_ldq(args, 1);
457     addr = rtas_ld(args, 0);
458     option = rtas_ld(args, 3);
459 
460     sphb = spapr_pci_find_phb(spapr, buid);
461     if (!sphb) {
462         goto param_error_exit;
463     }
464 
465     if (!spapr_phb_eeh_available(sphb)) {
466         goto param_error_exit;
467     }
468 
469     ret = spapr_phb_vfio_eeh_set_option(sphb, addr, option);
470     rtas_st(rets, 0, ret);
471     return;
472 
473 param_error_exit:
474     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
475 }
476 
477 static void rtas_ibm_get_config_addr_info2(PowerPCCPU *cpu,
478                                            sPAPRMachineState *spapr,
479                                            uint32_t token, uint32_t nargs,
480                                            target_ulong args, uint32_t nret,
481                                            target_ulong rets)
482 {
483     sPAPRPHBState *sphb;
484     PCIDevice *pdev;
485     uint32_t addr, option;
486     uint64_t buid;
487 
488     if ((nargs != 4) || (nret != 2)) {
489         goto param_error_exit;
490     }
491 
492     buid = rtas_ldq(args, 1);
493     sphb = spapr_pci_find_phb(spapr, buid);
494     if (!sphb) {
495         goto param_error_exit;
496     }
497 
498     if (!spapr_phb_eeh_available(sphb)) {
499         goto param_error_exit;
500     }
501 
502     /*
503      * We always have a PE address of the form "00BB0001", where "BB"
504      * is the bus number of the PE's primary bus.
505      */
506     option = rtas_ld(args, 3);
507     switch (option) {
508     case RTAS_GET_PE_ADDR:
509         addr = rtas_ld(args, 0);
510         pdev = spapr_pci_find_dev(spapr, buid, addr);
511         if (!pdev) {
512             goto param_error_exit;
513         }
514 
515         rtas_st(rets, 1, (pci_bus_num(pdev->bus) << 16) + 1);
516         break;
517     case RTAS_GET_PE_MODE:
518         rtas_st(rets, 1, RTAS_PE_MODE_SHARED);
519         break;
520     default:
521         goto param_error_exit;
522     }
523 
524     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
525     return;
526 
527 param_error_exit:
528     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
529 }
530 
531 static void rtas_ibm_read_slot_reset_state2(PowerPCCPU *cpu,
532                                             sPAPRMachineState *spapr,
533                                             uint32_t token, uint32_t nargs,
534                                             target_ulong args, uint32_t nret,
535                                             target_ulong rets)
536 {
537     sPAPRPHBState *sphb;
538     uint64_t buid;
539     int state, ret;
540 
541     if ((nargs != 3) || (nret != 4 && nret != 5)) {
542         goto param_error_exit;
543     }
544 
545     buid = rtas_ldq(args, 1);
546     sphb = spapr_pci_find_phb(spapr, buid);
547     if (!sphb) {
548         goto param_error_exit;
549     }
550 
551     if (!spapr_phb_eeh_available(sphb)) {
552         goto param_error_exit;
553     }
554 
555     ret = spapr_phb_vfio_eeh_get_state(sphb, &state);
556     rtas_st(rets, 0, ret);
557     if (ret != RTAS_OUT_SUCCESS) {
558         return;
559     }
560 
561     rtas_st(rets, 1, state);
562     rtas_st(rets, 2, RTAS_EEH_SUPPORT);
563     rtas_st(rets, 3, RTAS_EEH_PE_UNAVAIL_INFO);
564     if (nret >= 5) {
565         rtas_st(rets, 4, RTAS_EEH_PE_RECOVER_INFO);
566     }
567     return;
568 
569 param_error_exit:
570     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
571 }
572 
573 static void rtas_ibm_set_slot_reset(PowerPCCPU *cpu,
574                                     sPAPRMachineState *spapr,
575                                     uint32_t token, uint32_t nargs,
576                                     target_ulong args, uint32_t nret,
577                                     target_ulong rets)
578 {
579     sPAPRPHBState *sphb;
580     uint32_t option;
581     uint64_t buid;
582     int ret;
583 
584     if ((nargs != 4) || (nret != 1)) {
585         goto param_error_exit;
586     }
587 
588     buid = rtas_ldq(args, 1);
589     option = rtas_ld(args, 3);
590     sphb = spapr_pci_find_phb(spapr, buid);
591     if (!sphb) {
592         goto param_error_exit;
593     }
594 
595     if (!spapr_phb_eeh_available(sphb)) {
596         goto param_error_exit;
597     }
598 
599     ret = spapr_phb_vfio_eeh_reset(sphb, option);
600     rtas_st(rets, 0, ret);
601     return;
602 
603 param_error_exit:
604     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
605 }
606 
607 static void rtas_ibm_configure_pe(PowerPCCPU *cpu,
608                                   sPAPRMachineState *spapr,
609                                   uint32_t token, uint32_t nargs,
610                                   target_ulong args, uint32_t nret,
611                                   target_ulong rets)
612 {
613     sPAPRPHBState *sphb;
614     uint64_t buid;
615     int ret;
616 
617     if ((nargs != 3) || (nret != 1)) {
618         goto param_error_exit;
619     }
620 
621     buid = rtas_ldq(args, 1);
622     sphb = spapr_pci_find_phb(spapr, buid);
623     if (!sphb) {
624         goto param_error_exit;
625     }
626 
627     if (!spapr_phb_eeh_available(sphb)) {
628         goto param_error_exit;
629     }
630 
631     ret = spapr_phb_vfio_eeh_configure(sphb);
632     rtas_st(rets, 0, ret);
633     return;
634 
635 param_error_exit:
636     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
637 }
638 
639 /* Stub implementation, to be fully supported later */
640 static void rtas_ibm_slot_error_detail(PowerPCCPU *cpu,
641                                        sPAPRMachineState *spapr,
642                                        uint32_t token, uint32_t nargs,
643                                        target_ulong args, uint32_t nret,
644                                        target_ulong rets)
645 {
646     sPAPRPHBState *sphb;
647     int option;
648     uint64_t buid;
649 
650     if ((nargs != 8) || (nret != 1)) {
651         goto param_error_exit;
652     }
653 
654     buid = rtas_ldq(args, 1);
655     sphb = spapr_pci_find_phb(spapr, buid);
656     if (!sphb) {
657         goto param_error_exit;
658     }
659 
660     if (!spapr_phb_eeh_available(sphb)) {
661         goto param_error_exit;
662     }
663 
664     option = rtas_ld(args, 7);
665     switch (option) {
666     case RTAS_SLOT_TEMP_ERR_LOG:
667     case RTAS_SLOT_PERM_ERR_LOG:
668         break;
669     default:
670         goto param_error_exit;
671     }
672 
673     /* We don't have an error log yet */
674     rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
675     return;
676 
677 param_error_exit:
678     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
679 }
680 
681 static int pci_spapr_swizzle(int slot, int pin)
682 {
683     return (slot + pin) % PCI_NUM_PINS;
684 }
685 
686 static int pci_spapr_map_irq(PCIDevice *pci_dev, int irq_num)
687 {
688     /*
689      * Here we need to convert pci_dev + irq_num to some unique value
690      * which is less than the number of IRQs on the specific bus (4).  We
691      * use standard PCI swizzling, that is (slot number + pin number)
692      * % 4.
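     * For example, a device in slot 3 raising INTB (irq_num 1) is
     * routed to LSI (3 + 1) % 4 == 0 on this PHB.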
693      */
694     return pci_spapr_swizzle(PCI_SLOT(pci_dev->devfn), irq_num);
695 }
696 
697 static void pci_spapr_set_irq(void *opaque, int irq_num, int level)
698 {
699     /*
700      * Here we use the number returned by pci_spapr_map_irq to find a
701      * corresponding qemu_irq.
702      */
703     sPAPRPHBState *phb = opaque;
704 
705     trace_spapr_pci_lsi_set(phb->dtbusname, irq_num, phb->lsi_table[irq_num].irq);
706     qemu_set_irq(spapr_phb_lsi_qirq(phb, irq_num), level);
707 }
708 
709 static PCIINTxRoute spapr_route_intx_pin_to_irq(void *opaque, int pin)
710 {
711     sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(opaque);
712     PCIINTxRoute route;
713 
714     route.mode = PCI_INTX_ENABLED;
715     route.irq = sphb->lsi_table[pin].irq;
716 
717     return route;
718 }
719 
720 /*
721  * MSI/MSIX memory region implementation.
722  * The handler handles both MSI and MSIX.
723  * For both, spapr_msi_setmsg() programs the vectors with the address
724  * set to SPAPR_PCI_MSI_WINDOW and the data set to an allocated XICS
725  * IRQ number, which spapr_msi_write() uses directly as the IRQ to pulse.
726  */
727 static void spapr_msi_write(void *opaque, hwaddr addr,
728                             uint64_t data, unsigned size)
729 {
730     sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
731     uint32_t irq = data;
732 
733     trace_spapr_pci_msi_write(addr, data, irq);
734 
735     qemu_irq_pulse(xics_get_qirq(spapr->icp, irq));
736 }
737 
738 static const MemoryRegionOps spapr_msi_ops = {
739     /* There is no .read as the read result is undefined by PCI spec */
740     .read = NULL,
741     .write = spapr_msi_write,
742     .endianness = DEVICE_LITTLE_ENDIAN
743 };
744 
745 /*
746  * PHB PCI device
747  */
748 static AddressSpace *spapr_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
749 {
750     sPAPRPHBState *phb = opaque;
751 
752     return &phb->iommu_as;
753 }
754 
755 static char *spapr_phb_vfio_get_loc_code(sPAPRPHBState *sphb,  PCIDevice *pdev)
756 {
757     char *path = NULL, *buf = NULL, *host = NULL;
758 
759     /* Get the PCI VFIO host id */
760     host = object_property_get_str(OBJECT(pdev), "host", NULL);
761     if (!host) {
762         goto err_out;
763     }
764 
765     /* Construct the path of the file that will give us the DT location */
766     path = g_strdup_printf("/sys/bus/pci/devices/%s/devspec", host);
767     g_free(host);
768     if (!path || !g_file_get_contents(path, &buf, NULL, NULL)) {
769         goto err_out;
770     }
771     g_free(path);
772 
773     /* Construct the path and read the loc-code from the host device tree */
774     path = g_strdup_printf("/proc/device-tree%s/ibm,loc-code", buf);
775     g_free(buf);
776     if (!path || !g_file_get_contents(path, &buf, NULL, NULL)) {
777         goto err_out;
778     }
779     return buf;
780 
781 err_out:
782     g_free(path);
783     return NULL;
784 }
785 
786 static char *spapr_phb_get_loc_code(sPAPRPHBState *sphb, PCIDevice *pdev)
787 {
788     char *buf;
789     const char *devtype = "qemu";
790     uint32_t busnr = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(pdev))));
791 
792     if (object_dynamic_cast(OBJECT(pdev), "vfio-pci")) {
793         buf = spapr_phb_vfio_get_loc_code(sphb, pdev);
794         if (buf) {
795             return buf;
796         }
797         devtype = "vfio";
798     }
799     /*
800      * For emulated devices and the VFIO failure case, make up
801      * the loc-code.
802      */
803     buf = g_strdup_printf("%s_%s:%04x:%02x:%02x.%x",
804                           devtype, pdev->name, sphb->index, busnr,
805                           PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
806     return buf;
807 }
808 
809 /* Macros to operate with address in OF binding to PCI */
810 #define b_x(x, p, l)    (((x) & ((1<<(l))-1)) << (p))
811 #define b_n(x)          b_x((x), 31, 1) /* 0 if relocatable */
812 #define b_p(x)          b_x((x), 30, 1) /* 1 if prefetchable */
813 #define b_t(x)          b_x((x), 29, 1) /* 1 if the address is aliased */
814 #define b_ss(x)         b_x((x), 24, 2) /* the space code */
815 #define b_bbbbbbbb(x)   b_x((x), 16, 8) /* bus number */
816 #define b_ddddd(x)      b_x((x), 11, 5) /* device number */
817 #define b_fff(x)        b_x((x), 8, 3)  /* function number */
818 #define b_rrrrrrrr(x)   b_x((x), 0, 8)  /* register number */
819 
820 /* for 'reg'/'assigned-addresses' OF properties */
821 #define RESOURCE_CELLS_SIZE 2
822 #define RESOURCE_CELLS_ADDRESS 3
823 
824 typedef struct ResourceFields {
825     uint32_t phys_hi;
826     uint32_t phys_mid;
827     uint32_t phys_lo;
828     uint32_t size_hi;
829     uint32_t size_lo;
830 } QEMU_PACKED ResourceFields;
831 
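/*
 * 'reg' below has room for one config space entry plus up to
 * PCI_NUM_REGIONS (7) BAR/expansion ROM regions; 'assigned' carries no
 * config space entry, hence the 8- and 7-element arrays.
 */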
832 typedef struct ResourceProps {
833     ResourceFields reg[8];
834     ResourceFields assigned[7];
835     uint32_t reg_len;
836     uint32_t assigned_len;
837 } ResourceProps;
838 
839 /* fill in the 'reg'/'assigned-addresses' OF properties for
840  * a PCI device. 'reg' describes resource requirements for a
841  * device's IO/MEM regions, 'assigned-addresses' describes the
842  * actual resource assignments.
843  *
844  * the properties are arrays of ('phys-addr', 'size') pairs describing
845  * the addressable regions of the PCI device, where 'phys-addr' is a
846  * RESOURCE_CELLS_ADDRESS-tuple of 32-bit integers corresponding to
847  * (phys.hi, phys.mid, phys.lo), and 'size' is a
848  * RESOURCE_CELLS_SIZE-tuple corresponding to (size.hi, size.lo).
849  *
850  * phys.hi = 0xYYXXXXZZ, where:
851  *   0xYY = npt000ss
852  *          |||   |
853  *          |||   +-- space code
854  *          |||               |
855  *          |||               +  00 if configuration space
856  *          |||               +  01 if IO region,
857  *          |||               +  10 if 32-bit MEM region
858  *          |||               +  11 if 64-bit MEM region
859  *          |||
860  *          ||+------ for non-relocatable IO: 1 if aliased
861  *          ||        for relocatable IO: 1 if below 64KB
862  *          ||        for MEM: 1 if below 1MB
863  *          |+------- 1 if region is prefetchable
864  *          +-------- 1 if region is non-relocatable
865  *   0xXXXX = bbbbbbbb dddddfff, encoding bus, slot, and function
866  *            bits respectively
867  *   0xZZ = rrrrrrrr, the register number of the BAR corresponding
868  *          to the region
869  *
870  * phys.mid and phys.lo correspond respectively to the hi/lo portions
871  * of the actual address of the region.
872  *
873  * how the phys-addr/size values are used differs slightly between
874  * 'reg' and 'assigned-addresses' properties. namely, 'reg' has
875  * an additional description for the config space region of the
876  * device, and in the case of QEMU has n=0 and phys.mid=phys.lo=0
877  * to describe the region as relocatable, with an address-mapping
878  * that corresponds directly to the PHB's address space for the
879  * resource. 'assigned-addresses' always has n=1 set with an absolute
880  * address assigned for the resource. in general, 'assigned-addresses'
881  * won't be populated, since addresses for PCI devices are generally
882  * unmapped initially and left to the guest to assign.
883  *
884  * note also that addresses defined in these properties are, at least
885  * for PAPR guests, relative to the PHB's IO/MEM windows, and
886  * correspond directly to the addresses in the BARs.
887  *
888  * in accordance with PCI Bus Binding to Open Firmware,
889  * IEEE Std 1275-1994, section 4.1.1, as implemented by PAPR+ v2.7,
890  * Appendix C.
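 *
 * as an illustrative example (values assumed, not from a real device):
 * a non-prefetchable 32-bit MEM BAR at config offset 0x10 on the device
 * at bus 0, slot 5, function 0 would get, in its 'reg' entry,
 *   phys.hi = b_ss(2) | b_ddddd(5) | b_rrrrrrrr(0x10) = 0x02002810
 * with phys.mid = phys.lo = 0 (n=0, i.e. relocatable).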
891  */
892 static void populate_resource_props(PCIDevice *d, ResourceProps *rp)
893 {
894     int bus_num = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(d))));
895     uint32_t dev_id = (b_bbbbbbbb(bus_num) |
896                        b_ddddd(PCI_SLOT(d->devfn)) |
897                        b_fff(PCI_FUNC(d->devfn)));
898     ResourceFields *reg, *assigned;
899     int i, reg_idx = 0, assigned_idx = 0;
900 
901     /* config space region */
902     reg = &rp->reg[reg_idx++];
903     reg->phys_hi = cpu_to_be32(dev_id);
904     reg->phys_mid = 0;
905     reg->phys_lo = 0;
906     reg->size_hi = 0;
907     reg->size_lo = 0;
908 
909     for (i = 0; i < PCI_NUM_REGIONS; i++) {
910         if (!d->io_regions[i].size) {
911             continue;
912         }
913 
914         reg = &rp->reg[reg_idx++];
915 
916         reg->phys_hi = cpu_to_be32(dev_id | b_rrrrrrrr(pci_bar(d, i)));
917         if (d->io_regions[i].type & PCI_BASE_ADDRESS_SPACE_IO) {
918             reg->phys_hi |= cpu_to_be32(b_ss(1));
919         } else if (d->io_regions[i].type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
920             reg->phys_hi |= cpu_to_be32(b_ss(3));
921         } else {
922             reg->phys_hi |= cpu_to_be32(b_ss(2));
923         }
924         reg->phys_mid = 0;
925         reg->phys_lo = 0;
926         reg->size_hi = cpu_to_be32(d->io_regions[i].size >> 32);
927         reg->size_lo = cpu_to_be32(d->io_regions[i].size);
928 
929         if (d->io_regions[i].addr == PCI_BAR_UNMAPPED) {
930             continue;
931         }
932 
933         assigned = &rp->assigned[assigned_idx++];
934         assigned->phys_hi = cpu_to_be32(reg->phys_hi | b_n(1));
935         assigned->phys_mid = cpu_to_be32(d->io_regions[i].addr >> 32);
936         assigned->phys_lo = cpu_to_be32(d->io_regions[i].addr);
937         assigned->size_hi = reg->size_hi;
938         assigned->size_lo = reg->size_lo;
939     }
940 
941     rp->reg_len = reg_idx * sizeof(ResourceFields);
942     rp->assigned_len = assigned_idx * sizeof(ResourceFields);
943 }
944 
945 static uint32_t spapr_phb_get_pci_drc_index(sPAPRPHBState *phb,
946                                             PCIDevice *pdev);
947 
948 static int spapr_populate_pci_child_dt(PCIDevice *dev, void *fdt, int offset,
949                                        sPAPRPHBState *sphb)
950 {
951     ResourceProps rp;
952     bool is_bridge = false;
953     int pci_status, err;
954     char *buf = NULL;
955     uint32_t drc_index = spapr_phb_get_pci_drc_index(sphb, dev);
956     uint32_t max_msi, max_msix;
957 
958     if (pci_default_read_config(dev, PCI_HEADER_TYPE, 1) ==
959         PCI_HEADER_TYPE_BRIDGE) {
960         is_bridge = true;
961     }
962 
963     /* in accordance with PAPR+ v2.7 13.6.3, Table 181 */
964     _FDT(fdt_setprop_cell(fdt, offset, "vendor-id",
965                           pci_default_read_config(dev, PCI_VENDOR_ID, 2)));
966     _FDT(fdt_setprop_cell(fdt, offset, "device-id",
967                           pci_default_read_config(dev, PCI_DEVICE_ID, 2)));
968     _FDT(fdt_setprop_cell(fdt, offset, "revision-id",
969                           pci_default_read_config(dev, PCI_REVISION_ID, 1)));
970     _FDT(fdt_setprop_cell(fdt, offset, "class-code",
971                           pci_default_read_config(dev, PCI_CLASS_PROG, 3)));
972     if (pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)) {
973         _FDT(fdt_setprop_cell(fdt, offset, "interrupts",
974                  pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)));
975     }
976 
977     if (!is_bridge) {
978         _FDT(fdt_setprop_cell(fdt, offset, "min-grant",
979             pci_default_read_config(dev, PCI_MIN_GNT, 1)));
980         _FDT(fdt_setprop_cell(fdt, offset, "max-latency",
981             pci_default_read_config(dev, PCI_MAX_LAT, 1)));
982     }
983 
984     if (pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)) {
985         _FDT(fdt_setprop_cell(fdt, offset, "subsystem-id",
986                  pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)));
987     }
988 
989     if (pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)) {
990         _FDT(fdt_setprop_cell(fdt, offset, "subsystem-vendor-id",
991                  pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)));
992     }
993 
994     _FDT(fdt_setprop_cell(fdt, offset, "cache-line-size",
995         pci_default_read_config(dev, PCI_CACHE_LINE_SIZE, 1)));
996 
997     /* the following fdt cells are masked off the pci status register */
998     pci_status = pci_default_read_config(dev, PCI_STATUS, 2);
999     _FDT(fdt_setprop_cell(fdt, offset, "devsel-speed",
1000                           PCI_STATUS_DEVSEL_MASK & pci_status));
1001 
1002     if (pci_status & PCI_STATUS_FAST_BACK) {
1003         _FDT(fdt_setprop(fdt, offset, "fast-back-to-back", NULL, 0));
1004     }
1005     if (pci_status & PCI_STATUS_66MHZ) {
1006         _FDT(fdt_setprop(fdt, offset, "66mhz-capable", NULL, 0));
1007     }
1008     if (pci_status & PCI_STATUS_UDF) {
1009         _FDT(fdt_setprop(fdt, offset, "udf-supported", NULL, 0));
1010     }
1011 
1012     /* NOTE: this is normally generated by firmware via path/unit name,
1013      * but in our case we must set it manually since it does not get
1014      * processed by OF beforehand
1015      */
1016     _FDT(fdt_setprop_string(fdt, offset, "name", "pci"));
1017     buf = spapr_phb_get_loc_code(sphb, dev);
1018     if (!buf) {
1019         error_report("Failed setting the ibm,loc-code");
1020         return -1;
1021     }
1022 
1023     err = fdt_setprop_string(fdt, offset, "ibm,loc-code", buf);
1024     g_free(buf);
1025     if (err < 0) {
1026         return err;
1027     }
1028 
1029     if (drc_index) {
1030         _FDT(fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index));
1031     }
1032 
1033     _FDT(fdt_setprop_cell(fdt, offset, "#address-cells",
1034                           RESOURCE_CELLS_ADDRESS));
1035     _FDT(fdt_setprop_cell(fdt, offset, "#size-cells",
1036                           RESOURCE_CELLS_SIZE));
1037 
1038     max_msi = msi_nr_vectors_allocated(dev);
1039     if (max_msi) {
1040         _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi", max_msi));
1041     }
1042     max_msix = dev->msix_entries_nr;
1043     if (max_msix) {
1044         _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi-x", max_msix));
1045     }
1046 
1047     populate_resource_props(dev, &rp);
1048     _FDT(fdt_setprop(fdt, offset, "reg", (uint8_t *)rp.reg, rp.reg_len));
1049     _FDT(fdt_setprop(fdt, offset, "assigned-addresses",
1050                      (uint8_t *)rp.assigned, rp.assigned_len));
1051 
1052     return 0;
1053 }
1054 
1055 /* create OF node for pci device and required OF DT properties */
1056 static int spapr_create_pci_child_dt(sPAPRPHBState *phb, PCIDevice *dev,
1057                                      void *fdt, int node_offset)
1058 {
1059     int offset, ret;
1060     int slot = PCI_SLOT(dev->devfn);
1061     int func = PCI_FUNC(dev->devfn);
1062     char nodename[FDT_NAME_MAX];
1063 
1064     if (func != 0) {
1065         snprintf(nodename, FDT_NAME_MAX, "pci@%x,%x", slot, func);
1066     } else {
1067         snprintf(nodename, FDT_NAME_MAX, "pci@%x", slot);
1068     }
1069     offset = fdt_add_subnode(fdt, node_offset, nodename);
1070     ret = spapr_populate_pci_child_dt(dev, fdt, offset, phb);
1071 
1072     g_assert(!ret);
1073     if (ret) {
1074         return 0;
1075     }
1076     return offset;
1077 }
1078 
1079 static void spapr_phb_add_pci_device(sPAPRDRConnector *drc,
1080                                      sPAPRPHBState *phb,
1081                                      PCIDevice *pdev,
1082                                      Error **errp)
1083 {
1084     sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
1085     DeviceState *dev = DEVICE(pdev);
1086     void *fdt = NULL;
1087     int fdt_start_offset = 0, fdt_size;
1088 
1089     if (object_dynamic_cast(OBJECT(pdev), "vfio-pci")) {
1090         sPAPRTCETable *tcet = spapr_tce_find_by_liobn(phb->dma_liobn);
1091 
1092         spapr_tce_set_need_vfio(tcet, true);
1093     }
1094 
1095     if (dev->hotplugged) {
1096         fdt = create_device_tree(&fdt_size);
1097         fdt_start_offset = spapr_create_pci_child_dt(phb, pdev, fdt, 0);
1098         if (!fdt_start_offset) {
1099             error_setg(errp, "Failed to create pci child device tree node");
1100             goto out;
1101         }
1102     }
1103 
1104     drck->attach(drc, DEVICE(pdev),
1105                  fdt, fdt_start_offset, !dev->hotplugged, errp);
1106 out:
1107     if (*errp) {
1108         g_free(fdt);
1109     }
1110 }
1111 
1112 static void spapr_phb_remove_pci_device_cb(DeviceState *dev, void *opaque)
1113 {
1114     /* some guest versions do not wait for completion of a device
1115      * cleanup (generally done asynchronously by the kernel) before
1116      * signaling to QEMU that the device is safe to remove, but instead sleep
1117      * for some 'safe' period of time. unfortunately on a busy host
1118      * this sleep isn't guaranteed to be long enough, resulting in
1119      * bad things like IRQ lines being left asserted during final
1120      * device removal. to deal with this we call reset just prior
1121      * to finalizing the device, which will put the device back into
1122      * an 'idle' state, as the device cleanup code expects.
1123      */
1124     pci_device_reset(PCI_DEVICE(dev));
1125     object_unparent(OBJECT(dev));
1126 }
1127 
1128 static void spapr_phb_remove_pci_device(sPAPRDRConnector *drc,
1129                                         sPAPRPHBState *phb,
1130                                         PCIDevice *pdev,
1131                                         Error **errp)
1132 {
1133     sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
1134 
1135     drck->detach(drc, DEVICE(pdev), spapr_phb_remove_pci_device_cb, phb, errp);
1136 }
1137 
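/*
 * DRC ids for PCI functions encode (phb index << 16) | (bus << 8) | devfn;
 * spapr_phb_realize() creates the matching connectors for the root bus.
 */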
1138 static sPAPRDRConnector *spapr_phb_get_pci_func_drc(sPAPRPHBState *phb,
1139                                                     uint32_t busnr,
1140                                                     int32_t devfn)
1141 {
1142     return spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_PCI,
1143                                     (phb->index << 16) |
1144                                     (busnr << 8) |
1145                                     devfn);
1146 }
1147 
1148 static sPAPRDRConnector *spapr_phb_get_pci_drc(sPAPRPHBState *phb,
1149                                                PCIDevice *pdev)
1150 {
1151     uint32_t busnr = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(pdev))));
1152     return spapr_phb_get_pci_func_drc(phb, busnr, pdev->devfn);
1153 }
1154 
1155 static uint32_t spapr_phb_get_pci_drc_index(sPAPRPHBState *phb,
1156                                             PCIDevice *pdev)
1157 {
1158     sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
1159     sPAPRDRConnectorClass *drck;
1160 
1161     if (!drc) {
1162         return 0;
1163     }
1164 
1165     drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
1166     return drck->get_index(drc);
1167 }
1168 
1169 static void spapr_phb_hot_plug_child(HotplugHandler *plug_handler,
1170                                      DeviceState *plugged_dev, Error **errp)
1171 {
1172     sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
1173     PCIDevice *pdev = PCI_DEVICE(plugged_dev);
1174     sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
1175     Error *local_err = NULL;
1176     PCIBus *bus = PCI_BUS(qdev_get_parent_bus(DEVICE(pdev)));
1177     uint32_t slotnr = PCI_SLOT(pdev->devfn);
1178 
1179     /* if DR is disabled we don't need to do anything in the case of
1180      * hotplug or coldplug callbacks
1181      */
1182     if (!phb->dr_enabled) {
1183         /* if this is a hotplug operation initiated by the user
1184          * we need to let them know it's not enabled
1185          */
1186         if (plugged_dev->hotplugged) {
1187             error_setg(errp, QERR_BUS_NO_HOTPLUG,
1188                        object_get_typename(OBJECT(phb)));
1189         }
1190         return;
1191     }
1192 
1193     g_assert(drc);
1194 
1195     /* Following the QEMU convention used for PCIe multifunction
1196      * hotplug, we do not allow functions to be hotplugged to a
1197      * slot that already has function 0 present
1198      */
1199     if (plugged_dev->hotplugged && bus->devices[PCI_DEVFN(slotnr, 0)] &&
1200         PCI_FUNC(pdev->devfn) != 0) {
1201         error_setg(errp, "PCI: slot %d function 0 already occupied by %s,"
1202                    " additional functions can no longer be exposed to guest.",
1203                    slotnr, bus->devices[PCI_DEVFN(slotnr, 0)]->name);
1204         return;
1205     }
1206 
1207     spapr_phb_add_pci_device(drc, phb, pdev, &local_err);
1208     if (local_err) {
1209         error_propagate(errp, local_err);
1210         return;
1211     }
1212 
1213     /* If this is function 0, signal hotplug for all the device functions.
1214      * Otherwise defer sending the hotplug event.
1215      */
1216     if (plugged_dev->hotplugged && PCI_FUNC(pdev->devfn) == 0) {
1217         int i;
1218 
1219         for (i = 0; i < 8; i++) {
1220             sPAPRDRConnector *func_drc;
1221             sPAPRDRConnectorClass *func_drck;
1222             sPAPRDREntitySense state;
1223 
1224             func_drc = spapr_phb_get_pci_func_drc(phb, pci_bus_num(bus),
1225                                                   PCI_DEVFN(slotnr, i));
1226             func_drck = SPAPR_DR_CONNECTOR_GET_CLASS(func_drc);
1227             func_drck->entity_sense(func_drc, &state);
1228 
1229             if (state == SPAPR_DR_ENTITY_SENSE_PRESENT) {
1230                 spapr_hotplug_req_add_by_index(func_drc);
1231             }
1232         }
1233     }
1234 }
1235 
1236 static void spapr_phb_hot_unplug_child(HotplugHandler *plug_handler,
1237                                        DeviceState *plugged_dev, Error **errp)
1238 {
1239     sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
1240     PCIDevice *pdev = PCI_DEVICE(plugged_dev);
1241     sPAPRDRConnectorClass *drck;
1242     sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
1243     Error *local_err = NULL;
1244 
1245     if (!phb->dr_enabled) {
1246         error_setg(errp, QERR_BUS_NO_HOTPLUG,
1247                    object_get_typename(OBJECT(phb)));
1248         return;
1249     }
1250 
1251     g_assert(drc);
1252 
1253     drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
1254     if (!drck->release_pending(drc)) {
1255         PCIBus *bus = PCI_BUS(qdev_get_parent_bus(DEVICE(pdev)));
1256         uint32_t slotnr = PCI_SLOT(pdev->devfn);
1257         sPAPRDRConnector *func_drc;
1258         sPAPRDRConnectorClass *func_drck;
1259         sPAPRDREntitySense state;
1260         int i;
1261 
1262         /* ensure any other present functions are pending unplug */
1263         if (PCI_FUNC(pdev->devfn) == 0) {
1264             for (i = 1; i < 8; i++) {
1265                 func_drc = spapr_phb_get_pci_func_drc(phb, pci_bus_num(bus),
1266                                                       PCI_DEVFN(slotnr, i));
1267                 func_drck = SPAPR_DR_CONNECTOR_GET_CLASS(func_drc);
1268                 func_drck->entity_sense(func_drc, &state);
1269                 if (state == SPAPR_DR_ENTITY_SENSE_PRESENT
1270                     && !func_drck->release_pending(func_drc)) {
1271                     error_setg(errp,
1272                                "PCI: slot %d, function %d still present. "
1273                                "Must unplug all non-0 functions first.",
1274                                slotnr, i);
1275                     return;
1276                 }
1277             }
1278         }
1279 
1280         spapr_phb_remove_pci_device(drc, phb, pdev, &local_err);
1281         if (local_err) {
1282             error_propagate(errp, local_err);
1283             return;
1284         }
1285 
1286         /* if this isn't func 0, defer unplug event. otherwise signal removal
1287          * for all present functions
1288          */
1289         if (PCI_FUNC(pdev->devfn) == 0) {
1290             for (i = 7; i >= 0; i--) {
1291                 func_drc = spapr_phb_get_pci_func_drc(phb, pci_bus_num(bus),
1292                                                       PCI_DEVFN(slotnr, i));
1293                 func_drck = SPAPR_DR_CONNECTOR_GET_CLASS(func_drc);
1294                 func_drck->entity_sense(func_drc, &state);
1295                 if (state == SPAPR_DR_ENTITY_SENSE_PRESENT) {
1296                     spapr_hotplug_req_remove_by_index(func_drc);
1297                 }
1298             }
1299         }
1300     }
1301 }
1302 
1303 static void spapr_phb_realize(DeviceState *dev, Error **errp)
1304 {
1305     sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
1306     SysBusDevice *s = SYS_BUS_DEVICE(dev);
1307     sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(s);
1308     PCIHostState *phb = PCI_HOST_BRIDGE(s);
1309     char *namebuf;
1310     int i;
1311     PCIBus *bus;
1312     uint64_t msi_window_size = 4096;
1313     sPAPRTCETable *tcet;
1314     uint32_t nb_table;
1315 
1316     if (sphb->index != (uint32_t)-1) {
1317         hwaddr windows_base;
1318 
1319         if ((sphb->buid != (uint64_t)-1) || (sphb->dma_liobn != (uint32_t)-1)
1320             || (sphb->mem_win_addr != (hwaddr)-1)
1321             || (sphb->io_win_addr != (hwaddr)-1)) {
1322             error_setg(errp, "Either \"index\" or other parameters must"
1323                        " be specified for PAPR PHB, not both");
1324             return;
1325         }
1326 
1327         if (sphb->index > SPAPR_PCI_MAX_INDEX) {
1328             error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
1329                        SPAPR_PCI_MAX_INDEX);
1330             return;
1331         }
1332 
1333         sphb->buid = SPAPR_PCI_BASE_BUID + sphb->index;
1334         sphb->dma_liobn = SPAPR_PCI_LIOBN(sphb->index, 0);
1335 
1336         windows_base = SPAPR_PCI_WINDOW_BASE
1337             + sphb->index * SPAPR_PCI_WINDOW_SPACING;
1338         sphb->mem_win_addr = windows_base + SPAPR_PCI_MMIO_WIN_OFF;
1339         sphb->io_win_addr = windows_base + SPAPR_PCI_IO_WIN_OFF;
1340     }
1341 
1342     if (sphb->buid == (uint64_t)-1) {
1343         error_setg(errp, "BUID not specified for PHB");
1344         return;
1345     }
1346 
1347     if (sphb->dma_liobn == (uint32_t)-1) {
1348         error_setg(errp, "LIOBN not specified for PHB");
1349         return;
1350     }
1351 
1352     if (sphb->mem_win_addr == (hwaddr)-1) {
1353         error_setg(errp, "Memory window address not specified for PHB");
1354         return;
1355     }
1356 
1357     if (sphb->io_win_addr == (hwaddr)-1) {
1358         error_setg(errp, "IO window address not specified for PHB");
1359         return;
1360     }
1361 
1362     if (spapr_pci_find_phb(spapr, sphb->buid)) {
1363         error_setg(errp, "PCI host bridges must have unique BUIDs");
1364         return;
1365     }
1366 
1367     sphb->dtbusname = g_strdup_printf("pci@%" PRIx64, sphb->buid);
1368 
1369     namebuf = alloca(strlen(sphb->dtbusname) + 32);
1370 
1371     /* Initialize memory regions */
1372     sprintf(namebuf, "%s.mmio", sphb->dtbusname);
1373     memory_region_init(&sphb->memspace, OBJECT(sphb), namebuf, UINT64_MAX);
1374 
1375     sprintf(namebuf, "%s.mmio-alias", sphb->dtbusname);
1376     memory_region_init_alias(&sphb->memwindow, OBJECT(sphb),
1377                              namebuf, &sphb->memspace,
1378                              SPAPR_PCI_MEM_WIN_BUS_OFFSET, sphb->mem_win_size);
1379     memory_region_add_subregion(get_system_memory(), sphb->mem_win_addr,
1380                                 &sphb->memwindow);
1381 
1382     /* Initialize IO regions */
1383     sprintf(namebuf, "%s.io", sphb->dtbusname);
1384     memory_region_init(&sphb->iospace, OBJECT(sphb),
1385                        namebuf, SPAPR_PCI_IO_WIN_SIZE);
1386 
1387     sprintf(namebuf, "%s.io-alias", sphb->dtbusname);
1388     memory_region_init_alias(&sphb->iowindow, OBJECT(sphb), namebuf,
1389                              &sphb->iospace, 0, SPAPR_PCI_IO_WIN_SIZE);
1390     memory_region_add_subregion(get_system_memory(), sphb->io_win_addr,
1391                                 &sphb->iowindow);
1392 
1393     bus = pci_register_bus(dev, NULL,
1394                            pci_spapr_set_irq, pci_spapr_map_irq, sphb,
1395                            &sphb->memspace, &sphb->iospace,
1396                            PCI_DEVFN(0, 0), PCI_NUM_PINS, TYPE_PCI_BUS);
1397     phb->bus = bus;
1398     qbus_set_hotplug_handler(BUS(phb->bus), DEVICE(sphb), NULL);
1399 
1400     /*
1401      * Initialize PHB address space.
1402      * By default there will be at least one subregion for the default
1403      * 32bit DMA window.
1404      * Later the guest might want to create another DMA window
1405      * which will become another memory subregion.
1406      */
1407     sprintf(namebuf, "%s.iommu-root", sphb->dtbusname);
1408 
1409     memory_region_init(&sphb->iommu_root, OBJECT(sphb),
1410                        namebuf, UINT64_MAX);
1411     address_space_init(&sphb->iommu_as, &sphb->iommu_root,
1412                        sphb->dtbusname);
1413 
1414     /*
1415      * As MSI/MSIX interrupts are triggered by writing to the MSI/MSIX
1416      * vectors, we need to allocate some memory to catch those writes
1417      * coming from msi_notify()/msix_notify().
1418      * As MSIMessage::addr is going to be the same and MSIMessage::data
1419      * is going to be a VIRQ number, only 4 bytes of the MSI MR will
1420      * actually be used.
1421      *
1422      * For KVM we want to ensure that this memory is a full page so that
1423      * our memory slot is of page size granularity.
1424      */
1425 #ifdef CONFIG_KVM
1426     if (kvm_enabled()) {
1427         msi_window_size = getpagesize();
1428     }
1429 #endif
1430 
1431     memory_region_init_io(&sphb->msiwindow, NULL, &spapr_msi_ops, spapr,
1432                           "msi", msi_window_size);
1433     memory_region_add_subregion(&sphb->iommu_root, SPAPR_PCI_MSI_WINDOW,
1434                                 &sphb->msiwindow);
1435 
1436     pci_setup_iommu(bus, spapr_pci_dma_iommu, sphb);
1437 
1438     pci_bus_set_route_irq_fn(bus, spapr_route_intx_pin_to_irq);
1439 
1440     QLIST_INSERT_HEAD(&spapr->phbs, sphb, list);
1441 
1442     /* Initialize the LSI table */
1443     for (i = 0; i < PCI_NUM_PINS; i++) {
1444         uint32_t irq;
1445         Error *local_err = NULL;
1446 
1447         irq = xics_alloc_block(spapr->icp, 0, 1, true, false, &local_err);
1448         if (local_err) {
1449             error_propagate(errp, local_err);
1450             error_prepend(errp, "can't allocate LSIs: ");
1451             return;
1452         }
1453 
1454         sphb->lsi_table[i].irq = irq;
1455     }
1456 
1457     /* allocate connectors for child PCI devices */
1458     if (sphb->dr_enabled) {
1459         for (i = 0; i < PCI_SLOT_MAX * 8; i++) {
1460             spapr_dr_connector_new(OBJECT(phb),
1461                                    SPAPR_DR_CONNECTOR_TYPE_PCI,
1462                                    (sphb->index << 16) | i);
1463         }
1464     }
1465 
1466     nb_table = sphb->dma_win_size >> SPAPR_TCE_PAGE_SHIFT;
1467     tcet = spapr_tce_new_table(DEVICE(sphb), sphb->dma_liobn,
1468                                0, SPAPR_TCE_PAGE_SHIFT, nb_table, false);
1469     if (!tcet) {
1470         error_setg(errp, "Unable to create TCE table for %s",
1471                    sphb->dtbusname);
1472         return;
1473     }
1474 
1475     /* Register default 32bit DMA window */
1476     memory_region_add_subregion(&sphb->iommu_root, sphb->dma_win_addr,
1477                                 spapr_tce_get_iommu(tcet));
1478 
1479     sphb->msi = g_hash_table_new_full(g_int_hash, g_int_equal, g_free, g_free);
1480 }
1481 
1482 static int spapr_phb_children_reset(Object *child, void *opaque)
1483 {
1484     DeviceState *dev = (DeviceState *) object_dynamic_cast(child, TYPE_DEVICE);
1485 
1486     if (dev) {
1487         device_reset(dev);
1488     }
1489 
1490     return 0;
1491 }
1492 
1493 static void spapr_phb_reset(DeviceState *qdev)
1494 {
1495     /* Reset the IOMMU state */
1496     object_child_foreach(OBJECT(qdev), spapr_phb_children_reset, NULL);
1497 
1498     if (spapr_phb_eeh_available(SPAPR_PCI_HOST_BRIDGE(qdev))) {
1499         spapr_phb_vfio_reset(qdev);
1500     }
1501 }
1502 
1503 static Property spapr_phb_properties[] = {
1504     DEFINE_PROP_UINT32("index", sPAPRPHBState, index, -1),
1505     DEFINE_PROP_UINT64("buid", sPAPRPHBState, buid, -1),
1506     DEFINE_PROP_UINT32("liobn", sPAPRPHBState, dma_liobn, -1),
1507     DEFINE_PROP_UINT64("mem_win_addr", sPAPRPHBState, mem_win_addr, -1),
1508     DEFINE_PROP_UINT64("mem_win_size", sPAPRPHBState, mem_win_size,
1509                        SPAPR_PCI_MMIO_WIN_SIZE),
1510     DEFINE_PROP_UINT64("io_win_addr", sPAPRPHBState, io_win_addr, -1),
1511     DEFINE_PROP_UINT64("io_win_size", sPAPRPHBState, io_win_size,
1512                        SPAPR_PCI_IO_WIN_SIZE),
1513     DEFINE_PROP_BOOL("dynamic-reconfiguration", sPAPRPHBState, dr_enabled,
1514                      true),
1515     /* Default DMA window is 0..1GB */
1516     DEFINE_PROP_UINT64("dma_win_addr", sPAPRPHBState, dma_win_addr, 0),
1517     DEFINE_PROP_UINT64("dma_win_size", sPAPRPHBState, dma_win_size, 0x40000000),
1518     DEFINE_PROP_END_OF_LIST(),
1519 };
1520 
1521 static const VMStateDescription vmstate_spapr_pci_lsi = {
1522     .name = "spapr_pci/lsi",
1523     .version_id = 1,
1524     .minimum_version_id = 1,
1525     .fields = (VMStateField[]) {
1526         VMSTATE_UINT32_EQUAL(irq, struct spapr_pci_lsi),
1527 
1528         VMSTATE_END_OF_LIST()
1529     },
1530 };
1531 
1532 static const VMStateDescription vmstate_spapr_pci_msi = {
1533     .name = "spapr_pci/msi",
1534     .version_id = 1,
1535     .minimum_version_id = 1,
1536     .fields = (VMStateField[]) {
1537         VMSTATE_UINT32(key, spapr_pci_msi_mig),
1538         VMSTATE_UINT32(value.first_irq, spapr_pci_msi_mig),
1539         VMSTATE_UINT32(value.num, spapr_pci_msi_mig),
1540         VMSTATE_END_OF_LIST()
1541     },
1542 };
1543 
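/*
 * The MSI configuration lives in a GHashTable, which cannot be migrated
 * directly.  pre_save flattens it into the msi_devs array described by
 * vmstate_spapr_pci_msi; post_load rebuilds the hash table from that
 * array and then discards it.
 */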
1544 static void spapr_pci_pre_save(void *opaque)
1545 {
1546     sPAPRPHBState *sphb = opaque;
1547     GHashTableIter iter;
1548     gpointer key, value;
1549     int i;
1550 
1551     g_free(sphb->msi_devs);
1552     sphb->msi_devs = NULL;
1553     sphb->msi_devs_num = g_hash_table_size(sphb->msi);
1554     if (!sphb->msi_devs_num) {
1555         return;
1556     }
1557     sphb->msi_devs = g_malloc(sphb->msi_devs_num * sizeof(spapr_pci_msi_mig));
1558 
1559     g_hash_table_iter_init(&iter, sphb->msi);
1560     for (i = 0; g_hash_table_iter_next(&iter, &key, &value); ++i) {
1561         sphb->msi_devs[i].key = *(uint32_t *) key;
1562         sphb->msi_devs[i].value = *(spapr_pci_msi *) value;
1563     }
1564 }
1565 
1566 static int spapr_pci_post_load(void *opaque, int version_id)
1567 {
1568     sPAPRPHBState *sphb = opaque;
1569     gpointer key, value;
1570     int i;
1571 
1572     for (i = 0; i < sphb->msi_devs_num; ++i) {
1573         key = g_memdup(&sphb->msi_devs[i].key,
1574                        sizeof(sphb->msi_devs[i].key));
1575         value = g_memdup(&sphb->msi_devs[i].value,
1576                          sizeof(sphb->msi_devs[i].value));
1577         g_hash_table_insert(sphb->msi, key, value);
1578     }
1579     g_free(sphb->msi_devs);
1580     sphb->msi_devs = NULL;
1581     sphb->msi_devs_num = 0;
1582 
1583     return 0;
1584 }
1585 
1586 static const VMStateDescription vmstate_spapr_pci = {
1587     .name = "spapr_pci",
1588     .version_id = 2,
1589     .minimum_version_id = 2,
1590     .pre_save = spapr_pci_pre_save,
1591     .post_load = spapr_pci_post_load,
1592     .fields = (VMStateField[]) {
1593         VMSTATE_UINT64_EQUAL(buid, sPAPRPHBState),
1594         VMSTATE_UINT32_EQUAL(dma_liobn, sPAPRPHBState),
1595         VMSTATE_UINT64_EQUAL(mem_win_addr, sPAPRPHBState),
1596         VMSTATE_UINT64_EQUAL(mem_win_size, sPAPRPHBState),
1597         VMSTATE_UINT64_EQUAL(io_win_addr, sPAPRPHBState),
1598         VMSTATE_UINT64_EQUAL(io_win_size, sPAPRPHBState),
1599         VMSTATE_STRUCT_ARRAY(lsi_table, sPAPRPHBState, PCI_NUM_PINS, 0,
1600                              vmstate_spapr_pci_lsi, struct spapr_pci_lsi),
1601         VMSTATE_INT32(msi_devs_num, sPAPRPHBState),
1602         VMSTATE_STRUCT_VARRAY_ALLOC(msi_devs, sPAPRPHBState, msi_devs_num, 0,
1603                                     vmstate_spapr_pci_msi, spapr_pci_msi_mig),
1604         VMSTATE_END_OF_LIST()
1605     },
1606 };
1607 
1608 static const char *spapr_phb_root_bus_path(PCIHostState *host_bridge,
1609                                            PCIBus *rootbus)
1610 {
1611     sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(host_bridge);
1612 
1613     return sphb->dtbusname;
1614 }
1615 
1616 static void spapr_phb_class_init(ObjectClass *klass, void *data)
1617 {
1618     PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
1619     DeviceClass *dc = DEVICE_CLASS(klass);
1620     HotplugHandlerClass *hp = HOTPLUG_HANDLER_CLASS(klass);
1621 
1622     hc->root_bus_path = spapr_phb_root_bus_path;
1623     dc->realize = spapr_phb_realize;
1624     dc->props = spapr_phb_properties;
1625     dc->reset = spapr_phb_reset;
1626     dc->vmsd = &vmstate_spapr_pci;
1627     set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
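    /*
     * The sysbus parent class forbids -device/device_add by default;
     * re-allow it here so extra PHBs can be created from the command line.
     */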
1628     dc->cannot_instantiate_with_device_add_yet = false;
1629     hp->plug = spapr_phb_hot_plug_child;
1630     hp->unplug = spapr_phb_hot_unplug_child;
1631 }
1632 
1633 static const TypeInfo spapr_phb_info = {
1634     .name          = TYPE_SPAPR_PCI_HOST_BRIDGE,
1635     .parent        = TYPE_PCI_HOST_BRIDGE,
1636     .instance_size = sizeof(sPAPRPHBState),
1637     .class_init    = spapr_phb_class_init,
1638     .interfaces    = (InterfaceInfo[]) {
1639         { TYPE_HOTPLUG_HANDLER },
1640         { }
1641     }
1642 };
1643 
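/*
 * Convenience wrapper used by the machine code: create a PHB with only its
 * "index" property set and let every other property take its default.
 * A minimal usage sketch (the loop bound n_phbs is hypothetical):
 *
 *     for (i = 0; i < n_phbs; i++) {
 *         spapr_create_phb(spapr, i);
 *     }
 */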
1644 PCIHostState *spapr_create_phb(sPAPRMachineState *spapr, int index)
1645 {
1646     DeviceState *dev;
1647 
1648     dev = qdev_create(NULL, TYPE_SPAPR_PCI_HOST_BRIDGE);
1649     qdev_prop_set_uint32(dev, "index", index);
1650     qdev_init_nofail(dev);
1651 
1652     return PCI_HOST_BRIDGE(dev);
1653 }
1654 
1655 typedef struct sPAPRFDT {
1656     void *fdt;
1657     int node_off;
1658     sPAPRPHBState *sphb;
1659 } sPAPRFDT;
1660 
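/*
 * Add a device tree node for one PCI device.  If the device is a bridge
 * with a secondary bus, recurse so that the devices behind it are created
 * under the bridge's own node; the sPAPRFDT struct carries the fdt blob,
 * the parent node offset and the owning PHB down the recursion.
 */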
1661 static void spapr_populate_pci_devices_dt(PCIBus *bus, PCIDevice *pdev,
1662                                           void *opaque)
1663 {
1664     PCIBus *sec_bus;
1665     sPAPRFDT *p = opaque;
1666     int offset;
1667     sPAPRFDT s_fdt;
1668 
1669     offset = spapr_create_pci_child_dt(p->sphb, pdev, p->fdt, p->node_off);
1670     if (!offset) {
1671         error_report("Failed to create pci child device tree node");
1672         return;
1673     }
1674 
1675     if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
1676          PCI_HEADER_TYPE_BRIDGE)) {
1677         return;
1678     }
1679 
1680     sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
1681     if (!sec_bus) {
1682         return;
1683     }
1684 
1685     s_fdt.fdt = p->fdt;
1686     s_fdt.node_off = offset;
1687     s_fdt.sphb = p->sphb;
1688     pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
1689                         spapr_populate_pci_devices_dt,
1690                         &s_fdt);
1691 }
1692 
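/*
 * Assign bus numbers behind a bridge.  The bridge's secondary bus gets the
 * next free number; the subordinate bus is temporarily set to 0xff so that
 * config cycles reach everything below while the sub-tree is walked, and
 * is then clamped to the highest bus number actually used.
 */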
1693 static void spapr_phb_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev,
1694                                            void *opaque)
1695 {
1696     unsigned int *bus_no = opaque;
1697     unsigned int primary = *bus_no;
1698     unsigned int subordinate = 0xff;
1699     PCIBus *sec_bus = NULL;
1700 
1701     if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
1702          PCI_HEADER_TYPE_BRIDGE)) {
1703         return;
1704     }
1705 
1706     (*bus_no)++;
1707     pci_default_write_config(pdev, PCI_PRIMARY_BUS, primary, 1);
1708     pci_default_write_config(pdev, PCI_SECONDARY_BUS, *bus_no, 1);
1709     pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, *bus_no, 1);
1710 
1711     sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
1712     if (!sec_bus) {
1713         return;
1714     }
1715 
1716     pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, subordinate, 1);
1717     pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
1718                         spapr_phb_pci_enumerate_bridge, bus_no);
1719     pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, *bus_no, 1);
1720 }
1721 
1722 static void spapr_phb_pci_enumerate(sPAPRPHBState *phb)
1723 {
1724     PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus;
1725     unsigned int bus_no = 0;
1726 
1727     pci_for_each_device(bus, pci_bus_num(bus),
1728                         spapr_phb_pci_enumerate_bridge,
1729                         &bus_no);
1730 
1731 }
1732 
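/*
 * Build the device tree node for this PHB: bus properties, the IO and MMIO
 * "ranges", the LSI interrupt-map, the default DMA window, a child node
 * for every PCI device currently on the bus, and finally the DR connector
 * information used for hotplug.
 */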
1733 int spapr_populate_pci_dt(sPAPRPHBState *phb,
1734                           uint32_t xics_phandle,
1735                           void *fdt)
1736 {
1737     int bus_off, i, j, ret;
1738     char nodename[FDT_NAME_MAX];
1739     uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) };
1740     const uint64_t mmiosize = memory_region_size(&phb->memwindow);
1741     const uint64_t w32max = (1ULL << 32) - SPAPR_PCI_MEM_WIN_BUS_OFFSET;
1742     const uint64_t w32size = MIN(w32max, mmiosize);
1743     const uint64_t w64size = (mmiosize > w32size) ? (mmiosize - w32size) : 0;
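    /*
     * "ranges": one entry for the IO window, one for the 32-bit MMIO
     * window at SPAPR_PCI_MEM_WIN_BUS_OFFSET and, only when the MMIO
     * window spills past 4GB, a third entry for the 64-bit part (see
     * sizeof_ranges below).
     */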
1744     struct {
1745         uint32_t hi;
1746         uint64_t child;
1747         uint64_t parent;
1748         uint64_t size;
1749     } QEMU_PACKED ranges[] = {
1750         {
1751             cpu_to_be32(b_ss(1)), cpu_to_be64(0),
1752             cpu_to_be64(phb->io_win_addr),
1753             cpu_to_be64(memory_region_size(&phb->iospace)),
1754         },
1755         {
1756             cpu_to_be32(b_ss(2)), cpu_to_be64(SPAPR_PCI_MEM_WIN_BUS_OFFSET),
1757             cpu_to_be64(phb->mem_win_addr),
1758             cpu_to_be64(w32size),
1759         },
1760         {
1761             cpu_to_be32(b_ss(3)), cpu_to_be64(1ULL << 32),
1762             cpu_to_be64(phb->mem_win_addr + w32size),
1763             cpu_to_be64(w64size)
1764         },
1765     };
1766     const unsigned sizeof_ranges = (w64size ? 3 : 2) * sizeof(ranges[0]);
1767     uint64_t bus_reg[] = { cpu_to_be64(phb->buid), 0 };
1768     uint32_t interrupt_map_mask[] = {
1769         cpu_to_be32(b_ddddd(-1)|b_fff(0)), 0x0, 0x0, cpu_to_be32(-1)};
1770     uint32_t interrupt_map[PCI_SLOT_MAX * PCI_NUM_PINS][7];
1771     sPAPRTCETable *tcet;
1772     PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus;
1773     sPAPRFDT s_fdt;
1774 
1775     /* Start populating the FDT */
1776     snprintf(nodename, FDT_NAME_MAX, "pci@%" PRIx64, phb->buid);
1777     bus_off = fdt_add_subnode(fdt, 0, nodename);
1778     if (bus_off < 0) {
1779         return bus_off;
1780     }
1781 
1782     /* Write PHB properties */
1783     _FDT(fdt_setprop_string(fdt, bus_off, "device_type", "pci"));
1784     _FDT(fdt_setprop_string(fdt, bus_off, "compatible", "IBM,Logical_PHB"));
1785     _FDT(fdt_setprop_cell(fdt, bus_off, "#address-cells", 0x3));
1786     _FDT(fdt_setprop_cell(fdt, bus_off, "#size-cells", 0x2));
1787     _FDT(fdt_setprop_cell(fdt, bus_off, "#interrupt-cells", 0x1));
1788     _FDT(fdt_setprop(fdt, bus_off, "used-by-rtas", NULL, 0));
1789     _FDT(fdt_setprop(fdt, bus_off, "bus-range", &bus_range, sizeof(bus_range)));
1790     _FDT(fdt_setprop(fdt, bus_off, "ranges", &ranges, sizeof_ranges));
1791     _FDT(fdt_setprop(fdt, bus_off, "reg", &bus_reg, sizeof(bus_reg)));
1792     _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pci-config-space-type", 0x1));
1793     _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pe-total-#msi", XICS_IRQS));
1794 
1795     /* Build the interrupt-map; this must match what is done
1796      * in pci_spapr_map_irq
1797      */
1798     _FDT(fdt_setprop(fdt, bus_off, "interrupt-map-mask",
1799                      &interrupt_map_mask, sizeof(interrupt_map_mask)));
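    /*
     * Seven cells per interrupt-map entry: the child unit address (3 cells,
     * only the device number matters given the mask above), the child
     * interrupt pin (1-4), the XICS phandle, the LSI number allocated at
     * realize time and a trigger/sense code.
     */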
1800     for (i = 0; i < PCI_SLOT_MAX; i++) {
1801         for (j = 0; j < PCI_NUM_PINS; j++) {
1802             uint32_t *irqmap = interrupt_map[i*PCI_NUM_PINS + j];
1803             int lsi_num = pci_spapr_swizzle(i, j);
1804 
1805             irqmap[0] = cpu_to_be32(b_ddddd(i)|b_fff(0));
1806             irqmap[1] = 0;
1807             irqmap[2] = 0;
1808             irqmap[3] = cpu_to_be32(j+1);
1809             irqmap[4] = cpu_to_be32(xics_phandle);
1810             irqmap[5] = cpu_to_be32(phb->lsi_table[lsi_num].irq);
1811             irqmap[6] = cpu_to_be32(0x8);
1812         }
1813     }
1814     /* Write interrupt map */
1815     _FDT(fdt_setprop(fdt, bus_off, "interrupt-map", &interrupt_map,
1816                      sizeof(interrupt_map)));
1817 
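    /* Describe the default DMA window to the guest via "ibm,dma-window" */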
1818     tcet = spapr_tce_find_by_liobn(SPAPR_PCI_LIOBN(phb->index, 0));
1819     if (!tcet) {
1820         return -1;
1821     }
1822     spapr_dma_dt(fdt, bus_off, "ibm,dma-window",
1823                  tcet->liobn, tcet->bus_offset,
1824                  tcet->nb_table << tcet->page_shift);
1825 
1826     /* Walk the bridges and program the bus numbers */
1827     spapr_phb_pci_enumerate(phb);
1828     _FDT(fdt_setprop_cell(fdt, bus_off, "qemu,phb-enumerated", 0x1));
1829 
1830     /* Populate tree nodes with PCI devices attached */
1831     s_fdt.fdt = fdt;
1832     s_fdt.node_off = bus_off;
1833     s_fdt.sphb = phb;
1834     pci_for_each_device(bus, pci_bus_num(bus),
1835                         spapr_populate_pci_devices_dt,
1836                         &s_fdt);
1837 
1838     ret = spapr_drc_populate_dt(fdt, bus_off, OBJECT(phb),
1839                                 SPAPR_DR_CONNECTOR_TYPE_PCI);
1840     if (ret) {
1841         return ret;
1842     }
1843 
1844     return 0;
1845 }
1846 
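/*
 * Register the PCI related RTAS calls: the config space accessors, the MSI
 * calls (only when the platform supports non-broken MSI) and the EEH family
 * of calls.
 */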
1847 void spapr_pci_rtas_init(void)
1848 {
1849     spapr_rtas_register(RTAS_READ_PCI_CONFIG, "read-pci-config",
1850                         rtas_read_pci_config);
1851     spapr_rtas_register(RTAS_WRITE_PCI_CONFIG, "write-pci-config",
1852                         rtas_write_pci_config);
1853     spapr_rtas_register(RTAS_IBM_READ_PCI_CONFIG, "ibm,read-pci-config",
1854                         rtas_ibm_read_pci_config);
1855     spapr_rtas_register(RTAS_IBM_WRITE_PCI_CONFIG, "ibm,write-pci-config",
1856                         rtas_ibm_write_pci_config);
1857     if (msi_nonbroken) {
1858         spapr_rtas_register(RTAS_IBM_QUERY_INTERRUPT_SOURCE_NUMBER,
1859                             "ibm,query-interrupt-source-number",
1860                             rtas_ibm_query_interrupt_source_number);
1861         spapr_rtas_register(RTAS_IBM_CHANGE_MSI, "ibm,change-msi",
1862                             rtas_ibm_change_msi);
1863     }
1864 
1865     spapr_rtas_register(RTAS_IBM_SET_EEH_OPTION,
1866                         "ibm,set-eeh-option",
1867                         rtas_ibm_set_eeh_option);
1868     spapr_rtas_register(RTAS_IBM_GET_CONFIG_ADDR_INFO2,
1869                         "ibm,get-config-addr-info2",
1870                         rtas_ibm_get_config_addr_info2);
1871     spapr_rtas_register(RTAS_IBM_READ_SLOT_RESET_STATE2,
1872                         "ibm,read-slot-reset-state2",
1873                         rtas_ibm_read_slot_reset_state2);
1874     spapr_rtas_register(RTAS_IBM_SET_SLOT_RESET,
1875                         "ibm,set-slot-reset",
1876                         rtas_ibm_set_slot_reset);
1877     spapr_rtas_register(RTAS_IBM_CONFIGURE_PE,
1878                         "ibm,configure-pe",
1879                         rtas_ibm_configure_pe);
1880     spapr_rtas_register(RTAS_IBM_SLOT_ERROR_DETAIL,
1881                         "ibm,slot-error-detail",
1882                         rtas_ibm_slot_error_detail);
1883 }
1884 
1885 static void spapr_pci_register_types(void)
1886 {
1887     type_register_static(&spapr_phb_info);
1888 }
1889 
1890 type_init(spapr_pci_register_types)
1891 
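/*
 * qbus_walk_children() callback: flip the "big-endian-framebuffer" property
 * on any VGA or secondary-vga device found on the bus.
 */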
1892 static int spapr_switch_one_vga(DeviceState *dev, void *opaque)
1893 {
1894     bool be = *(bool *)opaque;
1895 
1896     if (object_dynamic_cast(OBJECT(dev), "VGA")
1897         || object_dynamic_cast(OBJECT(dev), "secondary-vga")) {
1898         object_property_set_bool(OBJECT(dev), be, "big-endian-framebuffer",
1899                                  &error_abort);
1900     }
1901     return 0;
1902 }
1903 
1904 void spapr_pci_switch_vga(bool big_endian)
1905 {
1906     sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
1907     sPAPRPHBState *sphb;
1908 
1909     /*
1910      * For backward compatibility with existing guests, we switch
1911      * the endianness of the VGA controller when changing the guest
1912      * interrupt mode
1913      */
1914     QLIST_FOREACH(sphb, &spapr->phbs, list) {
1915         BusState *bus = &PCI_HOST_BRIDGE(sphb)->bus->qbus;
1916         qbus_walk_children(bus, spapr_switch_one_vga, NULL, NULL, NULL,
1917                            &big_endian);
1918     }
1919 }
1920