xref: /openbmc/qemu/hw/ppc/spapr_irq.c (revision 464e447a)
/*
 * QEMU PowerPC sPAPR IRQ interface
 *
 * Copyright (c) 2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xics.h"
#include "sysemu/kvm.h"

#include "trace.h"

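/*
 * Set up the bitmap allocator that hands out IRQ numbers for MSIs:
 * one bit per allocatable MSI IRQ number.
 */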
void spapr_irq_msi_init(sPAPRMachineState *spapr, uint32_t nr_msis)
{
    spapr->irq_map_nr = nr_msis;
    spapr->irq_map = bitmap_new(spapr->irq_map_nr);
}

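/*
 * Allocate a block of 'num' contiguous MSI IRQ numbers from the bitmap
 * and return the first one, offset by SPAPR_IRQ_MSI (the base of the
 * dynamic MSI range). Returns -1 and sets errp when no block is free.
 *
 * Illustrative use only, with 'err' a local Error * of the caller:
 *
 *     irq = spapr_irq_msi_alloc(spapr, 4, true, &err);
 *     if (irq < 0) {
 *         ...
 *     }
 */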
int spapr_irq_msi_alloc(sPAPRMachineState *spapr, uint32_t num, bool align,
                        Error **errp)
{
    int irq;

    /*
     * The 'align_mask' parameter of bitmap_find_next_zero_area()
     * should be one less than a power of 2; 0 means no
     * alignment. Adapt the 'align' value of the former allocator
     * to fit the requirements of bitmap_find_next_zero_area()
     */
    align -= 1;

    irq = bitmap_find_next_zero_area(spapr->irq_map, spapr->irq_map_nr, 0, num,
                                     align);
    if (irq == spapr->irq_map_nr) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    bitmap_set(spapr->irq_map, irq, num);

    return irq + SPAPR_IRQ_MSI;
}

void spapr_irq_msi_free(sPAPRMachineState *spapr, int irq, uint32_t num)
{
    bitmap_clear(spapr->irq_map, irq - SPAPR_IRQ_MSI, num);
}

void spapr_irq_msi_reset(sPAPRMachineState *spapr)
{
    bitmap_clear(spapr->irq_map, 0, spapr->irq_map_nr);
}


/*
 * XICS IRQ backend.
 */

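/*
 * Create the ICS (Interrupt Controller Source) object backing the
 * XICS controller, link it to the machine and realize it.
 */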
static ICSState *spapr_ics_create(sPAPRMachineState *spapr,
                                  const char *type_ics,
                                  int nr_irqs, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;

    obj = object_new(type_ics);
    object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort);
    object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
                                   &error_abort);
    object_property_set_int(obj, nr_irqs, "nr-irqs", &local_err);
    if (local_err) {
        goto error;
    }
    object_property_set_bool(obj, true, "realized", &local_err);
    if (local_err) {
        goto error;
    }

    return ICS_BASE(obj);

error:
    error_propagate(errp, local_err);
    return NULL;
}

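/*
 * Instantiate the XICS backend: try the KVM in-kernel XICS first when
 * a kernel irqchip is allowed, and fall back to the QEMU-emulated ICS
 * unless kernel_irqchip was explicitly required.
 */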
static void spapr_irq_init_xics(sPAPRMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    int nr_irqs = spapr->irq->nr_irqs;
    Error *local_err = NULL;

    if (kvm_enabled()) {
        if (machine_kernel_irqchip_allowed(machine) &&
            !xics_kvm_init(spapr, &local_err)) {
            spapr->icp_type = TYPE_KVM_ICP;
            spapr->ics = spapr_ics_create(spapr, TYPE_ICS_KVM, nr_irqs,
                                          &local_err);
        }
        if (machine_kernel_irqchip_required(machine) && !spapr->ics) {
            error_prepend(&local_err,
                          "kernel_irqchip requested but unavailable: ");
            goto error;
        }
        error_free(local_err);
        local_err = NULL;
    }

    if (!spapr->ics) {
        xics_spapr_init(spapr);
        spapr->icp_type = TYPE_ICP;
        spapr->ics = spapr_ics_create(spapr, TYPE_ICS_SIMPLE, nr_irqs,
                                      &local_err);
    }

error:
    error_propagate(errp, local_err);
}

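/* An IRQ number is considered free when its source has no type flags set */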
#define ICS_IRQ_FREE(ics, srcno)   \
    (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))

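/*
 * Claim a single IRQ number in the ICS and record whether it is level
 * (LSI) or message (MSI) triggered.
 */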
static int spapr_irq_claim_xics(sPAPRMachineState *spapr, int irq, bool lsi,
                                Error **errp)
{
    ICSState *ics = spapr->ics;

    assert(ics);

    if (!ics_valid_irq(ics, irq)) {
        error_setg(errp, "IRQ %d is invalid", irq);
        return -1;
    }

    if (!ICS_IRQ_FREE(ics, irq - ics->offset)) {
        error_setg(errp, "IRQ %d is not free", irq);
        return -1;
    }

    ics_set_irq_type(ics, irq - ics->offset, lsi);
    return 0;
}

static void spapr_irq_free_xics(sPAPRMachineState *spapr, int irq, int num)
{
    ICSState *ics = spapr->ics;
    uint32_t srcno = irq - ics->offset;
    int i;

    if (ics_valid_irq(ics, irq)) {
        trace_spapr_irq_free(0, irq, num);
        for (i = srcno; i < srcno + num; ++i) {
            if (ICS_IRQ_FREE(ics, i)) {
                trace_spapr_irq_free_warn(0, i);
            }
            memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
        }
    }
}

static qemu_irq spapr_qirq_xics(sPAPRMachineState *spapr, int irq)
{
    ICSState *ics = spapr->ics;
    uint32_t srcno = irq - ics->offset;

    if (ics_valid_irq(ics, irq)) {
        return ics->qirqs[srcno];
    }

    return NULL;
}

static void spapr_irq_print_info_xics(sPAPRMachineState *spapr, Monitor *mon)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        icp_pic_print_info(ICP(cpu->intc), mon);
    }

    ics_pic_print_info(spapr->ics, mon);
}

static Object *spapr_irq_cpu_intc_create_xics(sPAPRMachineState *spapr,
                                              Object *cpu, Error **errp)
{
    return icp_create(cpu, spapr->icp_type, XICS_FABRIC(spapr), errp);
}

static int spapr_irq_post_load_xics(sPAPRMachineState *spapr, int version_id)
{
    if (!object_dynamic_cast(OBJECT(spapr->ics), TYPE_ICS_KVM)) {
        CPUState *cs;
        CPU_FOREACH(cs) {
            PowerPCCPU *cpu = POWERPC_CPU(cs);
            icp_resend(ICP(cpu->intc));
        }
    }
    return 0;
}

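/*
 * The XICS backend uses a 4K IRQ number space starting at XICS_IRQ_BASE;
 * everything from SPAPR_IRQ_MSI (assumed here to be the base of the
 * dynamic range, see spapr_irq.h) up to the end of that space is
 * available for MSIs.
 */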
#define SPAPR_IRQ_XICS_NR_IRQS     0x1000
#define SPAPR_IRQ_XICS_NR_MSIS     \
    (XICS_IRQ_BASE + SPAPR_IRQ_XICS_NR_IRQS - SPAPR_IRQ_MSI)

sPAPRIrq spapr_irq_xics = {
    .nr_irqs     = SPAPR_IRQ_XICS_NR_IRQS,
    .nr_msis     = SPAPR_IRQ_XICS_NR_MSIS,
    .ov5         = SPAPR_OV5_XIVE_LEGACY,

    .init        = spapr_irq_init_xics,
    .claim       = spapr_irq_claim_xics,
    .free        = spapr_irq_free_xics,
    .qirq        = spapr_qirq_xics,
    .print_info  = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load   = spapr_irq_post_load_xics,
};

/*
 * XIVE IRQ backend.
 */
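/*
 * Instantiate the emulated XIVE device, size its END table to eight
 * event queues (one per priority) per vCPU, and claim the IPI range.
 */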
static void spapr_irq_init_xive(sPAPRMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    uint32_t nr_servers = spapr_max_server_number(spapr);
    DeviceState *dev;
    int i;

    /* KVM XIVE device not yet available */
    if (kvm_enabled()) {
        if (machine_kernel_irqchip_required(machine)) {
            error_setg(errp, "kernel_irqchip requested. no KVM XIVE support");
            return;
        }
    }

    dev = qdev_create(NULL, TYPE_SPAPR_XIVE);
    qdev_prop_set_uint32(dev, "nr-irqs", spapr->irq->nr_irqs);
    /*
     * 8 XIVE END structures per CPU. One for each available priority
     */
    qdev_prop_set_uint32(dev, "nr-ends", nr_servers << 3);
    qdev_init_nofail(dev);

    spapr->xive = SPAPR_XIVE(dev);

    /* Enable the CPU IPIs */
    for (i = 0; i < nr_servers; ++i) {
        spapr_xive_irq_claim(spapr->xive, SPAPR_IRQ_IPI + i, false);
    }

    spapr_xive_hcall_init(spapr);
}

static int spapr_irq_claim_xive(sPAPRMachineState *spapr, int irq, bool lsi,
                                Error **errp)
{
    if (!spapr_xive_irq_claim(spapr->xive, irq, lsi)) {
        error_setg(errp, "IRQ %d is invalid", irq);
        return -1;
    }
    return 0;
}

static void spapr_irq_free_xive(sPAPRMachineState *spapr, int irq, int num)
{
    int i;

    for (i = irq; i < irq + num; ++i) {
        spapr_xive_irq_free(spapr->xive, i);
    }
}

static qemu_irq spapr_qirq_xive(sPAPRMachineState *spapr, int irq)
{
    return spapr_xive_qirq(spapr->xive, irq);
}

static void spapr_irq_print_info_xive(sPAPRMachineState *spapr,
                                      Monitor *mon)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        xive_tctx_pic_print_info(XIVE_TCTX(cpu->intc), mon);
    }

    spapr_xive_pic_print_info(spapr->xive, mon);
}

static Object *spapr_irq_cpu_intc_create_xive(sPAPRMachineState *spapr,
                                              Object *cpu, Error **errp)
{
    Object *obj = xive_tctx_create(cpu, XIVE_ROUTER(spapr->xive), errp);

    /*
     * (TCG) Set the OS CAM line early for hotplugged CPUs, as they do
     * not benefit from the reset of the XIVE IRQ backend.
     */
    spapr_xive_set_tctx_os_cam(XIVE_TCTX(obj));
    return obj;
}

static int spapr_irq_post_load_xive(sPAPRMachineState *spapr, int version_id)
{
    return 0;
}

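/* On machine reset, set the OS CAM line of every thread interrupt context */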
static void spapr_irq_reset_xive(sPAPRMachineState *spapr, Error **errp)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        /* (TCG) Set the OS CAM line of the thread interrupt context. */
        spapr_xive_set_tctx_os_cam(XIVE_TCTX(cpu->intc));
    }
}

/*
 * XIVE uses the full IRQ number space. Set it to 8K to be compatible
 * with XICS.
 */

#define SPAPR_IRQ_XIVE_NR_IRQS     0x2000
#define SPAPR_IRQ_XIVE_NR_MSIS     (SPAPR_IRQ_XIVE_NR_IRQS - SPAPR_IRQ_MSI)

sPAPRIrq spapr_irq_xive = {
    .nr_irqs     = SPAPR_IRQ_XIVE_NR_IRQS,
    .nr_msis     = SPAPR_IRQ_XIVE_NR_MSIS,
    .ov5         = SPAPR_OV5_XIVE_EXPLOIT,

    .init        = spapr_irq_init_xive,
    .claim       = spapr_irq_claim_xive,
    .free        = spapr_irq_free_xive,
    .qirq        = spapr_qirq_xive,
    .print_info  = spapr_irq_print_info_xive,
    .dt_populate = spapr_dt_xive,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xive,
    .post_load   = spapr_irq_post_load_xive,
    .reset       = spapr_irq_reset_xive,
};

/*
 * sPAPR IRQ frontend routines for devices
 */
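/*
 * Devices go through these entry points rather than calling a backend
 * directly; spapr->irq selects the XICS or XIVE backend at machine init
 * time. A minimal sketch of typical use (illustrative only):
 *
 *     if (spapr_irq_claim(spapr, irq, true, &local_err) < 0) {
 *         ...
 *     }
 *     qemu_set_irq(spapr_qirq(spapr, irq), 1);
 */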
void spapr_irq_init(sPAPRMachineState *spapr, Error **errp)
{
    /* Initialize the MSI IRQ allocator. */
    if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
        spapr_irq_msi_init(spapr, spapr->irq->nr_msis);
    }

    spapr->irq->init(spapr, errp);
}

int spapr_irq_claim(sPAPRMachineState *spapr, int irq, bool lsi, Error **errp)
{
    return spapr->irq->claim(spapr, irq, lsi, errp);
}

void spapr_irq_free(sPAPRMachineState *spapr, int irq, int num)
{
    spapr->irq->free(spapr, irq, num);
}

qemu_irq spapr_qirq(sPAPRMachineState *spapr, int irq)
{
    return spapr->irq->qirq(spapr, irq);
}

int spapr_irq_post_load(sPAPRMachineState *spapr, int version_id)
{
    return spapr->irq->post_load(spapr, version_id);
}

void spapr_irq_reset(sPAPRMachineState *spapr, Error **errp)
{
    if (spapr->irq->reset) {
        spapr->irq->reset(spapr, errp);
    }
}

/*
 * XICS legacy routines - to deprecate one day
 */

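/*
 * Scan the ICS for a block of 'num' consecutive free IRQ numbers,
 * starting candidates on 'alignnum' boundaries. Returns the first
 * source number of the block, or -1 if none is available.
 */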
static int ics_find_free_block(ICSState *ics, int num, int alignnum)
{
    int first, i;

    for (first = 0; first < ics->nr_irqs; first += alignnum) {
        if (num > (ics->nr_irqs - first)) {
            return -1;
        }
        for (i = first; i < first + num; ++i) {
            if (!ICS_IRQ_FREE(ics, i)) {
                break;
            }
        }
        if (i == (first + num)) {
            return first;
        }
    }

    return -1;
}

int spapr_irq_find(sPAPRMachineState *spapr, int num, bool align, Error **errp)
{
    ICSState *ics = spapr->ics;
    int first = -1;

    assert(ics);

    /*
     * MSIMessage::data is used for storing the VIRQ, so it has to be
     * aligned to num to support multiple MSI vectors. MSI-X is not
     * affected by this. The hint is used for the first IRQ, the rest
     * should be allocated contiguously.
     */
    if (align) {
        assert((num == 1) || (num == 2) || (num == 4) ||
               (num == 8) || (num == 16) || (num == 32));
        first = ics_find_free_block(ics, num, num);
    } else {
        first = ics_find_free_block(ics, num, 1);
    }

    if (first < 0) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    return first + ics->offset;
}

#define SPAPR_IRQ_XICS_LEGACY_NR_IRQS     0x400

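/*
 * Legacy XICS backend, kept for machine types that still use the
 * legacy IRQ allocation scheme (see legacy_irq_allocation above): a
 * 1K IRQ number space in which MSIs are found directly in the ICS
 * with spapr_irq_find(), hence nr_msis == nr_irqs.
 */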
sPAPRIrq spapr_irq_xics_legacy = {
    .nr_irqs     = SPAPR_IRQ_XICS_LEGACY_NR_IRQS,
    .nr_msis     = SPAPR_IRQ_XICS_LEGACY_NR_IRQS,
    .ov5         = SPAPR_OV5_XIVE_LEGACY,

    .init        = spapr_irq_init_xics,
    .claim       = spapr_irq_claim_xics,
    .free        = spapr_irq_free_xics,
    .qirq        = spapr_qirq_xics,
    .print_info  = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load   = spapr_irq_post_load_xics,
};
469