/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "trace.h"
#include "qemu/timer.h"
#include "hw/ppc/xics.h"
#include "hw/qdev-properties.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qapi/visitor.h"
#include "migration/vmstate.h"
#include "hw/intc/intc.h"
#include "hw/irq.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "target/ppc/cpu.h"

void icp_pic_print_info(ICPState *icp, GString *buf)
{
    int cpu_index;

    /* Skip partially initialized vCPUs. This can happen on sPAPR when vCPUs
     * are hot plugged or unplugged.
     */
    if (!icp) {
        return;
    }

    cpu_index = icp->cs ? icp->cs->cpu_index : -1;

    if (!icp->output) {
        return;
    }

    if (kvm_irqchip_in_kernel()) {
        icp_synchronize_state(icp);
    }

    g_string_append_printf(buf, "CPU %d XIRR=%08x (%p) PP=%02x MFRR=%02x\n",
                           cpu_index, icp->xirr, icp->xirr_owner,
                           icp->pending_priority, icp->mfrr);
}

void ics_pic_print_info(ICSState *ics, GString *buf)
{
    uint32_t i;

    g_string_append_printf(buf, "ICS %4x..%4x %p\n",
                           ics->offset, ics->offset + ics->nr_irqs - 1, ics);

    if (!ics->irqs) {
        return;
    }

    if (kvm_irqchip_in_kernel()) {
        ics_synchronize_state(ics);
    }

    for (i = 0; i < ics->nr_irqs; i++) {
        ICSIRQState *irq = ics->irqs + i;

        if (!(irq->flags & XICS_FLAGS_IRQ_MASK)) {
            continue;
        }
        g_string_append_printf(buf, "  %4x %s %02x %02x\n",
                               ics->offset + i,
                               (irq->flags & XICS_FLAGS_IRQ_LSI) ?
                               "LSI" : "MSI",
                               irq->priority, irq->status);
    }
}

/*
 * ICP: Presentation layer
 */

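/*
 * The 32-bit XIRR is split into the 8-bit CPPR (Current Processor Priority
 * Register, bits 31..24) and the 24-bit XISR (pending interrupt source
 * number, bits 23..0).  Numerically lower priority values are more
 * favoured; a source priority of 0xff means the source is masked.
 */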
#define XISR_MASK 0x00ffffff
#define CPPR_MASK 0xff000000

#define XISR(icp) (((icp)->xirr) & XISR_MASK)
#define CPPR(icp) (((icp)->xirr) >> 24)

static void ics_reject(ICSState *ics, uint32_t nr);
static void ics_eoi(ICSState *ics, uint32_t nr);

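/*
 * Present the inter-processor interrupt (source number XICS_IPI) if the
 * MFRR is more favoured than whatever is currently pending.  Any source
 * that gets displaced is rejected back to its ICS so it can be
 * re-presented later.
 */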
static void icp_check_ipi(ICPState *icp)
{
    if (XISR(icp) && (icp->pending_priority <= icp->mfrr)) {
        return;
    }

    trace_xics_icp_check_ipi(icp->cs->cpu_index, icp->mfrr);

    if (XISR(icp) && icp->xirr_owner) {
        ics_reject(icp->xirr_owner, XISR(icp));
    }

    icp->xirr = (icp->xirr & ~XISR_MASK) | XICS_IPI;
    icp->pending_priority = icp->mfrr;
    icp->xirr_owner = NULL;
    qemu_irq_raise(icp->output);
}

void icp_resend(ICPState *icp)
{
    XICSFabric *xi = icp->xics;
    XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);

    if (icp->mfrr < CPPR(icp)) {
        icp_check_ipi(icp);
    }

    xic->ics_resend(xi);
}

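/*
 * Update the CPPR as written by the OS.  Lowering the value masks more
 * interrupts: a pending source that is no longer favoured enough is
 * rejected back to its ICS.  Raising it may allow previously rejected or
 * masked sources to be presented, so a resend is triggered when nothing
 * is pending.
 */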
void icp_set_cppr(ICPState *icp, uint8_t cppr)
{
    uint8_t old_cppr;
    uint32_t old_xisr;

    old_cppr = CPPR(icp);
    icp->xirr = (icp->xirr & ~CPPR_MASK) | (cppr << 24);

    if (cppr < old_cppr) {
        if (XISR(icp) && (cppr <= icp->pending_priority)) {
            old_xisr = XISR(icp);
            icp->xirr &= ~XISR_MASK; /* Clear XISR */
            icp->pending_priority = 0xff;
            qemu_irq_lower(icp->output);
            if (icp->xirr_owner) {
                ics_reject(icp->xirr_owner, old_xisr);
                icp->xirr_owner = NULL;
            }
        }
    } else {
        if (!XISR(icp)) {
            icp_resend(icp);
        }
    }
}

void icp_set_mfrr(ICPState *icp, uint8_t mfrr)
{
    icp->mfrr = mfrr;
    if (mfrr < CPPR(icp)) {
        icp_check_ipi(icp);
    }
}

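/*
 * The OS reads XIRR to accept the pending interrupt: return the current
 * XIRR value, promote the pending priority into the CPPR field so that
 * only more favoured interrupts can preempt, and lower the CPU's external
 * interrupt line.
 */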
uint32_t icp_accept(ICPState *icp)
{
    uint32_t xirr = icp->xirr;

    qemu_irq_lower(icp->output);
    icp->xirr = icp->pending_priority << 24;
    icp->pending_priority = 0xff;
    icp->xirr_owner = NULL;

    trace_xics_icp_accept(xirr, icp->xirr);

    return xirr;
}

uint32_t icp_ipoll(ICPState *icp, uint32_t *mfrr)
{
    if (mfrr) {
        *mfrr = icp->mfrr;
    }
    return icp->xirr;
}

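/*
 * End-of-interrupt: restore the CPPR from the value the OS writes back,
 * forward the EOI to the ICS owning the source, and re-present anything
 * that is still queued.
 */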
void icp_eoi(ICPState *icp, uint32_t xirr)
{
    XICSFabric *xi = icp->xics;
    XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);
    ICSState *ics;
    uint32_t irq;

    /* Send EOI -> ICS */
    icp->xirr = (icp->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
    trace_xics_icp_eoi(icp->cs->cpu_index, xirr, icp->xirr);
    irq = xirr & XISR_MASK;

    ics = xic->ics_get(xi, irq);
    if (ics) {
        ics_eoi(ics, irq);
    }
    if (!XISR(icp)) {
        icp_resend(icp);
    }
}

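/*
 * Deliver interrupt 'nr' from an ICS to the presenter of 'server'.  The
 * interrupt is rejected if the CPU is not interruptible at this priority
 * or if something at least as favoured is already pending; otherwise it
 * displaces any current XISR and the CPU's external interrupt line is
 * raised.
 */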
void icp_irq(ICSState *ics, int server, int nr, uint8_t priority)
{
    ICPState *icp = xics_icp_get(ics->xics, server);

    trace_xics_icp_irq(server, nr, priority);

    if ((priority >= CPPR(icp))
        || (XISR(icp) && (icp->pending_priority <= priority))) {
        ics_reject(ics, nr);
    } else {
        if (XISR(icp) && icp->xirr_owner) {
            ics_reject(icp->xirr_owner, XISR(icp));
            icp->xirr_owner = NULL;
        }
        icp->xirr = (icp->xirr & ~XISR_MASK) | (nr & XISR_MASK);
        icp->xirr_owner = ics;
        icp->pending_priority = priority;
        trace_xics_icp_raise(icp->xirr, icp->pending_priority);
        qemu_irq_raise(icp->output);
    }
}

static int icp_pre_save(void *opaque)
{
    ICPState *icp = opaque;

    if (kvm_irqchip_in_kernel()) {
        icp_get_kvm_state(icp);
    }

    return 0;
}

static int icp_post_load(void *opaque, int version_id)
{
    ICPState *icp = opaque;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;
        int ret;

        ret = icp_set_kvm_state(icp, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_icp_server = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = icp_pre_save,
    .post_load = icp_post_load,
    .fields = (const VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32(xirr, ICPState),
        VMSTATE_UINT8(pending_priority, ICPState),
        VMSTATE_UINT8(mfrr, ICPState),
        VMSTATE_END_OF_LIST()
    },
};

void icp_reset(ICPState *icp)
{
    icp->xirr = 0;
    icp->pending_priority = 0xff;
    icp->mfrr = 0xff;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        icp_set_kvm_state(icp, &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
}

static void icp_realize(DeviceState *dev, Error **errp)
{
    ICPState *icp = ICP(dev);
    PowerPCCPU *cpu;
    CPUPPCState *env;
    Error *err = NULL;

    assert(icp->xics);
    assert(icp->cs);

    cpu = POWERPC_CPU(icp->cs);
    env = &cpu->env;
    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER7:
        icp->output = qdev_get_gpio_in(DEVICE(cpu), POWER7_INPUT_INT);
        break;
    case PPC_FLAGS_INPUT_POWER9: /* For SPAPR xics emulation */
        icp->output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_INT);
        break;

    case PPC_FLAGS_INPUT_970:
        icp->output = qdev_get_gpio_in(DEVICE(cpu), PPC970_INPUT_INT);
        break;

    default:
        error_setg(errp, "XICS interrupt controller does not support this CPU bus model");
        return;
    }

    /* Connect the presenter to the VCPU (required for CPU hotplug) */
    if (kvm_irqchip_in_kernel()) {
        icp_kvm_realize(dev, &err);
        if (err) {
            error_propagate(errp, err);
            return;
        }
    }
    /*
     * The way that pre_2_10_icp is handled is really, really hacky.
     * We used to have here this call:
     *
     *    vmstate_register(NULL, icp->cs->cpu_index, &vmstate_icp_server, icp);
     *
     * But we were doing:
     *    pre_2_10_vmstate_register_dummy_icp()
     *    this vmstate_register()
     *    pre_2_10_vmstate_unregister_dummy_icp()
     *
     * So for a short amount of time we had two vmstate entries with the
     * same name.  This fixes it.
     */
    vmstate_replace_hack_for_ppc(NULL, icp->cs->cpu_index,
                                 &vmstate_icp_server, icp);
}

static void icp_unrealize(DeviceState *dev)
{
    ICPState *icp = ICP(dev);

    vmstate_unregister(NULL, &vmstate_icp_server, icp);
}

static Property icp_properties[] = {
    DEFINE_PROP_LINK(ICP_PROP_XICS, ICPState, xics, TYPE_XICS_FABRIC,
                     XICSFabric *),
    DEFINE_PROP_LINK(ICP_PROP_CPU, ICPState, cs, TYPE_CPU, CPUState *),
    DEFINE_PROP_END_OF_LIST(),
};

static void icp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = icp_realize;
    dc->unrealize = icp_unrealize;
    device_class_set_props(dc, icp_properties);
    /*
     * Reason: part of XICS interrupt controller, needs to be wired up
     * by icp_create().
     */
    dc->user_creatable = false;
}

static const TypeInfo icp_info = {
    .name = TYPE_ICP,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICPState),
    .class_init = icp_class_init,
    .class_size = sizeof(ICPStateClass),
};

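/*
 * Create an ICP of the given type for one vCPU and wire it up: the
 * presenter becomes a child of the CPU object and is linked to both the
 * CPU and the XICS fabric before being realized.  On failure the object
 * is unparented and NULL is returned.
 */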
Object *icp_create(Object *cpu, const char *type, XICSFabric *xi, Error **errp)
{
    Object *obj;

    obj = object_new(type);
    object_property_add_child(cpu, type, obj);
    object_unref(obj);
    object_property_set_link(obj, ICP_PROP_XICS, OBJECT(xi), &error_abort);
    object_property_set_link(obj, ICP_PROP_CPU, cpu, &error_abort);
    if (!qdev_realize(DEVICE(obj), NULL, errp)) {
        object_unparent(obj);
        obj = NULL;
    }

    return obj;
}

void icp_destroy(ICPState *icp)
{
    Object *obj = OBJECT(icp);

    object_unparent(obj);
}

/*
 * ICS: Source layer
 */
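
/*
 * MSIs are edge sensitive: a source rejected by the presenter is latched
 * (XICS_STATUS_REJECTED) and tried again on resend.  LSIs are level
 * sensitive: the asserted level is tracked (XICS_STATUS_ASSERTED) and the
 * source is presented whenever it is unmasked and not already outstanding
 * (XICS_STATUS_SENT).
 */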
static void ics_resend_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    /* FIXME: filter by server#? */
    if (irq->status & XICS_STATUS_REJECTED) {
        irq->status &= ~XICS_STATUS_REJECTED;
        if (irq->priority != 0xff) {
            icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

static void ics_resend_lsi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if ((irq->priority != 0xff)
        && (irq->status & XICS_STATUS_ASSERTED)
        && !(irq->status & XICS_STATUS_SENT)) {
        irq->status |= XICS_STATUS_SENT;
        icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
    }
}

static void ics_set_irq_msi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_set_irq_msi(srcno, srcno + ics->offset);

    if (val) {
        if (irq->priority == 0xff) {
            irq->status |= XICS_STATUS_MASKED_PENDING;
            trace_xics_masked_pending();
        } else {
            icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

static void ics_set_irq_lsi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_set_irq_lsi(srcno, srcno + ics->offset);
    if (val) {
        irq->status |= XICS_STATUS_ASSERTED;
    } else {
        irq->status &= ~XICS_STATUS_ASSERTED;
    }
    ics_resend_lsi(ics, srcno);
}

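/*
 * qemu_irq handler for the source controller.  With an in-kernel irqchip
 * the line state change is forwarded to KVM; otherwise it is dispatched
 * to the LSI or MSI path according to the source's flags.
 */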
void ics_set_irq(void *opaque, int srcno, int val)
{
    ICSState *ics = (ICSState *)opaque;

    if (kvm_irqchip_in_kernel()) {
        ics_kvm_set_irq(ics, srcno, val);
        return;
    }

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        ics_set_irq_lsi(ics, srcno, val);
    } else {
        ics_set_irq_msi(ics, srcno, val);
    }
}

static void ics_write_xive_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if (!(irq->status & XICS_STATUS_MASKED_PENDING)
        || (irq->priority == 0xff)) {
        return;
    }

    irq->status &= ~XICS_STATUS_MASKED_PENDING;
    icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
}

static void ics_write_xive_lsi(ICSState *ics, int srcno)
{
    ics_resend_lsi(ics, srcno);
}

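/*
 * Update a source's routing (server) and priority, then re-evaluate
 * whether it can now be presented.  On sPAPR this is typically driven by
 * the "ibm,set-xive" RTAS call.
 */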
void ics_write_xive(ICSState *ics, int srcno, int server,
                    uint8_t priority, uint8_t saved_priority)
{
    ICSIRQState *irq = ics->irqs + srcno;

    irq->server = server;
    irq->priority = priority;
    irq->saved_priority = saved_priority;

    trace_xics_ics_write_xive(ics->offset + srcno, srcno, server, priority);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        ics_write_xive_lsi(ics, srcno);
    } else {
        ics_write_xive_msi(ics, srcno);
    }
}

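/*
 * Take back an interrupt the presenter could not deliver.  Subclasses may
 * override this via the class 'reject' hook; the default latches a
 * rejected MSI for resend and clears the SENT flag of an LSI so the
 * still-asserted level can be presented again.
 */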
static void ics_reject(ICSState *ics, uint32_t nr)
{
    ICSStateClass *isc = ICS_GET_CLASS(ics);
    ICSIRQState *irq = ics->irqs + nr - ics->offset;

    if (isc->reject) {
        isc->reject(ics, nr);
        return;
    }

    trace_xics_ics_reject(nr, nr - ics->offset);
    if (irq->flags & XICS_FLAGS_IRQ_MSI) {
        irq->status |= XICS_STATUS_REJECTED;
    } else if (irq->flags & XICS_FLAGS_IRQ_LSI) {
        irq->status &= ~XICS_STATUS_SENT;
    }
}

void ics_resend(ICSState *ics)
{
    ICSStateClass *isc = ICS_GET_CLASS(ics);
    int i;

    if (isc->resend) {
        isc->resend(ics);
        return;
    }

    for (i = 0; i < ics->nr_irqs; i++) {
        /* FIXME: filter by server#? */
        if (ics->irqs[i].flags & XICS_FLAGS_IRQ_LSI) {
            ics_resend_lsi(ics, i);
        } else {
            ics_resend_msi(ics, i);
        }
    }
}

static void ics_eoi(ICSState *ics, uint32_t nr)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_eoi(nr);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        irq->status &= ~XICS_STATUS_SENT;
    }
}

static void ics_reset_irq(ICSIRQState *irq)
{
    irq->priority = 0xff;
    irq->saved_priority = 0xff;
}

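/*
 * Reset clears all per-source state except the LSI/MSI flags, which are
 * set once by ics_set_irq_type() and must survive resets.
 */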
static void ics_reset_hold(Object *obj, ResetType type)
{
    ICSState *ics = ICS(obj);
    g_autofree uint8_t *flags = g_malloc(ics->nr_irqs);
    int i;

    for (i = 0; i < ics->nr_irqs; i++) {
        flags[i] = ics->irqs[i].flags;
    }

    memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs);

    for (i = 0; i < ics->nr_irqs; i++) {
        ics_reset_irq(ics->irqs + i);
        ics->irqs[i].flags = flags[i];
    }

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        ics_set_kvm_state(ics, &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
}

static void ics_reset_handler(void *dev)
{
    device_cold_reset(dev);
}

static void ics_realize(DeviceState *dev, Error **errp)
{
    ICSState *ics = ICS(dev);

    assert(ics->xics);

    if (!ics->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }
    ics->irqs = g_new0(ICSIRQState, ics->nr_irqs);

    qemu_register_reset(ics_reset_handler, ics);
}

static void ics_instance_init(Object *obj)
{
    ICSState *ics = ICS(obj);

    ics->offset = XICS_IRQ_BASE;
}

static int ics_pre_save(void *opaque)
{
    ICSState *ics = opaque;

    if (kvm_irqchip_in_kernel()) {
        ics_get_kvm_state(ics);
    }

    return 0;
}

static int ics_post_load(void *opaque, int version_id)
{
    ICSState *ics = opaque;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;
        int ret;

        ret = ics_set_kvm_state(ics, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_ics_irq = {
    .name = "ics/irq",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(server, ICSIRQState),
        VMSTATE_UINT8(priority, ICSIRQState),
        VMSTATE_UINT8(saved_priority, ICSIRQState),
        VMSTATE_UINT8(status, ICSIRQState),
        VMSTATE_UINT8(flags, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_ics = {
    .name = "ics",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = ics_pre_save,
    .post_load = ics_post_load,
    .fields = (const VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(nr_irqs, ICSState, NULL),

        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs,
                                             vmstate_ics_irq,
                                             ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static Property ics_properties[] = {
    DEFINE_PROP_UINT32("nr-irqs", ICSState, nr_irqs, 0),
    DEFINE_PROP_LINK(ICS_PROP_XICS, ICSState, xics, TYPE_XICS_FABRIC,
                     XICSFabric *),
    DEFINE_PROP_END_OF_LIST(),
};

static void ics_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);

    dc->realize = ics_realize;
    device_class_set_props(dc, ics_properties);
    dc->vmsd = &vmstate_ics;
    /*
     * Reason: part of XICS interrupt controller, needs to be wired up,
     * e.g. by spapr_irq_init().
     */
    dc->user_creatable = false;
    rc->phases.hold = ics_reset_hold;
}

static const TypeInfo ics_info = {
    .name = TYPE_ICS,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICSState),
    .instance_init = ics_instance_init,
    .class_init = ics_class_init,
    .class_size = sizeof(ICSStateClass),
};

static const TypeInfo xics_fabric_info = {
    .name = TYPE_XICS_FABRIC,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XICSFabricClass),
};

/*
 * Exported functions
 */
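
/*
 * The XICSFabric interface is implemented by the machine (e.g. the sPAPR
 * or PowerNV machine) and routes between presenters and sources:
 * icp_get() resolves a server number to its ICP, ics_get() resolves an
 * interrupt number to the owning ICS.
 */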
ICPState *xics_icp_get(XICSFabric *xi, int server)
{
    XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);

    return xic->icp_get(xi, server);
}

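/*
 * Declare whether a source is level (LSI) or message (MSI) triggered.
 * This is done once per source when the machine sets it up; the flag is
 * then preserved across resets by ics_reset_hold().
 */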
void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
{
    assert(!(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK));

    ics->irqs[srcno].flags |=
        lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        ics_reset_irq(ics->irqs + srcno);
        ics_set_kvm_state_one(ics, srcno, &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
}

static void xics_register_types(void)
{
    type_register_static(&ics_info);
    type_register_static(&icp_info);
    type_register_static(&xics_fabric_info);
}

type_init(xics_register_types)