/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "trace.h"
#include "qemu/timer.h"
#include "hw/ppc/xics.h"
#include "hw/qdev-properties.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qapi/visitor.h"
#include "migration/vmstate.h"
#include "hw/intc/intc.h"
#include "hw/irq.h"
#include "system/kvm.h"
#include "system/reset.h"
#include "target/ppc/cpu.h"

void icp_pic_print_info(ICPState *icp, GString *buf)
{
    int cpu_index;

    /* Skip partially initialized vCPUs. This can happen on sPAPR when vCPUs
     * are hot plugged or unplugged.
     */
    if (!icp) {
        return;
    }

    cpu_index = icp->cs ? icp->cs->cpu_index : -1;

    if (!icp->output) {
        return;
    }

    if (kvm_irqchip_in_kernel()) {
        icp_synchronize_state(icp);
    }

    g_string_append_printf(buf, "CPU %d XIRR=%08x (%p) PP=%02x MFRR=%02x\n",
                           cpu_index, icp->xirr, icp->xirr_owner,
                           icp->pending_priority, icp->mfrr);
}

void ics_pic_print_info(ICSState *ics, GString *buf)
{
    uint32_t i;

    g_string_append_printf(buf, "ICS %4x..%4x %p\n",
                           ics->offset, ics->offset + ics->nr_irqs - 1, ics);

    if (!ics->irqs) {
        return;
    }

    if (kvm_irqchip_in_kernel()) {
        ics_synchronize_state(ics);
    }

    for (i = 0; i < ics->nr_irqs; i++) {
        ICSIRQState *irq = ics->irqs + i;

        if (!(irq->flags & XICS_FLAGS_IRQ_MASK)) {
            continue;
        }
        g_string_append_printf(buf, " %4x %s %02x %02x\n",
                               ics->offset + i,
                               (irq->flags & XICS_FLAGS_IRQ_LSI) ?
                               "LSI" : "MSI",
                               irq->priority, irq->status);
    }
}

/*
 * ICP: Presentation layer
 */

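/*
 * The XIRR is a 32-bit register: the top byte holds the CPPR (current
 * processor priority) and the low 24 bits hold the XISR, i.e. the source
 * number of the interrupt currently being presented (0 when none).
 */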
#define XISR_MASK  0x00ffffff
#define CPPR_MASK  0xff000000

#define XISR(icp)   (((icp)->xirr) & XISR_MASK)
#define CPPR(icp)   (((icp)->xirr) >> 24)

static void ics_reject(ICSState *ics, uint32_t nr);
static void ics_eoi(ICSState *ics, uint32_t nr);

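/*
 * Check whether the IPI signalled via the MFRR should be presented. If a
 * more favoured (or equally favoured) interrupt is already pending, do
 * nothing; otherwise reject any pending source interrupt back to its ICS
 * and latch the IPI into the XIRR. Lower priority values are more favoured.
 */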
static void icp_check_ipi(ICPState *icp)
{
    if (XISR(icp) && (icp->pending_priority <= icp->mfrr)) {
        return;
    }

    trace_xics_icp_check_ipi(icp->cs->cpu_index, icp->mfrr);

    if (XISR(icp) && icp->xirr_owner) {
        ics_reject(icp->xirr_owner, XISR(icp));
    }

    icp->xirr = (icp->xirr & ~XISR_MASK) | XICS_IPI;
    icp->pending_priority = icp->mfrr;
    icp->xirr_owner = NULL;
    qemu_irq_raise(icp->output);
}

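/*
 * Called when this presenter may be able to accept a new interrupt:
 * re-check the pending IPI and ask the fabric to have every source
 * resend interrupts it had to reject earlier.
 */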
void icp_resend(ICPState *icp)
{
    XICSFabric *xi = icp->xics;
    XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);

    if (icp->mfrr < CPPR(icp)) {
        icp_check_ipi(icp);
    }

    xic->ics_resend(xi);
}

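/*
 * Write the CPPR. Lowering the value (making the CPU priority more
 * favoured) may mask the interrupt currently being presented, in which
 * case it is rejected back to its source. Raising the value may allow a
 * previously rejected interrupt through, so trigger a resend.
 */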
void icp_set_cppr(ICPState *icp, uint8_t cppr)
{
    uint8_t old_cppr;
    uint32_t old_xisr;

    old_cppr = CPPR(icp);
    icp->xirr = (icp->xirr & ~CPPR_MASK) | (cppr << 24);

    if (cppr < old_cppr) {
        if (XISR(icp) && (cppr <= icp->pending_priority)) {
            old_xisr = XISR(icp);
            icp->xirr &= ~XISR_MASK; /* Clear XISR */
            icp->pending_priority = 0xff;
            qemu_irq_lower(icp->output);
            if (icp->xirr_owner) {
                ics_reject(icp->xirr_owner, old_xisr);
                icp->xirr_owner = NULL;
            }
        }
    } else {
        if (!XISR(icp)) {
            icp_resend(icp);
        }
    }
}

void icp_set_mfrr(ICPState *icp, uint8_t mfrr)
{
    icp->mfrr = mfrr;
    if (mfrr < CPPR(icp)) {
        icp_check_ipi(icp);
    }
}

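/*
 * Accept the pending interrupt (XIRR read with side effects): return the
 * current XIRR, raise the CPPR to the accepted interrupt's priority and
 * clear the XISR.
 */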
uint32_t icp_accept(ICPState *icp)
{
    uint32_t xirr = icp->xirr;

    qemu_irq_lower(icp->output);
    icp->xirr = icp->pending_priority << 24;
    icp->pending_priority = 0xff;
    icp->xirr_owner = NULL;

    trace_xics_icp_accept(xirr, icp->xirr);

    return xirr;
}

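/* Non-destructive poll of the XIRR and, optionally, the MFRR. */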
uint32_t icp_ipoll(ICPState *icp, uint32_t *mfrr)
{
    if (mfrr) {
        *mfrr = icp->mfrr;
    }
    return icp->xirr;
}

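/*
 * End of interrupt: restore the CPPR from the value written by the guest,
 * forward the EOI to the ICS that owns the source and, if nothing is
 * pending any more, look for another interrupt to present.
 */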
void icp_eoi(ICPState *icp, uint32_t xirr)
{
    XICSFabric *xi = icp->xics;
    XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);
    ICSState *ics;
    uint32_t irq;

    /* Send EOI -> ICS */
    icp->xirr = (icp->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
    trace_xics_icp_eoi(icp->cs->cpu_index, xirr, icp->xirr);
    irq = xirr & XISR_MASK;

    ics = xic->ics_get(xi, irq);
    if (ics) {
        ics_eoi(ics, irq);
    }
    if (!XISR(icp)) {
        icp_resend(icp);
    }
}

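/*
 * A source presents interrupt 'nr' to the given server. The interrupt is
 * rejected if the CPU is not interested (priority not more favoured than
 * the CPPR) or if something at least as favoured is already pending;
 * otherwise any less favoured pending interrupt is pushed back to its
 * source and the new one is latched into the XIRR.
 */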
void icp_irq(ICSState *ics, int server, int nr, uint8_t priority)
{
    ICPState *icp = xics_icp_get(ics->xics, server);

    trace_xics_icp_irq(server, nr, priority);

    if ((priority >= CPPR(icp))
        || (XISR(icp) && (icp->pending_priority <= priority))) {
        ics_reject(ics, nr);
    } else {
        if (XISR(icp) && icp->xirr_owner) {
            ics_reject(icp->xirr_owner, XISR(icp));
            icp->xirr_owner = NULL;
        }
        icp->xirr = (icp->xirr & ~XISR_MASK) | (nr & XISR_MASK);
        icp->xirr_owner = ics;
        icp->pending_priority = priority;
        trace_xics_icp_raise(icp->xirr, icp->pending_priority);
        qemu_irq_raise(icp->output);
    }
}

static int icp_pre_save(void *opaque)
{
    ICPState *icp = opaque;

    if (kvm_irqchip_in_kernel()) {
        icp_get_kvm_state(icp);
    }

    return 0;
}

static int icp_post_load(void *opaque, int version_id)
{
    ICPState *icp = opaque;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;
        int ret;

        ret = icp_set_kvm_state(icp, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_icp_server = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = icp_pre_save,
    .post_load = icp_post_load,
    .fields = (const VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32(xirr, ICPState),
        VMSTATE_UINT8(pending_priority, ICPState),
        VMSTATE_UINT8(mfrr, ICPState),
        VMSTATE_END_OF_LIST()
    },
};

void icp_reset(ICPState *icp)
{
    icp->xirr = 0;
    icp->pending_priority = 0xff;
    icp->mfrr = 0xff;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        icp_set_kvm_state(icp, &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
}

static void icp_realize(DeviceState *dev, Error **errp)
{
    ICPState *icp = ICP(dev);
    PowerPCCPU *cpu;
    CPUPPCState *env;
    Error *err = NULL;

    assert(icp->xics);
    assert(icp->cs);

    cpu = POWERPC_CPU(icp->cs);
    env = &cpu->env;
    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER7:
        icp->output = qdev_get_gpio_in(DEVICE(cpu), POWER7_INPUT_INT);
        break;
    case PPC_FLAGS_INPUT_POWER9: /* For SPAPR xics emulation */
        icp->output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_INT);
        break;

    case PPC_FLAGS_INPUT_970:
        icp->output = qdev_get_gpio_in(DEVICE(cpu), PPC970_INPUT_INT);
        break;

    default:
        error_setg(errp, "XICS interrupt controller does not support this CPU bus model");
        return;
    }

    /* Connect the presenter to the VCPU (required for CPU hotplug) */
    if (kvm_irqchip_in_kernel()) {
        icp_kvm_realize(dev, &err);
        if (err) {
            error_propagate(errp, err);
            return;
        }
    }

    vmstate_register(NULL, icp->cs->cpu_index, &vmstate_icp_server, icp);
}

static void icp_unrealize(DeviceState *dev)
{
    ICPState *icp = ICP(dev);

    vmstate_unregister(NULL, &vmstate_icp_server, icp);
}

static const Property icp_properties[] = {
    DEFINE_PROP_LINK(ICP_PROP_XICS, ICPState, xics, TYPE_XICS_FABRIC,
                     XICSFabric *),
    DEFINE_PROP_LINK(ICP_PROP_CPU, ICPState, cs, TYPE_CPU, CPUState *),
};

static void icp_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = icp_realize;
    dc->unrealize = icp_unrealize;
    device_class_set_props(dc, icp_properties);
    /*
     * Reason: part of XICS interrupt controller, needs to be wired up
     * by icp_create().
     */
    dc->user_creatable = false;
}

static const TypeInfo icp_info = {
    .name = TYPE_ICP,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICPState),
    .class_init = icp_class_init,
    .class_size = sizeof(ICPStateClass),
};

Object *icp_create(Object *cpu, const char *type, XICSFabric *xi, Error **errp)
{
    Object *obj;

    obj = object_new(type);
    object_property_add_child(cpu, type, obj);
    object_unref(obj);
    object_property_set_link(obj, ICP_PROP_XICS, OBJECT(xi), &error_abort);
    object_property_set_link(obj, ICP_PROP_CPU, cpu, &error_abort);
    if (!qdev_realize(DEVICE(obj), NULL, errp)) {
        object_unparent(obj);
        obj = NULL;
    }

    return obj;
}

void icp_destroy(ICPState *icp)
{
    Object *obj = OBJECT(icp);

    object_unparent(obj);
}

/*
 * ICS: Source layer
 */
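/*
 * Replay an MSI that was previously rejected by a presenter, provided the
 * source has not been masked in the meantime.
 */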
static void ics_resend_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    /* FIXME: filter by server#? */
    if (irq->status & XICS_STATUS_REJECTED) {
        irq->status &= ~XICS_STATUS_REJECTED;
        if (irq->priority != 0xff) {
            icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

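/*
 * Re-present a level-sensitive source that is asserted, unmasked and not
 * currently delivered to a presenter.
 */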
static void ics_resend_lsi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if ((irq->priority != 0xff)
        && (irq->status & XICS_STATUS_ASSERTED)
        && !(irq->status & XICS_STATUS_SENT)) {
        irq->status |= XICS_STATUS_SENT;
        icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
    }
}

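/*
 * Edge/MSI trigger: if the source is masked (priority 0xff), remember the
 * interrupt as masked-pending, otherwise present it to the target server.
 */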
static void ics_set_irq_msi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_set_irq_msi(srcno, srcno + ics->offset);

    if (val) {
        if (irq->priority == 0xff) {
            irq->status |= XICS_STATUS_MASKED_PENDING;
            trace_xics_masked_pending();
        } else {
            icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

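/*
 * Level trigger: track the line state in XICS_STATUS_ASSERTED and let
 * ics_resend_lsi() decide whether it needs to be (re)presented.
 */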
static void ics_set_irq_lsi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_set_irq_lsi(srcno, srcno + ics->offset);
    if (val) {
        irq->status |= XICS_STATUS_ASSERTED;
    } else {
        irq->status &= ~XICS_STATUS_ASSERTED;
    }
    ics_resend_lsi(ics, srcno);
}

void ics_set_irq(void *opaque, int srcno, int val)
{
    ICSState *ics = (ICSState *)opaque;

    if (kvm_irqchip_in_kernel()) {
        ics_kvm_set_irq(ics, srcno, val);
        return;
    }

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        ics_set_irq_lsi(ics, srcno, val);
    } else {
        ics_set_irq_msi(ics, srcno, val);
    }
}

static void ics_write_xive_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if (!(irq->status & XICS_STATUS_MASKED_PENDING)
        || (irq->priority == 0xff)) {
        return;
    }

    irq->status &= ~XICS_STATUS_MASKED_PENDING;
    icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
}

static void ics_write_xive_lsi(ICSState *ics, int srcno)
{
    ics_resend_lsi(ics, srcno);
}

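/*
 * Update a source's routing information (target server, priority and
 * saved priority) and deliver anything the new setting makes deliverable.
 */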
void ics_write_xive(ICSState *ics, int srcno, int server,
                    uint8_t priority, uint8_t saved_priority)
{
    ICSIRQState *irq = ics->irqs + srcno;

    irq->server = server;
    irq->priority = priority;
    irq->saved_priority = saved_priority;

    trace_xics_ics_write_xive(ics->offset + srcno, srcno, server, priority);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        ics_write_xive_lsi(ics, srcno);
    } else {
        ics_write_xive_msi(ics, srcno);
    }
}

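/*
 * A presenter could not (or can no longer) handle this interrupt: mark it
 * so that ics_resend() will present it again later. MSIs are flagged as
 * rejected, LSIs simply drop the SENT state.
 */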
static void ics_reject(ICSState *ics, uint32_t nr)
{
    ICSStateClass *isc = ICS_GET_CLASS(ics);
    ICSIRQState *irq = ics->irqs + nr - ics->offset;

    if (isc->reject) {
        isc->reject(ics, nr);
        return;
    }

    trace_xics_ics_reject(nr, nr - ics->offset);
    if (irq->flags & XICS_FLAGS_IRQ_MSI) {
        irq->status |= XICS_STATUS_REJECTED;
    } else if (irq->flags & XICS_FLAGS_IRQ_LSI) {
        irq->status &= ~XICS_STATUS_SENT;
    }
}

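/*
 * Walk all sources and replay anything that still needs to be presented.
 * Subclasses may override this via the class hook.
 */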
void ics_resend(ICSState *ics)
{
    ICSStateClass *isc = ICS_GET_CLASS(ics);
    int i;

    if (isc->resend) {
        isc->resend(ics);
        return;
    }

    for (i = 0; i < ics->nr_irqs; i++) {
        /* FIXME: filter by server#? */
        if (ics->irqs[i].flags & XICS_FLAGS_IRQ_LSI) {
            ics_resend_lsi(ics, i);
        } else {
            ics_resend_msi(ics, i);
        }
    }
}

static void ics_eoi(ICSState *ics, uint32_t nr)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_eoi(nr);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        irq->status &= ~XICS_STATUS_SENT;
    }
}

static void ics_reset_irq(ICSIRQState *irq)
{
    irq->priority = 0xff;
    irq->saved_priority = 0xff;
}

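/*
 * Reset of the source controller. The LSI/MSI type flags are set at
 * wiring time rather than by the guest, so they are preserved across the
 * reset while all other per-source state is cleared.
 */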
static void ics_reset_hold(Object *obj, ResetType type)
{
    ICSState *ics = ICS(obj);
    g_autofree uint8_t *flags = g_malloc(ics->nr_irqs);
    int i;

    for (i = 0; i < ics->nr_irqs; i++) {
        flags[i] = ics->irqs[i].flags;
    }

    memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs);

    for (i = 0; i < ics->nr_irqs; i++) {
        ics_reset_irq(ics->irqs + i);
        ics->irqs[i].flags = flags[i];
    }

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        ics_set_kvm_state(ics, &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
}

static void ics_reset_handler(void *dev)
{
    device_cold_reset(dev);
}

static void ics_realize(DeviceState *dev, Error **errp)
{
    ICSState *ics = ICS(dev);

    assert(ics->xics);

    if (!ics->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }
    ics->irqs = g_new0(ICSIRQState, ics->nr_irqs);

    qemu_register_reset(ics_reset_handler, ics);
}

static void ics_instance_init(Object *obj)
{
    ICSState *ics = ICS(obj);

    ics->offset = XICS_IRQ_BASE;
}

static int ics_pre_save(void *opaque)
{
    ICSState *ics = opaque;

    if (kvm_irqchip_in_kernel()) {
        ics_get_kvm_state(ics);
    }

    return 0;
}

static int ics_post_load(void *opaque, int version_id)
{
    ICSState *ics = opaque;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;
        int ret;

        ret = ics_set_kvm_state(ics, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_ics_irq = {
    .name = "ics/irq",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(server, ICSIRQState),
        VMSTATE_UINT8(priority, ICSIRQState),
        VMSTATE_UINT8(saved_priority, ICSIRQState),
        VMSTATE_UINT8(status, ICSIRQState),
        VMSTATE_UINT8(flags, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_ics = {
    .name = "ics",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = ics_pre_save,
    .post_load = ics_post_load,
    .fields = (const VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(nr_irqs, ICSState, NULL),

        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs,
                                             vmstate_ics_irq,
                                             ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static const Property ics_properties[] = {
    DEFINE_PROP_UINT32("nr-irqs", ICSState, nr_irqs, 0),
    DEFINE_PROP_LINK(ICS_PROP_XICS, ICSState, xics, TYPE_XICS_FABRIC,
                     XICSFabric *),
};

static void ics_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);

    dc->realize = ics_realize;
    device_class_set_props(dc, ics_properties);
    dc->vmsd = &vmstate_ics;
    /*
     * Reason: part of XICS interrupt controller, needs to be wired up,
     * e.g. by spapr_irq_init().
     */
    dc->user_creatable = false;
    rc->phases.hold = ics_reset_hold;
}

static const TypeInfo ics_info = {
    .name = TYPE_ICS,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICSState),
    .instance_init = ics_instance_init,
    .class_init = ics_class_init,
    .class_size = sizeof(ICSStateClass),
};

static const TypeInfo xics_fabric_info = {
    .name = TYPE_XICS_FABRIC,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XICSFabricClass),
};

/*
 * Exported functions
 */
ICPState *xics_icp_get(XICSFabric *xi, int server)
{
    XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);

    return xic->icp_get(xi, server);
}

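/*
 * Set a source's type (LSI or MSI). The assertion ensures the type is
 * only set once, before the source is used.
 */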
void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
{
    assert(!(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK));

    ics->irqs[srcno].flags |=
        lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        ics_reset_irq(ics->irqs + srcno);
        ics_set_kvm_state_one(ics, srcno, &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
}

static void xics_register_types(void)
{
    type_register_static(&ics_info);
    type_register_static(&icp_info);
    type_register_static(&xics_fabric_info);
}

type_init(xics_register_types)