xref: /openbmc/qemu/hw/intc/loongarch_extioi.c (revision 2e1cacfb)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Loongson 3A5000 ext interrupt controller emulation
 *
 * Copyright (C) 2021 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "qemu/module.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "hw/loongarch/virt.h"
#include "hw/qdev-properties.h"
#include "exec/address-spaces.h"
#include "hw/intc/loongarch_extioi.h"
#include "migration/vmstate.h"
#include "trace.h"

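/*
 * Recompute the level of the per-cpu parent irq line for one extioi irq.
 * The irq is routed to the cpu selected by sw_coremap and the ip pin
 * selected by sw_ipmap; sw_isr tracks which irqs share that pin, so the
 * parent line is only toggled when the first irq arrives or the last one
 * is cleared.
 */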
static void extioi_update_irq(LoongArchExtIOI *s, int irq, int level)
{
    int ipnum, cpu, found, irq_index, irq_mask;

    ipnum = s->sw_ipmap[irq / 32];
    cpu = s->sw_coremap[irq];
    irq_index = irq / 32;
    irq_mask = 1 << (irq & 0x1f);

    if (level) {
        /* if the irq is not enabled, do not forward it */
        if (((s->enable[irq_index]) & irq_mask) == 0) {
            return;
        }
        s->cpu[cpu].coreisr[irq_index] |= irq_mask;
        found = find_first_bit(s->cpu[cpu].sw_isr[ipnum], EXTIOI_IRQS);
        set_bit(irq, s->cpu[cpu].sw_isr[ipnum]);
        if (found < EXTIOI_IRQS) {
            /* another irq already pending, parent irq level is unchanged */
            return;
        }
    } else {
        s->cpu[cpu].coreisr[irq_index] &= ~irq_mask;
        clear_bit(irq, s->cpu[cpu].sw_isr[ipnum]);
        found = find_first_bit(s->cpu[cpu].sw_isr[ipnum], EXTIOI_IRQS);
        if (found < EXTIOI_IRQS) {
            /* other irqs still pending, parent irq level is unchanged */
            return;
        }
    }
    qemu_set_irq(s->cpu[cpu].parent_irq[ipnum], level);
}

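/* gpio-in handler: record the raw irq level and propagate it */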
static void extioi_setirq(void *opaque, int irq, int level)
{
    LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque);
    trace_loongarch_extioi_setirq(irq, level);
    if (level) {
        /*
         * s->isr is migrated through the vmstate structure, which does
         * not support 'unsigned long', so it is declared as uint32_t and
         * cast here for the bitops helpers.
         */
        set_bit(irq, (unsigned long *)s->isr);
    } else {
        clear_bit(irq, (unsigned long *)s->isr);
    }
    extioi_update_irq(s, irq, level);
}

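/* MMIO read handler for the main extioi register block */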
static MemTxResult extioi_readw(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned size, MemTxAttrs attrs)
{
    LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque);
    unsigned long offset = addr & 0xffff;
    uint32_t index, cpu;

    switch (offset) {
    case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END - 1:
        index = (offset - EXTIOI_NODETYPE_START) >> 2;
        *data = s->nodetype[index];
        break;
    case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END - 1:
        index = (offset - EXTIOI_IPMAP_START) >> 2;
        *data = s->ipmap[index];
        break;
    case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END - 1:
        index = (offset - EXTIOI_ENABLE_START) >> 2;
        *data = s->enable[index];
        break;
    case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END - 1:
        index = (offset - EXTIOI_BOUNCE_START) >> 2;
        *data = s->bounce[index];
        break;
    case EXTIOI_COREISR_START ... EXTIOI_COREISR_END - 1:
        index = (offset - EXTIOI_COREISR_START) >> 2;
        /* using attrs to get current cpu index */
        cpu = attrs.requester_id;
        *data = s->cpu[cpu].coreisr[index];
        break;
    case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END - 1:
        index = (offset - EXTIOI_COREMAP_START) >> 2;
        *data = s->coremap[index];
        break;
    default:
        break;
    }

    trace_loongarch_extioi_readw(addr, *data);
    return MEMTX_OK;
}

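/*
 * Re-evaluate all pending irqs covered by 'mask' in register group
 * 'index' after their enable bits changed, raising or lowering them
 * according to 'level'.
 */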
static inline void extioi_enable_irq(LoongArchExtIOI *s, int index,
                                     uint32_t mask, int level)
{
    uint32_t val;
    int irq;

    val = mask & s->isr[index];
    irq = ctz32(val);
    while (irq != 32) {
        /*
         * the enable bit changed, so the irq level must be re-evaluated
         * from the pending bits
         */
        extioi_update_irq(s, irq + index * 32, level);
        val &= ~(1 << irq);
        irq = ctz32(val);
    }
}

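/*
 * Update the software copy of the irq-to-cpu routing for the four irqs
 * packed into one coremap write.  If 'notify' is set, pending irqs are
 * migrated from the old cpu to the new one.
 */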
static inline void extioi_update_sw_coremap(LoongArchExtIOI *s, int irq,
                                            uint64_t val, bool notify)
{
    int i, cpu;

    /*
     * LoongArch only supports little endian, so parse the value as
     * little endian.
     */
    val = cpu_to_le64(val);

    for (i = 0; i < 4; i++) {
        cpu = val & 0xff;
        val = val >> 8;

        if (!(s->status & BIT(EXTIOI_ENABLE_CPU_ENCODE))) {
            cpu = ctz32(cpu);
            cpu = (cpu >= 4) ? 0 : cpu;
        }

        if (s->sw_coremap[irq + i] == cpu) {
            continue;
        }

        if (notify && test_bit(irq + i, (unsigned long *)s->isr)) {
            /*
             * lower the irq at the old cpu and raise it at the new cpu
             */
            extioi_update_irq(s, irq + i, 0);
            s->sw_coremap[irq + i] = cpu;
            extioi_update_irq(s, irq + i, 1);
        } else {
            s->sw_coremap[irq + i] = cpu;
        }
    }
}

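/*
 * Update the software copy of the irq-group-to-ip-pin mapping for the
 * four groups packed into one ipmap write; unsupported encodings fall
 * back to pin 0.
 */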
static inline void extioi_update_sw_ipmap(LoongArchExtIOI *s, int index,
                                          uint64_t val)
{
    int i;
    uint8_t ipnum;

    /*
     * LoongArch only supports little endian, so parse the value as
     * little endian.
     */
    val = cpu_to_le64(val);
    for (i = 0; i < 4; i++) {
        ipnum = val & 0xff;
        ipnum = ctz32(ipnum);
        ipnum = (ipnum >= 4) ? 0 : ipnum;
        s->sw_ipmap[index * 4 + i] = ipnum;
        val = val >> 8;
    }
}

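/* MMIO write handler for the main extioi register block */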
static MemTxResult extioi_writew(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size,
                          MemTxAttrs attrs)
{
    LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque);
    int cpu, index, old_data, irq;
    uint32_t offset;

    trace_loongarch_extioi_writew(addr, val);
    offset = addr & 0xffff;

    switch (offset) {
    case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END - 1:
        index = (offset - EXTIOI_NODETYPE_START) >> 2;
        s->nodetype[index] = val;
        break;
    case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END - 1:
        /*
         * ipmap cannot be changed at runtime; it is only written during
         * interrupt driver initialization, so there is no need to update
         * the upper irq level here
         */
        index = (offset - EXTIOI_IPMAP_START) >> 2;
        s->ipmap[index] = val;
        extioi_update_sw_ipmap(s, index, val);
        break;
    case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END - 1:
        index = (offset - EXTIOI_ENABLE_START) >> 2;
        old_data = s->enable[index];
        s->enable[index] = val;

        /* unmask irq */
        val = s->enable[index] & ~old_data;
        extioi_enable_irq(s, index, val, 1);

        /* mask irq */
        val = ~s->enable[index] & old_data;
        extioi_enable_irq(s, index, val, 0);
        break;
    case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END - 1:
        /* do not emulate hw bounced irq routing */
        index = (offset - EXTIOI_BOUNCE_START) >> 2;
        s->bounce[index] = val;
        break;
    case EXTIOI_COREISR_START ... EXTIOI_COREISR_END - 1:
        index = (offset - EXTIOI_COREISR_START) >> 2;
        /* using attrs to get current cpu index */
        cpu = attrs.requester_id;
        old_data = s->cpu[cpu].coreisr[index];
        s->cpu[cpu].coreisr[index] = old_data & ~val;
        /* writing 1 clears the corresponding interrupt */
        old_data &= val;
        irq = ctz32(old_data);
        while (irq != 32) {
            extioi_update_irq(s, irq + index * 32, 0);
            old_data &= ~(1 << irq);
            irq = ctz32(old_data);
        }
        break;
    case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END - 1:
        irq = offset - EXTIOI_COREMAP_START;
        index = irq / 4;
        s->coremap[index] = val;

        extioi_update_sw_coremap(s, irq, val, true);
        break;
    default:
        break;
    }
    return MEMTX_OK;
}

static const MemoryRegionOps extioi_ops = {
    .read_with_attrs = extioi_readw,
    .write_with_attrs = extioi_writew,
    .impl.min_access_size = 4,
    .impl.max_access_size = 4,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

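/* MMIO read handler for the virt-extension register block */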
static MemTxResult extioi_virt_readw(void *opaque, hwaddr addr, uint64_t *data,
                                     unsigned size, MemTxAttrs attrs)
{
    LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque);

    switch (addr) {
    case EXTIOI_VIRT_FEATURES:
        *data = s->features;
        break;
    case EXTIOI_VIRT_CONFIG:
        *data = s->status;
        break;
    default:
        g_assert_not_reached();
    }

    return MEMTX_OK;
}

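/* MMIO write handler for the virt-extension register block */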
static MemTxResult extioi_virt_writew(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size,
                          MemTxAttrs attrs)
{
    LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque);

    switch (addr) {
    case EXTIOI_VIRT_FEATURES:
        return MEMTX_ACCESS_ERROR;

    case EXTIOI_VIRT_CONFIG:
        /*
         * extioi features can only be configured while the extioi is
         * disabled
         */
        if ((s->status & BIT(EXTIOI_ENABLE)) && val) {
            return MEMTX_ACCESS_ERROR;
        }

        s->status = val & s->features;
        break;
    default:
        g_assert_not_reached();
    }
    return MEMTX_OK;
}

static const MemoryRegionOps extioi_virt_ops = {
    .read_with_attrs = extioi_virt_readw,
    .write_with_attrs = extioi_virt_writew,
    .impl.min_access_size = 4,
    .impl.max_access_size = 4,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

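/*
 * Realize: register the input/output irq lines and MMIO regions, and
 * allocate the per-cpu state.
 */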
static void loongarch_extioi_realize(DeviceState *dev, Error **errp)
{
    LoongArchExtIOI *s = LOONGARCH_EXTIOI(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    int i, pin;

    if (s->num_cpu == 0) {
        error_setg(errp, "num-cpu must be at least 1");
        return;
    }

    for (i = 0; i < EXTIOI_IRQS; i++) {
        sysbus_init_irq(sbd, &s->irq[i]);
    }

    qdev_init_gpio_in(dev, extioi_setirq, EXTIOI_IRQS);
    memory_region_init_io(&s->extioi_system_mem, OBJECT(s), &extioi_ops,
                          s, "extioi_system_mem", 0x900);
    sysbus_init_mmio(sbd, &s->extioi_system_mem);

    if (s->features & BIT(EXTIOI_HAS_VIRT_EXTENSION)) {
        memory_region_init_io(&s->virt_extend, OBJECT(s), &extioi_virt_ops,
                              s, "extioi_virt", EXTIOI_VIRT_SIZE);
        sysbus_init_mmio(sbd, &s->virt_extend);
        s->features |= EXTIOI_VIRT_HAS_FEATURES;
    } else {
        s->status |= BIT(EXTIOI_ENABLE);
    }

    s->cpu = g_new0(ExtIOICore, s->num_cpu);
    if (s->cpu == NULL) {
        error_setg(errp, "Memory allocation for ExtIOICore failed");
        return;
    }

    for (i = 0; i < s->num_cpu; i++) {
        for (pin = 0; pin < LS3A_INTC_IP; pin++) {
            qdev_init_gpio_out(dev, &s->cpu[i].parent_irq[pin], 1);
        }
    }
}

static void loongarch_extioi_finalize(Object *obj)
{
    LoongArchExtIOI *s = LOONGARCH_EXTIOI(obj);

    g_free(s->cpu);
}

static void loongarch_extioi_reset(DeviceState *d)
{
    LoongArchExtIOI *s = LOONGARCH_EXTIOI(d);

    s->status = 0;
}

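/*
 * After migration, rebuild the software routing tables (sw_coremap and
 * sw_ipmap) from the migrated coremap and ipmap registers.
 */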
static int vmstate_extioi_post_load(void *opaque, int version_id)
{
    LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque);
    int i, start_irq;

    for (i = 0; i < (EXTIOI_IRQS / 4); i++) {
        start_irq = i * 4;
        extioi_update_sw_coremap(s, start_irq, s->coremap[i], false);
    }

    for (i = 0; i < (EXTIOI_IRQS_IPMAP_SIZE / 4); i++) {
        extioi_update_sw_ipmap(s, i, s->ipmap[i]);
    }

    return 0;
}

static const VMStateDescription vmstate_extioi_core = {
    .name = "extioi-core",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(coreisr, ExtIOICore, EXTIOI_IRQS_GROUP_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_loongarch_extioi = {
    .name = TYPE_LOONGARCH_EXTIOI,
    .version_id = 3,
    .minimum_version_id = 3,
    .post_load = vmstate_extioi_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(bounce, LoongArchExtIOI, EXTIOI_IRQS_GROUP_COUNT),
        VMSTATE_UINT32_ARRAY(nodetype, LoongArchExtIOI,
                             EXTIOI_IRQS_NODETYPE_COUNT / 2),
        VMSTATE_UINT32_ARRAY(enable, LoongArchExtIOI, EXTIOI_IRQS / 32),
        VMSTATE_UINT32_ARRAY(isr, LoongArchExtIOI, EXTIOI_IRQS / 32),
        VMSTATE_UINT32_ARRAY(ipmap, LoongArchExtIOI, EXTIOI_IRQS_IPMAP_SIZE / 4),
        VMSTATE_UINT32_ARRAY(coremap, LoongArchExtIOI, EXTIOI_IRQS / 4),

        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, LoongArchExtIOI, num_cpu,
                         vmstate_extioi_core, ExtIOICore),
        VMSTATE_UINT32(features, LoongArchExtIOI),
        VMSTATE_UINT32(status, LoongArchExtIOI),
        VMSTATE_END_OF_LIST()
    }
};

static Property extioi_properties[] = {
    DEFINE_PROP_UINT32("num-cpu", LoongArchExtIOI, num_cpu, 1),
    DEFINE_PROP_BIT("has-virtualization-extension", LoongArchExtIOI, features,
                    EXTIOI_HAS_VIRT_EXTENSION, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void loongarch_extioi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = loongarch_extioi_realize;
    device_class_set_legacy_reset(dc, loongarch_extioi_reset);
    device_class_set_props(dc, extioi_properties);
    dc->vmsd = &vmstate_loongarch_extioi;
}

static const TypeInfo loongarch_extioi_info = {
    .name          = TYPE_LOONGARCH_EXTIOI,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(struct LoongArchExtIOI),
    .class_init    = loongarch_extioi_class_init,
    .instance_finalize = loongarch_extioi_finalize,
};

static void loongarch_extioi_register_types(void)
{
    type_register_static(&loongarch_extioi_info);
}

type_init(loongarch_extioi_register_types)