xref: /openbmc/qemu/hw/intc/riscv_imsic.c (revision 6fdc5bc1)
1 /*
2  * RISC-V IMSIC (Incoming Message Signaled Interrupt Controller)
3  *
4  * Copyright (c) 2021 Western Digital Corporation or its affiliates.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2 or later, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #include "qemu/osdep.h"
20 #include "qapi/error.h"
21 #include "qemu/log.h"
22 #include "qemu/module.h"
23 #include "qemu/error-report.h"
24 #include "qemu/bswap.h"
25 #include "exec/address-spaces.h"
26 #include "hw/sysbus.h"
27 #include "hw/pci/msi.h"
28 #include "hw/boards.h"
29 #include "hw/qdev-properties.h"
30 #include "hw/intc/riscv_imsic.h"
31 #include "hw/irq.h"
32 #include "target/riscv/cpu.h"
33 #include "target/riscv/cpu_bits.h"
34 #include "sysemu/sysemu.h"
35 #include "sysemu/kvm.h"
36 #include "migration/vmstate.h"
37 
38 #define IMSIC_MMIO_PAGE_LE             0x00
39 #define IMSIC_MMIO_PAGE_BE             0x04
40 
41 #define IMSIC_MIN_ID                   ((IMSIC_EIPx_BITS * 2) - 1)
42 #define IMSIC_MAX_ID                   (IMSIC_TOPEI_IID_MASK)
43 
44 #define IMSIC_EISTATE_PENDING          (1U << 0)
45 #define IMSIC_EISTATE_ENABLED          (1U << 1)
46 #define IMSIC_EISTATE_ENPEND           (IMSIC_EISTATE_ENABLED | \
47                                         IMSIC_EISTATE_PENDING)
48 
49 static uint32_t riscv_imsic_topei(RISCVIMSICState *imsic, uint32_t page)
50 {
51     uint32_t i, max_irq, base;
52 
53     base = page * imsic->num_irqs;
54     max_irq = (imsic->eithreshold[page] &&
55                (imsic->eithreshold[page] <= imsic->num_irqs)) ?
56                imsic->eithreshold[page] : imsic->num_irqs;
57     for (i = 1; i < max_irq; i++) {
58         if ((qatomic_read(&imsic->eistate[base + i]) & IMSIC_EISTATE_ENPEND) ==
59                 IMSIC_EISTATE_ENPEND) {
60             return (i << IMSIC_TOPEI_IID_SHIFT) | i;
61         }
62     }
63 
64     return 0;
65 }
66 
/*
 * Re-evaluate the interrupt file of @page and drive the connected CPU IRQ
 * line accordingly.
 */
static void riscv_imsic_update(RISCVIMSICState *imsic, uint32_t page)
{
    /* eistate slot for identity 0 (which cannot be a real interrupt) is
     * reused below as a latch recording that the line was raised. */
    uint32_t base = page * imsic->num_irqs;

    /*
     * Lower the interrupt line if necessary, then evaluate the current
     * IMSIC state.
     * This sequence ensures that any race between evaluating the eistate and
     * updating the interrupt line will not result in an incorrectly
     * deactivated connected CPU IRQ line.
     * If multiple interrupts are pending, this sequence functions identically
     * to qemu_irq_pulse.
     */

    if (qatomic_fetch_and(&imsic->eistate[base], ~IMSIC_EISTATE_ENPEND)) {
        qemu_irq_lower(imsic->external_irqs[page]);
    }
    /* Raise only when delivery is enabled and something is pending+enabled */
    if (imsic->eidelivery[page] && riscv_imsic_topei(imsic, page)) {
        qemu_irq_raise(imsic->external_irqs[page]);
        qatomic_or(&imsic->eistate[base], IMSIC_EISTATE_ENPEND);
    }
}
89 
90 static int riscv_imsic_eidelivery_rmw(RISCVIMSICState *imsic, uint32_t page,
91                                       target_ulong *val,
92                                       target_ulong new_val,
93                                       target_ulong wr_mask)
94 {
95     target_ulong old_val = imsic->eidelivery[page];
96 
97     if (val) {
98         *val = old_val;
99     }
100 
101     wr_mask &= 0x1;
102     imsic->eidelivery[page] = (old_val & ~wr_mask) | (new_val & wr_mask);
103 
104     riscv_imsic_update(imsic, page);
105     return 0;
106 }
107 
108 static int riscv_imsic_eithreshold_rmw(RISCVIMSICState *imsic, uint32_t page,
109                                       target_ulong *val,
110                                       target_ulong new_val,
111                                       target_ulong wr_mask)
112 {
113     target_ulong old_val = imsic->eithreshold[page];
114 
115     if (val) {
116         *val = old_val;
117     }
118 
119     wr_mask &= IMSIC_MAX_ID;
120     imsic->eithreshold[page] = (old_val & ~wr_mask) | (new_val & wr_mask);
121 
122     riscv_imsic_update(imsic, page);
123     return 0;
124 }
125 
126 static int riscv_imsic_topei_rmw(RISCVIMSICState *imsic, uint32_t page,
127                                  target_ulong *val, target_ulong new_val,
128                                  target_ulong wr_mask)
129 {
130     uint32_t base, topei = riscv_imsic_topei(imsic, page);
131 
132     /* Read pending and enabled interrupt with highest priority */
133     if (val) {
134         *val = topei;
135     }
136 
137     /* Writes ignore value and clear top pending interrupt */
138     if (topei && wr_mask) {
139         topei >>= IMSIC_TOPEI_IID_SHIFT;
140         base = page * imsic->num_irqs;
141         if (topei) {
142             qatomic_and(&imsic->eistate[base + topei], ~IMSIC_EISTATE_PENDING);
143         }
144     }
145 
146     riscv_imsic_update(imsic, page);
147     return 0;
148 }
149 
/*
 * Read-modify-write one eipX (@pend true) or eieX (@pend false) register
 * of @page.
 *
 * @xlen selects the register width (32 or 64); @num is the register
 * number as encoded in the indirect CSR select value. Returns 0 on
 * success or -EINVAL for a non-existent register.
 */
static int riscv_imsic_eix_rmw(RISCVIMSICState *imsic,
                               uint32_t xlen, uint32_t page,
                               uint32_t num, bool pend, target_ulong *val,
                               target_ulong new_val, target_ulong wr_mask)
{
    uint32_t i, base, prev;
    target_ulong mask;
    /* Select which bit plane of eistate this register addresses */
    uint32_t state = (pend) ? IMSIC_EISTATE_PENDING : IMSIC_EISTATE_ENABLED;

    /*
     * When xlen is 64, odd-numbered eipX/eieX registers do not exist and
     * each even register covers twice as many identities, so fold the
     * register number accordingly.
     */
    if (xlen != 32) {
        if (num & 0x1) {
            return -EINVAL;
        }
        num >>= 1;
    }
    /* Reject registers beyond the implemented interrupt identities */
    if (num >= (imsic->num_irqs / xlen)) {
        return -EINVAL;
    }

    base = (page * imsic->num_irqs) + (num * xlen);

    if (val) {
        *val = 0;
    }

    for (i = 0; i < xlen; i++) {
        /* Bit0 of eip0 and eie0 are read-only zero */
        if (!num && !i) {
            continue;
        }

        mask = (target_ulong)1 << i;
        if (wr_mask & mask) {
            /* Atomically set or clear the selected bit for this identity */
            if (new_val & mask) {
                prev = qatomic_fetch_or(&imsic->eistate[base + i], state);
            } else {
                prev = qatomic_fetch_and(&imsic->eistate[base + i], ~state);
            }
        } else {
            prev = qatomic_read(&imsic->eistate[base + i]);
        }
        /* Report the pre-write value of each bit back to the caller */
        if (val && (prev & state)) {
            *val |= mask;
        }
    }

    riscv_imsic_update(imsic, page);
    return 0;
}
199 
200 static int riscv_imsic_rmw(void *arg, target_ulong reg, target_ulong *val,
201                            target_ulong new_val, target_ulong wr_mask)
202 {
203     RISCVIMSICState *imsic = arg;
204     uint32_t isel, priv, virt, vgein, xlen, page;
205 
206     priv = AIA_IREG_PRIV(reg);
207     virt = AIA_IREG_VIRT(reg);
208     isel = AIA_IREG_ISEL(reg);
209     vgein = AIA_IREG_VGEIN(reg);
210     xlen = AIA_IREG_XLEN(reg);
211 
212     if (imsic->mmode) {
213         if (priv == PRV_M && !virt) {
214             page = 0;
215         } else {
216             goto err;
217         }
218     } else {
219         if (priv == PRV_S) {
220             if (virt) {
221                 if (vgein && vgein < imsic->num_pages) {
222                     page = vgein;
223                 } else {
224                     goto err;
225                 }
226             } else {
227                 page = 0;
228             }
229         } else {
230             goto err;
231         }
232     }
233 
234     switch (isel) {
235     case ISELECT_IMSIC_EIDELIVERY:
236         return riscv_imsic_eidelivery_rmw(imsic, page, val,
237                                           new_val, wr_mask);
238     case ISELECT_IMSIC_EITHRESHOLD:
239         return riscv_imsic_eithreshold_rmw(imsic, page, val,
240                                            new_val, wr_mask);
241     case ISELECT_IMSIC_TOPEI:
242         return riscv_imsic_topei_rmw(imsic, page, val, new_val, wr_mask);
243     case ISELECT_IMSIC_EIP0 ... ISELECT_IMSIC_EIP63:
244         return riscv_imsic_eix_rmw(imsic, xlen, page,
245                                    isel - ISELECT_IMSIC_EIP0,
246                                    true, val, new_val, wr_mask);
247     case ISELECT_IMSIC_EIE0 ... ISELECT_IMSIC_EIE63:
248         return riscv_imsic_eix_rmw(imsic, xlen, page,
249                                    isel - ISELECT_IMSIC_EIE0,
250                                    false, val, new_val, wr_mask);
251     default:
252         break;
253     };
254 
255 err:
256     qemu_log_mask(LOG_GUEST_ERROR,
257                   "%s: Invalid register priv=%d virt=%d isel=%d vgein=%d\n",
258                   __func__, priv, virt, isel, vgein);
259     return -EINVAL;
260 }
261 
262 static uint64_t riscv_imsic_read(void *opaque, hwaddr addr, unsigned size)
263 {
264     RISCVIMSICState *imsic = opaque;
265 
266     /* Reads must be 4 byte words */
267     if ((addr & 0x3) != 0) {
268         goto err;
269     }
270 
271     /* Reads cannot be out of range */
272     if (addr > IMSIC_MMIO_SIZE(imsic->num_pages)) {
273         goto err;
274     }
275 
276     return 0;
277 
278 err:
279     qemu_log_mask(LOG_GUEST_ERROR,
280                   "%s: Invalid register read 0x%" HWADDR_PRIx "\n",
281                   __func__, addr);
282     return 0;
283 }
284 
285 static void riscv_imsic_write(void *opaque, hwaddr addr, uint64_t value,
286         unsigned size)
287 {
288     RISCVIMSICState *imsic = opaque;
289     uint32_t page;
290 
291     /* Writes must be 4 byte words */
292     if ((addr & 0x3) != 0) {
293         goto err;
294     }
295 
296     /* Writes cannot be out of range */
297     if (addr > IMSIC_MMIO_SIZE(imsic->num_pages)) {
298         goto err;
299     }
300 
301 #if defined(CONFIG_KVM)
302     if (kvm_irqchip_in_kernel()) {
303         struct kvm_msi msi;
304 
305         msi.address_lo = extract64(imsic->mmio.addr + addr, 0, 32);
306         msi.address_hi = extract64(imsic->mmio.addr + addr, 32, 32);
307         msi.data = le32_to_cpu(value);
308 
309         kvm_vm_ioctl(kvm_state, KVM_SIGNAL_MSI, &msi);
310 
311         return;
312     }
313 #endif
314 
315     /* Writes only supported for MSI little-endian registers */
316     page = addr >> IMSIC_MMIO_PAGE_SHIFT;
317     if ((addr & (IMSIC_MMIO_PAGE_SZ - 1)) == IMSIC_MMIO_PAGE_LE) {
318         if (value && (value < imsic->num_irqs)) {
319             qatomic_or(&imsic->eistate[(page * imsic->num_irqs) + value],
320                        IMSIC_EISTATE_PENDING);
321 
322             /* Update CPU external interrupt status */
323             riscv_imsic_update(imsic, page);
324         }
325     }
326 
327     return;
328 
329 err:
330     qemu_log_mask(LOG_GUEST_ERROR,
331                   "%s: Invalid register write 0x%" HWADDR_PRIx "\n",
332                   __func__, addr);
333 }
334 
/*
 * MMIO ops for the IMSIC interrupt-file pages: only aligned 32-bit
 * accesses are accepted; reads return zero (the registers are
 * write-only) and writes deliver MSIs.
 */
static const MemoryRegionOps riscv_imsic_ops = {
    .read = riscv_imsic_read,
    .write = riscv_imsic_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4
    }
};
344 
/*
 * Realize handler: allocates the per-page software state (unless KVM
 * emulates the IMSIC in kernel), maps the MMIO region, claims the CPU
 * external-interrupt bit, creates the output IRQ lines, and installs the
 * AIA indirect-CSR callback on the target CPU.
 */
static void riscv_imsic_realize(DeviceState *dev, Error **errp)
{
    RISCVIMSICState *imsic = RISCV_IMSIC(dev);
    RISCVCPU *rcpu = RISCV_CPU(cpu_by_arch_id(imsic->hartid));
    CPUState *cpu = cpu_by_arch_id(imsic->hartid);
    CPURISCVState *env = cpu ? cpu_env(cpu) : NULL;

    /* With an in-kernel irqchip, KVM holds the interrupt-file state */
    if (!kvm_irqchip_in_kernel()) {
        imsic->num_eistate = imsic->num_pages * imsic->num_irqs;
        imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
        imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
        imsic->eistate = g_new0(uint32_t, imsic->num_eistate);
    }

    memory_region_init_io(&imsic->mmio, OBJECT(dev), &riscv_imsic_ops,
                          imsic, TYPE_RISCV_IMSIC,
                          IMSIC_MMIO_SIZE(imsic->num_pages));
    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &imsic->mmio);

    /* Claim the CPU interrupt to be triggered by this IMSIC */
    if (riscv_cpu_claim_interrupts(rcpu,
            (imsic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
        error_setg(errp, "%s already claimed",
                   (imsic->mmode) ? "MEIP" : "SEIP");
        return;
    }

    /* Create output IRQ lines */
    imsic->external_irqs = g_malloc(sizeof(qemu_irq) * imsic->num_pages);
    qdev_init_gpio_out(dev, imsic->external_irqs, imsic->num_pages);

    /* Force select AIA feature and setup CSR read-modify-write callback */
    if (env) {
        if (!imsic->mmode) {
            rcpu->cfg.ext_ssaia = true;
            /* Guest-file count is num_pages minus the direct S-level page */
            riscv_cpu_set_geilen(env, imsic->num_pages - 1);
        } else {
            rcpu->cfg.ext_smaia = true;
        }
        riscv_cpu_set_aia_ireg_rmw_fn(env, (imsic->mmode) ? PRV_M : PRV_S,
                                      riscv_imsic_rmw, imsic);
    }

    msi_nonbroken = true;
}
390 
static Property riscv_imsic_properties[] = {
    /* True for an M-level IMSIC, false for an S-level one */
    DEFINE_PROP_BOOL("mmode", RISCVIMSICState, mmode, 0),
    /* Arch id of the hart this IMSIC is attached to */
    DEFINE_PROP_UINT32("hartid", RISCVIMSICState, hartid, 0),
    /* Number of interrupt-file pages (direct page plus guest files) */
    DEFINE_PROP_UINT32("num-pages", RISCVIMSICState, num_pages, 0),
    /* Interrupt identities per page, including the nonexistent identity 0 */
    DEFINE_PROP_UINT32("num-irqs", RISCVIMSICState, num_irqs, 0),
    DEFINE_PROP_END_OF_LIST(),
};
398 
/*
 * Migration state: the per-page eidelivery/eithreshold registers and the
 * flat eistate array. These arrays are only allocated when the IMSIC is
 * emulated in userspace (see riscv_imsic_realize).
 */
static const VMStateDescription vmstate_riscv_imsic = {
    .name = "riscv_imsic",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
            VMSTATE_VARRAY_UINT32(eidelivery, RISCVIMSICState,
                                  num_pages, 0,
                                  vmstate_info_uint32, uint32_t),
            VMSTATE_VARRAY_UINT32(eithreshold, RISCVIMSICState,
                                  num_pages, 0,
                                  vmstate_info_uint32, uint32_t),
            VMSTATE_VARRAY_UINT32(eistate, RISCVIMSICState,
                                  num_eistate, 0,
                                  vmstate_info_uint32, uint32_t),
            VMSTATE_END_OF_LIST()
        }
};
416 
417 static void riscv_imsic_class_init(ObjectClass *klass, void *data)
418 {
419     DeviceClass *dc = DEVICE_CLASS(klass);
420 
421     device_class_set_props(dc, riscv_imsic_properties);
422     dc->realize = riscv_imsic_realize;
423     dc->vmsd = &vmstate_riscv_imsic;
424 }
425 
/* QOM type description for the IMSIC sysbus device */
static const TypeInfo riscv_imsic_info = {
    .name          = TYPE_RISCV_IMSIC,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(RISCVIMSICState),
    .class_init    = riscv_imsic_class_init,
};
432 
/* Register the IMSIC type with QOM at module-init time */
static void riscv_imsic_register_types(void)
{
    type_register_static(&riscv_imsic_info);
}

type_init(riscv_imsic_register_types)
439 
440 /*
441  * Create IMSIC device.
442  */
443 DeviceState *riscv_imsic_create(hwaddr addr, uint32_t hartid, bool mmode,
444                                 uint32_t num_pages, uint32_t num_ids)
445 {
446     DeviceState *dev = qdev_new(TYPE_RISCV_IMSIC);
447     CPUState *cpu = cpu_by_arch_id(hartid);
448     uint32_t i;
449 
450     assert(!(addr & (IMSIC_MMIO_PAGE_SZ - 1)));
451     if (mmode) {
452         assert(num_pages == 1);
453     } else {
454         assert(num_pages >= 1 && num_pages <= (IRQ_LOCAL_GUEST_MAX + 1));
455     }
456     assert(IMSIC_MIN_ID <= num_ids);
457     assert(num_ids <= IMSIC_MAX_ID);
458     assert((num_ids & IMSIC_MIN_ID) == IMSIC_MIN_ID);
459 
460     qdev_prop_set_bit(dev, "mmode", mmode);
461     qdev_prop_set_uint32(dev, "hartid", hartid);
462     qdev_prop_set_uint32(dev, "num-pages", num_pages);
463     qdev_prop_set_uint32(dev, "num-irqs", num_ids + 1);
464 
465     sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
466     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
467 
468     for (i = 0; i < num_pages; i++) {
469         if (!i) {
470             qdev_connect_gpio_out_named(dev, NULL, i,
471                                         qdev_get_gpio_in(DEVICE(cpu),
472                                             (mmode) ? IRQ_M_EXT : IRQ_S_EXT));
473         } else {
474             qdev_connect_gpio_out_named(dev, NULL, i,
475                                         qdev_get_gpio_in(DEVICE(cpu),
476                                             IRQ_LOCAL_MAX + i - 1));
477         }
478     }
479 
480     return dev;
481 }
482