/*
 * RISC-V IMSIC (Incoming Message Signaled Interrupt Controller)
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "qemu/bswap.h"
#include "exec/address-spaces.h"
#include "hw/sysbus.h"
#include "hw/pci/msi.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "hw/intc/riscv_imsic.h"
#include "hw/irq.h"
#include "target/riscv/cpu.h"
#include "target/riscv/cpu_bits.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "migration/vmstate.h"

#define IMSIC_MMIO_PAGE_LE             0x00
#define IMSIC_MMIO_PAGE_BE             0x04

#define IMSIC_MIN_ID                   ((IMSIC_EIPx_BITS * 2) - 1)
#define IMSIC_MAX_ID                   (IMSIC_TOPEI_IID_MASK)

#define IMSIC_EISTATE_PENDING          (1U << 0)
#define IMSIC_EISTATE_ENABLED          (1U << 1)
#define IMSIC_EISTATE_ENPEND           (IMSIC_EISTATE_ENABLED | \
                                        IMSIC_EISTATE_PENDING)

static uint32_t riscv_imsic_topei(RISCVIMSICState *imsic, uint32_t page)
{
    uint32_t i, max_irq, base;

    base = page * imsic->num_irqs;
    max_irq = (imsic->eithreshold[page] &&
               (imsic->eithreshold[page] <= imsic->num_irqs)) ?
              imsic->eithreshold[page] : imsic->num_irqs;
    for (i = 1; i < max_irq; i++) {
        if ((imsic->eistate[base + i] & IMSIC_EISTATE_ENPEND) ==
                IMSIC_EISTATE_ENPEND) {
            return (i << IMSIC_TOPEI_IID_SHIFT) | i;
        }
    }

    return 0;
}
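
/*
 * Raise or lower the external interrupt line for one interrupt file page:
 * the line is asserted only while delivery is enabled for the page
 * (eidelivery) and at least one interrupt identity is both enabled and
 * pending below the page's eithreshold, i.e. riscv_imsic_topei() != 0.
 */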
static void riscv_imsic_update(RISCVIMSICState *imsic, uint32_t page)
{
    if (imsic->eidelivery[page] && riscv_imsic_topei(imsic, page)) {
        qemu_irq_raise(imsic->external_irqs[page]);
    } else {
        qemu_irq_lower(imsic->external_irqs[page]);
    }
}

static int riscv_imsic_eidelivery_rmw(RISCVIMSICState *imsic, uint32_t page,
                                      target_ulong *val,
                                      target_ulong new_val,
                                      target_ulong wr_mask)
{
    target_ulong old_val = imsic->eidelivery[page];

    if (val) {
        *val = old_val;
    }

    wr_mask &= 0x1;
    imsic->eidelivery[page] = (old_val & ~wr_mask) | (new_val & wr_mask);

    riscv_imsic_update(imsic, page);
    return 0;
}

static int riscv_imsic_eithreshold_rmw(RISCVIMSICState *imsic, uint32_t page,
                                       target_ulong *val,
                                       target_ulong new_val,
                                       target_ulong wr_mask)
{
    target_ulong old_val = imsic->eithreshold[page];

    if (val) {
        *val = old_val;
    }

    wr_mask &= IMSIC_MAX_ID;
    imsic->eithreshold[page] = (old_val & ~wr_mask) | (new_val & wr_mask);

    riscv_imsic_update(imsic, page);
    return 0;
}

static int riscv_imsic_topei_rmw(RISCVIMSICState *imsic, uint32_t page,
                                 target_ulong *val, target_ulong new_val,
                                 target_ulong wr_mask)
{
    uint32_t base, topei = riscv_imsic_topei(imsic, page);

    /* Read pending and enabled interrupt with highest priority */
    if (val) {
        *val = topei;
    }

    /* Writes ignore value and clear top pending interrupt */
    if (topei && wr_mask) {
        topei >>= IMSIC_TOPEI_IID_SHIFT;
        base = page * imsic->num_irqs;
        if (topei) {
            imsic->eistate[base + topei] &= ~IMSIC_EISTATE_PENDING;
        }

        riscv_imsic_update(imsic, page);
    }

    return 0;
}

static int riscv_imsic_eix_rmw(RISCVIMSICState *imsic,
                               uint32_t xlen, uint32_t page,
                               uint32_t num, bool pend, target_ulong *val,
                               target_ulong new_val, target_ulong wr_mask)
{
    uint32_t i, base;
    target_ulong mask;
    uint32_t state = (pend) ? IMSIC_EISTATE_PENDING : IMSIC_EISTATE_ENABLED;

    if (xlen != 32) {
        if (num & 0x1) {
            return -EINVAL;
        }
        num >>= 1;
    }
    if (num >= (imsic->num_irqs / xlen)) {
        return -EINVAL;
    }

    base = (page * imsic->num_irqs) + (num * xlen);

    if (val) {
        *val = 0;
        for (i = 0; i < xlen; i++) {
            mask = (target_ulong)1 << i;
            *val |= (imsic->eistate[base + i] & state) ? mask : 0;
        }
    }

    for (i = 0; i < xlen; i++) {
        /* Bit0 of eip0 and eie0 are read-only zero */
        if (!num && !i) {
            continue;
        }

        mask = (target_ulong)1 << i;
        if (wr_mask & mask) {
            if (new_val & mask) {
                imsic->eistate[base + i] |= state;
            } else {
                imsic->eistate[base + i] &= ~state;
            }
        }
    }

    riscv_imsic_update(imsic, page);
    return 0;
}
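
/*
 * AIA indirect CSR access callback registered with the CPU: the encoded
 * register number selects the privilege level, guest interrupt file (VGEIN)
 * and indirect register (ISELECT), which is then dispatched to one of the
 * read-modify-write helpers above.
 */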
static int riscv_imsic_rmw(void *arg, target_ulong reg, target_ulong *val,
                           target_ulong new_val, target_ulong wr_mask)
{
    RISCVIMSICState *imsic = arg;
    uint32_t isel, priv, virt, vgein, xlen, page;

    priv = AIA_IREG_PRIV(reg);
    virt = AIA_IREG_VIRT(reg);
    isel = AIA_IREG_ISEL(reg);
    vgein = AIA_IREG_VGEIN(reg);
    xlen = AIA_IREG_XLEN(reg);

    if (imsic->mmode) {
        if (priv == PRV_M && !virt) {
            page = 0;
        } else {
            goto err;
        }
    } else {
        if (priv == PRV_S) {
            if (virt) {
                if (vgein && vgein < imsic->num_pages) {
                    page = vgein;
                } else {
                    goto err;
                }
            } else {
                page = 0;
            }
        } else {
            goto err;
        }
    }

    switch (isel) {
    case ISELECT_IMSIC_EIDELIVERY:
        return riscv_imsic_eidelivery_rmw(imsic, page, val,
                                          new_val, wr_mask);
    case ISELECT_IMSIC_EITHRESHOLD:
        return riscv_imsic_eithreshold_rmw(imsic, page, val,
                                           new_val, wr_mask);
    case ISELECT_IMSIC_TOPEI:
        return riscv_imsic_topei_rmw(imsic, page, val, new_val, wr_mask);
    case ISELECT_IMSIC_EIP0 ... ISELECT_IMSIC_EIP63:
        return riscv_imsic_eix_rmw(imsic, xlen, page,
                                   isel - ISELECT_IMSIC_EIP0,
                                   true, val, new_val, wr_mask);
    case ISELECT_IMSIC_EIE0 ... ISELECT_IMSIC_EIE63:
        return riscv_imsic_eix_rmw(imsic, xlen, page,
                                   isel - ISELECT_IMSIC_EIE0,
                                   false, val, new_val, wr_mask);
    default:
        break;
    }

err:
    qemu_log_mask(LOG_GUEST_ERROR,
                  "%s: Invalid register priv=%d virt=%d isel=%d vgein=%d\n",
                  __func__, priv, virt, isel, vgein);
    return -EINVAL;
}

static uint64_t riscv_imsic_read(void *opaque, hwaddr addr, unsigned size)
{
    RISCVIMSICState *imsic = opaque;

    /* Reads must be 4 byte words */
    if ((addr & 0x3) != 0) {
        goto err;
    }

    /* Reads cannot be out of range */
    if (addr > IMSIC_MMIO_SIZE(imsic->num_pages)) {
        goto err;
    }

    return 0;

err:
    qemu_log_mask(LOG_GUEST_ERROR,
                  "%s: Invalid register read 0x%" HWADDR_PRIx "\n",
                  __func__, addr);
    return 0;
}

static void riscv_imsic_write(void *opaque, hwaddr addr, uint64_t value,
                              unsigned size)
{
    RISCVIMSICState *imsic = opaque;
    uint32_t page;

    /* Writes must be 4 byte words */
    if ((addr & 0x3) != 0) {
        goto err;
    }

    /* Writes cannot be out of range */
    if (addr > IMSIC_MMIO_SIZE(imsic->num_pages)) {
        goto err;
    }

#if defined(CONFIG_KVM)
    if (kvm_irqchip_in_kernel()) {
        struct kvm_msi msi;

        msi.address_lo = extract64(imsic->mmio.addr + addr, 0, 32);
        msi.address_hi = extract64(imsic->mmio.addr + addr, 32, 32);
        msi.data = le32_to_cpu(value);

        kvm_vm_ioctl(kvm_state, KVM_SIGNAL_MSI, &msi);

        return;
    }
#endif

    /* Writes only supported for MSI little-endian registers */
    page = addr >> IMSIC_MMIO_PAGE_SHIFT;
    if ((addr & (IMSIC_MMIO_PAGE_SZ - 1)) == IMSIC_MMIO_PAGE_LE) {
        if (value && (value < imsic->num_irqs)) {
            imsic->eistate[(page * imsic->num_irqs) + value] |=
                IMSIC_EISTATE_PENDING;
        }
    }

    /* Update CPU external interrupt status */
    riscv_imsic_update(imsic, page);

    return;

err:
    qemu_log_mask(LOG_GUEST_ERROR,
                  "%s: Invalid register write 0x%" HWADDR_PRIx "\n",
                  __func__, addr);
}

static const MemoryRegionOps riscv_imsic_ops = {
    .read = riscv_imsic_read,
    .write = riscv_imsic_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4
    }
};
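
/*
 * Realize the per-hart IMSIC: allocate the software interrupt-file state
 * (skipped when the in-kernel KVM irqchip provides it), expose the MMIO
 * doorbell region, claim the hart's external interrupt (MEIP or SEIP), and
 * register the AIA indirect CSR callback with the target CPU.
 */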
static void riscv_imsic_realize(DeviceState *dev, Error **errp)
{
    RISCVIMSICState *imsic = RISCV_IMSIC(dev);
    RISCVCPU *rcpu = RISCV_CPU(cpu_by_arch_id(imsic->hartid));
    CPUState *cpu = cpu_by_arch_id(imsic->hartid);
    CPURISCVState *env = cpu ? cpu->env_ptr : NULL;

    if (!kvm_irqchip_in_kernel()) {
        imsic->num_eistate = imsic->num_pages * imsic->num_irqs;
        imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
        imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
        imsic->eistate = g_new0(uint32_t, imsic->num_eistate);
    }

    memory_region_init_io(&imsic->mmio, OBJECT(dev), &riscv_imsic_ops,
                          imsic, TYPE_RISCV_IMSIC,
                          IMSIC_MMIO_SIZE(imsic->num_pages));
    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &imsic->mmio);

    /* Claim the CPU interrupt to be triggered by this IMSIC */
    if (riscv_cpu_claim_interrupts(rcpu,
                                   (imsic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
        error_setg(errp, "%s already claimed",
                   (imsic->mmode) ? "MEIP" : "SEIP");
        return;
    }

    /* Create output IRQ lines */
    imsic->external_irqs = g_malloc(sizeof(qemu_irq) * imsic->num_pages);
    qdev_init_gpio_out(dev, imsic->external_irqs, imsic->num_pages);

    /* Force select AIA feature and setup CSR read-modify-write callback */
    if (env) {
        if (!imsic->mmode) {
            rcpu->cfg.ext_ssaia = true;
            riscv_cpu_set_geilen(env, imsic->num_pages - 1);
        } else {
            rcpu->cfg.ext_smaia = true;
        }
        riscv_cpu_set_aia_ireg_rmw_fn(env, (imsic->mmode) ? PRV_M : PRV_S,
                                      riscv_imsic_rmw, imsic);
    }

    msi_nonbroken = true;
}
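
/*
 * Device properties: "num-pages" is the number of interrupt-file pages
 * (1 for M-mode, 1 + guest interrupt files for S-mode) and "num-irqs" is
 * the number of interrupt identities per page, including identity 0 which
 * is never usable.
 */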
static Property riscv_imsic_properties[] = {
    DEFINE_PROP_BOOL("mmode", RISCVIMSICState, mmode, 0),
    DEFINE_PROP_UINT32("hartid", RISCVIMSICState, hartid, 0),
    DEFINE_PROP_UINT32("num-pages", RISCVIMSICState, num_pages, 0),
    DEFINE_PROP_UINT32("num-irqs", RISCVIMSICState, num_irqs, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_riscv_imsic = {
    .name = "riscv_imsic",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VARRAY_UINT32(eidelivery, RISCVIMSICState,
                              num_pages, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(eithreshold, RISCVIMSICState,
                              num_pages, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(eistate, RISCVIMSICState,
                              num_eistate, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_END_OF_LIST()
    }
};

static void riscv_imsic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, riscv_imsic_properties);
    dc->realize = riscv_imsic_realize;
    dc->vmsd = &vmstate_riscv_imsic;
}

static const TypeInfo riscv_imsic_info = {
    .name          = TYPE_RISCV_IMSIC,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(RISCVIMSICState),
    .class_init    = riscv_imsic_class_init,
};

static void riscv_imsic_register_types(void)
{
    type_register_static(&riscv_imsic_info);
}

type_init(riscv_imsic_register_types)

/*
 * Create IMSIC device.
 */
DeviceState *riscv_imsic_create(hwaddr addr, uint32_t hartid, bool mmode,
                                uint32_t num_pages, uint32_t num_ids)
{
    DeviceState *dev = qdev_new(TYPE_RISCV_IMSIC);
    CPUState *cpu = cpu_by_arch_id(hartid);
    uint32_t i;

    assert(!(addr & (IMSIC_MMIO_PAGE_SZ - 1)));
    if (mmode) {
        assert(num_pages == 1);
    } else {
        assert(num_pages >= 1 && num_pages <= (IRQ_LOCAL_GUEST_MAX + 1));
    }
    assert(IMSIC_MIN_ID <= num_ids);
    assert(num_ids <= IMSIC_MAX_ID);
    assert((num_ids & IMSIC_MIN_ID) == IMSIC_MIN_ID);

    qdev_prop_set_bit(dev, "mmode", mmode);
    qdev_prop_set_uint32(dev, "hartid", hartid);
    qdev_prop_set_uint32(dev, "num-pages", num_pages);
    qdev_prop_set_uint32(dev, "num-irqs", num_ids + 1);

    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);

    for (i = 0; i < num_pages; i++) {
        if (!i) {
            qdev_connect_gpio_out_named(dev, NULL, i,
                                        qdev_get_gpio_in(DEVICE(cpu),
                                            (mmode) ? IRQ_M_EXT : IRQ_S_EXT));
        } else {
            qdev_connect_gpio_out_named(dev, NULL, i,
                                        qdev_get_gpio_in(DEVICE(cpu),
                                            IRQ_LOCAL_MAX + i - 1));
        }
    }

    return dev;
}