/*
 * SuperH interrupt controller module
 *
 * Copyright (c) 2007 Magnus Damm
 * Based on sh_timer.c and arm_timer.c by Paul Brook
 * Copyright (c) 2005-2006 CodeSourcery.
 *
 * This code is licensed under the GPL.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "hw/sh4/sh_intc.h"
#include "hw/irq.h"
#include "hw/sh4/sh.h"
#include "trace.h"

/*
 * Adjust a source's enable reference count and/or assertion count, then
 * recompute its pending state.
 *
 * @enable_adj: +1 / -1 / 0 change to the enable reference count.  A source
 *              is considered enabled only when enable_count reaches
 *              enable_max (i.e. every register that can mask it agrees).
 * @assert_adj: +1 / -1 / 0 change to the assertion count.
 *
 * The parent controller keeps a count of pending sources; the CPU HARD
 * interrupt line is raised on the 0 -> 1 transition of that count and
 * lowered on the 1 -> 0 transition, so the CPU sees the logical OR of all
 * pending sources.
 */
void sh_intc_toggle_source(struct intc_source *source,
                           int enable_adj, int assert_adj)
{
    int enable_changed = 0;
    int pending_changed = 0;
    int old_pending;

    /* Detect the enabled -> disabled edge before applying the adjustment */
    if ((source->enable_count == source->enable_max) && (enable_adj == -1)) {
        enable_changed = -1;
    }
    source->enable_count += enable_adj;

    /* Detect the disabled -> enabled edge after applying the adjustment */
    if (source->enable_count == source->enable_max) {
        enable_changed = 1;
    }
    source->asserted += assert_adj;

    /* Pending means: asserted by hardware AND fully enabled */
    old_pending = source->pending;
    source->pending = source->asserted &&
        (source->enable_count == source->enable_max);

    if (old_pending != source->pending) {
        pending_changed = 1;
    }
    if (pending_changed) {
        if (source->pending) {
            source->parent->pending++;
            if (source->parent->pending == 1) {
                /* First pending source: raise the CPU interrupt line */
                cpu_interrupt(first_cpu, CPU_INTERRUPT_HARD);
            }
        } else {
            source->parent->pending--;
            if (source->parent->pending == 0) {
                /* Last pending source cleared: drop the CPU line */
                cpu_reset_interrupt(first_cpu, CPU_INTERRUPT_HARD);
            }
        }
    }

    if (enable_changed || assert_adj || pending_changed) {
        trace_sh_intc_sources(source->parent->pending, source->asserted,
                              source->enable_count, source->enable_max,
                              source->vect, source->asserted ? "asserted " :
                              assert_adj ? "deasserted" : "",
                              enable_changed == 1 ? "enabled " :
                              enable_changed == -1 ? "disabled " : "",
                              source->pending ? "pending" : "");
    }
}

/*
 * qemu_irq handler for source n: level-triggered input.  Only the edges
 * are forwarded (assert on 0 -> 1, deassert on 1 -> 0), so repeated calls
 * at the same level do not unbalance the assertion count.
 */
static void sh_intc_set_irq(void *opaque, int n, int level)
{
    struct intc_desc *desc = opaque;
    struct intc_source *source = &(desc->sources[n]);

    if (level && !source->asserted) {
        sh_intc_toggle_source(source, 0, 1);
    } else if (!level && source->asserted) {
        sh_intc_toggle_source(source, 0, -1);
    }
}

/*
 * Return the exception vector of some pending source, or -1 when the
 * CPU's interrupt mask (imask == 0x0f) blocks everything.
 *
 * NOTE(review): aborts if no source is pending — callers are expected to
 * check desc->pending (or equivalent) before calling; verify at call sites.
 */
int sh_intc_get_pending_vector(struct intc_desc *desc, int imask)
{
    unsigned int i;

    /* slow: use a linked lists of pending sources instead */
    /* wrong: take interrupt priority into account (one list per priority) */

    if (imask == 0x0f) {
        return -1; /* FIXME, update code to include priority per source */
    }

    for (i = 0; i < desc->nr_sources; i++) {
        struct intc_source *source = desc->sources + i;

        if (source->pending) {
            trace_sh_intc_pending(desc->pending, source->vect);
            return source->vect;
        }
    }

    abort();
}

/* How a guest write to a given register address must be interpreted */
typedef enum {
    INTC_MODE_NONE,       /* address matches neither set_reg nor clr_reg */
    INTC_MODE_DUAL_SET,   /* paired registers: write sets bits */
    INTC_MODE_DUAL_CLR,   /* paired registers: write clears bits */
    INTC_MODE_ENABLE_REG, /* single register: bit set = source enabled */
    INTC_MODE_MASK_REG,   /* single register: bit set = source masked */
} SHIntCMode;
/* OR-ed into the mode when the register holds priority fields */
#define INTC_MODE_IS_PRIO 0x80

/*
 * Classify @address against a register pair.  Addresses are compared in
 * A7 (physical) form so that accesses through any SH-4 segment match.
 */
static SHIntCMode sh_intc_mode(unsigned long address, unsigned long set_reg,
                               unsigned long clr_reg)
{
    if (address != A7ADDR(set_reg) && address != A7ADDR(clr_reg)) {
        return INTC_MODE_NONE;
    }
    if (set_reg && clr_reg) {
        return address == A7ADDR(set_reg) ?
               INTC_MODE_DUAL_SET : INTC_MODE_DUAL_CLR;
    }
    return set_reg ? INTC_MODE_ENABLE_REG : INTC_MODE_MASK_REG;
}

/*
 * Find the mask or priority register backing @address and describe its
 * layout:
 *   *datap - the register's backing value
 *   *enums - per-field source/group enum ids
 *   *first - index of the highest field (fields are scanned high to low)
 *   *width - field width in bits (1 for mask registers)
 *   *modep - SHIntCMode, with INTC_MODE_IS_PRIO set for priority registers
 * Aborts when the address belongs to no known register.
 */
static void sh_intc_locate(struct intc_desc *desc,
                           unsigned long address,
                           unsigned long **datap,
                           intc_enum **enums,
                           unsigned int *first,
                           unsigned int *width,
                           unsigned int *modep)
{
    SHIntCMode mode;
    unsigned int i;

    /* this is slow but works for now */

    if (desc->mask_regs) {
        for (i = 0; i < desc->nr_mask_regs; i++) {
            struct intc_mask_reg *mr = desc->mask_regs + i;

            mode = sh_intc_mode(address, mr->set_reg, mr->clr_reg);
            if (mode == INTC_MODE_NONE) {
                continue;
            }
            *modep = mode;
            *datap = &mr->value;
            *enums = mr->enum_ids;
            *first = mr->reg_width - 1;
            *width = 1; /* one bit per source */
            return;
        }
    }

    if (desc->prio_regs) {
        for (i = 0; i < desc->nr_prio_regs; i++) {
            struct intc_prio_reg *pr = desc->prio_regs + i;

            mode = sh_intc_mode(address, pr->set_reg, pr->clr_reg);
            if (mode == INTC_MODE_NONE) {
                continue;
            }
            *modep = mode | INTC_MODE_IS_PRIO;
            *datap = &pr->value;
            *enums = pr->enum_ids;
            *first = (pr->reg_width / pr->field_width) - 1;
            *width = pr->field_width;
            return;
        }
    }

    abort();
}

/*
 * Enable or disable the source identified by @id.  Group entries carry no
 * vector themselves; their members are chained through next_enum_id, and
 * the chain is walked recursively (is_group is set for recursive calls so
 * that a vectored source's own chain link is also followed).
 */
static void sh_intc_toggle_mask(struct intc_desc *desc, intc_enum id,
                                int enable, int is_group)
{
    struct intc_source *source = desc->sources + id;

    if (!id) {
        return; /* enum id 0 terminates chains / marks empty slots */
    }
    if (!source->next_enum_id && (!source->enable_max || !source->vect)) {
        /* Not a vectored source and not a group head: nothing to toggle */
        qemu_log_mask(LOG_UNIMP,
                      "sh_intc: reserved interrupt source %d modified\n", id);
        return;
    }

    if (source->vect) {
        sh_intc_toggle_source(source, enable ? 1 : -1, 0);
    }

    if ((is_group || !source->vect) && source->next_enum_id) {
        sh_intc_toggle_mask(desc, source->next_enum_id, enable, 1);
    }

    if (!source->vect) {
        trace_sh_intc_set(id, !!enable);
    }
}

/*
 * MMIO read: return the backing value of the register at @offset.
 * Aborts (via sh_intc_locate) on an unknown address.
 */
static uint64_t sh_intc_read(void *opaque, hwaddr offset,
                             unsigned size)
{
    struct intc_desc *desc = opaque;
    intc_enum *enum_ids = NULL;
    unsigned int first = 0;
    unsigned int width = 0;
    unsigned int mode = 0;
    unsigned long *valuep;

    sh_intc_locate(desc, (unsigned long)offset, &valuep,
                   &enum_ids, &first, &width, &mode);
    trace_sh_intc_read(size, (uint64_t)offset, *valuep);
    return *valuep;
}

/*
 * MMIO write: compute the effective new register value, then toggle every
 * source whose field actually changed, field by field from the most
 * significant down.  A field's new nonzero/zero state selects enable or
 * disable.  Only priority registers and dual set/clr mask registers are
 * handled; any other mode aborts (unimplemented register flavor).
 */
static void sh_intc_write(void *opaque, hwaddr offset,
                          uint64_t value, unsigned size)
{
    struct intc_desc *desc = opaque;
    intc_enum *enum_ids = NULL;
    unsigned int first = 0;
    unsigned int width = 0;
    unsigned int mode = 0;
    unsigned int k;
    unsigned long *valuep;
    unsigned long mask;

    trace_sh_intc_write(size, (uint64_t)offset, value);
    sh_intc_locate(desc, (unsigned long)offset, &valuep,
                   &enum_ids, &first, &width, &mode);
    switch (mode) {
    case INTC_MODE_ENABLE_REG | INTC_MODE_IS_PRIO:
        break; /* priority register: written value is taken as-is */
    case INTC_MODE_DUAL_SET:
        value |= *valuep;  /* set bits on top of the current value */
        break;
    case INTC_MODE_DUAL_CLR:
        value = *valuep & ~value; /* clear the written bits */
        break;
    default:
        abort();
    }

    for (k = 0; k <= first; k++) {
        mask = ((1 << width) - 1) << ((first - k) * width);

        if ((*valuep & mask) == (value & mask)) {
            continue; /* field unchanged: no toggling needed */
        }
        sh_intc_toggle_mask(desc, enum_ids[k], value & mask, 0);
    }

    *valuep = value;
}

static const MemoryRegionOps sh_intc_ops = {
    .read = sh_intc_read,
    .write = sh_intc_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/*
 * Look up the source for enum @id; returns NULL for the reserved id 0
 * (used as a chain terminator / empty slot marker).
 */
struct intc_source *sh_intc_source(struct intc_desc *desc, intc_enum id)
{
    if (id) {
        return desc->sources + id;
    }
    return NULL;
}
/*
 * Map one controller register into the system address space by creating
 * two aliases of the controller's flat MMIO region: one at the P4
 * (privileged, uncached) address and one at the A7 (physical) address.
 * Both alias the same 4-byte window at the register's A7 offset within
 * desc->iomem, so either mapping reaches the same handler.
 *
 * Returns the number of alias MemoryRegions consumed (2), or 0 when the
 * register does not exist (@address == 0); the caller uses the return
 * value to advance its index into desc->iomem_aliases.
 */
static unsigned int sh_intc_register(MemoryRegion *sysmem,
                                     struct intc_desc *desc,
                                     const unsigned long address,
                                     const char *type,
                                     const char *action,
                                     const unsigned int index)
{
    char name[60];
    MemoryRegion *iomem, *iomem_p4, *iomem_a7;

    if (!address) {
        return 0;
    }

    iomem = &desc->iomem;
    iomem_p4 = desc->iomem_aliases + index;
    iomem_a7 = iomem_p4 + 1;

    snprintf(name, sizeof(name), "intc-%s-%s-%s", type, action, "p4");
    memory_region_init_alias(iomem_p4, NULL, name, iomem, A7ADDR(address), 4);
    memory_region_add_subregion(sysmem, P4ADDR(address), iomem_p4);

    snprintf(name, sizeof(name), "intc-%s-%s-%s", type, action, "a7");
    memory_region_init_alias(iomem_a7, NULL, name, iomem, A7ADDR(address), 4);
    memory_region_add_subregion(sysmem, A7ADDR(address), iomem_a7);

    /* used to increment aliases index */
    return 2;
}

/*
 * Count, for a single source enum, how many register fields (mask
 * registers, priority registers, and group memberships) reference it.
 * Each reference bumps enable_max; the source only becomes enabled once
 * enable_count matches that total in sh_intc_toggle_source().
 */
static void sh_intc_register_source(struct intc_desc *desc,
                                    intc_enum source,
                                    struct intc_group *groups,
                                    int nr_groups)
{
    unsigned int i, k;
    struct intc_source *s;

    if (desc->mask_regs) {
        for (i = 0; i < desc->nr_mask_regs; i++) {
            struct intc_mask_reg *mr = desc->mask_regs + i;

            for (k = 0; k < ARRAY_SIZE(mr->enum_ids); k++) {
                if (mr->enum_ids[k] != source) {
                    continue;
                }
                s = sh_intc_source(desc, mr->enum_ids[k]);
                if (s) {
                    s->enable_max++;
                }
            }
        }
    }

    if (desc->prio_regs) {
        for (i = 0; i < desc->nr_prio_regs; i++) {
            struct intc_prio_reg *pr = desc->prio_regs + i;

            for (k = 0; k < ARRAY_SIZE(pr->enum_ids); k++) {
                if (pr->enum_ids[k] != source) {
                    continue;
                }
                s = sh_intc_source(desc, pr->enum_ids[k]);
                if (s) {
                    s->enable_max++;
                }
            }
        }
    }

    if (groups) {
        for (i = 0; i < nr_groups; i++) {
            struct intc_group *gr = groups + i;

            for (k = 0; k < ARRAY_SIZE(gr->enum_ids); k++) {
                if (gr->enum_ids[k] != source) {
                    continue;
                }
                s = sh_intc_source(desc, gr->enum_ids[k]);
                if (s) {
                    s->enable_max++;
                }
            }
        }
    }

}

/*
 * Register the board's interrupt vectors and groups with the controller:
 * each vectored source gets its exception vector and its enable_max
 * reference count; each group's members are linked into a singly linked
 * chain through next_enum_id (group head -> member 0 -> member 1 -> ...),
 * which sh_intc_toggle_mask() walks when the group is toggled.
 */
void sh_intc_register_sources(struct intc_desc *desc,
                              struct intc_vect *vectors,
                              int nr_vectors,
                              struct intc_group *groups,
                              int nr_groups)
{
    unsigned int i, k;
    struct intc_source *s;

    for (i = 0; i < nr_vectors; i++) {
        struct intc_vect *vect = vectors + i;

        sh_intc_register_source(desc, vect->enum_id, groups, nr_groups);
        s = sh_intc_source(desc, vect->enum_id);
        if (s) {
            s->vect = vect->vect;
            trace_sh_intc_register("source", vect->enum_id, s->vect,
                                   s->enable_count, s->enable_max);
        }
    }

    if (groups) {
        for (i = 0; i < nr_groups; i++) {
            struct intc_group *gr = groups + i;

            /* Chain head: the group entry points at its first member */
            s = sh_intc_source(desc, gr->enum_id);
            s->next_enum_id = gr->enum_ids[0];

            /* Link each member to the next; id 0 entries are skipped */
            for (k = 1; k < ARRAY_SIZE(gr->enum_ids); k++) {
                if (!gr->enum_ids[k]) {
                    continue;
                }
                s = sh_intc_source(desc, gr->enum_ids[k - 1]);
                s->next_enum_id = gr->enum_ids[k];
            }
            trace_sh_intc_register("group", gr->enum_id, 0xffff,
                                   s->enable_count, s->enable_max);
        }
    }
}

/*
 * Initialise the controller: allocate the source table, wire up the
 * qemu_irq inputs, and map every mask/priority register's P4 and A7
 * aliases into @sysmem.  Always returns 0.
 */
int sh_intc_init(MemoryRegion *sysmem,
                 struct intc_desc *desc,
                 int nr_sources,
                 struct intc_mask_reg *mask_regs,
                 int nr_mask_regs,
                 struct intc_prio_reg *prio_regs,
                 int nr_prio_regs)
{
    unsigned int i, j;

    desc->pending = 0;
    desc->nr_sources = nr_sources;
    desc->mask_regs = mask_regs;
    desc->nr_mask_regs = nr_mask_regs;
    desc->prio_regs = prio_regs;
    desc->nr_prio_regs = nr_prio_regs;
    /* Allocate 4 MemoryRegions per register (2 actions * 2 aliases) */
    desc->iomem_aliases = g_new0(MemoryRegion,
                                 (nr_mask_regs + nr_prio_regs) * 4);

    j = 0; /* running index into iomem_aliases */
    i = sizeof(struct intc_source) * nr_sources;
    desc->sources = g_malloc0(i);

    for (i = 0; i < desc->nr_sources; i++) {
        struct intc_source *source = desc->sources + i;

        source->parent = desc;
    }

    desc->irqs = qemu_allocate_irqs(sh_intc_set_irq, desc, nr_sources);
    /*
     * The flat region spans the whole 32-bit space so that A7 register
     * addresses can be used directly as alias offsets; only the small
     * per-register aliases are actually mapped into sysmem.
     */
    memory_region_init_io(&desc->iomem, NULL, &sh_intc_ops, desc, "intc",
                          0x100000000ULL);

    if (desc->mask_regs) {
        for (i = 0; i < desc->nr_mask_regs; i++) {
            struct intc_mask_reg *mr = desc->mask_regs + i;

            j += sh_intc_register(sysmem, desc, mr->set_reg, "mask", "set", j);
            j += sh_intc_register(sysmem, desc, mr->clr_reg, "mask", "clr", j);
        }
    }

    if (desc->prio_regs) {
        for (i = 0; i < desc->nr_prio_regs; i++) {
            struct intc_prio_reg *pr = desc->prio_regs + i;

            j += sh_intc_register(sysmem, desc, pr->set_reg, "prio", "set", j);
            j += sh_intc_register(sysmem, desc, pr->clr_reg, "prio", "clr", j);
        }
    }

    return 0;
}

/*
 * Assert level <n> IRL interrupt.
 * 0:deassert. 1:lowest priority,... 15:highest priority
 *
 * @opaque is the head of the IRL source chain (linked via next_enum_id);
 * chain position i corresponds to encoded level (level ^ 15).  The source
 * matching the requested level is enabled and asserted (idempotently, by
 * checking current state first); every other chain member that is
 * currently asserted gets deasserted.
 */
void sh_intc_set_irl(void *opaque, int n, int level)
{
    struct intc_source *s = opaque;
    int i, irl = level ^ 15;
    for (i = 0; (s = sh_intc_source(s->parent, s->next_enum_id)); i++) {
        if (i == irl) {
            sh_intc_toggle_source(s, s->enable_count ? 0 : 1,
                                  s->asserted ? 0 : 1);
        } else if (s->asserted) {
            sh_intc_toggle_source(s, 0, -1);
        }
    }
}