Lines Matching +full:non +full:- +full:secure
4 * Copyright (c) 2006-2007 CodeSourcery.
11 * controller, MPCore distributed interrupt controller and ARMv7-M
61 if (!qtest_enabled() && s->num_cpu > 1) { in gic_get_current_cpu()
62 return current_cpu->cpu_index; in gic_get_current_cpu()
77 return s->revision == 2 || s->security_extn; in gic_has_groups()
82 return !gic_is_vcpu(cpu) && s->security_extn && !attrs.secure; in gic_cpu_ns_access()
94 for (irq = 0; irq < s->num_irq; irq++) { in gic_get_best_irq()
118 for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) { in gic_get_best_virq()
119 uint32_t lr_entry = s->h_lr[lr_idx][cpu]; in gic_get_best_virq()
136 * - in the non-virt case, the distributor must be enabled for one of the
138 * - in the virt case, the virtual interface must be enabled.
139 * - in all cases, the (v)CPU interface must be enabled for one of the given
147 if (!virt && !(s->ctlr & group_mask)) { in gic_irq_signaling_enabled()
151 if (virt && !(s->h_hcr[cpu] & R_GICH_HCR_EN_MASK)) { in gic_irq_signaling_enabled()
155 if (!(s->cpu_ctlr[cpu_iface] & group_mask)) { in gic_irq_signaling_enabled()
171 qemu_irq *irq_lines = virt ? s->parent_virq : s->parent_irq; in gic_update_internal()
172 qemu_irq *fiq_lines = virt ? s->parent_vfiq : s->parent_fiq; in gic_update_internal()
174 for (cpu = 0; cpu < s->num_cpu; cpu++) { in gic_update_internal()
177 s->current_pending[cpu_iface] = 1023; in gic_update_internal()
194 s->priority_mask[cpu_iface], in gic_update_internal()
195 s->running_priority[cpu_iface]); in gic_update_internal()
200 if (best_prio < s->priority_mask[cpu_iface]) { in gic_update_internal()
201 s->current_pending[cpu_iface] = best_irq; in gic_update_internal()
202 if (best_prio < s->running_priority[cpu_iface]) { in gic_update_internal()
205 s->cpu_ctlr[cpu_iface] & GICC_CTLR_FIQ_EN) { in gic_update_internal()
259 for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) { in gic_extract_lr_info()
260 uint32_t *entry = &s->h_lr[lr_idx][cpu]; in gic_extract_lr_info()
291 if ((s->h_hcr[cpu] & R_GICH_HCR_UIE_MASK) && (num_valid < 2)) { in gic_compute_misr()
296 if ((s->h_hcr[cpu] & R_GICH_HCR_LRENPIE_MASK) && in gic_compute_misr()
297 ((s->h_hcr[cpu] & R_GICH_HCR_EOICount_MASK) != 0)) { in gic_compute_misr()
302 if ((s->h_hcr[cpu] & R_GICH_HCR_NPIE_MASK) && (num_pending == 0)) { in gic_compute_misr()
307 if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP0EIE_MASK) && in gic_compute_misr()
308 (s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP0)) { in gic_compute_misr()
313 if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP0DIE_MASK) && in gic_compute_misr()
314 !(s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP0)) { in gic_compute_misr()
319 if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP1EIE_MASK) && in gic_compute_misr()
320 (s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP1)) { in gic_compute_misr()
325 if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP1DIE_MASK) && in gic_compute_misr()
326 !(s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP1)) { in gic_compute_misr()
330 s->h_misr[cpu] = value; in gic_compute_misr()
338 for (cpu = 0; cpu < s->num_cpu; cpu++) { in gic_update_maintenance()
340 maint_level = (s->h_hcr[cpu] & R_GICH_HCR_EN_MASK) && s->h_misr[cpu]; in gic_update_maintenance()
343 qemu_set_irq(s->maintenance_irq[cpu], maint_level); in gic_update_maintenance()
385 * [0..N-1] : external interrupts in gic_set_irq()
392 if (irq < (s->num_irq - GIC_INTERNAL)) { in gic_set_irq()
399 irq -= (s->num_irq - GIC_INTERNAL); in gic_set_irq()
412 if (s->revision == REV_11MPCORE) { in gic_set_irq()
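The gic_set_irq() hits above implement the input-line layout its comment describes: indices [0..N-1] are the shared external interrupts, and each further block of 32 lines carries the private (internal) interrupts of one CPU. A hedged, standalone sketch of that decoding follows; the function and parameter names are illustrative only, and the constant 32 stands in for GIC_INTERNAL from the real file.

    /* Illustrative decoder for the input numbering shown in gic_set_irq(). */
    static void decode_gic_input(int index, int num_irq, int *irq, int *cpu)
    {
        int num_external = num_irq - 32;   /* shared lines come first */

        if (index < num_external) {
            *irq = index + 32;             /* shared (external) interrupt */
            *cpu = -1;                     /* no fixed owning CPU */
        } else {
            index -= num_external;
            *cpu = index / 32;             /* owning CPU for this block */
            *irq = index % 32;             /* private interrupt 0..31 */
        }
    }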
425 uint16_t pending_irq = s->current_pending[cpu]; in gic_get_current_pending_irq()
431 * behaves in the same way as a secure access to a GIC with them. in gic_get_current_pending_irq()
433 bool secure = !gic_cpu_ns_access(s, cpu, attrs); in gic_get_current_pending_irq()
435 if (group == 0 && !secure) { in gic_get_current_pending_irq()
436 /* Group0 interrupts hidden from Non-secure access */ in gic_get_current_pending_irq()
439 if (group == 1 && secure && !(s->cpu_ctlr[cpu] & GICC_CTLR_ACK_CTL)) { in gic_get_current_pending_irq()
440 /* Group1 interrupts only seen by Secure access if in gic_get_current_pending_irq()
459 !(s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) && in gic_get_group_priority()
461 bpr = s->abpr[cpu] - 1; in gic_get_group_priority()
464 bpr = s->bpr[cpu]; in gic_get_group_priority()
490 papr = &s->h_apr[gic_get_vcpu_real_id(cpu)]; in gic_activate_irq()
492 papr = &s->nsapr[regno][cpu]; in gic_activate_irq()
494 papr = &s->apr[regno][cpu]; in gic_activate_irq()
499 s->running_priority[cpu] = prio; in gic_activate_irq()
511 uint32_t apr = s->h_apr[gic_get_vcpu_real_id(cpu)]; in gic_get_prio_from_apr_bits()
520 uint32_t apr = s->apr[i][cpu] | s->nsapr[i][cpu]; in gic_get_prio_from_apr_bits()
550 if (s->h_apr[rcpu]) { in gic_drop_prio()
552 s->h_apr[rcpu] &= s->h_apr[rcpu] - 1; in gic_drop_prio()
558 uint32_t *papr = group ? &s->nsapr[i][cpu] : &s->apr[i][cpu]; in gic_drop_prio()
563 *papr &= *papr - 1; in gic_drop_prio()
568 s->running_priority[cpu] = gic_get_prio_from_apr_bits(s, cpu); in gic_drop_prio()
581 assert(s->sgi_pending[irq][cpu] != 0); in gic_clear_pending_sgi()
582 src = ctz32(s->sgi_pending[irq][cpu]); in gic_clear_pending_sgi()
583 s->sgi_pending[irq][cpu] &= ~(1 << src); in gic_clear_pending_sgi()
584 if (s->sgi_pending[irq][cpu] == 0) { in gic_clear_pending_sgi()
616 if (gic_get_priority(s, irq, cpu) >= s->running_priority[cpu]) { in gic_acknowledge_irq()
623 if (s->revision == REV_11MPCORE) { in gic_acknowledge_irq()
659 priBits = s->n_prio_bits; in gic_fullprio_mask()
661 return ~0U << (8 - priBits); in gic_fullprio_mask()
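As a worked example of the gic_fullprio_mask() arithmetic above: 5 implemented priority bits give a mask of 0xf8, 3 bits give 0xe0. A minimal check of that shift (masked to a byte here purely for readability):

    #include <assert.h>

    static unsigned fullprio_mask(unsigned prio_bits)
    {
        return (~0U << (8 - prio_bits)) & 0xff;   /* same shift as the hit above */
    }

    int main(void)
    {
        assert(fullprio_mask(5) == 0xf8);
        assert(fullprio_mask(3) == 0xe0);
        assert(fullprio_mask(8) == 0xff);
        return 0;
    }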
667 if (s->security_extn && !attrs.secure) { in gic_dist_set_priority()
669 return; /* Ignore Non-secure access of Group0 IRQ */ in gic_dist_set_priority()
671 val = 0x80 | (val >> 1); /* Non-secure view */ in gic_dist_set_priority()
677 s->priority1[irq][cpu] = val; in gic_dist_set_priority()
679 s->priority2[(irq) - GIC_INTERNAL] = val; in gic_dist_set_priority()
688 if (s->security_extn && !attrs.secure) { in gic_dist_get_priority()
690 return 0; /* Non-secure access cannot read priority of Group0 IRQ */ in gic_dist_get_priority()
692 prio = (prio << 1) & 0xff; /* Non-secure view */ in gic_dist_get_priority()
701 if (s->priority_mask[cpu] & 0x80) { in gic_set_priority_mask()
705 /* Non-secure write ignored if priority mask is in lower half */ in gic_set_priority_mask()
709 s->priority_mask[cpu] = pmask & gic_fullprio_mask(s, cpu); in gic_set_priority_mask()
714 uint32_t pmask = s->priority_mask[cpu]; in gic_get_priority_mask()
718 /* Priority Mask in upper half, return Non-secure view */ in gic_get_priority_mask()
730 uint32_t ret = s->cpu_ctlr[cpu]; in gic_get_cpu_control()
753 if (s->revision == 2) { in gic_set_cpu_control()
756 s->cpu_ctlr[cpu] &= ~mask; in gic_set_cpu_control()
757 s->cpu_ctlr[cpu] |= (value << 1) & mask; in gic_set_cpu_control()
759 if (s->revision == 2) { in gic_set_cpu_control()
760 mask = s->security_extn ? GICC_CTLR_V2_S_MASK : GICC_CTLR_V2_MASK; in gic_set_cpu_control()
762 mask = s->security_extn ? GICC_CTLR_V1_S_MASK : GICC_CTLR_V1_MASK; in gic_set_cpu_control()
764 s->cpu_ctlr[cpu] = value & mask; in gic_set_cpu_control()
768 (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP0) ? "En" : "Dis", in gic_set_cpu_control()
769 (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP1) ? "En" : "Dis"); in gic_set_cpu_control()
774 if ((s->revision != REV_11MPCORE) && (s->running_priority[cpu] > 0xff)) { in gic_get_running_priority()
780 if (s->running_priority[cpu] & 0x80) { in gic_get_running_priority()
781 /* Running priority in upper half of range: return the Non-secure in gic_get_running_priority()
784 return s->running_priority[cpu] << 1; in gic_get_running_priority()
790 return s->running_priority[cpu]; in gic_get_running_priority()
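Taken together, the gic_dist_set_priority(), gic_dist_get_priority(), gic_get_priority_mask() and gic_get_running_priority() hits above all apply the same Non-secure aliasing of the priority space: NS writes are squeezed into the upper (Secure) half, and NS reads shift the stored value back down. A minimal standalone sketch of that mapping (the helper names are mine, not the file's):

    #include <stdint.h>
    #include <assert.h>

    /* Non-secure write: an 8-bit NS priority lands in 0x80..0xff. */
    static uint8_t prio_from_ns_write(uint8_t ns_val)
    {
        return 0x80 | (ns_val >> 1);
    }

    /* Non-secure read: the stored priority is shifted back into 0x00..0xfe. */
    static uint8_t prio_as_ns_read(uint8_t stored)
    {
        return (stored << 1) & 0xff;
    }

    int main(void)
    {
        assert(prio_from_ns_write(0x40) == 0xa0);
        assert(prio_as_ns_read(0xa0) == 0x40);
        return 0;
    }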
799 if (s->revision != 2) { in gic_eoi_split()
800 /* Before GICv2 prio-drop and deactivate are not separable */ in gic_eoi_split()
804 return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE_NS; in gic_eoi_split()
806 return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE; in gic_eoi_split()
813 if (irq >= GIC_MAXIRQ || (!gic_is_vcpu(cpu) && irq >= s->num_irq)) { in gic_deactivate_irq()
818 * 2. If software writes the number of a non-existent interrupt in gic_deactivate_irq()
838 s->h_hcr[rcpu] += 1 << R_GICH_HCR_EOICount_SHIFT; in gic_deactivate_irq()
850 DPRINTF("Non-secure DI for Group0 interrupt %d ignored\n", irq); in gic_deactivate_irq()
867 bool prio_drop = s->running_priority[cpu] < 0x100; in gic_complete_irq()
880 * - V_CTRL.EOIMode is false (no EOI split), in gic_complete_irq()
881 * - The call to gic_drop_prio() cleared a bit in GICH_APR, in gic_complete_irq()
882 * - This vIRQ does not have an LR entry which is either in gic_complete_irq()
887 s->h_hcr[rcpu] += 1 << R_GICH_HCR_EOICount_SHIFT; in gic_complete_irq()
897 if (irq >= s->num_irq) { in gic_complete_irq()
901 * 2. If software writes the number of a non-existent interrupt in gic_complete_irq()
908 if (s->running_priority[cpu] == 0x100) { in gic_complete_irq()
912 if (s->revision == REV_11MPCORE) { in gic_complete_irq()
926 DPRINTF("Non-secure EOI for Group0 interrupt %d ignored\n", irq); in gic_complete_irq()
930 /* Secure EOI with GICC_CTLR.AckCtl == 0 when the IRQ is a Group 1 in gic_complete_irq()
937 /* In GICv2 the guest can choose to split priority-drop and deactivate */ in gic_complete_irq()
958 /* We rely here on the only non-zero bits being in byte 0 */ in gic_dist_readb()
959 if (s->security_extn && !attrs.secure) { in gic_dist_readb()
963 return extract32(s->ctlr, 1, 1); in gic_dist_readb()
965 return s->ctlr; in gic_dist_readb()
970 return ((s->num_irq / 32) - 1) | ((s->num_cpu - 1) << 5); in gic_dist_readb()
974 return (s->security_extn << 2); in gic_dist_readb()
994 if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) { in gic_dist_readb()
996 irq = (offset - 0x080) * 8; in gic_dist_readb()
997 if (irq >= s->num_irq) { in gic_dist_readb()
1012 irq = (offset - 0x100) * 8; in gic_dist_readb()
1014 irq = (offset - 0x180) * 8; in gic_dist_readb()
1015 if (irq >= s->num_irq) in gic_dist_readb()
1019 if (s->security_extn && !attrs.secure && in gic_dist_readb()
1021 continue; /* Ignore Non-secure access of Group0 IRQ */ in gic_dist_readb()
1031 irq = (offset - 0x200) * 8; in gic_dist_readb()
1033 irq = (offset - 0x280) * 8; in gic_dist_readb()
1034 if (irq >= s->num_irq) in gic_dist_readb()
1039 if (s->security_extn && !attrs.secure && in gic_dist_readb()
1041 continue; /* Ignore Non-secure access of Group0 IRQ */ in gic_dist_readb()
1051 irq = (offset - 0x300) * 8; in gic_dist_readb()
1052 } else if (s->revision == 2) { in gic_dist_readb()
1053 irq = (offset - 0x380) * 8; in gic_dist_readb()
1058 if (irq >= s->num_irq) in gic_dist_readb()
1063 if (s->security_extn && !attrs.secure && in gic_dist_readb()
1065 continue; /* Ignore Non-secure access of Group0 IRQ */ in gic_dist_readb()
1074 irq = (offset - 0x400); in gic_dist_readb()
1075 if (irq >= s->num_irq) in gic_dist_readb()
1080 if (s->num_cpu == 1 && s->revision != REV_11MPCORE) { in gic_dist_readb()
1084 irq = (offset - 0x800); in gic_dist_readb()
1085 if (irq >= s->num_irq) { in gic_dist_readb()
1088 if (irq < 29 && s->revision == REV_11MPCORE) { in gic_dist_readb()
1098 irq = (offset - 0xc00) * 4; in gic_dist_readb()
1099 if (irq >= s->num_irq) in gic_dist_readb()
1103 if (s->security_extn && !attrs.secure && in gic_dist_readb()
1105 continue; /* Ignore Non-secure access of Group0 IRQ */ in gic_dist_readb()
1118 if (s->revision == REV_11MPCORE) { in gic_dist_readb()
1124 irq = (offset - 0xf10); in gic_dist_readb()
1126 irq = (offset - 0xf20); in gic_dist_readb()
1130 if (s->security_extn && !attrs.secure && in gic_dist_readb()
1132 res = 0; /* Ignore Non-secure access of Group0 IRQ */ in gic_dist_readb()
1134 res = s->sgi_pending[irq][cpu]; in gic_dist_readb()
1142 switch (s->revision) { in gic_dist_readb()
1144 res = gic_id_11mpcore[(offset - 0xfd0) >> 2]; in gic_dist_readb()
1147 res = gic_id_gicv1[(offset - 0xfd0) >> 2]; in gic_dist_readb()
1150 res = gic_id_gicv2[(offset - 0xfd0) >> 2]; in gic_dist_readb()
1202 if (s->security_extn && !attrs.secure) { in gic_dist_writeb()
1204 s->ctlr = deposit32(s->ctlr, 1, 1, value); in gic_dist_writeb()
1206 s->ctlr = value & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1); in gic_dist_writeb()
1208 s->ctlr = value & GICD_CTLR_EN_GRP0; in gic_dist_writeb()
1211 s->ctlr & GICD_CTLR_EN_GRP0 ? "En" : "Dis", in gic_dist_writeb()
1212 s->ctlr & GICD_CTLR_EN_GRP1 ? "En" : "Dis"); in gic_dist_writeb()
1216 /* Interrupt Group Registers: RAZ/WI for NS access to secure in gic_dist_writeb()
1219 if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) { in gic_dist_writeb()
1221 irq = (offset - 0x80) * 8; in gic_dist_writeb()
1222 if (irq >= s->num_irq) { in gic_dist_writeb()
1229 /* Group1 (Non-secure) */ in gic_dist_writeb()
1232 /* Group0 (Secure) */ in gic_dist_writeb()
1242 irq = (offset - 0x100) * 8; in gic_dist_writeb()
1243 if (irq >= s->num_irq) in gic_dist_writeb()
1256 if (s->security_extn && !attrs.secure && in gic_dist_writeb()
1258 continue; /* Ignore Non-secure access of Group0 IRQ */ in gic_dist_writeb()
1272 if (s->revision == REV_11MPCORE in gic_dist_writeb()
1282 irq = (offset - 0x180) * 8; in gic_dist_writeb()
1283 if (irq >= s->num_irq) in gic_dist_writeb()
1293 if (s->security_extn && !attrs.secure && in gic_dist_writeb()
1295 continue; /* Ignore Non-secure access of Group0 IRQ */ in gic_dist_writeb()
1307 irq = (offset - 0x200) * 8; in gic_dist_writeb()
1308 if (irq >= s->num_irq) in gic_dist_writeb()
1319 if (s->security_extn && !attrs.secure && in gic_dist_writeb()
1321 continue; /* Ignore Non-secure access of Group0 IRQ */ in gic_dist_writeb()
1329 irq = (offset - 0x280) * 8; in gic_dist_writeb()
1330 if (irq >= s->num_irq) in gic_dist_writeb()
1337 if (s->security_extn && !attrs.secure && in gic_dist_writeb()
1339 continue; /* Ignore Non-secure access of Group0 IRQ */ in gic_dist_writeb()
1343 for per-CPU interrupts. It's unclear whether this is the in gic_dist_writeb()
1351 if (s->revision != 2) { in gic_dist_writeb()
1355 irq = (offset - 0x300) * 8; in gic_dist_writeb()
1356 if (irq >= s->num_irq) { in gic_dist_writeb()
1360 /* This register is banked per-cpu for PPIs */ in gic_dist_writeb()
1364 if (s->security_extn && !attrs.secure && in gic_dist_writeb()
1366 continue; /* Ignore Non-secure access of Group0 IRQ */ in gic_dist_writeb()
1375 if (s->revision != 2) { in gic_dist_writeb()
1379 irq = (offset - 0x380) * 8; in gic_dist_writeb()
1380 if (irq >= s->num_irq) { in gic_dist_writeb()
1384 /* This register is banked per-cpu for PPIs */ in gic_dist_writeb()
1388 if (s->security_extn && !attrs.secure && in gic_dist_writeb()
1390 continue; /* Ignore Non-secure access of Group0 IRQ */ in gic_dist_writeb()
1399 irq = (offset - 0x400); in gic_dist_writeb()
1400 if (irq >= s->num_irq) in gic_dist_writeb()
1407 if (s->num_cpu != 1 || s->revision == REV_11MPCORE) { in gic_dist_writeb()
1408 irq = (offset - 0x800); in gic_dist_writeb()
1409 if (irq >= s->num_irq) { in gic_dist_writeb()
1412 if (irq < 29 && s->revision == REV_11MPCORE) { in gic_dist_writeb()
1417 s->irq_target[irq] = value & ALL_CPU_MASK; in gic_dist_writeb()
1418 if (irq >= GIC_INTERNAL && s->irq_state[irq].pending) { in gic_dist_writeb()
1423 s->irq_state[irq].pending = value & ALL_CPU_MASK; in gic_dist_writeb()
1428 irq = (offset - 0xc00) * 4; in gic_dist_writeb()
1429 if (irq >= s->num_irq) in gic_dist_writeb()
1434 if (s->security_extn && !attrs.secure && in gic_dist_writeb()
1436 continue; /* Ignore Non-secure access of Group0 IRQ */ in gic_dist_writeb()
1439 if (s->revision == REV_11MPCORE) { in gic_dist_writeb()
1453 /* 0xf00 is only handled for 32-bit writes. */ in gic_dist_writeb()
1457 if (s->revision == REV_11MPCORE) { in gic_dist_writeb()
1460 irq = (offset - 0xf10); in gic_dist_writeb()
1462 if (!s->security_extn || attrs.secure || in gic_dist_writeb()
1464 s->sgi_pending[irq][cpu] &= ~value; in gic_dist_writeb()
1465 if (s->sgi_pending[irq][cpu] == 0) { in gic_dist_writeb()
1471 if (s->revision == REV_11MPCORE) { in gic_dist_writeb()
1474 irq = (offset - 0xf20); in gic_dist_writeb()
1476 if (!s->security_extn || attrs.secure || in gic_dist_writeb()
1479 s->sgi_pending[irq][cpu] |= value; in gic_dist_writeb()
1528 s->sgi_pending[irq][target_cpu] |= (1 << cpu); in gic_dist_writel()
1567 return s->nsapr[regno + 2][cpu]; in gic_apr_ns_view()
1572 return s->nsapr[regno + 1][cpu]; in gic_apr_ns_view()
1577 return extract32(s->nsapr[0][cpu], 16, 16); in gic_apr_ns_view()
1582 return extract32(s->nsapr[0][cpu], 8, 8); in gic_apr_ns_view()
1598 s->nsapr[regno + 2][cpu] = value; in gic_apr_write_ns_view()
1603 s->nsapr[regno + 1][cpu] = value; in gic_apr_write_ns_view()
1608 s->nsapr[0][cpu] = deposit32(s->nsapr[0][cpu], 16, 16, value); in gic_apr_write_ns_view()
1613 s->nsapr[0][cpu] = deposit32(s->nsapr[0][cpu], 8, 8, value); in gic_apr_write_ns_view()
1633 if (s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) { in gic_cpu_read()
1635 *data = MIN(s->bpr[cpu] + 1, 7); in gic_cpu_read()
1637 /* BPR is banked. Non-secure copy stored in ABPR. */ in gic_cpu_read()
1638 *data = s->abpr[cpu]; in gic_cpu_read()
1641 *data = s->bpr[cpu]; in gic_cpu_read()
1656 * With security extensions, secure access: ABPR (alias of NS BPR) in gic_cpu_read()
1662 *data = s->abpr[cpu]; in gic_cpu_read()
1667 int regno = (offset - 0xd0) / 4; in gic_cpu_read()
1670 if (regno >= nr_aprs || s->revision != 2) { in gic_cpu_read()
1673 *data = s->h_apr[gic_get_vcpu_real_id(cpu)]; in gic_cpu_read()
1678 *data = s->apr[regno][cpu]; in gic_cpu_read()
1684 int regno = (offset - 0xe0) / 4; in gic_cpu_read()
1686 if (regno >= GIC_NR_APRS || s->revision != 2 || !gic_has_groups(s) || in gic_cpu_read()
1690 *data = s->nsapr[regno][cpu]; in gic_cpu_read()
1695 if (s->revision == REV_11MPCORE) { in gic_cpu_read()
1700 *data = (s->revision << 16) | 0x43b; in gic_cpu_read()
1730 if (s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) { in gic_cpu_write()
1734 s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR); in gic_cpu_write()
1738 s->bpr[cpu] = MAX(value & 0x7, min_bpr); in gic_cpu_write()
1749 s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR); in gic_cpu_write()
1754 int regno = (offset - 0xd0) / 4; in gic_cpu_write()
1757 if (regno >= nr_aprs || s->revision != 2) { in gic_cpu_write()
1761 s->h_apr[gic_get_vcpu_real_id(cpu)] = value; in gic_cpu_write()
1766 s->apr[regno][cpu] = value; in gic_cpu_write()
1768 s->running_priority[cpu] = gic_get_prio_from_apr_bits(s, cpu); in gic_cpu_write()
1773 int regno = (offset - 0xe0) / 4; in gic_cpu_write()
1775 if (regno >= GIC_NR_APRS || s->revision != 2) { in gic_cpu_write()
1784 s->nsapr[regno][cpu] = value; in gic_cpu_write()
1785 s->running_priority[cpu] = gic_get_prio_from_apr_bits(s, cpu); in gic_cpu_write()
1831 int id = (backref - s->backref); in gic_do_cpu_read()
1841 int id = (backref - s->backref); in gic_do_cpu_write()
1867 for (lr_idx = lr_start; lr_idx < s->num_lrs; lr_idx++) { in gic_compute_eisr()
1868 uint32_t *entry = &s->h_lr[lr_idx][cpu]; in gic_compute_eisr()
1869 ret = deposit32(ret, lr_idx - lr_start, 1, in gic_compute_eisr()
1881 for (lr_idx = lr_start; lr_idx < s->num_lrs; lr_idx++) { in gic_compute_elrsr()
1882 uint32_t *entry = &s->h_lr[lr_idx][cpu]; in gic_compute_elrsr()
1883 ret = deposit32(ret, lr_idx - lr_start, 1, in gic_compute_elrsr()
1904 s->abpr[vcpu] = MAX(abpr, GIC_VIRT_MIN_ABPR); in gic_vmcr_write()
1905 s->bpr[vcpu] = MAX(bpr, GIC_VIRT_MIN_BPR); in gic_vmcr_write()
1917 *data = s->h_hcr[cpu]; in gic_hyp_read()
1921 *data = FIELD_DP32(0, GICH_VTR, ListRegs, s->num_lrs - 1); in gic_hyp_read()
1923 GIC_VIRT_MAX_GROUP_PRIO_BITS - 1); in gic_hyp_read()
1925 (7 - GIC_VIRT_MIN_BPR) - 1); in gic_hyp_read()
1930 extract32(s->cpu_ctlr[vcpu], 0, 10)); in gic_hyp_read()
1931 *data = FIELD_DP32(*data, GICH_VMCR, VMABP, s->abpr[vcpu]); in gic_hyp_read()
1932 *data = FIELD_DP32(*data, GICH_VMCR, VMBP, s->bpr[vcpu]); in gic_hyp_read()
1934 extract32(s->priority_mask[vcpu], 3, 5)); in gic_hyp_read()
1938 *data = s->h_misr[cpu]; in gic_hyp_read()
1943 *data = gic_compute_eisr(s, cpu, (addr - A_GICH_EISR0) * 8); in gic_hyp_read()
1948 *data = gic_compute_elrsr(s, cpu, (addr - A_GICH_ELRSR0) * 8); in gic_hyp_read()
1952 *data = s->h_apr[cpu]; in gic_hyp_read()
1957 int lr_idx = (addr - A_GICH_LR0) / 4; in gic_hyp_read()
1959 if (lr_idx > s->num_lrs) { in gic_hyp_read()
1962 *data = s->h_lr[lr_idx][cpu]; in gic_hyp_read()
1987 s->h_hcr[cpu] = value & GICH_HCR_MASK; in gic_hyp_write()
1995 s->h_apr[cpu] = value; in gic_hyp_write()
1996 s->running_priority[vcpu] = gic_get_prio_from_apr_bits(s, vcpu); in gic_hyp_write()
2001 int lr_idx = (addr - A_GICH_LR0) / 4; in gic_hyp_write()
2003 if (lr_idx > s->num_lrs) { in gic_hyp_write()
2007 s->h_lr[lr_idx][cpu] = value & GICH_LR_MASK; in gic_hyp_write()
2008 trace_gic_lr_entry(cpu, lr_idx, s->h_lr[lr_idx][cpu]); in gic_hyp_write()
2044 int id = (backref - s->backref); in gic_do_hyp_read()
2055 int id = (backref - s->backref); in gic_do_hyp_write()
2108 agc->parent_realize(dev, &local_err); in arm_gic_realize()
2120 if (s->n_prio_bits > GIC_MAX_PRIORITY_BITS || in arm_gic_realize()
2121 (s->virt_extn ? s->n_prio_bits < GIC_VIRT_MAX_GROUP_PRIO_BITS : in arm_gic_realize()
2122 s->n_prio_bits < GIC_MIN_PRIORITY_BITS)) { in arm_gic_realize()
2123 error_setg(errp, "num-priority-bits cannot be greater than %d" in arm_gic_realize()
2125 s->virt_extn ? GIC_VIRT_MAX_GROUP_PRIO_BITS : in arm_gic_realize()
2130 /* This creates distributor, main CPU interface (s->cpuiomem[0]) and if in arm_gic_realize()
2132 * interface (s->vifaceiomem[0]) and virtual CPU interface). in arm_gic_realize()
2136 /* Extra core-specific regions for the CPU interfaces. This is in arm_gic_realize()
2137 * necessary for "franken-GIC" implementations, for example on in arm_gic_realize()
2144 for (i = 0; i < s->num_cpu; i++) { in arm_gic_realize()
2145 s->backref[i] = s; in arm_gic_realize()
2146 memory_region_init_io(&s->cpuiomem[i+1], OBJECT(s), &gic_cpu_ops, in arm_gic_realize()
2147 &s->backref[i], "gic_cpu", 0x100); in arm_gic_realize()
2148 sysbus_init_mmio(sbd, &s->cpuiomem[i+1]); in arm_gic_realize()
2151 /* Extra core-specific regions for virtual interfaces. This is required by in arm_gic_realize()
2154 if (s->virt_extn) { in arm_gic_realize()
2155 for (i = 0; i < s->num_cpu; i++) { in arm_gic_realize()
2156 memory_region_init_io(&s->vifaceiomem[i + 1], OBJECT(s), in arm_gic_realize()
2157 &gic_viface_ops, &s->backref[i], in arm_gic_realize()
2159 sysbus_init_mmio(sbd, &s->vifaceiomem[i + 1]); in arm_gic_realize()
2170 device_class_set_parent_realize(dc, arm_gic_realize, &agc->parent_realize); in arm_gic_class_init()
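Finally, the gic_do_cpu_read()/gic_do_cpu_write() and arm_gic_realize() hits show the "backref" pattern behind the extra per-CPU regions: realize fills s->backref[i] with the state pointer and registers &s->backref[i] as each region's opaque, so a handler can recover both the state and the CPU index by pointer subtraction. A generic, self-contained sketch of that idea (the types and names here are illustrative, not QEMU's):

    #include <stdio.h>

    #define NUM_CPU 4

    typedef struct DevState DevState;
    struct DevState {
        DevState *backref[NUM_CPU];   /* backref[i] == the device itself */
        int counter[NUM_CPU];
    };

    /* The per-CPU region is registered with opaque = &s->backref[i]; the
     * handler recovers the device and the CPU index from that pointer. */
    static void percpu_write(void *opaque, int value)
    {
        DevState **backref = opaque;
        DevState *s = *backref;
        int cpu = backref - s->backref;   /* slot index == CPU number */

        s->counter[cpu] += value;
    }

    int main(void)
    {
        static DevState s;

        for (int i = 0; i < NUM_CPU; i++) {
            s.backref[i] = &s;
        }
        percpu_write(&s.backref[2], 5);
        printf("cpu2 counter = %d\n", s.counter[2]);
        return 0;
    }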