/*
 * ARM GICv3 emulation: Redistributor
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited.
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "gicv3_internal.h"

static uint32_t mask_group(GICv3CPUState *cs, MemTxAttrs attrs)
{
    /* Return a 32-bit mask which should be applied for this set of 32
     * interrupts; each bit is 1 if access is permitted by the
     * combination of attrs.secure and GICR_GROUPR. (GICR_NSACR does
     * not affect config register accesses, unlike GICD_NSACR.)
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* bits for Group 0 or Secure Group 1 interrupts are RAZ/WI */
        return cs->gicr_igroupr0;
    }
    return 0xFFFFFFFFU;
}

static int gicr_ns_access(GICv3CPUState *cs, int irq)
{
    /* Return the 2-bit NSACR.NS_access field for this SGI */
    assert(irq < 16);
    return extract32(cs->gicr_nsacr, irq * 2, 2);
}

static void gicr_write_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                  uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement a plain write to a whole bitmap
     * register, where the written value replaces the old contents
     * (subject to the group-access mask)
     */
    val &= mask_group(cs, attrs);
    *reg = val;
    gicv3_redist_update(cs);
}

static void gicr_write_set_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                      uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "set-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg |= val;
    gicv3_redist_update(cs);
}

static void gicr_write_clear_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                        uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "clear-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg &= ~val;
    gicv3_redist_update(cs);
}

static uint32_t gicr_read_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                     uint32_t reg)
{
    reg &= mask_group(cs, attrs);
    return reg;
}

static bool vcpu_resident(GICv3CPUState *cs, uint64_t vptaddr)
{
    /*
     * Return true if a vCPU is resident, which is defined by
     * whether the GICR_VPENDBASER register is marked VALID and
     * has the right virtual pending table address.
     */
    if (!FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID)) {
        return false;
    }
    return vptaddr == (cs->gicr_vpendbaser & R_GICR_VPENDBASER_PHYADDR_MASK);
}

/**
 * update_for_one_lpi: Update pending information if this LPI is better
 *
 * @cs: GICv3CPUState
 * @irq: interrupt to look up in the LPI Configuration table
 * @ctbase: physical address of the LPI Configuration table to use
 * @ds: true if priority value should not be shifted
 * @hpp: points to pending information to update
 *
 * Look up @irq in the Configuration table specified by @ctbase
 * to see if it is enabled and what its priority is. If it is an
 * enabled interrupt with a higher priority than that currently
 * recorded in @hpp, update @hpp.
 */
static void update_for_one_lpi(GICv3CPUState *cs, int irq,
                               uint64_t ctbase, bool ds, PendingIrq *hpp)
{
    uint8_t lpite;
    uint8_t prio;

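    /*
     * Each LPI has a one-byte entry in the Configuration table:
     * bits [7:2] hold the priority and bit [0] is the enable bit.
     * INTIDs below GICV3_LPI_INTID_START (8192) are not LPIs, which
     * is why that offset is subtracted before indexing the table.
     */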
    address_space_read(&cs->gic->dma_as,
                       ctbase + ((irq - GICV3_LPI_INTID_START) * sizeof(lpite)),
                       MEMTXATTRS_UNSPECIFIED, &lpite, sizeof(lpite));

    if (!(lpite & LPI_CTE_ENABLED)) {
        return;
    }

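    /*
     * When security is supported (GICD_CTLR.DS == 0), LPI priorities
     * are non-secure, so the value from the table is presented in its
     * non-secure view: halved and mapped into the 0x80..0xff range.
     */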
    if (ds) {
        prio = lpite & LPI_PRIORITY_MASK;
    } else {
        prio = ((lpite & LPI_PRIORITY_MASK) >> 1) | 0x80;
    }

    if ((prio < hpp->prio) ||
        ((prio == hpp->prio) && (irq <= hpp->irq))) {
        hpp->irq = irq;
        hpp->prio = prio;
        hpp->nmi = false;
        /* LPIs and vLPIs are always non-secure Grp1 interrupts */
        hpp->grp = GICV3_G1NS;
    }
}

/**
 * update_for_all_lpis: Fully scan LPI tables and find best pending LPI
 *
 * @cs: GICv3CPUState
 * @ptbase: physical address of LPI Pending table
 * @ctbase: physical address of LPI Configuration table
 * @ptsizebits: size of tables, specified as number of interrupt ID bits minus 1
 * @ds: true if priority value should not be shifted
 * @hpp: points to pending information to set
 *
 * Recalculate the highest priority pending enabled LPI from scratch,
 * and set @hpp accordingly.
 *
 * We scan the LPI pending table @ptbase; for each pending LPI, we read the
 * corresponding entry in the LPI configuration table @ctbase to extract
 * the priority and enabled information.
 *
 * We take @ptsizebits in the form idbits-1 because this is the way that
 * LPI table sizes are architecturally specified in GICR_PROPBASER.IDBits
 * and in the VMAPP command's VPT_size field.
 */
static void update_for_all_lpis(GICv3CPUState *cs, uint64_t ptbase,
                                uint64_t ctbase, unsigned ptsizebits,
                                bool ds, PendingIrq *hpp)
{
    AddressSpace *as = &cs->gic->dma_as;
    uint8_t pend;
    uint32_t pendt_size = (1ULL << (ptsizebits + 1));
    int i, bit;

    hpp->prio = 0xff;
    hpp->nmi = false;

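    /*
     * One bit per interrupt ID: scan the table a byte at a time. The
     * first 1KB covers INTIDs 0..8191, which are not LPIs, so the scan
     * starts at GICV3_LPI_INTID_START / 8.
     */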
    for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
        address_space_read(as, ptbase + i, MEMTXATTRS_UNSPECIFIED, &pend, 1);
        while (pend) {
            bit = ctz32(pend);
            update_for_one_lpi(cs, i * 8 + bit, ctbase, ds, hpp);
            pend &= ~(1 << bit);
        }
    }
}

/**
 * set_pending_table_bit: Set or clear pending bit for an LPI
 *
 * @cs: GICv3CPUState
 * @ptbase: physical address of LPI Pending table
 * @irq: LPI to change pending state for
 * @level: false to clear pending state, true to set
 *
 * Returns true if we needed to do something, false if the pending bit
 * was already at @level.
 */
static bool set_pending_table_bit(GICv3CPUState *cs, uint64_t ptbase,
                                  int irq, bool level)
{
    AddressSpace *as = &cs->gic->dma_as;
    uint64_t addr = ptbase + irq / 8;
    uint8_t pend;

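    /* One bit per INTID: irq / 8 selects the byte, irq % 8 the bit */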
    address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED, &pend, 1);
    if (extract32(pend, irq % 8, 1) == level) {
        /* Bit already at requested state, no action required */
        return false;
    }
    pend = deposit32(pend, irq % 8, 1, level ? 1 : 0);
    address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED, &pend, 1);
    return true;
}

static uint8_t gicr_read_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs,
                                    int irq)
{
    /* Read the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    uint32_t prio;

    prio = cs->gicr_ipriorityr[irq];

    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return 0;
        }
        /* NS view of the interrupt priority */
        prio = (prio << 1) & 0xff;
    }
    return prio;
}

static void gicr_write_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq,
                                  uint8_t value)
{
    /* Write the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return;
        }
        /* NS view of the interrupt priority */
        value = 0x80 | (value >> 1);
    }
    cs->gicr_ipriorityr[irq] = value;
}

static void gicv3_redist_update_vlpi_only(GICv3CPUState *cs)
{
    uint64_t ptbase, ctbase, idbits;

    if (!FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID)) {
        cs->hppvlpi.prio = 0xff;
        cs->hppvlpi.nmi = false;
        return;
    }

    ptbase = cs->gicr_vpendbaser & R_GICR_VPENDBASER_PHYADDR_MASK;
    ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
    idbits = FIELD_EX64(cs->gicr_vpropbaser, GICR_VPROPBASER, IDBITS);

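    /*
     * @ds is passed as true so that the vLPI priority is taken from
     * the table unshifted: virtual LPI priorities are not subject to
     * the non-secure priority view adjustment.
     */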
    update_for_all_lpis(cs, ptbase, ctbase, idbits, true, &cs->hppvlpi);
}

static void gicv3_redist_update_vlpi(GICv3CPUState *cs)
{
    gicv3_redist_update_vlpi_only(cs);
    gicv3_cpuif_virt_irq_fiq_update(cs);
}

static void gicr_write_vpendbaser(GICv3CPUState *cs, uint64_t newval)
{
    /* Write @newval to GICR_VPENDBASER, handling its effects */
    bool oldvalid = FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID);
    bool newvalid = FIELD_EX64(newval, GICR_VPENDBASER, VALID);
    bool pendinglast;

    /*
     * The DIRTY bit is read-only and for us is always zero;
     * other fields are writable.
     */
    newval &= R_GICR_VPENDBASER_INNERCACHE_MASK |
        R_GICR_VPENDBASER_SHAREABILITY_MASK |
        R_GICR_VPENDBASER_PHYADDR_MASK |
        R_GICR_VPENDBASER_OUTERCACHE_MASK |
        R_GICR_VPENDBASER_PENDINGLAST_MASK |
        R_GICR_VPENDBASER_IDAI_MASK |
        R_GICR_VPENDBASER_VALID_MASK;

    if (oldvalid && newvalid) {
        /*
         * Changing other fields while VALID is 1 is UNPREDICTABLE;
         * we choose to log and ignore the write.
         */
        if (cs->gicr_vpendbaser ^ newval) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Changing GICR_VPENDBASER when VALID=1 "
                          "is UNPREDICTABLE\n", __func__);
        }
        return;
    }
    if (!oldvalid && !newvalid) {
        cs->gicr_vpendbaser = newval;
        return;
    }

    if (newvalid) {
        /*
         * Valid going from 0 to 1: update hppvlpi from tables.
         * If IDAI is 0 we are allowed to use the info we cached in
         * the IMPDEF area of the table.
         * PendingLast is RES1 when we make this transition.
         */
        pendinglast = true;
    } else {
        /*
         * Valid going from 1 to 0:
         * Set PendingLast if there was a pending enabled interrupt
         * for the vPE that was just descheduled.
         * If we cache info in the IMPDEF area, write it out here.
         */
        pendinglast = cs->hppvlpi.prio != 0xff;
    }

    newval = FIELD_DP64(newval, GICR_VPENDBASER, PENDINGLAST, pendinglast);
    cs->gicr_vpendbaser = newval;
    gicv3_redist_update_vlpi(cs);
}

static MemTxResult gicr_readb(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        *data = gicr_read_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writeb(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        gicr_write_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR, value);
        gicv3_redist_update(cs);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        *data = cs->gicr_ctlr;
        return MEMTX_OK;
    case GICR_IIDR:
        *data = gicv3_iidr();
        return MEMTX_OK;
    case GICR_TYPER:
        *data = extract64(cs->gicr_typer, 0, 32);
        return MEMTX_OK;
    case GICR_TYPER + 4:
        *data = extract64(cs->gicr_typer, 32, 32);
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for us (this is an optional register and our implementation
         * does not track RO/WO/reserved violations to report them to the guest)
         */
        *data = 0;
        return MEMTX_OK;
    case GICR_WAKER:
        *data = cs->gicr_waker;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = extract64(cs->gicr_propbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        *data = extract64(cs->gicr_propbaser, 32, 32);
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = extract64(cs->gicr_pendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        *data = extract64(cs->gicr_pendbaser, 32, 32);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igroupr0;
        return MEMTX_OK;
    case GICR_ISENABLER0:
    case GICR_ICENABLER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_ienabler0);
        return MEMTX_OK;
    case GICR_ISPENDR0:
    case GICR_ICPENDR0:
    {
        /* The pending register reads as the logical OR of the pending
         * latch and the input line level for level-triggered interrupts.
         */
        uint32_t val = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
        *data = gicr_read_bitmap_reg(cs, attrs, val);
        return MEMTX_OK;
    }
    case GICR_ISACTIVER0:
    case GICR_ICACTIVER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_iactiver0);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;
        uint32_t value = 0;

        for (i = irq + 3; i >= irq; i--) {
            value <<= 8;
            value |= gicr_read_ipriorityr(cs, attrs, i);
        }
        *data = value;
        return MEMTX_OK;
    }
    case GICR_INMIR0:
        *data = cs->gic->nmi_support ?
                gicr_read_bitmap_reg(cs, attrs, cs->gicr_inmir0) : 0;
        return MEMTX_OK;
    case GICR_ICFGR0:
    case GICR_ICFGR1:
    {
        /* Our edge_trigger bitmap is one bit per irq; take the correct
         * half of it, and spread it out into the odd bits.
         */
        uint32_t value;

        value = cs->edge_trigger & mask_group(cs, attrs);
        value = extract32(value, (offset == GICR_ICFGR1) ? 16 : 0, 16);
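        /*
         * e.g. half_shuffle32(0x0000ffff) == 0x55555555; the shift
         * left by one then leaves the config bits in the odd bit
         * positions, which is where ICFGR holds them (bit 2n+1 for
         * interrupt n).
         */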
        value = half_shuffle32(value) << 1;
        *data = value;
        return MEMTX_OK;
    }
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igrpmodr0;
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_nsacr;
        return MEMTX_OK;
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        *data = gicv3_idreg(cs->gic, offset - GICR_IDREGS, GICV3_PIDR0_REDIST);
        return MEMTX_OK;
    /*
     * VLPI frame registers. We don't need a version check for
     * VPROPBASER and VPENDBASER because gicv3_redist_size() will
     * prevent pre-v4 GIC from passing us offsets this high.
     */
    case GICR_VPROPBASER:
        *data = extract64(cs->gicr_vpropbaser, 0, 32);
        return MEMTX_OK;
    case GICR_VPROPBASER + 4:
        *data = extract64(cs->gicr_vpropbaser, 32, 32);
        return MEMTX_OK;
    case GICR_VPENDBASER:
        *data = extract64(cs->gicr_vpendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_VPENDBASER + 4:
        *data = extract64(cs->gicr_vpendbaser, 32, 32);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        /* For our implementation, GICR_TYPER.DPGS is 0 and so all
         * the DPG bits are RAZ/WI. We don't do anything asynchronously,
         * so UWP and RWP are RAZ/WI. GICR_TYPER.LPIS is 1 (we
         * implement LPIs) so Enable_LPIs is programmable.
         */
        if (cs->gicr_typer & GICR_TYPER_PLPIS) {
            if (value & GICR_CTLR_ENABLE_LPIS) {
                cs->gicr_ctlr |= GICR_CTLR_ENABLE_LPIS;
                /* Check for any pending interrupts in the pending table */
                gicv3_redist_update_lpi(cs);
            } else {
                cs->gicr_ctlr &= ~GICR_CTLR_ENABLE_LPIS;
                /* cs->hppi might have been an LPI; recalculate */
                gicv3_redist_update(cs);
            }
        }
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for our implementation */
        return MEMTX_OK;
    case GICR_WAKER:
        /* Only the ProcessorSleep bit is writable. When the guest sets
         * it, it requests that we transition the channel between the
         * redistributor and the cpu interface to quiescent, and that
         * we set the ChildrenAsleep bit once the interface has reached the
         * quiescent state.
         * Setting the ProcessorSleep to 0 reverses the quiescing, and
         * ChildrenAsleep is cleared once the transition is complete.
         * Since our interface is not asynchronous, we complete these
         * transitions instantaneously, so we set ChildrenAsleep to the
         * same value as ProcessorSleep here.
         */
        value &= GICR_WAKER_ProcessorSleep;
        if (value & GICR_WAKER_ProcessorSleep) {
            value |= GICR_WAKER_ChildrenAsleep;
        }
        cs->gicr_waker = value;
        return MEMTX_OK;
    case GICR_PROPBASER:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            return MEMTX_OK;
        }
        cs->gicr_igroupr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_ISENABLER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ICENABLER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ISPENDR0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ICPENDR0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ISACTIVER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_ICACTIVER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;

        for (i = irq; i < irq + 4; i++, value >>= 8) {
            gicr_write_ipriorityr(cs, attrs, i, value);
        }
        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_INMIR0:
        if (cs->gic->nmi_support) {
            gicr_write_bitmap_reg(cs, attrs, &cs->gicr_inmir0, value);
        }
        return MEMTX_OK;

    case GICR_ICFGR0:
        /* Register is all RAZ/WI or RAO/WI bits */
        return MEMTX_OK;
    case GICR_ICFGR1:
    {
        uint32_t mask;

        /* Since our edge_trigger bitmap is one bit per irq, our input
         * 32-bits will compress down into 16 bits which we need
         * to write into the bitmap.
         */
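        /*
         * e.g. half_unshuffle32(0xaaaaaaaa >> 1) == 0x0000ffff; the
         * result is shifted into the top half because GICR_ICFGR1
         * covers interrupts 16..31.
         */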
        value = half_unshuffle32(value >> 1) << 16;
        mask = mask_group(cs, attrs) & 0xffff0000U;

        cs->edge_trigger &= ~mask;
        cs->edge_trigger |= (value & mask);

        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_igrpmodr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_nsacr = value;
        /* no update required as this only affects access permission checks */
        return MEMTX_OK;
    case GICR_IIDR:
    case GICR_TYPER:
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      HWADDR_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
    /*
     * VLPI frame registers. We don't need a version check for
     * VPROPBASER and VPENDBASER because gicv3_redist_size() will
     * prevent pre-v4 GIC from passing us offsets this high.
     */
    case GICR_VPROPBASER:
        cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_VPROPBASER + 4:
        cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_VPENDBASER:
        gicr_write_vpendbaser(cs, deposit64(cs->gicr_vpendbaser, 0, 32, value));
        return MEMTX_OK;
    case GICR_VPENDBASER + 4:
        gicr_write_vpendbaser(cs, deposit64(cs->gicr_vpendbaser, 32, 32, value));
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_readll(GICv3CPUState *cs, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_TYPER:
        *data = cs->gicr_typer;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = cs->gicr_propbaser;
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = cs->gicr_pendbaser;
        return MEMTX_OK;
    /*
     * VLPI frame registers. We don't need a version check for
     * VPROPBASER and VPENDBASER because gicv3_redist_size() will
     * prevent pre-v4 GIC from passing us offsets this high.
     */
    case GICR_VPROPBASER:
        *data = cs->gicr_vpropbaser;
        return MEMTX_OK;
    case GICR_VPENDBASER:
        *data = cs->gicr_vpendbaser;
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writell(GICv3CPUState *cs, hwaddr offset,
                                uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_PROPBASER:
        cs->gicr_propbaser = value;
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = value;
        return MEMTX_OK;
    case GICR_TYPER:
        /* RO register, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      HWADDR_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
    /*
     * VLPI frame registers. We don't need a version check for
     * VPROPBASER and VPENDBASER because gicv3_redist_size() will
     * prevent pre-v4 GIC from passing us offsets this high.
     */
    case GICR_VPROPBASER:
        cs->gicr_vpropbaser = value;
        return MEMTX_OK;
    case GICR_VPENDBASER:
        gicr_write_vpendbaser(cs, value);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
                              unsigned size, MemTxAttrs attrs)
{
    GICv3RedistRegion *region = opaque;
    GICv3State *s = region->gic;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /*
     * There are (for GICv3) two 64K redistributor pages per CPU.
     * In some cases the redistributor pages for all CPUs are not
     * contiguous (eg on the virt board they are split into two
     * parts if there are too many CPUs to all fit in the same place
     * in the memory map); if so then the GIC has multiple MemoryRegions
     * for the redistributors.
     */
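    /*
     * gicv3_redist_size() is the per-CPU stride (two 64K frames for
     * GICv3, four for GICv4), so the offset divides into a CPU index
     * within this region plus an offset into that CPU's frames.
     */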
    cpuidx = region->cpuidx + offset / gicv3_redist_size(s);
    offset %= gicv3_redist_size(s);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_readb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_readl(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_readll(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " HWADDR_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badread(gicv3_redist_affid(cs), offset,
                                   size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
        *data = 0;
    } else {
        trace_gicv3_redist_read(gicv3_redist_affid(cs), offset, *data,
                                size, attrs.secure);
    }
    return r;
}

MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
                               unsigned size, MemTxAttrs attrs)
{
    GICv3RedistRegion *region = opaque;
    GICv3State *s = region->gic;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /*
     * There are (for GICv3) two 64K redistributor pages per CPU.
     * In some cases the redistributor pages for all CPUs are not
     * contiguous (eg on the virt board they are split into two
     * parts if there are too many CPUs to all fit in the same place
     * in the memory map); if so then the GIC has multiple MemoryRegions
     * for the redistributors.
     */
    cpuidx = region->cpuidx + offset / gicv3_redist_size(s);
    offset %= gicv3_redist_size(s);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_writeb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_writel(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_writell(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " HWADDR_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badwrite(gicv3_redist_affid(cs), offset, data,
                                    size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
    } else {
        trace_gicv3_redist_write(gicv3_redist_affid(cs), offset, data,
                                 size, attrs.secure);
    }
    return r;
}

static void gicv3_redist_check_lpi_priority(GICv3CPUState *cs, int irq)
{
    uint64_t lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;

    update_for_one_lpi(cs, irq, lpict_baddr,
                       cs->gic->gicd_ctlr & GICD_CTLR_DS,
                       &cs->hpplpi);
}

void gicv3_redist_update_lpi_only(GICv3CPUState *cs)
{
    /*
     * This function scans the LPI Pending table and, for each pending
     * LPI, reads the corresponding entry in the LPI Configuration table
     * to extract the priority and enable information. Whenever it finds
     * a pending enabled LPI with a higher priority (i.e. a lower
     * priority value) than the best seen so far, it records that LPI
     * as the new highest priority pending interrupt.
     */
    uint64_t lpipt_baddr, lpict_baddr;
    uint64_t idbits;

    idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);

    if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;

    update_for_all_lpis(cs, lpipt_baddr, lpict_baddr, idbits,
                        cs->gic->gicd_ctlr & GICD_CTLR_DS, &cs->hpplpi);
}

void gicv3_redist_update_lpi(GICv3CPUState *cs)
{
    gicv3_redist_update_lpi_only(cs);
    gicv3_redist_update(cs);
}

void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level)
{
    /*
     * This function updates the pending bit in the LPI Pending table
     * for the LPI whose pending state is being changed.
     */
    uint64_t lpipt_baddr;

    lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    if (!set_pending_table_bit(cs, lpipt_baddr, irq, level)) {
        /* no change in the value of pending bit, return */
        return;
    }

    /*
     * check if this LPI is better than the current hpplpi, if yes
     * just set hpplpi.prio and .irq without doing a full rescan
     */
    if (level) {
        gicv3_redist_check_lpi_priority(cs, irq);
        gicv3_redist_update(cs);
    } else {
        if (irq == cs->hpplpi.irq) {
            gicv3_redist_update_lpi(cs);
        }
    }
}

void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level)
{
    uint64_t idbits;

    idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);

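    /*
     * Ignore the request unless LPIs are enabled and this INTID is
     * within the LPI range implied by GICR_PROPBASER.IDBits.
     */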
    if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        (irq > (1ULL << (idbits + 1)) - 1) || irq < GICV3_LPI_INTID_START) {
        return;
    }

    /* set/clear the pending bit for this irq */
    gicv3_redist_lpi_pending(cs, irq, level);
}

void gicv3_redist_inv_lpi(GICv3CPUState *cs, int irq)
{
    /*
     * The only cached information for LPIs we have is the HPPLPI.
     * We could be cleverer about identifying when we don't need
     * to do a full rescan of the pending table, but until we find
     * this is a performance issue, just always recalculate.
     */
    gicv3_redist_update_lpi(cs);
}


void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq)
{
    /*
     * Move the specified LPI's pending state from the source redistributor
     * to the destination.
     *
     * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE:
     * we choose to NOP. If LPIs are disabled on source there's nothing
     * to be transferred anyway.
     */
    uint64_t idbits;
    uint32_t pendt_size;
    uint64_t src_baddr;

    if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);
    idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 idbits);

    pendt_size = 1ULL << (idbits + 1);
    if ((irq / 8) >= pendt_size) {
        return;
    }

    src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    if (!set_pending_table_bit(src, src_baddr, irq, 0)) {
        /* Not pending on source, nothing to do */
        return;
    }
    if (irq == src->hpplpi.irq) {
        /*
         * We just made this LPI not-pending so only need to update
         * if it was previously the highest priority pending LPI
         */
        gicv3_redist_update_lpi(src);
    }
    /* Mark it pending on the destination */
    gicv3_redist_lpi_pending(dest, irq, 1);
}

void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest)
{
    /*
     * We must move all pending LPIs from the source redistributor
     * to the destination. That is, for every pending LPI X on
     * src, we must set it not-pending on src and pending on dest.
     * LPIs that are already pending on dest are not cleared.
     *
     * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE:
     * we choose to NOP. If LPIs are disabled on source there's nothing
     * to be transferred anyway.
     */
    AddressSpace *as = &src->gic->dma_as;
    uint64_t idbits;
    uint32_t pendt_size;
    uint64_t src_baddr, dest_baddr;
    int i;

    if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);
    idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 idbits);

    pendt_size = 1ULL << (idbits + 1);
    src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    dest_baddr = dest->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
        uint8_t src_pend, dest_pend;

        address_space_read(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED,
                           &src_pend, sizeof(src_pend));
        if (!src_pend) {
            continue;
        }
        address_space_read(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED,
                           &dest_pend, sizeof(dest_pend));
        dest_pend |= src_pend;
        src_pend = 0;
        address_space_write(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED,
                            &src_pend, sizeof(src_pend));
        address_space_write(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED,
                            &dest_pend, sizeof(dest_pend));
    }

    gicv3_redist_update_lpi(src);
    gicv3_redist_update_lpi(dest);
}

void gicv3_redist_vlpi_pending(GICv3CPUState *cs, int irq, int level)
{
    /*
     * Change the pending state of the specified vLPI.
     * Unlike gicv3_redist_process_vlpi(), we know here that the
     * vCPU is definitely resident on this redistributor, and that
     * the irq is in range.
     */
    uint64_t vptbase, ctbase;

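    /* GICR_VPENDBASER.PHYADDR holds bits [51:16] of the table address */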
    vptbase = FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, PHYADDR) << 16;

    if (set_pending_table_bit(cs, vptbase, irq, level)) {
        if (level) {
            /* Check whether this vLPI is now the best */
            ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
            update_for_one_lpi(cs, irq, ctbase, true, &cs->hppvlpi);
            gicv3_cpuif_virt_irq_fiq_update(cs);
        } else {
            /* Only need to recalculate if this was previously the best vLPI */
            if (irq == cs->hppvlpi.irq) {
                gicv3_redist_update_vlpi(cs);
            }
        }
    }
}

void gicv3_redist_process_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr,
                               int doorbell, int level)
{
    bool bit_changed;
    bool resident = vcpu_resident(cs, vptaddr);
    uint64_t ctbase;

    if (resident) {
        uint32_t idbits = FIELD_EX64(cs->gicr_vpropbaser, GICR_VPROPBASER, IDBITS);
        if (irq >= (1ULL << (idbits + 1))) {
            return;
        }
    }

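    /*
     * The pending table lives in guest memory, so the bit is updated
     * even when the vCPU is not currently resident on this
     * redistributor; only the cached HPPVLPI state is skipped.
     */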
    bit_changed = set_pending_table_bit(cs, vptaddr, irq, level);
    if (resident && bit_changed) {
        if (level) {
            /* Check whether this vLPI is now the best */
            ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
            update_for_one_lpi(cs, irq, ctbase, true, &cs->hppvlpi);
            gicv3_cpuif_virt_irq_fiq_update(cs);
        } else {
            /* Only need to recalculate if this was previously the best vLPI */
            if (irq == cs->hppvlpi.irq) {
                gicv3_redist_update_vlpi(cs);
            }
        }
    }

    if (!resident && level && doorbell != INTID_SPURIOUS &&
        (cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        /* vCPU is not currently resident: ring the doorbell */
        gicv3_redist_process_lpi(cs, doorbell, 1);
    }
}

void gicv3_redist_mov_vlpi(GICv3CPUState *src, uint64_t src_vptaddr,
                           GICv3CPUState *dest, uint64_t dest_vptaddr,
                           int irq, int doorbell)
{
    /*
     * Move the specified vLPI's pending state from the source redistributor
     * to the destination.
     */
    if (!set_pending_table_bit(src, src_vptaddr, irq, 0)) {
        /* Not pending on source, nothing to do */
        return;
    }
    if (vcpu_resident(src, src_vptaddr) && irq == src->hppvlpi.irq) {
        /*
         * Update src's cached highest-priority pending vLPI if we just made
         * it not-pending
         */
        gicv3_redist_update_vlpi(src);
    }
    /*
     * Mark the vLPI pending on the destination (ringing the doorbell
     * if the vCPU isn't resident)
     */
    gicv3_redist_process_vlpi(dest, irq, dest_vptaddr, doorbell, 1);
}

void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr)
{
    if (!vcpu_resident(cs, vptaddr)) {
        /* We don't have anything cached if the vCPU isn't resident */
        return;
    }

    /* Otherwise, our only cached information is the HPPVLPI info */
    gicv3_redist_update_vlpi(cs);
}

void gicv3_redist_inv_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr)
{
    /*
     * The only cached information for vLPIs we have is the HPPVLPI.
     * We could be cleverer about identifying when we don't need
     * to do a full rescan of the pending table, but until we find
     * this is a performance issue, just always recalculate.
     */
    gicv3_redist_vinvall(cs, vptaddr);
}

void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level)
{
    /* Update redistributor state for a change in an external PPI input line */
    if (level == extract32(cs->level, irq, 1)) {
        return;
    }

    trace_gicv3_redist_set_irq(gicv3_redist_affid(cs), irq, level);

    cs->level = deposit32(cs->level, irq, 1, level);

    if (level) {
        /* 0->1 edges latch the pending bit for edge-triggered interrupts */
        if (extract32(cs->edge_trigger, irq, 1)) {
            cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
        }
    }

    gicv3_redist_update(cs);
}

void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns)
{
    /* Update redistributor state for a generated SGI */
    int irqgrp = gicv3_irq_group(cs->gic, cs, irq);

    /* If we are asked for a Secure Group 1 SGI and it's actually
     * configured as Secure Group 0 this is OK (subject to the usual
     * NSACR checks).
     */
    if (grp == GICV3_G1 && irqgrp == GICV3_G0) {
        grp = GICV3_G0;
    }

    if (grp != irqgrp) {
        return;
    }

    if (ns && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* If security is enabled we must test the NSACR bits */
        int nsaccess = gicr_ns_access(cs, irq);

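        /*
         * A 2-bit NS_access value of 1 permits NS generation of Group 0
         * SGIs; a value of 2 or more also permits Secure Group 1 SGIs.
         */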
        if ((irqgrp == GICV3_G0 && nsaccess < 1) ||
            (irqgrp == GICV3_G1 && nsaccess < 2)) {
            return;
        }
    }

    /* OK, we can accept the SGI */
    trace_gicv3_redist_send_sgi(gicv3_redist_affid(cs), irq);
    cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
    gicv3_redist_update(cs);
}