xref: /openbmc/qemu/hw/arm/smmuv3.c (revision 19f4ed36)
1 /*
2  * Copyright (C) 2014-2016 Broadcom Corporation
3  * Copyright (c) 2017 Red Hat, Inc.
4  * Written by Prem Mallappa, Eric Auger
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program; if not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include "hw/irq.h"
22 #include "hw/sysbus.h"
23 #include "migration/vmstate.h"
24 #include "hw/qdev-core.h"
25 #include "hw/pci/pci.h"
26 #include "exec/address-spaces.h"
27 #include "cpu.h"
28 #include "trace.h"
29 #include "qemu/log.h"
30 #include "qemu/error-report.h"
31 #include "qapi/error.h"
32 
33 #include "hw/arm/smmuv3.h"
34 #include "smmuv3-internal.h"
35 #include "smmu-internal.h"
36 
37 /**
38  * smmuv3_trigger_irq - pulse @irq if enabled and update
39  * GERROR register in case of GERROR interrupt
40  *
41  * @irq: irq type
42  * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
43  */
44 static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
45                                uint32_t gerror_mask)
46 {
47 
48     bool pulse = false;
49 
50     switch (irq) {
51     case SMMU_IRQ_EVTQ:
52         pulse = smmuv3_eventq_irq_enabled(s);
53         break;
54     case SMMU_IRQ_PRIQ:
55         qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
56         break;
57     case SMMU_IRQ_CMD_SYNC:
58         pulse = true;
59         break;
60     case SMMU_IRQ_GERROR:
61     {
62         uint32_t pending = s->gerror ^ s->gerrorn;
63         uint32_t new_gerrors = ~pending & gerror_mask;
64 
65         if (!new_gerrors) {
66             /* only toggle non pending errors */
67             return;
68         }
69         s->gerror ^= new_gerrors;
70         trace_smmuv3_write_gerror(new_gerrors, s->gerror);
71 
72         pulse = smmuv3_gerror_irq_enabled(s);
73         break;
74     }
75     }
76     if (pulse) {
77         trace_smmuv3_trigger_irq(irq);
78         qemu_irq_pulse(s->irq[irq]);
79     }
80 }
81 
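/*
 * smmuv3_write_gerrorn - handle a guest write to the GERRORN register.
 * A GERROR bit is "active" (pending) while it differs from the
 * corresponding GERRORN bit; the guest acknowledges an error by toggling
 * that GERRORN bit so that both match again. For instance, with
 * gerror=0b100 and gerrorn=0b000, error 2 is active and writing
 * gerrorn=0b100 acknowledges it.
 */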
82 static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
83 {
84     uint32_t pending = s->gerror ^ s->gerrorn;
85     uint32_t toggled = s->gerrorn ^ new_gerrorn;
86 
87     if (toggled & ~pending) {
88         qemu_log_mask(LOG_GUEST_ERROR,
89                       "guest toggles non pending errors = 0x%x\n",
90                       toggled & ~pending);
91     }
92 
93     /*
94      * We do not raise any error in case the guest toggles bits corresponding
95      * to inactive IRQs (CONSTRAINED UNPREDICTABLE choice)
96      */
97     s->gerrorn = new_gerrorn;
98 
99     trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
100 }
101 
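/*
 * Queue helpers: Q_CONS_ENTRY()/Q_PROD_ENTRY() (smmuv3-internal.h) turn the
 * current consumer/producer index into the guest physical address of the
 * corresponding queue entry, and queue_cons_incr()/queue_prod_incr() advance
 * the index, taking care of the wrap bit.
 */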
102 static inline MemTxResult queue_read(SMMUQueue *q, void *data)
103 {
104     dma_addr_t addr = Q_CONS_ENTRY(q);
105 
106     return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
107 }
108 
109 static MemTxResult queue_write(SMMUQueue *q, void *data)
110 {
111     dma_addr_t addr = Q_PROD_ENTRY(q);
112     MemTxResult ret;
113 
114     ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
115     if (ret != MEMTX_OK) {
116         return ret;
117     }
118 
119     queue_prod_incr(q);
120     return MEMTX_OK;
121 }
122 
123 static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
124 {
125     SMMUQueue *q = &s->eventq;
126     MemTxResult r;
127 
128     if (!smmuv3_eventq_enabled(s)) {
129         return MEMTX_ERROR;
130     }
131 
132     if (smmuv3_q_full(q)) {
133         return MEMTX_ERROR;
134     }
135 
136     r = queue_write(q, evt);
137     if (r != MEMTX_OK) {
138         return r;
139     }
140 
141     if (!smmuv3_q_empty(q)) {
142         smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
143     }
144     return MEMTX_OK;
145 }
146 
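/*
 * smmuv3_record_event - build an event record from @info and push it to the
 * guest event queue. If the write fails (queue full or DMA error) an
 * EVENTQ_ABT_ERR global error is raised instead.
 */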
147 void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
148 {
149     Evt evt = {};
150     MemTxResult r;
151 
152     if (!smmuv3_eventq_enabled(s)) {
153         return;
154     }
155 
156     EVT_SET_TYPE(&evt, info->type);
157     EVT_SET_SID(&evt, info->sid);
158 
159     switch (info->type) {
160     case SMMU_EVT_NONE:
161         return;
162     case SMMU_EVT_F_UUT:
163         EVT_SET_SSID(&evt, info->u.f_uut.ssid);
164         EVT_SET_SSV(&evt,  info->u.f_uut.ssv);
165         EVT_SET_ADDR(&evt, info->u.f_uut.addr);
166         EVT_SET_RNW(&evt,  info->u.f_uut.rnw);
167         EVT_SET_PNU(&evt,  info->u.f_uut.pnu);
168         EVT_SET_IND(&evt,  info->u.f_uut.ind);
169         break;
170     case SMMU_EVT_C_BAD_STREAMID:
171         EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
172         EVT_SET_SSV(&evt,  info->u.c_bad_streamid.ssv);
173         break;
174     case SMMU_EVT_F_STE_FETCH:
175         EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
176         EVT_SET_SSV(&evt,  info->u.f_ste_fetch.ssv);
177         EVT_SET_ADDR2(&evt, info->u.f_ste_fetch.addr);
178         break;
179     case SMMU_EVT_C_BAD_STE:
180         EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
181         EVT_SET_SSV(&evt,  info->u.c_bad_ste.ssv);
182         break;
183     case SMMU_EVT_F_STREAM_DISABLED:
184         break;
185     case SMMU_EVT_F_TRANS_FORBIDDEN:
186         EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
187         EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
188         break;
189     case SMMU_EVT_C_BAD_SUBSTREAMID:
190         EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
191         break;
192     case SMMU_EVT_F_CD_FETCH:
193         EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
194         EVT_SET_SSV(&evt,  info->u.f_cd_fetch.ssv);
195         EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
196         break;
197     case SMMU_EVT_C_BAD_CD:
198         EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
199         EVT_SET_SSV(&evt,  info->u.c_bad_cd.ssv);
200         break;
201     case SMMU_EVT_F_WALK_EABT:
202     case SMMU_EVT_F_TRANSLATION:
203     case SMMU_EVT_F_ADDR_SIZE:
204     case SMMU_EVT_F_ACCESS:
205     case SMMU_EVT_F_PERMISSION:
206         EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
207         EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
208         EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
209         EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
210         EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
211         EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
212         EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
213         EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
214         EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
215         EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
216         EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
217         break;
218     case SMMU_EVT_F_CFG_CONFLICT:
219         EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
220         EVT_SET_SSV(&evt,  info->u.f_cfg_conflict.ssv);
221         break;
222     /* rest is not implemented */
223     case SMMU_EVT_F_BAD_ATS_TREQ:
224     case SMMU_EVT_F_TLB_CONFLICT:
225     case SMMU_EVT_E_PAGE_REQ:
226     default:
227         g_assert_not_reached();
228     }
229 
230     trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
231     r = smmuv3_write_eventq(s, &evt);
232     if (r != MEMTX_OK) {
233         smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
234     }
235     info->recorded = true;
236 }
237 
238 static void smmuv3_init_regs(SMMUv3State *s)
239 {
240     /**
241      * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
242      *       multi-level stream table
243      */
244     s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
245     s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
246     s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
247     s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
248     s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
249     s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
250     /* terminated transaction will always be aborted/error returned */
251     s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
252     /* 2-level stream table supported */
253     s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);
254 
255     s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
256     s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
257     s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS,   SMMU_CMDQS);
258 
259     s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
260     s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);
261 
262     /* 4K, 16K and 64K granule support */
263     s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
264     s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1);
265     s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
266     s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */
267 
268     s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
269     s->cmdq.prod = 0;
270     s->cmdq.cons = 0;
271     s->cmdq.entry_size = sizeof(struct Cmd);
272     s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
273     s->eventq.prod = 0;
274     s->eventq.cons = 0;
275     s->eventq.entry_size = sizeof(struct Evt);
276 
277     s->features = 0;
278     s->sid_split = 0;
279     s->aidr = 0x1;
280 }
281 
282 static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
283                         SMMUEventInfo *event)
284 {
285     int ret;
286 
287     trace_smmuv3_get_ste(addr);
288     /* TODO: guarantee 64-bit single-copy atomicity */
289     ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf));
290     if (ret != MEMTX_OK) {
291         qemu_log_mask(LOG_GUEST_ERROR,
292                       "Cannot fetch STE at address=0x%"PRIx64"\n", addr);
293         event->type = SMMU_EVT_F_STE_FETCH;
294         event->u.f_ste_fetch.addr = addr;
295         return -EINVAL;
296     }
297     return 0;
298 
299 }
300 
301 /* @ssid > 0 not supported yet */
302 static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
303                        CD *buf, SMMUEventInfo *event)
304 {
305     dma_addr_t addr = STE_CTXPTR(ste);
306     int ret;
307 
308     trace_smmuv3_get_cd(addr);
309     /* TODO: guarantee 64-bit single-copy atomicity */
310     ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf));
311     if (ret != MEMTX_OK) {
312         qemu_log_mask(LOG_GUEST_ERROR,
313                       "Cannot fetch CD at address=0x%"PRIx64"\n", addr);
314         event->type = SMMU_EVT_F_CD_FETCH;
315         event->u.f_cd_fetch.addr = addr;
316         return -EINVAL;
317     }
318     return 0;
319 }
320 
321 /* Returns < 0 in case of invalid STE, 0 otherwise */
322 static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
323                       STE *ste, SMMUEventInfo *event)
324 {
325     uint32_t config;
326 
327     if (!STE_VALID(ste)) {
328         if (!event->inval_ste_allowed) {
329             qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n");
330         }
331         goto bad_ste;
332     }
333 
334     config = STE_CONFIG(ste);
335 
336     if (STE_CFG_ABORT(config)) {
337         cfg->aborted = true;
338         return 0;
339     }
340 
341     if (STE_CFG_BYPASS(config)) {
342         cfg->bypassed = true;
343         return 0;
344     }
345 
346     if (STE_CFG_S2_ENABLED(config)) {
347         qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
348         goto bad_ste;
349     }
350 
351     if (STE_S1CDMAX(ste) != 0) {
352         qemu_log_mask(LOG_UNIMP,
353                       "SMMUv3 does not support multiple context descriptors yet\n");
354         goto bad_ste;
355     }
356 
357     if (STE_S1STALLD(ste)) {
358         qemu_log_mask(LOG_UNIMP,
359                       "SMMUv3 S1 stalling fault model not allowed yet\n");
360         goto bad_ste;
361     }
362     return 0;
363 
364 bad_ste:
365     event->type = SMMU_EVT_C_BAD_STE;
366     return -EINVAL;
367 }
368 
369 /**
370  * smmu_find_ste - Return the stream table entry associated
371  * with the SID
372  *
373  * @s: smmuv3 handle
374  * @sid: stream ID
375  * @ste: returned stream table entry
376  * @event: handle to an event info
377  *
378  * Supports linear and 2-level stream table
379  * Return 0 on success, -EINVAL otherwise
380  */
381 static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
382                          SMMUEventInfo *event)
383 {
384     dma_addr_t addr, strtab_base;
385     uint32_t log2size;
386     int strtab_size_shift;
387     int ret;
388 
389     trace_smmuv3_find_ste(sid, s->features, s->sid_split);
390     log2size = FIELD_EX32(s->strtab_base_cfg, STRTAB_BASE_CFG, LOG2SIZE);
391     /*
392      * Check SID range against both guest-configured and implementation limits
393      */
394     if (sid >= (1 << MIN(log2size, SMMU_IDR1_SIDSIZE))) {
395         event->type = SMMU_EVT_C_BAD_STREAMID;
396         return -EINVAL;
397     }
398     if (s->features & SMMU_FEATURE_2LVL_STE) {
399         int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
400         dma_addr_t l1ptr, l2ptr;
401         STEDesc l1std;
402 
403         /*
404          * Align strtab base address to table size. For this purpose, assume it
405          * is not bounded by SMMU_IDR1_SIDSIZE.
406          */
407         strtab_size_shift = MAX(5, (int)log2size - s->sid_split - 1 + 3);
408         strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
409                       ~MAKE_64BIT_MASK(0, strtab_size_shift);
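        /*
         * The StreamID indexes the 2-level table as
         *   L1 index = sid >> SPLIT, L2 index = sid & (2^SPLIT - 1).
         * For instance, with sid_split = 8 and sid = 0x1234,
         * l1_ste_offset = 0x12 and l2_ste_offset = 0x34.
         */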
410         l1_ste_offset = sid >> s->sid_split;
411         l2_ste_offset = sid & ((1 << s->sid_split) - 1);
412         l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
413         /* TODO: guarantee 64-bit single-copy atomicity */
414         ret = dma_memory_read(&address_space_memory, l1ptr, &l1std,
415                               sizeof(l1std));
416         if (ret != MEMTX_OK) {
417             qemu_log_mask(LOG_GUEST_ERROR,
418                           "Could not read L1PTR at 0x%"PRIx64"\n", l1ptr);
419             event->type = SMMU_EVT_F_STE_FETCH;
420             event->u.f_ste_fetch.addr = l1ptr;
421             return -EINVAL;
422         }
423 
424         span = L1STD_SPAN(&l1std);
425 
426         if (!span) {
427             /* l2ptr is not valid */
428             if (!event->inval_ste_allowed) {
429                 qemu_log_mask(LOG_GUEST_ERROR,
430                               "invalid sid=%d (L1STD span=0)\n", sid);
431             }
432             event->type = SMMU_EVT_C_BAD_STREAMID;
433             return -EINVAL;
434         }
435         max_l2_ste = (1 << span) - 1;
436         l2ptr = l1std_l2ptr(&l1std);
437         trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
438                                    l2ptr, l2_ste_offset, max_l2_ste);
439         if (l2_ste_offset > max_l2_ste) {
440             qemu_log_mask(LOG_GUEST_ERROR,
441                           "l2_ste_offset=%d > max_l2_ste=%d\n",
442                           l2_ste_offset, max_l2_ste);
443             event->type = SMMU_EVT_C_BAD_STE;
444             return -EINVAL;
445         }
446         addr = l2ptr + l2_ste_offset * sizeof(*ste);
447     } else {
448         strtab_size_shift = log2size + 5;
449         strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
450                       ~MAKE_64BIT_MASK(0, strtab_size_shift);
451         addr = strtab_base + sid * sizeof(*ste);
452     }
453 
454     if (smmu_get_ste(s, addr, ste, event)) {
455         return -EINVAL;
456     }
457 
458     return 0;
459 }
460 
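/*
 * decode_cd - extract the stage-1 translation parameters (ASID, output
 * address size, TBI and the per-TTB granule/TSZ/base) from the Context
 * Descriptor into @cfg. Returns 0 on success, -EINVAL (and a C_BAD_CD
 * event) otherwise.
 */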
461 static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
462 {
463     int ret = -EINVAL;
464     int i;
465 
466     if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
467         goto bad_cd;
468     }
469     if (!CD_A(cd)) {
470         goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
471     }
472     if (CD_S(cd)) {
473         goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
474     }
475     if (CD_HA(cd) || CD_HD(cd)) {
476         goto bad_cd; /* HTTU = 0 */
477     }
478 
479     /* we only support AArch64, stage-1 translation configs at the moment */
480     cfg->aa64 = true;
481     cfg->stage = 1;
482 
483     cfg->oas = oas2bits(CD_IPS(cd));
484     cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
485     cfg->tbi = CD_TBI(cd);
486     cfg->asid = CD_ASID(cd);
487 
488     trace_smmuv3_decode_cd(cfg->oas);
489 
490     /* decode data dependent on TT */
491     for (i = 0; i <= 1; i++) {
492         int tg, tsz;
493         SMMUTransTableInfo *tt = &cfg->tt[i];
494 
495         cfg->tt[i].disabled = CD_EPD(cd, i);
496         if (cfg->tt[i].disabled) {
497             continue;
498         }
499 
500         tsz = CD_TSZ(cd, i);
501         if (tsz < 16 || tsz > 39) {
502             goto bad_cd;
503         }
504 
505         tg = CD_TG(cd, i);
506         tt->granule_sz = tg2granule(tg, i);
507         if ((tt->granule_sz != 12 && tt->granule_sz != 14 &&
508              tt->granule_sz != 16) || CD_ENDI(cd)) {
509             goto bad_cd;
510         }
511 
512         tt->tsz = tsz;
513         tt->ttb = CD_TTB(cd, i);
514         if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
515             goto bad_cd;
516         }
517         tt->had = CD_HAD(cd, i);
518         trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz, tt->had);
519     }
520 
521     event->record_trans_faults = CD_R(cd);
522 
523     return 0;
524 
525 bad_cd:
526     event->type = SMMU_EVT_C_BAD_CD;
527     return ret;
528 }
529 
530 /**
531  * smmuv3_decode_config - Prepare the translation configuration
532  * for the @mr iommu region
533  * @mr: iommu memory region the translation config must be prepared for
534  * @cfg: output translation configuration which is populated through
535  *       the different configuration decoding steps
536  * @event: must be zero'ed by the caller
537  *
538  * return < 0 in case of config decoding error (@event is filled
539  * accordingly). Return 0 otherwise.
540  */
541 static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
542                                 SMMUEventInfo *event)
543 {
544     SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
545     uint32_t sid = smmu_get_sid(sdev);
546     SMMUv3State *s = sdev->smmu;
547     int ret;
548     STE ste;
549     CD cd;
550 
551     ret = smmu_find_ste(s, sid, &ste, event);
552     if (ret) {
553         return ret;
554     }
555 
556     ret = decode_ste(s, cfg, &ste, event);
557     if (ret) {
558         return ret;
559     }
560 
561     if (cfg->aborted || cfg->bypassed) {
562         return 0;
563     }
564 
565     ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
566     if (ret) {
567         return ret;
568     }
569 
570     return decode_cd(cfg, &cd, event);
571 }
572 
573 /**
574  * smmuv3_get_config - Look up a cached copy of the configuration data for
575  * @sdev; on a cache miss, decode the configuration structures from
576  * guest RAM.
577  *
578  * @sdev: SMMUDevice handle
579  * @event: output event info
580  *
581  * The configuration cache contains data resulting from both STE and CD
582  * decoding in the form of an SMMUTransCfg struct. The hash table is indexed
583  * by the SMMUDevice handle.
584  */
585 static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
586 {
587     SMMUv3State *s = sdev->smmu;
588     SMMUState *bc = &s->smmu_state;
589     SMMUTransCfg *cfg;
590 
591     cfg = g_hash_table_lookup(bc->configs, sdev);
592     if (cfg) {
593         sdev->cfg_cache_hits++;
594         trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
595                             sdev->cfg_cache_hits, sdev->cfg_cache_misses,
596                             100 * sdev->cfg_cache_hits /
597                             (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
598     } else {
599         sdev->cfg_cache_misses++;
600         trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
601                             sdev->cfg_cache_hits, sdev->cfg_cache_misses,
602                             100 * sdev->cfg_cache_hits /
603                             (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
604         cfg = g_new0(SMMUTransCfg, 1);
605 
606         if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
607             g_hash_table_insert(bc->configs, sdev, cfg);
608         } else {
609             g_free(cfg);
610             cfg = NULL;
611         }
612     }
613     return cfg;
614 }
615 
616 static void smmuv3_flush_config(SMMUDevice *sdev)
617 {
618     SMMUv3State *s = sdev->smmu;
619     SMMUState *bc = &s->smmu_state;
620 
621     trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
622     g_hash_table_remove(bc->configs, sdev);
623 }
624 
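/*
 * smmuv3_translate - IOMMU memory region translate callback. Overall flow:
 *  1. if the SMMU is disabled, let the access pass through untranslated;
 *  2. fetch the (possibly cached) STE/CD configuration for the device;
 *  3. honour abort/bypass configurations;
 *  4. look the IOVA up in the IOTLB, otherwise perform the page table walk
 *     and cache the result;
 *  5. on failure, record an event and return an entry with IOMMU_NONE
 *     permission.
 */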
625 static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
626                                       IOMMUAccessFlags flag, int iommu_idx)
627 {
628     SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
629     SMMUv3State *s = sdev->smmu;
630     uint32_t sid = smmu_get_sid(sdev);
631     SMMUEventInfo event = {.type = SMMU_EVT_NONE,
632                            .sid = sid,
633                            .inval_ste_allowed = false};
634     SMMUPTWEventInfo ptw_info = {};
635     SMMUTranslationStatus status;
636     SMMUState *bs = ARM_SMMU(s);
637     uint64_t page_mask, aligned_addr;
638     SMMUTLBEntry *cached_entry = NULL;
639     SMMUTransTableInfo *tt;
640     SMMUTransCfg *cfg = NULL;
641     IOMMUTLBEntry entry = {
642         .target_as = &address_space_memory,
643         .iova = addr,
644         .translated_addr = addr,
645         .addr_mask = ~(hwaddr)0,
646         .perm = IOMMU_NONE,
647     };
648 
649     qemu_mutex_lock(&s->mutex);
650 
651     if (!smmu_enabled(s)) {
652         status = SMMU_TRANS_DISABLE;
653         goto epilogue;
654     }
655 
656     cfg = smmuv3_get_config(sdev, &event);
657     if (!cfg) {
658         status = SMMU_TRANS_ERROR;
659         goto epilogue;
660     }
661 
662     if (cfg->aborted) {
663         status = SMMU_TRANS_ABORT;
664         goto epilogue;
665     }
666 
667     if (cfg->bypassed) {
668         status = SMMU_TRANS_BYPASS;
669         goto epilogue;
670     }
671 
672     tt = select_tt(cfg, addr);
673     if (!tt) {
674         if (event.record_trans_faults) {
675             event.type = SMMU_EVT_F_TRANSLATION;
676             event.u.f_translation.addr = addr;
677             event.u.f_translation.rnw = flag & 0x1;
678         }
679         status = SMMU_TRANS_ERROR;
680         goto epilogue;
681     }
682 
683     page_mask = (1ULL << (tt->granule_sz)) - 1;
684     aligned_addr = addr & ~page_mask;
685 
686     cached_entry = smmu_iotlb_lookup(bs, cfg, tt, aligned_addr);
687     if (cached_entry) {
688         if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) {
689             status = SMMU_TRANS_ERROR;
690             if (event.record_trans_faults) {
691                 event.type = SMMU_EVT_F_PERMISSION;
692                 event.u.f_permission.addr = addr;
693                 event.u.f_permission.rnw = flag & 0x1;
694             }
695         } else {
696             status = SMMU_TRANS_SUCCESS;
697         }
698         goto epilogue;
699     }
700 
701     cached_entry = g_new0(SMMUTLBEntry, 1);
702 
703     if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
704         g_free(cached_entry);
705         switch (ptw_info.type) {
706         case SMMU_PTW_ERR_WALK_EABT:
707             event.type = SMMU_EVT_F_WALK_EABT;
708             event.u.f_walk_eabt.addr = addr;
709             event.u.f_walk_eabt.rnw = flag & 0x1;
710             event.u.f_walk_eabt.class = 0x1;
711             event.u.f_walk_eabt.addr2 = ptw_info.addr;
712             break;
713         case SMMU_PTW_ERR_TRANSLATION:
714             if (event.record_trans_faults) {
715                 event.type = SMMU_EVT_F_TRANSLATION;
716                 event.u.f_translation.addr = addr;
717                 event.u.f_translation.rnw = flag & 0x1;
718             }
719             break;
720         case SMMU_PTW_ERR_ADDR_SIZE:
721             if (event.record_trans_faults) {
722                 event.type = SMMU_EVT_F_ADDR_SIZE;
723                 event.u.f_addr_size.addr = addr;
724                 event.u.f_addr_size.rnw = flag & 0x1;
725             }
726             break;
727         case SMMU_PTW_ERR_ACCESS:
728             if (event.record_trans_faults) {
729                 event.type = SMMU_EVT_F_ACCESS;
730                 event.u.f_access.addr = addr;
731                 event.u.f_access.rnw = flag & 0x1;
732             }
733             break;
734         case SMMU_PTW_ERR_PERMISSION:
735             if (event.record_trans_faults) {
736                 event.type = SMMU_EVT_F_PERMISSION;
737                 event.u.f_permission.addr = addr;
738                 event.u.f_permission.rnw = flag & 0x1;
739             }
740             break;
741         default:
742             g_assert_not_reached();
743         }
744         status = SMMU_TRANS_ERROR;
745     } else {
746         smmu_iotlb_insert(bs, cfg, cached_entry);
747         status = SMMU_TRANS_SUCCESS;
748     }
749 
750 epilogue:
751     qemu_mutex_unlock(&s->mutex);
752     switch (status) {
753     case SMMU_TRANS_SUCCESS:
754         entry.perm = flag;
755         entry.translated_addr = cached_entry->entry.translated_addr +
756                                     (addr & cached_entry->entry.addr_mask);
757         entry.addr_mask = cached_entry->entry.addr_mask;
758         trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
759                                        entry.translated_addr, entry.perm);
760         break;
761     case SMMU_TRANS_DISABLE:
762         entry.perm = flag;
763         entry.addr_mask = ~TARGET_PAGE_MASK;
764         trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
765                                       entry.perm);
766         break;
767     case SMMU_TRANS_BYPASS:
768         entry.perm = flag;
769         entry.addr_mask = ~TARGET_PAGE_MASK;
770         trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
771                                       entry.perm);
772         break;
773     case SMMU_TRANS_ABORT:
774         /* no event is recorded on abort */
775         trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
776                                      entry.perm);
777         break;
778     case SMMU_TRANS_ERROR:
779         qemu_log_mask(LOG_GUEST_ERROR,
780                       "%s translation failed for iova=0x%"PRIx64"(%s)\n",
781                       mr->parent_obj.name, addr, smmu_event_string(event.type));
782         smmuv3_record_event(s, &event);
783         break;
784     }
785 
786     return entry;
787 }
788 
789 /**
790  * smmuv3_notify_iova - call the notifier @n for a given
791  * @asid and @iova tuple.
792  *
793  * @mr: IOMMU mr region handle
794  * @n: notifier to be called
795  * @asid: address space ID or negative value if we don't care
796  * @iova: base IOVA of the range to notify for
797  * @tg: translation granule (if communicated through range invalidation)
798  * @num_pages: number of granule-sized pages (if tg != 0), otherwise 1
799  */
800 static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
801                                IOMMUNotifier *n,
802                                int asid, dma_addr_t iova,
803                                uint8_t tg, uint64_t num_pages)
804 {
805     SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
806     IOMMUTLBEvent event;
807     uint8_t granule;
808 
809     if (!tg) {
810         SMMUEventInfo event = {.inval_ste_allowed = true};
811         SMMUTransCfg *cfg = smmuv3_get_config(sdev, &event);
812         SMMUTransTableInfo *tt;
813 
814         if (!cfg) {
815             return;
816         }
817 
818         if (asid >= 0 && cfg->asid != asid) {
819             return;
820         }
821 
822         tt = select_tt(cfg, iova);
823         if (!tt) {
824             return;
825         }
826         granule = tt->granule_sz;
827     } else {
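        /*
         * TG encodes the granule used by the range invalidation:
         * 1 -> 4KB (12 bits), 2 -> 16KB (14), 3 -> 64KB (16),
         * hence granule = tg * 2 + 10.
         */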
828         granule = tg * 2 + 10;
829     }
830 
831     event.type = IOMMU_NOTIFIER_UNMAP;
832     event.entry.target_as = &address_space_memory;
833     event.entry.iova = iova;
834     event.entry.addr_mask = num_pages * (1 << granule) - 1;
835     event.entry.perm = IOMMU_NONE;
836 
837     memory_region_notify_iommu_one(n, &event);
838 }
839 
840 /* invalidate an asid/iova range tuple in all mr's */
841 static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
842                                       uint8_t tg, uint64_t num_pages)
843 {
844     SMMUDevice *sdev;
845 
846     QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
847         IOMMUMemoryRegion *mr = &sdev->iommu;
848         IOMMUNotifier *n;
849 
850         trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova,
851                                         tg, num_pages);
852 
853         IOMMU_NOTIFIER_FOREACH(n, mr) {
854             smmuv3_notify_iova(mr, n, asid, iova, tg, num_pages);
855         }
856     }
857 }
858 
859 static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
860 {
861     uint8_t scale = 0, num = 0, ttl = 0;
862     dma_addr_t addr = CMD_ADDR(cmd);
863     uint8_t type = CMD_TYPE(cmd);
864     uint16_t vmid = CMD_VMID(cmd);
865     bool leaf = CMD_LEAF(cmd);
866     uint8_t tg = CMD_TG(cmd);
867     uint64_t first_page = 0, last_page;
868     uint64_t num_pages = 1;
869     int asid = -1;
870 
871     if (tg) {
872         scale = CMD_SCALE(cmd);
873         num = CMD_NUM(cmd);
874         ttl = CMD_TTL(cmd);
875         num_pages = (num + 1) * BIT_ULL(scale);
876     }
877 
878     if (type == SMMU_CMD_TLBI_NH_VA) {
879         asid = CMD_ASID(cmd);
880     }
881 
882     /* Split the invalidation into power-of-two range invalidations */
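    /*
     * For instance num_pages = 6 is emitted as two chunks of 4 and 2 pages,
     * and num_pages = 7 as chunks of 4, 2 and 1 page(s); each chunk starts
     * at an offset that is a multiple of its own size.
     */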
883     last_page = num_pages - 1;
884     while (num_pages) {
885         uint8_t granule = tg * 2 + 10;
886         uint64_t mask, count;
887 
888         mask = dma_aligned_pow2_mask(first_page, last_page, 64 - granule);
889         count = mask + 1;
890 
891         trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, count, ttl, leaf);
892         smmuv3_inv_notifiers_iova(s, asid, addr, tg, count);
893         smmu_iotlb_inv_iova(s, asid, addr, tg, count, ttl);
894 
895         num_pages -= count;
896         first_page += count;
897         addr += count * BIT_ULL(granule);
898     }
899 }
900 
901 static gboolean
902 smmuv3_invalidate_ste(gpointer key, gpointer value, gpointer user_data)
903 {
904     SMMUDevice *sdev = (SMMUDevice *)key;
905     uint32_t sid = smmu_get_sid(sdev);
906     SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data;
907 
908     if (sid < sid_range->start || sid > sid_range->end) {
909         return false;
910     }
911     trace_smmuv3_config_cache_inv(sid);
912     return true;
913 }
914 
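/*
 * smmuv3_cmdq_consume - drain the command queue: commands are read and
 * executed one by one until the queue is empty or an error occurs. On
 * error, processing stops, the error code is reported in the CMDQ
 * consumer register ERR field and a CMDQ_ERR global error is raised.
 */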
915 static int smmuv3_cmdq_consume(SMMUv3State *s)
916 {
917     SMMUState *bs = ARM_SMMU(s);
918     SMMUCmdError cmd_error = SMMU_CERROR_NONE;
919     SMMUQueue *q = &s->cmdq;
920     SMMUCommandType type = 0;
921 
922     if (!smmuv3_cmdq_enabled(s)) {
923         return 0;
924     }
925     /*
926      * Some commands depend on register values, typically CR0. In case those
927      * register values change while handling the command, the spec says it
928      * is UNPREDICTABLE whether the command is interpreted under the new
929      * or the old value.
930      */
931 
932     while (!smmuv3_q_empty(q)) {
933         uint32_t pending = s->gerror ^ s->gerrorn;
934         Cmd cmd;
935 
936         trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
937                                   Q_PROD_WRAP(q), Q_CONS_WRAP(q));
938 
939         if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
940             break;
941         }
942 
943         if (queue_read(q, &cmd) != MEMTX_OK) {
944             cmd_error = SMMU_CERROR_ABT;
945             break;
946         }
947 
948         type = CMD_TYPE(&cmd);
949 
950         trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));
951 
952         qemu_mutex_lock(&s->mutex);
953         switch (type) {
954         case SMMU_CMD_SYNC:
955             if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
956                 smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
957             }
958             break;
959         case SMMU_CMD_PREFETCH_CONFIG:
960         case SMMU_CMD_PREFETCH_ADDR:
961             break;
962         case SMMU_CMD_CFGI_STE:
963         {
964             uint32_t sid = CMD_SID(&cmd);
965             IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
966             SMMUDevice *sdev;
967 
968             if (CMD_SSEC(&cmd)) {
969                 cmd_error = SMMU_CERROR_ILL;
970                 break;
971             }
972 
973             if (!mr) {
974                 break;
975             }
976 
977             trace_smmuv3_cmdq_cfgi_ste(sid);
978             sdev = container_of(mr, SMMUDevice, iommu);
979             smmuv3_flush_config(sdev);
980 
981             break;
982         }
983         case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
984         {
985             uint32_t sid = CMD_SID(&cmd), mask;
986             uint8_t range = CMD_STE_RANGE(&cmd);
987             SMMUSIDRange sid_range;
988 
989             if (CMD_SSEC(&cmd)) {
990                 cmd_error = SMMU_CERROR_ILL;
991                 break;
992             }
993 
994             mask = (1ULL << (range + 1)) - 1;
995             sid_range.start = sid & ~mask;
996             sid_range.end = sid_range.start + mask;
997 
998             trace_smmuv3_cmdq_cfgi_ste_range(sid_range.start, sid_range.end);
999             g_hash_table_foreach_remove(bs->configs, smmuv3_invalidate_ste,
1000                                         &sid_range);
1001             break;
1002         }
1003         case SMMU_CMD_CFGI_CD:
1004         case SMMU_CMD_CFGI_CD_ALL:
1005         {
1006             uint32_t sid = CMD_SID(&cmd);
1007             IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
1008             SMMUDevice *sdev;
1009 
1010             if (CMD_SSEC(&cmd)) {
1011                 cmd_error = SMMU_CERROR_ILL;
1012                 break;
1013             }
1014 
1015             if (!mr) {
1016                 break;
1017             }
1018 
1019             trace_smmuv3_cmdq_cfgi_cd(sid);
1020             sdev = container_of(mr, SMMUDevice, iommu);
1021             smmuv3_flush_config(sdev);
1022             break;
1023         }
1024         case SMMU_CMD_TLBI_NH_ASID:
1025         {
1026             uint16_t asid = CMD_ASID(&cmd);
1027 
1028             trace_smmuv3_cmdq_tlbi_nh_asid(asid);
1029             smmu_inv_notifiers_all(&s->smmu_state);
1030             smmu_iotlb_inv_asid(bs, asid);
1031             break;
1032         }
1033         case SMMU_CMD_TLBI_NH_ALL:
1034         case SMMU_CMD_TLBI_NSNH_ALL:
1035             trace_smmuv3_cmdq_tlbi_nh();
1036             smmu_inv_notifiers_all(&s->smmu_state);
1037             smmu_iotlb_inv_all(bs);
1038             break;
1039         case SMMU_CMD_TLBI_NH_VAA:
1040         case SMMU_CMD_TLBI_NH_VA:
1041             smmuv3_s1_range_inval(bs, &cmd);
1042             break;
1043         case SMMU_CMD_TLBI_EL3_ALL:
1044         case SMMU_CMD_TLBI_EL3_VA:
1045         case SMMU_CMD_TLBI_EL2_ALL:
1046         case SMMU_CMD_TLBI_EL2_ASID:
1047         case SMMU_CMD_TLBI_EL2_VA:
1048         case SMMU_CMD_TLBI_EL2_VAA:
1049         case SMMU_CMD_TLBI_S12_VMALL:
1050         case SMMU_CMD_TLBI_S2_IPA:
1051         case SMMU_CMD_ATC_INV:
1052         case SMMU_CMD_PRI_RESP:
1053         case SMMU_CMD_RESUME:
1054         case SMMU_CMD_STALL_TERM:
1055             trace_smmuv3_unhandled_cmd(type);
1056             break;
1057         default:
1058             cmd_error = SMMU_CERROR_ILL;
1059             qemu_log_mask(LOG_GUEST_ERROR,
1060                           "Illegal command type: %d\n", CMD_TYPE(&cmd));
1061             break;
1062         }
1063         qemu_mutex_unlock(&s->mutex);
1064         if (cmd_error) {
1065             break;
1066         }
1067         /*
1068          * We only increment the cons index after the command has completed.
1069          * We do that because SYNC returns immediately and does not check
1070          * the completion of previous commands.
1071          */
1072         queue_cons_incr(q);
1073     }
1074 
1075     if (cmd_error) {
1076         trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
1077         smmu_write_cmdq_err(s, cmd_error);
1078         smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
1079     }
1080 
1081     trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
1082                                   Q_PROD_WRAP(q), Q_CONS_WRAP(q));
1083 
1084     return 0;
1085 }
1086 
1087 static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
1088                                uint64_t data, MemTxAttrs attrs)
1089 {
1090     switch (offset) {
1091     case A_GERROR_IRQ_CFG0:
1092         s->gerror_irq_cfg0 = data;
1093         return MEMTX_OK;
1094     case A_STRTAB_BASE:
1095         s->strtab_base = data;
1096         return MEMTX_OK;
1097     case A_CMDQ_BASE:
1098         s->cmdq.base = data;
1099         s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
1100         if (s->cmdq.log2size > SMMU_CMDQS) {
1101             s->cmdq.log2size = SMMU_CMDQS;
1102         }
1103         return MEMTX_OK;
1104     case A_EVENTQ_BASE:
1105         s->eventq.base = data;
1106         s->eventq.log2size = extract64(s->eventq.base, 0, 5);
1107         if (s->eventq.log2size > SMMU_EVENTQS) {
1108             s->eventq.log2size = SMMU_EVENTQS;
1109         }
1110         return MEMTX_OK;
1111     case A_EVENTQ_IRQ_CFG0:
1112         s->eventq_irq_cfg0 = data;
1113         return MEMTX_OK;
1114     default:
1115         qemu_log_mask(LOG_UNIMP,
1116                       "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
1117                       __func__, offset);
1118         return MEMTX_OK;
1119     }
1120 }
1121 
1122 static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
1123                                uint64_t data, MemTxAttrs attrs)
1124 {
1125     switch (offset) {
1126     case A_CR0:
1127         s->cr[0] = data;
1128         s->cr0ack = data & ~SMMU_CR0_RESERVED;
1129         /* in case the command queue has been enabled */
1130         smmuv3_cmdq_consume(s);
1131         return MEMTX_OK;
1132     case A_CR1:
1133         s->cr[1] = data;
1134         return MEMTX_OK;
1135     case A_CR2:
1136         s->cr[2] = data;
1137         return MEMTX_OK;
1138     case A_IRQ_CTRL:
1139         s->irq_ctrl = data;
1140         return MEMTX_OK;
1141     case A_GERRORN:
1142         smmuv3_write_gerrorn(s, data);
1143         /*
1144          * By acknowledging the CMDQ_ERR, SW signals that commands can
1145          * be processed again
1146          */
1147         smmuv3_cmdq_consume(s);
1148         return MEMTX_OK;
1149     case A_GERROR_IRQ_CFG0: /* 64b */
1150         s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
1151         return MEMTX_OK;
1152     case A_GERROR_IRQ_CFG0 + 4:
1153         s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
1154         return MEMTX_OK;
1155     case A_GERROR_IRQ_CFG1:
1156         s->gerror_irq_cfg1 = data;
1157         return MEMTX_OK;
1158     case A_GERROR_IRQ_CFG2:
1159         s->gerror_irq_cfg2 = data;
1160         return MEMTX_OK;
1161     case A_STRTAB_BASE: /* 64b */
1162         s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
1163         return MEMTX_OK;
1164     case A_STRTAB_BASE + 4:
1165         s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
1166         return MEMTX_OK;
1167     case A_STRTAB_BASE_CFG:
1168         s->strtab_base_cfg = data;
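        /*
         * FMT == 1 selects the 2-level stream table format; SPLIT is the
         * StreamID bit at which the table splits into L1/L2 levels.
         */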
1169         if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
1170             s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
1171             s->features |= SMMU_FEATURE_2LVL_STE;
1172         }
1173         return MEMTX_OK;
1174     case A_CMDQ_BASE: /* 64b */
1175         s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
1176         s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
1177         if (s->cmdq.log2size > SMMU_CMDQS) {
1178             s->cmdq.log2size = SMMU_CMDQS;
1179         }
1180         return MEMTX_OK;
1181     case A_CMDQ_BASE + 4: /* 64b */
1182         s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
1183         return MEMTX_OK;
1184     case A_CMDQ_PROD:
1185         s->cmdq.prod = data;
1186         smmuv3_cmdq_consume(s);
1187         return MEMTX_OK;
1188     case A_CMDQ_CONS:
1189         s->cmdq.cons = data;
1190         return MEMTX_OK;
1191     case A_EVENTQ_BASE: /* 64b */
1192         s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
1193         s->eventq.log2size = extract64(s->eventq.base, 0, 5);
1194         if (s->eventq.log2size > SMMU_EVENTQS) {
1195             s->eventq.log2size = SMMU_EVENTQS;
1196         }
1197         return MEMTX_OK;
1198     case A_EVENTQ_BASE + 4:
1199         s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
1200         return MEMTX_OK;
1201     case A_EVENTQ_PROD:
1202         s->eventq.prod = data;
1203         return MEMTX_OK;
1204     case A_EVENTQ_CONS:
1205         s->eventq.cons = data;
1206         return MEMTX_OK;
1207     case A_EVENTQ_IRQ_CFG0: /* 64b */
1208         s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
1209         return MEMTX_OK;
1210     case A_EVENTQ_IRQ_CFG0 + 4:
1211         s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
1212         return MEMTX_OK;
1213     case A_EVENTQ_IRQ_CFG1:
1214         s->eventq_irq_cfg1 = data;
1215         return MEMTX_OK;
1216     case A_EVENTQ_IRQ_CFG2:
1217         s->eventq_irq_cfg2 = data;
1218         return MEMTX_OK;
1219     default:
1220         qemu_log_mask(LOG_UNIMP,
1221                       "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
1222                       __func__, offset);
1223         return MEMTX_OK;
1224     }
1225 }
1226 
1227 static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
1228                                    unsigned size, MemTxAttrs attrs)
1229 {
1230     SMMUState *sys = opaque;
1231     SMMUv3State *s = ARM_SMMUV3(sys);
1232     MemTxResult r;
1233 
1234     /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
1235     offset &= ~0x10000;
1236 
1237     switch (size) {
1238     case 8:
1239         r = smmu_writell(s, offset, data, attrs);
1240         break;
1241     case 4:
1242         r = smmu_writel(s, offset, data, attrs);
1243         break;
1244     default:
1245         r = MEMTX_ERROR;
1246         break;
1247     }
1248 
1249     trace_smmuv3_write_mmio(offset, data, size, r);
1250     return r;
1251 }
1252 
1253 static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
1254                                uint64_t *data, MemTxAttrs attrs)
1255 {
1256     switch (offset) {
1257     case A_GERROR_IRQ_CFG0:
1258         *data = s->gerror_irq_cfg0;
1259         return MEMTX_OK;
1260     case A_STRTAB_BASE:
1261         *data = s->strtab_base;
1262         return MEMTX_OK;
1263     case A_CMDQ_BASE:
1264         *data = s->cmdq.base;
1265         return MEMTX_OK;
1266     case A_EVENTQ_BASE:
1267         *data = s->eventq.base;
1268         return MEMTX_OK;
1269     default:
1270         *data = 0;
1271         qemu_log_mask(LOG_UNIMP,
1272                       "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
1273                       __func__, offset);
1274         return MEMTX_OK;
1275     }
1276 }
1277 
1278 static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
1279                               uint64_t *data, MemTxAttrs attrs)
1280 {
1281     switch (offset) {
1282     case A_IDREGS ... A_IDREGS + 0x2f:
1283         *data = smmuv3_idreg(offset - A_IDREGS);
1284         return MEMTX_OK;
1285     case A_IDR0 ... A_IDR5:
1286         *data = s->idr[(offset - A_IDR0) / 4];
1287         return MEMTX_OK;
1288     case A_IIDR:
1289         *data = s->iidr;
1290         return MEMTX_OK;
1291     case A_AIDR:
1292         *data = s->aidr;
1293         return MEMTX_OK;
1294     case A_CR0:
1295         *data = s->cr[0];
1296         return MEMTX_OK;
1297     case A_CR0ACK:
1298         *data = s->cr0ack;
1299         return MEMTX_OK;
1300     case A_CR1:
1301         *data = s->cr[1];
1302         return MEMTX_OK;
1303     case A_CR2:
1304         *data = s->cr[2];
1305         return MEMTX_OK;
1306     case A_STATUSR:
1307         *data = s->statusr;
1308         return MEMTX_OK;
1309     case A_IRQ_CTRL:
1310     case A_IRQ_CTRL_ACK:
1311         *data = s->irq_ctrl;
1312         return MEMTX_OK;
1313     case A_GERROR:
1314         *data = s->gerror;
1315         return MEMTX_OK;
1316     case A_GERRORN:
1317         *data = s->gerrorn;
1318         return MEMTX_OK;
1319     case A_GERROR_IRQ_CFG0: /* 64b */
1320         *data = extract64(s->gerror_irq_cfg0, 0, 32);
1321         return MEMTX_OK;
1322     case A_GERROR_IRQ_CFG0 + 4:
1323         *data = extract64(s->gerror_irq_cfg0, 32, 32);
1324         return MEMTX_OK;
1325     case A_GERROR_IRQ_CFG1:
1326         *data = s->gerror_irq_cfg1;
1327         return MEMTX_OK;
1328     case A_GERROR_IRQ_CFG2:
1329         *data = s->gerror_irq_cfg2;
1330         return MEMTX_OK;
1331     case A_STRTAB_BASE: /* 64b */
1332         *data = extract64(s->strtab_base, 0, 32);
1333         return MEMTX_OK;
1334     case A_STRTAB_BASE + 4: /* 64b */
1335         *data = extract64(s->strtab_base, 32, 32);
1336         return MEMTX_OK;
1337     case A_STRTAB_BASE_CFG:
1338         *data = s->strtab_base_cfg;
1339         return MEMTX_OK;
1340     case A_CMDQ_BASE: /* 64b */
1341         *data = extract64(s->cmdq.base, 0, 32);
1342         return MEMTX_OK;
1343     case A_CMDQ_BASE + 4:
1344         *data = extract64(s->cmdq.base, 32, 32);
1345         return MEMTX_OK;
1346     case A_CMDQ_PROD:
1347         *data = s->cmdq.prod;
1348         return MEMTX_OK;
1349     case A_CMDQ_CONS:
1350         *data = s->cmdq.cons;
1351         return MEMTX_OK;
1352     case A_EVENTQ_BASE: /* 64b */
1353         *data = extract64(s->eventq.base, 0, 32);
1354         return MEMTX_OK;
1355     case A_EVENTQ_BASE + 4: /* 64b */
1356         *data = extract64(s->eventq.base, 32, 32);
1357         return MEMTX_OK;
1358     case A_EVENTQ_PROD:
1359         *data = s->eventq.prod;
1360         return MEMTX_OK;
1361     case A_EVENTQ_CONS:
1362         *data = s->eventq.cons;
1363         return MEMTX_OK;
1364     default:
1365         *data = 0;
1366         qemu_log_mask(LOG_UNIMP,
1367                       "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
1368                       __func__, offset);
1369         return MEMTX_OK;
1370     }
1371 }
1372 
1373 static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
1374                                   unsigned size, MemTxAttrs attrs)
1375 {
1376     SMMUState *sys = opaque;
1377     SMMUv3State *s = ARM_SMMUV3(sys);
1378     MemTxResult r;
1379 
1380     /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
1381     offset &= ~0x10000;
1382 
1383     switch (size) {
1384     case 8:
1385         r = smmu_readll(s, offset, data, attrs);
1386         break;
1387     case 4:
1388         r = smmu_readl(s, offset, data, attrs);
1389         break;
1390     default:
1391         r = MEMTX_ERROR;
1392         break;
1393     }
1394 
1395     trace_smmuv3_read_mmio(offset, *data, size, r);
1396     return r;
1397 }
1398 
1399 static const MemoryRegionOps smmu_mem_ops = {
1400     .read_with_attrs = smmu_read_mmio,
1401     .write_with_attrs = smmu_write_mmio,
1402     .endianness = DEVICE_LITTLE_ENDIAN,
1403     .valid = {
1404         .min_access_size = 4,
1405         .max_access_size = 8,
1406     },
1407     .impl = {
1408         .min_access_size = 4,
1409         .max_access_size = 8,
1410     },
1411 };
1412 
1413 static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
1414 {
1415     int i;
1416 
1417     for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
1418         sysbus_init_irq(dev, &s->irq[i]);
1419     }
1420 }
1421 
1422 static void smmu_reset(DeviceState *dev)
1423 {
1424     SMMUv3State *s = ARM_SMMUV3(dev);
1425     SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
1426 
1427     c->parent_reset(dev);
1428 
1429     smmuv3_init_regs(s);
1430 }
1431 
1432 static void smmu_realize(DeviceState *d, Error **errp)
1433 {
1434     SMMUState *sys = ARM_SMMU(d);
1435     SMMUv3State *s = ARM_SMMUV3(sys);
1436     SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
1437     SysBusDevice *dev = SYS_BUS_DEVICE(d);
1438     Error *local_err = NULL;
1439 
1440     c->parent_realize(d, &local_err);
1441     if (local_err) {
1442         error_propagate(errp, local_err);
1443         return;
1444     }
1445 
1446     qemu_mutex_init(&s->mutex);
1447 
1448     memory_region_init_io(&sys->iomem, OBJECT(s),
1449                           &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);
1450 
1451     sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;
1452 
1453     sysbus_init_mmio(dev, &sys->iomem);
1454 
1455     smmu_init_irq(s, dev);
1456 }
1457 
1458 static const VMStateDescription vmstate_smmuv3_queue = {
1459     .name = "smmuv3_queue",
1460     .version_id = 1,
1461     .minimum_version_id = 1,
1462     .fields = (VMStateField[]) {
1463         VMSTATE_UINT64(base, SMMUQueue),
1464         VMSTATE_UINT32(prod, SMMUQueue),
1465         VMSTATE_UINT32(cons, SMMUQueue),
1466         VMSTATE_UINT8(log2size, SMMUQueue),
1467         VMSTATE_END_OF_LIST(),
1468     },
1469 };
1470 
1471 static const VMStateDescription vmstate_smmuv3 = {
1472     .name = "smmuv3",
1473     .version_id = 1,
1474     .minimum_version_id = 1,
1475     .priority = MIG_PRI_IOMMU,
1476     .fields = (VMStateField[]) {
1477         VMSTATE_UINT32(features, SMMUv3State),
1478         VMSTATE_UINT8(sid_size, SMMUv3State),
1479         VMSTATE_UINT8(sid_split, SMMUv3State),
1480 
1481         VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
1482         VMSTATE_UINT32(cr0ack, SMMUv3State),
1483         VMSTATE_UINT32(statusr, SMMUv3State),
1484         VMSTATE_UINT32(irq_ctrl, SMMUv3State),
1485         VMSTATE_UINT32(gerror, SMMUv3State),
1486         VMSTATE_UINT32(gerrorn, SMMUv3State),
1487         VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
1488         VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
1489         VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
1490         VMSTATE_UINT64(strtab_base, SMMUv3State),
1491         VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
1492         VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
1493         VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
1494         VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),
1495 
1496         VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
1497         VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
1498 
1499         VMSTATE_END_OF_LIST(),
1500     },
1501 };
1502 
1503 static void smmuv3_instance_init(Object *obj)
1504 {
1505     /* Nothing much to do here as of now */
1506 }
1507 
1508 static void smmuv3_class_init(ObjectClass *klass, void *data)
1509 {
1510     DeviceClass *dc = DEVICE_CLASS(klass);
1511     SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);
1512 
1513     dc->vmsd = &vmstate_smmuv3;
1514     device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
1515     c->parent_realize = dc->realize;
1516     dc->realize = smmu_realize;
1517 }
1518 
1519 static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
1520                                       IOMMUNotifierFlag old,
1521                                       IOMMUNotifierFlag new,
1522                                       Error **errp)
1523 {
1524     SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
1525     SMMUv3State *s3 = sdev->smmu;
1526     SMMUState *s = &(s3->smmu_state);
1527 
1528     if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
1529         error_setg(errp, "SMMUv3 does not support dev-iotlb yet");
1530         return -EINVAL;
1531     }
1532 
1533     if (new & IOMMU_NOTIFIER_MAP) {
1534         error_setg(errp,
1535                    "device %02x.%02x.%x requires iommu MAP notifier which is "
1536                    "not currently supported", pci_bus_num(sdev->bus),
1537                    PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn));
1538         return -EINVAL;
1539     }
1540 
1541     if (old == IOMMU_NOTIFIER_NONE) {
1542         trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
1543         QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next);
1544     } else if (new == IOMMU_NOTIFIER_NONE) {
1545         trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
1546         QLIST_REMOVE(sdev, next);
1547     }
1548     return 0;
1549 }
1550 
1551 static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
1552                                                   void *data)
1553 {
1554     IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
1555 
1556     imrc->translate = smmuv3_translate;
1557     imrc->notify_flag_changed = smmuv3_notify_flag_changed;
1558 }
1559 
1560 static const TypeInfo smmuv3_type_info = {
1561     .name          = TYPE_ARM_SMMUV3,
1562     .parent        = TYPE_ARM_SMMU,
1563     .instance_size = sizeof(SMMUv3State),
1564     .instance_init = smmuv3_instance_init,
1565     .class_size    = sizeof(SMMUv3Class),
1566     .class_init    = smmuv3_class_init,
1567 };
1568 
1569 static const TypeInfo smmuv3_iommu_memory_region_info = {
1570     .parent = TYPE_IOMMU_MEMORY_REGION,
1571     .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
1572     .class_init = smmuv3_iommu_memory_region_class_init,
1573 };
1574 
1575 static void smmuv3_register_types(void)
1576 {
1577     type_register(&smmuv3_type_info);
1578     type_register(&smmuv3_iommu_memory_region_info);
1579 }
1580 
1581 type_init(smmuv3_register_types)
1582 
1583