xref: /openbmc/qemu/hw/arm/smmuv3.c (revision db725815)
1 /*
2  * Copyright (C) 2014-2016 Broadcom Corporation
3  * Copyright (c) 2017 Red Hat, Inc.
4  * Written by Prem Mallappa, Eric Auger
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program; if not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #include "qemu/osdep.h"
20 #include "hw/boards.h"
21 #include "hw/irq.h"
22 #include "sysemu/sysemu.h"
23 #include "hw/sysbus.h"
24 #include "migration/vmstate.h"
25 #include "hw/qdev-core.h"
26 #include "hw/pci/pci.h"
27 #include "exec/address-spaces.h"
28 #include "cpu.h"
29 #include "trace.h"
30 #include "qemu/log.h"
31 #include "qemu/error-report.h"
32 #include "qapi/error.h"
33 
34 #include "hw/arm/smmuv3.h"
35 #include "smmuv3-internal.h"
36 
37 /**
38  * smmuv3_trigger_irq - pulse @irq if enabled and update
39  * GERROR register in case of GERROR interrupt
40  *
41  * @irq: irq type
42  * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
43  */
44 static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
45                                uint32_t gerror_mask)
46 {
47 
48     bool pulse = false;
49 
50     switch (irq) {
51     case SMMU_IRQ_EVTQ:
52         pulse = smmuv3_eventq_irq_enabled(s);
53         break;
54     case SMMU_IRQ_PRIQ:
55         qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
56         break;
57     case SMMU_IRQ_CMD_SYNC:
58         pulse = true;
59         break;
60     case SMMU_IRQ_GERROR:
61     {
62         uint32_t pending = s->gerror ^ s->gerrorn;
63         uint32_t new_gerrors = ~pending & gerror_mask;
64 
65         if (!new_gerrors) {
66             /* only toggle non-pending errors */
67             return;
68         }
69         s->gerror ^= new_gerrors;
70         trace_smmuv3_write_gerror(new_gerrors, s->gerror);
71 
72         pulse = smmuv3_gerror_irq_enabled(s);
73         break;
74     }
75     }
76     if (pulse) {
77         trace_smmuv3_trigger_irq(irq);
78         qemu_irq_pulse(s->irq[irq]);
79     }
80 }
81 
82 static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
83 {
84     uint32_t pending = s->gerror ^ s->gerrorn;
85     uint32_t toggled = s->gerrorn ^ new_gerrorn;
86 
87     if (toggled & ~pending) {
88         qemu_log_mask(LOG_GUEST_ERROR,
89                       "guest toggles non-pending errors = 0x%x\n",
90                       toggled & ~pending);
91     }
92 
93     /*
94      * We do not raise any error in case the guest toggles bits corresponding
95      * to IRQs that are not active (CONSTRAINED UNPREDICTABLE)
96      */
97     s->gerrorn = new_gerrorn;
98 
99     trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
100 }
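
/*
 * A minimal worked example of the GERROR/GERRORN toggle protocol used
 * above: an error is "pending" while the corresponding bits of GERROR and
 * GERRORN differ; the device toggles GERROR to raise it and the guest
 * toggles GERRORN to acknowledge it. Illustration only, values made up.
 */
#if 0 /* illustrative sketch, not compiled */
static void gerror_toggle_example(void)
{
    uint32_t gerror = 0, gerrorn = 0;

    gerror ^= 0x1;                       /* device raises error bit 0 */
    g_assert((gerror ^ gerrorn) == 0x1); /* bit 0 is now pending */

    gerrorn ^= 0x1;                      /* guest acknowledges it */
    g_assert((gerror ^ gerrorn) == 0);   /* nothing pending any more */
}
#endif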
101 
102 static inline MemTxResult queue_read(SMMUQueue *q, void *data)
103 {
104     dma_addr_t addr = Q_CONS_ENTRY(q);
105 
106     return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
107 }
108 
109 static MemTxResult queue_write(SMMUQueue *q, void *data)
110 {
111     dma_addr_t addr = Q_PROD_ENTRY(q);
112     MemTxResult ret;
113 
114     ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
115     if (ret != MEMTX_OK) {
116         return ret;
117     }
118 
119     queue_prod_incr(q);
120     return MEMTX_OK;
121 }
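
/*
 * Q_CONS_ENTRY()/Q_PROD_ENTRY() and queue_prod_incr() come from
 * smmuv3-internal.h. Per the SMMUv3 spec, PROD/CONS hold the queue index
 * in their low log2size bits plus a wrap flag just above, and an entry
 * lives at base + index * entry_size. The sketch below only illustrates
 * that arithmetic; it is an assumption about the helpers, not their
 * actual definition.
 */
#if 0 /* illustrative sketch, not compiled */
static dma_addr_t queue_entry_addr_sketch(dma_addr_t base, uint32_t reg,
                                          uint8_t log2size, size_t entry_size)
{
    uint32_t index = reg & ((1 << log2size) - 1); /* strip the wrap bit */

    return base + (dma_addr_t)index * entry_size;
}
#endif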
122 
123 static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
124 {
125     SMMUQueue *q = &s->eventq;
126     MemTxResult r;
127 
128     if (!smmuv3_eventq_enabled(s)) {
129         return MEMTX_ERROR;
130     }
131 
132     if (smmuv3_q_full(q)) {
133         return MEMTX_ERROR;
134     }
135 
136     r = queue_write(q, evt);
137     if (r != MEMTX_OK) {
138         return r;
139     }
140 
141     if (!smmuv3_q_empty(q)) {
142         smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
143     }
144     return MEMTX_OK;
145 }
146 
147 void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
148 {
149     Evt evt = {};
150     MemTxResult r;
151 
152     if (!smmuv3_eventq_enabled(s)) {
153         return;
154     }
155 
156     EVT_SET_TYPE(&evt, info->type);
157     EVT_SET_SID(&evt, info->sid);
158 
159     switch (info->type) {
160     case SMMU_EVT_NONE:
161         return;
162     case SMMU_EVT_F_UUT:
163         EVT_SET_SSID(&evt, info->u.f_uut.ssid);
164         EVT_SET_SSV(&evt,  info->u.f_uut.ssv);
165         EVT_SET_ADDR(&evt, info->u.f_uut.addr);
166         EVT_SET_RNW(&evt,  info->u.f_uut.rnw);
167         EVT_SET_PNU(&evt,  info->u.f_uut.pnu);
168         EVT_SET_IND(&evt,  info->u.f_uut.ind);
169         break;
170     case SMMU_EVT_C_BAD_STREAMID:
171         EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
172         EVT_SET_SSV(&evt,  info->u.c_bad_streamid.ssv);
173         break;
174     case SMMU_EVT_F_STE_FETCH:
175         EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
176         EVT_SET_SSV(&evt,  info->u.f_ste_fetch.ssv);
177         EVT_SET_ADDR(&evt, info->u.f_ste_fetch.addr);
178         break;
179     case SMMU_EVT_C_BAD_STE:
180         EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
181         EVT_SET_SSV(&evt,  info->u.c_bad_ste.ssv);
182         break;
183     case SMMU_EVT_F_STREAM_DISABLED:
184         break;
185     case SMMU_EVT_F_TRANS_FORBIDDEN:
186         EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
187         EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
188         break;
189     case SMMU_EVT_C_BAD_SUBSTREAMID:
190         EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
191         break;
192     case SMMU_EVT_F_CD_FETCH:
193         EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
194         EVT_SET_SSV(&evt,  info->u.f_cd_fetch.ssv);
195         EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
196         break;
197     case SMMU_EVT_C_BAD_CD:
198         EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
199         EVT_SET_SSV(&evt,  info->u.c_bad_cd.ssv);
200         break;
201     case SMMU_EVT_F_WALK_EABT:
202     case SMMU_EVT_F_TRANSLATION:
203     case SMMU_EVT_F_ADDR_SIZE:
204     case SMMU_EVT_F_ACCESS:
205     case SMMU_EVT_F_PERMISSION:
206         EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
207         EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
208         EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
209         EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
210         EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
211         EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
212         EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
213         EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
214         EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
215         EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
216         EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
217         break;
218     case SMMU_EVT_F_CFG_CONFLICT:
219         EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
220         EVT_SET_SSV(&evt,  info->u.f_cfg_conflict.ssv);
221         break;
222     /* rest is not implemented */
223     case SMMU_EVT_F_BAD_ATS_TREQ:
224     case SMMU_EVT_F_TLB_CONFLICT:
225     case SMMU_EVT_E_PAGE_REQ:
226     default:
227         g_assert_not_reached();
228     }
229 
230     trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
231     r = smmuv3_write_eventq(s, &evt);
232     if (r != MEMTX_OK) {
233         smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
234     }
235     info->recorded = true;
236 }
237 
238 static void smmuv3_init_regs(SMMUv3State *s)
239 {
240     /**
241      * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
242      *       multi-level stream table
243      */
244     s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
245     s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
246     s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
247     s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
248     s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
249     s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
250     /* terminated transactions are always aborted (an error is returned) */
251     s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
252     /* 2-level stream table supported */
253     s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);
254 
255     s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
256     s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
257     s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS,   SMMU_CMDQS);
258 
259     /* 4K and 64K granule support */
260     s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
261     s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
262     s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */
263 
264     s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
265     s->cmdq.prod = 0;
266     s->cmdq.cons = 0;
267     s->cmdq.entry_size = sizeof(struct Cmd);
268     s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
269     s->eventq.prod = 0;
270     s->eventq.cons = 0;
271     s->eventq.entry_size = sizeof(struct Evt);
272 
273     s->features = 0;
274     s->sid_split = 0;
275 }
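
/*
 * FIELD_DP32()/FIELD_EX32() are the generic register field helpers from
 * hw/registerfields.h; the FIELD(IDR0, S1P, ...) declarations giving each
 * field its shift and length live in smmuv3-internal.h. Roughly, one of
 * the assignments above expands as sketched below (the *_SHIFT/*_LENGTH
 * constants are generated by the FIELD() macro).
 */
#if 0 /* illustrative sketch, not compiled */
static uint32_t idr0_set_s1p_sketch(uint32_t idr0)
{
    /* roughly what FIELD_DP32(idr0, IDR0, S1P, 1) evaluates to */
    return deposit32(idr0, R_IDR0_S1P_SHIFT, R_IDR0_S1P_LENGTH, 1);
}
#endif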
276 
277 static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
278                         SMMUEventInfo *event)
279 {
280     int ret;
281 
282     trace_smmuv3_get_ste(addr);
283     /* TODO: guarantee 64-bit single-copy atomicity */
284     ret = dma_memory_read(&address_space_memory, addr,
285                           (void *)buf, sizeof(*buf));
286     if (ret != MEMTX_OK) {
287         qemu_log_mask(LOG_GUEST_ERROR,
288                       "Cannot fetch STE at address=0x%"PRIx64"\n", addr);
289         event->type = SMMU_EVT_F_STE_FETCH;
290         event->u.f_ste_fetch.addr = addr;
291         return -EINVAL;
292     }
293     return 0;
294 
295 }
296 
297 /* @ssid > 0 not supported yet */
298 static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
299                        CD *buf, SMMUEventInfo *event)
300 {
301     dma_addr_t addr = STE_CTXPTR(ste);
302     int ret;
303 
304     trace_smmuv3_get_cd(addr);
305     /* TODO: guarantee 64-bit single-copy atomicity */
306     ret = dma_memory_read(&address_space_memory, addr,
307                           (void *)buf, sizeof(*buf));
308     if (ret != MEMTX_OK) {
309         qemu_log_mask(LOG_GUEST_ERROR,
310                       "Cannot fetch CD at address=0x%"PRIx64"\n", addr);
311         event->type = SMMU_EVT_F_CD_FETCH;
312         event->u.f_cd_fetch.addr = addr;
313         return -EINVAL;
314     }
315     return 0;
316 }
317 
318 /* Returns < 0 in case of invalid STE, 0 otherwise */
319 static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
320                       STE *ste, SMMUEventInfo *event)
321 {
322     uint32_t config;
323 
324     if (!STE_VALID(ste)) {
325         goto bad_ste;
326     }
327 
328     config = STE_CONFIG(ste);
329 
330     if (STE_CFG_ABORT(config)) {
331         cfg->aborted = true;
332         return 0;
333     }
334 
335     if (STE_CFG_BYPASS(config)) {
336         cfg->bypassed = true;
337         return 0;
338     }
339 
340     if (STE_CFG_S2_ENABLED(config)) {
341         qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
342         goto bad_ste;
343     }
344 
345     if (STE_S1CDMAX(ste) != 0) {
346         qemu_log_mask(LOG_UNIMP,
347                       "SMMUv3 does not support multiple context descriptors yet\n");
348         goto bad_ste;
349     }
350 
351     if (STE_S1STALLD(ste)) {
352         qemu_log_mask(LOG_UNIMP,
353                       "SMMUv3 S1 stalling fault model not allowed yet\n");
354         goto bad_ste;
355     }
356     return 0;
357 
358 bad_ste:
359     event->type = SMMU_EVT_C_BAD_STE;
360     return -EINVAL;
361 }
362 
363 /**
364  * smmu_find_ste - Return the stream table entry associated
365  * with the sid
366  *
367  * @s: smmuv3 handle
368  * @sid: stream ID
369  * @ste: returned stream table entry
370  * @event: handle to an event info
371  *
372  * Supports linear and 2-level stream tables
373  * Return 0 on success, -EINVAL otherwise
374  */
375 static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
376                          SMMUEventInfo *event)
377 {
378     dma_addr_t addr;
379     int ret;
380 
381     trace_smmuv3_find_ste(sid, s->features, s->sid_split);
382     /* Check SID range */
383     if (sid >= (1 << SMMU_IDR1_SIDSIZE)) {
384         event->type = SMMU_EVT_C_BAD_STREAMID;
385         return -EINVAL;
386     }
387     if (s->features & SMMU_FEATURE_2LVL_STE) {
388         int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
389         dma_addr_t strtab_base, l1ptr, l2ptr;
390         STEDesc l1std;
391 
392         strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK;
393         l1_ste_offset = sid >> s->sid_split;
394         l2_ste_offset = sid & ((1 << s->sid_split) - 1);
395         l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
396         /* TODO: guarantee 64-bit single-copy atomicity */
397         ret = dma_memory_read(&address_space_memory, l1ptr,
398                               (uint8_t *)&l1std, sizeof(l1std));
399         if (ret != MEMTX_OK) {
400             qemu_log_mask(LOG_GUEST_ERROR,
401                           "Could not read L1PTR at 0x%"PRIx64"\n", l1ptr);
402             event->type = SMMU_EVT_F_STE_FETCH;
403             event->u.f_ste_fetch.addr = l1ptr;
404             return -EINVAL;
405         }
406 
407         span = L1STD_SPAN(&l1std);
408 
409         if (!span) {
410             /* l2ptr is not valid */
411             qemu_log_mask(LOG_GUEST_ERROR,
412                           "invalid sid=%d (L1STD span=0)\n", sid);
413             event->type = SMMU_EVT_C_BAD_STREAMID;
414             return -EINVAL;
415         }
416         max_l2_ste = (1 << span) - 1;
417         l2ptr = l1std_l2ptr(&l1std);
418         trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
419                                    l2ptr, l2_ste_offset, max_l2_ste);
420         if (l2_ste_offset > max_l2_ste) {
421             qemu_log_mask(LOG_GUEST_ERROR,
422                           "l2_ste_offset=%d > max_l2_ste=%d\n",
423                           l2_ste_offset, max_l2_ste);
424             event->type = SMMU_EVT_C_BAD_STE;
425             return -EINVAL;
426         }
427         addr = l2ptr + l2_ste_offset * sizeof(*ste);
428     } else {
429         addr = s->strtab_base + sid * sizeof(*ste);
430     }
431 
432     if (smmu_get_ste(s, addr, ste, event)) {
433         return -EINVAL;
434     }
435 
436     return 0;
437 }
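
/*
 * A worked example of the two-level split above: with sid_split = 8 and
 * sid = 0x1234, the L1 index is sid >> 8 = 0x12 and the L2 index is
 * sid & 0xff = 0x34, so the STE is fetched from l2ptr + 0x34 * sizeof(STE).
 * Values are made up for illustration.
 */
#if 0 /* illustrative sketch, not compiled */
static void find_ste_split_example(void)
{
    uint32_t sid = 0x1234;
    uint8_t sid_split = 8;
    int l1_ste_offset = sid >> sid_split;             /* 0x12 */
    int l2_ste_offset = sid & ((1 << sid_split) - 1); /* 0x34 */

    g_assert(l1_ste_offset == 0x12 && l2_ste_offset == 0x34);
}
#endif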
438 
439 static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
440 {
441     int ret = -EINVAL;
442     int i;
443 
444     if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
445         goto bad_cd;
446     }
447     if (!CD_A(cd)) {
448         goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
449     }
450     if (CD_S(cd)) {
451         goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
452     }
453     if (CD_HA(cd) || CD_HD(cd)) {
454         goto bad_cd; /* HTTU = 0 */
455     }
456 
457     /* we only support AArch64 and stage-1 translation at the moment */
458     cfg->aa64 = true;
459     cfg->stage = 1;
460 
461     cfg->oas = oas2bits(CD_IPS(cd));
462     cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
463     cfg->tbi = CD_TBI(cd);
464     cfg->asid = CD_ASID(cd);
465 
466     trace_smmuv3_decode_cd(cfg->oas);
467 
468     /* decode data dependent on TT */
469     for (i = 0; i <= 1; i++) {
470         int tg, tsz;
471         SMMUTransTableInfo *tt = &cfg->tt[i];
472 
473         cfg->tt[i].disabled = CD_EPD(cd, i);
474         if (cfg->tt[i].disabled) {
475             continue;
476         }
477 
478         tsz = CD_TSZ(cd, i);
479         if (tsz < 16 || tsz > 39) {
480             goto bad_cd;
481         }
482 
483         tg = CD_TG(cd, i);
484         tt->granule_sz = tg2granule(tg, i);
485         if ((tt->granule_sz != 12 && tt->granule_sz != 16) || CD_ENDI(cd)) {
486             goto bad_cd;
487         }
488 
489         tt->tsz = tsz;
490         tt->ttb = CD_TTB(cd, i);
491         if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
492             goto bad_cd;
493         }
494         trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz);
495     }
496 
497     event->record_trans_faults = CD_R(cd);
498 
499     return 0;
500 
501 bad_cd:
502     event->type = SMMU_EVT_C_BAD_CD;
503     return ret;
504 }
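
/*
 * A worked example of the fields decoded above: with tsz = 25 the
 * translation table covers a 64 - 25 = 39-bit input address range, and
 * with granule_sz = 12 (4K pages) the page offset is the low 12 bits,
 * i.e. the page mask later used by smmuv3_translate() is
 * (1ULL << 12) - 1 = 0xfff. Values are only an illustration.
 */
#if 0 /* illustrative sketch, not compiled */
static void decode_cd_tt_example(void)
{
    int tsz = 25, granule_sz = 12;
    int input_bits = 64 - tsz;                     /* 39-bit IOVA space */
    uint64_t page_mask = (1ULL << granule_sz) - 1; /* 0xfff */

    g_assert(input_bits == 39 && page_mask == 0xfff);
}
#endif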
505 
506 /**
507  * smmuv3_decode_config - Prepare the translation configuration
508  * for the @mr iommu region
509  * @mr: iommu memory region the translation config must be prepared for
510  * @cfg: output translation configuration which is populated through
511  *       the different configuration decoding steps
512  * @event: must be zero'ed by the caller
513  *
514  * Return < 0 in case of config decoding error (@event is filled
515  * accordingly), 0 otherwise.
516  */
517 static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
518                                 SMMUEventInfo *event)
519 {
520     SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
521     uint32_t sid = smmu_get_sid(sdev);
522     SMMUv3State *s = sdev->smmu;
523     int ret;
524     STE ste;
525     CD cd;
526 
527     ret = smmu_find_ste(s, sid, &ste, event);
528     if (ret) {
529         return ret;
530     }
531 
532     ret = decode_ste(s, cfg, &ste, event);
533     if (ret) {
534         return ret;
535     }
536 
537     if (cfg->aborted || cfg->bypassed) {
538         return 0;
539     }
540 
541     ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
542     if (ret) {
543         return ret;
544     }
545 
546     return decode_cd(cfg, &cd, event);
547 }
548 
549 /**
550  * smmuv3_get_config - Look up a cached copy of the configuration data for
551  * @sdev; on a cache miss, decode the configuration structures from
552  * guest RAM.
553  *
554  * @sdev: SMMUDevice handle
555  * @event: output event info
556  *
557  * The configuration cache contains data resulting from both STE and CD
558  * decoding, in the form of an SMMUTransCfg struct. The hash table is indexed
559  * by the SMMUDevice handle.
560  */
561 static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
562 {
563     SMMUv3State *s = sdev->smmu;
564     SMMUState *bc = &s->smmu_state;
565     SMMUTransCfg *cfg;
566 
567     cfg = g_hash_table_lookup(bc->configs, sdev);
568     if (cfg) {
569         sdev->cfg_cache_hits++;
570         trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
571                             sdev->cfg_cache_hits, sdev->cfg_cache_misses,
572                             100 * sdev->cfg_cache_hits /
573                             (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
574     } else {
575         sdev->cfg_cache_misses++;
576         trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
577                             sdev->cfg_cache_hits, sdev->cfg_cache_misses,
578                             100 * sdev->cfg_cache_hits /
579                             (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
580         cfg = g_new0(SMMUTransCfg, 1);
581 
582         if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
583             g_hash_table_insert(bc->configs, sdev, cfg);
584         } else {
585             g_free(cfg);
586             cfg = NULL;
587         }
588     }
589     return cfg;
590 }
591 
592 static void smmuv3_flush_config(SMMUDevice *sdev)
593 {
594     SMMUv3State *s = sdev->smmu;
595     SMMUState *bc = &s->smmu_state;
596 
597     trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
598     g_hash_table_remove(bc->configs, sdev);
599 }
600 
601 static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
602                                       IOMMUAccessFlags flag, int iommu_idx)
603 {
604     SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
605     SMMUv3State *s = sdev->smmu;
606     uint32_t sid = smmu_get_sid(sdev);
607     SMMUEventInfo event = {.type = SMMU_EVT_NONE, .sid = sid};
608     SMMUPTWEventInfo ptw_info = {};
609     SMMUTranslationStatus status;
610     SMMUState *bs = ARM_SMMU(s);
611     uint64_t page_mask, aligned_addr;
612     IOMMUTLBEntry *cached_entry = NULL;
613     SMMUTransTableInfo *tt;
614     SMMUTransCfg *cfg = NULL;
615     IOMMUTLBEntry entry = {
616         .target_as = &address_space_memory,
617         .iova = addr,
618         .translated_addr = addr,
619         .addr_mask = ~(hwaddr)0,
620         .perm = IOMMU_NONE,
621     };
622     SMMUIOTLBKey key, *new_key;
623 
624     qemu_mutex_lock(&s->mutex);
625 
626     if (!smmu_enabled(s)) {
627         status = SMMU_TRANS_DISABLE;
628         goto epilogue;
629     }
630 
631     cfg = smmuv3_get_config(sdev, &event);
632     if (!cfg) {
633         status = SMMU_TRANS_ERROR;
634         goto epilogue;
635     }
636 
637     if (cfg->aborted) {
638         status = SMMU_TRANS_ABORT;
639         goto epilogue;
640     }
641 
642     if (cfg->bypassed) {
643         status = SMMU_TRANS_BYPASS;
644         goto epilogue;
645     }
646 
647     tt = select_tt(cfg, addr);
648     if (!tt) {
649         if (event.record_trans_faults) {
650             event.type = SMMU_EVT_F_TRANSLATION;
651             event.u.f_translation.addr = addr;
652             event.u.f_translation.rnw = flag & 0x1;
653         }
654         status = SMMU_TRANS_ERROR;
655         goto epilogue;
656     }
657 
658     page_mask = (1ULL << (tt->granule_sz)) - 1;
659     aligned_addr = addr & ~page_mask;
660 
661     key.asid = cfg->asid;
662     key.iova = aligned_addr;
663 
664     cached_entry = g_hash_table_lookup(bs->iotlb, &key);
665     if (cached_entry) {
666         cfg->iotlb_hits++;
667         trace_smmu_iotlb_cache_hit(cfg->asid, aligned_addr,
668                                    cfg->iotlb_hits, cfg->iotlb_misses,
669                                    100 * cfg->iotlb_hits /
670                                    (cfg->iotlb_hits + cfg->iotlb_misses));
671         if ((flag & IOMMU_WO) && !(cached_entry->perm & IOMMU_WO)) {
672             status = SMMU_TRANS_ERROR;
673             if (event.record_trans_faults) {
674                 event.type = SMMU_EVT_F_PERMISSION;
675                 event.u.f_permission.addr = addr;
676                 event.u.f_permission.rnw = flag & 0x1;
677             }
678         } else {
679             status = SMMU_TRANS_SUCCESS;
680         }
681         goto epilogue;
682     }
683 
684     cfg->iotlb_misses++;
685     trace_smmu_iotlb_cache_miss(cfg->asid, addr & ~page_mask,
686                                 cfg->iotlb_hits, cfg->iotlb_misses,
687                                 100 * cfg->iotlb_hits /
688                                 (cfg->iotlb_hits + cfg->iotlb_misses));
689 
690     if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) {
691         smmu_iotlb_inv_all(bs);
692     }
693 
694     cached_entry = g_new0(IOMMUTLBEntry, 1);
695 
696     if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
697         g_free(cached_entry);
698         switch (ptw_info.type) {
699         case SMMU_PTW_ERR_WALK_EABT:
700             event.type = SMMU_EVT_F_WALK_EABT;
701             event.u.f_walk_eabt.addr = addr;
702             event.u.f_walk_eabt.rnw = flag & 0x1;
703             event.u.f_walk_eabt.class = 0x1;
704             event.u.f_walk_eabt.addr2 = ptw_info.addr;
705             break;
706         case SMMU_PTW_ERR_TRANSLATION:
707             if (event.record_trans_faults) {
708                 event.type = SMMU_EVT_F_TRANSLATION;
709                 event.u.f_translation.addr = addr;
710                 event.u.f_translation.rnw = flag & 0x1;
711             }
712             break;
713         case SMMU_PTW_ERR_ADDR_SIZE:
714             if (event.record_trans_faults) {
715                 event.type = SMMU_EVT_F_ADDR_SIZE;
716                 event.u.f_addr_size.addr = addr;
717                 event.u.f_addr_size.rnw = flag & 0x1;
718             }
719             break;
720         case SMMU_PTW_ERR_ACCESS:
721             if (event.record_trans_faults) {
722                 event.type = SMMU_EVT_F_ACCESS;
723                 event.u.f_access.addr = addr;
724                 event.u.f_access.rnw = flag & 0x1;
725             }
726             break;
727         case SMMU_PTW_ERR_PERMISSION:
728             if (event.record_trans_faults) {
729                 event.type = SMMU_EVT_F_PERMISSION;
730                 event.u.f_permission.addr = addr;
731                 event.u.f_permission.rnw = flag & 0x1;
732             }
733             break;
734         default:
735             g_assert_not_reached();
736         }
737         status = SMMU_TRANS_ERROR;
738     } else {
739         new_key = g_new0(SMMUIOTLBKey, 1);
740         new_key->asid = cfg->asid;
741         new_key->iova = aligned_addr;
742         g_hash_table_insert(bs->iotlb, new_key, cached_entry);
743         status = SMMU_TRANS_SUCCESS;
744     }
745 
746 epilogue:
747     qemu_mutex_unlock(&s->mutex);
748     switch (status) {
749     case SMMU_TRANS_SUCCESS:
750         entry.perm = flag;
751         entry.translated_addr = cached_entry->translated_addr +
752                                     (addr & page_mask);
753         entry.addr_mask = cached_entry->addr_mask;
754         trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
755                                        entry.translated_addr, entry.perm);
756         break;
757     case SMMU_TRANS_DISABLE:
758         entry.perm = flag;
759         entry.addr_mask = ~TARGET_PAGE_MASK;
760         trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
761                                       entry.perm);
762         break;
763     case SMMU_TRANS_BYPASS:
764         entry.perm = flag;
765         entry.addr_mask = ~TARGET_PAGE_MASK;
766         trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
767                                       entry.perm);
768         break;
769     case SMMU_TRANS_ABORT:
770         /* no event is recorded on abort */
771         trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
772                                      entry.perm);
773         break;
774     case SMMU_TRANS_ERROR:
775         qemu_log_mask(LOG_GUEST_ERROR,
776                       "%s translation failed for iova=0x%"PRIx64"(%s)\n",
777                       mr->parent_obj.name, addr, smmu_event_string(event.type));
778         smmuv3_record_event(s, &event);
779         break;
780     }
781 
782     return entry;
783 }
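
/*
 * On the SMMU_TRANS_SUCCESS path above, the returned address is the
 * page-aligned output of the page table walk plus the offset of the
 * original access within the page. A small worked example with a 4K
 * granule (addresses are made up):
 */
#if 0 /* illustrative sketch, not compiled */
static void translate_compose_example(void)
{
    uint64_t page_mask = 0xfff;                 /* 4K granule */
    hwaddr addr = 0x8000a123;                   /* IOVA being accessed */
    hwaddr page_pa = 0x40005000;                /* PTW result for the page */
    hwaddr out = page_pa + (addr & page_mask);  /* 0x40005123 */

    g_assert(out == 0x40005123);
}
#endif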
784 
785 /**
786  * smmuv3_notify_iova - call the notifier @n for a given
787  * @asid and @iova tuple.
788  *
789  * @mr: IOMMU memory region handle
790  * @n: notifier to be called
791  * @asid: address space ID or negative value if we don't care
792  * @iova: virtual address
793  */
794 static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
795                                IOMMUNotifier *n,
796                                int asid,
797                                dma_addr_t iova)
798 {
799     SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
800     SMMUEventInfo event = {};
801     SMMUTransTableInfo *tt;
802     SMMUTransCfg *cfg;
803     IOMMUTLBEntry entry;
804 
805     cfg = smmuv3_get_config(sdev, &event);
806     if (!cfg) {
807         qemu_log_mask(LOG_GUEST_ERROR,
808                       "%s error decoding the configuration for iommu mr=%s\n",
809                       __func__, mr->parent_obj.name);
810         return;
811     }
812 
813     if (asid >= 0 && cfg->asid != asid) {
814         return;
815     }
816 
817     tt = select_tt(cfg, iova);
818     if (!tt) {
819         return;
820     }
821 
822     entry.target_as = &address_space_memory;
823     entry.iova = iova;
824     entry.addr_mask = (1 << tt->granule_sz) - 1;
825     entry.perm = IOMMU_NONE;
826 
827     memory_region_notify_one(n, &entry);
828 }
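
/*
 * The entry built above describes an unmap (perm == IOMMU_NONE) of a
 * single granule. For instance, with a 4K granule and iova = 0xcafe000
 * the notifier is told that [0xcafe000, 0xcafefff] is no longer valid.
 * A sketch of such an entry, with made-up values:
 */
#if 0 /* illustrative sketch, not compiled */
static const IOMMUTLBEntry unmap_example = {
    .target_as = &address_space_memory,
    .iova      = 0xcafe000,
    .addr_mask = (1 << 12) - 1,  /* one 4K granule: 0xcafe000-0xcafefff */
    .perm      = IOMMU_NONE,     /* i.e. an invalidation */
};
#endif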
829 
830 /* invalidate an asid/iova tuple in all memory regions */
831 static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova)
832 {
833     SMMUDevice *sdev;
834 
835     QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
836         IOMMUMemoryRegion *mr = &sdev->iommu;
837         IOMMUNotifier *n;
838 
839         trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova);
840 
841         IOMMU_NOTIFIER_FOREACH(n, mr) {
842             smmuv3_notify_iova(mr, n, asid, iova);
843         }
844     }
845 }
846 
847 static int smmuv3_cmdq_consume(SMMUv3State *s)
848 {
849     SMMUState *bs = ARM_SMMU(s);
850     SMMUCmdError cmd_error = SMMU_CERROR_NONE;
851     SMMUQueue *q = &s->cmdq;
852     SMMUCommandType type = 0;
853 
854     if (!smmuv3_cmdq_enabled(s)) {
855         return 0;
856     }
857     /*
858      * Some commands depend on register values, typically CR0. In case those
859      * register values change while handling the command, the spec says it
860      * is UNPREDICTABLE whether the command is interpreted under the new
861      * or the old value.
862      */
863 
864     while (!smmuv3_q_empty(q)) {
865         uint32_t pending = s->gerror ^ s->gerrorn;
866         Cmd cmd;
867 
868         trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
869                                   Q_PROD_WRAP(q), Q_CONS_WRAP(q));
870 
871         if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
872             break;
873         }
874 
875         if (queue_read(q, &cmd) != MEMTX_OK) {
876             cmd_error = SMMU_CERROR_ABT;
877             break;
878         }
879 
880         type = CMD_TYPE(&cmd);
881 
882         trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));
883 
884         qemu_mutex_lock(&s->mutex);
885         switch (type) {
886         case SMMU_CMD_SYNC:
887             if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
888                 smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
889             }
890             break;
891         case SMMU_CMD_PREFETCH_CONFIG:
892         case SMMU_CMD_PREFETCH_ADDR:
893             break;
894         case SMMU_CMD_CFGI_STE:
895         {
896             uint32_t sid = CMD_SID(&cmd);
897             IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
898             SMMUDevice *sdev;
899 
900             if (CMD_SSEC(&cmd)) {
901                 cmd_error = SMMU_CERROR_ILL;
902                 break;
903             }
904 
905             if (!mr) {
906                 break;
907             }
908 
909             trace_smmuv3_cmdq_cfgi_ste(sid);
910             sdev = container_of(mr, SMMUDevice, iommu);
911             smmuv3_flush_config(sdev);
912 
913             break;
914         }
915         case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
916         {
917             uint32_t start = CMD_SID(&cmd), end, i;
918             uint8_t range = CMD_STE_RANGE(&cmd);
919 
920             if (CMD_SSEC(&cmd)) {
921                 cmd_error = SMMU_CERROR_ILL;
922                 break;
923             }
924 
925             end = start + (1 << (range + 1)) - 1;
926             trace_smmuv3_cmdq_cfgi_ste_range(start, end);
927 
928             for (i = start; i <= end; i++) {
929                 IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, i);
930                 SMMUDevice *sdev;
931 
932                 if (!mr) {
933                     continue;
934                 }
935                 sdev = container_of(mr, SMMUDevice, iommu);
936                 smmuv3_flush_config(sdev);
937             }
938             break;
939         }
940         case SMMU_CMD_CFGI_CD:
941         case SMMU_CMD_CFGI_CD_ALL:
942         {
943             uint32_t sid = CMD_SID(&cmd);
944             IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
945             SMMUDevice *sdev;
946 
947             if (CMD_SSEC(&cmd)) {
948                 cmd_error = SMMU_CERROR_ILL;
949                 break;
950             }
951 
952             if (!mr) {
953                 break;
954             }
955 
956             trace_smmuv3_cmdq_cfgi_cd(sid);
957             sdev = container_of(mr, SMMUDevice, iommu);
958             smmuv3_flush_config(sdev);
959             break;
960         }
961         case SMMU_CMD_TLBI_NH_ASID:
962         {
963             uint16_t asid = CMD_ASID(&cmd);
964 
965             trace_smmuv3_cmdq_tlbi_nh_asid(asid);
966             smmu_inv_notifiers_all(&s->smmu_state);
967             smmu_iotlb_inv_asid(bs, asid);
968             break;
969         }
970         case SMMU_CMD_TLBI_NH_ALL:
971         case SMMU_CMD_TLBI_NSNH_ALL:
972             trace_smmuv3_cmdq_tlbi_nh();
973             smmu_inv_notifiers_all(&s->smmu_state);
974             smmu_iotlb_inv_all(bs);
975             break;
976         case SMMU_CMD_TLBI_NH_VAA:
977         {
978             dma_addr_t addr = CMD_ADDR(&cmd);
979             uint16_t vmid = CMD_VMID(&cmd);
980 
981             trace_smmuv3_cmdq_tlbi_nh_vaa(vmid, addr);
982             smmuv3_inv_notifiers_iova(bs, -1, addr);
983             smmu_iotlb_inv_all(bs);
984             break;
985         }
986         case SMMU_CMD_TLBI_NH_VA:
987         {
988             uint16_t asid = CMD_ASID(&cmd);
989             uint16_t vmid = CMD_VMID(&cmd);
990             dma_addr_t addr = CMD_ADDR(&cmd);
991             bool leaf = CMD_LEAF(&cmd);
992 
993             trace_smmuv3_cmdq_tlbi_nh_va(vmid, asid, addr, leaf);
994             smmuv3_inv_notifiers_iova(bs, asid, addr);
995             smmu_iotlb_inv_iova(bs, asid, addr);
996             break;
997         }
998         case SMMU_CMD_TLBI_EL3_ALL:
999         case SMMU_CMD_TLBI_EL3_VA:
1000         case SMMU_CMD_TLBI_EL2_ALL:
1001         case SMMU_CMD_TLBI_EL2_ASID:
1002         case SMMU_CMD_TLBI_EL2_VA:
1003         case SMMU_CMD_TLBI_EL2_VAA:
1004         case SMMU_CMD_TLBI_S12_VMALL:
1005         case SMMU_CMD_TLBI_S2_IPA:
1006         case SMMU_CMD_ATC_INV:
1007         case SMMU_CMD_PRI_RESP:
1008         case SMMU_CMD_RESUME:
1009         case SMMU_CMD_STALL_TERM:
1010             trace_smmuv3_unhandled_cmd(type);
1011             break;
1012         default:
1013             cmd_error = SMMU_CERROR_ILL;
1014             qemu_log_mask(LOG_GUEST_ERROR,
1015                           "Illegal command type: %d\n", CMD_TYPE(&cmd));
1016             break;
1017         }
1018         qemu_mutex_unlock(&s->mutex);
1019         if (cmd_error) {
1020             break;
1021         }
1022         /*
1023          * We only increment the cons index after the completion of
1024          * the command. We do that because SYNC returns immediately
1025          * and does not check the completion of previous commands.
1026          */
1027         queue_cons_incr(q);
1028     }
1029 
1030     if (cmd_error) {
1031         trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
1032         smmu_write_cmdq_err(s, cmd_error);
1033         smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
1034     }
1035 
1036     trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
1037                                   Q_PROD_WRAP(q), Q_CONS_WRAP(q));
1038 
1039     return 0;
1040 }
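
/*
 * In the SMMU_CMD_CFGI_STE_RANGE case above, the range field encodes
 * log2(number of StreamIDs) - 1, so the command covers 2^(range + 1)
 * consecutive StreamIDs. For example, start = 0x100 and range = 2 flush
 * the cached configs of SIDs 0x100..0x107 (8 STEs). Example values only.
 */
#if 0 /* illustrative sketch, not compiled */
static void cfgi_ste_range_example(void)
{
    uint32_t start = 0x100;
    uint8_t range = 2;
    uint32_t end = start + (1 << (range + 1)) - 1; /* 0x107 */

    g_assert(end == 0x107);
}
#endif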
1041 
1042 static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
1043                                 uint64_t data, MemTxAttrs attrs)
1044 {
1045     switch (offset) {
1046     case A_GERROR_IRQ_CFG0:
1047         s->gerror_irq_cfg0 = data;
1048         return MEMTX_OK;
1049     case A_STRTAB_BASE:
1050         s->strtab_base = data;
1051         return MEMTX_OK;
1052     case A_CMDQ_BASE:
1053         s->cmdq.base = data;
1054         s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
1055         if (s->cmdq.log2size > SMMU_CMDQS) {
1056             s->cmdq.log2size = SMMU_CMDQS;
1057         }
1058         return MEMTX_OK;
1059     case A_EVENTQ_BASE:
1060         s->eventq.base = data;
1061         s->eventq.log2size = extract64(s->eventq.base, 0, 5);
1062         if (s->eventq.log2size > SMMU_EVENTQS) {
1063             s->eventq.log2size = SMMU_EVENTQS;
1064         }
1065         return MEMTX_OK;
1066     case A_EVENTQ_IRQ_CFG0:
1067         s->eventq_irq_cfg0 = data;
1068         return MEMTX_OK;
1069     default:
1070         qemu_log_mask(LOG_UNIMP,
1071                       "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
1072                       __func__, offset);
1073         return MEMTX_OK;
1074     }
1075 }
1076 
1077 static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
1078                                uint64_t data, MemTxAttrs attrs)
1079 {
1080     switch (offset) {
1081     case A_CR0:
1082         s->cr[0] = data;
1083         s->cr0ack = data & ~SMMU_CR0_RESERVED;
1084         /* in case the command queue has been enabled */
1085         smmuv3_cmdq_consume(s);
1086         return MEMTX_OK;
1087     case A_CR1:
1088         s->cr[1] = data;
1089         return MEMTX_OK;
1090     case A_CR2:
1091         s->cr[2] = data;
1092         return MEMTX_OK;
1093     case A_IRQ_CTRL:
1094         s->irq_ctrl = data;
1095         return MEMTX_OK;
1096     case A_GERRORN:
1097         smmuv3_write_gerrorn(s, data);
1098         /*
1099          * By acknowledging the CMDQ_ERR, SW signals that commands can
1100          * be processed again
1101          */
1102         smmuv3_cmdq_consume(s);
1103         return MEMTX_OK;
1104     case A_GERROR_IRQ_CFG0: /* 64b */
1105         s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
1106         return MEMTX_OK;
1107     case A_GERROR_IRQ_CFG0 + 4:
1108         s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
1109         return MEMTX_OK;
1110     case A_GERROR_IRQ_CFG1:
1111         s->gerror_irq_cfg1 = data;
1112         return MEMTX_OK;
1113     case A_GERROR_IRQ_CFG2:
1114         s->gerror_irq_cfg2 = data;
1115         return MEMTX_OK;
1116     case A_STRTAB_BASE: /* 64b */
1117         s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
1118         return MEMTX_OK;
1119     case A_STRTAB_BASE + 4:
1120         s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
1121         return MEMTX_OK;
1122     case A_STRTAB_BASE_CFG:
1123         s->strtab_base_cfg = data;
1124         if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
1125             s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
1126             s->features |= SMMU_FEATURE_2LVL_STE;
1127         }
1128         return MEMTX_OK;
1129     case A_CMDQ_BASE: /* 64b */
1130         s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
1131         s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
1132         if (s->cmdq.log2size > SMMU_CMDQS) {
1133             s->cmdq.log2size = SMMU_CMDQS;
1134         }
1135         return MEMTX_OK;
1136     case A_CMDQ_BASE + 4: /* 64b */
1137         s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
1138         return MEMTX_OK;
1139     case A_CMDQ_PROD:
1140         s->cmdq.prod = data;
1141         smmuv3_cmdq_consume(s);
1142         return MEMTX_OK;
1143     case A_CMDQ_CONS:
1144         s->cmdq.cons = data;
1145         return MEMTX_OK;
1146     case A_EVENTQ_BASE: /* 64b */
1147         s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
1148         s->eventq.log2size = extract64(s->eventq.base, 0, 5);
1149         if (s->eventq.log2size > SMMU_EVENTQS) {
1150             s->eventq.log2size = SMMU_EVENTQS;
1151         }
1152         return MEMTX_OK;
1153     case A_EVENTQ_BASE + 4:
1154         s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
1155         return MEMTX_OK;
1156     case A_EVENTQ_PROD:
1157         s->eventq.prod = data;
1158         return MEMTX_OK;
1159     case A_EVENTQ_CONS:
1160         s->eventq.cons = data;
1161         return MEMTX_OK;
1162     case A_EVENTQ_IRQ_CFG0: /* 64b */
1163         s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
1164         return MEMTX_OK;
1165     case A_EVENTQ_IRQ_CFG0 + 4:
1166         s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
1167         return MEMTX_OK;
1168     case A_EVENTQ_IRQ_CFG1:
1169         s->eventq_irq_cfg1 = data;
1170         return MEMTX_OK;
1171     case A_EVENTQ_IRQ_CFG2:
1172         s->eventq_irq_cfg2 = data;
1173         return MEMTX_OK;
1174     default:
1175         qemu_log_mask(LOG_UNIMP,
1176                       "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
1177                       __func__, offset);
1178         return MEMTX_OK;
1179     }
1180 }
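
/*
 * The 64-bit registers above (e.g. STRTAB_BASE) accept either a single
 * 64-bit write (handled in smmu_writell()) or two 32-bit writes to
 * offset and offset + 4, merged with deposit64(). A worked example of the
 * two-write case (values are made up):
 */
#if 0 /* illustrative sketch, not compiled */
static void split_write_example(void)
{
    uint64_t strtab_base = 0;

    strtab_base = deposit64(strtab_base, 0, 32, 0x89ab0000);  /* low half */
    strtab_base = deposit64(strtab_base, 32, 32, 0x00000042); /* high half */
    g_assert(strtab_base == 0x0000004289ab0000ULL);
}
#endif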
1181 
1182 static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
1183                                    unsigned size, MemTxAttrs attrs)
1184 {
1185     SMMUState *sys = opaque;
1186     SMMUv3State *s = ARM_SMMUV3(sys);
1187     MemTxResult r;
1188 
1189     /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
1190     offset &= ~0x10000;
1191 
1192     switch (size) {
1193     case 8:
1194         r = smmu_writell(s, offset, data, attrs);
1195         break;
1196     case 4:
1197         r = smmu_writel(s, offset, data, attrs);
1198         break;
1199     default:
1200         r = MEMTX_ERROR;
1201         break;
1202     }
1203 
1204     trace_smmuv3_write_mmio(offset, data, size, r);
1205     return r;
1206 }
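
/*
 * The SMMUv3 register map spans two 64KB pages; this model makes page 1
 * an exact alias of page 0 (a CONSTRAINED UNPREDICTABLE choice, as noted
 * above) by clearing bit 16 of the offset. For example, an access at
 * offset 0x10020 is handled exactly like one at offset 0x20.
 */
#if 0 /* illustrative sketch, not compiled */
static hwaddr fold_page1_example(hwaddr offset)
{
    return offset & ~0x10000; /* 0x10020 -> 0x20 */
}
#endif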
1207 
1208 static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
1209                                uint64_t *data, MemTxAttrs attrs)
1210 {
1211     switch (offset) {
1212     case A_GERROR_IRQ_CFG0:
1213         *data = s->gerror_irq_cfg0;
1214         return MEMTX_OK;
1215     case A_STRTAB_BASE:
1216         *data = s->strtab_base;
1217         return MEMTX_OK;
1218     case A_CMDQ_BASE:
1219         *data = s->cmdq.base;
1220         return MEMTX_OK;
1221     case A_EVENTQ_BASE:
1222         *data = s->eventq.base;
1223         return MEMTX_OK;
1224     default:
1225         *data = 0;
1226         qemu_log_mask(LOG_UNIMP,
1227                       "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
1228                       __func__, offset);
1229         return MEMTX_OK;
1230     }
1231 }
1232 
1233 static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
1234                               uint64_t *data, MemTxAttrs attrs)
1235 {
1236     switch (offset) {
1237     case A_IDREGS ... A_IDREGS + 0x2f:
1238         *data = smmuv3_idreg(offset - A_IDREGS);
1239         return MEMTX_OK;
1240     case A_IDR0 ... A_IDR5:
1241         *data = s->idr[(offset - A_IDR0) / 4];
1242         return MEMTX_OK;
1243     case A_IIDR:
1244         *data = s->iidr;
1245         return MEMTX_OK;
1246     case A_CR0:
1247         *data = s->cr[0];
1248         return MEMTX_OK;
1249     case A_CR0ACK:
1250         *data = s->cr0ack;
1251         return MEMTX_OK;
1252     case A_CR1:
1253         *data = s->cr[1];
1254         return MEMTX_OK;
1255     case A_CR2:
1256         *data = s->cr[2];
1257         return MEMTX_OK;
1258     case A_STATUSR:
1259         *data = s->statusr;
1260         return MEMTX_OK;
1261     case A_IRQ_CTRL:
1262     case A_IRQ_CTRL_ACK:
1263         *data = s->irq_ctrl;
1264         return MEMTX_OK;
1265     case A_GERROR:
1266         *data = s->gerror;
1267         return MEMTX_OK;
1268     case A_GERRORN:
1269         *data = s->gerrorn;
1270         return MEMTX_OK;
1271     case A_GERROR_IRQ_CFG0: /* 64b */
1272         *data = extract64(s->gerror_irq_cfg0, 0, 32);
1273         return MEMTX_OK;
1274     case A_GERROR_IRQ_CFG0 + 4:
1275         *data = extract64(s->gerror_irq_cfg0, 32, 32);
1276         return MEMTX_OK;
1277     case A_GERROR_IRQ_CFG1:
1278         *data = s->gerror_irq_cfg1;
1279         return MEMTX_OK;
1280     case A_GERROR_IRQ_CFG2:
1281         *data = s->gerror_irq_cfg2;
1282         return MEMTX_OK;
1283     case A_STRTAB_BASE: /* 64b */
1284         *data = extract64(s->strtab_base, 0, 32);
1285         return MEMTX_OK;
1286     case A_STRTAB_BASE + 4: /* 64b */
1287         *data = extract64(s->strtab_base, 32, 32);
1288         return MEMTX_OK;
1289     case A_STRTAB_BASE_CFG:
1290         *data = s->strtab_base_cfg;
1291         return MEMTX_OK;
1292     case A_CMDQ_BASE: /* 64b */
1293         *data = extract64(s->cmdq.base, 0, 32);
1294         return MEMTX_OK;
1295     case A_CMDQ_BASE + 4:
1296         *data = extract64(s->cmdq.base, 32, 32);
1297         return MEMTX_OK;
1298     case A_CMDQ_PROD:
1299         *data = s->cmdq.prod;
1300         return MEMTX_OK;
1301     case A_CMDQ_CONS:
1302         *data = s->cmdq.cons;
1303         return MEMTX_OK;
1304     case A_EVENTQ_BASE: /* 64b */
1305         *data = extract64(s->eventq.base, 0, 32);
1306         return MEMTX_OK;
1307     case A_EVENTQ_BASE + 4: /* 64b */
1308         *data = extract64(s->eventq.base, 32, 32);
1309         return MEMTX_OK;
1310     case A_EVENTQ_PROD:
1311         *data = s->eventq.prod;
1312         return MEMTX_OK;
1313     case A_EVENTQ_CONS:
1314         *data = s->eventq.cons;
1315         return MEMTX_OK;
1316     default:
1317         *data = 0;
1318         qemu_log_mask(LOG_UNIMP,
1319                       "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
1320                       __func__, offset);
1321         return MEMTX_OK;
1322     }
1323 }
1324 
1325 static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
1326                                   unsigned size, MemTxAttrs attrs)
1327 {
1328     SMMUState *sys = opaque;
1329     SMMUv3State *s = ARM_SMMUV3(sys);
1330     MemTxResult r;
1331 
1332     /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
1333     offset &= ~0x10000;
1334 
1335     switch (size) {
1336     case 8:
1337         r = smmu_readll(s, offset, data, attrs);
1338         break;
1339     case 4:
1340         r = smmu_readl(s, offset, data, attrs);
1341         break;
1342     default:
1343         r = MEMTX_ERROR;
1344         break;
1345     }
1346 
1347     trace_smmuv3_read_mmio(offset, *data, size, r);
1348     return r;
1349 }
1350 
1351 static const MemoryRegionOps smmu_mem_ops = {
1352     .read_with_attrs = smmu_read_mmio,
1353     .write_with_attrs = smmu_write_mmio,
1354     .endianness = DEVICE_LITTLE_ENDIAN,
1355     .valid = {
1356         .min_access_size = 4,
1357         .max_access_size = 8,
1358     },
1359     .impl = {
1360         .min_access_size = 4,
1361         .max_access_size = 8,
1362     },
1363 };
1364 
1365 static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
1366 {
1367     int i;
1368 
1369     for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
1370         sysbus_init_irq(dev, &s->irq[i]);
1371     }
1372 }
1373 
1374 static void smmu_reset(DeviceState *dev)
1375 {
1376     SMMUv3State *s = ARM_SMMUV3(dev);
1377     SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
1378 
1379     c->parent_reset(dev);
1380 
1381     smmuv3_init_regs(s);
1382 }
1383 
1384 static void smmu_realize(DeviceState *d, Error **errp)
1385 {
1386     SMMUState *sys = ARM_SMMU(d);
1387     SMMUv3State *s = ARM_SMMUV3(sys);
1388     SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
1389     SysBusDevice *dev = SYS_BUS_DEVICE(d);
1390     Error *local_err = NULL;
1391 
1392     c->parent_realize(d, &local_err);
1393     if (local_err) {
1394         error_propagate(errp, local_err);
1395         return;
1396     }
1397 
1398     qemu_mutex_init(&s->mutex);
1399 
1400     memory_region_init_io(&sys->iomem, OBJECT(s),
1401                           &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);
1402 
1403     sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;
1404 
1405     sysbus_init_mmio(dev, &sys->iomem);
1406 
1407     smmu_init_irq(s, dev);
1408 }
1409 
1410 static const VMStateDescription vmstate_smmuv3_queue = {
1411     .name = "smmuv3_queue",
1412     .version_id = 1,
1413     .minimum_version_id = 1,
1414     .fields = (VMStateField[]) {
1415         VMSTATE_UINT64(base, SMMUQueue),
1416         VMSTATE_UINT32(prod, SMMUQueue),
1417         VMSTATE_UINT32(cons, SMMUQueue),
1418         VMSTATE_UINT8(log2size, SMMUQueue),
1419         VMSTATE_END_OF_LIST(),
1420     },
1421 };
1422 
1423 static const VMStateDescription vmstate_smmuv3 = {
1424     .name = "smmuv3",
1425     .version_id = 1,
1426     .minimum_version_id = 1,
1427     .fields = (VMStateField[]) {
1428         VMSTATE_UINT32(features, SMMUv3State),
1429         VMSTATE_UINT8(sid_size, SMMUv3State),
1430         VMSTATE_UINT8(sid_split, SMMUv3State),
1431 
1432         VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
1433         VMSTATE_UINT32(cr0ack, SMMUv3State),
1434         VMSTATE_UINT32(statusr, SMMUv3State),
1435         VMSTATE_UINT32(irq_ctrl, SMMUv3State),
1436         VMSTATE_UINT32(gerror, SMMUv3State),
1437         VMSTATE_UINT32(gerrorn, SMMUv3State),
1438         VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
1439         VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
1440         VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
1441         VMSTATE_UINT64(strtab_base, SMMUv3State),
1442         VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
1443         VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
1444         VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
1445         VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),
1446 
1447         VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
1448         VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
1449 
1450         VMSTATE_END_OF_LIST(),
1451     },
1452 };
1453 
1454 static void smmuv3_instance_init(Object *obj)
1455 {
1456     /* Nothing much to do here as of now */
1457 }
1458 
1459 static void smmuv3_class_init(ObjectClass *klass, void *data)
1460 {
1461     DeviceClass *dc = DEVICE_CLASS(klass);
1462     SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);
1463 
1464     dc->vmsd = &vmstate_smmuv3;
1465     device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
1466     c->parent_realize = dc->realize;
1467     dc->realize = smmu_realize;
1468 }
1469 
1470 static void smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
1471                                        IOMMUNotifierFlag old,
1472                                        IOMMUNotifierFlag new)
1473 {
1474     SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
1475     SMMUv3State *s3 = sdev->smmu;
1476     SMMUState *s = &(s3->smmu_state);
1477 
1478     if (new & IOMMU_NOTIFIER_MAP) {
1479         int bus_num = pci_bus_num(sdev->bus);
1480         PCIDevice *pcidev = pci_find_device(sdev->bus, bus_num, sdev->devfn);
1481 
1482         warn_report("SMMUv3 does not support notification on MAP: "
1483                      "device %s will not function properly", pcidev->name);
1484     }
1485 
1486     if (old == IOMMU_NOTIFIER_NONE) {
1487         trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
1488         QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next);
1489     } else if (new == IOMMU_NOTIFIER_NONE) {
1490         trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
1491         QLIST_REMOVE(sdev, next);
1492     }
1493 }
1494 
1495 static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
1496                                                   void *data)
1497 {
1498     IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
1499 
1500     imrc->translate = smmuv3_translate;
1501     imrc->notify_flag_changed = smmuv3_notify_flag_changed;
1502 }
1503 
1504 static const TypeInfo smmuv3_type_info = {
1505     .name          = TYPE_ARM_SMMUV3,
1506     .parent        = TYPE_ARM_SMMU,
1507     .instance_size = sizeof(SMMUv3State),
1508     .instance_init = smmuv3_instance_init,
1509     .class_size    = sizeof(SMMUv3Class),
1510     .class_init    = smmuv3_class_init,
1511 };
1512 
1513 static const TypeInfo smmuv3_iommu_memory_region_info = {
1514     .parent = TYPE_IOMMU_MEMORY_REGION,
1515     .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
1516     .class_init = smmuv3_iommu_memory_region_class_init,
1517 };
1518 
1519 static void smmuv3_register_types(void)
1520 {
1521     type_register(&smmuv3_type_info);
1522     type_register(&smmuv3_iommu_memory_region_info);
1523 }
1524 
1525 type_init(smmuv3_register_types)
1526 
1527