xref: /openbmc/qemu/hw/arm/smmuv3.c (revision 76bccf3c)
1 /*
2  * Copyright (C) 2014-2016 Broadcom Corporation
3  * Copyright (c) 2017 Red Hat, Inc.
4  * Written by Prem Mallappa, Eric Auger
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program; if not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include "hw/irq.h"
22 #include "hw/sysbus.h"
23 #include "migration/vmstate.h"
24 #include "hw/qdev-properties.h"
25 #include "hw/qdev-core.h"
26 #include "hw/pci/pci.h"
27 #include "cpu.h"
28 #include "trace.h"
29 #include "qemu/log.h"
30 #include "qemu/error-report.h"
31 #include "qapi/error.h"
32 
33 #include "hw/arm/smmuv3.h"
34 #include "smmuv3-internal.h"
35 #include "smmu-internal.h"
36 
/*
 * Select the fault-recording enable for the stage being walked:
 * stage-1 faults are recorded per CD.R (cfg->record_faults), stage-2
 * faults per STE.S2R (cfg->s2cfg.record_faults).
 */
#define PTW_RECORD_FAULT(cfg)   (((cfg)->stage == 1) ? (cfg)->record_faults : \
                                 (cfg)->s2cfg.record_faults)
39 
40 /**
41  * smmuv3_trigger_irq - pulse @irq if enabled and update
42  * GERROR register in case of GERROR interrupt
43  *
44  * @irq: irq type
45  * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
46  */
47 static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
48                                uint32_t gerror_mask)
49 {
50 
51     bool pulse = false;
52 
53     switch (irq) {
54     case SMMU_IRQ_EVTQ:
55         pulse = smmuv3_eventq_irq_enabled(s);
56         break;
57     case SMMU_IRQ_PRIQ:
58         qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
59         break;
60     case SMMU_IRQ_CMD_SYNC:
61         pulse = true;
62         break;
63     case SMMU_IRQ_GERROR:
64     {
65         uint32_t pending = s->gerror ^ s->gerrorn;
66         uint32_t new_gerrors = ~pending & gerror_mask;
67 
68         if (!new_gerrors) {
69             /* only toggle non pending errors */
70             return;
71         }
72         s->gerror ^= new_gerrors;
73         trace_smmuv3_write_gerror(new_gerrors, s->gerror);
74 
75         pulse = smmuv3_gerror_irq_enabled(s);
76         break;
77     }
78     }
79     if (pulse) {
80             trace_smmuv3_trigger_irq(irq);
81             qemu_irq_pulse(s->irq[irq]);
82     }
83 }
84 
85 static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
86 {
87     uint32_t pending = s->gerror ^ s->gerrorn;
88     uint32_t toggled = s->gerrorn ^ new_gerrorn;
89 
90     if (toggled & ~pending) {
91         qemu_log_mask(LOG_GUEST_ERROR,
92                       "guest toggles non pending errors = 0x%x\n",
93                       toggled & ~pending);
94     }
95 
96     /*
97      * We do not raise any error in case guest toggles bits corresponding
98      * to not active IRQs (CONSTRAINED UNPREDICTABLE)
99      */
100     s->gerrorn = new_gerrorn;
101 
102     trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
103 }
104 
105 static inline MemTxResult queue_read(SMMUQueue *q, Cmd *cmd)
106 {
107     dma_addr_t addr = Q_CONS_ENTRY(q);
108     MemTxResult ret;
109     int i;
110 
111     ret = dma_memory_read(&address_space_memory, addr, cmd, sizeof(Cmd),
112                           MEMTXATTRS_UNSPECIFIED);
113     if (ret != MEMTX_OK) {
114         return ret;
115     }
116     for (i = 0; i < ARRAY_SIZE(cmd->word); i++) {
117         le32_to_cpus(&cmd->word[i]);
118     }
119     return ret;
120 }
121 
122 static MemTxResult queue_write(SMMUQueue *q, Evt *evt_in)
123 {
124     dma_addr_t addr = Q_PROD_ENTRY(q);
125     MemTxResult ret;
126     Evt evt = *evt_in;
127     int i;
128 
129     for (i = 0; i < ARRAY_SIZE(evt.word); i++) {
130         cpu_to_le32s(&evt.word[i]);
131     }
132     ret = dma_memory_write(&address_space_memory, addr, &evt, sizeof(Evt),
133                            MEMTXATTRS_UNSPECIFIED);
134     if (ret != MEMTX_OK) {
135         return ret;
136     }
137 
138     queue_prod_incr(q);
139     return MEMTX_OK;
140 }
141 
142 static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
143 {
144     SMMUQueue *q = &s->eventq;
145     MemTxResult r;
146 
147     if (!smmuv3_eventq_enabled(s)) {
148         return MEMTX_ERROR;
149     }
150 
151     if (smmuv3_q_full(q)) {
152         return MEMTX_ERROR;
153     }
154 
155     r = queue_write(q, evt);
156     if (r != MEMTX_OK) {
157         return r;
158     }
159 
160     if (!smmuv3_q_empty(q)) {
161         smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
162     }
163     return MEMTX_OK;
164 }
165 
/*
 * smmuv3_record_event - build an event record from @info and push it
 * onto the event queue.
 *
 * Does nothing if the event queue is disabled. If the queue write fails,
 * raises an EVENTQ_ABT_ERR global error interrupt instead. Sets
 * info->recorded on completion.
 */
void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    /* Fields common to all event types */
    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    /* Fill in the type-specific fields from the matching union member */
    switch (info->type) {
    case SMMU_EVT_NONE:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt,  info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt,  info->u.f_uut.rnw);
        EVT_SET_PNU(&evt,  info->u.f_uut.pnu);
        EVT_SET_IND(&evt,  info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt,  info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt,  info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR2(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt,  info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt,  info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt,  info->u.c_bad_cd.ssv);
        break;
    /* These fault types all share the f_walk_eabt record layout */
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt,  info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        /* Could not queue the event: report it via a global error instead */
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}
256 
/*
 * smmuv3_init_regs - reset the SMMUv3 register file to its initial state.
 *
 * Programs the read-only ID registers (IDR0/1/3/5) to advertise the
 * features this model implements, resets both queues, and clears all
 * writable control/status state.
 */
static void smmuv3_init_regs(SMMUv3State *s)
{
    /* Based on sys property, the stages supported in smmu will be advertised.*/
    if (s->stage && !strcmp("2", s->stage)) {
        s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S2P, 1);
    } else {
        s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1);
    }

    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, VMID16, 1); /* 16-bit VMID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    /* Stream ID width and queue sizes */
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS,   SMMU_CMDQS);

    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);
    if (FIELD_EX32(s->idr[0], IDR0, S2P)) {
        /* XNX is a stage-2-specific feature */
        s->idr[3] = FIELD_DP32(s->idr[3], IDR3, XNX, 1);
    }
    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, BBML, 2);

    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */
    /* 4K, 16K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);

    /* Reset queue state: LOG2SIZE lives in bits [4:0] of the base register */
    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    /* Clear writable control/status registers */
    s->features = 0;
    s->sid_split = 0;
    s->aidr = 0x1;
    s->cr[0] = 0;
    s->cr0ack = 0;
    s->irq_ctrl = 0;
    s->gerror = 0;
    s->gerrorn = 0;
    s->statusr = 0;
    s->gbpa = SMMU_GBPA_RESET_VAL;
}
315 
316 static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
317                         SMMUEventInfo *event)
318 {
319     int ret, i;
320 
321     trace_smmuv3_get_ste(addr);
322     /* TODO: guarantee 64-bit single-copy atomicity */
323     ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
324                           MEMTXATTRS_UNSPECIFIED);
325     if (ret != MEMTX_OK) {
326         qemu_log_mask(LOG_GUEST_ERROR,
327                       "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
328         event->type = SMMU_EVT_F_STE_FETCH;
329         event->u.f_ste_fetch.addr = addr;
330         return -EINVAL;
331     }
332     for (i = 0; i < ARRAY_SIZE(buf->word); i++) {
333         le32_to_cpus(&buf->word[i]);
334     }
335     return 0;
336 
337 }
338 
339 /* @ssid > 0 not supported yet */
340 static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
341                        CD *buf, SMMUEventInfo *event)
342 {
343     dma_addr_t addr = STE_CTXPTR(ste);
344     int ret, i;
345 
346     trace_smmuv3_get_cd(addr);
347     /* TODO: guarantee 64-bit single-copy atomicity */
348     ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
349                           MEMTXATTRS_UNSPECIFIED);
350     if (ret != MEMTX_OK) {
351         qemu_log_mask(LOG_GUEST_ERROR,
352                       "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
353         event->type = SMMU_EVT_F_CD_FETCH;
354         event->u.f_ste_fetch.addr = addr;
355         return -EINVAL;
356     }
357     for (i = 0; i < ARRAY_SIZE(buf->word); i++) {
358         le32_to_cpus(&buf->word[i]);
359     }
360     return 0;
361 }
362 
363 /*
364  * Max valid value is 39 when SMMU_IDR3.STT == 0.
365  * In architectures after SMMUv3.0:
366  * - If STE.S2TG selects a 4KB or 16KB granule, the minimum valid value for this
367  *   field is MAX(16, 64-IAS)
368  * - If STE.S2TG selects a 64KB granule, the minimum valid value for this field
369  *   is (64-IAS).
370  * As we only support AA64, IAS = OAS.
371  */
372 static bool s2t0sz_valid(SMMUTransCfg *cfg)
373 {
374     if (cfg->s2cfg.tsz > 39) {
375         return false;
376     }
377 
378     if (cfg->s2cfg.granule_sz == 16) {
379         return (cfg->s2cfg.tsz >= 64 - oas2bits(SMMU_IDR5_OAS));
380     }
381 
382     return (cfg->s2cfg.tsz >= MAX(64 - oas2bits(SMMU_IDR5_OAS), 16));
383 }
384 
385 /*
386  * Return true if s2 page table config is valid.
387  * This checks with the configured start level, ias_bits and granularity we can
388  * have a valid page table as described in ARM ARM D8.2 Translation process.
389  * The idea here is to see for the highest possible number of IPA bits, how
390  * many concatenated tables we would need, if it is more than 16, then this is
391  * not possible.
392  */
393 static bool s2_pgtable_config_valid(uint8_t sl0, uint8_t t0sz, uint8_t gran)
394 {
395     int level = get_start_level(sl0, gran);
396     uint64_t ipa_bits = 64 - t0sz;
397     uint64_t max_ipa = (1ULL << ipa_bits) - 1;
398     int nr_concat = pgd_concat_idx(level, gran, max_ipa) + 1;
399 
400     return nr_concat <= VMSA_MAX_S2_CONCAT;
401 }
402 
/*
 * decode_ste_s2_cfg - decode and validate the stage-2 fields of an STE
 * into @cfg->s2cfg.
 *
 * Returns 0 on success, -EINVAL if any field is invalid or requests an
 * unimplemented feature (the caller then records a C_BAD_STE event).
 */
static int decode_ste_s2_cfg(SMMUTransCfg *cfg, STE *ste)
{
    cfg->stage = 2;

    if (STE_S2AA64(ste) == 0x0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 AArch32 tables not supported\n");
        g_assert_not_reached();
    }

    /* Translate the S2TG encoding to a granule size in log2 bytes */
    switch (STE_S2TG(ste)) {
    case 0x0: /* 4KB */
        cfg->s2cfg.granule_sz = 12;
        break;
    case 0x1: /* 64KB */
        cfg->s2cfg.granule_sz = 16;
        break;
    case 0x2: /* 16KB */
        cfg->s2cfg.granule_sz = 14;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 bad STE S2TG: %x\n", STE_S2TG(ste));
        goto bad_ste;
    }

    cfg->s2cfg.vttb = STE_S2TTB(ste);

    cfg->s2cfg.sl0 = STE_S2SL0(ste);
    /* FEAT_TTST not supported. */
    if (cfg->s2cfg.sl0 == 0x3) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 S2SL0 = 0x3 has no meaning!\n");
        goto bad_ste;
    }

    /* For AA64, The effective S2PS size is capped to the OAS. */
    cfg->s2cfg.eff_ps = oas2bits(MIN(STE_S2PS(ste), SMMU_IDR5_OAS));
    /*
     * It is ILLEGAL for the address in S2TTB to be outside the range
     * described by the effective S2PS value.
     */
    if (cfg->s2cfg.vttb & ~(MAKE_64BIT_MASK(0, cfg->s2cfg.eff_ps))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 S2TTB too large 0x%" PRIx64
                      ", effective PS %d bits\n",
                      cfg->s2cfg.vttb,  cfg->s2cfg.eff_ps);
        goto bad_ste;
    }

    cfg->s2cfg.tsz = STE_S2T0SZ(ste);

    if (!s2t0sz_valid(cfg)) {
        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 bad STE S2T0SZ = %d\n",
                      cfg->s2cfg.tsz);
        goto bad_ste;
    }

    /* Check SL0/T0SZ/granule describe a realisable page table */
    if (!s2_pgtable_config_valid(cfg->s2cfg.sl0, cfg->s2cfg.tsz,
                                    cfg->s2cfg.granule_sz)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 STE stage 2 config not valid!\n");
        goto bad_ste;
    }

    /* Only LE supported(IDR0.TTENDIAN). */
    if (STE_S2ENDI(ste)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 STE_S2ENDI only supports LE!\n");
        goto bad_ste;
    }

    cfg->s2cfg.affd = STE_S2AFFD(ste);

    cfg->s2cfg.record_faults = STE_S2R(ste);
    /* As stall is not supported. */
    if (STE_S2S(ste)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 Stall not implemented!\n");
        goto bad_ste;
    }

    return 0;

bad_ste:
    return -EINVAL;
}
488 
/*
 * decode_ste - decode an STE into the translation config @cfg.
 *
 * Sets cfg->aborted/cfg->bypassed for abort/bypass configs (and returns 0
 * without further decoding), otherwise decodes stage-2 fields when enabled.
 * Returns < 0 in case of invalid STE, 0 otherwise; on error @event is set
 * to C_BAD_STE.
 */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;
    int ret;

    if (!STE_VALID(ste)) {
        if (!event->inval_ste_allowed) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n");
        }
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true;
        return 0;
    }

    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return 0;
    }

    /*
     * If a stage is enabled in SW while not advertised, throw bad ste
     * according to user manual(IHI0070E) "5.2 Stream Table Entry".
     */
    if (!STAGE1_SUPPORTED(s) && STE_CFG_S1_ENABLED(config)) {
        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S1 used but not supported.\n");
        goto bad_ste;
    }
    if (!STAGE2_SUPPORTED(s) && STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S2 used but not supported.\n");
        goto bad_ste;
    }

    if (STAGE2_SUPPORTED(s)) {
        /* VMID is considered even if s2 is disabled. */
        cfg->s2cfg.vmid = STE_S2VMID(ste);
    } else {
        /* Default to -1 */
        cfg->s2cfg.vmid = -1;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        /*
         * Stage-1 OAS defaults to OAS even if not enabled as it would be used
         * in input address check for stage-2.
         */
        cfg->oas = oas2bits(SMMU_IDR5_OAS);
        ret = decode_ste_s2_cfg(cfg, ste);
        if (ret) {
            goto bad_ste;
        }
    }

    /* Only a single (ssid 0) context descriptor is supported */
    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}
565 
566 /**
567  * smmu_find_ste - Return the stream table entry associated
568  * to the sid
569  *
570  * @s: smmuv3 handle
571  * @sid: stream ID
572  * @ste: returned stream table entry
573  * @event: handle to an event info
574  *
575  * Supports linear and 2-level stream table
576  * Return 0 on success, -EINVAL otherwise
577  */
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr, strtab_base;
    uint32_t log2size;
    int strtab_size_shift;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    log2size = FIELD_EX32(s->strtab_base_cfg, STRTAB_BASE_CFG, LOG2SIZE);
    /*
     * Check SID range against both guest-configured and implementation limits
     */
    if (sid >= (1 << MIN(log2size, SMMU_IDR1_SIDSIZE))) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        /* 2-level table: walk the L1 descriptor first, then index into L2 */
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span, i;
        dma_addr_t l1ptr, l2ptr;
        STEDesc l1std;

        /*
         * Align strtab base address to table size. For this purpose, assume it
         * is not bounded by SMMU_IDR1_SIDSIZE.
         */
        strtab_size_shift = MAX(5, (int)log2size - s->sid_split - 1 + 3);
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        /* Upper SID bits select the L1 entry, lower bits the L2 entry */
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr, &l1std,
                              sizeof(l1std), MEMTXATTRS_UNSPECIFIED);
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0X%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }
        /* L1 descriptors are little-endian in guest memory */
        for (i = 0; i < ARRAY_SIZE(l1std.word); i++) {
            le32_to_cpus(&l1std.word[i]);
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            if (!event->inval_ste_allowed) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "invalid sid=%d (L1STD span=0)\n", sid);
            }
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        /* SPAN encodes the number of valid entries in the L2 table */
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        /* Linear table: index directly by SID */
        strtab_size_shift = log2size + 5;
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        addr = strtab_base + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}
660 
/*
 * decode_cd - decode and validate a context descriptor into @cfg
 * (stage-1 translation configuration).
 *
 * Returns 0 on success; on any invalid or unsupported field returns
 * -EINVAL with @event set to C_BAD_CD.
 */
static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
{
    int ret = -EINVAL;
    int i;

    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
        goto bad_cd;
    }
    if (!CD_A(cd)) {
        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
    }
    if (CD_S(cd)) {
        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
    }
    if (CD_HA(cd) || CD_HD(cd)) {
        goto bad_cd; /* HTTU = 0 */
    }

    /* we support only those at the moment */
    cfg->aa64 = true;
    cfg->stage = 1;

    /* Output address size, capped to what IDR5 advertises */
    cfg->oas = oas2bits(CD_IPS(cd));
    cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
    cfg->tbi = CD_TBI(cd);
    cfg->asid = CD_ASID(cd);
    cfg->affd = CD_AFFD(cd);

    trace_smmuv3_decode_cd(cfg->oas);

    /* decode data dependent on TT */
    for (i = 0; i <= 1; i++) {
        int tg, tsz;
        SMMUTransTableInfo *tt = &cfg->tt[i];

        cfg->tt[i].disabled = CD_EPD(cd, i);
        if (cfg->tt[i].disabled) {
            continue;
        }

        /* T0SZ/T1SZ must describe a supported input size */
        tsz = CD_TSZ(cd, i);
        if (tsz < 16 || tsz > 39) {
            goto bad_cd;
        }

        tg = CD_TG(cd, i);
        tt->granule_sz = tg2granule(tg, i);
        /* Only 4K/16K/64K granules and little-endian tables are supported */
        if ((tt->granule_sz != 12 && tt->granule_sz != 14 &&
             tt->granule_sz != 16) || CD_ENDI(cd)) {
            goto bad_cd;
        }

        tt->tsz = tsz;
        tt->ttb = CD_TTB(cd, i);
        /* Table base must fit within the output address size */
        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
            goto bad_cd;
        }
        tt->had = CD_HAD(cd, i);
        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz, tt->had);
    }

    cfg->record_faults = CD_R(cd);

    return 0;

bad_cd:
    event->type = SMMU_EVT_C_BAD_CD;
    return ret;
}
730 
731 /**
732  * smmuv3_decode_config - Prepare the translation configuration
733  * for the @mr iommu region
734  * @mr: iommu memory region the translation config must be prepared for
735  * @cfg: output translation configuration which is populated through
736  *       the different configuration decoding steps
737  * @event: must be zero'ed by the caller
738  *
739  * return < 0 in case of config decoding error (@event is filled
740  * accordingly). Return 0 otherwise.
741  */
742 static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
743                                 SMMUEventInfo *event)
744 {
745     SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
746     uint32_t sid = smmu_get_sid(sdev);
747     SMMUv3State *s = sdev->smmu;
748     int ret;
749     STE ste;
750     CD cd;
751 
752     /* ASID defaults to -1 (if s1 is not supported). */
753     cfg->asid = -1;
754 
755     ret = smmu_find_ste(s, sid, &ste, event);
756     if (ret) {
757         return ret;
758     }
759 
760     ret = decode_ste(s, cfg, &ste, event);
761     if (ret) {
762         return ret;
763     }
764 
765     if (cfg->aborted || cfg->bypassed || (cfg->stage == 2)) {
766         return 0;
767     }
768 
769     ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
770     if (ret) {
771         return ret;
772     }
773 
774     return decode_cd(cfg, &cd, event);
775 }
776 
777 /**
778  * smmuv3_get_config - Look up for a cached copy of configuration data for
779  * @sdev and on cache miss performs a configuration structure decoding from
780  * guest RAM.
781  *
782  * @sdev: SMMUDevice handle
783  * @event: output event info
784  *
785  * The configuration cache contains data resulting from both STE and CD
786  * decoding under the form of an SMMUTransCfg struct. The hash table is indexed
787  * by the SMMUDevice handle.
788  */
789 static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
790 {
791     SMMUv3State *s = sdev->smmu;
792     SMMUState *bc = &s->smmu_state;
793     SMMUTransCfg *cfg;
794 
795     cfg = g_hash_table_lookup(bc->configs, sdev);
796     if (cfg) {
797         sdev->cfg_cache_hits++;
798         trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
799                             sdev->cfg_cache_hits, sdev->cfg_cache_misses,
800                             100 * sdev->cfg_cache_hits /
801                             (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
802     } else {
803         sdev->cfg_cache_misses++;
804         trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
805                             sdev->cfg_cache_hits, sdev->cfg_cache_misses,
806                             100 * sdev->cfg_cache_hits /
807                             (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
808         cfg = g_new0(SMMUTransCfg, 1);
809 
810         if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
811             g_hash_table_insert(bc->configs, sdev, cfg);
812         } else {
813             g_free(cfg);
814             cfg = NULL;
815         }
816     }
817     return cfg;
818 }
819 
820 static void smmuv3_flush_config(SMMUDevice *sdev)
821 {
822     SMMUv3State *s = sdev->smmu;
823     SMMUState *bc = &s->smmu_state;
824 
825     trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
826     g_hash_table_remove(bc->configs, sdev);
827 }
828 
829 static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
830                                       IOMMUAccessFlags flag, int iommu_idx)
831 {
832     SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
833     SMMUv3State *s = sdev->smmu;
834     uint32_t sid = smmu_get_sid(sdev);
835     SMMUEventInfo event = {.type = SMMU_EVT_NONE,
836                            .sid = sid,
837                            .inval_ste_allowed = false};
838     SMMUPTWEventInfo ptw_info = {};
839     SMMUTranslationStatus status;
840     SMMUState *bs = ARM_SMMU(s);
841     uint64_t page_mask, aligned_addr;
842     SMMUTLBEntry *cached_entry = NULL;
843     SMMUTransTableInfo *tt;
844     SMMUTransCfg *cfg = NULL;
845     IOMMUTLBEntry entry = {
846         .target_as = &address_space_memory,
847         .iova = addr,
848         .translated_addr = addr,
849         .addr_mask = ~(hwaddr)0,
850         .perm = IOMMU_NONE,
851     };
852     /*
853      * Combined attributes used for TLB lookup, as only one stage is supported,
854      * it will hold attributes based on the enabled stage.
855      */
856     SMMUTransTableInfo tt_combined;
857 
858     qemu_mutex_lock(&s->mutex);
859 
860     if (!smmu_enabled(s)) {
861         if (FIELD_EX32(s->gbpa, GBPA, ABORT)) {
862             status = SMMU_TRANS_ABORT;
863         } else {
864             status = SMMU_TRANS_DISABLE;
865         }
866         goto epilogue;
867     }
868 
869     cfg = smmuv3_get_config(sdev, &event);
870     if (!cfg) {
871         status = SMMU_TRANS_ERROR;
872         goto epilogue;
873     }
874 
875     if (cfg->aborted) {
876         status = SMMU_TRANS_ABORT;
877         goto epilogue;
878     }
879 
880     if (cfg->bypassed) {
881         status = SMMU_TRANS_BYPASS;
882         goto epilogue;
883     }
884 
885     if (cfg->stage == 1) {
886         /* Select stage1 translation table. */
887         tt = select_tt(cfg, addr);
888         if (!tt) {
889             if (cfg->record_faults) {
890                 event.type = SMMU_EVT_F_TRANSLATION;
891                 event.u.f_translation.addr = addr;
892                 event.u.f_translation.rnw = flag & 0x1;
893             }
894             status = SMMU_TRANS_ERROR;
895             goto epilogue;
896         }
897         tt_combined.granule_sz = tt->granule_sz;
898         tt_combined.tsz = tt->tsz;
899 
900     } else {
901         /* Stage2. */
902         tt_combined.granule_sz = cfg->s2cfg.granule_sz;
903         tt_combined.tsz = cfg->s2cfg.tsz;
904     }
905     /*
906      * TLB lookup looks for granule and input size for a translation stage,
907      * as only one stage is supported right now, choose the right values
908      * from the configuration.
909      */
910     page_mask = (1ULL << tt_combined.granule_sz) - 1;
911     aligned_addr = addr & ~page_mask;
912 
913     cached_entry = smmu_iotlb_lookup(bs, cfg, &tt_combined, aligned_addr);
914     if (cached_entry) {
915         if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) {
916             status = SMMU_TRANS_ERROR;
917             /*
918              * We know that the TLB only contains either stage-1 or stage-2 as
919              * nesting is not supported. So it is sufficient to check the
920              * translation stage to know the TLB stage for now.
921              */
922             event.u.f_walk_eabt.s2 = (cfg->stage == 2);
923             if (PTW_RECORD_FAULT(cfg)) {
924                 event.type = SMMU_EVT_F_PERMISSION;
925                 event.u.f_permission.addr = addr;
926                 event.u.f_permission.rnw = flag & 0x1;
927             }
928         } else {
929             status = SMMU_TRANS_SUCCESS;
930         }
931         goto epilogue;
932     }
933 
934     cached_entry = g_new0(SMMUTLBEntry, 1);
935 
936     if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
937         /* All faults from PTW has S2 field. */
938         event.u.f_walk_eabt.s2 = (ptw_info.stage == 2);
939         g_free(cached_entry);
940         switch (ptw_info.type) {
941         case SMMU_PTW_ERR_WALK_EABT:
942             event.type = SMMU_EVT_F_WALK_EABT;
943             event.u.f_walk_eabt.addr = addr;
944             event.u.f_walk_eabt.rnw = flag & 0x1;
945             event.u.f_walk_eabt.class = 0x1;
946             event.u.f_walk_eabt.addr2 = ptw_info.addr;
947             break;
948         case SMMU_PTW_ERR_TRANSLATION:
949             if (PTW_RECORD_FAULT(cfg)) {
950                 event.type = SMMU_EVT_F_TRANSLATION;
951                 event.u.f_translation.addr = addr;
952                 event.u.f_translation.rnw = flag & 0x1;
953             }
954             break;
955         case SMMU_PTW_ERR_ADDR_SIZE:
956             if (PTW_RECORD_FAULT(cfg)) {
957                 event.type = SMMU_EVT_F_ADDR_SIZE;
958                 event.u.f_addr_size.addr = addr;
959                 event.u.f_addr_size.rnw = flag & 0x1;
960             }
961             break;
962         case SMMU_PTW_ERR_ACCESS:
963             if (PTW_RECORD_FAULT(cfg)) {
964                 event.type = SMMU_EVT_F_ACCESS;
965                 event.u.f_access.addr = addr;
966                 event.u.f_access.rnw = flag & 0x1;
967             }
968             break;
969         case SMMU_PTW_ERR_PERMISSION:
970             if (PTW_RECORD_FAULT(cfg)) {
971                 event.type = SMMU_EVT_F_PERMISSION;
972                 event.u.f_permission.addr = addr;
973                 event.u.f_permission.rnw = flag & 0x1;
974             }
975             break;
976         default:
977             g_assert_not_reached();
978         }
979         status = SMMU_TRANS_ERROR;
980     } else {
981         smmu_iotlb_insert(bs, cfg, cached_entry);
982         status = SMMU_TRANS_SUCCESS;
983     }
984 
985 epilogue:
986     qemu_mutex_unlock(&s->mutex);
987     switch (status) {
988     case SMMU_TRANS_SUCCESS:
989         entry.perm = cached_entry->entry.perm;
990         entry.translated_addr = cached_entry->entry.translated_addr +
991                                     (addr & cached_entry->entry.addr_mask);
992         entry.addr_mask = cached_entry->entry.addr_mask;
993         trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
994                                        entry.translated_addr, entry.perm);
995         break;
996     case SMMU_TRANS_DISABLE:
997         entry.perm = flag;
998         entry.addr_mask = ~TARGET_PAGE_MASK;
999         trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
1000                                       entry.perm);
1001         break;
1002     case SMMU_TRANS_BYPASS:
1003         entry.perm = flag;
1004         entry.addr_mask = ~TARGET_PAGE_MASK;
1005         trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
1006                                       entry.perm);
1007         break;
1008     case SMMU_TRANS_ABORT:
1009         /* no event is recorded on abort */
1010         trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
1011                                      entry.perm);
1012         break;
1013     case SMMU_TRANS_ERROR:
1014         qemu_log_mask(LOG_GUEST_ERROR,
1015                       "%s translation failed for iova=0x%"PRIx64" (%s)\n",
1016                       mr->parent_obj.name, addr, smmu_event_string(event.type));
1017         smmuv3_record_event(s, &event);
1018         break;
1019     }
1020 
1021     return entry;
1022 }
1023 
1024 /**
1025  * smmuv3_notify_iova - call the notifier @n for a given
1026  * @asid and @iova tuple.
1027  *
1028  * @mr: IOMMU mr region handle
1029  * @n: notifier to be called
1030  * @asid: address space ID or negative value if we don't care
1031  * @vmid: virtual machine ID or negative value if we don't care
1032  * @iova: iova
1033  * @tg: translation granule (if communicated through range invalidation)
1034  * @num_pages: number of @granule sized pages (if tg != 0), otherwise 1
1035  */
1036 static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
1037                                IOMMUNotifier *n,
1038                                int asid, int vmid,
1039                                dma_addr_t iova, uint8_t tg,
1040                                uint64_t num_pages)
1041 {
1042     SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
1043     IOMMUTLBEvent event;
1044     uint8_t granule;
1045     SMMUv3State *s = sdev->smmu;
1046 
1047     if (!tg) {
1048         SMMUEventInfo eventinfo = {.inval_ste_allowed = true};
1049         SMMUTransCfg *cfg = smmuv3_get_config(sdev, &eventinfo);
1050         SMMUTransTableInfo *tt;
1051 
1052         if (!cfg) {
1053             return;
1054         }
1055 
1056         if (asid >= 0 && cfg->asid != asid) {
1057             return;
1058         }
1059 
1060         if (vmid >= 0 && cfg->s2cfg.vmid != vmid) {
1061             return;
1062         }
1063 
1064         if (STAGE1_SUPPORTED(s)) {
1065             tt = select_tt(cfg, iova);
1066             if (!tt) {
1067                 return;
1068             }
1069             granule = tt->granule_sz;
1070         } else {
1071             granule = cfg->s2cfg.granule_sz;
1072         }
1073 
1074     } else {
1075         granule = tg * 2 + 10;
1076     }
1077 
1078     event.type = IOMMU_NOTIFIER_UNMAP;
1079     event.entry.target_as = &address_space_memory;
1080     event.entry.iova = iova;
1081     event.entry.addr_mask = num_pages * (1 << granule) - 1;
1082     event.entry.perm = IOMMU_NONE;
1083 
1084     memory_region_notify_iommu_one(n, &event);
1085 }
1086 
1087 /* invalidate an asid/vmid/iova range tuple in all mr's */
1088 static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, int vmid,
1089                                       dma_addr_t iova, uint8_t tg,
1090                                       uint64_t num_pages)
1091 {
1092     SMMUDevice *sdev;
1093 
1094     QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
1095         IOMMUMemoryRegion *mr = &sdev->iommu;
1096         IOMMUNotifier *n;
1097 
1098         trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, vmid,
1099                                         iova, tg, num_pages);
1100 
1101         IOMMU_NOTIFIER_FOREACH(n, mr) {
1102             smmuv3_notify_iova(mr, n, asid, vmid, iova, tg, num_pages);
1103         }
1104     }
1105 }
1106 
/*
 * Handle a TLB range-invalidation command (e.g. TLBI_NH_VA, TLBI_S2_IPA):
 * invalidate the matching IOTLB entries and notify registered IOMMU
 * notifiers. With RIL (tg != 0) the requested range is split into
 * power-of-two sub-ranges; without RIL a single page is invalidated.
 */
static void smmuv3_range_inval(SMMUState *s, Cmd *cmd)
{
    dma_addr_t end, addr = CMD_ADDR(cmd);
    uint8_t type = CMD_TYPE(cmd);
    int vmid = -1;
    uint8_t scale = CMD_SCALE(cmd);
    uint8_t num = CMD_NUM(cmd);
    uint8_t ttl = CMD_TTL(cmd);
    bool leaf = CMD_LEAF(cmd);
    uint8_t tg = CMD_TG(cmd);
    uint64_t num_pages;
    uint8_t granule;
    int asid = -1;
    SMMUv3State *smmuv3 = ARM_SMMUV3(s);

    /* Only consider VMID if stage-2 is supported. */
    if (STAGE2_SUPPORTED(smmuv3)) {
        vmid = CMD_VMID(cmd);
    }

    /* Only TLBI_NH_VA carries an ASID; other commands match any ASID. */
    if (type == SMMU_CMD_TLBI_NH_VA) {
        asid = CMD_ASID(cmd);
    }

    /* tg == 0 means no range info (RIL not used): invalidate one page. */
    if (!tg) {
        trace_smmuv3_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
        smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, 1);
        smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, 1, ttl);
        return;
    }

    /* RIL in use */

    /* NUM/SCALE encode the page count; TG encodes the granule size. */
    num_pages = (num + 1) * BIT_ULL(scale);
    granule = tg * 2 + 10;

    /* Split invalidations into ^2 range invalidations */
    end = addr + (num_pages << granule) - 1;

    while (addr != end + 1) {
        /* Largest power-of-two chunk aligned at addr and within [addr,end]. */
        uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);

        num_pages = (mask + 1) >> granule;
        trace_smmuv3_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
        smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, num_pages);
        smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, num_pages, ttl);
        addr += mask + 1;
    }
}
1156 
1157 static gboolean
1158 smmuv3_invalidate_ste(gpointer key, gpointer value, gpointer user_data)
1159 {
1160     SMMUDevice *sdev = (SMMUDevice *)key;
1161     uint32_t sid = smmu_get_sid(sdev);
1162     SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data;
1163 
1164     if (sid < sid_range->start || sid > sid_range->end) {
1165         return false;
1166     }
1167     trace_smmuv3_config_cache_inv(sid);
1168     return true;
1169 }
1170 
/*
 * Drain the guest command queue, executing each command under the SMMU
 * mutex. Processing stops on the first command error, which is reported
 * through GERROR (CMDQ_ERR) and a GERROR interrupt. Always returns 0.
 */
static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUState *bs = ARM_SMMU(s);
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        /* An unacknowledged CMDQ_ERR stalls further command processing. */
        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        qemu_mutex_lock(&s->mutex);
        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
            break;
        case SMMU_CMD_CFGI_STE:
        {
            uint32_t sid = CMD_SID(&cmd);
            SMMUDevice *sdev = smmu_find_sdev(bs, sid);

            /* Commands with SSEC set are treated as illegal here. */
            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!sdev) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_ste(sid);
            smmuv3_flush_config(sdev);

            break;
        }
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        {
            uint32_t sid = CMD_SID(&cmd), mask;
            uint8_t range = CMD_STE_RANGE(&cmd);
            SMMUSIDRange sid_range;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            /* Range covers 2^(range+1) SIDs aligned down from sid. */
            mask = (1ULL << (range + 1)) - 1;
            sid_range.start = sid & ~mask;
            sid_range.end = sid_range.start + mask;

            trace_smmuv3_cmdq_cfgi_ste_range(sid_range.start, sid_range.end);
            g_hash_table_foreach_remove(bs->configs, smmuv3_invalidate_ste,
                                        &sid_range);
            break;
        }
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        {
            uint32_t sid = CMD_SID(&cmd);
            SMMUDevice *sdev = smmu_find_sdev(bs, sid);

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!sdev) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_cd(sid);
            smmuv3_flush_config(sdev);
            break;
        }
        case SMMU_CMD_TLBI_NH_ASID:
        {
            uint16_t asid = CMD_ASID(&cmd);

            if (!STAGE1_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            trace_smmuv3_cmdq_tlbi_nh_asid(asid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_asid(bs, asid);
            break;
        }
        case SMMU_CMD_TLBI_NH_ALL:
            if (!STAGE1_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }
            QEMU_FALLTHROUGH;
        case SMMU_CMD_TLBI_NSNH_ALL:
            trace_smmuv3_cmdq_tlbi_nh();
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_all(bs);
            break;
        case SMMU_CMD_TLBI_NH_VAA:
        case SMMU_CMD_TLBI_NH_VA:
            if (!STAGE1_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }
            smmuv3_range_inval(bs, &cmd);
            break;
        case SMMU_CMD_TLBI_S12_VMALL:
        {
            uint16_t vmid = CMD_VMID(&cmd);

            if (!STAGE2_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            trace_smmuv3_cmdq_tlbi_s12_vmid(vmid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_vmid(bs, vmid);
            break;
        }
        case SMMU_CMD_TLBI_S2_IPA:
            if (!STAGE2_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }
            /*
             * As currently only either s1 or s2 are supported
             * we can reuse same function for s2.
             */
            smmuv3_range_inval(bs, &cmd);
            break;
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            break;
        }
        qemu_mutex_unlock(&s->mutex);
        if (cmd_error) {
            if (cmd_error == SMMU_CERROR_ILL) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Illegal command type: %d\n", CMD_TYPE(&cmd));
            }
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}
1377 
1378 static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
1379                                uint64_t data, MemTxAttrs attrs)
1380 {
1381     switch (offset) {
1382     case A_GERROR_IRQ_CFG0:
1383         s->gerror_irq_cfg0 = data;
1384         return MEMTX_OK;
1385     case A_STRTAB_BASE:
1386         s->strtab_base = data;
1387         return MEMTX_OK;
1388     case A_CMDQ_BASE:
1389         s->cmdq.base = data;
1390         s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
1391         if (s->cmdq.log2size > SMMU_CMDQS) {
1392             s->cmdq.log2size = SMMU_CMDQS;
1393         }
1394         return MEMTX_OK;
1395     case A_EVENTQ_BASE:
1396         s->eventq.base = data;
1397         s->eventq.log2size = extract64(s->eventq.base, 0, 5);
1398         if (s->eventq.log2size > SMMU_EVENTQS) {
1399             s->eventq.log2size = SMMU_EVENTQS;
1400         }
1401         return MEMTX_OK;
1402     case A_EVENTQ_IRQ_CFG0:
1403         s->eventq_irq_cfg0 = data;
1404         return MEMTX_OK;
1405     default:
1406         qemu_log_mask(LOG_UNIMP,
1407                       "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
1408                       __func__, offset);
1409         return MEMTX_OK;
1410     }
1411 }
1412 
/*
 * Handle a 32-bit write to the SMMUv3 register file. 64-bit registers
 * are also accessible as two 32-bit halves (the "+ 4" cases). Writes to
 * unimplemented registers are ignored (WI).
 */
static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW may notify cmds can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_GBPA:
        /*
         * If UPDATE is not set, the write is ignored. This is the only
         * permitted behavior in SMMUv3.2 and later.
         */
        if (data & R_GBPA_UPDATE_MASK) {
            /* Ignore update bit as write is synchronous. */
            s->gbpa = data & ~R_GBPA_UPDATE_MASK;
        }
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        /* FMT == 1 selects the 2-level stream table layout. */
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        /* Low 5 bits encode LOG2SIZE; clamp to the supported maximum. */
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        /* New commands may have become available. */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}
1527 
1528 static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
1529                                    unsigned size, MemTxAttrs attrs)
1530 {
1531     SMMUState *sys = opaque;
1532     SMMUv3State *s = ARM_SMMUV3(sys);
1533     MemTxResult r;
1534 
1535     /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
1536     offset &= ~0x10000;
1537 
1538     switch (size) {
1539     case 8:
1540         r = smmu_writell(s, offset, data, attrs);
1541         break;
1542     case 4:
1543         r = smmu_writel(s, offset, data, attrs);
1544         break;
1545     default:
1546         r = MEMTX_ERROR;
1547         break;
1548     }
1549 
1550     trace_smmuv3_write_mmio(offset, data, size, r);
1551     return r;
1552 }
1553 
1554 static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
1555                                uint64_t *data, MemTxAttrs attrs)
1556 {
1557     switch (offset) {
1558     case A_GERROR_IRQ_CFG0:
1559         *data = s->gerror_irq_cfg0;
1560         return MEMTX_OK;
1561     case A_STRTAB_BASE:
1562         *data = s->strtab_base;
1563         return MEMTX_OK;
1564     case A_CMDQ_BASE:
1565         *data = s->cmdq.base;
1566         return MEMTX_OK;
1567     case A_EVENTQ_BASE:
1568         *data = s->eventq.base;
1569         return MEMTX_OK;
1570     default:
1571         *data = 0;
1572         qemu_log_mask(LOG_UNIMP,
1573                       "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
1574                       __func__, offset);
1575         return MEMTX_OK;
1576     }
1577 }
1578 
/*
 * Handle a 32-bit read of the SMMUv3 register file. 64-bit registers
 * are returned one half at a time (the "+ 4" cases). Unimplemented
 * registers read as zero (RAZ).
 */
static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x2f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IIDR:
        *data = s->iidr;
        return MEMTX_OK;
    case A_AIDR:
        *data = s->aidr;
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_GBPA:
        *data = s->gbpa;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        /* IRQ_CTRL and IRQ_CTRL_ACK read back the same value. */
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}
1676 
1677 static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
1678                                   unsigned size, MemTxAttrs attrs)
1679 {
1680     SMMUState *sys = opaque;
1681     SMMUv3State *s = ARM_SMMUV3(sys);
1682     MemTxResult r;
1683 
1684     /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
1685     offset &= ~0x10000;
1686 
1687     switch (size) {
1688     case 8:
1689         r = smmu_readll(s, offset, data, attrs);
1690         break;
1691     case 4:
1692         r = smmu_readl(s, offset, data, attrs);
1693         break;
1694     default:
1695         r = MEMTX_ERROR;
1696         break;
1697     }
1698 
1699     trace_smmuv3_read_mmio(offset, *data, size, r);
1700     return r;
1701 }
1702 
/* MMIO ops for the SMMUv3 register space: only 32/64-bit accesses allowed. */
static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
1716 
1717 static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
1718 {
1719     int i;
1720 
1721     for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
1722         sysbus_init_irq(dev, &s->irq[i]);
1723     }
1724 }
1725 
1726 static void smmu_reset_hold(Object *obj, ResetType type)
1727 {
1728     SMMUv3State *s = ARM_SMMUV3(obj);
1729     SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
1730 
1731     if (c->parent_phases.hold) {
1732         c->parent_phases.hold(obj, type);
1733     }
1734 
1735     smmuv3_init_regs(s);
1736 }
1737 
1738 static void smmu_realize(DeviceState *d, Error **errp)
1739 {
1740     SMMUState *sys = ARM_SMMU(d);
1741     SMMUv3State *s = ARM_SMMUV3(sys);
1742     SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
1743     SysBusDevice *dev = SYS_BUS_DEVICE(d);
1744     Error *local_err = NULL;
1745 
1746     c->parent_realize(d, &local_err);
1747     if (local_err) {
1748         error_propagate(errp, local_err);
1749         return;
1750     }
1751 
1752     qemu_mutex_init(&s->mutex);
1753 
1754     memory_region_init_io(&sys->iomem, OBJECT(s),
1755                           &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);
1756 
1757     sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;
1758 
1759     sysbus_init_mmio(dev, &sys->iomem);
1760 
1761     smmu_init_irq(s, dev);
1762 }
1763 
/* Migration state for one queue (cmdq/eventq): base, indices and size. */
static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};
1776 
1777 static bool smmuv3_gbpa_needed(void *opaque)
1778 {
1779     SMMUv3State *s = opaque;
1780 
1781     /* Only migrate GBPA if it has different reset value. */
1782     return s->gbpa != SMMU_GBPA_RESET_VAL;
1783 }
1784 
/* Optional migration subsection for GBPA, gated by smmuv3_gbpa_needed(). */
static const VMStateDescription vmstate_gbpa = {
    .name = "smmuv3/gbpa",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = smmuv3_gbpa_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(gbpa, SMMUv3State),
        VMSTATE_END_OF_LIST()
    }
};
1795 
/*
 * Top-level migration state for the SMMUv3 device: register file,
 * stream-table configuration and both queues. GBPA travels in an
 * optional subsection.
 */
static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_gbpa,
        NULL
    }
};
1831 
/* QOM properties of the SMMUv3 device. */
static Property smmuv3_properties[] = {
    /*
     * Stages of translation advertised.
     * "1": Stage 1
     * "2": Stage 2
     * Defaults to stage 1
     */
    DEFINE_PROP_STRING("stage", SMMUv3State, stage),
    DEFINE_PROP_END_OF_LIST()
};
1842 
1843 static void smmuv3_instance_init(Object *obj)
1844 {
1845     /* Nothing much to do here as of now */
1846 }
1847 
1848 static void smmuv3_class_init(ObjectClass *klass, void *data)
1849 {
1850     DeviceClass *dc = DEVICE_CLASS(klass);
1851     ResettableClass *rc = RESETTABLE_CLASS(klass);
1852     SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);
1853 
1854     dc->vmsd = &vmstate_smmuv3;
1855     resettable_class_set_parent_phases(rc, NULL, smmu_reset_hold, NULL,
1856                                        &c->parent_phases);
1857     device_class_set_parent_realize(dc, smmu_realize,
1858                                     &c->parent_realize);
1859     device_class_set_props(dc, smmuv3_properties);
1860 }
1861 
1862 static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
1863                                       IOMMUNotifierFlag old,
1864                                       IOMMUNotifierFlag new,
1865                                       Error **errp)
1866 {
1867     SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
1868     SMMUv3State *s3 = sdev->smmu;
1869     SMMUState *s = &(s3->smmu_state);
1870 
1871     if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
1872         error_setg(errp, "SMMUv3 does not support dev-iotlb yet");
1873         return -EINVAL;
1874     }
1875 
1876     if (new & IOMMU_NOTIFIER_MAP) {
1877         error_setg(errp,
1878                    "device %02x.%02x.%x requires iommu MAP notifier which is "
1879                    "not currently supported", pci_bus_num(sdev->bus),
1880                    PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn));
1881         return -EINVAL;
1882     }
1883 
1884     if (old == IOMMU_NOTIFIER_NONE) {
1885         trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
1886         QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next);
1887     } else if (new == IOMMU_NOTIFIER_NONE) {
1888         trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
1889         QLIST_REMOVE(sdev, next);
1890     }
1891     return 0;
1892 }
1893 
1894 static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
1895                                                   void *data)
1896 {
1897     IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
1898 
1899     imrc->translate = smmuv3_translate;
1900     imrc->notify_flag_changed = smmuv3_notify_flag_changed;
1901 }
1902 
/* QOM type for the SMMUv3 device, derived from the common ARM SMMU base. */
static const TypeInfo smmuv3_type_info = {
    .name          = TYPE_ARM_SMMUV3,
    .parent        = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size    = sizeof(SMMUv3Class),
    .class_init    = smmuv3_class_init,
};
1911 
/* QOM type for the per-device IOMMU memory region exposed by the SMMUv3. */
static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};
1917 
1918 static void smmuv3_register_types(void)
1919 {
1920     type_register(&smmuv3_type_info);
1921     type_register(&smmuv3_iommu_memory_region_info);
1922 }
1923 
1924 type_init(smmuv3_register_types)
1925 
1926