xref: /openbmc/qemu/hw/misc/tz-mpc.c (revision e4ea952f)
/*
 * ARM AHB5 TrustZone Memory Protection Controller emulation
 *
 * Copyright (c) 2018 Linaro Limited
 * Written by Peter Maydell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 or
 * (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "trace.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/registerfields.h"
#include "hw/irq.h"
#include "hw/misc/tz-mpc.h"
#include "hw/qdev-properties.h"

/* Our IOMMU has two IOMMU indexes, one for secure transactions and one for
 * non-secure transactions.
 */
enum {
    IOMMU_IDX_S,
    IOMMU_IDX_NS,
    IOMMU_NUM_INDEXES,
};

/* Config registers */
REG32(CTRL, 0x00)
    FIELD(CTRL, SEC_RESP, 4, 1)
    FIELD(CTRL, AUTOINC, 8, 1)
    FIELD(CTRL, LOCKDOWN, 31, 1)
REG32(BLK_MAX, 0x10)
REG32(BLK_CFG, 0x14)
REG32(BLK_IDX, 0x18)
REG32(BLK_LUT, 0x1c)
REG32(INT_STAT, 0x20)
    FIELD(INT_STAT, IRQ, 0, 1)
REG32(INT_CLEAR, 0x24)
    FIELD(INT_CLEAR, IRQ, 0, 1)
REG32(INT_EN, 0x28)
    FIELD(INT_EN, IRQ, 0, 1)
REG32(INT_INFO1, 0x2c)
REG32(INT_INFO2, 0x30)
    FIELD(INT_INFO2, HMASTER, 0, 16)
    FIELD(INT_INFO2, HNONSEC, 16, 1)
    FIELD(INT_INFO2, CFG_NS, 17, 1)
REG32(INT_SET, 0x34)
    FIELD(INT_SET, IRQ, 0, 1)
REG32(PIDR4, 0xfd0)
REG32(PIDR5, 0xfd4)
REG32(PIDR6, 0xfd8)
REG32(PIDR7, 0xfdc)
REG32(PIDR0, 0xfe0)
REG32(PIDR1, 0xfe4)
REG32(PIDR2, 0xfe8)
REG32(PIDR3, 0xfec)
REG32(CIDR0, 0xff0)
REG32(CIDR1, 0xff4)
REG32(CIDR2, 0xff8)
REG32(CIDR3, 0xffc)
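
/* A note on the register macros above (a general sketch of how
 * hw/registerfields.h is used, not something specific to this device):
 * REG32(CTRL, 0x00) provides the offset constant A_CTRL, and
 * FIELD(CTRL, SEC_RESP, 4, 1) provides the R_CTRL_SEC_RESP_SHIFT,
 * _LENGTH and _MASK constants, so code later in this file can write,
 * for example:
 *
 *     if (s->ctrl & R_CTRL_LOCKDOWN_MASK) { ... }
 *     s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HMASTER, id);
 */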

static const uint8_t tz_mpc_idregs[] = {
    0x04, 0x00, 0x00, 0x00,
    0x60, 0xb8, 0x1b, 0x00,
    0x0d, 0xf0, 0x05, 0xb1,
};

static void tz_mpc_irq_update(TZMPC *s)
{
    qemu_set_irq(s->irq, s->int_stat && s->int_en);
}

static void tz_mpc_iommu_notify(TZMPC *s, uint32_t lutidx,
                                uint32_t oldlut, uint32_t newlut)
{
    /* Called when the LUT word at lutidx has changed from oldlut to newlut;
     * must call the IOMMU notifiers for the changed blocks.
     */
    IOMMUTLBEvent event = {
        .entry = {
            .addr_mask = s->blocksize - 1,
        }
    };
    hwaddr addr = lutidx * s->blocksize * 32;
    int i;

    for (i = 0; i < 32; i++, addr += s->blocksize) {
        bool block_is_ns;

        if (!((oldlut ^ newlut) & (1 << i))) {
            continue;
        }
        /* This changes the mappings for both the S and the NS space,
         * so we need to do four notifies: an UNMAP then a MAP for each.
         */
        block_is_ns = newlut & (1 << i);

        trace_tz_mpc_iommu_notify(addr);
        event.entry.iova = addr;
        event.entry.translated_addr = addr;

        event.type = IOMMU_NOTIFIER_UNMAP;
        event.entry.perm = IOMMU_NONE;
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, event);
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, event);

        event.type = IOMMU_NOTIFIER_MAP;
        event.entry.perm = IOMMU_RW;
        if (block_is_ns) {
            event.entry.target_as = &s->blocked_io_as;
        } else {
            event.entry.target_as = &s->downstream_as;
        }
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, event);
        if (block_is_ns) {
            event.entry.target_as = &s->downstream_as;
        } else {
            event.entry.target_as = &s->blocked_io_as;
        }
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, event);
    }
}
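
/* Worked example for the notify loop above (illustrative only, assuming
 * a 0x1000-byte blocksize): each LUT word covers 32 blocks, so a write
 * to LUT word 2 starts at addr = 2 * 0x1000 * 32 = 0x40000 and walks
 * blocks 64..95 in 0x1000 steps. For each bit that differs between
 * oldlut and newlut, both the S and the NS index get an UNMAP followed
 * by a MAP, with the MAP pointing the "allowed" index at downstream_as
 * and the other index at blocked_io_as.
 */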

static void tz_mpc_autoinc_idx(TZMPC *s, unsigned access_size)
{
    /* Auto-increment BLK_IDX if necessary */
    if (access_size == 4 && (s->ctrl & R_CTRL_AUTOINC_MASK)) {
        s->blk_idx++;
        s->blk_idx %= s->blk_max;
    }
}

static MemTxResult tz_mpc_reg_read(void *opaque, hwaddr addr,
                                   uint64_t *pdata,
                                   unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);
    uint64_t r;
    uint32_t offset = addr & ~0x3;

    if (!attrs.secure && offset < A_PIDR4) {
        /* NS accesses can only see the ID registers */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: NS access to offset 0x%x\n",
                      offset);
        r = 0;
        goto read_out;
    }

    switch (offset) {
    case A_CTRL:
        r = s->ctrl;
        break;
    case A_BLK_MAX:
        r = s->blk_max - 1;
        break;
    case A_BLK_CFG:
        /* We are never in the "init in progress" state, so this just
         * indicates the block size. s->blocksize == (1 << (BLK_CFG + 5)),
         * so BLK_CFG == ctz32(s->blocksize) - 5; for example a 0x1000-byte
         * blocksize reads back as BLK_CFG == 7.
         */
        r = ctz32(s->blocksize) - 5;
        break;
    case A_BLK_IDX:
        r = s->blk_idx;
        break;
    case A_BLK_LUT:
        r = s->blk_lut[s->blk_idx];
        tz_mpc_autoinc_idx(s, size);
        break;
    case A_INT_STAT:
        r = s->int_stat;
        break;
    case A_INT_EN:
        r = s->int_en;
        break;
    case A_INT_INFO1:
        r = s->int_info1;
        break;
    case A_INT_INFO2:
        r = s->int_info2;
        break;
    case A_PIDR4:
    case A_PIDR5:
    case A_PIDR6:
    case A_PIDR7:
    case A_PIDR0:
    case A_PIDR1:
    case A_PIDR2:
    case A_PIDR3:
    case A_CIDR0:
    case A_CIDR1:
    case A_CIDR2:
    case A_CIDR3:
        r = tz_mpc_idregs[(offset - A_PIDR4) / 4];
        break;
    case A_INT_CLEAR:
    case A_INT_SET:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: write-only offset 0x%x\n",
                      offset);
        r = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: bad offset 0x%x\n", offset);
        r = 0;
        break;
    }

    if (size != 4) {
        /* None of our registers are read-sensitive (except BLK_LUT,
         * whose auto-increment already special-cases non-word-sized
         * accesses), so just pull the right bytes out of the word
         * read result.
         */
        r = extract32(r, (addr & 3) * 8, size * 8);
    }

read_out:
    trace_tz_mpc_reg_read(addr, r, size);
    *pdata = r;
    return MEMTX_OK;
}

static MemTxResult tz_mpc_reg_write(void *opaque, hwaddr addr,
                                    uint64_t value,
                                    unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);
    uint32_t offset = addr & ~0x3;

    trace_tz_mpc_reg_write(addr, value, size);

    if (!attrs.secure && offset < A_PIDR4) {
        /* NS accesses can only see the ID registers */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: NS access to offset 0x%x\n",
                      offset);
        return MEMTX_OK;
    }

    if (size != 4) {
        /* Expand the byte or halfword write to a full word size.
         * In most cases we can do this with zeroes; the exceptions
         * are CTRL, BLK_IDX and BLK_LUT.
         */
        uint32_t oldval;

        switch (offset) {
        case A_CTRL:
            oldval = s->ctrl;
            break;
        case A_BLK_IDX:
            oldval = s->blk_idx;
            break;
        case A_BLK_LUT:
            oldval = s->blk_lut[s->blk_idx];
            break;
        default:
            oldval = 0;
            break;
        }
        value = deposit32(oldval, (addr & 3) * 8, size * 8, value);
    }

    if ((s->ctrl & R_CTRL_LOCKDOWN_MASK) &&
        (offset == A_CTRL || offset == A_BLK_LUT || offset == A_INT_EN)) {
        /* Lockdown mode makes these three registers read-only, and
         * the only way out of it is to reset the device.
         */
        qemu_log_mask(LOG_GUEST_ERROR, "TZ MPC register write to offset 0x%x "
                      "while MPC is in lockdown mode\n", offset);
        return MEMTX_OK;
    }

    switch (offset) {
    case A_CTRL:
        /* We don't implement the 'data gating' feature so all other bits
         * are reserved and we make them RAZ/WI.
         */
        s->ctrl = value & (R_CTRL_SEC_RESP_MASK |
                           R_CTRL_AUTOINC_MASK |
                           R_CTRL_LOCKDOWN_MASK);
        break;
    case A_BLK_IDX:
        s->blk_idx = value % s->blk_max;
        break;
    case A_BLK_LUT:
        tz_mpc_iommu_notify(s, s->blk_idx, s->blk_lut[s->blk_idx], value);
        s->blk_lut[s->blk_idx] = value;
        tz_mpc_autoinc_idx(s, size);
        break;
    case A_INT_CLEAR:
        if (value & R_INT_CLEAR_IRQ_MASK) {
            s->int_stat = 0;
            tz_mpc_irq_update(s);
        }
        break;
    case A_INT_EN:
        s->int_en = value & R_INT_EN_IRQ_MASK;
        tz_mpc_irq_update(s);
        break;
    case A_INT_SET:
        if (value & R_INT_SET_IRQ_MASK) {
            s->int_stat = R_INT_STAT_IRQ_MASK;
            tz_mpc_irq_update(s);
        }
        break;
    case A_PIDR4:
    case A_PIDR5:
    case A_PIDR6:
    case A_PIDR7:
    case A_PIDR0:
    case A_PIDR1:
    case A_PIDR2:
    case A_PIDR3:
    case A_CIDR0:
    case A_CIDR1:
    case A_CIDR2:
    case A_CIDR3:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: read-only offset 0x%x\n", offset);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: bad offset 0x%x\n", offset);
        break;
    }

    return MEMTX_OK;
}

static const MemoryRegionOps tz_mpc_reg_ops = {
    .read_with_attrs = tz_mpc_reg_read,
    .write_with_attrs = tz_mpc_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 4,
};

static inline bool tz_mpc_cfg_ns(TZMPC *s, hwaddr addr)
{
    /* Return the cfg_ns bit from the LUT for the specified address */
    hwaddr blknum = addr / s->blocksize;
    hwaddr blkword = blknum / 32;
    uint32_t blkbit = 1U << (blknum % 32);

    /* This would imply the address was larger than the size we
     * defined this memory region to be, so it can't happen.
     */
    assert(blkword < s->blk_max);
    return s->blk_lut[blkword] & blkbit;
}
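
/* Worked example for tz_mpc_cfg_ns() (illustrative only, assuming a
 * 0x1000-byte blocksize): addr 0x5000 gives blknum 5, blkword 0 and
 * blkbit (1 << 5), so the result is bit 5 of blk_lut[0].
 */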

static MemTxResult tz_mpc_handle_block(TZMPC *s, hwaddr addr, MemTxAttrs attrs)
{
    /* Handle a blocked transaction: raise IRQ, capture info, etc */
    if (!s->int_stat) {
        /* First blocked transfer: capture information into INT_INFO1 and
         * INT_INFO2. Subsequent transfers are still blocked but don't
         * capture information until the guest clears the interrupt.
         */

        s->int_info1 = addr;
        s->int_info2 = 0;
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HMASTER,
                                  attrs.requester_id & 0xffff);
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HNONSEC,
                                  ~attrs.secure);
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, CFG_NS,
                                  tz_mpc_cfg_ns(s, addr));
        s->int_stat |= R_INT_STAT_IRQ_MASK;
        tz_mpc_irq_update(s);
    }

    /* Generate bus error if desired; otherwise RAZ/WI */
    return (s->ctrl & R_CTRL_SEC_RESP_MASK) ? MEMTX_ERROR : MEMTX_OK;
}
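
/* For example (illustrative only): if the first blocked access is a
 * non-secure read at 0x2000 from bus master 0x5 to a block whose LUT
 * cfg_ns bit is 0, INT_INFO1 reads back as 0x2000 and INT_INFO2 as
 * 0x00010005 (HMASTER = 0x0005, HNONSEC = 1, CFG_NS = 0); whether the
 * access also returns a bus error depends on CTRL.SEC_RESP.
 */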

/* Accesses only reach these read and write functions if the MPC is
 * blocking them; non-blocked accesses go directly to the downstream
 * memory region without passing through this code.
 */
static MemTxResult tz_mpc_mem_blocked_read(void *opaque, hwaddr addr,
                                           uint64_t *pdata,
                                           unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);

    trace_tz_mpc_mem_blocked_read(addr, size, attrs.secure);

    *pdata = 0;
    return tz_mpc_handle_block(s, addr, attrs);
}

static MemTxResult tz_mpc_mem_blocked_write(void *opaque, hwaddr addr,
                                            uint64_t value,
                                            unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);

    trace_tz_mpc_mem_blocked_write(addr, value, size, attrs.secure);

    return tz_mpc_handle_block(s, addr, attrs);
}

static const MemoryRegionOps tz_mpc_mem_blocked_ops = {
    .read_with_attrs = tz_mpc_mem_blocked_read,
    .write_with_attrs = tz_mpc_mem_blocked_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
};

static IOMMUTLBEntry tz_mpc_translate(IOMMUMemoryRegion *iommu,
                                      hwaddr addr, IOMMUAccessFlags flags,
                                      int iommu_idx)
{
    TZMPC *s = TZ_MPC(container_of(iommu, TZMPC, upstream));
    bool ok;

    IOMMUTLBEntry ret = {
        .iova = addr & ~(s->blocksize - 1),
        .translated_addr = addr & ~(s->blocksize - 1),
        .addr_mask = s->blocksize - 1,
        .perm = IOMMU_RW,
    };

    /* Look at the per-block configuration for this address, and
     * return a TLB entry directing the transaction at either
     * downstream_as or blocked_io_as, as appropriate.
     * If the LUT cfg_ns bit is 1, only non-secure transactions
     * may pass. If the bit is 0, only secure transactions may pass.
     */
    ok = tz_mpc_cfg_ns(s, addr) == (iommu_idx == IOMMU_IDX_NS);

    trace_tz_mpc_translate(addr, flags,
                           iommu_idx == IOMMU_IDX_S ? "S" : "NS",
                           ok ? "pass" : "block");

    ret.target_as = ok ? &s->downstream_as : &s->blocked_io_as;
    return ret;
}
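
/* For example (illustrative only): with the LUT cfg_ns bit for a block
 * set to 1, a lookup on IOMMU_IDX_NS returns a TLB entry targeting
 * downstream_as (the access passes), while the same lookup on
 * IOMMU_IDX_S targets blocked_io_as and so ends up in
 * tz_mpc_handle_block().
 */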

static int tz_mpc_attrs_to_index(IOMMUMemoryRegion *iommu, MemTxAttrs attrs)
{
    /* We treat unspecified attributes like secure. Transactions with
     * unspecified attributes come from places like rom_reset() for
     * initial image load, and we want those to pass through the
     * from-reset "everything is secure" config.
     * All the real during-emulation transactions from the CPU will
     * specify attributes.
     */
    return (attrs.unspecified || attrs.secure) ? IOMMU_IDX_S : IOMMU_IDX_NS;
}

static int tz_mpc_num_indexes(IOMMUMemoryRegion *iommu)
{
    return IOMMU_NUM_INDEXES;
}

static void tz_mpc_reset(DeviceState *dev)
{
    TZMPC *s = TZ_MPC(dev);

    s->ctrl = 0x00000100;
    s->blk_idx = 0;
    s->int_stat = 0;
    s->int_en = 1;
    s->int_info1 = 0;
    s->int_info2 = 0;

    memset(s->blk_lut, 0, s->blk_max * sizeof(uint32_t));
}

static void tz_mpc_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    TZMPC *s = TZ_MPC(obj);

    qdev_init_gpio_out_named(dev, &s->irq, "irq", 1);
}

static void tz_mpc_realize(DeviceState *dev, Error **errp)
{
    Object *obj = OBJECT(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    TZMPC *s = TZ_MPC(dev);
    uint64_t size;

    /* We can't create the upstream end of the port until realize,
     * as we don't know the size of the MR used as the downstream until then.
     * We insist on having a downstream, to avoid complicating the code
     * with handling the "don't know how big this is" case. It's easy
     * enough for the user to create an unimplemented_device as downstream
     * if they have nothing else to plug into this.
     */
    if (!s->downstream) {
        error_setg(errp, "MPC 'downstream' link not set");
        return;
    }

    size = memory_region_size(s->downstream);

    memory_region_init_iommu(&s->upstream, sizeof(s->upstream),
                             TYPE_TZ_MPC_IOMMU_MEMORY_REGION,
                             obj, "tz-mpc-upstream", size);

    /* In real hardware the block size is configurable. In QEMU we could
     * make it configurable, but it would need to be at least as big as the
     * target page size so we can execute out of the resulting MRs. Guest
     * software is supposed to check the block size using the BLK_CFG
     * register, so make it fixed at the page size.
     */
    s->blocksize = memory_region_iommu_get_min_page_size(&s->upstream);
    if (size % s->blocksize != 0) {
        error_setg(errp,
                   "MPC 'downstream' size %" PRId64
                   " is not a multiple of %" HWADDR_PRIx " bytes",
                   size, s->blocksize);
        object_unref(OBJECT(&s->upstream));
        return;
    }

    /* s->blk_max is the number of 32-bit LUT words, each bit of which
     * indicates one block; the BLK_MAX register (the largest valid
     * BLK_IDX) reads back as s->blk_max - 1.
     */
    s->blk_max = DIV_ROUND_UP(size / s->blocksize, 32);

    memory_region_init_io(&s->regmr, obj, &tz_mpc_reg_ops,
                          s, "tz-mpc-regs", 0x1000);
    sysbus_init_mmio(sbd, &s->regmr);

    sysbus_init_mmio(sbd, MEMORY_REGION(&s->upstream));

    /* This memory region is not exposed to users of this device as a
     * sysbus MMIO region, but is instead used internally as something
     * that our IOMMU translate function might direct accesses to.
     */
    memory_region_init_io(&s->blocked_io, obj, &tz_mpc_mem_blocked_ops,
                          s, "tz-mpc-blocked-io", size);

    address_space_init(&s->downstream_as, s->downstream,
                       "tz-mpc-downstream");
    address_space_init(&s->blocked_io_as, &s->blocked_io,
                       "tz-mpc-blocked-io");

    s->blk_lut = g_new0(uint32_t, s->blk_max);
}
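
/* Sizing example for tz_mpc_realize() (illustrative only): a 16MB
 * downstream MR with a 0x1000-byte blocksize gives 4096 blocks, so
 * s->blk_max = DIV_ROUND_UP(4096, 32) = 128 LUT words and the BLK_MAX
 * register reads back as 127.
 */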

static int tz_mpc_post_load(void *opaque, int version_id)
{
    TZMPC *s = TZ_MPC(opaque);

    /* Check the incoming data doesn't point blk_idx off the end of blk_lut. */
    if (s->blk_idx >= s->blk_max) {
        return -1;
    }
    return 0;
}

static const VMStateDescription tz_mpc_vmstate = {
    .name = "tz-mpc",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = tz_mpc_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(ctrl, TZMPC),
        VMSTATE_UINT32(blk_idx, TZMPC),
        VMSTATE_UINT32(int_stat, TZMPC),
        VMSTATE_UINT32(int_en, TZMPC),
        VMSTATE_UINT32(int_info1, TZMPC),
        VMSTATE_UINT32(int_info2, TZMPC),
        VMSTATE_VARRAY_UINT32(blk_lut, TZMPC, blk_max,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_END_OF_LIST()
    }
};

static Property tz_mpc_properties[] = {
    DEFINE_PROP_LINK("downstream", TZMPC, downstream,
                     TYPE_MEMORY_REGION, MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};
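
/* A board model is expected to set the "downstream" link before
 * realizing the device and to connect the "irq" GPIO output; MMIO
 * region 0 is the register block and MMIO region 1 is the upstream
 * (IOMMU) side. A rough wiring sketch (illustrative only; the names
 * other than the QOM/sysbus APIs are made up):
 *
 *     object_property_set_link(OBJECT(mpc), "downstream",
 *                              OBJECT(downstream_mr), &error_fatal);
 *     sysbus_realize(SYS_BUS_DEVICE(mpc), &error_fatal);
 *     qdev_connect_gpio_out_named(DEVICE(mpc), "irq", 0, mpc_irq_in);
 *     sysbus_mmio_map(SYS_BUS_DEVICE(mpc), 0, mpc_regs_base);
 *     memory_region_add_subregion(system_memory, upstream_base,
 *                     sysbus_mmio_get_region(SYS_BUS_DEVICE(mpc), 1));
 */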

static void tz_mpc_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = tz_mpc_realize;
    dc->vmsd = &tz_mpc_vmstate;
    dc->reset = tz_mpc_reset;
    device_class_set_props(dc, tz_mpc_properties);
}

static const TypeInfo tz_mpc_info = {
    .name = TYPE_TZ_MPC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(TZMPC),
    .instance_init = tz_mpc_init,
    .class_init = tz_mpc_class_init,
};

static void tz_mpc_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = tz_mpc_translate;
    imrc->attrs_to_index = tz_mpc_attrs_to_index;
    imrc->num_indexes = tz_mpc_num_indexes;
}

static const TypeInfo tz_mpc_iommu_memory_region_info = {
    .name = TYPE_TZ_MPC_IOMMU_MEMORY_REGION,
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .class_init = tz_mpc_iommu_memory_region_class_init,
};

static void tz_mpc_register_types(void)
{
    type_register_static(&tz_mpc_info);
    type_register_static(&tz_mpc_iommu_memory_region_info);
}

type_init(tz_mpc_register_types);