xref: /openbmc/qemu/hw/misc/tz-mpc.c (revision db725815985654007ade0fd53590d613fd657208)
/*
 * ARM AHB5 TrustZone Memory Protection Controller emulation
 *
 * Copyright (c) 2018 Linaro Limited
 * Written by Peter Maydell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 or
 * (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "trace.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/registerfields.h"
#include "hw/irq.h"
#include "hw/misc/tz-mpc.h"

/* Our IOMMU has two IOMMU indexes, one for secure transactions and one for
 * non-secure transactions.
 */
enum {
    IOMMU_IDX_S,
    IOMMU_IDX_NS,
    IOMMU_NUM_INDEXES,
};

/* Config registers */
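/* The REG32() and FIELD() macros from hw/registerfields.h generate the
 * A_<reg> address constants and the R_<reg>_<field>_MASK/_SHIFT/_LENGTH
 * constants (for example A_CTRL and R_CTRL_LOCKDOWN_MASK) that are used
 * throughout this file.
 */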
REG32(CTRL, 0x00)
    FIELD(CTRL, SEC_RESP, 4, 1)
    FIELD(CTRL, AUTOINC, 8, 1)
    FIELD(CTRL, LOCKDOWN, 31, 1)
REG32(BLK_MAX, 0x10)
REG32(BLK_CFG, 0x14)
REG32(BLK_IDX, 0x18)
REG32(BLK_LUT, 0x1c)
REG32(INT_STAT, 0x20)
    FIELD(INT_STAT, IRQ, 0, 1)
REG32(INT_CLEAR, 0x24)
    FIELD(INT_CLEAR, IRQ, 0, 1)
REG32(INT_EN, 0x28)
    FIELD(INT_EN, IRQ, 0, 1)
REG32(INT_INFO1, 0x2c)
REG32(INT_INFO2, 0x30)
    FIELD(INT_INFO2, HMASTER, 0, 16)
    FIELD(INT_INFO2, HNONSEC, 16, 1)
    FIELD(INT_INFO2, CFG_NS, 17, 1)
REG32(INT_SET, 0x34)
    FIELD(INT_SET, IRQ, 0, 1)
REG32(PIDR4, 0xfd0)
REG32(PIDR5, 0xfd4)
REG32(PIDR6, 0xfd8)
REG32(PIDR7, 0xfdc)
REG32(PIDR0, 0xfe0)
REG32(PIDR1, 0xfe4)
REG32(PIDR2, 0xfe8)
REG32(PIDR3, 0xfec)
REG32(CIDR0, 0xff0)
REG32(CIDR1, 0xff4)
REG32(CIDR2, 0xff8)
REG32(CIDR3, 0xffc)

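/* Peripheral and component ID register values, one byte per register in
 * offset order from PIDR4 to CIDR3; tz_mpc_reg_read() returns them for
 * reads of those offsets. The last four bytes are the usual Arm
 * component ID preamble (0x0d, 0xf0, 0x05, 0xb1).
 */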
static const uint8_t tz_mpc_idregs[] = {
    0x04, 0x00, 0x00, 0x00,
    0x60, 0xb8, 0x1b, 0x00,
    0x0d, 0xf0, 0x05, 0xb1,
};

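/* Update the outbound IRQ line: it is asserted only while a blocked
 * transaction interrupt is both pending (INT_STAT) and enabled (INT_EN).
 */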
static void tz_mpc_irq_update(TZMPC *s)
{
    qemu_set_irq(s->irq, s->int_stat && s->int_en);
}

static void tz_mpc_iommu_notify(TZMPC *s, uint32_t lutidx,
                                uint32_t oldlut, uint32_t newlut)
{
    /* Called when the LUT word at lutidx has changed from oldlut to newlut;
     * must call the IOMMU notifiers for the changed blocks.
     */
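    /* Each LUT word covers 32 blocks, so (for example, with a 0x1000 byte
     * blocksize) LUT word N describes the 32 * 0x1000 == 0x20000 bytes of
     * downstream address space starting at N * 0x20000.
     */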
    IOMMUTLBEntry entry = {
        .addr_mask = s->blocksize - 1,
    };
    hwaddr addr = lutidx * s->blocksize * 32;
    int i;

    for (i = 0; i < 32; i++, addr += s->blocksize) {
        bool block_is_ns;

        if (!((oldlut ^ newlut) & (1 << i))) {
            continue;
        }
        /* This changes the mappings for both the S and the NS space,
         * so we need to do four notifies: an UNMAP then a MAP for each.
         */
        block_is_ns = newlut & (1 << i);

        trace_tz_mpc_iommu_notify(addr);
        entry.iova = addr;
        entry.translated_addr = addr;

        entry.perm = IOMMU_NONE;
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, entry);
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, entry);

        entry.perm = IOMMU_RW;
        if (block_is_ns) {
            entry.target_as = &s->blocked_io_as;
        } else {
            entry.target_as = &s->downstream_as;
        }
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, entry);
        if (block_is_ns) {
            entry.target_as = &s->downstream_as;
        } else {
            entry.target_as = &s->blocked_io_as;
        }
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, entry);
    }
}

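/* Advance BLK_IDX following an access to BLK_LUT, if the guest has
 * enabled auto-increment; only full-word accesses auto-increment, and
 * the index wraps at the top of the LUT.
 */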
static void tz_mpc_autoinc_idx(TZMPC *s, unsigned access_size)
{
    /* Auto-increment BLK_IDX if necessary */
    if (access_size == 4 && (s->ctrl & R_CTRL_AUTOINC_MASK)) {
        s->blk_idx++;
        s->blk_idx %= s->blk_max;
    }
}

static MemTxResult tz_mpc_reg_read(void *opaque, hwaddr addr,
                                   uint64_t *pdata,
                                   unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);
    uint64_t r;
    uint32_t offset = addr & ~0x3;

    if (!attrs.secure && offset < A_PIDR4) {
        /* NS accesses can only see the ID registers */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: NS access to offset 0x%x\n",
                      offset);
        r = 0;
        goto read_out;
    }

    switch (offset) {
    case A_CTRL:
        r = s->ctrl;
        break;
    case A_BLK_MAX:
        r = s->blk_max - 1;
        break;
    case A_BLK_CFG:
        /* We are never in "init in progress state", so this just indicates
         * the block size. s->blocksize == (1 << BLK_CFG + 5), so
         * BLK_CFG == ctz32(s->blocksize) - 5
         */
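        /* For example, a 0x1000 byte blocksize reads back as BLK_CFG == 7,
         * since 1 << (7 + 5) == 0x1000.
         */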
        r = ctz32(s->blocksize) - 5;
        break;
    case A_BLK_IDX:
        r = s->blk_idx;
        break;
    case A_BLK_LUT:
        r = s->blk_lut[s->blk_idx];
        tz_mpc_autoinc_idx(s, size);
        break;
    case A_INT_STAT:
        r = s->int_stat;
        break;
    case A_INT_EN:
        r = s->int_en;
        break;
    case A_INT_INFO1:
        r = s->int_info1;
        break;
    case A_INT_INFO2:
        r = s->int_info2;
        break;
    case A_PIDR4:
    case A_PIDR5:
    case A_PIDR6:
    case A_PIDR7:
    case A_PIDR0:
    case A_PIDR1:
    case A_PIDR2:
    case A_PIDR3:
    case A_CIDR0:
    case A_CIDR1:
    case A_CIDR2:
    case A_CIDR3:
        r = tz_mpc_idregs[(offset - A_PIDR4) / 4];
        break;
    case A_INT_CLEAR:
    case A_INT_SET:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: write-only offset 0x%x\n",
                      offset);
        r = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: bad offset 0x%x\n", offset);
        r = 0;
        break;
    }

    if (size != 4) {
        /* None of our registers are read-sensitive (except BLK_LUT,
         * which can special case the "size not 4" case), so just
         * pull the right bytes out of the word read result.
         */
        r = extract32(r, (addr & 3) * 8, size * 8);
    }

read_out:
    trace_tz_mpc_reg_read(addr, r, size);
    *pdata = r;
    return MEMTX_OK;
}

static MemTxResult tz_mpc_reg_write(void *opaque, hwaddr addr,
                                    uint64_t value,
                                    unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);
    uint32_t offset = addr & ~0x3;

    trace_tz_mpc_reg_write(addr, value, size);

    if (!attrs.secure && offset < A_PIDR4) {
        /* NS accesses can only see the ID registers */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: NS access to offset 0x%x\n",
                      offset);
        return MEMTX_OK;
    }

    if (size != 4) {
        /* Expand the byte or halfword write to a full word size.
         * In most cases we can do this with zeroes; the exceptions
         * are CTRL, BLK_IDX and BLK_LUT.
         */
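        /* For example, a one-byte write to the LOCKDOWN byte of CTRL must
         * not clear the SEC_RESP and AUTOINC bits held in the lower bytes,
         * so those three registers are expanded read-modify-write style.
         */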
        uint32_t oldval;

        switch (offset) {
        case A_CTRL:
            oldval = s->ctrl;
            break;
        case A_BLK_IDX:
            oldval = s->blk_idx;
            break;
        case A_BLK_LUT:
            oldval = s->blk_lut[s->blk_idx];
            break;
        default:
            oldval = 0;
            break;
        }
        value = deposit32(oldval, (addr & 3) * 8, size * 8, value);
    }

    if ((s->ctrl & R_CTRL_LOCKDOWN_MASK) &&
        (offset == A_CTRL || offset == A_BLK_LUT || offset == A_INT_EN)) {
        /* Lockdown mode makes these three registers read-only, and
         * the only way out of it is to reset the device.
         */
        qemu_log_mask(LOG_GUEST_ERROR, "TZ MPC register write to offset 0x%x "
                      "while MPC is in lockdown mode\n", offset);
        return MEMTX_OK;
    }

    switch (offset) {
    case A_CTRL:
        /* We don't implement the 'data gating' feature so all other bits
         * are reserved and we make them RAZ/WI.
         */
        s->ctrl = value & (R_CTRL_SEC_RESP_MASK |
                           R_CTRL_AUTOINC_MASK |
                           R_CTRL_LOCKDOWN_MASK);
        break;
    case A_BLK_IDX:
        s->blk_idx = value % s->blk_max;
        break;
    case A_BLK_LUT:
        tz_mpc_iommu_notify(s, s->blk_idx, s->blk_lut[s->blk_idx], value);
        s->blk_lut[s->blk_idx] = value;
        tz_mpc_autoinc_idx(s, size);
        break;
    case A_INT_CLEAR:
        if (value & R_INT_CLEAR_IRQ_MASK) {
            s->int_stat = 0;
            tz_mpc_irq_update(s);
        }
        break;
    case A_INT_EN:
        s->int_en = value & R_INT_EN_IRQ_MASK;
        tz_mpc_irq_update(s);
        break;
    case A_INT_SET:
        if (value & R_INT_SET_IRQ_MASK) {
            s->int_stat = R_INT_STAT_IRQ_MASK;
            tz_mpc_irq_update(s);
        }
        break;
    case A_PIDR4:
    case A_PIDR5:
    case A_PIDR6:
    case A_PIDR7:
    case A_PIDR0:
    case A_PIDR1:
    case A_PIDR2:
    case A_PIDR3:
    case A_CIDR0:
    case A_CIDR1:
    case A_CIDR2:
    case A_CIDR3:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: read-only offset 0x%x\n", offset);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: bad offset 0x%x\n", offset);
        break;
    }

    return MEMTX_OK;
}

static const MemoryRegionOps tz_mpc_reg_ops = {
    .read_with_attrs = tz_mpc_reg_read,
    .write_with_attrs = tz_mpc_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 4,
};

static inline bool tz_mpc_cfg_ns(TZMPC *s, hwaddr addr)
{
    /* Return the cfg_ns bit from the LUT for the specified address */
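    /* For example, with a 0x1000 byte blocksize, address 0x21000 is
     * block 33, which is bit 1 of LUT word 1.
     */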
    hwaddr blknum = addr / s->blocksize;
    hwaddr blkword = blknum / 32;
    uint32_t blkbit = 1U << (blknum % 32);

    /* This would imply the address was larger than the size we
     * defined this memory region to be, so it can't happen.
     */
    assert(blkword < s->blk_max);
    return s->blk_lut[blkword] & blkbit;
}

static MemTxResult tz_mpc_handle_block(TZMPC *s, hwaddr addr, MemTxAttrs attrs)
{
    /* Handle a blocked transaction: raise IRQ, capture info, etc */
    if (!s->int_stat) {
        /* First blocked transfer: capture information into INT_INFO1 and
         * INT_INFO2. Subsequent transfers are still blocked but don't
         * capture information until the guest clears the interrupt.
         */

        s->int_info1 = addr;
        s->int_info2 = 0;
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HMASTER,
                                  attrs.requester_id & 0xffff);
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HNONSEC,
                                  ~attrs.secure);
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, CFG_NS,
                                  tz_mpc_cfg_ns(s, addr));
        s->int_stat |= R_INT_STAT_IRQ_MASK;
        tz_mpc_irq_update(s);
    }

    /* Generate bus error if desired; otherwise RAZ/WI */
    return (s->ctrl & R_CTRL_SEC_RESP_MASK) ? MEMTX_ERROR : MEMTX_OK;
}

/* Accesses only reach these read and write functions if the MPC is
 * blocking them; non-blocked accesses go directly to the downstream
 * memory region without passing through this code.
 */
static MemTxResult tz_mpc_mem_blocked_read(void *opaque, hwaddr addr,
                                           uint64_t *pdata,
                                           unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);

    trace_tz_mpc_mem_blocked_read(addr, size, attrs.secure);

    *pdata = 0;
    return tz_mpc_handle_block(s, addr, attrs);
}

static MemTxResult tz_mpc_mem_blocked_write(void *opaque, hwaddr addr,
                                            uint64_t value,
                                            unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);

    trace_tz_mpc_mem_blocked_write(addr, value, size, attrs.secure);

    return tz_mpc_handle_block(s, addr, attrs);
}

static const MemoryRegionOps tz_mpc_mem_blocked_ops = {
    .read_with_attrs = tz_mpc_mem_blocked_read,
    .write_with_attrs = tz_mpc_mem_blocked_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
};

static IOMMUTLBEntry tz_mpc_translate(IOMMUMemoryRegion *iommu,
                                      hwaddr addr, IOMMUAccessFlags flags,
                                      int iommu_idx)
{
    TZMPC *s = TZ_MPC(container_of(iommu, TZMPC, upstream));
    bool ok;

    IOMMUTLBEntry ret = {
        .iova = addr & ~(s->blocksize - 1),
        .translated_addr = addr & ~(s->blocksize - 1),
        .addr_mask = s->blocksize - 1,
        .perm = IOMMU_RW,
    };

    /* Look at the per-block configuration for this address, and
     * return a TLB entry directing the transaction at either
     * downstream_as or blocked_io_as, as appropriate.
     * If the LUT cfg_ns bit is 1, only non-secure transactions
     * may pass. If the bit is 0, only secure transactions may pass.
     */
    ok = tz_mpc_cfg_ns(s, addr) == (iommu_idx == IOMMU_IDX_NS);

    trace_tz_mpc_translate(addr, flags,
                           iommu_idx == IOMMU_IDX_S ? "S" : "NS",
                           ok ? "pass" : "block");

    ret.target_as = ok ? &s->downstream_as : &s->blocked_io_as;
    return ret;
}

static int tz_mpc_attrs_to_index(IOMMUMemoryRegion *iommu, MemTxAttrs attrs)
{
    /* We treat unspecified attributes like secure. Transactions with
     * unspecified attributes come from places like
     * rom_reset() for initial image load, and we want
     * those to pass through the from-reset "everything is secure" config.
     * All the real during-emulation transactions from the CPU will
     * specify attributes.
     */
    return (attrs.unspecified || attrs.secure) ? IOMMU_IDX_S : IOMMU_IDX_NS;
}

static int tz_mpc_num_indexes(IOMMUMemoryRegion *iommu)
{
    return IOMMU_NUM_INDEXES;
}

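/* Reset returns the MPC to its from-reset configuration: every block
 * marked secure-only (all LUT bits zero), AUTOINC set, lockdown clear,
 * and the blocked-transaction interrupt enabled.
 */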
static void tz_mpc_reset(DeviceState *dev)
{
    TZMPC *s = TZ_MPC(dev);

    s->ctrl = 0x00000100;
    s->blk_idx = 0;
    s->int_stat = 0;
    s->int_en = 1;
    s->int_info1 = 0;
    s->int_info2 = 0;

    memset(s->blk_lut, 0, s->blk_max * sizeof(uint32_t));
}

static void tz_mpc_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    TZMPC *s = TZ_MPC(obj);

    qdev_init_gpio_out_named(dev, &s->irq, "irq", 1);
}

static void tz_mpc_realize(DeviceState *dev, Error **errp)
{
    Object *obj = OBJECT(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    TZMPC *s = TZ_MPC(dev);
    uint64_t size;

    /* We can't create the upstream end of the port until realize,
     * as we don't know the size of the MR used as the downstream until then.
     * We insist on having a downstream, to avoid complicating the code
     * with handling the "don't know how big this is" case. It's easy
     * enough for the user to create an unimplemented_device as downstream
     * if they have nothing else to plug into this.
     */
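    /* A board model typically wires this device up roughly as follows
     * (a sketch only, not code from this file; names are illustrative):
     *
     *   object_property_set_link(OBJECT(mpc), OBJECT(downstream_mr),
     *                            "downstream", &error_fatal);
     *   qdev_init_nofail(DEVICE(mpc));
     *   sysbus_mmio_map(SYS_BUS_DEVICE(mpc), 0, regs_base);
     *   memory_region_add_subregion(get_system_memory(), upstream_base,
     *       sysbus_mmio_get_region(SYS_BUS_DEVICE(mpc), 1));
     *
     * MMIO region 0 is the register block and region 1 the upstream
     * IOMMU memory region created below.
     */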
    if (!s->downstream) {
        error_setg(errp, "MPC 'downstream' link not set");
        return;
    }

    size = memory_region_size(s->downstream);

    memory_region_init_iommu(&s->upstream, sizeof(s->upstream),
                             TYPE_TZ_MPC_IOMMU_MEMORY_REGION,
                             obj, "tz-mpc-upstream", size);

    /* In real hardware the block size is configurable. In QEMU we could
     * make it configurable too, but it would need to be at least as big as
     * the target page size so that we can execute out of the resulting MRs.
     * Guest software is supposed to check the block size using the BLK_CFG
     * register, so we simply fix it at the page size.
     */
    s->blocksize = memory_region_iommu_get_min_page_size(&s->upstream);
    if (size % s->blocksize != 0) {
        error_setg(errp,
                   "MPC 'downstream' size %" PRId64
                   " is not a multiple of %" HWADDR_PRIx " bytes",
                   size, s->blocksize);
        object_unref(OBJECT(&s->upstream));
        return;
    }

    /* BLK_MAX is the max value of BLK_IDX, which indexes an array of 32-bit
     * words, each bit of which indicates one block.
     */
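    /* For example, a 16MB downstream region with a 0x1000 byte blocksize
     * has 4096 blocks and therefore 128 LUT words, so blk_max is 128 and
     * the BLK_MAX register reads back as 127.
     */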
    s->blk_max = DIV_ROUND_UP(size / s->blocksize, 32);

    memory_region_init_io(&s->regmr, obj, &tz_mpc_reg_ops,
                          s, "tz-mpc-regs", 0x1000);
    sysbus_init_mmio(sbd, &s->regmr);

    sysbus_init_mmio(sbd, MEMORY_REGION(&s->upstream));

    /* This memory region is not exposed to users of this device as a
     * sysbus MMIO region, but is instead used internally as something
     * that our IOMMU translate function might direct accesses to.
     */
    memory_region_init_io(&s->blocked_io, obj, &tz_mpc_mem_blocked_ops,
                          s, "tz-mpc-blocked-io", size);

    address_space_init(&s->downstream_as, s->downstream,
                       "tz-mpc-downstream");
    address_space_init(&s->blocked_io_as, &s->blocked_io,
                       "tz-mpc-blocked-io");

    s->blk_lut = g_new0(uint32_t, s->blk_max);
}

static int tz_mpc_post_load(void *opaque, int version_id)
{
    TZMPC *s = TZ_MPC(opaque);

    /* Check the incoming data doesn't point blk_idx off the end of blk_lut. */
    if (s->blk_idx >= s->blk_max) {
        return -1;
    }
    return 0;
}

static const VMStateDescription tz_mpc_vmstate = {
    .name = "tz-mpc",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = tz_mpc_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ctrl, TZMPC),
        VMSTATE_UINT32(blk_idx, TZMPC),
        VMSTATE_UINT32(int_stat, TZMPC),
        VMSTATE_UINT32(int_en, TZMPC),
        VMSTATE_UINT32(int_info1, TZMPC),
        VMSTATE_UINT32(int_info2, TZMPC),
        VMSTATE_VARRAY_UINT32(blk_lut, TZMPC, blk_max,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_END_OF_LIST()
    }
};

static Property tz_mpc_properties[] = {
    DEFINE_PROP_LINK("downstream", TZMPC, downstream,
                     TYPE_MEMORY_REGION, MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};

static void tz_mpc_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = tz_mpc_realize;
    dc->vmsd = &tz_mpc_vmstate;
    dc->reset = tz_mpc_reset;
    dc->props = tz_mpc_properties;
}

static const TypeInfo tz_mpc_info = {
    .name = TYPE_TZ_MPC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(TZMPC),
    .instance_init = tz_mpc_init,
    .class_init = tz_mpc_class_init,
};

static void tz_mpc_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = tz_mpc_translate;
    imrc->attrs_to_index = tz_mpc_attrs_to_index;
    imrc->num_indexes = tz_mpc_num_indexes;
}

static const TypeInfo tz_mpc_iommu_memory_region_info = {
    .name = TYPE_TZ_MPC_IOMMU_MEMORY_REGION,
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .class_init = tz_mpc_iommu_memory_region_class_init,
};

static void tz_mpc_register_types(void)
{
    type_register_static(&tz_mpc_info);
    type_register_static(&tz_mpc_iommu_memory_region_info);
}

type_init(tz_mpc_register_types);