/*
 * ARM AHB5 TrustZone Memory Protection Controller emulation
 *
 * Copyright (c) 2018 Linaro Limited
 * Written by Peter Maydell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 or
 * (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "trace.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/registerfields.h"
#include "hw/irq.h"
#include "hw/misc/tz-mpc.h"
#include "hw/qdev-properties.h"

/* Our IOMMU has two IOMMU indexes, one for secure transactions and one for
 * non-secure transactions.
 */
enum {
    IOMMU_IDX_S,
    IOMMU_IDX_NS,
    IOMMU_NUM_INDEXES,
};

/* Config registers */
REG32(CTRL, 0x00)
    FIELD(CTRL, SEC_RESP, 4, 1)
    FIELD(CTRL, AUTOINC, 8, 1)
    FIELD(CTRL, LOCKDOWN, 31, 1)
REG32(BLK_MAX, 0x10)
REG32(BLK_CFG, 0x14)
REG32(BLK_IDX, 0x18)
REG32(BLK_LUT, 0x1c)
REG32(INT_STAT, 0x20)
    FIELD(INT_STAT, IRQ, 0, 1)
REG32(INT_CLEAR, 0x24)
    FIELD(INT_CLEAR, IRQ, 0, 1)
REG32(INT_EN, 0x28)
    FIELD(INT_EN, IRQ, 0, 1)
REG32(INT_INFO1, 0x2c)
REG32(INT_INFO2, 0x30)
    FIELD(INT_INFO2, HMASTER, 0, 16)
    FIELD(INT_INFO2, HNONSEC, 16, 1)
    FIELD(INT_INFO2, CFG_NS, 17, 1)
REG32(INT_SET, 0x34)
    FIELD(INT_SET, IRQ, 0, 1)
REG32(PIDR4, 0xfd0)
REG32(PIDR5, 0xfd4)
REG32(PIDR6, 0xfd8)
REG32(PIDR7, 0xfdc)
REG32(PIDR0, 0xfe0)
REG32(PIDR1, 0xfe4)
REG32(PIDR2, 0xfe8)
REG32(PIDR3, 0xfec)
REG32(CIDR0, 0xff0)
REG32(CIDR1, 0xff4)
REG32(CIDR2, 0xff8)
REG32(CIDR3, 0xffc)

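/* Byte values returned for the PIDR4..7, PIDR0..3 and CIDR0..3 ID
 * registers, in that order; the CIDR bytes 0x0d 0xf0 0x05 0xb1 are the
 * standard Arm component-ID signature.
 */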
static const uint8_t tz_mpc_idregs[] = {
    0x04, 0x00, 0x00, 0x00,
    0x60, 0xb8, 0x1b, 0x00,
    0x0d, 0xf0, 0x05, 0xb1,
};

static void tz_mpc_irq_update(TZMPC *s)
{
    qemu_set_irq(s->irq, s->int_stat && s->int_en);
}

static void tz_mpc_iommu_notify(TZMPC *s, uint32_t lutidx,
                                uint32_t oldlut, uint32_t newlut)
{
    /* Called when the LUT word at lutidx has changed from oldlut to newlut;
     * must call the IOMMU notifiers for the changed blocks.
     */
    IOMMUTLBEntry entry = {
        .addr_mask = s->blocksize - 1,
    };
    hwaddr addr = lutidx * s->blocksize * 32;
    int i;

    for (i = 0; i < 32; i++, addr += s->blocksize) {
        bool block_is_ns;

        if (!((oldlut ^ newlut) & (1 << i))) {
            continue;
        }
        /* This changes the mappings for both the S and the NS space,
         * so we need to do four notifies: an UNMAP then a MAP for each.
         */
        block_is_ns = newlut & (1 << i);

        trace_tz_mpc_iommu_notify(addr);
        entry.iova = addr;
        entry.translated_addr = addr;

        entry.perm = IOMMU_NONE;
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, entry);
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, entry);

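        /* Re-map the block: for the S index an NS block is diverted to
         * blocked_io_as; for the NS index it is an S block that is
         * diverted there instead.
         */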
        entry.perm = IOMMU_RW;
        if (block_is_ns) {
            entry.target_as = &s->blocked_io_as;
        } else {
            entry.target_as = &s->downstream_as;
        }
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, entry);
        if (block_is_ns) {
            entry.target_as = &s->downstream_as;
        } else {
            entry.target_as = &s->blocked_io_as;
        }
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, entry);
    }
}

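/* Called after an access to BLK_LUT: step BLK_IDX on to the next LUT word,
 * wrapping back to 0 after the last one. Only word-sized accesses with
 * CTRL.AUTOINC set auto-increment.
 */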
static void tz_mpc_autoinc_idx(TZMPC *s, unsigned access_size)
{
    /* Auto-increment BLK_IDX if necessary */
    if (access_size == 4 && (s->ctrl & R_CTRL_AUTOINC_MASK)) {
        s->blk_idx++;
        s->blk_idx %= s->blk_max;
    }
}

static MemTxResult tz_mpc_reg_read(void *opaque, hwaddr addr,
                                   uint64_t *pdata,
                                   unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);
    uint64_t r;
    uint32_t offset = addr & ~0x3;

    if (!attrs.secure && offset < A_PIDR4) {
        /* NS accesses can only see the ID registers */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: NS access to offset 0x%x\n",
                      offset);
        r = 0;
        goto read_out;
    }

    switch (offset) {
    case A_CTRL:
        r = s->ctrl;
        break;
    case A_BLK_MAX:
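        /* BLK_MAX reads as the highest valid BLK_IDX, i.e. one less than
         * the number of LUT words.
         */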
        r = s->blk_max - 1;
        break;
    case A_BLK_CFG:
        /* We are never in "init in progress state", so this just indicates
         * the block size. s->blocksize == (1 << (BLK_CFG + 5)), so
         * BLK_CFG == ctz32(s->blocksize) - 5
         */
        r = ctz32(s->blocksize) - 5;
        break;
    case A_BLK_IDX:
        r = s->blk_idx;
        break;
    case A_BLK_LUT:
        r = s->blk_lut[s->blk_idx];
        tz_mpc_autoinc_idx(s, size);
        break;
    case A_INT_STAT:
        r = s->int_stat;
        break;
    case A_INT_EN:
        r = s->int_en;
        break;
    case A_INT_INFO1:
        r = s->int_info1;
        break;
    case A_INT_INFO2:
        r = s->int_info2;
        break;
    case A_PIDR4:
    case A_PIDR5:
    case A_PIDR6:
    case A_PIDR7:
    case A_PIDR0:
    case A_PIDR1:
    case A_PIDR2:
    case A_PIDR3:
    case A_CIDR0:
    case A_CIDR1:
    case A_CIDR2:
    case A_CIDR3:
        r = tz_mpc_idregs[(offset - A_PIDR4) / 4];
        break;
    case A_INT_CLEAR:
    case A_INT_SET:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: write-only offset 0x%x\n",
                      offset);
        r = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: bad offset 0x%x\n", offset);
        r = 0;
        break;
    }

    if (size != 4) {
        /* None of our registers are read-sensitive (except BLK_LUT,
         * whose auto-increment already special-cases the "size not 4"
         * case), so just pull the right bytes out of the word read result.
         */
        r = extract32(r, (addr & 3) * 8, size * 8);
    }

read_out:
    trace_tz_mpc_reg_read(addr, r, size);
    *pdata = r;
    return MEMTX_OK;
}

static MemTxResult tz_mpc_reg_write(void *opaque, hwaddr addr,
                                    uint64_t value,
                                    unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);
    uint32_t offset = addr & ~0x3;

    trace_tz_mpc_reg_write(addr, value, size);

    if (!attrs.secure && offset < A_PIDR4) {
        /* NS accesses can only see the ID registers */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: NS access to offset 0x%x\n",
                      offset);
        return MEMTX_OK;
    }

    if (size != 4) {
        /* Expand the byte or halfword write to a full word size.
         * In most cases we can do this with zeroes; the exceptions
         * are CTRL, BLK_IDX and BLK_LUT.
         */
        uint32_t oldval;

        switch (offset) {
        case A_CTRL:
            oldval = s->ctrl;
            break;
        case A_BLK_IDX:
            oldval = s->blk_idx;
            break;
        case A_BLK_LUT:
            oldval = s->blk_lut[s->blk_idx];
            break;
        default:
            oldval = 0;
            break;
        }
        value = deposit32(oldval, (addr & 3) * 8, size * 8, value);
    }

    if ((s->ctrl & R_CTRL_LOCKDOWN_MASK) &&
        (offset == A_CTRL || offset == A_BLK_LUT || offset == A_INT_EN)) {
        /* Lockdown mode makes these three registers read-only, and
         * the only way out of it is to reset the device.
         */
        qemu_log_mask(LOG_GUEST_ERROR, "TZ MPC register write to offset 0x%x "
                      "while MPC is in lockdown mode\n", offset);
        return MEMTX_OK;
    }

    switch (offset) {
    case A_CTRL:
        /* We don't implement the 'data gating' feature so all other bits
         * are reserved and we make them RAZ/WI.
         */
        s->ctrl = value & (R_CTRL_SEC_RESP_MASK |
                           R_CTRL_AUTOINC_MASK |
                           R_CTRL_LOCKDOWN_MASK);
        break;
    case A_BLK_IDX:
        s->blk_idx = value % s->blk_max;
        break;
    case A_BLK_LUT:
        tz_mpc_iommu_notify(s, s->blk_idx, s->blk_lut[s->blk_idx], value);
        s->blk_lut[s->blk_idx] = value;
        tz_mpc_autoinc_idx(s, size);
        break;
    case A_INT_CLEAR:
        if (value & R_INT_CLEAR_IRQ_MASK) {
            s->int_stat = 0;
            tz_mpc_irq_update(s);
        }
        break;
    case A_INT_EN:
        s->int_en = value & R_INT_EN_IRQ_MASK;
        tz_mpc_irq_update(s);
        break;
    case A_INT_SET:
        if (value & R_INT_SET_IRQ_MASK) {
            s->int_stat = R_INT_STAT_IRQ_MASK;
            tz_mpc_irq_update(s);
        }
        break;
    case A_PIDR4:
    case A_PIDR5:
    case A_PIDR6:
    case A_PIDR7:
    case A_PIDR0:
    case A_PIDR1:
    case A_PIDR2:
    case A_PIDR3:
    case A_CIDR0:
    case A_CIDR1:
    case A_CIDR2:
    case A_CIDR3:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: read-only offset 0x%x\n", offset);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: bad offset 0x%x\n", offset);
        break;
    }

    return MEMTX_OK;
}

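/* Register block ops: byte and halfword accesses are accepted and handled
 * by the read/write functions above rather than being split or widened by
 * the memory core, since impl matches valid at 1..4 bytes.
 */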
static const MemoryRegionOps tz_mpc_reg_ops = {
    .read_with_attrs = tz_mpc_reg_read,
    .write_with_attrs = tz_mpc_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 4,
};

static inline bool tz_mpc_cfg_ns(TZMPC *s, hwaddr addr)
{
    /* Return the cfg_ns bit from the LUT for the specified address */
    hwaddr blknum = addr / s->blocksize;
    hwaddr blkword = blknum / 32;
    uint32_t blkbit = 1U << (blknum % 32);

    /* This would imply the address was larger than the size we
     * defined this memory region to be, so it can't happen.
     */
    assert(blkword < s->blk_max);
    return s->blk_lut[blkword] & blkbit;
}

static MemTxResult tz_mpc_handle_block(TZMPC *s, hwaddr addr, MemTxAttrs attrs)
{
    /* Handle a blocked transaction: raise IRQ, capture info, etc */
    if (!s->int_stat) {
        /* First blocked transfer: capture information into INT_INFO1 and
         * INT_INFO2. Subsequent transfers are still blocked but don't
         * capture information until the guest clears the interrupt.
         */

        s->int_info1 = addr;
        s->int_info2 = 0;
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HMASTER,
                                  attrs.requester_id & 0xffff);
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HNONSEC,
                                  ~attrs.secure);
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, CFG_NS,
                                  tz_mpc_cfg_ns(s, addr));
        s->int_stat |= R_INT_STAT_IRQ_MASK;
        tz_mpc_irq_update(s);
    }

    /* Generate bus error if desired; otherwise RAZ/WI */
    return (s->ctrl & R_CTRL_SEC_RESP_MASK) ? MEMTX_ERROR : MEMTX_OK;
}

/* Accesses only reach these read and write functions if the MPC is
 * blocking them; non-blocked accesses go directly to the downstream
 * memory region without passing through this code.
 */
static MemTxResult tz_mpc_mem_blocked_read(void *opaque, hwaddr addr,
                                           uint64_t *pdata,
                                           unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);

    trace_tz_mpc_mem_blocked_read(addr, size, attrs.secure);

    *pdata = 0;
    return tz_mpc_handle_block(s, addr, attrs);
}

static MemTxResult tz_mpc_mem_blocked_write(void *opaque, hwaddr addr,
                                            uint64_t value,
                                            unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);

    trace_tz_mpc_mem_blocked_write(addr, value, size, attrs.secure);

    return tz_mpc_handle_block(s, addr, attrs);
}

static const MemoryRegionOps tz_mpc_mem_blocked_ops = {
    .read_with_attrs = tz_mpc_mem_blocked_read,
    .write_with_attrs = tz_mpc_mem_blocked_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
};

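/* Users of the upstream IOMMU region may cache translate results per
 * IOMMU index, which is why tz_mpc_iommu_notify() must issue UNMAP/MAP
 * notifications whenever a LUT word changes.
 */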
static IOMMUTLBEntry tz_mpc_translate(IOMMUMemoryRegion *iommu,
                                      hwaddr addr, IOMMUAccessFlags flags,
                                      int iommu_idx)
{
    TZMPC *s = TZ_MPC(container_of(iommu, TZMPC, upstream));
    bool ok;

    IOMMUTLBEntry ret = {
        .iova = addr & ~(s->blocksize - 1),
        .translated_addr = addr & ~(s->blocksize - 1),
        .addr_mask = s->blocksize - 1,
        .perm = IOMMU_RW,
    };

    /* Look at the per-block configuration for this address, and
     * return a TLB entry directing the transaction at either
     * downstream_as or blocked_io_as, as appropriate.
     * If the LUT cfg_ns bit is 1, only non-secure transactions
     * may pass. If the bit is 0, only secure transactions may pass.
     */
    ok = tz_mpc_cfg_ns(s, addr) == (iommu_idx == IOMMU_IDX_NS);

    trace_tz_mpc_translate(addr, flags,
                           iommu_idx == IOMMU_IDX_S ? "S" : "NS",
                           ok ? "pass" : "block");

    ret.target_as = ok ? &s->downstream_as : &s->blocked_io_as;
    return ret;
}

static int tz_mpc_attrs_to_index(IOMMUMemoryRegion *iommu, MemTxAttrs attrs)
{
    /* We treat unspecified attributes like secure. Transactions with
     * unspecified attributes come from places like
     * rom_reset() for initial image load, and we want
     * those to pass through the from-reset "everything is secure" config.
     * All the real during-emulation transactions from the CPU will
     * specify attributes.
     */
    return (attrs.unspecified || attrs.secure) ? IOMMU_IDX_S : IOMMU_IDX_NS;
}

static int tz_mpc_num_indexes(IOMMUMemoryRegion *iommu)
{
    return IOMMU_NUM_INDEXES;
}

static void tz_mpc_reset(DeviceState *dev)
{
    TZMPC *s = TZ_MPC(dev);

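    /* Reset: CTRL.AUTOINC set, SEC_RESP and LOCKDOWN clear, interrupt
     * enabled, and the LUT cleared so that only secure accesses pass.
     */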
    s->ctrl = 0x00000100;
    s->blk_idx = 0;
    s->int_stat = 0;
    s->int_en = 1;
    s->int_info1 = 0;
    s->int_info2 = 0;

    memset(s->blk_lut, 0, s->blk_max * sizeof(uint32_t));
}

static void tz_mpc_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    TZMPC *s = TZ_MPC(obj);

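    /* Single outbound "irq" line, raised while a blocked transaction is
     * latched in INT_STAT and the interrupt is enabled.
     */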
    qdev_init_gpio_out_named(dev, &s->irq, "irq", 1);
}

static void tz_mpc_realize(DeviceState *dev, Error **errp)
{
    Object *obj = OBJECT(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    TZMPC *s = TZ_MPC(dev);
    uint64_t size;

    /* We can't create the upstream end of the port until realize,
     * as we don't know the size of the MR used as the downstream until then.
     * We insist on having a downstream, to avoid complicating the code
     * with handling the "don't know how big this is" case. It's easy
     * enough for the user to create an unimplemented_device as downstream
     * if they have nothing else to plug into this.
     */
    if (!s->downstream) {
        error_setg(errp, "MPC 'downstream' link not set");
        return;
    }

    size = memory_region_size(s->downstream);

    memory_region_init_iommu(&s->upstream, sizeof(s->upstream),
                             TYPE_TZ_MPC_IOMMU_MEMORY_REGION,
                             obj, "tz-mpc-upstream", size);

    /* In real hardware the block size is configurable. In QEMU we could
     * make it configurable, but it would need to be at least as big as the
     * target page size so we can execute out of the resulting MRs. Guest
     * software is supposed to check the block size using the BLK_CFG
     * register, so make it fixed at the page size.
     */
    s->blocksize = memory_region_iommu_get_min_page_size(&s->upstream);
    if (size % s->blocksize != 0) {
        error_setg(errp,
                   "MPC 'downstream' size %" PRId64
                   " is not a multiple of %" HWADDR_PRIx " bytes",
                   size, s->blocksize);
        object_unref(OBJECT(&s->upstream));
        return;
    }

    /* s->blk_max is the number of 32-bit LUT words, each bit of which
     * indicates one block; the BLK_MAX register reads back as the max
     * value of BLK_IDX, i.e. blk_max - 1.
     */
    s->blk_max = DIV_ROUND_UP(size / s->blocksize, 32);

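    /* Sysbus MMIO 0 is the (secure-only) register block; MMIO 1 is the
     * upstream end of the port, which the SoC maps wherever the protected
     * memory should appear to bus masters.
     */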
    memory_region_init_io(&s->regmr, obj, &tz_mpc_reg_ops,
                          s, "tz-mpc-regs", 0x1000);
    sysbus_init_mmio(sbd, &s->regmr);

    sysbus_init_mmio(sbd, MEMORY_REGION(&s->upstream));

    /* This memory region is not exposed to users of this device as a
     * sysbus MMIO region, but is instead used internally as something
     * that our IOMMU translate function might direct accesses to.
     */
    memory_region_init_io(&s->blocked_io, obj, &tz_mpc_mem_blocked_ops,
                          s, "tz-mpc-blocked-io", size);

    address_space_init(&s->downstream_as, s->downstream,
                       "tz-mpc-downstream");
    address_space_init(&s->blocked_io_as, &s->blocked_io,
                       "tz-mpc-blocked-io");

    s->blk_lut = g_new0(uint32_t, s->blk_max);
}

static int tz_mpc_post_load(void *opaque, int version_id)
{
    TZMPC *s = TZ_MPC(opaque);

    /* Check the incoming data doesn't point blk_idx off the end of blk_lut. */
    if (s->blk_idx >= s->blk_max) {
        return -1;
    }
    return 0;
}

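/* Migration state: blk_lut is sent as a variable-length array of blk_max
 * words; tz_mpc_post_load() rejects incoming state whose blk_idx would
 * index past it.
 */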
static const VMStateDescription tz_mpc_vmstate = {
    .name = "tz-mpc",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = tz_mpc_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ctrl, TZMPC),
        VMSTATE_UINT32(blk_idx, TZMPC),
        VMSTATE_UINT32(int_stat, TZMPC),
        VMSTATE_UINT32(int_en, TZMPC),
        VMSTATE_UINT32(int_info1, TZMPC),
        VMSTATE_UINT32(int_info2, TZMPC),
        VMSTATE_VARRAY_UINT32(blk_lut, TZMPC, blk_max,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_END_OF_LIST()
    }
};

static Property tz_mpc_properties[] = {
    DEFINE_PROP_LINK("downstream", TZMPC, downstream,
                     TYPE_MEMORY_REGION, MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};

static void tz_mpc_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = tz_mpc_realize;
    dc->vmsd = &tz_mpc_vmstate;
    dc->reset = tz_mpc_reset;
    device_class_set_props(dc, tz_mpc_properties);
}

static const TypeInfo tz_mpc_info = {
    .name = TYPE_TZ_MPC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(TZMPC),
    .instance_init = tz_mpc_init,
    .class_init = tz_mpc_class_init,
};

static void tz_mpc_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = tz_mpc_translate;
    imrc->attrs_to_index = tz_mpc_attrs_to_index;
    imrc->num_indexes = tz_mpc_num_indexes;
}

static const TypeInfo tz_mpc_iommu_memory_region_info = {
    .name = TYPE_TZ_MPC_IOMMU_MEMORY_REGION,
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .class_init = tz_mpc_iommu_memory_region_class_init,
};

static void tz_mpc_register_types(void)
{
    type_register_static(&tz_mpc_info);
    type_register_static(&tz_mpc_iommu_memory_region_info);
}

type_init(tz_mpc_register_types);