/*
 * QEMU model of the Xilinx Zynq Devcfg Interface
 *
 * (C) 2011 PetaLogix Pty Ltd
 * (C) 2014 Xilinx Inc.
 * Written by Peter Crosthwaite <peter.crosthwaite@xilinx.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/dma/xlnx-zynq-devcfg.h"
#include "hw/irq.h"
#include "migration/vmstate.h"
#include "qemu/bitops.h"
#include "sysemu/dma.h"
#include "qemu/log.h"
#include "qemu/module.h"

#define FREQ_HZ 900000000

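/* Maximum number of bytes transferred per iteration of the DMA loop below */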
#define BTT_MAX 0x400

#ifndef XLNX_ZYNQ_DEVCFG_ERR_DEBUG
#define XLNX_ZYNQ_DEVCFG_ERR_DEBUG 0
#endif

#define DB_PRINT(fmt, args...) do { \
    if (XLNX_ZYNQ_DEVCFG_ERR_DEBUG) { \
        qemu_log("%s: " fmt, __func__, ## args); \
    } \
} while (0)

REG32(CTRL, 0x00)
    FIELD(CTRL, FORCE_RST,     31, 1) /* Not supported, wr ignored */
    FIELD(CTRL, PCAP_PR,       27, 1) /* Forced to 0 on bad unlock */
    FIELD(CTRL, PCAP_MODE,     26, 1)
    FIELD(CTRL, MULTIBOOT_EN,  24, 1)
    FIELD(CTRL, USER_MODE,     15, 1)
    FIELD(CTRL, PCFG_AES_FUSE, 12, 1)
    FIELD(CTRL, PCFG_AES_EN,    9, 3)
    FIELD(CTRL, SEU_EN,         8, 1)
    FIELD(CTRL, SEC_EN,         7, 1)
    FIELD(CTRL, SPNIDEN,        6, 1)
    FIELD(CTRL, SPIDEN,         5, 1)
    FIELD(CTRL, NIDEN,          4, 1)
    FIELD(CTRL, DBGEN,          3, 1)
    FIELD(CTRL, DAP_EN,         0, 3)

REG32(LOCK, 0x04)
#define AES_FUSE_LOCK 4
#define AES_EN_LOCK   3
#define SEU_LOCK      2
#define SEC_LOCK      1
#define DBG_LOCK      0

/* mapping bits in R_LOCK to what they lock in R_CTRL */
static const uint32_t lock_ctrl_map[] = {
    [AES_FUSE_LOCK] = R_CTRL_PCFG_AES_FUSE_MASK,
    [AES_EN_LOCK]   = R_CTRL_PCFG_AES_EN_MASK,
    [SEU_LOCK]      = R_CTRL_SEU_EN_MASK,
    [SEC_LOCK]      = R_CTRL_SEC_EN_MASK,
    [DBG_LOCK]      = R_CTRL_SPNIDEN_MASK | R_CTRL_SPIDEN_MASK |
                      R_CTRL_NIDEN_MASK | R_CTRL_DBGEN_MASK |
                      R_CTRL_DAP_EN_MASK,
};

REG32(CFG, 0x08)
    FIELD(CFG, RFIFO_TH,        10, 2)
    FIELD(CFG, WFIFO_TH,         8, 2)
    FIELD(CFG, RCLK_EDGE,        7, 1)
    FIELD(CFG, WCLK_EDGE,        6, 1)
    FIELD(CFG, DISABLE_SRC_INC,  5, 1)
    FIELD(CFG, DISABLE_DST_INC,  4, 1)
#define R_CFG_RESET 0x50B

REG32(INT_STS, 0x0C)
    FIELD(INT_STS, PSS_GTS_USR_B,   31, 1)
    FIELD(INT_STS, PSS_FST_CFG_B,   30, 1)
    FIELD(INT_STS, PSS_CFG_RESET_B, 27, 1)
    FIELD(INT_STS, RX_FIFO_OV,      18, 1)
    FIELD(INT_STS, WR_FIFO_LVL,     17, 1)
    FIELD(INT_STS, RD_FIFO_LVL,     16, 1)
    FIELD(INT_STS, DMA_CMD_ERR,     15, 1)
    FIELD(INT_STS, DMA_Q_OV,        14, 1)
    FIELD(INT_STS, DMA_DONE,        13, 1)
    FIELD(INT_STS, DMA_P_DONE,      12, 1)
    FIELD(INT_STS, P2D_LEN_ERR,     11, 1)
    FIELD(INT_STS, PCFG_DONE,        2, 1)
#define R_INT_STS_RSVD ((0x7 << 24) | (0x1 << 19) | (0xF << 7))

REG32(INT_MASK, 0x10)

REG32(STATUS, 0x14)
    FIELD(STATUS, DMA_CMD_Q_F,     31, 1)
    FIELD(STATUS, DMA_CMD_Q_E,     30, 1)
    FIELD(STATUS, DMA_DONE_CNT,    28, 2)
    FIELD(STATUS, RX_FIFO_LVL,     20, 5)
    FIELD(STATUS, TX_FIFO_LVL,     12, 7)
    FIELD(STATUS, PSS_GTS_USR_B,   11, 1)
    FIELD(STATUS, PSS_FST_CFG_B,   10, 1)
    FIELD(STATUS, PSS_CFG_RESET_B,  5, 1)

REG32(DMA_SRC_ADDR, 0x18)
REG32(DMA_DST_ADDR, 0x1C)
REG32(DMA_SRC_LEN, 0x20)
REG32(DMA_DST_LEN, 0x24)
REG32(ROM_SHADOW, 0x28)
REG32(SW_ID, 0x30)
REG32(UNLOCK, 0x34)

#define R_UNLOCK_MAGIC 0x757BDF0D

REG32(MCTRL, 0x80)
    FIELD(MCTRL, PS_VERSION,    28, 4)
    FIELD(MCTRL, PCFG_POR_B,     8, 1)
    FIELD(MCTRL, INT_PCAP_LPBK,  4, 1)
    FIELD(MCTRL, QEMU,           3, 1)

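/* Update the interrupt output: the IRQ is asserted while any INT_STS bit
 * that is not masked in INT_MASK is set.
 */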
static void xlnx_zynq_devcfg_update_ixr(XlnxZynqDevcfg *s)
{
    qemu_set_irq(s->irq, ~s->regs[R_INT_MASK] & s->regs[R_INT_STS]);
}

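/* Reset every register to its reset value via the register API */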
static void xlnx_zynq_devcfg_reset(DeviceState *dev)
{
    XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(dev);
    int i;

    for (i = 0; i < XLNX_ZYNQ_DEVCFG_R_MAX; ++i) {
        register_reset(&s->regs_info[i]);
    }
}

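/* Drain the DMA command FIFO. Each command is processed in chunks of at
 * most BTT_MAX bytes: data is read from the source address and, when the
 * internal PCAP loopback is enabled in MCTRL, written back to the
 * destination address. A finished command sets the DMA done bits in
 * INT_STS, is popped from the FIFO and the interrupt line is updated.
 */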
static void xlnx_zynq_devcfg_dma_go(XlnxZynqDevcfg *s)
{
    do {
        uint8_t buf[BTT_MAX];
        XlnxZynqDevcfgDMACmd *dmah = s->dma_cmd_fifo;
        uint32_t btt = BTT_MAX;
        bool loopback = s->regs[R_MCTRL] & R_MCTRL_INT_PCAP_LPBK_MASK;

        btt = MIN(btt, dmah->src_len);
        if (loopback) {
            btt = MIN(btt, dmah->dest_len);
        }
        DB_PRINT("reading %x bytes from %x\n", btt, dmah->src_addr);
        dma_memory_read(&address_space_memory, dmah->src_addr, buf, btt,
                        MEMTXATTRS_UNSPECIFIED);
        dmah->src_len -= btt;
        dmah->src_addr += btt;
        if (loopback && (dmah->src_len || dmah->dest_len)) {
            DB_PRINT("writing %x bytes to %x\n", btt, dmah->dest_addr);
            dma_memory_write(&address_space_memory, dmah->dest_addr, buf, btt,
                             MEMTXATTRS_UNSPECIFIED);
            dmah->dest_len -= btt;
            dmah->dest_addr += btt;
        }
        if (!dmah->src_len && !dmah->dest_len) {
            DB_PRINT("dma operation finished\n");
            s->regs[R_INT_STS] |= R_INT_STS_DMA_DONE_MASK |
                                  R_INT_STS_DMA_P_DONE_MASK;
            s->dma_cmd_fifo_num--;
            memmove(s->dma_cmd_fifo, &s->dma_cmd_fifo[1],
                    sizeof(s->dma_cmd_fifo) - sizeof(s->dma_cmd_fifo[0]));
        }
        xlnx_zynq_devcfg_update_ixr(s);
    } while (s->dma_cmd_fifo_num);
}

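/* INT_STS and INT_MASK writes may change the interrupt state, so
 * re-evaluate the IRQ line after the register update.
 */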
static void r_ixr_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque);

    xlnx_zynq_devcfg_update_ixr(s);
}

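/* For each lock bit set in R_LOCK, discard the incoming write to the
 * corresponding CTRL bits and keep their current value.
 */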
static uint64_t r_ctrl_pre_write(RegisterInfo *reg, uint64_t val)
{
    XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque);
    int i;

    for (i = 0; i < ARRAY_SIZE(lock_ctrl_map); ++i) {
        if (s->regs[R_LOCK] & 1 << i) {
            val &= ~lock_ctrl_map[i];
            val |= lock_ctrl_map[i] & s->regs[R_CTRL];
        }
    }
    return val;
}

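/* Real hardware performs a security reset when the PCFG_AES_EN bits are
 * programmed inconsistently; the model only warns about it.
 */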
static void r_ctrl_post_write(RegisterInfo *reg, uint64_t val)
{
    const char *device_prefix = object_get_typename(OBJECT(reg->opaque));
    uint32_t aes_en = FIELD_EX32(val, CTRL, PCFG_AES_EN);

    if (aes_en != 0 && aes_en != 7) {
        qemu_log_mask(LOG_UNIMP, "%s: warning, aes-en bits inconsistent, "
                      "unimplemented security reset should happen!\n",
                      device_prefix);
    }
}

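/* Writing the magic value unlocks the device; any other value clears
 * PCAP_PR and PCFG_AES_EN and makes the register region inaccessible.
 */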
static void r_unlock_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque);
    const char *device_prefix = object_get_typename(OBJECT(s));

    if (val == R_UNLOCK_MAGIC) {
        DB_PRINT("successful unlock\n");
        s->regs[R_CTRL] |= R_CTRL_PCAP_PR_MASK;
        s->regs[R_CTRL] |= R_CTRL_PCFG_AES_EN_MASK;
        memory_region_set_enabled(&s->iomem, true);
    } else { /* bad unlock attempt */
        qemu_log_mask(LOG_GUEST_ERROR, "%s: failed unlock\n", device_prefix);
        s->regs[R_CTRL] &= ~R_CTRL_PCAP_PR_MASK;
        s->regs[R_CTRL] &= ~R_CTRL_PCFG_AES_EN_MASK;
        /* core becomes inaccessible */
        memory_region_set_enabled(&s->iomem, false);
    }
}

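/* LOCK bits are sticky: writes may set bits but never clear them */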
static uint64_t r_lock_pre_write(RegisterInfo *reg, uint64_t val)
{
    XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque);

    /* once bits are locked they stay locked */
    return s->regs[R_LOCK] | val;
}

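/* Writing DMA_DST_LEN completes a DMA command: queue it (addresses are
 * word-aligned, lengths are converted from words to bytes) and kick off
 * the transfer.
 */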
static void r_dma_dst_len_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque);

    s->dma_cmd_fifo[s->dma_cmd_fifo_num] = (XlnxZynqDevcfgDMACmd) {
        .src_addr = s->regs[R_DMA_SRC_ADDR] & ~0x3UL,
        .dest_addr = s->regs[R_DMA_DST_ADDR] & ~0x3UL,
        .src_len = s->regs[R_DMA_SRC_LEN] << 2,
        .dest_len = s->regs[R_DMA_DST_LEN] << 2,
    };
    s->dma_cmd_fifo_num++;
    DB_PRINT("dma transfer started; %d total transfers pending\n",
             s->dma_cmd_fifo_num);
    xlnx_zynq_devcfg_dma_go(s);
}

static const RegisterAccessInfo xlnx_zynq_devcfg_regs_info[] = {
    {   .name = "CTRL", .addr = A_CTRL,
        .reset = R_CTRL_PCAP_PR_MASK | R_CTRL_PCAP_MODE_MASK | 0x3 << 13,
        .rsvd = 0x1 << 28 | 0x3ff << 13 | 0x3 << 13,
        .pre_write = r_ctrl_pre_write,
        .post_write = r_ctrl_post_write,
    },
    {   .name = "LOCK", .addr = A_LOCK,
        .rsvd = MAKE_64BIT_MASK(5, 64 - 5),
        .pre_write = r_lock_pre_write,
    },
    {   .name = "CFG", .addr = A_CFG,
        .reset = R_CFG_RESET,
        .rsvd = 0xfffff00f,
    },
    {   .name = "INT_STS", .addr = A_INT_STS,
        .w1c = ~R_INT_STS_RSVD,
        .reset = R_INT_STS_PSS_GTS_USR_B_MASK |
                 R_INT_STS_PSS_CFG_RESET_B_MASK |
                 R_INT_STS_WR_FIFO_LVL_MASK,
        .rsvd = R_INT_STS_RSVD,
        .post_write = r_ixr_post_write,
    },
    {   .name = "INT_MASK", .addr = A_INT_MASK,
        .reset = ~0,
        .rsvd = R_INT_STS_RSVD,
        .post_write = r_ixr_post_write,
    },
    {   .name = "STATUS", .addr = A_STATUS,
        .reset = R_STATUS_DMA_CMD_Q_E_MASK |
                 R_STATUS_PSS_GTS_USR_B_MASK |
                 R_STATUS_PSS_CFG_RESET_B_MASK,
        .ro = ~0,
    },
    {   .name = "DMA_SRC_ADDR", .addr = A_DMA_SRC_ADDR, },
    {   .name = "DMA_DST_ADDR", .addr = A_DMA_DST_ADDR, },
    {   .name = "DMA_SRC_LEN", .addr = A_DMA_SRC_LEN,
        .ro = MAKE_64BIT_MASK(27, 64 - 27) },
    {   .name = "DMA_DST_LEN", .addr = A_DMA_DST_LEN,
        .ro = MAKE_64BIT_MASK(27, 64 - 27),
        .post_write = r_dma_dst_len_post_write,
    },
    {   .name = "ROM_SHADOW", .addr = A_ROM_SHADOW,
        .rsvd = ~0ull,
    },
    {   .name = "SW_ID", .addr = A_SW_ID, },
    {   .name = "UNLOCK", .addr = A_UNLOCK,
        .post_write = r_unlock_post_write,
    },
    {   .name = "MCTRL", .addr = R_MCTRL * 4,
        /* Silicon 3.0 for version field, the mysterious reserved bit 23
         * and QEMU platform identifier.
         */
        .reset = 0x2 << R_MCTRL_PS_VERSION_SHIFT | 1 << 23 | R_MCTRL_QEMU_MASK,
        .ro = ~R_MCTRL_INT_PCAP_LPBK_MASK,
        .rsvd = 0x00f00303,
    },
};

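/* Registers are accessed as aligned 32-bit words, little-endian */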
static const MemoryRegionOps xlnx_zynq_devcfg_reg_ops = {
    .read = register_read_memory,
    .write = register_write_memory,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    }
};

static const VMStateDescription vmstate_xlnx_zynq_devcfg_dma_cmd = {
    .name = "xlnx_zynq_devcfg_dma_cmd",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(src_addr, XlnxZynqDevcfgDMACmd),
        VMSTATE_UINT32(dest_addr, XlnxZynqDevcfgDMACmd),
        VMSTATE_UINT32(src_len, XlnxZynqDevcfgDMACmd),
        VMSTATE_UINT32(dest_len, XlnxZynqDevcfgDMACmd),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_xlnx_zynq_devcfg = {
    .name = "xlnx_zynq_devcfg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(dma_cmd_fifo, XlnxZynqDevcfg,
                             XLNX_ZYNQ_DEVCFG_DMA_CMD_FIFO_LEN, 0,
                             vmstate_xlnx_zynq_devcfg_dma_cmd,
                             XlnxZynqDevcfgDMACmd),
        VMSTATE_UINT8(dma_cmd_fifo_num, XlnxZynqDevcfg),
        VMSTATE_UINT32_ARRAY(regs, XlnxZynqDevcfg, XLNX_ZYNQ_DEVCFG_R_MAX),
        VMSTATE_END_OF_LIST()
    }
};

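/* Set up the IRQ, the register block and the containing MMIO region */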
static void xlnx_zynq_devcfg_init(Object *obj)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
    XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(obj);
    RegisterInfoArray *reg_array;

    sysbus_init_irq(sbd, &s->irq);

    memory_region_init(&s->iomem, obj, "devcfg", XLNX_ZYNQ_DEVCFG_R_MAX * 4);
    reg_array =
        register_init_block32(DEVICE(obj), xlnx_zynq_devcfg_regs_info,
                              ARRAY_SIZE(xlnx_zynq_devcfg_regs_info),
                              s->regs_info, s->regs,
                              &xlnx_zynq_devcfg_reg_ops,
                              XLNX_ZYNQ_DEVCFG_ERR_DEBUG,
                              XLNX_ZYNQ_DEVCFG_R_MAX);
    memory_region_add_subregion(&s->iomem,
                                A_CTRL,
                                &reg_array->mem);

    sysbus_init_mmio(sbd, &s->iomem);
}

static void xlnx_zynq_devcfg_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_legacy_reset(dc, xlnx_zynq_devcfg_reset);
    dc->vmsd = &vmstate_xlnx_zynq_devcfg;
}

static const TypeInfo xlnx_zynq_devcfg_info = {
    .name = TYPE_XLNX_ZYNQ_DEVCFG,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XlnxZynqDevcfg),
    .instance_init = xlnx_zynq_devcfg_init,
    .class_init = xlnx_zynq_devcfg_class_init,
};

static void xlnx_zynq_devcfg_register_types(void)
{
    type_register_static(&xlnx_zynq_devcfg_info);
}

type_init(xlnx_zynq_devcfg_register_types)