/*
 * QEMU model of the ZynqMP generic DMA
 *
 * Copyright (c) 2014 Xilinx Inc.
 * Copyright (c) 2018 FEIMTECH AB
 *
 * Written by Edgar E. Iglesias <edgar.iglesias@xilinx.com>,
 *            Francisco Iglesias <francisco.iglesias@feimtech.se>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/dma/xlnx-zdma.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "qapi/error.h"

#ifndef XLNX_ZDMA_ERR_DEBUG
#define XLNX_ZDMA_ERR_DEBUG 0
#endif

REG32(ZDMA_ERR_CTRL, 0x0)
    FIELD(ZDMA_ERR_CTRL, APB_ERR_RES, 0, 1)
REG32(ZDMA_CH_ISR, 0x100)
    FIELD(ZDMA_CH_ISR, DMA_PAUSE, 11, 1)
    FIELD(ZDMA_CH_ISR, DMA_DONE, 10, 1)
    FIELD(ZDMA_CH_ISR, AXI_WR_DATA, 9, 1)
    FIELD(ZDMA_CH_ISR, AXI_RD_DATA, 8, 1)
    FIELD(ZDMA_CH_ISR, AXI_RD_DST_DSCR, 7, 1)
    FIELD(ZDMA_CH_ISR, AXI_RD_SRC_DSCR, 6, 1)
    FIELD(ZDMA_CH_ISR, IRQ_DST_ACCT_ERR, 5, 1)
    FIELD(ZDMA_CH_ISR, IRQ_SRC_ACCT_ERR, 4, 1)
    FIELD(ZDMA_CH_ISR, BYTE_CNT_OVRFL, 3, 1)
    FIELD(ZDMA_CH_ISR, DST_DSCR_DONE, 2, 1)
    FIELD(ZDMA_CH_ISR, SRC_DSCR_DONE, 1, 1)
    FIELD(ZDMA_CH_ISR, INV_APB, 0, 1)
REG32(ZDMA_CH_IMR, 0x104)
    FIELD(ZDMA_CH_IMR, DMA_PAUSE, 11, 1)
    FIELD(ZDMA_CH_IMR, DMA_DONE, 10, 1)
    FIELD(ZDMA_CH_IMR, AXI_WR_DATA, 9, 1)
    FIELD(ZDMA_CH_IMR, AXI_RD_DATA, 8, 1)
    FIELD(ZDMA_CH_IMR, AXI_RD_DST_DSCR, 7, 1)
    FIELD(ZDMA_CH_IMR, AXI_RD_SRC_DSCR, 6, 1)
    FIELD(ZDMA_CH_IMR, IRQ_DST_ACCT_ERR, 5, 1)
    FIELD(ZDMA_CH_IMR, IRQ_SRC_ACCT_ERR, 4, 1)
    FIELD(ZDMA_CH_IMR, BYTE_CNT_OVRFL, 3, 1)
    FIELD(ZDMA_CH_IMR, DST_DSCR_DONE, 2, 1)
    FIELD(ZDMA_CH_IMR, SRC_DSCR_DONE, 1, 1)
    FIELD(ZDMA_CH_IMR, INV_APB, 0, 1)
REG32(ZDMA_CH_IEN, 0x108)
    FIELD(ZDMA_CH_IEN, DMA_PAUSE, 11, 1)
    FIELD(ZDMA_CH_IEN, DMA_DONE, 10, 1)
    FIELD(ZDMA_CH_IEN, AXI_WR_DATA, 9, 1)
    FIELD(ZDMA_CH_IEN, AXI_RD_DATA, 8, 1)
    FIELD(ZDMA_CH_IEN, AXI_RD_DST_DSCR, 7, 1)
    FIELD(ZDMA_CH_IEN, AXI_RD_SRC_DSCR, 6, 1)
    FIELD(ZDMA_CH_IEN, IRQ_DST_ACCT_ERR, 5, 1)
    FIELD(ZDMA_CH_IEN, IRQ_SRC_ACCT_ERR, 4, 1)
    FIELD(ZDMA_CH_IEN, BYTE_CNT_OVRFL, 3, 1)
    FIELD(ZDMA_CH_IEN, DST_DSCR_DONE, 2, 1)
    FIELD(ZDMA_CH_IEN, SRC_DSCR_DONE, 1, 1)
    FIELD(ZDMA_CH_IEN, INV_APB, 0, 1)
REG32(ZDMA_CH_IDS, 0x10c)
    FIELD(ZDMA_CH_IDS, DMA_PAUSE, 11, 1)
    FIELD(ZDMA_CH_IDS, DMA_DONE, 10, 1)
    FIELD(ZDMA_CH_IDS, AXI_WR_DATA, 9, 1)
    FIELD(ZDMA_CH_IDS, AXI_RD_DATA, 8, 1)
    FIELD(ZDMA_CH_IDS, AXI_RD_DST_DSCR, 7, 1)
    FIELD(ZDMA_CH_IDS, AXI_RD_SRC_DSCR, 6, 1)
    FIELD(ZDMA_CH_IDS, IRQ_DST_ACCT_ERR, 5, 1)
    FIELD(ZDMA_CH_IDS, IRQ_SRC_ACCT_ERR, 4, 1)
    FIELD(ZDMA_CH_IDS, BYTE_CNT_OVRFL, 3, 1)
    FIELD(ZDMA_CH_IDS, DST_DSCR_DONE, 2, 1)
    FIELD(ZDMA_CH_IDS, SRC_DSCR_DONE, 1, 1)
    FIELD(ZDMA_CH_IDS, INV_APB, 0, 1)
REG32(ZDMA_CH_CTRL0, 0x110)
    FIELD(ZDMA_CH_CTRL0, OVR_FETCH, 7, 1)
    FIELD(ZDMA_CH_CTRL0, POINT_TYPE, 6, 1)
    FIELD(ZDMA_CH_CTRL0, MODE, 4, 2)
    FIELD(ZDMA_CH_CTRL0, RATE_CTRL, 3, 1)
    FIELD(ZDMA_CH_CTRL0, CONT_ADDR, 2, 1)
    FIELD(ZDMA_CH_CTRL0, CONT, 1, 1)
REG32(ZDMA_CH_CTRL1, 0x114)
    FIELD(ZDMA_CH_CTRL1, DST_ISSUE, 5, 5)
    FIELD(ZDMA_CH_CTRL1, SRC_ISSUE, 0, 5)
REG32(ZDMA_CH_FCI, 0x118)
    FIELD(ZDMA_CH_FCI, PROG_CELL_CNT, 2, 2)
    FIELD(ZDMA_CH_FCI, SIDE, 1, 1)
    FIELD(ZDMA_CH_FCI, EN, 0, 1)
REG32(ZDMA_CH_STATUS, 0x11c)
    FIELD(ZDMA_CH_STATUS, STATE, 0, 2)
REG32(ZDMA_CH_DATA_ATTR, 0x120)
    FIELD(ZDMA_CH_DATA_ATTR, ARBURST, 26, 2)
    FIELD(ZDMA_CH_DATA_ATTR, ARCACHE, 22, 4)
    FIELD(ZDMA_CH_DATA_ATTR, ARQOS, 18, 4)
    FIELD(ZDMA_CH_DATA_ATTR, ARLEN, 14, 4)
    FIELD(ZDMA_CH_DATA_ATTR, AWBURST, 12, 2)
    FIELD(ZDMA_CH_DATA_ATTR, AWCACHE, 8, 4)
    FIELD(ZDMA_CH_DATA_ATTR, AWQOS, 4, 4)
    FIELD(ZDMA_CH_DATA_ATTR, AWLEN, 0, 4)
REG32(ZDMA_CH_DSCR_ATTR, 0x124)
    FIELD(ZDMA_CH_DSCR_ATTR, AXCOHRNT, 8, 1)
    FIELD(ZDMA_CH_DSCR_ATTR, AXCACHE, 4, 4)
    FIELD(ZDMA_CH_DSCR_ATTR, AXQOS, 0, 4)
REG32(ZDMA_CH_SRC_DSCR_WORD0, 0x128)
REG32(ZDMA_CH_SRC_DSCR_WORD1, 0x12c)
    FIELD(ZDMA_CH_SRC_DSCR_WORD1, MSB, 0, 17)
REG32(ZDMA_CH_SRC_DSCR_WORD2, 0x130)
    FIELD(ZDMA_CH_SRC_DSCR_WORD2, SIZE, 0, 30)
REG32(ZDMA_CH_SRC_DSCR_WORD3, 0x134)
    FIELD(ZDMA_CH_SRC_DSCR_WORD3, CMD, 3, 2)
    FIELD(ZDMA_CH_SRC_DSCR_WORD3, INTR, 2, 1)
    FIELD(ZDMA_CH_SRC_DSCR_WORD3, TYPE, 1, 1)
    FIELD(ZDMA_CH_SRC_DSCR_WORD3, COHRNT, 0, 1)
REG32(ZDMA_CH_DST_DSCR_WORD0, 0x138)
REG32(ZDMA_CH_DST_DSCR_WORD1, 0x13c)
    FIELD(ZDMA_CH_DST_DSCR_WORD1, MSB, 0, 17)
REG32(ZDMA_CH_DST_DSCR_WORD2, 0x140)
    FIELD(ZDMA_CH_DST_DSCR_WORD2, SIZE, 0, 30)
REG32(ZDMA_CH_DST_DSCR_WORD3, 0x144)
    FIELD(ZDMA_CH_DST_DSCR_WORD3, INTR, 2, 1)
    FIELD(ZDMA_CH_DST_DSCR_WORD3, TYPE, 1, 1)
    FIELD(ZDMA_CH_DST_DSCR_WORD3, COHRNT, 0, 1)
REG32(ZDMA_CH_WR_ONLY_WORD0, 0x148)
REG32(ZDMA_CH_WR_ONLY_WORD1, 0x14c)
REG32(ZDMA_CH_WR_ONLY_WORD2, 0x150)
REG32(ZDMA_CH_WR_ONLY_WORD3, 0x154)
REG32(ZDMA_CH_SRC_START_LSB, 0x158)
REG32(ZDMA_CH_SRC_START_MSB, 0x15c)
    FIELD(ZDMA_CH_SRC_START_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_DST_START_LSB, 0x160)
REG32(ZDMA_CH_DST_START_MSB, 0x164)
    FIELD(ZDMA_CH_DST_START_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_RATE_CTRL, 0x18c)
    FIELD(ZDMA_CH_RATE_CTRL, CNT, 0, 12)
REG32(ZDMA_CH_SRC_CUR_PYLD_LSB, 0x168)
REG32(ZDMA_CH_SRC_CUR_PYLD_MSB, 0x16c)
    FIELD(ZDMA_CH_SRC_CUR_PYLD_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_DST_CUR_PYLD_LSB, 0x170)
REG32(ZDMA_CH_DST_CUR_PYLD_MSB, 0x174)
    FIELD(ZDMA_CH_DST_CUR_PYLD_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_SRC_CUR_DSCR_LSB, 0x178)
REG32(ZDMA_CH_SRC_CUR_DSCR_MSB, 0x17c)
    FIELD(ZDMA_CH_SRC_CUR_DSCR_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_DST_CUR_DSCR_LSB, 0x180)
REG32(ZDMA_CH_DST_CUR_DSCR_MSB, 0x184)
    FIELD(ZDMA_CH_DST_CUR_DSCR_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_TOTAL_BYTE, 0x188)
REG32(ZDMA_CH_RATE_CNTL, 0x18c)
    FIELD(ZDMA_CH_RATE_CNTL, CNT, 0, 12)
REG32(ZDMA_CH_IRQ_SRC_ACCT, 0x190)
    FIELD(ZDMA_CH_IRQ_SRC_ACCT, CNT, 0, 8)
REG32(ZDMA_CH_IRQ_DST_ACCT, 0x194)
    FIELD(ZDMA_CH_IRQ_DST_ACCT, CNT, 0, 8)
REG32(ZDMA_CH_DBG0, 0x198)
    FIELD(ZDMA_CH_DBG0, CMN_BUF_FREE, 0, 9)
REG32(ZDMA_CH_DBG1, 0x19c)
    FIELD(ZDMA_CH_DBG1, CMN_BUF_OCC, 0, 9)
REG32(ZDMA_CH_CTRL2, 0x200)
    FIELD(ZDMA_CH_CTRL2, EN, 0, 1)

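/*
 * Values used by the model for CTRL0.POINT_TYPE and MODE, the descriptor
 * CMD/TYPE fields and the DATA_ATTR AxBURST fields.
 */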
enum {
    PT_REG = 0,
    PT_MEM = 1,
};

enum {
    CMD_HALT = 1,
    CMD_STOP = 2,
};

enum {
    RW_MODE_RW = 0,
    RW_MODE_WO = 1,
    RW_MODE_RO = 2,
};

enum {
    DTYPE_LINEAR = 0,
    DTYPE_LINKED = 1,
};

enum {
    AXI_BURST_FIXED = 0,
    AXI_BURST_INCR = 1,
};

static void zdma_ch_imr_update_irq(XlnxZDMA *s)
{
    bool pending;

    pending = s->regs[R_ZDMA_CH_ISR] & ~s->regs[R_ZDMA_CH_IMR];

    qemu_set_irq(s->irq_zdma_ch_imr, pending);
}

static void zdma_ch_isr_postw(RegisterInfo *reg, uint64_t val64)
{
    XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
    zdma_ch_imr_update_irq(s);
}

static uint64_t zdma_ch_ien_prew(RegisterInfo *reg, uint64_t val64)
{
    XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
    uint32_t val = val64;

    s->regs[R_ZDMA_CH_IMR] &= ~val;
    zdma_ch_imr_update_irq(s);
    return 0;
}

static uint64_t zdma_ch_ids_prew(RegisterInfo *reg, uint64_t val64)
{
    XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
    uint32_t val = val64;

    s->regs[R_ZDMA_CH_IMR] |= val;
    zdma_ch_imr_update_irq(s);
    return 0;
}

static void zdma_set_state(XlnxZDMA *s, XlnxZDMAState state)
{
    s->state = state;
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_STATUS, STATE, state);

    /* Signal error if we have an error condition. */
    if (s->error) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_STATUS, STATE, 3);
    }
}

static void zdma_src_done(XlnxZDMA *s)
{
    unsigned int cnt;
    cnt = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT);
    cnt++;
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT, cnt);
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, SRC_DSCR_DONE, true);

    /* Did we overflow? */
    if (cnt != ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT)) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, IRQ_SRC_ACCT_ERR, true);
    }
    zdma_ch_imr_update_irq(s);
}

static void zdma_dst_done(XlnxZDMA *s)
{
    unsigned int cnt;
    cnt = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT);
    cnt++;
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT, cnt);
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DST_DSCR_DONE, true);

    /* Did we overflow? */
    if (cnt != ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT)) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, IRQ_DST_ACCT_ERR, true);
    }
    zdma_ch_imr_update_irq(s);
}

static uint64_t zdma_get_regaddr64(XlnxZDMA *s, unsigned int basereg)
{
    uint64_t addr;

    addr = s->regs[basereg + 1];
    addr <<= 32;
    addr |= s->regs[basereg];

    return addr;
}

static void zdma_put_regaddr64(XlnxZDMA *s, unsigned int basereg, uint64_t addr)
{
    s->regs[basereg] = addr;
    s->regs[basereg + 1] = addr >> 32;
}

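/*
 * Descriptor handling: in register (simple) mode the descriptors live in the
 * ZDMA_CH_*_DSCR_WORD registers; in memory mode they are fetched from the
 * DMA address space via the current-descriptor address registers.
 */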
static bool zdma_load_descriptor(XlnxZDMA *s, uint64_t addr, void *buf)
{
    /* ZDMA descriptors must be aligned to their own size. */
    if (addr % sizeof(XlnxZDMADescr)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "zdma: unaligned descriptor at %" PRIx64 "\n",
                      addr);
        memset(buf, 0x0, sizeof(XlnxZDMADescr));
        s->error = true;
        return false;
    }

    address_space_rw(s->dma_as, addr, s->attr,
                     buf, sizeof(XlnxZDMADescr), false);
    return true;
}

static void zdma_load_src_descriptor(XlnxZDMA *s)
{
    uint64_t src_addr;
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);

    if (ptype == PT_REG) {
        memcpy(&s->dsc_src, &s->regs[R_ZDMA_CH_SRC_DSCR_WORD0],
               sizeof(s->dsc_src));
        return;
    }

    src_addr = zdma_get_regaddr64(s, R_ZDMA_CH_SRC_CUR_DSCR_LSB);

    if (!zdma_load_descriptor(s, src_addr, &s->dsc_src)) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, AXI_RD_SRC_DSCR, true);
    }
}

static void zdma_load_dst_descriptor(XlnxZDMA *s)
{
    uint64_t dst_addr;
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);

    if (ptype == PT_REG) {
        memcpy(&s->dsc_dst, &s->regs[R_ZDMA_CH_DST_DSCR_WORD0],
               sizeof(s->dsc_dst));
        return;
    }

    dst_addr = zdma_get_regaddr64(s, R_ZDMA_CH_DST_CUR_DSCR_LSB);

    if (!zdma_load_descriptor(s, dst_addr, &s->dsc_dst)) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, AXI_RD_DST_DSCR, true);
    }
}

static uint64_t zdma_update_descr_addr(XlnxZDMA *s, bool type,
                                       unsigned int basereg)
{
    uint64_t addr, next;

    if (type == DTYPE_LINEAR) {
        next = zdma_get_regaddr64(s, basereg);
        next += sizeof(s->dsc_dst);
        zdma_put_regaddr64(s, basereg, next);
    } else {
        addr = zdma_get_regaddr64(s, basereg);
        addr += sizeof(s->dsc_dst);
        address_space_rw(s->dma_as, addr, s->attr, (void *) &next, 8, false);
        zdma_put_regaddr64(s, basereg, next);
    }
    return next;
}

static void zdma_write_dst(XlnxZDMA *s, uint8_t *buf, uint32_t len)
{
    uint32_t dst_size, dlen;
    bool dst_intr, dst_type;
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
    unsigned int rw_mode = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, MODE);
    unsigned int burst_type = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_DATA_ATTR,
                                               AWBURST);

    /* FIXED burst types are only supported in simple dma mode. */
    if (ptype != PT_REG) {
        burst_type = AXI_BURST_INCR;
    }

    while (len) {
        dst_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
                              SIZE);
        dst_type = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3,
                              TYPE);
        if (dst_size == 0 && ptype == PT_MEM) {
            uint64_t next;
            next = zdma_update_descr_addr(s, dst_type,
                                          R_ZDMA_CH_DST_CUR_DSCR_LSB);
            zdma_load_descriptor(s, next, &s->dsc_dst);
            dst_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
                                  SIZE);
            dst_type = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3,
                                  TYPE);
        }

        /* Match what hardware does by ignoring the dst_size and only using
         * the src size for Simple register mode. */
        if (ptype == PT_REG && rw_mode != RW_MODE_WO) {
            dst_size = len;
        }

        dst_intr = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3,
                              INTR);

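        /*
         * Clamp the chunk to what the destination descriptor still accepts;
         * FIXED bursts never move more than one bus word at a time.
         */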
        dlen = len > dst_size ? dst_size : len;
        if (burst_type == AXI_BURST_FIXED) {
            if (dlen > (s->cfg.bus_width / 8)) {
                dlen = s->cfg.bus_width / 8;
            }
        }

        address_space_rw(s->dma_as, s->dsc_dst.addr, s->attr, buf, dlen,
                         true);
        if (burst_type == AXI_BURST_INCR) {
            s->dsc_dst.addr += dlen;
        }
        dst_size -= dlen;
        buf += dlen;
        len -= dlen;

        if (dst_size == 0 && dst_intr) {
            zdma_dst_done(s);
        }

        /* Write back to buffered descriptor. */
        s->dsc_dst.words[2] = FIELD_DP32(s->dsc_dst.words[2],
                                         ZDMA_CH_DST_DSCR_WORD2,
                                         SIZE,
                                         dst_size);
    }
}

static void zdma_process_descr(XlnxZDMA *s)
{
    uint64_t src_addr;
    uint32_t src_size, len;
    unsigned int src_cmd;
    bool src_intr, src_type;
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
    unsigned int rw_mode = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, MODE);
    unsigned int burst_type = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_DATA_ATTR,
                                               ARBURST);

    src_addr = s->dsc_src.addr;
    src_size = FIELD_EX32(s->dsc_src.words[2], ZDMA_CH_SRC_DSCR_WORD2, SIZE);
    src_cmd = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, CMD);
    src_type = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, TYPE);
    src_intr = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, INTR);

    /* FIXED burst types and non-rw modes are only supported in
     * simple dma mode.
     */
    if (ptype != PT_REG) {
        if (rw_mode != RW_MODE_RW) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "zDMA: rw-mode=%d but not simple DMA mode.\n",
                          rw_mode);
        }
        if (burst_type != AXI_BURST_INCR) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "zDMA: burst_type=%d but not simple DMA mode.\n",
                          burst_type);
        }
        burst_type = AXI_BURST_INCR;
        rw_mode = RW_MODE_RW;
    }

    if (rw_mode == RW_MODE_WO) {
        /* In Simple DMA Write-Only, we need to push DST size bytes
         * regardless of what SRC size is set to. */
        src_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
                              SIZE);
        memcpy(s->buf, &s->regs[R_ZDMA_CH_WR_ONLY_WORD0], s->cfg.bus_width / 8);
    }

    while (src_size) {
        len = src_size > ARRAY_SIZE(s->buf) ? ARRAY_SIZE(s->buf) : src_size;
        if (burst_type == AXI_BURST_FIXED) {
            if (len > (s->cfg.bus_width / 8)) {
                len = s->cfg.bus_width / 8;
            }
        }

        if (rw_mode == RW_MODE_WO) {
            if (len > s->cfg.bus_width / 8) {
                len = s->cfg.bus_width / 8;
            }
        } else {
            address_space_rw(s->dma_as, src_addr, s->attr, s->buf, len,
                             false);
            if (burst_type == AXI_BURST_INCR) {
                src_addr += len;
            }
        }

        if (rw_mode != RW_MODE_RO) {
            zdma_write_dst(s, s->buf, len);
        }

        s->regs[R_ZDMA_CH_TOTAL_BYTE] += len;
        src_size -= len;
    }

    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DMA_DONE, true);

    if (src_intr) {
        zdma_src_done(s);
    }

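    /*
     * The source descriptor's CMD field controls what happens next: STOP
     * ends the transfer, HALT pauses the channel until the guest resumes
     * it via CTRL0.CONT.
     */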
    /* Load next descriptor. */
    if (ptype == PT_REG || src_cmd == CMD_STOP) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_CTRL2, EN, 0);
        zdma_set_state(s, DISABLED);
        return;
    }

    if (src_cmd == CMD_HALT) {
        zdma_set_state(s, PAUSED);
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DMA_PAUSE, 1);
        zdma_ch_imr_update_irq(s);
        return;
    }

    zdma_update_descr_addr(s, src_type, R_ZDMA_CH_SRC_CUR_DSCR_LSB);
}

static void zdma_run(XlnxZDMA *s)
{
    while (s->state == ENABLED && !s->error) {
        zdma_load_src_descriptor(s);

        if (s->error) {
            zdma_set_state(s, DISABLED);
        } else {
            zdma_process_descr(s);
        }
    }

    zdma_ch_imr_update_irq(s);
}

static void zdma_update_descr_addr_from_start(XlnxZDMA *s)
{
    uint64_t src_addr, dst_addr;

    src_addr = zdma_get_regaddr64(s, R_ZDMA_CH_SRC_START_LSB);
    zdma_put_regaddr64(s, R_ZDMA_CH_SRC_CUR_DSCR_LSB, src_addr);
    dst_addr = zdma_get_regaddr64(s, R_ZDMA_CH_DST_START_LSB);
    zdma_put_regaddr64(s, R_ZDMA_CH_DST_CUR_DSCR_LSB, dst_addr);
    zdma_load_dst_descriptor(s);
}

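/*
 * Writes to CTRL0 and CTRL2 start, resume or stop the channel depending on
 * the EN and CONT bits and the current channel state.
 */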
static void zdma_ch_ctrlx_postw(RegisterInfo *reg, uint64_t val64)
{
    XlnxZDMA *s = XLNX_ZDMA(reg->opaque);

    if (ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL2, EN)) {
        s->error = false;

        if (s->state == PAUSED &&
            ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT)) {
            if (ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT_ADDR) == 1) {
                zdma_update_descr_addr_from_start(s);
            } else {
                bool src_type = FIELD_EX32(s->dsc_src.words[3],
                                           ZDMA_CH_SRC_DSCR_WORD3, TYPE);
                zdma_update_descr_addr(s, src_type,
                                       R_ZDMA_CH_SRC_CUR_DSCR_LSB);
            }
            ARRAY_FIELD_DP32(s->regs, ZDMA_CH_CTRL0, CONT, false);
            zdma_set_state(s, ENABLED);
        } else if (s->state == DISABLED) {
            zdma_update_descr_addr_from_start(s);
            zdma_set_state(s, ENABLED);
        }
    } else {
        /* Leave Paused state? */
        if (s->state == PAUSED &&
            ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT)) {
            zdma_set_state(s, DISABLED);
        }
    }

    zdma_run(s);
}

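/*
 * Per-register access info: reset values, reserved, read-only and
 * write-1-to-clear masks, plus the pre/post write hooks defined above.
 */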
static RegisterAccessInfo zdma_regs_info[] = {
    {   .name = "ZDMA_ERR_CTRL",  .addr = A_ZDMA_ERR_CTRL,
        .rsvd = 0xfffffffe,
    },{ .name = "ZDMA_CH_ISR",  .addr = A_ZDMA_CH_ISR,
        .rsvd = 0xfffff000,
        .w1c = 0xfff,
        .post_write = zdma_ch_isr_postw,
    },{ .name = "ZDMA_CH_IMR",  .addr = A_ZDMA_CH_IMR,
        .reset = 0xfff,
        .rsvd = 0xfffff000,
        .ro = 0xfff,
    },{ .name = "ZDMA_CH_IEN",  .addr = A_ZDMA_CH_IEN,
        .rsvd = 0xfffff000,
        .pre_write = zdma_ch_ien_prew,
    },{ .name = "ZDMA_CH_IDS",  .addr = A_ZDMA_CH_IDS,
        .rsvd = 0xfffff000,
        .pre_write = zdma_ch_ids_prew,
    },{ .name = "ZDMA_CH_CTRL0",  .addr = A_ZDMA_CH_CTRL0,
        .reset = 0x80,
        .rsvd = 0xffffff01,
        .post_write = zdma_ch_ctrlx_postw,
    },{ .name = "ZDMA_CH_CTRL1",  .addr = A_ZDMA_CH_CTRL1,
        .reset = 0x3ff,
        .rsvd = 0xfffffc00,
    },{ .name = "ZDMA_CH_FCI",  .addr = A_ZDMA_CH_FCI,
        .rsvd = 0xffffffc0,
    },{ .name = "ZDMA_CH_STATUS",  .addr = A_ZDMA_CH_STATUS,
        .rsvd = 0xfffffffc,
        .ro = 0x3,
    },{ .name = "ZDMA_CH_DATA_ATTR",  .addr = A_ZDMA_CH_DATA_ATTR,
        .reset = 0x483d20f,
        .rsvd = 0xf0000000,
    },{ .name = "ZDMA_CH_DSCR_ATTR",  .addr = A_ZDMA_CH_DSCR_ATTR,
        .rsvd = 0xfffffe00,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD0",  .addr = A_ZDMA_CH_SRC_DSCR_WORD0,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD1",  .addr = A_ZDMA_CH_SRC_DSCR_WORD1,
        .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD2",  .addr = A_ZDMA_CH_SRC_DSCR_WORD2,
        .rsvd = 0xc0000000,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD3",  .addr = A_ZDMA_CH_SRC_DSCR_WORD3,
        .rsvd = 0xffffffe0,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD0",  .addr = A_ZDMA_CH_DST_DSCR_WORD0,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD1",  .addr = A_ZDMA_CH_DST_DSCR_WORD1,
        .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD2",  .addr = A_ZDMA_CH_DST_DSCR_WORD2,
        .rsvd = 0xc0000000,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD3",  .addr = A_ZDMA_CH_DST_DSCR_WORD3,
        .rsvd = 0xfffffffa,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD0",  .addr = A_ZDMA_CH_WR_ONLY_WORD0,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD1",  .addr = A_ZDMA_CH_WR_ONLY_WORD1,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD2",  .addr = A_ZDMA_CH_WR_ONLY_WORD2,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD3",  .addr = A_ZDMA_CH_WR_ONLY_WORD3,
    },{ .name = "ZDMA_CH_SRC_START_LSB",  .addr = A_ZDMA_CH_SRC_START_LSB,
    },{ .name = "ZDMA_CH_SRC_START_MSB",  .addr = A_ZDMA_CH_SRC_START_MSB,
        .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_DST_START_LSB",  .addr = A_ZDMA_CH_DST_START_LSB,
    },{ .name = "ZDMA_CH_DST_START_MSB",  .addr = A_ZDMA_CH_DST_START_MSB,
        .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_SRC_CUR_PYLD_LSB",  .addr = A_ZDMA_CH_SRC_CUR_PYLD_LSB,
        .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_SRC_CUR_PYLD_MSB",  .addr = A_ZDMA_CH_SRC_CUR_PYLD_MSB,
        .rsvd = 0xfffe0000,
        .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_DST_CUR_PYLD_LSB",  .addr = A_ZDMA_CH_DST_CUR_PYLD_LSB,
        .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_DST_CUR_PYLD_MSB",  .addr = A_ZDMA_CH_DST_CUR_PYLD_MSB,
        .rsvd = 0xfffe0000,
        .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_SRC_CUR_DSCR_LSB",  .addr = A_ZDMA_CH_SRC_CUR_DSCR_LSB,
        .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_SRC_CUR_DSCR_MSB",  .addr = A_ZDMA_CH_SRC_CUR_DSCR_MSB,
        .rsvd = 0xfffe0000,
        .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_DST_CUR_DSCR_LSB",  .addr = A_ZDMA_CH_DST_CUR_DSCR_LSB,
        .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_DST_CUR_DSCR_MSB",  .addr = A_ZDMA_CH_DST_CUR_DSCR_MSB,
        .rsvd = 0xfffe0000,
        .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_TOTAL_BYTE",  .addr = A_ZDMA_CH_TOTAL_BYTE,
        .w1c = 0xffffffff,
    },{ .name = "ZDMA_CH_RATE_CNTL",  .addr = A_ZDMA_CH_RATE_CNTL,
        .rsvd = 0xfffff000,
    },{ .name = "ZDMA_CH_IRQ_SRC_ACCT",  .addr = A_ZDMA_CH_IRQ_SRC_ACCT,
        .rsvd = 0xffffff00,
        .ro = 0xff,
        .cor = 0xff,
    },{ .name = "ZDMA_CH_IRQ_DST_ACCT",  .addr = A_ZDMA_CH_IRQ_DST_ACCT,
        .rsvd = 0xffffff00,
        .ro = 0xff,
        .cor = 0xff,
    },{ .name = "ZDMA_CH_DBG0",  .addr = A_ZDMA_CH_DBG0,
        .rsvd = 0xfffffe00,
        .ro = 0x1ff,
    },{ .name = "ZDMA_CH_DBG1",  .addr = A_ZDMA_CH_DBG1,
        .rsvd = 0xfffffe00,
        .ro = 0x1ff,
    },{ .name = "ZDMA_CH_CTRL2",  .addr = A_ZDMA_CH_CTRL2,
        .rsvd = 0xfffffffe,
        .post_write = zdma_ch_ctrlx_postw,
    }
};

static void zdma_reset(DeviceState *dev)
{
    XlnxZDMA *s = XLNX_ZDMA(dev);
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
        register_reset(&s->regs_info[i]);
    }

    zdma_ch_imr_update_irq(s);
}

static uint64_t zdma_read(void *opaque, hwaddr addr, unsigned size)
{
    XlnxZDMA *s = XLNX_ZDMA(opaque);
    RegisterInfo *r = &s->regs_info[addr / 4];

    if (!r->data) {
        gchar *path = object_get_canonical_path(OBJECT(s));
        qemu_log("%s: Decode error: read from %" HWADDR_PRIx "\n",
                 path,
                 addr);
        g_free(path);
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
        zdma_ch_imr_update_irq(s);
        return 0;
    }
    return register_read(r, ~0, NULL, false);
}

static void zdma_write(void *opaque, hwaddr addr, uint64_t value,
                       unsigned size)
{
    XlnxZDMA *s = XLNX_ZDMA(opaque);
    RegisterInfo *r = &s->regs_info[addr / 4];

    if (!r->data) {
        gchar *path = object_get_canonical_path(OBJECT(s));
        qemu_log("%s: Decode error: write to %" HWADDR_PRIx "=%" PRIx64 "\n",
                 path,
                 addr, value);
        g_free(path);
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
        zdma_ch_imr_update_irq(s);
        return;
    }
    register_write(r, value, ~0, NULL, false);
}

static const MemoryRegionOps zdma_ops = {
    .read = zdma_read,
    .write = zdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void zdma_realize(DeviceState *dev, Error **errp)
{
    XlnxZDMA *s = XLNX_ZDMA(dev);
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(zdma_regs_info); ++i) {
        RegisterInfo *r = &s->regs_info[zdma_regs_info[i].addr / 4];

        *r = (RegisterInfo) {
            .data = (uint8_t *)&s->regs[zdma_regs_info[i].addr / 4],
            .data_size = sizeof(uint32_t),
            .access = &zdma_regs_info[i],
            .opaque = s,
        };
    }

    if (s->dma_mr) {
        s->dma_as = g_malloc0(sizeof(AddressSpace));
        address_space_init(s->dma_as, s->dma_mr, NULL);
    } else {
        s->dma_as = &address_space_memory;
    }
    s->attr = MEMTXATTRS_UNSPECIFIED;
}

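/*
 * Instance init: expose the MMIO register block and the IRQ, and add an
 * optional "dma" link that selects the memory region used for descriptor
 * and data transfers (system memory is used when the link is not set).
 */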
static void zdma_init(Object *obj)
{
    XlnxZDMA *s = XLNX_ZDMA(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memory_region_init_io(&s->iomem, obj, &zdma_ops, s,
                          TYPE_XLNX_ZDMA, ZDMA_R_MAX * 4);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq_zdma_ch_imr);

    object_property_add_link(obj, "dma", TYPE_MEMORY_REGION,
                             (Object **)&s->dma_mr,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_STRONG,
                             &error_abort);
}

static const VMStateDescription vmstate_zdma = {
    .name = TYPE_XLNX_ZDMA,
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, XlnxZDMA, ZDMA_R_MAX),
        VMSTATE_UINT32(state, XlnxZDMA),
        VMSTATE_UINT32_ARRAY(dsc_src.words, XlnxZDMA, 4),
        VMSTATE_UINT32_ARRAY(dsc_dst.words, XlnxZDMA, 4),
        VMSTATE_END_OF_LIST(),
    }
};

static Property zdma_props[] = {
    DEFINE_PROP_UINT32("bus-width", XlnxZDMA, cfg.bus_width, 64),
    DEFINE_PROP_END_OF_LIST(),
};

static void zdma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = zdma_reset;
    dc->realize = zdma_realize;
    dc->props = zdma_props;
    dc->vmsd = &vmstate_zdma;
}

static const TypeInfo zdma_info = {
    .name = TYPE_XLNX_ZDMA,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XlnxZDMA),
    .class_init = zdma_class_init,
    .instance_init = zdma_init,
};

static void zdma_register_types(void)
{
    type_register_static(&zdma_info);
}

type_init(zdma_register_types)