/*
 * xlnx_dpdma.c
 *
 * Copyright (C) 2015 : GreenSocs Ltd
 *     http://www.greensocs.com/ , email: info@greensocs.com
 *
 * Developed by :
 *     Frederic Konrad <fred.konrad@greensocs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/dma/xlnx_dpdma.h"

#ifndef DEBUG_DPDMA
#define DEBUG_DPDMA 0
#endif

#define DPRINTF(fmt, ...) do {                                          \
    if (DEBUG_DPDMA) {                                                  \
        qemu_log("xlnx_dpdma: " fmt , ## __VA_ARGS__);                  \
    }                                                                   \
} while (0)

/*
 * Register offsets for the DPDMA.
 */
#define DPDMA_ERR_CTRL                    (0x0000)
#define DPDMA_ISR                         (0x0004 >> 2)
#define DPDMA_IMR                         (0x0008 >> 2)
#define DPDMA_IEN                         (0x000C >> 2)
#define DPDMA_IDS                         (0x0010 >> 2)
#define DPDMA_EISR                        (0x0014 >> 2)
#define DPDMA_EIMR                        (0x0018 >> 2)
#define DPDMA_EIEN                        (0x001C >> 2)
#define DPDMA_EIDS                        (0x0020 >> 2)
#define DPDMA_CNTL                        (0x0100 >> 2)

#define DPDMA_GBL                         (0x0104 >> 2)
#define DPDMA_GBL_TRG_CH(n)               (1 << (n))
#define DPDMA_GBL_RTRG_CH(n)              (1 << 6 << (n))

#define DPDMA_ALC0_CNTL                   (0x0108 >> 2)
#define DPDMA_ALC0_STATUS                 (0x010C >> 2)
#define DPDMA_ALC0_MAX                    (0x0110 >> 2)
#define DPDMA_ALC0_MIN                    (0x0114 >> 2)
#define DPDMA_ALC0_ACC                    (0x0118 >> 2)
#define DPDMA_ALC0_ACC_TRAN               (0x011C >> 2)
#define DPDMA_ALC1_CNTL                   (0x0120 >> 2)
#define DPDMA_ALC1_STATUS                 (0x0124 >> 2)
#define DPDMA_ALC1_MAX                    (0x0128 >> 2)
#define DPDMA_ALC1_MIN                    (0x012C >> 2)
#define DPDMA_ALC1_ACC                    (0x0130 >> 2)
#define DPDMA_ALC1_ACC_TRAN               (0x0134 >> 2)

#define DPDMA_DSCR_STRT_ADDRE_CH(n)       ((0x0200 + (n) * 0x100) >> 2)
#define DPDMA_DSCR_STRT_ADDR_CH(n)        ((0x0204 + (n) * 0x100) >> 2)
#define DPDMA_DSCR_NEXT_ADDRE_CH(n)       ((0x0208 + (n) * 0x100) >> 2)
#define DPDMA_DSCR_NEXT_ADDR_CH(n)        ((0x020C + (n) * 0x100) >> 2)
#define DPDMA_PYLD_CUR_ADDRE_CH(n)        ((0x0210 + (n) * 0x100) >> 2)
#define DPDMA_PYLD_CUR_ADDR_CH(n)         ((0x0214 + (n) * 0x100) >> 2)

#define DPDMA_CNTL_CH(n)                  ((0x0218 + (n) * 0x100) >> 2)
#define DPDMA_CNTL_CH_EN                  (1)
#define DPDMA_CNTL_CH_PAUSED              (1 << 1)

#define DPDMA_STATUS_CH(n)                ((0x021C + (n) * 0x100) >> 2)
#define DPDMA_STATUS_BURST_TYPE           (1 << 4)
#define DPDMA_STATUS_MODE                 (1 << 5)
#define DPDMA_STATUS_EN_CRC               (1 << 6)
#define DPDMA_STATUS_LAST_DSCR            (1 << 7)
#define DPDMA_STATUS_LDSCR_FRAME          (1 << 8)
#define DPDMA_STATUS_IGNR_DONE            (1 << 9)
#define DPDMA_STATUS_DSCR_DONE            (1 << 10)
#define DPDMA_STATUS_EN_DSCR_UP           (1 << 11)
#define DPDMA_STATUS_EN_DSCR_INTR         (1 << 12)
#define DPDMA_STATUS_PREAMBLE_OFF         (13)

#define DPDMA_VDO_CH(n)                   ((0x0220 + (n) * 0x100) >> 2)
#define DPDMA_PYLD_SZ_CH(n)               ((0x0224 + (n) * 0x100) >> 2)
#define DPDMA_DSCR_ID_CH(n)               ((0x0228 + (n) * 0x100) >> 2)
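
/*
 * Note: except for DPDMA_ERR_CTRL, the offsets above are stored already
 * shifted right by two, so they can be used directly as indices into the
 * 32-bit word array s->registers[] (the MMIO handlers below shift the byte
 * offset the same way before dispatching on it).
 */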

/*
 * Descriptor control field.
 */
#define CONTROL_PREAMBLE_VALUE            0xA5

#define DSCR_CTRL_PREAMBLE                0xFF
#define DSCR_CTRL_EN_DSCR_DONE_INTR       (1 << 8)
#define DSCR_CTRL_EN_DSCR_UPDATE          (1 << 9)
#define DSCR_CTRL_IGNORE_DONE             (1 << 10)
#define DSCR_CTRL_AXI_BURST_TYPE          (1 << 11)
#define DSCR_CTRL_AXCACHE                 (0x0F << 12)
#define DSCR_CTRL_AXPROT                  (0x2 << 16)
#define DSCR_CTRL_DESCRIPTOR_MODE         (1 << 18)
#define DSCR_CTRL_LAST_DESCRIPTOR         (1 << 19)
#define DSCR_CTRL_ENABLE_CRC              (1 << 20)
#define DSCR_CTRL_LAST_DESCRIPTOR_OF_FRAME (1 << 21)

/*
 * Descriptor timestamp field.
 */
#define STATUS_DONE                       (1U << 31)

#define DPDMA_FRAG_MAX_SZ                 (4096)

enum DPDMABurstType {
    DPDMA_INCR = 0,
    DPDMA_FIXED = 1
};

enum DPDMAMode {
    DPDMA_CONTIGOUS = 0,
    DPDMA_FRAGMENTED = 1
};

struct DPDMADescriptor {
    uint32_t control;
    uint32_t descriptor_id;
    /* transfer size in bytes. */
    uint32_t xfer_size;
    uint32_t line_size_stride;
    uint32_t timestamp_lsb;
    uint32_t timestamp_msb;
    /* contains extension for both descriptor and source. */
    uint32_t address_extension;
    uint32_t next_descriptor;
    uint32_t source_address;
    uint32_t address_extension_23;
    uint32_t address_extension_45;
    uint32_t source_address2;
    uint32_t source_address3;
    uint32_t source_address4;
    uint32_t source_address5;
    uint32_t crc;
};

typedef enum DPDMABurstType DPDMABurstType;
typedef enum DPDMAMode DPDMAMode;
typedef struct DPDMADescriptor DPDMADescriptor;

static bool xlnx_dpdma_desc_is_last(DPDMADescriptor *desc)
{
    return ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR) != 0);
}

static bool xlnx_dpdma_desc_is_last_of_frame(DPDMADescriptor *desc)
{
    return ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR_OF_FRAME) != 0);
}

static uint64_t xlnx_dpdma_desc_get_source_address(DPDMADescriptor *desc,
                                                   uint8_t frag)
{
    uint64_t addr = 0;
    assert(frag < 5);

    /*
     * Each extension half-word holds bits [47:32] of a 48-bit source
     * address: address_extension[31:16] extends source_address, while
     * address_extension_23 and address_extension_45 extend the remaining
     * fragment addresses (address_extension[15:0] is the next-descriptor
     * extension, see xlnx_dpdma_update_desc_info()).
     */
    switch (frag) {
    case 0:
        addr = desc->source_address
            + ((uint64_t)extract32(desc->address_extension, 16, 16) << 32);
        break;
    case 1:
        addr = desc->source_address2
            + ((uint64_t)extract32(desc->address_extension_23, 0, 16) << 32);
        break;
    case 2:
        addr = desc->source_address3
            + ((uint64_t)extract32(desc->address_extension_23, 16, 16) << 32);
        break;
    case 3:
        addr = desc->source_address4
            + ((uint64_t)extract32(desc->address_extension_45, 0, 16) << 32);
        break;
    case 4:
        addr = desc->source_address5
            + ((uint64_t)extract32(desc->address_extension_45, 16, 16) << 32);
        break;
    default:
        addr = 0;
        break;
    }

    return addr;
}
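
/*
 * For example (values made up for illustration): a descriptor with
 * source_address = 0x10000000 and address_extension[31:16] = 0x0012 makes
 * xlnx_dpdma_desc_get_source_address(desc, 0) return the DMA address
 * 0x1210000000.
 */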

static uint32_t xlnx_dpdma_desc_get_transfer_size(DPDMADescriptor *desc)
{
    return desc->xfer_size;
}

static uint32_t xlnx_dpdma_desc_get_line_size(DPDMADescriptor *desc)
{
    return extract32(desc->line_size_stride, 0, 18);
}

static uint32_t xlnx_dpdma_desc_get_line_stride(DPDMADescriptor *desc)
{
    return extract32(desc->line_size_stride, 18, 14) * 16;
}

static inline bool xlnx_dpdma_desc_crc_enabled(DPDMADescriptor *desc)
{
    return (desc->control & DSCR_CTRL_ENABLE_CRC) != 0;
}

static inline bool xlnx_dpdma_desc_check_crc(DPDMADescriptor *desc)
{
    uint32_t *p = (uint32_t *)desc;
    uint32_t crc = 0;
    uint8_t i;

    /*
     * The CRC is calculated on the whole descriptor except the last 32-bit
     * word, using 32-bit addition.
     */
    for (i = 0; i < 15; i++) {
        crc += p[i];
    }

    return crc == desc->crc;
}

static inline bool xlnx_dpdma_desc_completion_interrupt(DPDMADescriptor *desc)
{
    return (desc->control & DSCR_CTRL_EN_DSCR_DONE_INTR) != 0;
}

static inline bool xlnx_dpdma_desc_is_valid(DPDMADescriptor *desc)
{
    return (desc->control & DSCR_CTRL_PREAMBLE) == CONTROL_PREAMBLE_VALUE;
}

static inline bool xlnx_dpdma_desc_is_contiguous(DPDMADescriptor *desc)
{
    return (desc->control & DSCR_CTRL_DESCRIPTOR_MODE) == 0;
}

static inline bool xlnx_dpdma_desc_update_enabled(DPDMADescriptor *desc)
{
    return (desc->control & DSCR_CTRL_EN_DSCR_UPDATE) != 0;
}

static inline void xlnx_dpdma_desc_set_done(DPDMADescriptor *desc)
{
    desc->timestamp_msb |= STATUS_DONE;
}

static inline bool xlnx_dpdma_desc_is_already_done(DPDMADescriptor *desc)
{
    return (desc->timestamp_msb & STATUS_DONE) != 0;
}

static inline bool xlnx_dpdma_desc_ignore_done_bit(DPDMADescriptor *desc)
{
    return (desc->control & DSCR_CTRL_IGNORE_DONE) != 0;
}

static const VMStateDescription vmstate_xlnx_dpdma = {
    .name = TYPE_XLNX_DPDMA,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(registers, XlnxDPDMAState,
                             XLNX_DPDMA_REG_ARRAY_SIZE),
        VMSTATE_BOOL_ARRAY(operation_finished, XlnxDPDMAState, 6),
        VMSTATE_END_OF_LIST()
    }
};

static void xlnx_dpdma_update_irq(XlnxDPDMAState *s)
{
    bool flags;

    flags = ((s->registers[DPDMA_ISR] & (~s->registers[DPDMA_IMR]))
          || (s->registers[DPDMA_EISR] & (~s->registers[DPDMA_EIMR])));
    qemu_set_irq(s->irq, flags);
}

static uint64_t xlnx_dpdma_descriptor_start_address(XlnxDPDMAState *s,
                                                    uint8_t channel)
{
    /*
     * The start address extension register holds bits [47:32] of the
     * descriptor address, just like the NEXT address extension below.
     */
    return ((uint64_t)s->registers[DPDMA_DSCR_STRT_ADDRE_CH(channel)] << 32)
            + s->registers[DPDMA_DSCR_STRT_ADDR_CH(channel)];
}

static uint64_t xlnx_dpdma_descriptor_next_address(XlnxDPDMAState *s,
                                                   uint8_t channel)
{
    return ((uint64_t)s->registers[DPDMA_DSCR_NEXT_ADDRE_CH(channel)] << 32)
            + s->registers[DPDMA_DSCR_NEXT_ADDR_CH(channel)];
}

static bool xlnx_dpdma_is_channel_enabled(XlnxDPDMAState *s,
                                          uint8_t channel)
{
    return (s->registers[DPDMA_CNTL_CH(channel)] & DPDMA_CNTL_CH_EN) != 0;
}

static bool xlnx_dpdma_is_channel_paused(XlnxDPDMAState *s,
                                         uint8_t channel)
{
    return (s->registers[DPDMA_CNTL_CH(channel)] & DPDMA_CNTL_CH_PAUSED) != 0;
}
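
/*
 * Channels are kicked by software through the DPDMA_GBL register: bits [5:0]
 * trigger a channel and bits [11:6] re-trigger it.  The model clears a
 * channel's trigger bit whenever its control register is rewritten, and
 * clears the retrigger bit as soon as it has been observed (below).
 */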

static inline bool xlnx_dpdma_is_channel_retriggered(XlnxDPDMAState *s,
                                                     uint8_t channel)
{
    /* Clear the retriggered bit after reading it. */
    bool channel_is_retriggered = s->registers[DPDMA_GBL]
                                & DPDMA_GBL_RTRG_CH(channel);
    s->registers[DPDMA_GBL] &= ~DPDMA_GBL_RTRG_CH(channel);
    return channel_is_retriggered;
}

static inline bool xlnx_dpdma_is_channel_triggered(XlnxDPDMAState *s,
                                                   uint8_t channel)
{
    return s->registers[DPDMA_GBL] & DPDMA_GBL_TRG_CH(channel);
}

static void xlnx_dpdma_update_desc_info(XlnxDPDMAState *s, uint8_t channel,
                                        DPDMADescriptor *desc)
{
    s->registers[DPDMA_DSCR_NEXT_ADDRE_CH(channel)] =
                                extract32(desc->address_extension, 0, 16);
    s->registers[DPDMA_DSCR_NEXT_ADDR_CH(channel)] = desc->next_descriptor;
    s->registers[DPDMA_PYLD_CUR_ADDRE_CH(channel)] =
                                extract32(desc->address_extension, 16, 16);
    s->registers[DPDMA_PYLD_CUR_ADDR_CH(channel)] = desc->source_address;
    s->registers[DPDMA_VDO_CH(channel)] =
                                extract32(desc->line_size_stride, 18, 14)
                                + (extract32(desc->line_size_stride, 0, 18)
                                   << 14);
    s->registers[DPDMA_PYLD_SZ_CH(channel)] = desc->xfer_size;
    s->registers[DPDMA_DSCR_ID_CH(channel)] = desc->descriptor_id;

    /* Compute the status register with the descriptor information. */
    s->registers[DPDMA_STATUS_CH(channel)] =
                                extract32(desc->control, 0, 8) << 13;
    if ((desc->control & DSCR_CTRL_EN_DSCR_DONE_INTR) != 0) {
        s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_EN_DSCR_INTR;
    }
    if ((desc->control & DSCR_CTRL_EN_DSCR_UPDATE) != 0) {
        s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_EN_DSCR_UP;
    }
    if ((desc->timestamp_msb & STATUS_DONE) != 0) {
        s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_DSCR_DONE;
    }
    if ((desc->control & DSCR_CTRL_IGNORE_DONE) != 0) {
        s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_IGNR_DONE;
    }
    if ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR_OF_FRAME) != 0) {
        s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_LDSCR_FRAME;
    }
    if ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR) != 0) {
        s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_LAST_DSCR;
    }
    if ((desc->control & DSCR_CTRL_ENABLE_CRC) != 0) {
        s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_EN_CRC;
    }
    if ((desc->control & DSCR_CTRL_DESCRIPTOR_MODE) != 0) {
        s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_MODE;
    }
    if ((desc->control & DSCR_CTRL_AXI_BURST_TYPE) != 0) {
        s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_BURST_TYPE;
    }
}

static void xlnx_dpdma_dump_descriptor(DPDMADescriptor *desc)
{
    if (DEBUG_DPDMA) {
        qemu_log("DUMP DESCRIPTOR:\n");
        qemu_hexdump((char *)desc, stdout, "", sizeof(DPDMADescriptor));
    }
}
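
/*
 * MMIO accessors.  The memory region below only accepts 32-bit accesses, so
 * the handlers convert the byte offset into a word index into s->registers[]
 * before dispatching on it.
 */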

static uint64_t xlnx_dpdma_read(void *opaque, hwaddr offset,
                                unsigned size)
{
    XlnxDPDMAState *s = XLNX_DPDMA(opaque);

    DPRINTF("read @%" HWADDR_PRIx "\n", offset);
    offset = offset >> 2;

    switch (offset) {
    /*
     * Trying to read a write-only register.
     */
    case DPDMA_GBL:
        return 0;
    default:
        assert(offset <= (0xFFC >> 2));
        return s->registers[offset];
    }
    return 0;
}
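
/*
 * Write side of the interrupt registers: ISR and EISR are write-one-to-clear,
 * IEN/EIEN clear bits in the corresponding mask register (unmask) while
 * IDS/EIDS set them (mask), and IMR/EIMR themselves are read-only.
 */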
static void xlnx_dpdma_write(void *opaque, hwaddr offset,
                             uint64_t value, unsigned size)
{
    XlnxDPDMAState *s = XLNX_DPDMA(opaque);

    DPRINTF("write @%" HWADDR_PRIx " = %" PRIx64 "\n", offset, value);
    offset = offset >> 2;

    switch (offset) {
    case DPDMA_ISR:
        s->registers[DPDMA_ISR] &= ~value;
        xlnx_dpdma_update_irq(s);
        break;
    case DPDMA_IEN:
        s->registers[DPDMA_IMR] &= ~value;
        break;
    case DPDMA_IDS:
        s->registers[DPDMA_IMR] |= value;
        break;
    case DPDMA_EISR:
        s->registers[DPDMA_EISR] &= ~value;
        xlnx_dpdma_update_irq(s);
        break;
    case DPDMA_EIEN:
        s->registers[DPDMA_EIMR] &= ~value;
        break;
    case DPDMA_EIDS:
        s->registers[DPDMA_EIMR] |= value;
        break;
    case DPDMA_IMR:
    case DPDMA_EIMR:
    case DPDMA_DSCR_NEXT_ADDRE_CH(0):
    case DPDMA_DSCR_NEXT_ADDRE_CH(1):
    case DPDMA_DSCR_NEXT_ADDRE_CH(2):
    case DPDMA_DSCR_NEXT_ADDRE_CH(3):
    case DPDMA_DSCR_NEXT_ADDRE_CH(4):
    case DPDMA_DSCR_NEXT_ADDRE_CH(5):
    case DPDMA_DSCR_NEXT_ADDR_CH(0):
    case DPDMA_DSCR_NEXT_ADDR_CH(1):
    case DPDMA_DSCR_NEXT_ADDR_CH(2):
    case DPDMA_DSCR_NEXT_ADDR_CH(3):
    case DPDMA_DSCR_NEXT_ADDR_CH(4):
    case DPDMA_DSCR_NEXT_ADDR_CH(5):
    case DPDMA_PYLD_CUR_ADDRE_CH(0):
    case DPDMA_PYLD_CUR_ADDRE_CH(1):
    case DPDMA_PYLD_CUR_ADDRE_CH(2):
    case DPDMA_PYLD_CUR_ADDRE_CH(3):
    case DPDMA_PYLD_CUR_ADDRE_CH(4):
    case DPDMA_PYLD_CUR_ADDRE_CH(5):
    case DPDMA_PYLD_CUR_ADDR_CH(0):
    case DPDMA_PYLD_CUR_ADDR_CH(1):
    case DPDMA_PYLD_CUR_ADDR_CH(2):
    case DPDMA_PYLD_CUR_ADDR_CH(3):
    case DPDMA_PYLD_CUR_ADDR_CH(4):
    case DPDMA_PYLD_CUR_ADDR_CH(5):
    case DPDMA_STATUS_CH(0):
    case DPDMA_STATUS_CH(1):
    case DPDMA_STATUS_CH(2):
    case DPDMA_STATUS_CH(3):
    case DPDMA_STATUS_CH(4):
    case DPDMA_STATUS_CH(5):
    case DPDMA_VDO_CH(0):
    case DPDMA_VDO_CH(1):
    case DPDMA_VDO_CH(2):
    case DPDMA_VDO_CH(3):
    case DPDMA_VDO_CH(4):
    case DPDMA_VDO_CH(5):
    case DPDMA_PYLD_SZ_CH(0):
    case DPDMA_PYLD_SZ_CH(1):
    case DPDMA_PYLD_SZ_CH(2):
    case DPDMA_PYLD_SZ_CH(3):
    case DPDMA_PYLD_SZ_CH(4):
    case DPDMA_PYLD_SZ_CH(5):
    case DPDMA_DSCR_ID_CH(0):
    case DPDMA_DSCR_ID_CH(1):
    case DPDMA_DSCR_ID_CH(2):
    case DPDMA_DSCR_ID_CH(3):
    case DPDMA_DSCR_ID_CH(4):
    case DPDMA_DSCR_ID_CH(5):
        /*
         * Trying to write to a read-only register.
         */
        break;
    case DPDMA_GBL:
        /*
         * This is a write-only register, so it reads as zero in the read
         * callback.  We store the value anyway so we can tell whether a
         * channel has been triggered.
         */
        s->registers[offset] |= value & 0x00000FFF;
        break;
    case DPDMA_DSCR_STRT_ADDRE_CH(0):
    case DPDMA_DSCR_STRT_ADDRE_CH(1):
    case DPDMA_DSCR_STRT_ADDRE_CH(2):
    case DPDMA_DSCR_STRT_ADDRE_CH(3):
    case DPDMA_DSCR_STRT_ADDRE_CH(4):
    case DPDMA_DSCR_STRT_ADDRE_CH(5):
        value &= 0x0000FFFF;
        s->registers[offset] = value;
        break;
    case DPDMA_CNTL_CH(0):
        s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(0);
        value &= 0x3FFFFFFF;
        s->registers[offset] = value;
        break;
    case DPDMA_CNTL_CH(1):
        s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(1);
        value &= 0x3FFFFFFF;
        s->registers[offset] = value;
        break;
    case DPDMA_CNTL_CH(2):
        s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(2);
        value &= 0x3FFFFFFF;
        s->registers[offset] = value;
        break;
    case DPDMA_CNTL_CH(3):
        s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(3);
        value &= 0x3FFFFFFF;
        s->registers[offset] = value;
        break;
    case DPDMA_CNTL_CH(4):
        s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(4);
        value &= 0x3FFFFFFF;
        s->registers[offset] = value;
        break;
    case DPDMA_CNTL_CH(5):
        s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(5);
        value &= 0x3FFFFFFF;
        s->registers[offset] = value;
        break;
    default:
        assert(offset <= (0xFFC >> 2));
        s->registers[offset] = value;
        break;
    }
}

static const MemoryRegionOps dma_ops = {
    .read = xlnx_dpdma_read,
    .write = xlnx_dpdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void xlnx_dpdma_init(Object *obj)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
    XlnxDPDMAState *s = XLNX_DPDMA(obj);

    memory_region_init_io(&s->iomem, obj, &dma_ops, s,
                          TYPE_XLNX_DPDMA, 0x1000);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq);
}

static void xlnx_dpdma_reset(DeviceState *dev)
{
    XlnxDPDMAState *s = XLNX_DPDMA(dev);
    size_t i;

    memset(s->registers, 0, sizeof(s->registers));
    s->registers[DPDMA_IMR] = 0x07FFFFFF;
    s->registers[DPDMA_EIMR] = 0xFFFFFFFF;
    s->registers[DPDMA_ALC0_MIN] = 0x0000FFFF;
    s->registers[DPDMA_ALC1_MIN] = 0x0000FFFF;

    for (i = 0; i < 6; i++) {
        s->data[i] = NULL;
        s->operation_finished[i] = true;
    }
}

static void xlnx_dpdma_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->vmsd = &vmstate_xlnx_dpdma;
    dc->reset = xlnx_dpdma_reset;
}

static const TypeInfo xlnx_dpdma_info = {
    .name          = TYPE_XLNX_DPDMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XlnxDPDMAState),
    .instance_init = xlnx_dpdma_init,
    .class_init    = xlnx_dpdma_class_init,
};

static void xlnx_dpdma_register_types(void)
{
    type_register_static(&xlnx_dpdma_info);
}
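
/*
 * Process the descriptor chain of one channel: fetch each descriptor from
 * guest memory, check its preamble, CRC and DONE bit, copy the payload into
 * the host buffer registered for the channel, optionally write the DONE bit
 * back, and raise the per-channel interrupts.  Returns the number of bytes
 * copied into the host buffer.
 */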
size_t xlnx_dpdma_start_operation(XlnxDPDMAState *s, uint8_t channel,
                                  bool one_desc)
{
    uint64_t desc_addr;
    uint64_t source_addr[6];
    DPDMADescriptor desc;
    bool done = false;
    size_t ptr = 0;

    assert(channel <= 5);

    DPRINTF("start dpdma channel 0x%" PRIX8 "\n", channel);

    if (!xlnx_dpdma_is_channel_triggered(s, channel)) {
        DPRINTF("Channel isn't triggered.\n");
        return 0;
    }

    if (!xlnx_dpdma_is_channel_enabled(s, channel)) {
        DPRINTF("Channel isn't enabled.\n");
        return 0;
    }

    if (xlnx_dpdma_is_channel_paused(s, channel)) {
        DPRINTF("Channel is paused.\n");
        return 0;
    }

    do {
        if ((s->operation_finished[channel])
          || xlnx_dpdma_is_channel_retriggered(s, channel)) {
            desc_addr = xlnx_dpdma_descriptor_start_address(s, channel);
            s->operation_finished[channel] = false;
        } else {
            desc_addr = xlnx_dpdma_descriptor_next_address(s, channel);
        }

        if (dma_memory_read(&address_space_memory, desc_addr, &desc,
                            sizeof(DPDMADescriptor))) {
            s->registers[DPDMA_EISR] |= ((1 << 1) << channel);
            xlnx_dpdma_update_irq(s);
            s->operation_finished[channel] = true;
            DPRINTF("Can't get the descriptor.\n");
            break;
        }

        xlnx_dpdma_update_desc_info(s, channel, &desc);

#ifdef DEBUG_DPDMA
        xlnx_dpdma_dump_descriptor(&desc);
#endif

        DPRINTF("location of the descriptor: %" PRIx64 "\n", desc_addr);
        if (!xlnx_dpdma_desc_is_valid(&desc)) {
            s->registers[DPDMA_EISR] |= ((1 << 7) << channel);
            xlnx_dpdma_update_irq(s);
            s->operation_finished[channel] = true;
            DPRINTF("Invalid descriptor.\n");
            break;
        }

        if (xlnx_dpdma_desc_crc_enabled(&desc)
          && !xlnx_dpdma_desc_check_crc(&desc)) {
            s->registers[DPDMA_EISR] |= ((1 << 13) << channel);
            xlnx_dpdma_update_irq(s);
            s->operation_finished[channel] = true;
            DPRINTF("Bad CRC for descriptor.\n");
            break;
        }

        if (xlnx_dpdma_desc_is_already_done(&desc)
          && !xlnx_dpdma_desc_ignore_done_bit(&desc)) {
            /* We are trying to process an already processed descriptor. */
            s->registers[DPDMA_EISR] |= ((1 << 25) << channel);
            xlnx_dpdma_update_irq(s);
            s->operation_finished[channel] = true;
            DPRINTF("Already processed descriptor.\n");
            break;
        }

        done = xlnx_dpdma_desc_is_last(&desc)
             || xlnx_dpdma_desc_is_last_of_frame(&desc);

        s->operation_finished[channel] = done;
        if (s->data[channel]) {
            int64_t transfer_len = xlnx_dpdma_desc_get_transfer_size(&desc);
            uint32_t line_size = xlnx_dpdma_desc_get_line_size(&desc);
            uint32_t line_stride = xlnx_dpdma_desc_get_line_stride(&desc);
            if (xlnx_dpdma_desc_is_contiguous(&desc)) {
                source_addr[0] = xlnx_dpdma_desc_get_source_address(&desc, 0);
                while (transfer_len != 0) {
                    if (dma_memory_read(&address_space_memory,
                                        source_addr[0],
                                        &s->data[channel][ptr],
                                        line_size)) {
                        s->registers[DPDMA_ISR] |= ((1 << 12) << channel);
                        xlnx_dpdma_update_irq(s);
                        DPRINTF("Can't get data.\n");
                        break;
                    }
                    ptr += line_size;
                    transfer_len -= line_size;
                    source_addr[0] += line_stride;
                }
            } else {
                DPRINTF("Source address:\n");
                int frag;
                for (frag = 0; frag < 5; frag++) {
                    source_addr[frag] =
                          xlnx_dpdma_desc_get_source_address(&desc, frag);
                    DPRINTF("Fragment %u: %" PRIx64 "\n", frag + 1,
                            source_addr[frag]);
                }

                /*
                 * Copy the fragments one by one until the whole transfer
                 * size has been consumed; each fragment stops at the next
                 * 4K boundary.
                 */
                frag = 0;
                while ((transfer_len != 0) && (frag < 5)) {
                    size_t fragment_len = DPDMA_FRAG_MAX_SZ
                                    - (source_addr[frag] % DPDMA_FRAG_MAX_SZ);

                    if (dma_memory_read(&address_space_memory,
                                        source_addr[frag],
                                        &(s->data[channel][ptr]),
                                        fragment_len)) {
                        s->registers[DPDMA_ISR] |= ((1 << 12) << channel);
                        xlnx_dpdma_update_irq(s);
                        DPRINTF("Can't get data.\n");
                        break;
                    }
                    ptr += fragment_len;
                    transfer_len -= fragment_len;
                    frag += 1;
                }
            }
        }

        if (xlnx_dpdma_desc_update_enabled(&desc)) {
            /* The descriptor needs to be updated when it's completed. */
            DPRINTF("update the descriptor with the done flag set.\n");
            xlnx_dpdma_desc_set_done(&desc);
            dma_memory_write(&address_space_memory, desc_addr, &desc,
                             sizeof(DPDMADescriptor));
        }

        if (xlnx_dpdma_desc_completion_interrupt(&desc)) {
            DPRINTF("completion interrupt enabled!\n");
            s->registers[DPDMA_ISR] |= (1 << channel);
            xlnx_dpdma_update_irq(s);
        }

    } while (!done && !one_desc);

    return ptr;
}
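
/*
 * Helpers used by the DMA client (typically the DisplayPort model): the
 * client is expected to register a destination buffer for a channel with
 * xlnx_dpdma_set_host_data_location() and then call
 * xlnx_dpdma_start_operation() whenever it needs the channel's data, e.g.
 * once per frame, possibly signalling VSYNC through
 * xlnx_dpdma_trigger_vsync_irq().
 */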
void xlnx_dpdma_set_host_data_location(XlnxDPDMAState *s, uint8_t channel,
                                       void *p)
{
    if (!s) {
        qemu_log_mask(LOG_UNIMP, "DPDMA client not attached to valid DPDMA"
                      " instance\n");
        return;
    }

    assert(channel <= 5);
    s->data[channel] = p;
}

void xlnx_dpdma_trigger_vsync_irq(XlnxDPDMAState *s)
{
    s->registers[DPDMA_ISR] |= (1 << 27);
    xlnx_dpdma_update_irq(s);
}

type_init(xlnx_dpdma_register_types)