/*
 * Intel XScale PXA255/270 DMA controller.
 *
 * Copyright (c) 2006 Openedhand Ltd.
 * Copyright (c) 2006 Thorsten Zitterell
 * Written by Andrzej Zaborowski <balrog@zabor.org>
 *
 * This code is licensed under the GPL.
 */

#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/arm/pxa.h"
#include "hw/sysbus.h"
#include "qapi/error.h"

#define PXA255_DMA_NUM_CHANNELS 16
#define PXA27X_DMA_NUM_CHANNELS 32

#define PXA2XX_DMA_NUM_REQUESTS 75

typedef struct {
    uint32_t descr;
    uint32_t src;
    uint32_t dest;
    uint32_t cmd;
    uint32_t state;
    int request;
} PXA2xxDMAChannel;

#define TYPE_PXA2XX_DMA "pxa2xx-dma"
#define PXA2XX_DMA(obj) OBJECT_CHECK(PXA2xxDMAState, (obj), TYPE_PXA2XX_DMA)

typedef struct PXA2xxDMAState {
    SysBusDevice parent_obj;

    MemoryRegion iomem;
    qemu_irq irq;

    uint32_t stopintr;
    uint32_t eorintr;
    uint32_t rasintr;
    uint32_t startintr;
    uint32_t endintr;

    uint32_t align;
    uint32_t pio;

    int channels;
    PXA2xxDMAChannel *chan;

    uint8_t req[PXA2XX_DMA_NUM_REQUESTS];

    /* Flag to avoid recursive DMA invocations. */
    int running;
} PXA2xxDMAState;

#define DCSR0   0x0000 /* DMA Control / Status register for Channel 0 */
#define DCSR31  0x007c /* DMA Control / Status register for Channel 31 */
#define DALGN   0x00a0 /* DMA Alignment register */
#define DPCSR   0x00a4 /* DMA Programmed I/O Control Status register */
#define DRQSR0  0x00e0 /* DMA DREQ<0> Status register */
#define DRQSR1  0x00e4 /* DMA DREQ<1> Status register */
#define DRQSR2  0x00e8 /* DMA DREQ<2> Status register */
#define DINT    0x00f0 /* DMA Interrupt register */
#define DRCMR0  0x0100 /* Request to Channel Map register 0 */
#define DRCMR63 0x01fc /* Request to Channel Map register 63 */
#define D_CH0   0x0200 /* Channel 0 Descriptor start */
#define DRCMR64 0x1100 /* Request to Channel Map register 64 */
#define DRCMR74 0x1128 /* Request to Channel Map register 74 */

/* Per-channel register */
#define DDADR   0x00
#define DSADR   0x01
#define DTADR   0x02
#define DCMD    0x03

/* Bit-field masks */
#define DRCMR_CHLNUM        0x1f
#define DRCMR_MAPVLD        (1 << 7)
#define DDADR_STOP          (1 << 0)
#define DDADR_BREN          (1 << 1)
#define DCMD_LEN            0x1fff
#define DCMD_WIDTH(x)       (1 << ((((x) >> 14) & 3) - 1))
#define DCMD_SIZE(x)        (4 << (((x) >> 16) & 3))
#define DCMD_FLYBYT         (1 << 19)
#define DCMD_FLYBYS         (1 << 20)
#define DCMD_ENDIRQEN       (1 << 21)
#define DCMD_STARTIRQEN     (1 << 22)
#define DCMD_CMPEN          (1 << 25)
#define DCMD_FLOWTRG        (1 << 28)
#define DCMD_FLOWSRC        (1 << 29)
#define DCMD_INCTRGADDR     (1 << 30)
#define DCMD_INCSRCADDR     (1 << 31)
#define DCSR_BUSERRINTR     (1 << 0)
#define DCSR_STARTINTR      (1 << 1)
#define DCSR_ENDINTR        (1 << 2)
#define DCSR_STOPINTR       (1 << 3)
#define DCSR_RASINTR        (1 << 4)
#define DCSR_REQPEND        (1 << 8)
#define DCSR_EORINT         (1 << 9)
#define DCSR_CMPST          (1 << 10)
#define DCSR_MASKRUN        (1 << 22)
#define DCSR_RASIRQEN       (1 << 23)
#define DCSR_CLRCMPST       (1 << 24)
#define DCSR_SETCMPST       (1 << 25)
#define DCSR_EORSTOPEN      (1 << 26)
#define DCSR_EORJMPEN       (1 << 27)
#define DCSR_EORIRQEN       (1 << 28)
#define DCSR_STOPIRQEN      (1 << 29)
#define DCSR_NODESCFETCH    (1 << 30)
#define DCSR_RUN            (1 << 31)

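/*
 * Recompute the per-channel interrupt summary bits (stopintr, eorintr,
 * rasintr, startintr, endintr) for channel @ch, or for no channel if @ch
 * is negative, then update the shared IRQ line: it is asserted exactly
 * while a DINT read would return a non-zero value.
 */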
static inline void pxa2xx_dma_update(PXA2xxDMAState *s, int ch)
{
    if (ch >= 0) {
        if ((s->chan[ch].state & DCSR_STOPIRQEN) &&
            (s->chan[ch].state & DCSR_STOPINTR))
            s->stopintr |= 1 << ch;
        else
            s->stopintr &= ~(1 << ch);

        if ((s->chan[ch].state & DCSR_EORIRQEN) &&
            (s->chan[ch].state & DCSR_EORINT))
            s->eorintr |= 1 << ch;
        else
            s->eorintr &= ~(1 << ch);

        if ((s->chan[ch].state & DCSR_RASIRQEN) &&
            (s->chan[ch].state & DCSR_RASINTR))
            s->rasintr |= 1 << ch;
        else
            s->rasintr &= ~(1 << ch);

        if (s->chan[ch].state & DCSR_STARTINTR)
            s->startintr |= 1 << ch;
        else
            s->startintr &= ~(1 << ch);

        if (s->chan[ch].state & DCSR_ENDINTR)
            s->endintr |= 1 << ch;
        else
            s->endintr &= ~(1 << ch);
    }

    if (s->stopintr | s->eorintr | s->rasintr | s->startintr | s->endintr)
        qemu_irq_raise(s->irq);
    else
        qemu_irq_lower(s->irq);
}

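/*
 * Fetch the next descriptor for channel @ch from guest memory.  A
 * descriptor is four 32-bit words (DDADR, DSADR, DTADR, DCMD); when
 * descriptor branching is enabled (DDADR_BREN) and the comparator status
 * bit (DCSR_CMPST) is set, the alternate descriptor 32 bytes further on
 * is loaded instead.
 */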
static inline void pxa2xx_dma_descriptor_fetch(
                PXA2xxDMAState *s, int ch)
{
    uint32_t desc[4];
    hwaddr daddr = s->chan[ch].descr & ~0xf;
    if ((s->chan[ch].descr & DDADR_BREN) && (s->chan[ch].state & DCSR_CMPST))
        daddr += 32;

    cpu_physical_memory_read(daddr, desc, 16);
    s->chan[ch].descr = desc[DDADR];
    s->chan[ch].src = desc[DSADR];
    s->chan[ch].dest = desc[DTADR];
    s->chan[ch].cmd = desc[DCMD];

    if (s->chan[ch].cmd & DCMD_FLOWSRC)
        s->chan[ch].src &= ~3;
    if (s->chan[ch].cmd & DCMD_FLOWTRG)
        s->chan[ch].dest &= ~3;

    if (s->chan[ch].cmd & (DCMD_CMPEN | DCMD_FLYBYS | DCMD_FLYBYT))
        printf("%s: unsupported mode in channel %i\n", __func__, ch);

    if (s->chan[ch].cmd & DCMD_STARTIRQEN)
        s->chan[ch].state |= DCSR_STARTINTR;
}

/*
 * Advance every runnable channel, copying data burst by burst through an
 * intermediate buffer.  Transfers are synchronous; the s->running counter
 * prevents re-entry when a transfer touches device registers that in turn
 * raise DMA requests.
 */
static void pxa2xx_dma_run(PXA2xxDMAState *s)
{
    int c, srcinc, destinc;
    uint32_t n, size;
    uint32_t width;
    uint32_t length;
    uint8_t buffer[32];
    PXA2xxDMAChannel *ch;

    if (s->running ++)
        return;

    while (s->running) {
        s->running = 1;
        for (c = 0; c < s->channels; c ++) {
            ch = &s->chan[c];

            while ((ch->state & DCSR_RUN) && !(ch->state & DCSR_STOPINTR)) {
                /* Test for pending requests */
                if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && !ch->request)
                    break;

                length = ch->cmd & DCMD_LEN;
                size = DCMD_SIZE(ch->cmd);
                width = DCMD_WIDTH(ch->cmd);

                srcinc = (ch->cmd & DCMD_INCSRCADDR) ? width : 0;
                destinc = (ch->cmd & DCMD_INCTRGADDR) ? width : 0;

                while (length) {
                    size = MIN(length, size);

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_read(ch->src, buffer + n, width);
                        ch->src += srcinc;
                    }

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_write(ch->dest, buffer + n, width);
                        ch->dest += destinc;
                    }

                    length -= size;

                    if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) &&
                        !ch->request) {
                        ch->state |= DCSR_EORINT;
                        if (ch->state & DCSR_EORSTOPEN)
                            ch->state |= DCSR_STOPINTR;
                        if ((ch->state & DCSR_EORJMPEN) &&
                            !(ch->state & DCSR_NODESCFETCH))
                            pxa2xx_dma_descriptor_fetch(s, c);
                        break;
                    }
                }

                ch->cmd = (ch->cmd & ~DCMD_LEN) | length;

                /* Is the transfer complete now? */
                if (!length) {
                    if (ch->cmd & DCMD_ENDIRQEN)
                        ch->state |= DCSR_ENDINTR;

                    if ((ch->state & DCSR_NODESCFETCH) ||
                        (ch->descr & DDADR_STOP) ||
                        (ch->state & DCSR_EORSTOPEN)) {
                        ch->state |= DCSR_STOPINTR;
                        ch->state &= ~DCSR_RUN;

                        break;
                    }

                    ch->state |= DCSR_STOPINTR;
                    break;
                }
            }
        }

        s->running --;
    }
}

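/*
 * Register window layout: the request-to-channel map is split into two
 * banks, DRCMR0..DRCMR63 at 0x0100 and DRCMR64..DRCMR74 at 0x1100.  Both
 * the read and write handlers below fold the high bank onto the low one,
 * so e.g. an access to DRCMR64 (0x1100) is rebased to 0x0200 and yields
 * request number (0x0200 - DRCMR0) >> 2 == 64.  Per-channel descriptor
 * registers start at D_CH0 and occupy 16 bytes per channel.
 */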
static uint64_t pxa2xx_dma_read(void *opaque, hwaddr offset,
                                unsigned size)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    unsigned int channel;

    if (size != 4) {
        hw_error("%s: Bad access width\n", __func__);
        return 5;
    }

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;
        return s->req[channel];

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        return 0;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        if (s->chan[channel].request)
            return s->chan[channel].state | DCSR_REQPEND;
        return s->chan[channel].state;

    case DINT:
        return s->stopintr | s->eorintr | s->rasintr |
               s->startintr | s->endintr;

    case DALGN:
        return s->align;

    case DPCSR:
        return s->pio;
    }

    if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
        channel = (offset - D_CH0) >> 4;
        switch ((offset & 0x0f) >> 2) {
        case DDADR:
            return s->chan[channel].descr;
        case DSADR:
            return s->chan[channel].src;
        case DTADR:
            return s->chan[channel].dest;
        case DCMD:
            return s->chan[channel].cmd;
        }
    }

    hw_error("%s: Bad offset 0x" TARGET_FMT_plx "\n", __func__, offset);
    return 7;
}

static void pxa2xx_dma_write(void *opaque, hwaddr offset,
                             uint64_t value, unsigned size)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    unsigned int channel;

    if (size != 4) {
        hw_error("%s: Bad access width\n", __func__);
        return;
    }

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;

        if (value & DRCMR_MAPVLD)
            if ((value & DRCMR_CHLNUM) >= s->channels)
                hw_error("%s: Bad DMA channel %i\n",
                         __func__, (unsigned)value & DRCMR_CHLNUM);

        s->req[channel] = value;
        break;

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        /* Nothing to do */
        break;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        s->chan[channel].state &= 0x0000071f & ~(value &
                        (DCSR_EORINT | DCSR_ENDINTR |
                         DCSR_STARTINTR | DCSR_BUSERRINTR));
        s->chan[channel].state |= value & 0xfc800000;

        if (s->chan[channel].state & DCSR_STOPIRQEN)
            s->chan[channel].state &= ~DCSR_STOPINTR;

        if (value & DCSR_NODESCFETCH) {
            /* No-descriptor-fetch mode */
            if (value & DCSR_RUN) {
                s->chan[channel].state &= ~DCSR_STOPINTR;
                pxa2xx_dma_run(s);
            }
        } else {
            /* Descriptor-fetch mode */
            if (value & DCSR_RUN) {
                s->chan[channel].state &= ~DCSR_STOPINTR;
                pxa2xx_dma_descriptor_fetch(s, channel);
                pxa2xx_dma_run(s);
            }
        }

        /* Shouldn't matter as our DMA is synchronous. */
        if (!(value & (DCSR_RUN | DCSR_MASKRUN)))
            s->chan[channel].state |= DCSR_STOPINTR;

        if (value & DCSR_CLRCMPST)
            s->chan[channel].state &= ~DCSR_CMPST;
        if (value & DCSR_SETCMPST)
            s->chan[channel].state |= DCSR_CMPST;

        pxa2xx_dma_update(s, channel);
        break;

    case DALGN:
        s->align = value;
        break;

    case DPCSR:
        s->pio = value & 0x80000001;
        break;

    default:
        if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
            channel = (offset - D_CH0) >> 4;
            switch ((offset & 0x0f) >> 2) {
            case DDADR:
                s->chan[channel].descr = value;
                break;
            case DSADR:
                s->chan[channel].src = value;
                break;
            case DTADR:
                s->chan[channel].dest = value;
                break;
            case DCMD:
                s->chan[channel].cmd = value;
                break;
            default:
                goto fail;
            }

            break;
        }
    fail:
        hw_error("%s: Bad offset " TARGET_FMT_plx "\n", __func__, offset);
    }
}

static const MemoryRegionOps pxa2xx_dma_ops = {
    .read = pxa2xx_dma_read,
    .write = pxa2xx_dma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void pxa2xx_dma_request(void *opaque, int req_num, int on)
{
    PXA2xxDMAState *s = opaque;
    int ch;
    if (req_num < 0 || req_num >= PXA2XX_DMA_NUM_REQUESTS)
        hw_error("%s: Bad DMA request %i\n", __func__, req_num);

    if (!(s->req[req_num] & DRCMR_MAPVLD))
        return;
    ch = s->req[req_num] & DRCMR_CHLNUM;

    if (!s->chan[ch].request && on)
        s->chan[ch].state |= DCSR_RASINTR;
    else
        s->chan[ch].state &= ~DCSR_RASINTR;
    if (s->chan[ch].request && !on)
        s->chan[ch].state |= DCSR_EORINT;

    s->chan[ch].request = on;
    if (on) {
        pxa2xx_dma_run(s);
        pxa2xx_dma_update(s, ch);
    }
}

static void pxa2xx_dma_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    PXA2xxDMAState *s = PXA2XX_DMA(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memset(s->req, 0, sizeof(uint8_t) * PXA2XX_DMA_NUM_REQUESTS);

    qdev_init_gpio_in(dev, pxa2xx_dma_request, PXA2XX_DMA_NUM_REQUESTS);

    memory_region_init_io(&s->iomem, obj, &pxa2xx_dma_ops, s,
                          "pxa2xx.dma", 0x00010000);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq);
}

static void pxa2xx_dma_realize(DeviceState *dev, Error **errp)
{
    PXA2xxDMAState *s = PXA2XX_DMA(dev);
    int i;

    if (s->channels <= 0) {
        error_setg(errp, "channels value invalid");
        return;
    }

    s->chan = g_new0(PXA2xxDMAChannel, s->channels);

    for (i = 0; i < s->channels; i ++)
        s->chan[i].state = DCSR_STOPINTR;
}

DeviceState *pxa27x_dma_init(hwaddr base, qemu_irq irq)
{
    DeviceState *dev;

    dev = qdev_create(NULL, "pxa2xx-dma");
    qdev_prop_set_int32(dev, "channels", PXA27X_DMA_NUM_CHANNELS);
    qdev_init_nofail(dev);

    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);

    return dev;
}

DeviceState *pxa255_dma_init(hwaddr base, qemu_irq irq)
{
    DeviceState *dev;

    dev = qdev_create(NULL, "pxa2xx-dma");
    qdev_prop_set_int32(dev, "channels", PXA255_DMA_NUM_CHANNELS);
    qdev_init_nofail(dev);

    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);

    return dev;
}

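/*
 * Board wiring sketch (for illustration only; the real call sites are the
 * SoC models under hw/arm/, and the 0x40000000 base and PXA2XX_PIC_DMA
 * interrupt number are assumed from the PXA2xx memory and interrupt maps):
 *
 *     s->dma = pxa27x_dma_init(0x40000000,
 *                              qdev_get_gpio_in(s->pic, PXA2XX_PIC_DMA));
 */
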
static bool is_version_0(void *opaque, int version_id)
{
    return version_id == 0;
}

static VMStateDescription vmstate_pxa2xx_dma_chan = {
    .name = "pxa2xx_dma_chan",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(descr, PXA2xxDMAChannel),
        VMSTATE_UINT32(src, PXA2xxDMAChannel),
        VMSTATE_UINT32(dest, PXA2xxDMAChannel),
        VMSTATE_UINT32(cmd, PXA2xxDMAChannel),
        VMSTATE_UINT32(state, PXA2xxDMAChannel),
        VMSTATE_INT32(request, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};

static VMStateDescription vmstate_pxa2xx_dma = {
    .name = "pxa2xx_dma",
    .version_id = 1,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED_TEST(is_version_0, 4),
        VMSTATE_UINT32(stopintr, PXA2xxDMAState),
        VMSTATE_UINT32(eorintr, PXA2xxDMAState),
        VMSTATE_UINT32(rasintr, PXA2xxDMAState),
        VMSTATE_UINT32(startintr, PXA2xxDMAState),
        VMSTATE_UINT32(endintr, PXA2xxDMAState),
        VMSTATE_UINT32(align, PXA2xxDMAState),
        VMSTATE_UINT32(pio, PXA2xxDMAState),
        VMSTATE_BUFFER(req, PXA2xxDMAState),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(chan, PXA2xxDMAState, channels,
                vmstate_pxa2xx_dma_chan, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};

static Property pxa2xx_dma_properties[] = {
    DEFINE_PROP_INT32("channels", PXA2xxDMAState, channels, -1),
    DEFINE_PROP_END_OF_LIST(),
};

static void pxa2xx_dma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "PXA2xx DMA controller";
    dc->vmsd = &vmstate_pxa2xx_dma;
    dc->props = pxa2xx_dma_properties;
    dc->realize = pxa2xx_dma_realize;
}

static const TypeInfo pxa2xx_dma_info = {
    .name          = TYPE_PXA2XX_DMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PXA2xxDMAState),
    .instance_init = pxa2xx_dma_init,
    .class_init    = pxa2xx_dma_class_init,
};

static void pxa2xx_dma_register_types(void)
{
    type_register_static(&pxa2xx_dma_info);
}

type_init(pxa2xx_dma_register_types)