/*
 * Intel XScale PXA255/270 DMA controller.
 *
 * Copyright (c) 2006 Openedhand Ltd.
 * Copyright (c) 2006 Thorsten Zitterell
 * Written by Andrzej Zaborowski <balrog@zabor.org>
 *
 * This code is licensed under the GPL.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/hw.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/arm/pxa.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qemu/module.h"

#define PXA255_DMA_NUM_CHANNELS 16
#define PXA27X_DMA_NUM_CHANNELS 32

#define PXA2XX_DMA_NUM_REQUESTS 75

typedef struct {
    uint32_t descr;
    uint32_t src;
    uint32_t dest;
    uint32_t cmd;
    uint32_t state;
    int request;
} PXA2xxDMAChannel;

#define TYPE_PXA2XX_DMA "pxa2xx-dma"
#define PXA2XX_DMA(obj) OBJECT_CHECK(PXA2xxDMAState, (obj), TYPE_PXA2XX_DMA)

typedef struct PXA2xxDMAState {
    SysBusDevice parent_obj;

    MemoryRegion iomem;
    qemu_irq irq;

    uint32_t stopintr;
    uint32_t eorintr;
    uint32_t rasintr;
    uint32_t startintr;
    uint32_t endintr;

    uint32_t align;
    uint32_t pio;

    int channels;
    PXA2xxDMAChannel *chan;

    uint8_t req[PXA2XX_DMA_NUM_REQUESTS];

    /* Flag to avoid recursive DMA invocations. */
    int running;
} PXA2xxDMAState;

#define DCSR0    0x0000 /* DMA Control / Status register for Channel 0 */
#define DCSR31   0x007c /* DMA Control / Status register for Channel 31 */
#define DALGN    0x00a0 /* DMA Alignment register */
#define DPCSR    0x00a4 /* DMA Programmed I/O Control Status register */
#define DRQSR0   0x00e0 /* DMA DREQ<0> Status register */
#define DRQSR1   0x00e4 /* DMA DREQ<1> Status register */
#define DRQSR2   0x00e8 /* DMA DREQ<2> Status register */
#define DINT     0x00f0 /* DMA Interrupt register */
#define DRCMR0   0x0100 /* Request to Channel Map register 0 */
#define DRCMR63  0x01fc /* Request to Channel Map register 63 */
#define D_CH0    0x0200 /* Channel 0 Descriptor start */
#define DRCMR64  0x1100 /* Request to Channel Map register 64 */
#define DRCMR74  0x1128 /* Request to Channel Map register 74 */

/* Per-channel register */
#define DDADR    0x00
#define DSADR    0x01
#define DTADR    0x02
#define DCMD     0x03

/* Bit-field masks */
#define DRCMR_CHLNUM        0x1f
#define DRCMR_MAPVLD        (1 << 7)
#define DDADR_STOP          (1 << 0)
#define DDADR_BREN          (1 << 1)
#define DCMD_LEN            0x1fff
#define DCMD_WIDTH(x)       (1 << ((((x) >> 14) & 3) - 1))
#define DCMD_SIZE(x)        (4 << (((x) >> 16) & 3))
#define DCMD_FLYBYT         (1 << 19)
#define DCMD_FLYBYS         (1 << 20)
#define DCMD_ENDIRQEN       (1 << 21)
#define DCMD_STARTIRQEN     (1 << 22)
#define DCMD_CMPEN          (1 << 25)
#define DCMD_FLOWTRG        (1 << 28)
#define DCMD_FLOWSRC        (1 << 29)
#define DCMD_INCTRGADDR     (1 << 30)
#define DCMD_INCSRCADDR     (1 << 31)
#define DCSR_BUSERRINTR     (1 << 0)
#define DCSR_STARTINTR      (1 << 1)
#define DCSR_ENDINTR        (1 << 2)
#define DCSR_STOPINTR       (1 << 3)
#define DCSR_RASINTR        (1 << 4)
#define DCSR_REQPEND        (1 << 8)
#define DCSR_EORINT         (1 << 9)
#define DCSR_CMPST          (1 << 10)
#define DCSR_MASKRUN        (1 << 22)
#define DCSR_RASIRQEN       (1 << 23)
#define DCSR_CLRCMPST       (1 << 24)
#define DCSR_SETCMPST       (1 << 25)
#define DCSR_EORSTOPEN      (1 << 26)
#define DCSR_EORJMPEN       (1 << 27)
#define DCSR_EORIRQEN       (1 << 28)
#define DCSR_STOPIRQEN      (1 << 29)
#define DCSR_NODESCFETCH    (1 << 30)
#define DCSR_RUN            (1 << 31)
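
/*
 * In-memory descriptor layout, as fetched 16 bytes at a time from a
 * 16-byte-aligned address by pxa2xx_dma_descriptor_fetch() below
 * (the indices are the DDADR..DCMD constants above):
 *
 *   word 0 (DDADR): address of the next descriptor, plus STOP/BREN flags
 *   word 1 (DSADR): source address
 *   word 2 (DTADR): target address
 *   word 3 (DCMD):  command and remaining-length word
 */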

/*
 * Recompute the per-cause interrupt bitmaps for channel @ch (skipped when
 * @ch is negative) and update the combined outgoing IRQ line.
 */
static inline void pxa2xx_dma_update(PXA2xxDMAState *s, int ch)
{
    if (ch >= 0) {
        if ((s->chan[ch].state & DCSR_STOPIRQEN) &&
            (s->chan[ch].state & DCSR_STOPINTR))
            s->stopintr |= 1 << ch;
        else
            s->stopintr &= ~(1 << ch);

        if ((s->chan[ch].state & DCSR_EORIRQEN) &&
            (s->chan[ch].state & DCSR_EORINT))
            s->eorintr |= 1 << ch;
        else
            s->eorintr &= ~(1 << ch);

        if ((s->chan[ch].state & DCSR_RASIRQEN) &&
            (s->chan[ch].state & DCSR_RASINTR))
            s->rasintr |= 1 << ch;
        else
            s->rasintr &= ~(1 << ch);

        if (s->chan[ch].state & DCSR_STARTINTR)
            s->startintr |= 1 << ch;
        else
            s->startintr &= ~(1 << ch);

        if (s->chan[ch].state & DCSR_ENDINTR)
            s->endintr |= 1 << ch;
        else
            s->endintr &= ~(1 << ch);
    }

    if (s->stopintr | s->eorintr | s->rasintr | s->startintr | s->endintr)
        qemu_irq_raise(s->irq);
    else
        qemu_irq_lower(s->irq);
}
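
/*
 * Load the next descriptor for channel @ch from guest memory.  When
 * descriptor branching is enabled (DDADR_BREN) and the compare status
 * bit is set, the alternate descriptor 32 bytes further on is fetched
 * instead.
 */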
static inline void pxa2xx_dma_descriptor_fetch(
                PXA2xxDMAState *s, int ch)
{
    uint32_t desc[4];
    hwaddr daddr = s->chan[ch].descr & ~0xf;
    if ((s->chan[ch].descr & DDADR_BREN) && (s->chan[ch].state & DCSR_CMPST))
        daddr += 32;

    cpu_physical_memory_read(daddr, desc, 16);
    s->chan[ch].descr = desc[DDADR];
    s->chan[ch].src = desc[DSADR];
    s->chan[ch].dest = desc[DTADR];
    s->chan[ch].cmd = desc[DCMD];

    if (s->chan[ch].cmd & DCMD_FLOWSRC)
        s->chan[ch].src &= ~3;
    if (s->chan[ch].cmd & DCMD_FLOWTRG)
        s->chan[ch].dest &= ~3;

    if (s->chan[ch].cmd & (DCMD_CMPEN | DCMD_FLYBYS | DCMD_FLYBYT))
        qemu_log_mask(LOG_UNIMP, "%s: unsupported mode in channel %i\n",
                      __func__, ch);

    if (s->chan[ch].cmd & DCMD_STARTIRQEN)
        s->chan[ch].state |= DCSR_STARTINTR;
}

static void pxa2xx_dma_run(PXA2xxDMAState *s)
{
    int c, srcinc, destinc;
    uint32_t n, size;
    uint32_t width;
    uint32_t length;
    uint8_t buffer[32];
    PXA2xxDMAChannel *ch;

    if (s->running++)
        return;

    while (s->running) {
        s->running = 1;
        for (c = 0; c < s->channels; c++) {
            ch = &s->chan[c];

            while ((ch->state & DCSR_RUN) && !(ch->state & DCSR_STOPINTR)) {
                /* Test for pending requests */
                if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && !ch->request)
                    break;

                length = ch->cmd & DCMD_LEN;
                size = DCMD_SIZE(ch->cmd);
                width = DCMD_WIDTH(ch->cmd);

                srcinc = (ch->cmd & DCMD_INCSRCADDR) ? width : 0;
                destinc = (ch->cmd & DCMD_INCTRGADDR) ? width : 0;

                while (length) {
                    size = MIN(length, size);

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_read(ch->src, buffer + n, width);
                        ch->src += srcinc;
                    }

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_write(ch->dest, buffer + n, width);
                        ch->dest += destinc;
                    }

                    length -= size;

                    if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) &&
                        !ch->request) {
                        ch->state |= DCSR_EORINT;
                        if (ch->state & DCSR_EORSTOPEN)
                            ch->state |= DCSR_STOPINTR;
                        if ((ch->state & DCSR_EORJMPEN) &&
                            !(ch->state & DCSR_NODESCFETCH))
                            pxa2xx_dma_descriptor_fetch(s, c);
                        break;
                    }
                }

                ch->cmd = (ch->cmd & ~DCMD_LEN) | length;

                /* Is the transfer complete now? */
                if (!length) {
                    if (ch->cmd & DCMD_ENDIRQEN)
                        ch->state |= DCSR_ENDINTR;

                    if ((ch->state & DCSR_NODESCFETCH) ||
                        (ch->descr & DDADR_STOP) ||
                        (ch->state & DCSR_EORSTOPEN)) {
                        ch->state |= DCSR_STOPINTR;
                        ch->state &= ~DCSR_RUN;

                        break;
                    }

                    ch->state |= DCSR_STOPINTR;
                    break;
                }
            }
        }

        s->running--;
    }
}

static uint64_t pxa2xx_dma_read(void *opaque, hwaddr offset,
                                unsigned size)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    unsigned int channel;

    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad access width %u\n",
                      __func__, size);
        return 5;
    }

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;
        return s->req[channel];

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        return 0;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        if (s->chan[channel].request)
            return s->chan[channel].state | DCSR_REQPEND;
        return s->chan[channel].state;

    case DINT:
        return s->stopintr | s->eorintr | s->rasintr |
               s->startintr | s->endintr;

    case DALGN:
        return s->align;

    case DPCSR:
        return s->pio;
    }

    if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
        channel = (offset - D_CH0) >> 4;
        switch ((offset & 0x0f) >> 2) {
        case DDADR:
            return s->chan[channel].descr;
        case DSADR:
            return s->chan[channel].src;
        case DTADR:
            return s->chan[channel].dest;
        case DCMD:
            return s->chan[channel].cmd;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
                  __func__, offset);
    return 7;
}
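
/*
 * MMIO write handler.  In the DCSR registers the low interrupt status
 * bits (the 0x0000071f mask) are write-one-to-clear, while the control
 * bits in the top byte (the 0xfc800000 mask) are stored as written; a
 * write with DCSR_RUN set kicks the fully synchronous transfer engine.
 */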
static void pxa2xx_dma_write(void *opaque, hwaddr offset,
                             uint64_t value, unsigned size)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    unsigned int channel;

    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad access width %u\n",
                      __func__, size);
        return;
    }

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;

        /* Valid channel numbers are 0 .. s->channels - 1 */
        if (value & DRCMR_MAPVLD)
            if ((value & DRCMR_CHLNUM) >= s->channels)
                hw_error("%s: Bad DMA channel %i\n",
                         __func__, (unsigned)value & DRCMR_CHLNUM);

        s->req[channel] = value;
        break;

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        /* Nothing to do */
        break;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        s->chan[channel].state &= 0x0000071f & ~(value &
                (DCSR_EORINT | DCSR_ENDINTR |
                 DCSR_STARTINTR | DCSR_BUSERRINTR));
        s->chan[channel].state |= value & 0xfc800000;

        if (s->chan[channel].state & DCSR_STOPIRQEN)
            s->chan[channel].state &= ~DCSR_STOPINTR;

        if (value & DCSR_NODESCFETCH) {
            /* No-descriptor-fetch mode */
            if (value & DCSR_RUN) {
                s->chan[channel].state &= ~DCSR_STOPINTR;
                pxa2xx_dma_run(s);
            }
        } else {
            /* Descriptor-fetch mode */
            if (value & DCSR_RUN) {
                s->chan[channel].state &= ~DCSR_STOPINTR;
                pxa2xx_dma_descriptor_fetch(s, channel);
                pxa2xx_dma_run(s);
            }
        }

        /* Shouldn't matter as our DMA is synchronous.  */
        if (!(value & (DCSR_RUN | DCSR_MASKRUN)))
            s->chan[channel].state |= DCSR_STOPINTR;

        if (value & DCSR_CLRCMPST)
            s->chan[channel].state &= ~DCSR_CMPST;
        if (value & DCSR_SETCMPST)
            s->chan[channel].state |= DCSR_CMPST;

        pxa2xx_dma_update(s, channel);
        break;

    case DALGN:
        s->align = value;
        break;

    case DPCSR:
        s->pio = value & 0x80000001;
        break;

    default:
        if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
            channel = (offset - D_CH0) >> 4;
            switch ((offset & 0x0f) >> 2) {
            case DDADR:
                s->chan[channel].descr = value;
                break;
            case DSADR:
                s->chan[channel].src = value;
                break;
            case DTADR:
                s->chan[channel].dest = value;
                break;
            case DCMD:
                s->chan[channel].cmd = value;
                break;
            default:
                goto fail;
            }

            break;
        }
    fail:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
                      __func__, offset);
    }
}

static const MemoryRegionOps pxa2xx_dma_ops = {
    .read = pxa2xx_dma_read,
    .write = pxa2xx_dma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void pxa2xx_dma_request(void *opaque, int req_num, int on)
{
    PXA2xxDMAState *s = opaque;
    int ch;
    if (req_num < 0 || req_num >= PXA2XX_DMA_NUM_REQUESTS)
        hw_error("%s: Bad DMA request %i\n", __func__, req_num);

    if (!(s->req[req_num] & DRCMR_MAPVLD))
        return;
    ch = s->req[req_num] & DRCMR_CHLNUM;

    if (!s->chan[ch].request && on)
        s->chan[ch].state |= DCSR_RASINTR;
    else
        s->chan[ch].state &= ~DCSR_RASINTR;
    if (s->chan[ch].request && !on)
        s->chan[ch].state |= DCSR_EORINT;

    s->chan[ch].request = on;
    if (on) {
        pxa2xx_dma_run(s);
        pxa2xx_dma_update(s, ch);
    }
}

static void pxa2xx_dma_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    PXA2xxDMAState *s = PXA2XX_DMA(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memset(s->req, 0, sizeof(s->req));

    qdev_init_gpio_in(dev, pxa2xx_dma_request, PXA2XX_DMA_NUM_REQUESTS);

    memory_region_init_io(&s->iomem, obj, &pxa2xx_dma_ops, s,
                          "pxa2xx.dma", 0x00010000);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq);
}

static void pxa2xx_dma_realize(DeviceState *dev, Error **errp)
{
    PXA2xxDMAState *s = PXA2XX_DMA(dev);
    int i;

    if (s->channels <= 0) {
        error_setg(errp, "channels value invalid");
        return;
    }

    s->chan = g_new0(PXA2xxDMAChannel, s->channels);

    for (i = 0; i < s->channels; i++)
        s->chan[i].state = DCSR_STOPINTR;
}

DeviceState *pxa27x_dma_init(hwaddr base, qemu_irq irq)
{
    DeviceState *dev;

    dev = qdev_new("pxa2xx-dma");
    qdev_prop_set_int32(dev, "channels", PXA27X_DMA_NUM_CHANNELS);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);

    return dev;
}

DeviceState *pxa255_dma_init(hwaddr base, qemu_irq irq)
{
    DeviceState *dev;

    dev = qdev_new("pxa2xx-dma");
    qdev_prop_set_int32(dev, "channels", PXA255_DMA_NUM_CHANNELS);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);

    return dev;
}
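
/*
 * Version 0 of the migration stream carried four bytes that are no
 * longer used; is_version_0() lets the VMSTATE_UNUSED_TEST entry below
 * skip over them when loading such a stream.
 */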
static bool is_version_0(void *opaque, int version_id)
{
    return version_id == 0;
}

static const VMStateDescription vmstate_pxa2xx_dma_chan = {
    .name = "pxa2xx_dma_chan",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(descr, PXA2xxDMAChannel),
        VMSTATE_UINT32(src, PXA2xxDMAChannel),
        VMSTATE_UINT32(dest, PXA2xxDMAChannel),
        VMSTATE_UINT32(cmd, PXA2xxDMAChannel),
        VMSTATE_UINT32(state, PXA2xxDMAChannel),
        VMSTATE_INT32(request, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};

static const VMStateDescription vmstate_pxa2xx_dma = {
    .name = "pxa2xx_dma",
    .version_id = 1,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED_TEST(is_version_0, 4),
        VMSTATE_UINT32(stopintr, PXA2xxDMAState),
        VMSTATE_UINT32(eorintr, PXA2xxDMAState),
        VMSTATE_UINT32(rasintr, PXA2xxDMAState),
        VMSTATE_UINT32(startintr, PXA2xxDMAState),
        VMSTATE_UINT32(endintr, PXA2xxDMAState),
        VMSTATE_UINT32(align, PXA2xxDMAState),
        VMSTATE_UINT32(pio, PXA2xxDMAState),
        VMSTATE_BUFFER(req, PXA2xxDMAState),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(chan, PXA2xxDMAState, channels,
                vmstate_pxa2xx_dma_chan, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};

static Property pxa2xx_dma_properties[] = {
    DEFINE_PROP_INT32("channels", PXA2xxDMAState, channels, -1),
    DEFINE_PROP_END_OF_LIST(),
};

static void pxa2xx_dma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "PXA2xx DMA controller";
    dc->vmsd = &vmstate_pxa2xx_dma;
    device_class_set_props(dc, pxa2xx_dma_properties);
    dc->realize = pxa2xx_dma_realize;
}

static const TypeInfo pxa2xx_dma_info = {
    .name = TYPE_PXA2XX_DMA,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PXA2xxDMAState),
    .instance_init = pxa2xx_dma_init,
    .class_init = pxa2xx_dma_class_init,
};

static void pxa2xx_dma_register_types(void)
{
    type_register_static(&pxa2xx_dma_info);
}

type_init(pxa2xx_dma_register_types)