/*
 * TI OMAP DMA gigacell.
 *
 * Copyright (C) 2006-2008 Andrzej Zaborowski <balrog@zabor.org>
 * Copyright (C) 2007-2008 Lauro Ramos Venancio <lauro.venancio@indt.org.br>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu-common.h"
#include "qemu/timer.h"
#include "hw/arm/omap.h"
#include "hw/irq.h"
#include "hw/arm/soc_dma.h"

/*
 * Per-channel state.  For the two-element arrays below, index [0] refers
 * to the source side of the transfer and [1] to the destination side
 * (see omap_dma_channel_load, which copies addr[0] to active_set.src and
 * addr[1] to active_set.dest).
 */
struct omap_dma_channel_s {
    /* transfer data */
    int burst[2];
    int pack[2];
    int endian[2];
    int endian_lock[2];
    int translate[2];
    enum omap_dma_port port[2];
    hwaddr addr[2];
    omap_dma_addressing_t mode[2];
    uint32_t elements;          /* elements per frame */
    uint16_t frames;            /* frames per block */
    int32_t frame_index[2];
    int16_t element_index[2];
    int data_type;              /* element size in bytes: 1, 2 or 4 */

    /* transfer type */
    int transparent_copy;
    int constant_fill;
    uint32_t color;
    int prefetch;

    /* auto init and linked channel data */
    int end_prog;
    int repeat;
    int auto_init;
    int link_enabled;
    int link_next_ch;

    /* interruption data */
    int interrupts;             /* enabled interrupt causes (CICR) */
    int status;                 /* pending interrupt causes (CSR) */
    int cstatus;

    /* state data */
    int active;
    int enable;
    int sync;                   /* DMA request line this channel syncs to; 0 = async */
    int src_sync;
    int pending_request;
    int waiting_end_prog;
    uint16_t cpc;
    int set_update;

    /* sync type */
    int fs;                     /* frame sync */
    int bs;                     /* block sync */

    /* compatibility */
    int omap_3_1_compatible_disable;

    qemu_irq irq;
    struct omap_dma_channel_s *sibling;

    /* Working copy of the transfer parameters, latched from the channel
     * registers by omap_dma_channel_load() and advanced as the transfer
     * progresses. */
    struct omap_dma_reg_set_s {
        hwaddr src, dest;       /* current source/destination addresses */
        int frame;              /* frames completed so far */
        int element;            /* elements completed in the current frame */
        int pck_element;        /* elements completed in the current packet */
        int frame_delta[2];
        int elem_delta[2];
        int frames;
        int elements;
        int pck_elements;
    } active_set;

    struct soc_dma_ch_s *dma;

    /* unused parameters */
    int write_mode;
    int priority;
    int interleave_disabled;
    int type;
    int suspend;
    int buf_disable;
};

/* Whole-controller state. */
struct omap_dma_s {
    struct soc_dma_s *dma;
    MemoryRegion iomem;

    struct omap_mpu_state_s *mpu;
    omap_clk clk;
    qemu_irq irq[4];
    /* Model-dependent interrupt fan-out; one of
     * omap_dma_interrupts_3_1_update / _3_2_update. */
    void (*intr_update)(struct omap_dma_s *s);
    enum omap_dma_model model;
    int omap_3_1_mapping_disabled;

    uint32_t gcr;
    uint32_t ocp;
    uint32_t caps[5];
    uint32_t irqen[4];
    uint32_t irqstat[4];

    int chans;
    struct omap_dma_channel_s ch[32];
    struct omap_dma_lcd_channel_s lcd_ch;
};

/* Interrupts */
#define TIMEOUT_INTR        (1 << 0)
#define EVENT_DROP_INTR     (1 << 1)
#define HALF_FRAME_INTR     (1 << 2)
#define END_FRAME_INTR      (1 << 3)
#define LAST_FRAME_INTR     (1 << 4)
#define END_BLOCK_INTR      (1 << 5)
#define SYNC                (1 << 6)
#define END_PKT_INTR        (1 << 7)
#define TRANS_ERR_INTR      (1 << 8)
#define MISALIGN_INTR       (1 << 11)

static inline void omap_dma_interrupts_update(struct omap_dma_s *s)
{
    s->intr_update(s);
}

/* Latch the channel registers into ch->active_set and derive the access
 * pattern (const/linear/other) for the soc_dma layer. */
static void omap_dma_channel_load(struct omap_dma_channel_s *ch)
{
    struct omap_dma_reg_set_s *a = &ch->active_set;
    int i, normal;
    int omap_3_1 = !ch->omap_3_1_compatible_disable;

    /*
     * TODO: verify address ranges and alignment
     * TODO: port endianness
     */

    a->src = ch->addr[0];
    a->dest = ch->addr[1];
    a->frames = ch->frames;
    a->elements = ch->elements;
    a->pck_elements = ch->frame_index[!ch->src_sync];
    a->frame = 0;
    a->element = 0;
    a->pck_element = 0;

    if (unlikely(!ch->elements ||
!ch->frames)) { 165 printf("%s: bad DMA request\n", __func__); 166 return; 167 } 168 169 for (i = 0; i < 2; i ++) 170 switch (ch->mode[i]) { 171 case constant: 172 a->elem_delta[i] = 0; 173 a->frame_delta[i] = 0; 174 break; 175 case post_incremented: 176 a->elem_delta[i] = ch->data_type; 177 a->frame_delta[i] = 0; 178 break; 179 case single_index: 180 a->elem_delta[i] = ch->data_type + 181 ch->element_index[omap_3_1 ? 0 : i] - 1; 182 a->frame_delta[i] = 0; 183 break; 184 case double_index: 185 a->elem_delta[i] = ch->data_type + 186 ch->element_index[omap_3_1 ? 0 : i] - 1; 187 a->frame_delta[i] = ch->frame_index[omap_3_1 ? 0 : i] - 188 ch->element_index[omap_3_1 ? 0 : i]; 189 break; 190 default: 191 break; 192 } 193 194 normal = !ch->transparent_copy && !ch->constant_fill && 195 /* FIFO is big-endian so either (ch->endian[n] == 1) OR 196 * (ch->endian_lock[n] == 1) mean no endianism conversion. */ 197 (ch->endian[0] | ch->endian_lock[0]) == 198 (ch->endian[1] | ch->endian_lock[1]); 199 for (i = 0; i < 2; i ++) { 200 /* TODO: for a->frame_delta[i] > 0 still use the fast path, just 201 * limit min_elems in omap_dma_transfer_setup to the nearest frame 202 * end. */ 203 if (!a->elem_delta[i] && normal && 204 (a->frames == 1 || !a->frame_delta[i])) 205 ch->dma->type[i] = soc_dma_access_const; 206 else if (a->elem_delta[i] == ch->data_type && normal && 207 (a->frames == 1 || !a->frame_delta[i])) 208 ch->dma->type[i] = soc_dma_access_linear; 209 else 210 ch->dma->type[i] = soc_dma_access_other; 211 212 ch->dma->vaddr[i] = ch->addr[i]; 213 } 214 soc_dma_ch_update(ch->dma); 215 } 216 217 static void omap_dma_activate_channel(struct omap_dma_s *s, 218 struct omap_dma_channel_s *ch) 219 { 220 if (!ch->active) { 221 if (ch->set_update) { 222 /* It's not clear when the active set is supposed to be 223 * loaded from registers. 
             We're already loading it when the
             * channel is enabled, and for some guests this is not enough
             * but that may be also because of a race condition (no
             * delays in qemu) in the guest code, which we're just
             * working around here. */
            omap_dma_channel_load(ch);
            ch->set_update = 0;
        }

        ch->active = 1;
        soc_dma_set_request(ch->dma, 1);
        if (ch->sync)
            ch->status |= SYNC;
    }
}

/* Stop the transfer request unless a pending request or an active
 * synchronized DMA request line keeps the channel going. */
static void omap_dma_deactivate_channel(struct omap_dma_s *s,
                struct omap_dma_channel_s *ch)
{
    /* Update cpc */
    ch->cpc = ch->active_set.dest & 0xffff;

    if (ch->pending_request && !ch->waiting_end_prog && ch->enable) {
        /* Don't deactivate the channel */
        ch->pending_request = 0;
        return;
    }

    /* Don't deactive the channel if it is synchronized and the DMA request is
       active */
    if (ch->sync && ch->enable && (s->dma->drqbmp & (1ULL << ch->sync)))
        return;

    if (ch->active) {
        ch->active = 0;
        ch->status &= ~SYNC;
        soc_dma_set_request(ch->dma, 0);
    }
}

/* Guest set the enable bit: latch the registers and, for async channels or
 * channels whose request line is already asserted, start transferring. */
static void omap_dma_enable_channel(struct omap_dma_s *s,
                struct omap_dma_channel_s *ch)
{
    if (!ch->enable) {
        ch->enable = 1;
        ch->waiting_end_prog = 0;
        omap_dma_channel_load(ch);
        /* TODO: theoretically if ch->sync && ch->prefetch &&
         * !s->dma->drqbmp[ch->sync], we should also activate and fetch
         * from source and then stall until signalled.
         */
        if ((!ch->sync) || (s->dma->drqbmp & (1ULL << ch->sync))) {
            omap_dma_activate_channel(s, ch);
        }
    }
}

/* Guest cleared the enable bit: drop any pending request and stop. */
static void omap_dma_disable_channel(struct omap_dma_s *s,
                struct omap_dma_channel_s *ch)
{
    if (ch->enable) {
        ch->enable = 0;
        /* Discard any pending request */
        ch->pending_request = 0;
        omap_dma_deactivate_channel(s, ch);
    }
}

/* END_PROG written while the channel was waiting for reprogramming:
 * resume the channel. */
static void omap_dma_channel_end_prog(struct omap_dma_s *s,
                struct omap_dma_channel_s *ch)
{
    if (ch->waiting_end_prog) {
        ch->waiting_end_prog = 0;
        if (!ch->sync || ch->pending_request) {
            ch->pending_request = 0;
            omap_dma_activate_channel(s, ch);
        }
    }
}

/* IRQ fan-out used in OMAP 3.1 compatibility mapping (9 channels). */
static void omap_dma_interrupts_3_1_update(struct omap_dma_s *s)
{
    struct omap_dma_channel_s *ch = s->ch;

    /* First three interrupts are shared between two channels each. */
    if (ch[0].status | ch[6].status)
        qemu_irq_raise(ch[0].irq);
    if (ch[1].status | ch[7].status)
        qemu_irq_raise(ch[1].irq);
    if (ch[2].status | ch[8].status)
        qemu_irq_raise(ch[2].irq);
    if (ch[3].status)
        qemu_irq_raise(ch[3].irq);
    if (ch[4].status)
        qemu_irq_raise(ch[4].irq);
    if (ch[5].status)
        qemu_irq_raise(ch[5].irq);
}

/* IRQ fan-out for the 3.2 mapping: one interrupt line per channel. */
static void omap_dma_interrupts_3_2_update(struct omap_dma_s *s)
{
    struct omap_dma_channel_s *ch = s->ch;
    int i;

    for (i = s->chans; i; ch ++, i --)
        if (ch->status)
            qemu_irq_raise(ch->irq);
}

static void omap_dma_enable_3_1_mapping(struct omap_dma_s *s)
{
    s->omap_3_1_mapping_disabled = 0;
    s->chans = 9;
    s->intr_update = omap_dma_interrupts_3_1_update;
}

static void omap_dma_disable_3_1_mapping(struct omap_dma_s *s)
{
    s->omap_3_1_mapping_disabled = 1;
    s->chans = 16;
    s->intr_update = omap_dma_interrupts_3_2_update;
}

/* A DMA request line fired: activate every enabled channel synchronized to
 * it, or record/drop the request if the channel is already busy. */
static void omap_dma_process_request(struct omap_dma_s *s, int request)
{
    int channel;
    int drop_event = 0;
    struct omap_dma_channel_s *ch = s->ch;

    for (channel = 0; channel < s->chans; channel ++, ch ++) {
        if (ch->enable && ch->sync == request) {
            if (!ch->active)
                omap_dma_activate_channel(s, ch);
            else if (!ch->pending_request)
                ch->pending_request = 1;
            else {
                /* Request collision */
                /* Second request received while processing other request */
                ch->status |= EVENT_DROP_INTR;
                drop_event = 1;
            }
        }
    }

    if (drop_event)
        omap_dma_interrupts_update(s);
}

/* Slow-path element-by-element copy callback for the soc_dma layer; used
 * when the access pattern is not const/linear.
 * NOTE(review): the #else (MULTI_REQ) branch below references an `s` that
 * is not declared in this scope, so it cannot compile — MULTI_REQ appears
 * to be dead/unfinished code; confirm before enabling it. */
static void omap_dma_transfer_generic(struct soc_dma_ch_s *dma)
{
    uint8_t value[4];
    struct omap_dma_channel_s *ch = dma->opaque;
    struct omap_dma_reg_set_s *a = &ch->active_set;
    int bytes = dma->bytes;
#ifdef MULTI_REQ
    uint16_t status = ch->status;
#endif

    do {
        /* Transfer a single element */
        /* FIXME: check the endianness */
        if (!ch->constant_fill)
            cpu_physical_memory_read(a->src, value, ch->data_type);
        else
            *(uint32_t *) value = ch->color;

        if (!ch->transparent_copy || *(uint32_t *) value != ch->color)
            cpu_physical_memory_write(a->dest, value, ch->data_type);

        a->src += a->elem_delta[0];
        a->dest += a->elem_delta[1];
        a->element ++;

#ifndef MULTI_REQ
        if (a->element == a->elements) {
            /* End of Frame */
            a->element = 0;
            a->src += a->frame_delta[0];
            a->dest += a->frame_delta[1];
            a->frame ++;

            /* If the channel is async, update cpc */
            if (!ch->sync)
                ch->cpc = a->dest & 0xffff;
        }
    } while ((bytes -= ch->data_type));
#else
    /* If the channel is element synchronized, deactivate it */
    if (ch->sync && !ch->fs && !ch->bs)
        omap_dma_deactivate_channel(s, ch);

    /* If it is the last frame, set the LAST_FRAME interrupt */
    if (a->element == 1 && a->frame == a->frames - 1)
        if (ch->interrupts & LAST_FRAME_INTR)
            ch->status |= LAST_FRAME_INTR;

    /* If the half of the frame was reached, set
     the HALF_FRAME
       interrupt */
    if (a->element == (a->elements >> 1))
        if (ch->interrupts & HALF_FRAME_INTR)
            ch->status |= HALF_FRAME_INTR;

    if (ch->fs && ch->bs) {
        a->pck_element ++;
        /* Check if a full packet has beed transferred. */
        if (a->pck_element == a->pck_elements) {
            a->pck_element = 0;

            /* Set the END_PKT interrupt */
            if ((ch->interrupts & END_PKT_INTR) && !ch->src_sync)
                ch->status |= END_PKT_INTR;

            /* If the channel is packet-synchronized, deactivate it */
            if (ch->sync)
                omap_dma_deactivate_channel(s, ch);
        }
    }

    if (a->element == a->elements) {
        /* End of Frame */
        a->element = 0;
        a->src += a->frame_delta[0];
        a->dest += a->frame_delta[1];
        a->frame ++;

        /* If the channel is frame synchronized, deactivate it */
        if (ch->sync && ch->fs && !ch->bs)
            omap_dma_deactivate_channel(s, ch);

        /* If the channel is async, update cpc */
        if (!ch->sync)
            ch->cpc = a->dest & 0xffff;

        /* Set the END_FRAME interrupt */
        if (ch->interrupts & END_FRAME_INTR)
            ch->status |= END_FRAME_INTR;

        if (a->frame == a->frames) {
            /* End of Block */
            /* Disable the channel */

            if (ch->omap_3_1_compatible_disable) {
                omap_dma_disable_channel(s, ch);
                if (ch->link_enabled)
                    omap_dma_enable_channel(s,
                                    &s->ch[ch->link_next_ch]);
            } else {
                if (!ch->auto_init)
                    omap_dma_disable_channel(s, ch);
                else if (ch->repeat || ch->end_prog)
                    omap_dma_channel_load(ch);
                else {
                    ch->waiting_end_prog = 1;
                    omap_dma_deactivate_channel(s, ch);
                }
            }

            if (ch->interrupts & END_BLOCK_INTR)
                ch->status |= END_BLOCK_INTR;
        }
    }
    } while (status == ch->status && ch->active);

    omap_dma_interrupts_update(s);
#endif
}

/* Events that can cut a transfer short, checked soonest-first in
 * omap_dma_transfer_setup below. */
enum {
    omap_dma_intr_element_sync,
    omap_dma_intr_last_frame,
    omap_dma_intr_half_frame,
    omap_dma_intr_frame,
    omap_dma_intr_frame_sync,
omap_dma_intr_packet, 496 omap_dma_intr_packet_sync, 497 omap_dma_intr_block, 498 __omap_dma_intr_last, 499 }; 500 501 static void omap_dma_transfer_setup(struct soc_dma_ch_s *dma) 502 { 503 struct omap_dma_port_if_s *src_p, *dest_p; 504 struct omap_dma_reg_set_s *a; 505 struct omap_dma_channel_s *ch = dma->opaque; 506 struct omap_dma_s *s = dma->dma->opaque; 507 int frames, min_elems, elements[__omap_dma_intr_last]; 508 509 a = &ch->active_set; 510 511 src_p = &s->mpu->port[ch->port[0]]; 512 dest_p = &s->mpu->port[ch->port[1]]; 513 if ((!ch->constant_fill && !src_p->addr_valid(s->mpu, a->src)) || 514 (!dest_p->addr_valid(s->mpu, a->dest))) { 515 #if 0 516 /* Bus time-out */ 517 if (ch->interrupts & TIMEOUT_INTR) 518 ch->status |= TIMEOUT_INTR; 519 omap_dma_deactivate_channel(s, ch); 520 continue; 521 #endif 522 printf("%s: Bus time-out in DMA%i operation\n", 523 __func__, dma->num); 524 } 525 526 min_elems = INT_MAX; 527 528 /* Check all the conditions that terminate the transfer starting 529 * with those that can occur the soonest. */ 530 #define INTR_CHECK(cond, id, nelements) \ 531 if (cond) { \ 532 elements[id] = nelements; \ 533 if (elements[id] < min_elems) \ 534 min_elems = elements[id]; \ 535 } else \ 536 elements[id] = INT_MAX; 537 538 /* Elements */ 539 INTR_CHECK( 540 ch->sync && !ch->fs && !ch->bs, 541 omap_dma_intr_element_sync, 542 1) 543 544 /* Frames */ 545 /* TODO: for transfers where entire frames can be read and written 546 * using memcpy() but a->frame_delta is non-zero, try to still do 547 * transfers using soc_dma but limit min_elems to a->elements - ... 548 * See also the TODO in omap_dma_channel_load. 
     */
    INTR_CHECK(
            (ch->interrupts & LAST_FRAME_INTR) &&
            ((a->frame < a->frames - 1) || !a->element),
            omap_dma_intr_last_frame,
            (a->frames - a->frame - 2) * a->elements +
            (a->elements - a->element + 1))
    INTR_CHECK(
            ch->interrupts & HALF_FRAME_INTR,
            omap_dma_intr_half_frame,
            (a->elements >> 1) +
            (a->element >= (a->elements >> 1) ? a->elements : 0) -
            a->element)
    INTR_CHECK(
            ch->sync && ch->fs && (ch->interrupts & END_FRAME_INTR),
            omap_dma_intr_frame,
            a->elements - a->element)
    INTR_CHECK(
            ch->sync && ch->fs && !ch->bs,
            omap_dma_intr_frame_sync,
            a->elements - a->element)

    /* Packets */
    INTR_CHECK(
            ch->fs && ch->bs &&
            (ch->interrupts & END_PKT_INTR) && !ch->src_sync,
            omap_dma_intr_packet,
            a->pck_elements - a->pck_element)
    INTR_CHECK(
            ch->fs && ch->bs && ch->sync,
            omap_dma_intr_packet_sync,
            a->pck_elements - a->pck_element)

    /* Blocks */
    /* Unconditional: the end of the block always bounds the transfer. */
    INTR_CHECK(
            1,
            omap_dma_intr_block,
            (a->frames - a->frame - 1) * a->elements +
            (a->elements - a->element))

    dma->bytes = min_elems * ch->data_type;

    /* Set appropriate interrupts and/or deactivate channels */

#ifdef MULTI_REQ
    /* TODO: should all of this only be done if dma->update, and otherwise
     * inside omap_dma_transfer_generic below - check what's faster.
     */
    if (dma->update) {
#endif

    /* Whichever condition(s) produced the winning min_elems fire now;
     * several can coincide. */

    /* If the channel is element synchronized, deactivate it */
    if (min_elems == elements[omap_dma_intr_element_sync])
        omap_dma_deactivate_channel(s, ch);

    /* If it is the last frame, set the LAST_FRAME interrupt */
    if (min_elems == elements[omap_dma_intr_last_frame])
        ch->status |= LAST_FRAME_INTR;

    /* If exactly half of the frame was reached, set the HALF_FRAME
       interrupt */
    if (min_elems == elements[omap_dma_intr_half_frame])
        ch->status |= HALF_FRAME_INTR;

    /* If a full packet has been transferred, set the END_PKT interrupt */
    if (min_elems == elements[omap_dma_intr_packet])
        ch->status |= END_PKT_INTR;

    /* If the channel is packet-synchronized, deactivate it */
    if (min_elems == elements[omap_dma_intr_packet_sync])
        omap_dma_deactivate_channel(s, ch);

    /* If the channel is frame synchronized, deactivate it */
    if (min_elems == elements[omap_dma_intr_frame_sync])
        omap_dma_deactivate_channel(s, ch);

    /* Set the END_FRAME interrupt */
    if (min_elems == elements[omap_dma_intr_frame])
        ch->status |= END_FRAME_INTR;

    if (min_elems == elements[omap_dma_intr_block]) {
        /* End of Block */
        /* Disable the channel */

        if (ch->omap_3_1_compatible_disable) {
            omap_dma_disable_channel(s, ch);
            if (ch->link_enabled)
                omap_dma_enable_channel(s, &s->ch[ch->link_next_ch]);
        } else {
            if (!ch->auto_init)
                omap_dma_disable_channel(s, ch);
            else if (ch->repeat || ch->end_prog)
                omap_dma_channel_load(ch);
            else {
                ch->waiting_end_prog = 1;
                omap_dma_deactivate_channel(s, ch);
            }
        }

        if (ch->interrupts & END_BLOCK_INTR)
            ch->status |= END_BLOCK_INTR;
    }

    /* Update packet number */
    if (ch->fs && ch->bs) {
        a->pck_element += min_elems;
        a->pck_element %= a->pck_elements;
    }

    /* TODO: check if we really need to update anything here or perhaps we
     * can
       skip part of this. */
#ifndef MULTI_REQ
    if (dma->update) {
#endif
        /* Advance the active set past the min_elems just transferred. */
        a->element += min_elems;

        frames = a->element / a->elements;
        a->element = a->element % a->elements;
        a->frame += frames;
        a->src += min_elems * a->elem_delta[0] + frames * a->frame_delta[0];
        a->dest += min_elems * a->elem_delta[1] + frames * a->frame_delta[1];

        /* If the channel is async, update cpc */
        if (!ch->sync && frames)
            ch->cpc = a->dest & 0xffff;

        /* TODO: if the destination port is IMIF or EMIFF, set the dirty
         * bits on it. */
#ifndef MULTI_REQ
    }
#else
    }
#endif

    omap_dma_interrupts_update(s);
}

/* Reset the controller and all channels to power-on register defaults. */
void omap_dma_reset(struct soc_dma_s *dma)
{
    int i;
    struct omap_dma_s *s = dma->opaque;

    soc_dma_reset(s->dma);
    if (s->model < omap_dma_4)
        s->gcr = 0x0004;
    else
        s->gcr = 0x00010010;
    s->ocp = 0x00000000;
    memset(&s->irqstat, 0, sizeof(s->irqstat));
    memset(&s->irqen, 0, sizeof(s->irqen));
    s->lcd_ch.src = emiff;
    s->lcd_ch.condition = 0;
    s->lcd_ch.interrupts = 0;
    s->lcd_ch.dual = 0;
    if (s->model < omap_dma_4)
        omap_dma_enable_3_1_mapping(s);
    for (i = 0; i < s->chans; i ++) {
        s->ch[i].suspend = 0;
        s->ch[i].prefetch = 0;
        s->ch[i].buf_disable = 0;
        s->ch[i].src_sync = 0;
        memset(&s->ch[i].burst, 0, sizeof(s->ch[i].burst));
        memset(&s->ch[i].port, 0, sizeof(s->ch[i].port));
        memset(&s->ch[i].mode, 0, sizeof(s->ch[i].mode));
        memset(&s->ch[i].frame_index, 0, sizeof(s->ch[i].frame_index));
        memset(&s->ch[i].element_index, 0, sizeof(s->ch[i].element_index));
        memset(&s->ch[i].endian, 0, sizeof(s->ch[i].endian));
        memset(&s->ch[i].endian_lock, 0, sizeof(s->ch[i].endian_lock));
        memset(&s->ch[i].translate, 0, sizeof(s->ch[i].translate));
        s->ch[i].write_mode = 0;
        s->ch[i].data_type = 0;
        s->ch[i].transparent_copy = 0;
        s->ch[i].constant_fill = 0;
        s->ch[i].color = 0x00000000;
        s->ch[i].end_prog = 0;
        s->ch[i].repeat = 0;
        s->ch[i].auto_init = 0;
        s->ch[i].link_enabled = 0;
        if (s->model < omap_dma_4)
            s->ch[i].interrupts = 0x0003;
        else
            s->ch[i].interrupts = 0x0000;
        s->ch[i].status = 0;
        s->ch[i].cstatus = 0;
        s->ch[i].active = 0;
        s->ch[i].enable = 0;
        s->ch[i].sync = 0;
        s->ch[i].pending_request = 0;
        s->ch[i].waiting_end_prog = 0;
        s->ch[i].cpc = 0x0000;
        s->ch[i].fs = 0;
        s->ch[i].bs = 0;
        s->ch[i].omap_3_1_compatible_disable = 0;
        memset(&s->ch[i].active_set, 0, sizeof(s->ch[i].active_set));
        s->ch[i].priority = 0;
        s->ch[i].interleave_disabled = 0;
        s->ch[i].type = 0;
    }
}

/* Read one 16-bit channel register at offset 'reg' into *value.
 * Returns 0 on success, 1 for an unknown register. */
static int omap_dma_ch_reg_read(struct omap_dma_s *s,
                struct omap_dma_channel_s *ch, int reg, uint16_t *value)
{
    switch (reg) {
    case 0x00:  /* SYS_DMA_CSDP_CH0 */
        *value = (ch->burst[1] << 14) |
                (ch->pack[1] << 13) |
                (ch->port[1] << 9) |
                (ch->burst[0] << 7) |
                (ch->pack[0] << 6) |
                (ch->port[0] << 2) |
                (ch->data_type >> 1);
        break;

    case 0x02:  /* SYS_DMA_CCR_CH0 */
        if (s->model <= omap_dma_3_1)
            *value = 0 << 10;           /* FIFO_FLUSH reads as 0 */
        else
            *value = ch->omap_3_1_compatible_disable << 10;
        *value |= (ch->mode[1] << 14) |
                (ch->mode[0] << 12) |
                (ch->end_prog << 11) |
                (ch->repeat << 9) |
                (ch->auto_init << 8) |
                (ch->enable << 7) |
                (ch->priority << 6) |
                (ch->fs << 5) | ch->sync;
        break;

    case 0x04:  /* SYS_DMA_CICR_CH0 */
        *value = ch->interrupts;
        break;

    case 0x06:  /* SYS_DMA_CSR_CH0 */
        /* Reading CSR clears the pending bits (except SYNC) and lowers
         * the interrupt line; the sibling channel's status is folded in
         * under the 3.1 mapping. */
        *value = ch->status;
        ch->status &= SYNC;
        if (!ch->omap_3_1_compatible_disable && ch->sibling) {
            *value |= (ch->sibling->status & 0x3f) << 6;
            ch->sibling->status &= SYNC;
        }
        qemu_irq_lower(ch->irq);
        break;

    case 0x08:  /* SYS_DMA_CSSA_L_CH0 */
        *value = ch->addr[0] & 0x0000ffff;
        break;

    case 0x0a:  /* SYS_DMA_CSSA_U_CH0 */
        *value
 = ch->addr[0] >> 16;
        break;

    case 0x0c:  /* SYS_DMA_CDSA_L_CH0 */
        *value = ch->addr[1] & 0x0000ffff;
        break;

    case 0x0e:  /* SYS_DMA_CDSA_U_CH0 */
        *value = ch->addr[1] >> 16;
        break;

    case 0x10:  /* SYS_DMA_CEN_CH0 */
        *value = ch->elements;
        break;

    case 0x12:  /* SYS_DMA_CFN_CH0 */
        *value = ch->frames;
        break;

    case 0x14:  /* SYS_DMA_CFI_CH0 */
        *value = ch->frame_index[0];
        break;

    case 0x16:  /* SYS_DMA_CEI_CH0 */
        *value = ch->element_index[0];
        break;

    case 0x18:  /* SYS_DMA_CPC_CH0 or DMA_CSAC */
        if (ch->omap_3_1_compatible_disable)
            *value = ch->active_set.src & 0xffff;       /* CSAC */
        else
            *value = ch->cpc;
        break;

    case 0x1a:  /* DMA_CDAC */
        *value = ch->active_set.dest & 0xffff;  /* CDAC */
        break;

    case 0x1c:  /* DMA_CDEI */
        *value = ch->element_index[1];
        break;

    case 0x1e:  /* DMA_CDFI */
        *value = ch->frame_index[1];
        break;

    case 0x20:  /* DMA_COLOR_L */
        *value = ch->color & 0xffff;
        break;

    case 0x22:  /* DMA_COLOR_U */
        *value = ch->color >> 16;
        break;

    case 0x24:  /* DMA_CCR2 */
        *value = (ch->bs << 2) |
                (ch->transparent_copy << 1) |
                ch->constant_fill;
        break;

    case 0x28:  /* DMA_CLNK_CTRL */
        *value = (ch->link_enabled << 15) |
                (ch->link_next_ch & 0xf);
        break;

    case 0x2a:  /* DMA_LCH_CTRL */
        *value = (ch->interleave_disabled << 15) |
                ch->type;
        break;

    default:
        return 1;
    }
    return 0;
}

/* Write one 16-bit channel register.  Returns 0 on success, 1 for an
 * unknown register. */
static int omap_dma_ch_reg_write(struct omap_dma_s *s,
                struct omap_dma_channel_s *ch, int reg, uint16_t value)
{
    switch (reg) {
    case 0x00:  /* SYS_DMA_CSDP_CH0 */
        ch->burst[1] = (value & 0xc000) >> 14;
        ch->pack[1] = (value & 0x2000) >> 13;
        ch->port[1] = (enum omap_dma_port) ((value & 0x1e00) >> 9);
        ch->burst[0] = (value & 0x0180) >> 7;
        ch->pack[0] = (value & 0x0040) >> 6;
        ch->port[0] =
 (enum omap_dma_port) ((value & 0x003c) >> 2);
        if (ch->port[0] >= __omap_dma_port_last) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid DMA port %i\n",
                          __func__, ch->port[0]);
        }
        if (ch->port[1] >= __omap_dma_port_last) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid DMA port %i\n",
                          __func__, ch->port[1]);
        }
        /* Element size in bytes; encoding 3 is reserved and demoted to
         * the next valid size. */
        ch->data_type = 1 << (value & 3);
        if ((value & 3) == 3) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: bad data_type for DMA channel\n", __func__);
            ch->data_type >>= 1;
        }
        break;

    case 0x02:  /* SYS_DMA_CCR_CH0 */
        ch->mode[1] = (omap_dma_addressing_t) ((value & 0xc000) >> 14);
        ch->mode[0] = (omap_dma_addressing_t) ((value & 0x3000) >> 12);
        ch->end_prog = (value & 0x0800) >> 11;
        if (s->model >= omap_dma_3_2)
            ch->omap_3_1_compatible_disable = (value >> 10) & 0x1;
        ch->repeat = (value & 0x0200) >> 9;
        ch->auto_init = (value & 0x0100) >> 8;
        ch->priority = (value & 0x0040) >> 6;
        ch->fs = (value & 0x0020) >> 5;
        ch->sync = value & 0x001f;

        /* Bit 7 is the channel enable. */
        if (value & 0x0080)
            omap_dma_enable_channel(s, ch);
        else
            omap_dma_disable_channel(s, ch);

        if (ch->end_prog)
            omap_dma_channel_end_prog(s, ch);

        break;

    case 0x04:  /* SYS_DMA_CICR_CH0 */
        ch->interrupts = value & 0x3f;
        break;

    case 0x06:  /* SYS_DMA_CSR_CH0 */
        OMAP_RO_REG((hwaddr) reg);
        break;

    case 0x08:  /* SYS_DMA_CSSA_L_CH0 */
        ch->addr[0] &= 0xffff0000;
        ch->addr[0] |= value;
        break;

    case 0x0a:  /* SYS_DMA_CSSA_U_CH0 */
        ch->addr[0] &= 0x0000ffff;
        ch->addr[0] |= (uint32_t) value << 16;
        break;

    case 0x0c:  /* SYS_DMA_CDSA_L_CH0 */
        ch->addr[1] &= 0xffff0000;
        ch->addr[1] |= value;
        break;

    case 0x0e:  /* SYS_DMA_CDSA_U_CH0 */
        ch->addr[1] &= 0x0000ffff;
        ch->addr[1] |= (uint32_t) value << 16;
        break;

    case 0x10:  /* SYS_DMA_CEN_CH0 */
        ch->elements = value;
        break;

    case 0x12:  /* SYS_DMA_CFN_CH0 */
        ch->frames = value;
        break;

    case 0x14:  /* SYS_DMA_CFI_CH0 */
        ch->frame_index[0] = (int16_t) value;
        break;

    case 0x16:  /* SYS_DMA_CEI_CH0 */
        ch->element_index[0] = (int16_t) value;
        break;

    case 0x18:  /* SYS_DMA_CPC_CH0 or DMA_CSAC */
        OMAP_RO_REG((hwaddr) reg);
        break;

    case 0x1c:  /* DMA_CDEI */
        ch->element_index[1] = (int16_t) value;
        break;

    case 0x1e:  /* DMA_CDFI */
        ch->frame_index[1] = (int16_t) value;
        break;

    case 0x20:  /* DMA_COLOR_L */
        ch->color &= 0xffff0000;
        ch->color |= value;
        break;

    case 0x22:  /* DMA_COLOR_U */
        ch->color &= 0xffff;
        ch->color |= (uint32_t)value << 16;
        break;

    case 0x24:  /* DMA_CCR2 */
        ch->bs = (value >> 2) & 0x1;
        ch->transparent_copy = (value >> 1) & 0x1;
        ch->constant_fill = value & 0x1;
        break;

    case 0x28:  /* DMA_CLNK_CTRL */
        ch->link_enabled = (value >> 15) & 0x1;
        if (value & (1 << 14)) {                        /* Stop_Lnk */
            ch->link_enabled = 0;
            omap_dma_disable_channel(s, ch);
        }
        ch->link_next_ch = value & 0x1f;
        break;

    case 0x2a:  /* DMA_LCH_CTRL */
        ch->interleave_disabled = (value >> 15) & 0x1;
        ch->type = value & 0xf;
        break;

    default:
        return 1;
    }
    return 0;
}

/* Write one 16-bit LCD DMA register (OMAP 3.2 register layout).
 * Returns 0 on success, 1 for an unknown register. */
static int omap_dma_3_2_lcd_write(struct omap_dma_lcd_channel_s *s, int offset,
                uint16_t value)
{
    switch (offset) {
    case 0xbc0: /* DMA_LCD_CSDP */
        s->brust_f2 = (value >> 14) & 0x3;
        s->pack_f2 = (value >> 13) & 0x1;
        s->data_type_f2 = (1 << ((value >> 11) & 0x3));
        s->brust_f1 = (value >> 7) & 0x3;
        s->pack_f1 = (value >> 6) & 0x1;
        s->data_type_f1 = (1 << ((value >> 0) & 0x3));
        break;

    case 0xbc2: /* DMA_LCD_CCR */
        s->mode_f2 = (value >> 14) & 0x3;
        s->mode_f1 = (value >> 12) & 0x3;
        s->end_prog = (value >> 11) & 0x1;
        s->omap_3_1_compatible_disable = (value >> 10) & 0x1;
        s->repeat =
 (value >> 9) & 0x1;
        s->auto_init = (value >> 8) & 0x1;
        s->running = (value >> 7) & 0x1;
        s->priority = (value >> 6) & 0x1;
        s->bs = (value >> 4) & 0x1;
        break;

    case 0xbc4: /* DMA_LCD_CTRL */
        s->dst = (value >> 8) & 0x1;
        s->src = ((value >> 6) & 0x3) << 1;
        s->condition = 0;
        /* Assume no bus errors and thus no BUS_ERROR irq bits. */
        s->interrupts = (value >> 1) & 1;
        s->dual = value & 1;
        break;

    /* Frame-buffer 1 top/bottom addresses, 16 bits at a time. */
    case 0xbc8: /* TOP_B1_L */
        s->src_f1_top &= 0xffff0000;
        s->src_f1_top |= 0x0000ffff & value;
        break;

    case 0xbca: /* TOP_B1_U */
        s->src_f1_top &= 0x0000ffff;
        s->src_f1_top |= (uint32_t)value << 16;
        break;

    case 0xbcc: /* BOT_B1_L */
        s->src_f1_bottom &= 0xffff0000;
        s->src_f1_bottom |= 0x0000ffff & value;
        break;

    case 0xbce: /* BOT_B1_U */
        s->src_f1_bottom &= 0x0000ffff;
        s->src_f1_bottom |= (uint32_t) value << 16;
        break;

    /* Frame-buffer 2 top/bottom addresses. */
    case 0xbd0: /* TOP_B2_L */
        s->src_f2_top &= 0xffff0000;
        s->src_f2_top |= 0x0000ffff & value;
        break;

    case 0xbd2: /* TOP_B2_U */
        s->src_f2_top &= 0x0000ffff;
        s->src_f2_top |= (uint32_t) value << 16;
        break;

    case 0xbd4: /* BOT_B2_L */
        s->src_f2_bottom &= 0xffff0000;
        s->src_f2_bottom |= 0x0000ffff & value;
        break;

    case 0xbd6: /* BOT_B2_U */
        s->src_f2_bottom &= 0x0000ffff;
        s->src_f2_bottom |= (uint32_t) value << 16;
        break;

    case 0xbd8: /* DMA_LCD_SRC_EI_B1 */
        s->element_index_f1 = value;
        break;

    case 0xbda: /* DMA_LCD_SRC_FI_B1_L */
        s->frame_index_f1 &= 0xffff0000;
        s->frame_index_f1 |= 0x0000ffff & value;
        break;

    case 0xbf4: /* DMA_LCD_SRC_FI_B1_U */
        s->frame_index_f1 &= 0x0000ffff;
        s->frame_index_f1 |= (uint32_t) value << 16;
        break;

    case 0xbdc: /* DMA_LCD_SRC_EI_B2 */
        s->element_index_f2 = value;
        break;

    case 0xbde:
 /* DMA_LCD_SRC_FI_B2_L */
        s->frame_index_f2 &= 0xffff0000;
        s->frame_index_f2 |= 0x0000ffff & value;
        break;

    case 0xbf6: /* DMA_LCD_SRC_FI_B2_U */
        s->frame_index_f2 &= 0x0000ffff;
        s->frame_index_f2 |= (uint32_t) value << 16;
        break;

    case 0xbe0: /* DMA_LCD_SRC_EN_B1 */
        s->elements_f1 = value;
        break;

    case 0xbe4: /* DMA_LCD_SRC_FN_B1 */
        s->frames_f1 = value;
        break;

    case 0xbe2: /* DMA_LCD_SRC_EN_B2 */
        s->elements_f2 = value;
        break;

    case 0xbe6: /* DMA_LCD_SRC_FN_B2 */
        s->frames_f2 = value;
        break;

    case 0xbea: /* DMA_LCD_LCH_CTRL */
        s->lch_type = value & 0xf;
        break;

    default:
        return 1;
    }
    return 0;
}

/* Read one 16-bit LCD DMA register (OMAP 3.2 register layout).
 * Returns 0 on success, 1 for an unknown register. */
static int omap_dma_3_2_lcd_read(struct omap_dma_lcd_channel_s *s, int offset,
                uint16_t *ret)
{
    switch (offset) {
    case 0xbc0: /* DMA_LCD_CSDP */
        *ret = (s->brust_f2 << 14) |
            (s->pack_f2 << 13) |
            ((s->data_type_f2 >> 1) << 11) |
            (s->brust_f1 << 7) |
            (s->pack_f1 << 6) |
            ((s->data_type_f1 >> 1) << 0);
        break;

    case 0xbc2: /* DMA_LCD_CCR */
        *ret = (s->mode_f2 << 14) |
            (s->mode_f1 << 12) |
            (s->end_prog << 11) |
            (s->omap_3_1_compatible_disable << 10) |
            (s->repeat << 9) |
            (s->auto_init << 8) |
            (s->running << 7) |
            (s->priority << 6) |
            (s->bs << 4);
        break;

    case 0xbc4: /* DMA_LCD_CTRL */
        /* Reading the control register lowers the LCD interrupt line. */
        qemu_irq_lower(s->irq);
        *ret = (s->dst << 8) |
            ((s->src & 0x6) << 5) |
            (s->condition << 3) |
            (s->interrupts << 1) |
            s->dual;
        break;

    case 0xbc8: /* TOP_B1_L */
        *ret = s->src_f1_top & 0xffff;
        break;

    case 0xbca: /* TOP_B1_U */
        *ret = s->src_f1_top >> 16;
        break;

    case 0xbcc: /* BOT_B1_L */
        *ret = s->src_f1_bottom & 0xffff;
        break;

    case 0xbce: /* BOT_B1_U */
        *ret = s->src_f1_bottom >> 16;
        break;

1190 case 0xbd0: /* TOP_B2_L */ 1191 *ret = s->src_f2_top & 0xffff; 1192 break; 1193 1194 case 0xbd2: /* TOP_B2_U */ 1195 *ret = s->src_f2_top >> 16; 1196 break; 1197 1198 case 0xbd4: /* BOT_B2_L */ 1199 *ret = s->src_f2_bottom & 0xffff; 1200 break; 1201 1202 case 0xbd6: /* BOT_B2_U */ 1203 *ret = s->src_f2_bottom >> 16; 1204 break; 1205 1206 case 0xbd8: /* DMA_LCD_SRC_EI_B1 */ 1207 *ret = s->element_index_f1; 1208 break; 1209 1210 case 0xbda: /* DMA_LCD_SRC_FI_B1_L */ 1211 *ret = s->frame_index_f1 & 0xffff; 1212 break; 1213 1214 case 0xbf4: /* DMA_LCD_SRC_FI_B1_U */ 1215 *ret = s->frame_index_f1 >> 16; 1216 break; 1217 1218 case 0xbdc: /* DMA_LCD_SRC_EI_B2 */ 1219 *ret = s->element_index_f2; 1220 break; 1221 1222 case 0xbde: /* DMA_LCD_SRC_FI_B2_L */ 1223 *ret = s->frame_index_f2 & 0xffff; 1224 break; 1225 1226 case 0xbf6: /* DMA_LCD_SRC_FI_B2_U */ 1227 *ret = s->frame_index_f2 >> 16; 1228 break; 1229 1230 case 0xbe0: /* DMA_LCD_SRC_EN_B1 */ 1231 *ret = s->elements_f1; 1232 break; 1233 1234 case 0xbe4: /* DMA_LCD_SRC_FN_B1 */ 1235 *ret = s->frames_f1; 1236 break; 1237 1238 case 0xbe2: /* DMA_LCD_SRC_EN_B2 */ 1239 *ret = s->elements_f2; 1240 break; 1241 1242 case 0xbe6: /* DMA_LCD_SRC_FN_B2 */ 1243 *ret = s->frames_f2; 1244 break; 1245 1246 case 0xbea: /* DMA_LCD_LCH_CTRL */ 1247 *ret = s->lch_type; 1248 break; 1249 1250 default: 1251 return 1; 1252 } 1253 return 0; 1254 } 1255 1256 static int omap_dma_3_1_lcd_write(struct omap_dma_lcd_channel_s *s, int offset, 1257 uint16_t value) 1258 { 1259 switch (offset) { 1260 case 0x300: /* SYS_DMA_LCD_CTRL */ 1261 s->src = (value & 0x40) ? imif : emiff; 1262 s->condition = 0; 1263 /* Assume no bus errors and thus no BUS_ERROR irq bits. 
         */
        s->interrupts = (value >> 1) & 1;
        s->dual = value & 1;
        break;

    case 0x302: /* SYS_DMA_LCD_TOP_F1_L */
        s->src_f1_top &= 0xffff0000;
        s->src_f1_top |= 0x0000ffff & value;
        break;

    case 0x304: /* SYS_DMA_LCD_TOP_F1_U */
        s->src_f1_top &= 0x0000ffff;
        s->src_f1_top |= (uint32_t)value << 16;
        break;

    case 0x306: /* SYS_DMA_LCD_BOT_F1_L */
        s->src_f1_bottom &= 0xffff0000;
        s->src_f1_bottom |= 0x0000ffff & value;
        break;

    case 0x308: /* SYS_DMA_LCD_BOT_F1_U */
        s->src_f1_bottom &= 0x0000ffff;
        s->src_f1_bottom |= (uint32_t)value << 16;
        break;

    case 0x30a: /* SYS_DMA_LCD_TOP_F2_L */
        s->src_f2_top &= 0xffff0000;
        s->src_f2_top |= 0x0000ffff & value;
        break;

    case 0x30c: /* SYS_DMA_LCD_TOP_F2_U */
        s->src_f2_top &= 0x0000ffff;
        s->src_f2_top |= (uint32_t)value << 16;
        break;

    case 0x30e: /* SYS_DMA_LCD_BOT_F2_L */
        s->src_f2_bottom &= 0xffff0000;
        s->src_f2_bottom |= 0x0000ffff & value;
        break;

    case 0x310: /* SYS_DMA_LCD_BOT_F2_U */
        s->src_f2_bottom &= 0x0000ffff;
        s->src_f2_bottom |= (uint32_t)value << 16;
        break;

    default:
        return 1;
    }
    return 0;
}

/*
 * Read one 16-bit LCD channel register in the OMAP 3.1 compatibility
 * layout into @ret.  Returns 0 on success, nonzero for unknown offsets.
 */
static int omap_dma_3_1_lcd_read(struct omap_dma_lcd_channel_s *s, int offset,
                uint16_t *ret)
{
    int i;

    switch (offset) {
    case 0x300: /* SYS_DMA_LCD_CTRL */
        /* Reading CTRL returns-and-clears the condition bits and
         * deasserts the LCD channel irq. */
        i = s->condition;
        s->condition = 0;
        qemu_irq_lower(s->irq);
        *ret = ((s->src == imif) << 6) | (i << 3) |
                (s->interrupts << 1) | s->dual;
        break;

    case 0x302: /* SYS_DMA_LCD_TOP_F1_L */
        *ret = s->src_f1_top & 0xffff;
        break;

    case 0x304: /* SYS_DMA_LCD_TOP_F1_U */
        *ret = s->src_f1_top >> 16;
        break;

    case 0x306: /* SYS_DMA_LCD_BOT_F1_L */
        *ret = s->src_f1_bottom & 0xffff;
        break;

    case 0x308: /* SYS_DMA_LCD_BOT_F1_U */
        *ret = s->src_f1_bottom >> 16;
        break;

    case 0x30a: /* SYS_DMA_LCD_TOP_F2_L */
        *ret = s->src_f2_top & 0xffff;
        break;

    case 0x30c: /* SYS_DMA_LCD_TOP_F2_U */
        *ret = s->src_f2_top >> 16;
        break;

    case 0x30e: /* SYS_DMA_LCD_BOT_F2_L */
        *ret = s->src_f2_bottom & 0xffff;
        break;

    case 0x310: /* SYS_DMA_LCD_BOT_F2_U */
        *ret = s->src_f2_bottom >> 16;
        break;

    default:
        return 1;
    }
    return 0;
}

/*
 * Write a global (system) DMA register.  Returns 0 on success,
 * nonzero if @offset is not a known register.
 */
static int omap_dma_sys_write(struct omap_dma_s *s, int offset, uint16_t value)
{
    switch (offset) {
    case 0x400: /* SYS_DMA_GCR */
        s->gcr = value;
        break;

    case 0x404: /* DMA_GSCR */
        /* Bit 3 selects between the OMAP 3.1 and 3.2 register layouts. */
        if (value & 0x8)
            omap_dma_disable_3_1_mapping(s);
        else
            omap_dma_enable_3_1_mapping(s);
        break;

    case 0x408: /* DMA_GRST */
        if (value & 0x1)
            omap_dma_reset(s->dma);
        break;

    default:
        return 1;
    }
    return 0;
}

/*
 * Read a global (system) DMA register into @ret.  Returns 0 on
 * success, nonzero if @offset is not a known register.
 */
static int omap_dma_sys_read(struct omap_dma_s *s, int offset,
                uint16_t *ret)
{
    switch (offset) {
    case 0x400: /* SYS_DMA_GCR */
        *ret = s->gcr;
        break;

    case 0x404: /* DMA_GSCR */
        *ret = s->omap_3_1_mapping_disabled << 3;
        break;

    case 0x408: /* DMA_GRST */
        *ret = 0;
        break;

    case 0x442: /* DMA_HW_ID */
    case 0x444: /* DMA_PCh2_ID */
    case 0x446: /* DMA_PCh0_ID */
    case 0x448: /* DMA_PCh1_ID */
    case 0x44a: /* DMA_PChG_ID */
    case 0x44c: /* DMA_PChD_ID */
        *ret = 1;
        break;

    case 0x44e: /* DMA_CAPS_0_U */
        *ret = (s->caps[0] >> 16) & 0xffff;
        break;
    case 0x450: /* DMA_CAPS_0_L */
        *ret = (s->caps[0] >> 0) & 0xffff;
        break;

    case 0x452: /* DMA_CAPS_1_U */
        *ret = (s->caps[1] >> 16) & 0xffff;
        break;
    case 0x454: /* DMA_CAPS_1_L */
        *ret = (s->caps[1] >> 0) & 0xffff;
        break;

    case 0x456: /* DMA_CAPS_2 */
        *ret =
s->caps[2];
        break;

    case 0x458: /* DMA_CAPS_3 */
        *ret = s->caps[3];
        break;

    case 0x45a: /* DMA_CAPS_4 */
        *ret = s->caps[4];
        break;

    case 0x460: /* DMA_PCh2_SR */
    case 0x480: /* DMA_PCh0_SR */
    case 0x482: /* DMA_PCh1_SR */
    case 0x4c0: /* DMA_PChD_SR_0 */
        qemu_log_mask(LOG_UNIMP,
                      "%s: Physical Channel Status Registers not implemented\n",
                      __func__);
        *ret = 0xff;
        break;

    default:
        return 1;
    }
    return 0;
}

/*
 * MMIO read dispatch for the OMAP1 DMA module.  Only 16-bit accesses
 * are valid; other widths are reported as bad-width accesses.  The
 * 0x300 and 0xb00 windows decode to the LCD channel in the 3.1 or 3.2
 * layout depending on the mapping currently selected via DMA_GSCR.
 */
static uint64_t omap_dma_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    struct omap_dma_s *s = (struct omap_dma_s *) opaque;
    int reg, ch;
    uint16_t ret;

    if (size != 2) {
        return omap_badwidth_read16(opaque, addr);
    }

    switch (addr) {
    case 0x300 ... 0x3fe:
        if (s->model <= omap_dma_3_1 || !s->omap_3_1_mapping_disabled) {
            if (omap_dma_3_1_lcd_read(&s->lcd_ch, addr, &ret))
                break;
            return ret;
        }
        /* Fall through. */
    case 0x000 ... 0x2fe:
        /* Logical channel registers: 0x40 bytes per channel. */
        reg = addr & 0x3f;
        ch = (addr >> 6) & 0x0f;
        if (omap_dma_ch_reg_read(s, &s->ch[ch], reg, &ret))
            break;
        return ret;

    case 0x404 ... 0x4fe:
        if (s->model <= omap_dma_3_1)
            break;
        /* Fall through. */
    case 0x400:
        if (omap_dma_sys_read(s, addr, &ret))
            break;
        return ret;

    case 0xb00 ... 0xbfe:
        if (s->model == omap_dma_3_2 && s->omap_3_1_mapping_disabled) {
            if (omap_dma_3_2_lcd_read(&s->lcd_ch, addr, &ret))
                break;
            return ret;
        }
        break;
    }

    OMAP_BAD_REG(addr);
    return 0;
}

/*
 * MMIO write dispatch for the OMAP1 DMA module; mirrors the address
 * decoding of omap_dma_read().
 */
static void omap_dma_write(void *opaque, hwaddr addr,
                           uint64_t value, unsigned size)
{
    struct omap_dma_s *s = (struct omap_dma_s *) opaque;
    int reg, ch;

    if (size != 2) {
        omap_badwidth_write16(opaque, addr, value);
        return;
    }

    switch (addr) {
    case 0x300 ... 0x3fe:
        if (s->model <= omap_dma_3_1 || !s->omap_3_1_mapping_disabled) {
            if (omap_dma_3_1_lcd_write(&s->lcd_ch, addr, value))
                break;
            return;
        }
        /* Fall through. */
    case 0x000 ... 0x2fe:
        reg = addr & 0x3f;
        ch = (addr >> 6) & 0x0f;
        if (omap_dma_ch_reg_write(s, &s->ch[ch], reg, value))
            break;
        return;

    case 0x404 ... 0x4fe:
        if (s->model <= omap_dma_3_1)
            break;
        /* Fall through. */
    case 0x400:
        if (omap_dma_sys_write(s, addr, value))
            break;
        return;

    case 0xb00 ... 0xbfe:
        if (s->model == omap_dma_3_2 && s->omap_3_1_mapping_disabled) {
            if (omap_dma_3_2_lcd_write(&s->lcd_ch, addr, value))
                break;
            return;
        }
        break;
    }

    OMAP_BAD_REG(addr);
}

static const MemoryRegionOps omap_dma_ops = {
    .read = omap_dma_read,
    .write = omap_dma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/*
 * DMA request line handler: a rising edge latches the request in
 * drqbmp and kicks request processing; a falling edge only clears
 * the latch.
 */
static void omap_dma_request(void *opaque, int drq, int req)
{
    struct omap_dma_s *s = (struct omap_dma_s *) opaque;
    /* The request pins are level triggered in QEMU. */
    if (req) {
        if (~s->dma->drqbmp & (1ULL << drq)) {
            s->dma->drqbmp |= 1ULL << drq;
            omap_dma_process_request(s, drq);
        }
    } else
        s->dma->drqbmp &= ~(1ULL << drq);
}

/* XXX: this won't be needed once soc_dma knows about clocks.
 */
static void omap_dma_clk_update(void *opaque, int line, int on)
{
    struct omap_dma_s *s = (struct omap_dma_s *) opaque;
    int i;

    /* Propagate the new clock rate and (de)assert the transfer request
     * of every currently active channel. */
    s->dma->freq = omap_clk_getrate(s->clk);

    for (i = 0; i < s->chans; i ++)
        if (s->ch[i].active)
            soc_dma_set_request(s->ch[i].dma, on);
}

/*
 * Initialise the DMA_CAPS_* capability registers according to the
 * modelled DMA revision.  DMA 3.1 exposes no capability registers.
 */
static void omap_dma_setcaps(struct omap_dma_s *s)
{
    switch (s->model) {
    default:
    case omap_dma_3_1:
        break;
    case omap_dma_3_2:
    case omap_dma_4:
        /* XXX Only available for sDMA */
        s->caps[0] =
                (1 << 19) |     /* Constant Fill Capability */
                (1 << 18);      /* Transparent BLT Capability */
        s->caps[1] =
                (1 << 1);       /* 1-bit palettized capability (DMA 3.2 only) */
        s->caps[2] =
                (1 << 8) |      /* SEPARATE_SRC_AND_DST_INDEX_CPBLTY */
                (1 << 7) |      /* DST_DOUBLE_INDEX_ADRS_CPBLTY */
                (1 << 6) |      /* DST_SINGLE_INDEX_ADRS_CPBLTY */
                (1 << 5) |      /* DST_POST_INCRMNT_ADRS_CPBLTY */
                (1 << 4) |      /* DST_CONST_ADRS_CPBLTY */
                (1 << 3) |      /* SRC_DOUBLE_INDEX_ADRS_CPBLTY */
                (1 << 2) |      /* SRC_SINGLE_INDEX_ADRS_CPBLTY */
                (1 << 1) |      /* SRC_POST_INCRMNT_ADRS_CPBLTY */
                (1 << 0);       /* SRC_CONST_ADRS_CPBLTY */
        s->caps[3] =
                (1 << 6) |      /* BLOCK_SYNCHR_CPBLTY (DMA 4 only) */
                (1 << 7) |      /* PKT_SYNCHR_CPBLTY (DMA 4 only) */
                (1 << 5) |      /* CHANNEL_CHAINING_CPBLTY */
                (1 << 4) |      /* LCh_INTERLEAVE_CPBLTY */
                (1 << 3) |      /* AUTOINIT_REPEAT_CPBLTY (DMA 3.2 only) */
                (1 << 2) |      /* AUTOINIT_ENDPROG_CPBLTY (DMA 3.2 only) */
                (1 << 1) |      /* FRAME_SYNCHR_CPBLTY */
                (1 << 0);       /* ELMNT_SYNCHR_CPBLTY */
        s->caps[4] =
                (1 << 7) |      /* PKT_INTERRUPT_CPBLTY (DMA 4 only) */
                (1 << 6) |      /* SYNC_STATUS_CPBLTY */
                (1 << 5) |      /* BLOCK_INTERRUPT_CPBLTY */
                (1 << 4) |      /* LAST_FRAME_INTERRUPT_CPBLTY */
                (1 << 3) |      /* FRAME_INTERRUPT_CPBLTY */
                (1 << 2) |      /* HALF_FRAME_INTERRUPT_CPBLTY */
                (1 << 1) |      /* EVENT_DROP_INTERRUPT_CPBLTY */
                (1 <<
0);     /* TIMEOUT_INTERRUPT_CPBLTY (DMA 3.2 only) */
        break;
    }
}

/*
 * Create and map an OMAP1 DMA module (revision per @model) at @base.
 * @irqs holds the per-channel interrupt lines and @lcd_irq the
 * dedicated LCD channel interrupt.  Returns the soc_dma instance.
 */
struct soc_dma_s *omap_dma_init(hwaddr base, qemu_irq *irqs,
                MemoryRegion *sysmem,
                qemu_irq lcd_irq, struct omap_mpu_state_s *mpu, omap_clk clk,
                enum omap_dma_model model)
{
    int num_irqs, memsize, i;
    struct omap_dma_s *s = g_new0(struct omap_dma_s, 1);

    /* DMA 3.0/3.1 has 6 irq lines and a 2KiB window; 3.2 has 16 and
     * 3KiB. */
    if (model <= omap_dma_3_1) {
        num_irqs = 6;
        memsize = 0x800;
    } else {
        num_irqs = 16;
        memsize = 0xc00;
    }
    s->model = model;
    s->mpu = mpu;
    s->clk = clk;
    s->lcd_ch.irq = lcd_irq;
    s->lcd_ch.mpu = mpu;

    s->dma = soc_dma_init((model <= omap_dma_3_1) ? 9 : 16);
    s->dma->freq = omap_clk_getrate(clk);
    s->dma->transfer_fn = omap_dma_transfer_generic;
    s->dma->setup_fn = omap_dma_transfer_setup;
    s->dma->drq = qemu_allocate_irqs(omap_dma_request, s, 32);
    s->dma->opaque = s;

    while (num_irqs --)
        s->ch[num_irqs].irq = irqs[num_irqs];
    for (i = 0; i < 3; i ++) {
        /* Channels 0-2 and 6-8 are paired as siblings. */
        s->ch[i].sibling = &s->ch[i + 6];
        s->ch[i + 6].sibling = &s->ch[i];
    }
    for (i = (model <= omap_dma_3_1) ?
8 : 15; i >= 0; i --) {
        s->ch[i].dma = &s->dma->ch[i];
        s->dma->ch[i].opaque = &s->ch[i];
    }

    omap_dma_setcaps(s);
    omap_clk_adduser(s->clk, qemu_allocate_irq(omap_dma_clk_update, s, 0));
    omap_dma_reset(s->dma);
    omap_dma_clk_update(s, 0, 1);

    memory_region_init_io(&s->iomem, NULL, &omap_dma_ops, s, "omap.dma", memsize);
    memory_region_add_subregion(sysmem, base, &s->iomem);

    mpu->drq = s->dma->drq;

    return s->dma;
}

/*
 * Recompute the four DMA4 interrupt lines: fold every channel's
 * pending status into its CSR latch, then raise each line whose
 * enabled status bits are (or become) set.
 */
static void omap_dma_interrupts_4_update(struct omap_dma_s *s)
{
    struct omap_dma_channel_s *ch = s->ch;
    uint32_t bmp, bit;

    for (bmp = 0, bit = 1; bit; ch ++, bit <<= 1)
        if (ch->status) {
            bmp |= bit;
            ch->cstatus |= ch->status;
            ch->status = 0;
        }
    if ((s->irqstat[0] |= s->irqen[0] & bmp))
        qemu_irq_raise(s->irq[0]);
    if ((s->irqstat[1] |= s->irqen[1] & bmp))
        qemu_irq_raise(s->irq[1]);
    if ((s->irqstat[2] |= s->irqen[2] & bmp))
        qemu_irq_raise(s->irq[2]);
    if ((s->irqstat[3] |= s->irqen[3] & bmp))
        qemu_irq_raise(s->irq[3]);
}

/*
 * MMIO read dispatch for the OMAP2+ sDMA (DMA4) module.  Global
 * registers live below 0x80; from 0x80 upwards the space decodes to
 * per-channel register banks of 0x60 bytes each.  Byte accesses are
 * rejected as bad-width.
 */
static uint64_t omap_dma4_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    struct omap_dma_s *s = (struct omap_dma_s *) opaque;
    int irqn = 0, chnum;
    struct omap_dma_channel_s *ch;

    if (size == 1) {
        return omap_badwidth_read16(opaque, addr);
    }

    switch (addr) {
    case 0x00: /* DMA4_REVISION */
        return 0x40;

    case 0x14: /* DMA4_IRQSTATUS_L3 */
        irqn ++;
        /* fall through */
    case 0x10: /* DMA4_IRQSTATUS_L2 */
        irqn ++;
        /* fall through */
    case 0x0c: /* DMA4_IRQSTATUS_L1 */
        irqn ++;
        /* fall through */
    case 0x08: /* DMA4_IRQSTATUS_L0 */
        return s->irqstat[irqn];

    case 0x24: /* DMA4_IRQENABLE_L3 */
        irqn ++;
        /* fall through */
    case 0x20: /* DMA4_IRQENABLE_L2 */
        irqn ++;
        /* fall through */
    case 0x1c: /* DMA4_IRQENABLE_L1 */
        irqn ++;
        /* fall through */
    case 0x18: /* DMA4_IRQENABLE_L0 */
        return s->irqen[irqn];

    case 0x28: /* DMA4_SYSSTATUS */
        return 1; /* RESETDONE */

    case 0x2c: /* DMA4_OCP_SYSCONFIG */
        return s->ocp;

    case 0x64: /* DMA4_CAPS_0 */
        return s->caps[0];
    case 0x6c: /* DMA4_CAPS_2 */
        return s->caps[2];
    case 0x70: /* DMA4_CAPS_3 */
        return s->caps[3];
    case 0x74: /* DMA4_CAPS_4 */
        return s->caps[4];

    case 0x78: /* DMA4_GCR */
        return s->gcr;

    case 0x80 ... 0xfff:
        /* Per-channel bank: 0x60 bytes per channel starting at 0x80. */
        addr -= 0x80;
        chnum = addr / 0x60;
        ch = s->ch + chnum;
        addr -= chnum * 0x60;
        break;

    default:
        OMAP_BAD_REG(addr);
        return 0;
    }

    /* Per-channel registers */
    switch (addr) {
    case 0x00: /* DMA4_CCR */
        return (ch->buf_disable << 25) |
            (ch->src_sync << 24) |
            (ch->prefetch << 23) |
            ((ch->sync & 0x60) << 14) |
            (ch->bs << 18) |
            (ch->transparent_copy << 17) |
            (ch->constant_fill << 16) |
            (ch->mode[1] << 14) |
            (ch->mode[0] << 12) |
            (0 << 10) | (0 << 9) |
            (ch->suspend << 8) |
            (ch->enable << 7) |
            (ch->priority << 6) |
            (ch->fs << 5) | (ch->sync & 0x1f);

    case 0x04: /* DMA4_CLNK_CTRL */
        return (ch->link_enabled << 15) | ch->link_next_ch;

    case 0x08: /* DMA4_CICR */
        return ch->interrupts;

    case 0x0c: /* DMA4_CSR */
        return ch->cstatus;

    case 0x10: /* DMA4_CSDP */
        return (ch->endian[0] << 21) |
            (ch->endian_lock[0] << 20) |
            (ch->endian[1] << 19) |
            (ch->endian_lock[1] << 18) |
            (ch->write_mode << 16) |
            (ch->burst[1] << 14) |
            (ch->pack[1] << 13) |
            (ch->translate[1] << 9) |
            (ch->burst[0] << 7) |
            (ch->pack[0] << 6) |
            (ch->translate[0] << 2) |
            (ch->data_type >> 1);

    case 0x14: /* DMA4_CEN */
        return ch->elements;

    case 0x18: /* DMA4_CFN */
return ch->frames;

    case 0x1c: /* DMA4_CSSA */
        return ch->addr[0];

    case 0x20: /* DMA4_CDSA */
        return ch->addr[1];

    case 0x24: /* DMA4_CSEI */
        return ch->element_index[0];

    case 0x28: /* DMA4_CSFI */
        return ch->frame_index[0];

    case 0x2c: /* DMA4_CDEI */
        return ch->element_index[1];

    case 0x30: /* DMA4_CDFI */
        return ch->frame_index[1];

    case 0x34: /* DMA4_CSAC */
        return ch->active_set.src & 0xffff;

    case 0x38: /* DMA4_CDAC */
        return ch->active_set.dest & 0xffff;

    case 0x3c: /* DMA4_CCEN */
        return ch->active_set.element;

    case 0x40: /* DMA4_CCFN */
        return ch->active_set.frame;

    case 0x44: /* DMA4_COLOR */
        /* XXX only in sDMA */
        return ch->color;

    default:
        OMAP_BAD_REG(addr);
        return 0;
    }
}

/*
 * MMIO write dispatch for the OMAP2+ sDMA (DMA4) module; same address
 * decoding as omap_dma4_read().  Byte accesses are rejected as
 * bad-width.
 */
static void omap_dma4_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    struct omap_dma_s *s = (struct omap_dma_s *) opaque;
    int chnum, irqn = 0;
    struct omap_dma_channel_s *ch;

    if (size == 1) {
        omap_badwidth_write16(opaque, addr, value);
        return;
    }

    switch (addr) {
    case 0x14: /* DMA4_IRQSTATUS_L3 */
        irqn ++;
        /* fall through */
    case 0x10: /* DMA4_IRQSTATUS_L2 */
        irqn ++;
        /* fall through */
    case 0x0c: /* DMA4_IRQSTATUS_L1 */
        irqn ++;
        /* fall through */
    case 0x08: /* DMA4_IRQSTATUS_L0 */
        /* Status bits are write-one-to-clear; drop the line once no
         * bits remain set. */
        s->irqstat[irqn] &= ~value;
        if (!s->irqstat[irqn])
            qemu_irq_lower(s->irq[irqn]);
        return;

    case 0x24: /* DMA4_IRQENABLE_L3 */
        irqn ++;
        /* fall through */
    case 0x20: /* DMA4_IRQENABLE_L2 */
        irqn ++;
        /* fall through */
    case 0x1c: /* DMA4_IRQENABLE_L1 */
        irqn ++;
        /* fall through */
    case 0x18: /* DMA4_IRQENABLE_L0 */
        s->irqen[irqn] = value;
        return;

    case 0x2c: /* DMA4_OCP_SYSCONFIG */
        if (value & 2) /* SOFTRESET */
            omap_dma_reset(s->dma);
        s->ocp = value & 0x3321;
        if (((s->ocp >> 12) & 3) == 3) { /* MIDLEMODE */
            qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid DMA power mode\n",
                          __func__);
        }
        return;

    case 0x78: /* DMA4_GCR */
        s->gcr = value & 0x00ff00ff;
        if ((value & 0xff) == 0x00) { /* MAX_CHANNEL_FIFO_DEPTH */
            qemu_log_mask(LOG_GUEST_ERROR, "%s: wrong FIFO depth in GCR\n",
                          __func__);
        }
        return;

    case 0x80 ... 0xfff:
        /* Per-channel bank: 0x60 bytes per channel starting at 0x80. */
        addr -= 0x80;
        chnum = addr / 0x60;
        ch = s->ch + chnum;
        addr -= chnum * 0x60;
        break;

    case 0x00: /* DMA4_REVISION */
    case 0x28: /* DMA4_SYSSTATUS */
    case 0x64: /* DMA4_CAPS_0 */
    case 0x6c: /* DMA4_CAPS_2 */
    case 0x70: /* DMA4_CAPS_3 */
    case 0x74: /* DMA4_CAPS_4 */
        OMAP_RO_REG(addr);
        return;

    default:
        OMAP_BAD_REG(addr);
        return;
    }

    /* Per-channel registers */
    switch (addr) {
    case 0x00: /* DMA4_CCR */
        ch->buf_disable = (value >> 25) & 1;
        ch->src_sync = (value >> 24) & 1; /* XXX For CamDMA must be 1 */
        if (ch->buf_disable && !ch->src_sync) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Buffering disable is not allowed in "
                          "destination synchronised mode\n", __func__);
        }
        ch->prefetch = (value >> 23) & 1;
        ch->bs = (value >> 18) & 1;
        ch->transparent_copy = (value >> 17) & 1;
        ch->constant_fill = (value >> 16) & 1;
        ch->mode[1] = (omap_dma_addressing_t) ((value & 0xc000) >> 14);
        ch->mode[0] = (omap_dma_addressing_t) ((value & 0x3000) >> 12);
        ch->suspend = (value & 0x0100) >> 8;
        ch->priority = (value & 0x0040) >> 6;
        ch->fs = (value & 0x0020) >> 5;
        if (ch->fs && ch->bs && ch->mode[0] && ch->mode[1]) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: For a packet transfer at least one port "
                          "must be constant-addressed\n", __func__);
        }
        /* Sync id: low 5 bits plus the two high bits from 19-20. */
        ch->sync = (value & 0x001f) | ((value >> 14) & 0x0060);
        /* XXX must be 0x01 for CamDMA */

        if (value & 0x0080)
            omap_dma_enable_channel(s, ch);
        else
            omap_dma_disable_channel(s, ch);

        break;

    case 0x04: /* DMA4_CLNK_CTRL */
        ch->link_enabled = (value >> 15) & 0x1;
        ch->link_next_ch = value & 0x1f;
        break;

    case 0x08: /* DMA4_CICR */
        ch->interrupts = value & 0x09be;
        break;

    case 0x0c: /* DMA4_CSR */
        /* Write-one-to-clear channel status. */
        ch->cstatus &= ~value;
        break;

    case 0x10: /* DMA4_CSDP */
        ch->endian[0] = (value >> 21) & 1;
        ch->endian_lock[0] = (value >> 20) & 1;
        ch->endian[1] = (value >> 19) & 1;
        ch->endian_lock[1] = (value >> 18) & 1;
        if (ch->endian[0] != ch->endian[1]) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: DMA endianness conversion enable attempt\n",
                          __func__);
        }
        ch->write_mode = (value >> 16) & 3;
        ch->burst[1] = (value & 0xc000) >> 14;
        ch->pack[1] = (value & 0x2000) >> 13;
        ch->translate[1] = (value & 0x1e00) >> 9;
        ch->burst[0] = (value & 0x0180) >> 7;
        ch->pack[0] = (value & 0x0040) >> 6;
        ch->translate[0] = (value & 0x003c) >> 2;
        if (ch->translate[0] | ch->translate[1]) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: bad MReqAddressTranslate sideband signal\n",
                          __func__);
        }
        /* Element size in bytes: 1, 2 or 4; the encoding 3 is invalid
         * and is clamped back to 4. */
        ch->data_type = 1 << (value & 3);
        if ((value & 3) == 3) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: bad data_type for DMA channel\n", __func__);
            ch->data_type >>= 1;
        }
        break;

    case 0x14: /* DMA4_CEN */
        ch->set_update = 1;
        ch->elements = value & 0xffffff;
        break;

    case 0x18: /* DMA4_CFN */
        ch->frames = value & 0xffff;
        ch->set_update = 1;
        break;

    case 0x1c: /* DMA4_CSSA */
        ch->addr[0] = (hwaddr) (uint32_t) value;
        ch->set_update = 1;
        break;

    case 0x20: /* DMA4_CDSA */
        ch->addr[1] = (hwaddr) (uint32_t) value;
        ch->set_update = 1;
        break;

    case 0x24:
/* DMA4_CSEI */
        ch->element_index[0] = (int16_t) value;
        ch->set_update = 1;
        break;

    case 0x28: /* DMA4_CSFI */
        ch->frame_index[0] = (int32_t) value;
        ch->set_update = 1;
        break;

    case 0x2c: /* DMA4_CDEI */
        ch->element_index[1] = (int16_t) value;
        ch->set_update = 1;
        break;

    case 0x30: /* DMA4_CDFI */
        ch->frame_index[1] = (int32_t) value;
        ch->set_update = 1;
        break;

    case 0x44: /* DMA4_COLOR */
        /* XXX only in sDMA */
        ch->color = value;
        break;

    case 0x34: /* DMA4_CSAC */
    case 0x38: /* DMA4_CDAC */
    case 0x3c: /* DMA4_CCEN */
    case 0x40: /* DMA4_CCFN */
        OMAP_RO_REG(addr);
        break;

    default:
        OMAP_BAD_REG(addr);
    }
}

static const MemoryRegionOps omap_dma4_ops = {
    .read = omap_dma4_read,
    .write = omap_dma4_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/*
 * Create and map an OMAP2+ sDMA (DMA4) module at @base with @chans
 * channels.  Returns the underlying soc_dma instance.
 */
struct soc_dma_s *omap_dma4_init(hwaddr base, qemu_irq *irqs,
                MemoryRegion *sysmem,
                struct omap_mpu_state_s *mpu, int fifo,
                int chans, omap_clk iclk, omap_clk fclk)
{
    int i;
    struct omap_dma_s *s = g_new0(struct omap_dma_s, 1);

    s->model = omap_dma_4;
    s->chans = chans;
    s->mpu = mpu;
    s->clk = fclk;

    s->dma = soc_dma_init(s->chans);
    s->dma->freq = omap_clk_getrate(fclk);
    s->dma->transfer_fn = omap_dma_transfer_generic;
    s->dma->setup_fn = omap_dma_transfer_setup;
    s->dma->drq = qemu_allocate_irqs(omap_dma_request, s, 64);
    s->dma->opaque = s;
    for (i = 0; i < s->chans; i ++) {
        s->ch[i].dma = &s->dma->ch[i];
        s->dma->ch[i].opaque = &s->ch[i];
    }

    memcpy(&s->irq, irqs, sizeof(s->irq));
    s->intr_update = omap_dma_interrupts_4_update;

    omap_dma_setcaps(s);
    omap_clk_adduser(s->clk, qemu_allocate_irq(omap_dma_clk_update, s, 0));
    omap_dma_reset(s->dma);
    omap_dma_clk_update(s, 0, !!s->dma->freq);

    memory_region_init_io(&s->iomem, NULL, &omap_dma4_ops, s, "omap.dma4", 0x1000);
    memory_region_add_subregion(sysmem, base, &s->iomem);

    mpu->drq = s->dma->drq;

    return s->dma;
}

/* Return the LCD DMA channel state embedded in an OMAP1 DMA module. */
struct omap_dma_lcd_channel_s *omap_dma_get_lcdch(struct soc_dma_s *dma)
{
    struct omap_dma_s *s = dma->opaque;

    return &s->lcd_ch;
}