1 /* 2 * PowerMac descriptor-based DMA emulation 3 * 4 * Copyright (c) 2005-2007 Fabrice Bellard 5 * Copyright (c) 2007 Jocelyn Mayer 6 * Copyright (c) 2009 Laurent Vivier 7 * 8 * some parts from linux-2.6.28, arch/powerpc/include/asm/dbdma.h 9 * 10 * Definitions for using the Apple Descriptor-Based DMA controller 11 * in Power Macintosh computers. 12 * 13 * Copyright (C) 1996 Paul Mackerras. 14 * 15 * some parts from mol 0.9.71 16 * 17 * Descriptor based DMA emulation 18 * 19 * Copyright (C) 1998-2004 Samuel Rydh (samuel@ibrium.se) 20 * 21 * Permission is hereby granted, free of charge, to any person obtaining a copy 22 * of this software and associated documentation files (the "Software"), to deal 23 * in the Software without restriction, including without limitation the rights 24 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 25 * copies of the Software, and to permit persons to whom the Software is 26 * furnished to do so, subject to the following conditions: 27 * 28 * The above copyright notice and this permission notice shall be included in 29 * all copies or substantial portions of the Software. 30 * 31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 32 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 33 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 34 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 35 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 36 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 37 * THE SOFTWARE. 38 */ 39 40 #include "qemu/osdep.h" 41 #include "hw/irq.h" 42 #include "hw/ppc/mac_dbdma.h" 43 #include "migration/vmstate.h" 44 #include "qemu/main-loop.h" 45 #include "qemu/module.h" 46 #include "qemu/log.h" 47 #include "sysemu/dma.h" 48 49 /* debug DBDMA */ 50 #define DEBUG_DBDMA 0 51 #define DEBUG_DBDMA_CHANMASK ((1ull << DBDMA_CHANNELS) - 1) 52 53 #define DBDMA_DPRINTF(fmt, ...) do { \ 54 if (DEBUG_DBDMA) { \ 55 printf("DBDMA: " fmt , ## __VA_ARGS__); \ 56 } \ 57 } while (0) 58 59 #define DBDMA_DPRINTFCH(ch, fmt, ...) do { \ 60 if (DEBUG_DBDMA) { \ 61 if ((1ul << (ch)->channel) & DEBUG_DBDMA_CHANMASK) { \ 62 printf("DBDMA[%02x]: " fmt , (ch)->channel, ## __VA_ARGS__); \ 63 } \ 64 } \ 65 } while (0) 66 67 /* 68 */ 69 70 static DBDMAState *dbdma_from_ch(DBDMA_channel *ch) 71 { 72 return container_of(ch, DBDMAState, channels[ch->channel]); 73 } 74 75 #if DEBUG_DBDMA 76 static void dump_dbdma_cmd(DBDMA_channel *ch, dbdma_cmd *cmd) 77 { 78 DBDMA_DPRINTFCH(ch, "dbdma_cmd %p\n", cmd); 79 DBDMA_DPRINTFCH(ch, " req_count 0x%04x\n", le16_to_cpu(cmd->req_count)); 80 DBDMA_DPRINTFCH(ch, " command 0x%04x\n", le16_to_cpu(cmd->command)); 81 DBDMA_DPRINTFCH(ch, " phy_addr 0x%08x\n", le32_to_cpu(cmd->phy_addr)); 82 DBDMA_DPRINTFCH(ch, " cmd_dep 0x%08x\n", le32_to_cpu(cmd->cmd_dep)); 83 DBDMA_DPRINTFCH(ch, " res_count 0x%04x\n", le16_to_cpu(cmd->res_count)); 84 DBDMA_DPRINTFCH(ch, " xfer_status 0x%04x\n", 85 le16_to_cpu(cmd->xfer_status)); 86 } 87 #else 88 static void dump_dbdma_cmd(DBDMA_channel *ch, dbdma_cmd *cmd) 89 { 90 } 91 #endif 92 static void dbdma_cmdptr_load(DBDMA_channel *ch) 93 { 94 DBDMA_DPRINTFCH(ch, "dbdma_cmdptr_load 0x%08x\n", 95 ch->regs[DBDMA_CMDPTR_LO]); 96 dma_memory_read(&address_space_memory, ch->regs[DBDMA_CMDPTR_LO], 97 &ch->current, sizeof(dbdma_cmd), MEMTXATTRS_UNSPECIFIED); 98 } 99 100 static void dbdma_cmdptr_save(DBDMA_channel *ch) 101 { 102 DBDMA_DPRINTFCH(ch, "-> update 0x%08x stat=0x%08x, res=0x%04x\n", 103 ch->regs[DBDMA_CMDPTR_LO], 104 le16_to_cpu(ch->current.xfer_status), 105 le16_to_cpu(ch->current.res_count)); 106 dma_memory_write(&address_space_memory, ch->regs[DBDMA_CMDPTR_LO], 107 &ch->current, sizeof(dbdma_cmd), MEMTXATTRS_UNSPECIFIED); 108 } 109 110 static void kill_channel(DBDMA_channel *ch) 111 { 112 DBDMA_DPRINTFCH(ch, "kill_channel\n"); 113 114 ch->regs[DBDMA_STATUS] |= DEAD; 115 ch->regs[DBDMA_STATUS] &= ~ACTIVE; 116 117 qemu_irq_raise(ch->irq); 118 } 119 120 static void conditional_interrupt(DBDMA_channel *ch) 121 { 122 dbdma_cmd *current = &ch->current; 123 uint16_t intr; 124 uint16_t sel_mask, sel_value; 125 uint32_t status; 126 int cond; 127 128 DBDMA_DPRINTFCH(ch, "%s\n", __func__); 129 130 intr = le16_to_cpu(current->command) & INTR_MASK; 131 132 switch(intr) { 133 case INTR_NEVER: /* don't interrupt */ 134 return; 135 case INTR_ALWAYS: /* always interrupt */ 136 qemu_irq_raise(ch->irq); 137 DBDMA_DPRINTFCH(ch, "%s: raise\n", __func__); 138 return; 139 } 140 141 status = ch->regs[DBDMA_STATUS] & DEVSTAT; 142 143 sel_mask = (ch->regs[DBDMA_INTR_SEL] >> 16) & 0x0f; 144 sel_value = ch->regs[DBDMA_INTR_SEL] & 0x0f; 145 146 cond = (status & sel_mask) == (sel_value & sel_mask); 147 148 switch(intr) { 149 case INTR_IFSET: /* intr if condition bit is 1 */ 150 if (cond) { 151 qemu_irq_raise(ch->irq); 152 DBDMA_DPRINTFCH(ch, "%s: raise\n", __func__); 153 } 154 return; 155 case INTR_IFCLR: /* intr if condition bit is 0 */ 156 if (!cond) { 157 qemu_irq_raise(ch->irq); 158 DBDMA_DPRINTFCH(ch, "%s: raise\n", __func__); 159 } 160 return; 161 } 162 } 163 164 static int conditional_wait(DBDMA_channel *ch) 165 { 166 dbdma_cmd *current = &ch->current; 167 uint16_t wait; 168 uint16_t sel_mask, sel_value; 169 uint32_t status; 170 int cond; 171 int res = 0; 172 173 wait = le16_to_cpu(current->command) & WAIT_MASK; 174 switch(wait) { 175 case WAIT_NEVER: /* don't wait */ 176 return 0; 177 case WAIT_ALWAYS: /* always wait */ 178 DBDMA_DPRINTFCH(ch, " [WAIT_ALWAYS]\n"); 179 return 1; 180 } 181 182 status = ch->regs[DBDMA_STATUS] & DEVSTAT; 183 184 sel_mask = (ch->regs[DBDMA_WAIT_SEL] >> 16) & 0x0f; 185 sel_value = ch->regs[DBDMA_WAIT_SEL] & 0x0f; 186 187 cond = (status & sel_mask) == (sel_value & sel_mask); 188 189 switch(wait) { 190 case WAIT_IFSET: /* wait if condition bit is 1 */ 191 if (cond) { 192 res = 1; 193 } 194 DBDMA_DPRINTFCH(ch, " [WAIT_IFSET=%d]\n", res); 195 break; 196 case WAIT_IFCLR: /* wait if condition bit is 0 */ 197 if (!cond) { 198 res = 1; 199 } 200 DBDMA_DPRINTFCH(ch, " [WAIT_IFCLR=%d]\n", res); 201 break; 202 } 203 return res; 204 } 205 206 static void next(DBDMA_channel *ch) 207 { 208 uint32_t cp; 209 210 ch->regs[DBDMA_STATUS] &= ~BT; 211 212 cp = ch->regs[DBDMA_CMDPTR_LO]; 213 ch->regs[DBDMA_CMDPTR_LO] = cp + sizeof(dbdma_cmd); 214 dbdma_cmdptr_load(ch); 215 } 216 217 static void branch(DBDMA_channel *ch) 218 { 219 dbdma_cmd *current = &ch->current; 220 221 ch->regs[DBDMA_CMDPTR_LO] = le32_to_cpu(current->cmd_dep); 222 ch->regs[DBDMA_STATUS] |= BT; 223 dbdma_cmdptr_load(ch); 224 } 225 226 static void conditional_branch(DBDMA_channel *ch) 227 { 228 dbdma_cmd *current = &ch->current; 229 uint16_t br; 230 uint16_t sel_mask, sel_value; 231 uint32_t status; 232 int cond; 233 234 /* check if we must branch */ 235 236 br = le16_to_cpu(current->command) & BR_MASK; 237 238 switch(br) { 239 case BR_NEVER: /* don't branch */ 240 next(ch); 241 return; 242 case BR_ALWAYS: /* always branch */ 243 DBDMA_DPRINTFCH(ch, " [BR_ALWAYS]\n"); 244 branch(ch); 245 return; 246 } 247 248 status = ch->regs[DBDMA_STATUS] & DEVSTAT; 249 250 sel_mask = (ch->regs[DBDMA_BRANCH_SEL] >> 16) & 0x0f; 251 sel_value = ch->regs[DBDMA_BRANCH_SEL] & 0x0f; 252 253 cond = (status & sel_mask) == (sel_value & sel_mask); 254 255 switch(br) { 256 case BR_IFSET: /* branch if condition bit is 1 */ 257 if (cond) { 258 DBDMA_DPRINTFCH(ch, " [BR_IFSET = 1]\n"); 259 branch(ch); 260 } else { 261 DBDMA_DPRINTFCH(ch, " [BR_IFSET = 0]\n"); 262 next(ch); 263 } 264 return; 265 case BR_IFCLR: /* branch if condition bit is 0 */ 266 if (!cond) { 267 DBDMA_DPRINTFCH(ch, " [BR_IFCLR = 1]\n"); 268 branch(ch); 269 } else { 270 DBDMA_DPRINTFCH(ch, " [BR_IFCLR = 0]\n"); 271 next(ch); 272 } 273 return; 274 } 275 } 276 277 static void channel_run(DBDMA_channel *ch); 278 279 static void dbdma_end(DBDMA_io *io) 280 { 281 DBDMA_channel *ch = io->channel; 282 dbdma_cmd *current = &ch->current; 283 284 DBDMA_DPRINTFCH(ch, "%s\n", __func__); 285 286 if (conditional_wait(ch)) 287 goto wait; 288 289 current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]); 290 current->res_count = cpu_to_le16(io->len); 291 dbdma_cmdptr_save(ch); 292 if (io->is_last) 293 ch->regs[DBDMA_STATUS] &= ~FLUSH; 294 295 conditional_interrupt(ch); 296 conditional_branch(ch); 297 298 wait: 299 /* Indicate that we're ready for a new DMA round */ 300 ch->io.processing = false; 301 302 if ((ch->regs[DBDMA_STATUS] & RUN) && 303 (ch->regs[DBDMA_STATUS] & ACTIVE)) 304 channel_run(ch); 305 } 306 307 static void start_output(DBDMA_channel *ch, int key, uint32_t addr, 308 uint16_t req_count, int is_last) 309 { 310 DBDMA_DPRINTFCH(ch, "start_output\n"); 311 312 /* KEY_REGS, KEY_DEVICE and KEY_STREAM 313 * are not implemented in the mac-io chip 314 */ 315 316 DBDMA_DPRINTFCH(ch, "addr 0x%x key 0x%x\n", addr, key); 317 if (!addr || key > KEY_STREAM3) { 318 kill_channel(ch); 319 return; 320 } 321 322 ch->io.addr = addr; 323 ch->io.len = req_count; 324 ch->io.is_last = is_last; 325 ch->io.dma_end = dbdma_end; 326 ch->io.is_dma_out = 1; 327 ch->io.processing = true; 328 if (ch->rw) { 329 ch->rw(&ch->io); 330 } 331 } 332 333 static void start_input(DBDMA_channel *ch, int key, uint32_t addr, 334 uint16_t req_count, int is_last) 335 { 336 DBDMA_DPRINTFCH(ch, "start_input\n"); 337 338 /* KEY_REGS, KEY_DEVICE and KEY_STREAM 339 * are not implemented in the mac-io chip 340 */ 341 342 DBDMA_DPRINTFCH(ch, "addr 0x%x key 0x%x\n", addr, key); 343 if (!addr || key > KEY_STREAM3) { 344 kill_channel(ch); 345 return; 346 } 347 348 ch->io.addr = addr; 349 ch->io.len = req_count; 350 ch->io.is_last = is_last; 351 ch->io.dma_end = dbdma_end; 352 ch->io.is_dma_out = 0; 353 ch->io.processing = true; 354 if (ch->rw) { 355 ch->rw(&ch->io); 356 } 357 } 358 359 static void load_word(DBDMA_channel *ch, int key, uint32_t addr, 360 uint16_t len) 361 { 362 dbdma_cmd *current = &ch->current; 363 364 DBDMA_DPRINTFCH(ch, "load_word %d bytes, addr=%08x\n", len, addr); 365 366 /* only implements KEY_SYSTEM */ 367 368 if (key != KEY_SYSTEM) { 369 printf("DBDMA: LOAD_WORD, unimplemented key %x\n", key); 370 kill_channel(ch); 371 return; 372 } 373 374 dma_memory_read(&address_space_memory, addr, ¤t->cmd_dep, len, 375 MEMTXATTRS_UNSPECIFIED); 376 377 if (conditional_wait(ch)) 378 goto wait; 379 380 current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]); 381 dbdma_cmdptr_save(ch); 382 ch->regs[DBDMA_STATUS] &= ~FLUSH; 383 384 conditional_interrupt(ch); 385 next(ch); 386 387 wait: 388 DBDMA_kick(dbdma_from_ch(ch)); 389 } 390 391 static void store_word(DBDMA_channel *ch, int key, uint32_t addr, 392 uint16_t len) 393 { 394 dbdma_cmd *current = &ch->current; 395 396 DBDMA_DPRINTFCH(ch, "store_word %d bytes, addr=%08x pa=%x\n", 397 len, addr, le32_to_cpu(current->cmd_dep)); 398 399 /* only implements KEY_SYSTEM */ 400 401 if (key != KEY_SYSTEM) { 402 printf("DBDMA: STORE_WORD, unimplemented key %x\n", key); 403 kill_channel(ch); 404 return; 405 } 406 407 dma_memory_write(&address_space_memory, addr, ¤t->cmd_dep, len, 408 MEMTXATTRS_UNSPECIFIED); 409 410 if (conditional_wait(ch)) 411 goto wait; 412 413 current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]); 414 dbdma_cmdptr_save(ch); 415 ch->regs[DBDMA_STATUS] &= ~FLUSH; 416 417 conditional_interrupt(ch); 418 next(ch); 419 420 wait: 421 DBDMA_kick(dbdma_from_ch(ch)); 422 } 423 424 static void nop(DBDMA_channel *ch) 425 { 426 dbdma_cmd *current = &ch->current; 427 428 if (conditional_wait(ch)) 429 goto wait; 430 431 current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]); 432 dbdma_cmdptr_save(ch); 433 434 conditional_interrupt(ch); 435 conditional_branch(ch); 436 437 wait: 438 DBDMA_kick(dbdma_from_ch(ch)); 439 } 440 441 static void stop(DBDMA_channel *ch) 442 { 443 ch->regs[DBDMA_STATUS] &= ~(ACTIVE); 444 445 /* the stop command does not increment command pointer */ 446 } 447 448 static void channel_run(DBDMA_channel *ch) 449 { 450 dbdma_cmd *current = &ch->current; 451 uint16_t cmd, key; 452 uint16_t req_count; 453 uint32_t phy_addr; 454 455 DBDMA_DPRINTFCH(ch, "channel_run\n"); 456 dump_dbdma_cmd(ch, current); 457 458 /* clear WAKE flag at command fetch */ 459 460 ch->regs[DBDMA_STATUS] &= ~WAKE; 461 462 cmd = le16_to_cpu(current->command) & COMMAND_MASK; 463 464 switch (cmd) { 465 case DBDMA_NOP: 466 nop(ch); 467 return; 468 469 case DBDMA_STOP: 470 stop(ch); 471 return; 472 } 473 474 key = le16_to_cpu(current->command) & 0x0700; 475 req_count = le16_to_cpu(current->req_count); 476 phy_addr = le32_to_cpu(current->phy_addr); 477 478 if (key == KEY_STREAM4) { 479 printf("command %x, invalid key 4\n", cmd); 480 kill_channel(ch); 481 return; 482 } 483 484 switch (cmd) { 485 case OUTPUT_MORE: 486 DBDMA_DPRINTFCH(ch, "* OUTPUT_MORE *\n"); 487 start_output(ch, key, phy_addr, req_count, 0); 488 return; 489 490 case OUTPUT_LAST: 491 DBDMA_DPRINTFCH(ch, "* OUTPUT_LAST *\n"); 492 start_output(ch, key, phy_addr, req_count, 1); 493 return; 494 495 case INPUT_MORE: 496 DBDMA_DPRINTFCH(ch, "* INPUT_MORE *\n"); 497 start_input(ch, key, phy_addr, req_count, 0); 498 return; 499 500 case INPUT_LAST: 501 DBDMA_DPRINTFCH(ch, "* INPUT_LAST *\n"); 502 start_input(ch, key, phy_addr, req_count, 1); 503 return; 504 } 505 506 if (key < KEY_REGS) { 507 printf("command %x, invalid key %x\n", cmd, key); 508 key = KEY_SYSTEM; 509 } 510 511 /* for LOAD_WORD and STORE_WORD, req_count is on 3 bits 512 * and BRANCH is invalid 513 */ 514 515 req_count = req_count & 0x0007; 516 if (req_count & 0x4) { 517 req_count = 4; 518 phy_addr &= ~3; 519 } else if (req_count & 0x2) { 520 req_count = 2; 521 phy_addr &= ~1; 522 } else 523 req_count = 1; 524 525 switch (cmd) { 526 case LOAD_WORD: 527 DBDMA_DPRINTFCH(ch, "* LOAD_WORD *\n"); 528 load_word(ch, key, phy_addr, req_count); 529 return; 530 531 case STORE_WORD: 532 DBDMA_DPRINTFCH(ch, "* STORE_WORD *\n"); 533 store_word(ch, key, phy_addr, req_count); 534 return; 535 } 536 } 537 538 static void DBDMA_run(DBDMAState *s) 539 { 540 int channel; 541 542 for (channel = 0; channel < DBDMA_CHANNELS; channel++) { 543 DBDMA_channel *ch = &s->channels[channel]; 544 uint32_t status = ch->regs[DBDMA_STATUS]; 545 if (!ch->io.processing && (status & RUN) && (status & ACTIVE)) { 546 channel_run(ch); 547 } 548 } 549 } 550 551 static void DBDMA_run_bh(void *opaque) 552 { 553 DBDMAState *s = opaque; 554 555 DBDMA_DPRINTF("-> DBDMA_run_bh\n"); 556 DBDMA_run(s); 557 DBDMA_DPRINTF("<- DBDMA_run_bh\n"); 558 } 559 560 void DBDMA_kick(DBDMAState *dbdma) 561 { 562 qemu_bh_schedule(dbdma->bh); 563 } 564 565 void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq, 566 DBDMA_rw rw, DBDMA_flush flush, 567 void *opaque) 568 { 569 DBDMAState *s = dbdma; 570 DBDMA_channel *ch = &s->channels[nchan]; 571 572 DBDMA_DPRINTFCH(ch, "DBDMA_register_channel 0x%x\n", nchan); 573 574 assert(rw); 575 assert(flush); 576 577 ch->irq = irq; 578 ch->rw = rw; 579 ch->flush = flush; 580 ch->io.opaque = opaque; 581 } 582 583 static void dbdma_control_write(DBDMA_channel *ch) 584 { 585 uint16_t mask, value; 586 uint32_t status; 587 bool do_flush = false; 588 589 mask = (ch->regs[DBDMA_CONTROL] >> 16) & 0xffff; 590 value = ch->regs[DBDMA_CONTROL] & 0xffff; 591 592 /* This is the status register which we'll update 593 * appropriately and store back 594 */ 595 status = ch->regs[DBDMA_STATUS]; 596 597 /* RUN and PAUSE are bits under SW control only 598 * FLUSH and WAKE are set by SW and cleared by HW 599 * DEAD, ACTIVE and BT are only under HW control 600 * 601 * We handle ACTIVE separately at the end of the 602 * logic to ensure all cases are covered. 603 */ 604 605 /* Setting RUN will tentatively activate the channel 606 */ 607 if ((mask & RUN) && (value & RUN)) { 608 status |= RUN; 609 DBDMA_DPRINTFCH(ch, " Setting RUN !\n"); 610 } 611 612 /* Clearing RUN 1->0 will stop the channel */ 613 if ((mask & RUN) && !(value & RUN)) { 614 /* This has the side effect of clearing the DEAD bit */ 615 status &= ~(DEAD | RUN); 616 DBDMA_DPRINTFCH(ch, " Clearing RUN !\n"); 617 } 618 619 /* Setting WAKE wakes up an idle channel if it's running 620 * 621 * Note: The doc doesn't say so but assume that only works 622 * on a channel whose RUN bit is set. 623 * 624 * We set WAKE in status, it's not terribly useful as it will 625 * be cleared on the next command fetch but it seems to mimmic 626 * the HW behaviour and is useful for the way we handle 627 * ACTIVE further down. 628 */ 629 if ((mask & WAKE) && (value & WAKE) && (status & RUN)) { 630 status |= WAKE; 631 DBDMA_DPRINTFCH(ch, " Setting WAKE !\n"); 632 } 633 634 /* PAUSE being set will deactivate (or prevent activation) 635 * of the channel. We just copy it over for now, ACTIVE will 636 * be re-evaluated later. 637 */ 638 if (mask & PAUSE) { 639 status = (status & ~PAUSE) | (value & PAUSE); 640 DBDMA_DPRINTFCH(ch, " %sing PAUSE !\n", 641 (value & PAUSE) ? "sett" : "clear"); 642 } 643 644 /* FLUSH is its own thing */ 645 if ((mask & FLUSH) && (value & FLUSH)) { 646 DBDMA_DPRINTFCH(ch, " Setting FLUSH !\n"); 647 /* We set flush directly in the status register, we do *NOT* 648 * set it in "status" so that it gets naturally cleared when 649 * we update the status register further down. That way it 650 * will be set only during the HW flush operation so it is 651 * visible to any completions happening during that time. 652 */ 653 ch->regs[DBDMA_STATUS] |= FLUSH; 654 do_flush = true; 655 } 656 657 /* If either RUN or PAUSE is clear, so should ACTIVE be, 658 * otherwise, ACTIVE will be set if we modified RUN, PAUSE or 659 * set WAKE. That means that PAUSE was just cleared, RUN was 660 * just set or WAKE was just set. 661 */ 662 if ((status & PAUSE) || !(status & RUN)) { 663 status &= ~ACTIVE; 664 DBDMA_DPRINTFCH(ch, " -> ACTIVE down !\n"); 665 666 /* We stopped processing, we want the underlying HW command 667 * to complete *before* we clear the ACTIVE bit. Otherwise 668 * we can get into a situation where the command status will 669 * have RUN or ACTIVE not set which is going to confuse the 670 * MacOS driver. 671 */ 672 do_flush = true; 673 } else if (mask & (RUN | PAUSE)) { 674 status |= ACTIVE; 675 DBDMA_DPRINTFCH(ch, " -> ACTIVE up !\n"); 676 } else if ((mask & WAKE) && (value & WAKE)) { 677 status |= ACTIVE; 678 DBDMA_DPRINTFCH(ch, " -> ACTIVE up !\n"); 679 } 680 681 DBDMA_DPRINTFCH(ch, " new status=0x%08x\n", status); 682 683 /* If we need to flush the underlying HW, do it now, this happens 684 * both on FLUSH commands and when stopping the channel for safety. 685 */ 686 if (do_flush && ch->flush) { 687 ch->flush(&ch->io); 688 } 689 690 /* Finally update the status register image */ 691 ch->regs[DBDMA_STATUS] = status; 692 693 /* If active, make sure the BH gets to run */ 694 if (status & ACTIVE) { 695 DBDMA_kick(dbdma_from_ch(ch)); 696 } 697 } 698 699 static void dbdma_write(void *opaque, hwaddr addr, 700 uint64_t value, unsigned size) 701 { 702 int channel = addr >> DBDMA_CHANNEL_SHIFT; 703 DBDMAState *s = opaque; 704 DBDMA_channel *ch = &s->channels[channel]; 705 int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2; 706 707 DBDMA_DPRINTFCH(ch, "writel 0x" HWADDR_FMT_plx " <= 0x%08"PRIx64"\n", 708 addr, value); 709 DBDMA_DPRINTFCH(ch, "channel 0x%x reg 0x%x\n", 710 (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg); 711 712 /* cmdptr cannot be modified if channel is ACTIVE */ 713 714 if (reg == DBDMA_CMDPTR_LO && (ch->regs[DBDMA_STATUS] & ACTIVE)) { 715 return; 716 } 717 718 ch->regs[reg] = value; 719 720 switch(reg) { 721 case DBDMA_CONTROL: 722 dbdma_control_write(ch); 723 break; 724 case DBDMA_CMDPTR_LO: 725 /* 16-byte aligned */ 726 ch->regs[DBDMA_CMDPTR_LO] &= ~0xf; 727 dbdma_cmdptr_load(ch); 728 break; 729 case DBDMA_STATUS: 730 case DBDMA_INTR_SEL: 731 case DBDMA_BRANCH_SEL: 732 case DBDMA_WAIT_SEL: 733 /* nothing to do */ 734 break; 735 case DBDMA_XFER_MODE: 736 case DBDMA_CMDPTR_HI: 737 case DBDMA_DATA2PTR_HI: 738 case DBDMA_DATA2PTR_LO: 739 case DBDMA_ADDRESS_HI: 740 case DBDMA_BRANCH_ADDR_HI: 741 case DBDMA_RES1: 742 case DBDMA_RES2: 743 case DBDMA_RES3: 744 case DBDMA_RES4: 745 /* unused */ 746 break; 747 } 748 } 749 750 static uint64_t dbdma_read(void *opaque, hwaddr addr, 751 unsigned size) 752 { 753 uint32_t value; 754 int channel = addr >> DBDMA_CHANNEL_SHIFT; 755 DBDMAState *s = opaque; 756 DBDMA_channel *ch = &s->channels[channel]; 757 int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2; 758 759 value = ch->regs[reg]; 760 761 switch(reg) { 762 case DBDMA_CONTROL: 763 value = ch->regs[DBDMA_STATUS]; 764 break; 765 case DBDMA_STATUS: 766 case DBDMA_CMDPTR_LO: 767 case DBDMA_INTR_SEL: 768 case DBDMA_BRANCH_SEL: 769 case DBDMA_WAIT_SEL: 770 /* nothing to do */ 771 break; 772 case DBDMA_XFER_MODE: 773 case DBDMA_CMDPTR_HI: 774 case DBDMA_DATA2PTR_HI: 775 case DBDMA_DATA2PTR_LO: 776 case DBDMA_ADDRESS_HI: 777 case DBDMA_BRANCH_ADDR_HI: 778 /* unused */ 779 value = 0; 780 break; 781 case DBDMA_RES1: 782 case DBDMA_RES2: 783 case DBDMA_RES3: 784 case DBDMA_RES4: 785 /* reserved */ 786 break; 787 } 788 789 DBDMA_DPRINTFCH(ch, "readl 0x" HWADDR_FMT_plx " => 0x%08x\n", addr, value); 790 DBDMA_DPRINTFCH(ch, "channel 0x%x reg 0x%x\n", 791 (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg); 792 793 return value; 794 } 795 796 static const MemoryRegionOps dbdma_ops = { 797 .read = dbdma_read, 798 .write = dbdma_write, 799 .endianness = DEVICE_LITTLE_ENDIAN, 800 .valid = { 801 .min_access_size = 4, 802 .max_access_size = 4, 803 }, 804 }; 805 806 static const VMStateDescription vmstate_dbdma_io = { 807 .name = "dbdma_io", 808 .version_id = 0, 809 .minimum_version_id = 0, 810 .fields = (const VMStateField[]) { 811 VMSTATE_UINT64(addr, struct DBDMA_io), 812 VMSTATE_INT32(len, struct DBDMA_io), 813 VMSTATE_INT32(is_last, struct DBDMA_io), 814 VMSTATE_INT32(is_dma_out, struct DBDMA_io), 815 VMSTATE_BOOL(processing, struct DBDMA_io), 816 VMSTATE_END_OF_LIST() 817 } 818 }; 819 820 static const VMStateDescription vmstate_dbdma_cmd = { 821 .name = "dbdma_cmd", 822 .version_id = 0, 823 .minimum_version_id = 0, 824 .fields = (const VMStateField[]) { 825 VMSTATE_UINT16(req_count, dbdma_cmd), 826 VMSTATE_UINT16(command, dbdma_cmd), 827 VMSTATE_UINT32(phy_addr, dbdma_cmd), 828 VMSTATE_UINT32(cmd_dep, dbdma_cmd), 829 VMSTATE_UINT16(res_count, dbdma_cmd), 830 VMSTATE_UINT16(xfer_status, dbdma_cmd), 831 VMSTATE_END_OF_LIST() 832 } 833 }; 834 835 static const VMStateDescription vmstate_dbdma_channel = { 836 .name = "dbdma_channel", 837 .version_id = 1, 838 .minimum_version_id = 1, 839 .fields = (const VMStateField[]) { 840 VMSTATE_UINT32_ARRAY(regs, struct DBDMA_channel, DBDMA_REGS), 841 VMSTATE_STRUCT(io, struct DBDMA_channel, 0, vmstate_dbdma_io, DBDMA_io), 842 VMSTATE_STRUCT(current, struct DBDMA_channel, 0, vmstate_dbdma_cmd, 843 dbdma_cmd), 844 VMSTATE_END_OF_LIST() 845 } 846 }; 847 848 static const VMStateDescription vmstate_dbdma = { 849 .name = "dbdma", 850 .version_id = 3, 851 .minimum_version_id = 3, 852 .fields = (const VMStateField[]) { 853 VMSTATE_STRUCT_ARRAY(channels, DBDMAState, DBDMA_CHANNELS, 1, 854 vmstate_dbdma_channel, DBDMA_channel), 855 VMSTATE_END_OF_LIST() 856 } 857 }; 858 859 static void mac_dbdma_reset(DeviceState *d) 860 { 861 DBDMAState *s = MAC_DBDMA(d); 862 int i; 863 864 for (i = 0; i < DBDMA_CHANNELS; i++) { 865 memset(s->channels[i].regs, 0, DBDMA_SIZE); 866 } 867 } 868 869 static void dbdma_unassigned_rw(DBDMA_io *io) 870 { 871 DBDMA_channel *ch = io->channel; 872 dbdma_cmd *current = &ch->current; 873 uint16_t cmd; 874 qemu_log_mask(LOG_GUEST_ERROR, "%s: use of unassigned channel %d\n", 875 __func__, ch->channel); 876 ch->io.processing = false; 877 878 cmd = le16_to_cpu(current->command) & COMMAND_MASK; 879 if (cmd == OUTPUT_MORE || cmd == OUTPUT_LAST || 880 cmd == INPUT_MORE || cmd == INPUT_LAST) { 881 current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]); 882 current->res_count = cpu_to_le16(io->len); 883 dbdma_cmdptr_save(ch); 884 } 885 } 886 887 static void dbdma_unassigned_flush(DBDMA_io *io) 888 { 889 DBDMA_channel *ch = io->channel; 890 qemu_log_mask(LOG_GUEST_ERROR, "%s: use of unassigned channel %d\n", 891 __func__, ch->channel); 892 } 893 894 static void mac_dbdma_init(Object *obj) 895 { 896 SysBusDevice *sbd = SYS_BUS_DEVICE(obj); 897 DBDMAState *s = MAC_DBDMA(obj); 898 int i; 899 900 for (i = 0; i < DBDMA_CHANNELS; i++) { 901 DBDMA_channel *ch = &s->channels[i]; 902 903 ch->rw = dbdma_unassigned_rw; 904 ch->flush = dbdma_unassigned_flush; 905 ch->channel = i; 906 ch->io.channel = ch; 907 } 908 909 memory_region_init_io(&s->mem, obj, &dbdma_ops, s, "dbdma", 0x1000); 910 sysbus_init_mmio(sbd, &s->mem); 911 } 912 913 static void mac_dbdma_realize(DeviceState *dev, Error **errp) 914 { 915 DBDMAState *s = MAC_DBDMA(dev); 916 917 s->bh = qemu_bh_new_guarded(DBDMA_run_bh, s, &dev->mem_reentrancy_guard); 918 } 919 920 static void mac_dbdma_class_init(ObjectClass *oc, void *data) 921 { 922 DeviceClass *dc = DEVICE_CLASS(oc); 923 924 dc->realize = mac_dbdma_realize; 925 dc->reset = mac_dbdma_reset; 926 dc->vmsd = &vmstate_dbdma; 927 } 928 929 static const TypeInfo mac_dbdma_type_info = { 930 .name = TYPE_MAC_DBDMA, 931 .parent = TYPE_SYS_BUS_DEVICE, 932 .instance_size = sizeof(DBDMAState), 933 .instance_init = mac_dbdma_init, 934 .class_init = mac_dbdma_class_init 935 }; 936 937 static void mac_dbdma_register_types(void) 938 { 939 type_register_static(&mac_dbdma_type_info); 940 } 941 942 type_init(mac_dbdma_register_types) 943