/*
 * PowerMac descriptor-based DMA emulation
 *
 * Copyright (c) 2005-2007 Fabrice Bellard
 * Copyright (c) 2007 Jocelyn Mayer
 * Copyright (c) 2009 Laurent Vivier
 *
 * some parts from linux-2.6.28, arch/powerpc/include/asm/dbdma.h
 *
 * Definitions for using the Apple Descriptor-Based DMA controller
 * in Power Macintosh computers.
 *
 * Copyright (C) 1996 Paul Mackerras.
 *
 * some parts from mol 0.9.71
 *
 * Descriptor based DMA emulation
 *
 * Copyright (C) 1998-2004 Samuel Rydh (samuel@ibrium.se)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/isa/isa.h"
#include "hw/ppc/mac_dbdma.h"
#include "qemu/main-loop.h"
#include "qemu/log.h"
#include "sysemu/dma.h"

/* debug DBDMA */
#define DEBUG_DBDMA 0
#define DEBUG_DBDMA_CHANMASK ((1ull << DBDMA_CHANNELS) - 1)

#define DBDMA_DPRINTF(fmt, ...) do { \
    if (DEBUG_DBDMA) { \
        printf("DBDMA: " fmt , ## __VA_ARGS__); \
    } \
} while (0)

#define DBDMA_DPRINTFCH(ch, fmt, ...) do { \
    if (DEBUG_DBDMA) { \
        if ((1ul << (ch)->channel) & DEBUG_DBDMA_CHANMASK) { \
            printf("DBDMA[%02x]: " fmt , (ch)->channel, ## __VA_ARGS__); \
        } \
    } \
} while (0)
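
/*
 * Each DMA channel walks a chain of dbdma_cmd descriptors fetched from
 * guest memory at the address held in DBDMA_CMDPTR_LO.  The descriptor
 * currently being executed is cached in DBDMA_channel.current and written
 * back (status and residual count) once the command completes.  Channels
 * are driven from a bottom half scheduled via DBDMA_kick().
 */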

static DBDMAState *dbdma_from_ch(DBDMA_channel *ch)
{
    return container_of(ch, DBDMAState, channels[ch->channel]);
}

#if DEBUG_DBDMA
static void dump_dbdma_cmd(dbdma_cmd *cmd)
{
    printf("dbdma_cmd %p\n", cmd);
    printf("    req_count 0x%04x\n", le16_to_cpu(cmd->req_count));
    printf("    command 0x%04x\n", le16_to_cpu(cmd->command));
    printf("    phy_addr 0x%08x\n", le32_to_cpu(cmd->phy_addr));
    printf("    cmd_dep 0x%08x\n", le32_to_cpu(cmd->cmd_dep));
    printf("    res_count 0x%04x\n", le16_to_cpu(cmd->res_count));
    printf("    xfer_status 0x%04x\n", le16_to_cpu(cmd->xfer_status));
}
#else
static void dump_dbdma_cmd(dbdma_cmd *cmd)
{
}
#endif

static void dbdma_cmdptr_load(DBDMA_channel *ch)
{
    DBDMA_DPRINTFCH(ch, "dbdma_cmdptr_load 0x%08x\n",
                    ch->regs[DBDMA_CMDPTR_LO]);
    dma_memory_read(&address_space_memory, ch->regs[DBDMA_CMDPTR_LO],
                    &ch->current, sizeof(dbdma_cmd));
}

static void dbdma_cmdptr_save(DBDMA_channel *ch)
{
    DBDMA_DPRINTFCH(ch, "dbdma_cmdptr_save 0x%08x\n",
                    ch->regs[DBDMA_CMDPTR_LO]);
    DBDMA_DPRINTFCH(ch, "xfer_status 0x%08x res_count 0x%04x\n",
                    le16_to_cpu(ch->current.xfer_status),
                    le16_to_cpu(ch->current.res_count));
    dma_memory_write(&address_space_memory, ch->regs[DBDMA_CMDPTR_LO],
                     &ch->current, sizeof(dbdma_cmd));
}

static void kill_channel(DBDMA_channel *ch)
{
    DBDMA_DPRINTFCH(ch, "kill_channel\n");

    ch->regs[DBDMA_STATUS] |= DEAD;
    ch->regs[DBDMA_STATUS] &= ~ACTIVE;

    qemu_irq_raise(ch->irq);
}

static void conditional_interrupt(DBDMA_channel *ch)
{
    dbdma_cmd *current = &ch->current;
    uint16_t intr;
    uint16_t sel_mask, sel_value;
    uint32_t status;
    int cond;

    DBDMA_DPRINTFCH(ch, "%s\n", __func__);

    intr = le16_to_cpu(current->command) & INTR_MASK;

    switch(intr) {
    case INTR_NEVER:  /* don't interrupt */
        return;
    case INTR_ALWAYS: /* always interrupt */
        qemu_irq_raise(ch->irq);
        DBDMA_DPRINTFCH(ch, "%s: raise\n", __func__);
        return;
    }

    status = ch->regs[DBDMA_STATUS] & DEVSTAT;

    sel_mask = (ch->regs[DBDMA_INTR_SEL] >> 16) & 0x0f;
    sel_value = ch->regs[DBDMA_INTR_SEL] & 0x0f;

    cond = (status & sel_mask) == (sel_value & sel_mask);

    switch(intr) {
    case INTR_IFSET:  /* intr if condition bit is 1 */
        if (cond) {
            qemu_irq_raise(ch->irq);
            DBDMA_DPRINTFCH(ch, "%s: raise\n", __func__);
        }
        return;
    case INTR_IFCLR:  /* intr if condition bit is 0 */
        if (!cond) {
            qemu_irq_raise(ch->irq);
            DBDMA_DPRINTFCH(ch, "%s: raise\n", __func__);
        }
        return;
    }
}

static int conditional_wait(DBDMA_channel *ch)
{
    dbdma_cmd *current = &ch->current;
    uint16_t wait;
    uint16_t sel_mask, sel_value;
    uint32_t status;
    int cond;

    DBDMA_DPRINTFCH(ch, "conditional_wait\n");

    wait = le16_to_cpu(current->command) & WAIT_MASK;

    switch(wait) {
    case WAIT_NEVER:  /* don't wait */
        return 0;
    case WAIT_ALWAYS: /* always wait */
        return 1;
    }

    status = ch->regs[DBDMA_STATUS] & DEVSTAT;

    sel_mask = (ch->regs[DBDMA_WAIT_SEL] >> 16) & 0x0f;
    sel_value = ch->regs[DBDMA_WAIT_SEL] & 0x0f;

    cond = (status & sel_mask) == (sel_value & sel_mask);

    switch(wait) {
    case WAIT_IFSET:  /* wait if condition bit is 1 */
        if (cond)
            return 1;
        return 0;
    case WAIT_IFCLR:  /* wait if condition bit is 0 */
        if (!cond)
            return 1;
        return 0;
    }
    return 0;
}
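
/*
 * Descriptor chain advancement: next() steps DBDMA_CMDPTR_LO to the
 * following dbdma_cmd and clears the BT status bit, branch() reloads
 * CMDPTR_LO from the current descriptor's cmd_dep field and sets BT;
 * both then re-fetch ch->current from guest memory.
 */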
static void next(DBDMA_channel *ch)
{
    uint32_t cp;

    ch->regs[DBDMA_STATUS] &= ~BT;

    cp = ch->regs[DBDMA_CMDPTR_LO];
    ch->regs[DBDMA_CMDPTR_LO] = cp + sizeof(dbdma_cmd);
    dbdma_cmdptr_load(ch);
}

static void branch(DBDMA_channel *ch)
{
    dbdma_cmd *current = &ch->current;

    ch->regs[DBDMA_CMDPTR_LO] = le32_to_cpu(current->cmd_dep);
    ch->regs[DBDMA_STATUS] |= BT;
    dbdma_cmdptr_load(ch);
}

static void conditional_branch(DBDMA_channel *ch)
{
    dbdma_cmd *current = &ch->current;
    uint16_t br;
    uint16_t sel_mask, sel_value;
    uint32_t status;
    int cond;

    DBDMA_DPRINTFCH(ch, "conditional_branch\n");

    /* check if we must branch */

    br = le16_to_cpu(current->command) & BR_MASK;

    switch(br) {
    case BR_NEVER:  /* don't branch */
        next(ch);
        return;
    case BR_ALWAYS: /* always branch */
        branch(ch);
        return;
    }

    status = ch->regs[DBDMA_STATUS] & DEVSTAT;

    sel_mask = (ch->regs[DBDMA_BRANCH_SEL] >> 16) & 0x0f;
    sel_value = ch->regs[DBDMA_BRANCH_SEL] & 0x0f;

    cond = (status & sel_mask) == (sel_value & sel_mask);

    switch(br) {
    case BR_IFSET:  /* branch if condition bit is 1 */
        if (cond)
            branch(ch);
        else
            next(ch);
        return;
    case BR_IFCLR:  /* branch if condition bit is 0 */
        if (!cond)
            branch(ch);
        else
            next(ch);
        return;
    }
}

static void channel_run(DBDMA_channel *ch);

static void dbdma_end(DBDMA_io *io)
{
    DBDMA_channel *ch = io->channel;
    dbdma_cmd *current = &ch->current;

    DBDMA_DPRINTFCH(ch, "%s\n", __func__);

    if (conditional_wait(ch))
        goto wait;

    current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
    current->res_count = cpu_to_le16(io->len);
    dbdma_cmdptr_save(ch);
    if (io->is_last)
        ch->regs[DBDMA_STATUS] &= ~FLUSH;

    conditional_interrupt(ch);
    conditional_branch(ch);

wait:
    /* Indicate that we're ready for a new DMA round */
    ch->io.processing = false;

    if ((ch->regs[DBDMA_STATUS] & RUN) &&
        (ch->regs[DBDMA_STATUS] & ACTIVE))
        channel_run(ch);
}

static void start_output(DBDMA_channel *ch, int key, uint32_t addr,
                         uint16_t req_count, int is_last)
{
    DBDMA_DPRINTFCH(ch, "start_output\n");

    /* KEY_REGS, KEY_DEVICE and KEY_STREAM
     * are not implemented in the mac-io chip
     */

    DBDMA_DPRINTFCH(ch, "addr 0x%x key 0x%x\n", addr, key);
    if (!addr || key > KEY_STREAM3) {
        kill_channel(ch);
        return;
    }

    ch->io.addr = addr;
    ch->io.len = req_count;
    ch->io.is_last = is_last;
    ch->io.dma_end = dbdma_end;
    ch->io.is_dma_out = 1;
    ch->io.processing = true;
    if (ch->rw) {
        ch->rw(&ch->io);
    }
}

static void start_input(DBDMA_channel *ch, int key, uint32_t addr,
                        uint16_t req_count, int is_last)
{
    DBDMA_DPRINTFCH(ch, "start_input\n");

    /* KEY_REGS, KEY_DEVICE and KEY_STREAM
     * are not implemented in the mac-io chip
     */

    DBDMA_DPRINTFCH(ch, "addr 0x%x key 0x%x\n", addr, key);
    if (!addr || key > KEY_STREAM3) {
        kill_channel(ch);
        return;
    }

    ch->io.addr = addr;
    ch->io.len = req_count;
    ch->io.is_last = is_last;
    ch->io.dma_end = dbdma_end;
    ch->io.is_dma_out = 0;
    ch->io.processing = true;
    if (ch->rw) {
        ch->rw(&ch->io);
    }
}
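
/*
 * LOAD_WORD/STORE_WORD move 1, 2 or 4 bytes between guest memory and the
 * descriptor's cmd_dep field; only KEY_SYSTEM transfers are implemented.
 */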
static void load_word(DBDMA_channel *ch, int key, uint32_t addr,
                      uint16_t len)
{
    dbdma_cmd *current = &ch->current;

    DBDMA_DPRINTFCH(ch, "load_word %d bytes, addr=%08x\n", len, addr);

    /* only implements KEY_SYSTEM */

    if (key != KEY_SYSTEM) {
        printf("DBDMA: LOAD_WORD, unimplemented key %x\n", key);
        kill_channel(ch);
        return;
    }

    dma_memory_read(&address_space_memory, addr, &current->cmd_dep, len);

    if (conditional_wait(ch))
        goto wait;

    current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
    dbdma_cmdptr_save(ch);
    ch->regs[DBDMA_STATUS] &= ~FLUSH;

    conditional_interrupt(ch);
    next(ch);

wait:
    DBDMA_kick(dbdma_from_ch(ch));
}

static void store_word(DBDMA_channel *ch, int key, uint32_t addr,
                       uint16_t len)
{
    dbdma_cmd *current = &ch->current;

    DBDMA_DPRINTFCH(ch, "store_word %d bytes, addr=%08x pa=%x\n",
                    len, addr, le32_to_cpu(current->cmd_dep));

    /* only implements KEY_SYSTEM */

    if (key != KEY_SYSTEM) {
        printf("DBDMA: STORE_WORD, unimplemented key %x\n", key);
        kill_channel(ch);
        return;
    }

    dma_memory_write(&address_space_memory, addr, &current->cmd_dep, len);

    if (conditional_wait(ch))
        goto wait;

    current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
    dbdma_cmdptr_save(ch);
    ch->regs[DBDMA_STATUS] &= ~FLUSH;

    conditional_interrupt(ch);
    next(ch);

wait:
    DBDMA_kick(dbdma_from_ch(ch));
}

static void nop(DBDMA_channel *ch)
{
    dbdma_cmd *current = &ch->current;

    if (conditional_wait(ch))
        goto wait;

    current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
    dbdma_cmdptr_save(ch);

    conditional_interrupt(ch);
    conditional_branch(ch);

wait:
    DBDMA_kick(dbdma_from_ch(ch));
}

static void stop(DBDMA_channel *ch)
{
    ch->regs[DBDMA_STATUS] &= ~(ACTIVE|DEAD|FLUSH);

    /* the stop command does not increment command pointer */
}
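
/*
 * Execute the cached descriptor: NOP and STOP are handled directly,
 * OUTPUT/INPUT commands hand the transfer to the device's rw callback,
 * and LOAD_WORD/STORE_WORD are reduced to 1/2/4 byte system accesses.
 */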
static void channel_run(DBDMA_channel *ch)
{
    dbdma_cmd *current = &ch->current;
    uint16_t cmd, key;
    uint16_t req_count;
    uint32_t phy_addr;

    DBDMA_DPRINTFCH(ch, "channel_run\n");
    dump_dbdma_cmd(current);

    /* clear WAKE flag at command fetch */

    ch->regs[DBDMA_STATUS] &= ~WAKE;

    cmd = le16_to_cpu(current->command) & COMMAND_MASK;

    switch (cmd) {
    case DBDMA_NOP:
        nop(ch);
        return;

    case DBDMA_STOP:
        stop(ch);
        return;
    }

    key = le16_to_cpu(current->command) & 0x0700;
    req_count = le16_to_cpu(current->req_count);
    phy_addr = le32_to_cpu(current->phy_addr);

    if (key == KEY_STREAM4) {
        printf("command %x, invalid key 4\n", cmd);
        kill_channel(ch);
        return;
    }

    switch (cmd) {
    case OUTPUT_MORE:
        start_output(ch, key, phy_addr, req_count, 0);
        return;

    case OUTPUT_LAST:
        start_output(ch, key, phy_addr, req_count, 1);
        return;

    case INPUT_MORE:
        start_input(ch, key, phy_addr, req_count, 0);
        return;

    case INPUT_LAST:
        start_input(ch, key, phy_addr, req_count, 1);
        return;
    }

    if (key < KEY_REGS) {
        printf("command %x, invalid key %x\n", cmd, key);
        key = KEY_SYSTEM;
    }

    /* for LOAD_WORD and STORE_WORD, req_count is on 3 bits
     * and BRANCH is invalid
     */

    req_count = req_count & 0x0007;
    if (req_count & 0x4) {
        req_count = 4;
        phy_addr &= ~3;
    } else if (req_count & 0x2) {
        req_count = 2;
        phy_addr &= ~1;
    } else
        req_count = 1;

    switch (cmd) {
    case LOAD_WORD:
        load_word(ch, key, phy_addr, req_count);
        return;

    case STORE_WORD:
        store_word(ch, key, phy_addr, req_count);
        return;
    }
}

static void DBDMA_run(DBDMAState *s)
{
    int channel;

    for (channel = 0; channel < DBDMA_CHANNELS; channel++) {
        DBDMA_channel *ch = &s->channels[channel];
        uint32_t status = ch->regs[DBDMA_STATUS];
        if (!ch->io.processing && (status & RUN) && (status & ACTIVE)) {
            channel_run(ch);
        }
    }
}

static void DBDMA_run_bh(void *opaque)
{
    DBDMAState *s = opaque;

    DBDMA_DPRINTF("-> DBDMA_run_bh\n");
    DBDMA_run(s);
    DBDMA_DPRINTF("<- DBDMA_run_bh\n");
}

void DBDMA_kick(DBDMAState *dbdma)
{
    qemu_bh_schedule(dbdma->bh);
}

void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq,
                            DBDMA_rw rw, DBDMA_flush flush,
                            void *opaque)
{
    DBDMAState *s = dbdma;
    DBDMA_channel *ch = &s->channels[nchan];

    DBDMA_DPRINTFCH(ch, "DBDMA_register_channel 0x%x\n", nchan);

    assert(rw);
    assert(flush);

    ch->irq = irq;
    ch->rw = rw;
    ch->flush = flush;
    ch->io.opaque = opaque;
}

/*
 * A CONTROL write carries a bit mask in its high 16 bits and the new bit
 * values in its low 16 bits; only the masked STATUS bits are updated here.
 */
static void
dbdma_control_write(DBDMA_channel *ch)
{
    uint16_t mask, value;
    uint32_t status;

    mask = (ch->regs[DBDMA_CONTROL] >> 16) & 0xffff;
    value = ch->regs[DBDMA_CONTROL] & 0xffff;

    value &= (RUN | PAUSE | FLUSH | WAKE | DEVSTAT);

    status = ch->regs[DBDMA_STATUS];

    status = (value & mask) | (status & ~mask);

    if (status & WAKE)
        status |= ACTIVE;
    if (status & RUN) {
        status |= ACTIVE;
        status &= ~DEAD;
    }
    if (status & PAUSE)
        status &= ~ACTIVE;
    if ((ch->regs[DBDMA_STATUS] & RUN) && !(status & RUN)) {
        /* RUN is cleared */
        status &= ~(ACTIVE|DEAD);
    }

    if ((status & FLUSH) && ch->flush) {
        ch->flush(&ch->io);
        status &= ~FLUSH;
    }

    DBDMA_DPRINTFCH(ch, "    status 0x%08x\n", status);

    ch->regs[DBDMA_STATUS] = status;

    if (status & ACTIVE) {
        DBDMA_kick(dbdma_from_ch(ch));
    }
}
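
/*
 * MMIO access: the channel index comes from the address bits above
 * DBDMA_CHANNEL_SHIFT, the register index from the 32-bit word offset
 * within that channel's register block.
 */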
static void dbdma_write(void *opaque, hwaddr addr,
                        uint64_t value, unsigned size)
{
    int channel = addr >> DBDMA_CHANNEL_SHIFT;
    DBDMAState *s = opaque;
    DBDMA_channel *ch = &s->channels[channel];
    int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2;

    DBDMA_DPRINTFCH(ch, "writel 0x" TARGET_FMT_plx " <= 0x%08"PRIx64"\n",
                    addr, value);
    DBDMA_DPRINTFCH(ch, "channel 0x%x reg 0x%x\n",
                    (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg);

    /* cmdptr cannot be modified if channel is ACTIVE */

    if (reg == DBDMA_CMDPTR_LO && (ch->regs[DBDMA_STATUS] & ACTIVE)) {
        return;
    }

    ch->regs[reg] = value;

    switch(reg) {
    case DBDMA_CONTROL:
        dbdma_control_write(ch);
        break;
    case DBDMA_CMDPTR_LO:
        /* 16-byte aligned */
        ch->regs[DBDMA_CMDPTR_LO] &= ~0xf;
        dbdma_cmdptr_load(ch);
        break;
    case DBDMA_STATUS:
    case DBDMA_INTR_SEL:
    case DBDMA_BRANCH_SEL:
    case DBDMA_WAIT_SEL:
        /* nothing to do */
        break;
    case DBDMA_XFER_MODE:
    case DBDMA_CMDPTR_HI:
    case DBDMA_DATA2PTR_HI:
    case DBDMA_DATA2PTR_LO:
    case DBDMA_ADDRESS_HI:
    case DBDMA_BRANCH_ADDR_HI:
    case DBDMA_RES1:
    case DBDMA_RES2:
    case DBDMA_RES3:
    case DBDMA_RES4:
        /* unused */
        break;
    }
}

static uint64_t dbdma_read(void *opaque, hwaddr addr,
                           unsigned size)
{
    uint32_t value;
    int channel = addr >> DBDMA_CHANNEL_SHIFT;
    DBDMAState *s = opaque;
    DBDMA_channel *ch = &s->channels[channel];
    int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2;

    value = ch->regs[reg];

    DBDMA_DPRINTFCH(ch, "readl 0x" TARGET_FMT_plx " => 0x%08x\n", addr, value);
    DBDMA_DPRINTFCH(ch, "channel 0x%x reg 0x%x\n",
                    (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg);

    switch(reg) {
    case DBDMA_CONTROL:
        value = 0;
        break;
    case DBDMA_STATUS:
    case DBDMA_CMDPTR_LO:
    case DBDMA_INTR_SEL:
    case DBDMA_BRANCH_SEL:
    case DBDMA_WAIT_SEL:
        /* nothing to do */
        break;
    case DBDMA_XFER_MODE:
    case DBDMA_CMDPTR_HI:
    case DBDMA_DATA2PTR_HI:
    case DBDMA_DATA2PTR_LO:
    case DBDMA_ADDRESS_HI:
    case DBDMA_BRANCH_ADDR_HI:
        /* unused */
        value = 0;
        break;
    case DBDMA_RES1:
    case DBDMA_RES2:
    case DBDMA_RES3:
    case DBDMA_RES4:
        /* reserved */
        break;
    }

    return value;
}

static const MemoryRegionOps dbdma_ops = {
    .read = dbdma_read,
    .write = dbdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const VMStateDescription vmstate_dbdma_io = {
    .name = "dbdma_io",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(addr, struct DBDMA_io),
        VMSTATE_INT32(len, struct DBDMA_io),
        VMSTATE_INT32(is_last, struct DBDMA_io),
        VMSTATE_INT32(is_dma_out, struct DBDMA_io),
        VMSTATE_BOOL(processing, struct DBDMA_io),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_dbdma_cmd = {
    .name = "dbdma_cmd",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(req_count, dbdma_cmd),
        VMSTATE_UINT16(command, dbdma_cmd),
        VMSTATE_UINT32(phy_addr, dbdma_cmd),
        VMSTATE_UINT32(cmd_dep, dbdma_cmd),
        VMSTATE_UINT16(res_count, dbdma_cmd),
        VMSTATE_UINT16(xfer_status, dbdma_cmd),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_dbdma_channel = {
    .name = "dbdma_channel",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, struct DBDMA_channel, DBDMA_REGS),
        VMSTATE_STRUCT(io, struct DBDMA_channel, 0, vmstate_dbdma_io, DBDMA_io),
        VMSTATE_STRUCT(current, struct DBDMA_channel, 0, vmstate_dbdma_cmd,
                       dbdma_cmd),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_dbdma = {
    .name = "dbdma",
    .version_id = 3,
    .minimum_version_id = 3,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(channels, DBDMAState, DBDMA_CHANNELS, 1,
                             vmstate_dbdma_channel, DBDMA_channel),
        VMSTATE_END_OF_LIST()
    }
};

static void dbdma_reset(void *opaque)
{
    DBDMAState *s = opaque;
    int i;

    for (i = 0; i < DBDMA_CHANNELS; i++)
        memset(s->channels[i].regs, 0, DBDMA_SIZE);
}
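
/*
 * Default rw/flush callbacks for channels no device has claimed via
 * DBDMA_register_channel(): both log a guest error, and the flush
 * variant additionally writes back xfer_status with FLUSH set when the
 * current command is an INPUT/OUTPUT transfer.
 */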
static void dbdma_unassigned_rw(DBDMA_io *io)
{
    DBDMA_channel *ch = io->channel;
    qemu_log_mask(LOG_GUEST_ERROR, "%s: use of unassigned channel %d\n",
                  __func__, ch->channel);
    ch->io.processing = false;
}

static void dbdma_unassigned_flush(DBDMA_io *io)
{
    DBDMA_channel *ch = io->channel;
    dbdma_cmd *current = &ch->current;
    uint16_t cmd;
    qemu_log_mask(LOG_GUEST_ERROR, "%s: use of unassigned channel %d\n",
                  __func__, ch->channel);

    cmd = le16_to_cpu(current->command) & COMMAND_MASK;
    if (cmd == OUTPUT_MORE || cmd == OUTPUT_LAST ||
        cmd == INPUT_MORE || cmd == INPUT_LAST) {
        current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS] | FLUSH);
        current->res_count = cpu_to_le16(io->len);
        dbdma_cmdptr_save(ch);
    }
}

void *DBDMA_init(MemoryRegion **dbdma_mem)
{
    DBDMAState *s;
    int i;

    s = g_malloc0(sizeof(DBDMAState));

    for (i = 0; i < DBDMA_CHANNELS; i++) {
        DBDMA_io *io = &s->channels[i].io;
        DBDMA_channel *ch = &s->channels[i];
        qemu_iovec_init(&io->iov, 1);

        ch->rw = dbdma_unassigned_rw;
        ch->flush = dbdma_unassigned_flush;
        ch->channel = i;
        ch->io.channel = ch;
    }

    memory_region_init_io(&s->mem, NULL, &dbdma_ops, s, "dbdma", 0x1000);
    *dbdma_mem = &s->mem;
    vmstate_register(NULL, -1, &vmstate_dbdma, s);
    qemu_register_reset(dbdma_reset, s);

    s->bh = qemu_bh_new(DBDMA_run_bh, s);

    return s;
}