/*
 * Freescale i.MX28 APBH DMA driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/list.h>

#include <common.h>
#include <malloc.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/arch/sys_proto.h>
#include <asm/imx-common/dma.h>
#include <asm/imx-common/regs-apbh.h>

static struct mxs_dma_chan mxs_dma_channels[MXS_MAX_DMA_CHANNELS];

/*
 * Test if the DMA channel is a valid channel
 */
int mxs_dma_validate_chan(int channel)
{
	struct mxs_dma_chan *pchan;

	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
		return -EINVAL;

	pchan = mxs_dma_channels + channel;
	if (!(pchan->flags & MXS_DMA_FLAGS_ALLOCATED))
		return -EINVAL;

	return 0;
}

/*
 * Return the address of the command within a descriptor.
 */
static unsigned int mxs_dma_cmd_address(struct mxs_dma_desc *desc)
{
	return desc->address + offsetof(struct mxs_dma_desc, cmd);
}

/*
 * Read a DMA channel's hardware semaphore.
 *
 * As used by the MXS platform's DMA software, the DMA channel's hardware
 * semaphore reflects the number of DMA commands the hardware will process, but
 * has not yet finished. This is a volatile value read directly from hardware,
 * so it must be viewed as immediately stale.
 *
 * If the channel is not marked busy, or has finished processing all its
 * commands, this value should be zero.
 *
 * See mxs_dma_desc_append() for details on how DMA command blocks must be
 * configured to maintain the expected behavior of the semaphore's value.
 */
static int mxs_dma_read_semaphore(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	uint32_t tmp;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	tmp = readl(&apbh_regs->ch[channel].hw_apbh_ch_sema);

	tmp &= APBH_CHn_SEMA_PHORE_MASK;
	tmp >>= APBH_CHn_SEMA_PHORE_OFFSET;

	return tmp;
}

#ifndef CONFIG_SYS_DCACHE_OFF
void mxs_dma_flush_desc(struct mxs_dma_desc *desc)
{
	uint32_t addr;
	uint32_t size;

	addr = (uint32_t)desc;
	size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);

	flush_dcache_range(addr, addr + size);
}
#else
inline void mxs_dma_flush_desc(struct mxs_dma_desc *desc) {}
#endif
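
/*
 * Note on cache maintenance: the DMA engine fetches command descriptors
 * straight from memory, so any descriptor the CPU builds or edits outside of
 * mxs_dma_desc_append() (which flushes for you) must be flushed before the
 * channel is started. A minimal sketch, assuming an already-allocated
 * descriptor and a flag (MXS_DMA_DESC_IRQ, from the included
 * <asm/imx-common/dma.h>) chosen purely for illustration:
 *
 *	desc->cmd.data |= MXS_DMA_DESC_IRQ;	// late CPU-side edit ...
 *	mxs_dma_flush_desc(desc);		// ... made visible to the engine
 */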

/*
 * Enable a DMA channel.
 *
 * If the given channel has any DMA descriptors on its active list, this
 * function causes the DMA hardware to begin processing them.
 *
 * This function marks the DMA channel as "busy," whether or not there are any
 * descriptors to process.
 */
static int mxs_dma_enable(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	unsigned int sem;
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (pchan->pending_num == 0) {
		pchan->flags |= MXS_DMA_FLAGS_BUSY;
		return 0;
	}

	pdesc = list_first_entry(&pchan->active, struct mxs_dma_desc, node);
	if (pdesc == NULL)
		return -EFAULT;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY) {
		if (!(pdesc->cmd.data & MXS_DMA_DESC_CHAIN))
			return 0;

		sem = mxs_dma_read_semaphore(channel);
		if (sem == 0)
			return 0;

		if (sem == 1) {
			pdesc = list_entry(pdesc->node.next,
					   struct mxs_dma_desc, node);
			writel(mxs_dma_cmd_address(pdesc),
			       &apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		}
		writel(pchan->pending_num,
		       &apbh_regs->ch[channel].hw_apbh_ch_sema);
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
	} else {
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
		writel(mxs_dma_cmd_address(pdesc),
		       &apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		writel(pchan->active_num,
		       &apbh_regs->ch[channel].hw_apbh_ch_sema);
		writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
		       &apbh_regs->hw_apbh_ctrl0_clr);
	}

	pchan->flags |= MXS_DMA_FLAGS_BUSY;
	return 0;
}

/*
 * Disable a DMA channel.
 *
 * This function shuts down a DMA channel and marks it as "not busy." Any
 * descriptors on the active list are immediately moved to the head of the
 * "done" list, whether or not they have actually been processed by the
 * hardware. The "ready" flags of these descriptors are NOT cleared, so they
 * still appear to be active.
 *
 * This function immediately shuts down a DMA channel's hardware, aborting any
 * I/O that may be in progress, potentially leaving I/O hardware in an undefined
 * state. It is unwise to call this function if there is ANY chance the hardware
 * is still processing a command.
 */
static int mxs_dma_disable(int channel)
{
	struct mxs_dma_chan *pchan;
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (!(pchan->flags & MXS_DMA_FLAGS_BUSY))
		return -EINVAL;

	writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
	       &apbh_regs->hw_apbh_ctrl0_set);

	pchan->flags &= ~MXS_DMA_FLAGS_BUSY;
	pchan->active_num = 0;
	pchan->pending_num = 0;
	list_splice_init(&pchan->active, &pchan->done);

	return 0;
}
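
/*
 * A note on the *_set / *_clr accesses used in this file: the APBH control
 * registers accessed here have set/clear shadow registers, so writing a mask
 * to hw_apbh_ctrl0_set sets exactly those bits and writing the same mask to
 * hw_apbh_ctrl0_clr clears them, with no read-modify-write. A minimal sketch
 * of how the per-channel clock gate is driven by the two functions above:
 *
 *	uint32_t mask = 1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET);
 *
 *	writel(mask, &apbh_regs->hw_apbh_ctrl0_set);	// gate clock (disable)
 *	writel(mask, &apbh_regs->hw_apbh_ctrl0_clr);	// ungate clock (enable)
 */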

/*
 * Reset the DMA channel hardware.
 */
static int mxs_dma_reset(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;
#if defined(CONFIG_MX23)
	uint32_t setreg = (uint32_t)(&apbh_regs->hw_apbh_ctrl0_set);
	uint32_t offset = APBH_CTRL0_RESET_CHANNEL_OFFSET;
#elif (defined(CONFIG_MX28) || defined(CONFIG_MX6))
	uint32_t setreg = (uint32_t)(&apbh_regs->hw_apbh_channel_ctrl_set);
	uint32_t offset = APBH_CHANNEL_CTRL_RESET_CHANNEL_OFFSET;
#endif

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	writel(1 << (channel + offset), setreg);

	return 0;
}

/*
 * Enable or disable DMA interrupt.
 *
 * This function enables or disables the given DMA channel's ability to
 * interrupt the CPU.
 */
static int mxs_dma_enable_irq(int channel, int enable)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	if (enable)
		writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
		       &apbh_regs->hw_apbh_ctrl1_set);
	else
		writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
		       &apbh_regs->hw_apbh_ctrl1_clr);

	return 0;
}

/*
 * Clear DMA interrupt.
 *
 * The software that is using the DMA channel must register to receive its
 * interrupts and, when they arrive, must call this function to clear them.
 */
static int mxs_dma_ack_irq(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	writel(1 << channel, &apbh_regs->hw_apbh_ctrl1_clr);
	writel(1 << channel, &apbh_regs->hw_apbh_ctrl2_clr);

	return 0;
}
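
/*
 * In U-Boot the completion interrupt is normally not serviced by the CPU;
 * completion is detected by polling the per-channel command-complete bit in
 * HW_APBH_CTRL1 and then clearing it with mxs_dma_ack_irq(). An illustrative
 * sketch of that polling (mxs_dma_wait_complete(), later in this file, does
 * the same thing with a timeout via mxs_wait_mask_set()):
 *
 *	struct mxs_apbh_regs *apbh_regs =
 *		(struct mxs_apbh_regs *)MXS_APBH_BASE;
 *
 *	while (!(readl(&apbh_regs->hw_apbh_ctrl1_reg) & (1 << channel)))
 *		;			// spin until the command completes
 *	mxs_dma_ack_irq(channel);	// clear completion and error status
 */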

/*
 * Request to reserve a DMA channel
 */
static int mxs_dma_request(int channel)
{
	struct mxs_dma_chan *pchan;

	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
		return -EINVAL;

	pchan = mxs_dma_channels + channel;
	if ((pchan->flags & MXS_DMA_FLAGS_VALID) != MXS_DMA_FLAGS_VALID)
		return -ENODEV;

	if (pchan->flags & MXS_DMA_FLAGS_ALLOCATED)
		return -EBUSY;

	pchan->flags |= MXS_DMA_FLAGS_ALLOCATED;
	pchan->active_num = 0;
	pchan->pending_num = 0;

	INIT_LIST_HEAD(&pchan->active);
	INIT_LIST_HEAD(&pchan->done);

	return 0;
}

/*
 * Release a DMA channel.
 *
 * This function releases a DMA channel from its current owner.
 *
 * The channel will NOT be released if it's marked "busy" (see
 * mxs_dma_enable()).
 */
int mxs_dma_release(int channel)
{
	struct mxs_dma_chan *pchan;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY)
		return -EBUSY;

	pchan->dev = 0;
	pchan->active_num = 0;
	pchan->pending_num = 0;
	pchan->flags &= ~MXS_DMA_FLAGS_ALLOCATED;

	return 0;
}

/*
 * Allocate DMA descriptor
 */
struct mxs_dma_desc *mxs_dma_desc_alloc(void)
{
	struct mxs_dma_desc *pdesc;
	uint32_t size;

	size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);
	pdesc = memalign(MXS_DMA_ALIGNMENT, size);

	if (pdesc == NULL)
		return NULL;

	memset(pdesc, 0, sizeof(*pdesc));
	pdesc->address = (dma_addr_t)pdesc;

	return pdesc;
}

/*
 * Free DMA descriptor
 */
void mxs_dma_desc_free(struct mxs_dma_desc *pdesc)
{
	if (pdesc == NULL)
		return;

	free(pdesc);
}

/*
 * Add a DMA descriptor to a channel.
 *
 * If the descriptor list for this channel is not empty, this function sets the
 * CHAIN bit and the NEXTCMD_ADDR fields in the last descriptor's DMA command so
 * it will chain to the new descriptor's command.
 *
 * Then, this function marks the new descriptor as "ready," adds it to the end
 * of the active descriptor list, and increments the count of pending
 * descriptors.
 *
 * The MXS platform DMA software imposes some rules on DMA commands to maintain
 * important invariants. These rules are NOT checked, but they must be carefully
 * applied by software that uses MXS DMA channels.
 *
 * Invariant:
 *     The DMA channel's hardware semaphore must reflect the number of DMA
 *     commands the hardware will process, but has not yet finished.
 *
 * Explanation:
 *     A DMA channel begins processing commands when its hardware semaphore is
 *     written with a value greater than zero, and it stops processing commands
 *     when the semaphore returns to zero.
 *
 *     When a channel finishes a DMA command, it will decrement its semaphore if
 *     the DECREMENT_SEMAPHORE bit is set in that command's flags bits.
 *
 *     In principle, it's not necessary for the DECREMENT_SEMAPHORE bit to be
 *     set, unless it suits the purposes of the software. For example, one could
 *     construct a series of five DMA commands, with the DECREMENT_SEMAPHORE
 *     bit set only in the last one. Then, setting the DMA channel's hardware
 *     semaphore to one would cause the entire series of five commands to be
 *     processed. However, this example would violate the invariant given above.
 *
 * Rule:
 *     ALL DMA commands MUST have the DECREMENT_SEMAPHORE bit set so that the
 *     DMA channel's hardware semaphore will be decremented EVERY time a command
 *     is processed.
 */
int mxs_dma_desc_append(int channel, struct mxs_dma_desc *pdesc)
{
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *last;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	pdesc->cmd.next = mxs_dma_cmd_address(pdesc);
	pdesc->flags |= MXS_DMA_DESC_FIRST | MXS_DMA_DESC_LAST;

	if (!list_empty(&pchan->active)) {
		last = list_entry(pchan->active.prev, struct mxs_dma_desc,
				  node);

		pdesc->flags &= ~MXS_DMA_DESC_FIRST;
		last->flags &= ~MXS_DMA_DESC_LAST;

		last->cmd.next = mxs_dma_cmd_address(pdesc);
		last->cmd.data |= MXS_DMA_DESC_CHAIN;

		mxs_dma_flush_desc(last);
	}
	pdesc->flags |= MXS_DMA_DESC_READY;
	if (pdesc->flags & MXS_DMA_DESC_FIRST)
		pchan->pending_num++;
	list_add_tail(&pdesc->node, &pchan->active);

	mxs_dma_flush_desc(pdesc);

	return ret;
}
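
/*
 * Illustrative sketch of a command built to honor the rule above, using the
 * descriptor flag and field names from <asm/imx-common/dma.h>. The buffer,
 * length, channel and flag selection are examples only, not taken from a real
 * peripheral driver:
 *
 *	struct mxs_dma_desc *d = mxs_dma_desc_alloc();
 *
 *	d->cmd.data = MXS_DMA_DESC_COMMAND_DMA_READ |	// transfer type (example)
 *		      MXS_DMA_DESC_DEC_SEM |		// required by the rule above
 *		      MXS_DMA_DESC_IRQ |		// flag completion in CTRL1
 *		      MXS_DMA_DESC_WAIT4END |
 *		      (length << MXS_DMA_DESC_BYTES_OFFSET);
 *	d->cmd.address = (dma_addr_t)buffer;
 *
 *	mxs_dma_desc_append(channel, d);	// chains, flushes, marks ready
 */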

/*
 * Clean up processed DMA descriptors.
 *
 * This function removes processed DMA descriptors from the "active" list. Pass
 * in a non-NULL list head to get the descriptors moved to your list. Pass NULL
 * to get the descriptors moved to the channel's "done" list.
 *
 * This function marks the DMA channel as "not busy" if no unprocessed
 * descriptors remain on the "active" list.
 */
static int mxs_dma_finish(int channel, struct list_head *head)
{
	int sem;
	struct mxs_dma_chan *pchan;
	struct list_head *p, *q;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	sem = mxs_dma_read_semaphore(channel);
	if (sem < 0)
		return sem;

	if (sem == pchan->active_num)
		return 0;

	list_for_each_safe(p, q, &pchan->active) {
		if ((pchan->active_num) <= sem)
			break;

		pdesc = list_entry(p, struct mxs_dma_desc, node);
		pdesc->flags &= ~MXS_DMA_DESC_READY;

		if (head)
			list_move_tail(p, head);
		else
			list_move_tail(p, &pchan->done);

		if (pdesc->flags & MXS_DMA_DESC_LAST)
			pchan->active_num--;
	}

	if (sem == 0)
		pchan->flags &= ~MXS_DMA_FLAGS_BUSY;

	return 0;
}

/*
 * Wait for DMA channel to complete
 */
static int mxs_dma_wait_complete(uint32_t timeout, unsigned int chan)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(chan);
	if (ret)
		return ret;

	if (mxs_wait_mask_set(&apbh_regs->hw_apbh_ctrl1_reg,
			      1 << chan, timeout)) {
		ret = -ETIMEDOUT;
		mxs_dma_reset(chan);
	}

	return ret;
}

/*
 * Execute the DMA channel
 */
int mxs_dma_go(int chan)
{
	uint32_t timeout = 10000000;
	int ret;

	LIST_HEAD(tmp_desc_list);

	mxs_dma_enable_irq(chan, 1);
	mxs_dma_enable(chan);

	/* Wait for DMA to finish. */
	ret = mxs_dma_wait_complete(timeout, chan);

	/* Clear out the descriptors we just ran. */
	mxs_dma_finish(chan, &tmp_desc_list);

	/* Shut the DMA channel down. */
	mxs_dma_ack_irq(chan);
	mxs_dma_reset(chan);
	mxs_dma_enable_irq(chan, 0);
	mxs_dma_disable(chan);

	return ret;
}
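
/*
 * Typical calling sequence for the helpers above (illustrative only;
 * descriptor setup omitted, see the sketch following mxs_dma_desc_append()):
 *
 *	struct mxs_dma_desc *d = mxs_dma_desc_alloc();
 *	int ret;
 *
 *	// ... fill in d->cmd as appropriate for the peripheral ...
 *
 *	mxs_dma_desc_append(channel, d);	// may be repeated to chain commands
 *	ret = mxs_dma_go(channel);		// kick off, poll, tear down
 *	mxs_dma_desc_free(d);
 */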

/*
 * Initialize the DMA hardware
 */
void mxs_dma_init(void)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;

	mxs_reset_block(&apbh_regs->hw_apbh_ctrl0_reg);

#ifdef CONFIG_APBH_DMA_BURST8
	writel(APBH_CTRL0_AHB_BURST8_EN,
	       &apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_AHB_BURST8_EN,
	       &apbh_regs->hw_apbh_ctrl0_clr);
#endif

#ifdef CONFIG_APBH_DMA_BURST
	writel(APBH_CTRL0_APB_BURST_EN,
	       &apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_APB_BURST_EN,
	       &apbh_regs->hw_apbh_ctrl0_clr);
#endif
}

int mxs_dma_init_channel(int channel)
{
	struct mxs_dma_chan *pchan;
	int ret;

	pchan = mxs_dma_channels + channel;
	pchan->flags = MXS_DMA_FLAGS_VALID;

	ret = mxs_dma_request(channel);

	if (ret) {
		printf("MXS DMA: Can't acquire DMA channel %i\n",
		       channel);
		return ret;
	}

	mxs_dma_reset(channel);
	mxs_dma_ack_irq(channel);

	return 0;
}
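
/*
 * Initialization sketch: a board or peripheral driver is expected to bring up
 * the APBH block once and then claim each channel it needs before queueing
 * descriptors (the channel number here is an illustrative assumption):
 *
 *	mxs_dma_init();				// reset APBH, set burst modes
 *
 *	ret = mxs_dma_init_channel(channel);	// mark valid, reserve, reset, ack
 *	if (ret)
 *		return ret;
 */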