/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has following copyrights:
 *
 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 * Copyright (C) 2006 Applied Data Systems
 * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on dw_dmac and amba-pl08x drivers.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <mach/dma.h>

#include "dmaengine.h"

/* M2P registers */
#define M2P_CONTROL			0x0000
#define M2P_CONTROL_STALLINT		BIT(0)
#define M2P_CONTROL_NFBINT		BIT(1)
#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
#define M2P_CONTROL_ENABLE		BIT(4)
#define M2P_CONTROL_ICE			BIT(6)

#define M2P_INTERRUPT			0x0004
#define M2P_INTERRUPT_STALL		BIT(0)
#define M2P_INTERRUPT_NFB		BIT(1)
#define M2P_INTERRUPT_ERROR		BIT(3)

#define M2P_PPALLOC			0x0008
#define M2P_STATUS			0x000c

#define M2P_MAXCNT0			0x0020
#define M2P_BASE0			0x0024
#define M2P_MAXCNT1			0x0030
#define M2P_BASE1			0x0034

#define M2P_STATE_IDLE			0
#define M2P_STATE_STALL			1
#define M2P_STATE_ON			2
#define M2P_STATE_NEXT			3

/* M2M registers */
#define M2M_CONTROL			0x0000
#define M2M_CONTROL_DONEINT		BIT(2)
#define M2M_CONTROL_ENABLE		BIT(3)
#define M2M_CONTROL_START		BIT(4)
#define M2M_CONTROL_DAH			BIT(11)
#define M2M_CONTROL_SAH			BIT(12)
#define M2M_CONTROL_PW_SHIFT		9
#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT		13
#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_RSS_SHIFT		22
#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK		BIT(24)
#define M2M_CONTROL_PWSC_SHIFT		25

#define M2M_INTERRUPT			0x0004
#define M2M_INTERRUPT_DONEINT		BIT(1)

#define M2M_BCR0			0x0010
#define M2M_BCR1			0x0014
#define M2M_SAR_BASE0			0x0018
#define M2M_SAR_BASE1			0x001c
#define M2M_DAR_BASE0			0x002c
#define M2M_DAR_BASE1			0x0030

#define DMA_MAX_CHAN_BYTES		0xffff
#define DMA_MAX_CHAN_DESCRIPTORS	32

struct ep93xx_dma_engine;

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32				src_addr;
	u32				dst_addr;
	size_t				size;
	bool				complete;
	struct dma_async_tx_descriptor	txd;
	struct list_head		tx_list;
	struct list_head		node;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via %DMA_SLAVE_CONFIG before slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
 * we use a slightly different scheme here: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 *
 * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
 * necessary channel configuration information. For memcpy channels this must
 * be %NULL.
 */
struct ep93xx_dma_chan {
	struct dma_chan			chan;
	const struct ep93xx_dma_engine	*edma;
	void __iomem			*regs;
	int				irq;
	struct clk			*clk;
	struct tasklet_struct		tasklet;
	/* protects the fields following */
	spinlock_t			lock;
	unsigned long			flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC		0

	int				buffer;
	struct list_head		active;
	struct list_head		queue;
	struct list_head		free_list;
	u32				runtime_addr;
	u32				runtime_ctrl;
};
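
/*
 * Illustrative sketch (not part of this driver): clients typically pass
 * &struct ep93xx_dma_data through @chan.private from a dma_filter_fn given
 * to dma_request_channel(). The filter below is hypothetical and only shows
 * how @chan.private ends up pointing at the channel configuration; real
 * client drivers may apply additional checks:
 *
 *	static bool my_dma_filter(struct dma_chan *chan, void *filter_param)
 *	{
 *		struct ep93xx_dma_data *data = filter_param;
 *
 *		if (data->direction != ep93xx_dma_chan_direction(chan))
 *			return false;
 *
 *		chan->private = data;
 *		return true;
 *	}
 */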

/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with channel
 * lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device	dma_dev;
	bool			m2m;
	int			(*hw_setup)(struct ep93xx_dma_chan *);
	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
	void			(*hw_submit)(struct ep93xx_dma_chan *);
	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN	0
#define INTERRUPT_DONE		1
#define INTERRUPT_NEXT_BUFFER	2

	size_t			num_channels;
	struct ep93xx_dma_chan	channels[];
};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	if (list_empty(&edmac->active))
		return NULL;

	return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Function advances the active descriptor to the next one in @edmac->active
 * and returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode this always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * EP93xx User's Guide states that we must perform a dummy read after
	 * write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	return 0;
}

static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		cpu_relax();

	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
		cpu_relax();
}

static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
}

static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}

static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors back
		 * to the client so we just report the error here and continue
		 * as usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie : %d\n"
			"\tsrc_addr : 0x%08x\n"
			"\tdst_addr : 0x%08x\n"
			"\tsize : %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
	case M2P_INTERRUPT_STALL:
		/* Disable interrupts */
		control = readl(edmac->regs + M2P_CONTROL);
		control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
		m2p_set_control(edmac, control);

		return INTERRUPT_DONE;

	case M2P_INTERRUPT_NFB:
		if (ep93xx_dma_advance_active(edmac))
			m2p_fill_desc(edmac);

		return INTERRUPT_NEXT_BUFFER;
	}

	return INTERRUPT_UNKNOWN;
}

/*
 * M2M DMA implementation
 *
 * For the M2M transfers we don't use NFB at all. This is because it simply
 * doesn't work well with memcpy transfers. When both buffers are submitted,
 * it is extremely unlikely that we get an NFB interrupt; instead a DONE
 * interrupt is reported with both buffers already transferred, which means
 * that we weren't able to update the next buffer.
 *
 * So for now we "simulate" NFB by just submitting buffer after buffer
 * without double buffering.
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is a memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found via experimenting - anything less than 5
		 * causes the channel to perform only a partial transfer which
		 * leads to problems since we don't get the DONE interrupt
		 * then.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * This IDE part is totally untested. Values below are taken
		 * from the EP93xx User's Guide and might not be correct.
		 */
		if (data->direction == DMA_MEM_TO_DEV) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}

static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
}

static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear the PW bits here and then set them according to what is given
	 * in the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	/*
	 * Now we can finally enable the channel. For M2M channel this must be
	 * done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}

static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT))
		return INTERRUPT_UNKNOWN;

	/* Clear the DONE bit */
	writel(0, edmac->regs + M2M_INTERRUPT);

	/* Disable interrupts and the channel */
	control = readl(edmac->regs + M2M_CONTROL);
	control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE);
	writel(control, edmac->regs + M2M_CONTROL);

	/*
	 * Since we only get the DONE interrupt we have to find out ourselves
	 * whether there is still something to process. So we try to advance
	 * the chain and see whether it succeeds.
	 */
	if (ep93xx_dma_advance_active(edmac)) {
		edmac->edma->hw_submit(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	return INTERRUPT_DONE;
}

/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}

static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}

static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
{
	struct device *dev = desc->txd.chan->device->dev;

	if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->src_addr, desc->size,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, desc->src_addr, desc->size,
				       DMA_TO_DEVICE);
	}
	if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->dst_addr, desc->size,
					 DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, desc->dst_addr, desc->size,
				       DMA_FROM_DEVICE);
	}
}

static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	LIST_HEAD(list);

	spin_lock_irq(&edmac->lock);
	/*
	 * If dmaengine_terminate_all() was called before we get to run, the
	 * active list has become empty. If that happens we aren't supposed to
	 * do anything more than call ep93xx_dma_advance_work().
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			dma_cookie_complete(&desc->txd);
			list_splice_init(&edmac->active, &list);
		}
		callback = desc->txd.callback;
		callback_param = desc->txd.callback_param;
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		/*
		 * For the memcpy channels the API requires us to unmap the
		 * buffers unless requested otherwise.
		 */
		if (!edmac->chan.private)
			ep93xx_dma_unmap_buffers(desc);

		ep93xx_dma_desc_put(edmac, desc);
	}

	if (callback)
		callback(callback_param);
}
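
/*
 * Completion flow, summarized from the code above and below: the hard IRQ
 * handler asks ->hw_interrupt() to classify the event and, on
 * INTERRUPT_DONE, marks the active descriptor complete and schedules the
 * channel tasklet. The tasklet then completes the cookie, starts the next
 * queued transaction, returns the processed descriptors to the free list
 * (unmapping memcpy buffers if needed) and finally invokes the client
 * callback outside of the channel lock.
 */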

static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			 "got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute the given descriptor on the hardware or, if the
 * hardware is busy, queue the descriptor to be executed later on. Returns a
 * cookie which can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
	 * If nothing is currently processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor
	 * on the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}

/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates necessary resources for the given DMA channel and
 * returns the number of allocated descriptors for the channel. A negative
 * errno is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (data->direction != DMA_MEM_TO_DEV &&
				    data->direction != DMA_DEV_TO_MEM)
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable(edmac->clk);
	free_irq(edmac->irq, edmac);
}

/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_transfer_direction dir,
			 unsigned long flags, void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t sg_len = sg_dma_len(sg);

		if (sg_len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
				 sg_len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}
		desc->size = sg_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
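
/*
 * Illustrative sketch (not part of this driver): a slave client normally
 * reaches the above through the generic dmaengine wrappers. Names such as
 * my_sgl, my_sg_len, my_callback and my_data are placeholders:
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_slave_sg(chan, my_sgl, my_sg_len,
 *				      DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!txd)
 *		return -ENOMEM;
 *
 *	txd->callback = my_callback;
 *	txd->callback_param = my_data;
 *
 *	dmaengine_submit(txd);		- ends up in ep93xx_dma_tx_submit()
 *	dma_async_issue_pending(chan);	- ep93xx_dma_issue_pending()
 */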

/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @context: operation context (ignored)
 *
 * Prepares a descriptor for a cyclic DMA operation. This means that once the
 * descriptor is submitted, we will keep submitting @period_len sized buffers
 * and calling the callback once a period has elapsed. The transfer terminates
 * only when the client calls dmaengine_terminate_all() for this channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction dir, void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset = 0;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @edmac: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}

static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
				   struct dma_slave_config *config)
{
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (config->direction) {
	case DMA_DEV_TO_MEM:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_MEM_TO_DEV:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}

/**
 * ep93xx_dma_control - manipulate all pending operations on a channel
 * @chan: channel
 * @cmd: control command to perform
 * @arg: optional argument
 *
 * Controls the channel. Function returns %0 in case of success or negative
 * error in case of failure.
 */
static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			      unsigned long arg)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct dma_slave_config *config;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return ep93xx_dma_terminate_all(edmac);

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return ep93xx_dma_slave_config(edmac, config);

	default:
		break;
	}

	return -ENOSYS;
}
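
/*
 * Illustrative sketch (not part of this driver): slave clients normally
 * reach ep93xx_dma_control() through the generic dmaengine wrappers. The
 * slave configuration applies to M2M slave channels (SSP/IDE); the FIFO
 * address below is a placeholder:
 *
 *	struct dma_slave_config conf = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= my_device_fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *
 *	dmaengine_slave_config(chan, &conf);	- DMA_SLAVE_CONFIG
 *	...
 *	dmaengine_terminate_all(chan);		- DMA_TERMINATE_ALL
 */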

/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query the state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	enum dma_status ret;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	ret = dma_cookie_status(chan, cookie, state);
	spin_unlock_irqrestore(&edmac->lock, flags);

	return ret;
}

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	size_t edma_size;
	int ret, i;

	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				 cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
			     (unsigned long)edmac);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_control = ep93xx_dma_control;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];
			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			 edma->m2m ? "M" : "P");
	}

	return ret;
}
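
/*
 * Illustrative sketch (based only on the fields dereferenced in
 * ep93xx_dma_probe() above): the platform code is expected to register
 * "ep93xx-dma-m2p"/"ep93xx-dma-m2m" devices whose platform data looks
 * roughly like this. Channel names, register bases and IRQ numbers are
 * placeholders:
 *
 *	static struct ep93xx_dma_chan_data m2p_channels[] = {
 *		{ .name = "m2p0", .base = ..., .irq = ... },
 *		...
 *	};
 *
 *	static struct ep93xx_dma_platform_data m2p_data = {
 *		.channels	= m2p_channels,
 *		.num_channels	= ARRAY_SIZE(m2p_channels),
 *	};
 */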
"M" : "P"); 1355 } 1356 1357 return ret; 1358 } 1359 1360 static struct platform_device_id ep93xx_dma_driver_ids[] = { 1361 { "ep93xx-dma-m2p", 0 }, 1362 { "ep93xx-dma-m2m", 1 }, 1363 { }, 1364 }; 1365 1366 static struct platform_driver ep93xx_dma_driver = { 1367 .driver = { 1368 .name = "ep93xx-dma", 1369 }, 1370 .id_table = ep93xx_dma_driver_ids, 1371 }; 1372 1373 static int __init ep93xx_dma_module_init(void) 1374 { 1375 return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe); 1376 } 1377 subsys_initcall(ep93xx_dma_module_init); 1378 1379 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>"); 1380 MODULE_DESCRIPTION("EP93xx DMA driver"); 1381 MODULE_LICENSE("GPL"); 1382