/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for Freescale MPC8540 DMA controller, which is
 *   also fit for MPC8560, MPC8555, MPC8548, MPC8641, and etc.
 *   The support for MPC8349 DMA controller is also added.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

#include "dmaengine.h"
#include "fsldma.h"

/* Per-channel log helpers: prefix every message with the channel name */
#define chan_dbg(chan, fmt, arg...)					\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)					\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

static const char msg_ld_oom[] = "No free memory for link descriptor";

/*
 * Register Helpers
 *
 * Thin wrappers over the DMA_IN/DMA_OUT accessors (see fsldma.h) for the
 * channel's status (SR), mode (MR), current-descriptor (CDAR) and
 * byte-count (BCR) registers.
 */

static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_mr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->mr, val, 32);
}

static u32 get_mr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->mr, 32);
}

/* CDAR carries the snoop-enable bit alongside the descriptor address */
static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	/* mask SNEN back out so callers see a plain descriptor address */
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_bcr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->bcr, val, 32);
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}

/*
 * Descriptor Helpers
 *
 * Fill in fields of an in-memory hardware link descriptor, converting
 * to device endianness and adding the IP-block-specific snoop bits.
 */

static void set_desc_cnt(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	/* 85xx encodes the source snoop attribute in the upper word */
	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static void set_desc_dst(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	/* 85xx encodes the destination snoop attribute in the upper word */
	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static void set_desc_next(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	/* on 83xx the next-link pointer carries the snoop-enable bit */
	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

/* Mark @desc as the last link in a chain by setting EOL in its next pointer */
static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

/*
 * DMA Engine Hardware Control Helpers
 */

/* Reset the channel and program the per-IP default mode-register bits */
static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	set_mr(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to below modes:
		 * EIE - Error interrupt enable
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		set_mr(chan, FSL_DMA_MR_BWC | FSL_DMA_MR_EIE
			| FSL_DMA_MR_EOLNIE);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to below modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		set_mr(chan, FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM);
		break;
	}
}

/* Idle == channel not busy (CB clear) or halted (CH set) in the status reg */
static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

/*
 * Start the DMA controller
 *
 * Preconditions:
 * - the CDAR register must point to the start descriptor
 * - the MRn[CS] bit must be cleared
 */
static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = get_mr(chan);

	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		/* external pause: clear the byte counter first */
		set_bcr(chan, 0);
		mode |= FSL_DMA_MR_EMP_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMP_EN;
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
		/* transfer will be kicked off by the external DREQ# pin */
		mode |= FSL_DMA_MR_EMS_EN;
	} else {
		/* software start: setting CS begins the transfer now */
		mode &= ~FSL_DMA_MR_EMS_EN;
		mode |= FSL_DMA_MR_CS;
	}

	set_mr(chan, mode);
}

/* Stop the channel (abort on 85xx) and poll until it reports idle */
static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	/* read the mode register */
	mode = get_mr(chan);

	/*
	 * The 85xx controller supports channel abort, which will stop
	 * the current transfer. On 83xx, this bit is the transfer error
	 * mask bit, which should not be changed.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		mode |= FSL_DMA_MR_CA;
		set_mr(chan, mode);

		mode &= ~FSL_DMA_MR_CA;
	}

	/* stop the DMA controller */
	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
	set_mr(chan, mode);

	/* wait for the DMA controller to become idle (up to ~1ms) */
	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 for disable loop
 *
 * The set source address hold transfer size. The source
 * address hold or loop transfer size is when the DMA transfer
 * data from source address (SA), if the loop size is 4, the DMA will
 * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
 * SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = get_mr(chan);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		/* SAHTS field lives at bits 15:14 of the mode register */
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	set_mr(chan, mode);
}

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 for disable loop
 *
 * The set destination address hold transfer size. The destination
 * address hold or loop transfer size is when the DMA transfer
 * data to destination address (TA), if the loop size is 4, the DMA will
 * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
 * TA + 1 ... and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = get_mr(chan);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		/* DAHTS field lives at bits 17:16 of the mode register */
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	set_mr(chan, mode);
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	/*
	 * NOTE(review): size == 0 is documented above as "disable", but
	 * __ilog2(0) is undefined and non-power-of-two sizes are silently
	 * rounded down by __ilog2 — callers appear expected to pass a
	 * power of two in [1, 1024]; confirm against all call sites.
	 */
	BUG_ON(size > 1024);

	mode = get_mr(chan);
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	set_mr(chan, mode);
}

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	/* only updates the feature flag; dma_start() programs EMP_EN */
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * If enable the external start, the channel can be started by an
 * external DMA start pin. So the dma_start() does not start the
 * transfer immediately. The DMA channel will wait for the
 * control pin asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	/* only updates the feature flag; dma_start() programs EMS_EN/CS */
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

/*
 * Link @desc (and its children) onto the tail of the channel's pending
 * queue, chaining the hardware descriptors together.
 * LOCKING: caller must hold chan->desc_lock.
 */
static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}

/*
 * dmaengine .tx_submit hook: assign cookies to every descriptor in the
 * transaction and queue it as pending. Returns the last cookie, or -1
 * if the channel is suspended.
 */
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	dma_cookie_t cookie = -EINVAL;

	spin_lock_bh(&chan->desc_lock);

#ifdef CONFIG_PM
	if (unlikely(chan->pm_state != RUNNING)) {
		chan_dbg(chan, "cannot submit due to suspend\n");
		spin_unlock_bh(&chan->desc_lock);
		return -1;
	}
#endif

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_bh(&chan->desc_lock);

	return cookie;
}
426173acc7cSZhang Wei 427173acc7cSZhang Wei /** 42886d19a54SHongbo Zhang * fsl_dma_free_descriptor - Free descriptor from channel's DMA pool. 42986d19a54SHongbo Zhang * @chan : Freescale DMA channel 43086d19a54SHongbo Zhang * @desc: descriptor to be freed 43186d19a54SHongbo Zhang */ 43286d19a54SHongbo Zhang static void fsl_dma_free_descriptor(struct fsldma_chan *chan, 43386d19a54SHongbo Zhang struct fsl_desc_sw *desc) 43486d19a54SHongbo Zhang { 43586d19a54SHongbo Zhang list_del(&desc->node); 43686d19a54SHongbo Zhang chan_dbg(chan, "LD %p free\n", desc); 43786d19a54SHongbo Zhang dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); 43886d19a54SHongbo Zhang } 43986d19a54SHongbo Zhang 44086d19a54SHongbo Zhang /** 441173acc7cSZhang Wei * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. 442a1c03319SIra Snyder * @chan : Freescale DMA channel 443173acc7cSZhang Wei * 444173acc7cSZhang Wei * Return - The descriptor allocated. NULL for failed. 445173acc7cSZhang Wei */ 44631f4306cSIra Snyder static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan) 447173acc7cSZhang Wei { 4489c3a50b7SIra Snyder struct fsl_desc_sw *desc; 449173acc7cSZhang Wei dma_addr_t pdesc; 450173acc7cSZhang Wei 4519c3a50b7SIra Snyder desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); 4529c3a50b7SIra Snyder if (!desc) { 453b158471eSIra Snyder chan_dbg(chan, "out of memory for link descriptor\n"); 4549c3a50b7SIra Snyder return NULL; 455173acc7cSZhang Wei } 456173acc7cSZhang Wei 4579c3a50b7SIra Snyder memset(desc, 0, sizeof(*desc)); 4589c3a50b7SIra Snyder INIT_LIST_HEAD(&desc->tx_list); 4599c3a50b7SIra Snyder dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 4609c3a50b7SIra Snyder desc->async_tx.tx_submit = fsl_dma_tx_submit; 4619c3a50b7SIra Snyder desc->async_tx.phys = pdesc; 4629c3a50b7SIra Snyder 4630ab09c36SIra Snyder chan_dbg(chan, "LD %p allocated\n", desc); 4640ab09c36SIra Snyder 4659c3a50b7SIra Snyder return desc; 466173acc7cSZhang 
Wei } 467173acc7cSZhang Wei 468173acc7cSZhang Wei /** 46943452fadSHongbo Zhang * fsldma_clean_completed_descriptor - free all descriptors which 47043452fadSHongbo Zhang * has been completed and acked 47143452fadSHongbo Zhang * @chan: Freescale DMA channel 47243452fadSHongbo Zhang * 47343452fadSHongbo Zhang * This function is used on all completed and acked descriptors. 47443452fadSHongbo Zhang * All descriptors should only be freed in this function. 47543452fadSHongbo Zhang */ 47643452fadSHongbo Zhang static void fsldma_clean_completed_descriptor(struct fsldma_chan *chan) 47743452fadSHongbo Zhang { 47843452fadSHongbo Zhang struct fsl_desc_sw *desc, *_desc; 47943452fadSHongbo Zhang 48043452fadSHongbo Zhang /* Run the callback for each descriptor, in order */ 48143452fadSHongbo Zhang list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) 48243452fadSHongbo Zhang if (async_tx_test_ack(&desc->async_tx)) 48343452fadSHongbo Zhang fsl_dma_free_descriptor(chan, desc); 48443452fadSHongbo Zhang } 48543452fadSHongbo Zhang 48643452fadSHongbo Zhang /** 48743452fadSHongbo Zhang * fsldma_run_tx_complete_actions - cleanup a single link descriptor 48843452fadSHongbo Zhang * @chan: Freescale DMA channel 48943452fadSHongbo Zhang * @desc: descriptor to cleanup and free 49043452fadSHongbo Zhang * @cookie: Freescale DMA transaction identifier 49143452fadSHongbo Zhang * 49243452fadSHongbo Zhang * This function is used on a descriptor which has been executed by the DMA 49343452fadSHongbo Zhang * controller. It will run any callbacks, submit any dependencies. 
49443452fadSHongbo Zhang */ 49543452fadSHongbo Zhang static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan, 49643452fadSHongbo Zhang struct fsl_desc_sw *desc, dma_cookie_t cookie) 49743452fadSHongbo Zhang { 49843452fadSHongbo Zhang struct dma_async_tx_descriptor *txd = &desc->async_tx; 49943452fadSHongbo Zhang dma_cookie_t ret = cookie; 50043452fadSHongbo Zhang 50143452fadSHongbo Zhang BUG_ON(txd->cookie < 0); 50243452fadSHongbo Zhang 50343452fadSHongbo Zhang if (txd->cookie > 0) { 50443452fadSHongbo Zhang ret = txd->cookie; 50543452fadSHongbo Zhang 50643452fadSHongbo Zhang /* Run the link descriptor callback function */ 50743452fadSHongbo Zhang if (txd->callback) { 50843452fadSHongbo Zhang chan_dbg(chan, "LD %p callback\n", desc); 50943452fadSHongbo Zhang txd->callback(txd->callback_param); 51043452fadSHongbo Zhang } 51143452fadSHongbo Zhang } 51243452fadSHongbo Zhang 51343452fadSHongbo Zhang /* Run any dependencies */ 51443452fadSHongbo Zhang dma_run_dependencies(txd); 51543452fadSHongbo Zhang 51643452fadSHongbo Zhang return ret; 51743452fadSHongbo Zhang } 51843452fadSHongbo Zhang 51943452fadSHongbo Zhang /** 52043452fadSHongbo Zhang * fsldma_clean_running_descriptor - move the completed descriptor from 52143452fadSHongbo Zhang * ld_running to ld_completed 52243452fadSHongbo Zhang * @chan: Freescale DMA channel 52343452fadSHongbo Zhang * @desc: the descriptor which is completed 52443452fadSHongbo Zhang * 52543452fadSHongbo Zhang * Free the descriptor directly if acked by async_tx api, or move it to 52643452fadSHongbo Zhang * queue ld_completed. 
 */
static void fsldma_clean_running_descriptor(struct fsldma_chan *chan,
		struct fsl_desc_sw *desc)
{
	/* Remove from the list of transactions */
	list_del(&desc->node);

	/*
	 * the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/*
		 * Move this descriptor to the list of descriptors which is
		 * completed, but still awaiting the 'ack' bit to be set.
		 */
		list_add_tail(&desc->node, &chan->ld_completed);
		return;
	}

	/* already acked: release it straight back to the pool */
	dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * HARDWARE STATE: idle
 * LOCKING: must hold chan->desc_lock
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		return;
	}

	/*
	 * The DMA controller is not idle, which means that the interrupt
	 * handler will start any queued transactions when it runs after
	 * this transaction finishes
	 */
	if (!chan->idle) {
		chan_dbg(chan, "DMA controller still busy\n");
		return;
	}

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	chan_dbg(chan, "idle, starting controller\n");
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * The 85xx DMA controller doesn't clear the channel start bit
	 * automatically at the end of a transfer. Therefore we must clear
	 * it in software before starting the transfer.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		u32 mode;

		mode = get_mr(chan);
		mode &= ~FSL_DMA_MR_CS;
		set_mr(chan, mode);
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	/* dummy read-back; NOTE(review): presumably forces the posted CDAR
	 * write to complete before CS is set in dma_start() — confirm */
	get_cdar(chan);

	dma_start(chan);
	chan->idle = false;
}

/**
 * fsldma_cleanup_descriptors - cleanup link descriptors which are completed
 * and move them to ld_completed to free until flag 'ack' is set
 * @chan: Freescale DMA channel
 *
 * This function is used on descriptors which have been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, then
 * free these descriptors if flag 'ack' is set.
 */
static void fsldma_cleanup_descriptors(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;
	/* snapshot of the descriptor the hardware is currently on */
	dma_addr_t curr_phys = get_cdar(chan);
	int seen_current = 0;

	/* first reap anything already completed and acked */
	fsldma_clean_completed_descriptor(chan);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
		/*
		 * do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/*
		 * stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (desc->async_tx.phys == curr_phys) {
			seen_current = 1;
			if (!dma_is_idle(chan))
				break;
		}

		cookie = fsldma_run_tx_complete_actions(chan, desc, cookie);

		fsldma_clean_running_descriptor(chan, desc);
	}

	/*
	 * Start any pending transactions automatically
	 *
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	fsl_chan_xfer_ld_queue(chan);

	/* cookie stays 0 if nothing completed; don't regress the counter */
	if (cookie > 0)
		chan->common.completed_cookie = cookie;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * We need the descriptor to be aligned to 32bytes
	 * for meeting FSL DMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
					struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node)
		fsl_dma_free_descriptor(chan, desc);
}

/* As fsldma_free_desc_list(), but walks the list tail-to-head */
static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node)
		fsl_dma_free_descriptor(chan, desc);
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
731a1c03319SIra Snyder * @chan : Freescale DMA channel 732173acc7cSZhang Wei */ 733a1c03319SIra Snyder static void fsl_dma_free_chan_resources(struct dma_chan *dchan) 734173acc7cSZhang Wei { 735a1c03319SIra Snyder struct fsldma_chan *chan = to_fsl_chan(dchan); 736173acc7cSZhang Wei 737b158471eSIra Snyder chan_dbg(chan, "free all channel resources\n"); 7382baff570SHongbo Zhang spin_lock_bh(&chan->desc_lock); 73943452fadSHongbo Zhang fsldma_cleanup_descriptors(chan); 7409c3a50b7SIra Snyder fsldma_free_desc_list(chan, &chan->ld_pending); 7419c3a50b7SIra Snyder fsldma_free_desc_list(chan, &chan->ld_running); 74243452fadSHongbo Zhang fsldma_free_desc_list(chan, &chan->ld_completed); 7432baff570SHongbo Zhang spin_unlock_bh(&chan->desc_lock); 74477cd62e8STimur Tabi 7459c3a50b7SIra Snyder dma_pool_destroy(chan->desc_pool); 746a1c03319SIra Snyder chan->desc_pool = NULL; 747173acc7cSZhang Wei } 748173acc7cSZhang Wei 7492187c269SZhang Wei static struct dma_async_tx_descriptor * 75031f4306cSIra Snyder fsl_dma_prep_memcpy(struct dma_chan *dchan, 75131f4306cSIra Snyder dma_addr_t dma_dst, dma_addr_t dma_src, 752173acc7cSZhang Wei size_t len, unsigned long flags) 753173acc7cSZhang Wei { 754a1c03319SIra Snyder struct fsldma_chan *chan; 755173acc7cSZhang Wei struct fsl_desc_sw *first = NULL, *prev = NULL, *new; 756173acc7cSZhang Wei size_t copy; 757173acc7cSZhang Wei 758a1c03319SIra Snyder if (!dchan) 759173acc7cSZhang Wei return NULL; 760173acc7cSZhang Wei 761173acc7cSZhang Wei if (!len) 762173acc7cSZhang Wei return NULL; 763173acc7cSZhang Wei 764a1c03319SIra Snyder chan = to_fsl_chan(dchan); 765173acc7cSZhang Wei 766173acc7cSZhang Wei do { 767173acc7cSZhang Wei 768173acc7cSZhang Wei /* Allocate the link descriptor from DMA pool */ 769a1c03319SIra Snyder new = fsl_dma_alloc_descriptor(chan); 770173acc7cSZhang Wei if (!new) { 771b158471eSIra Snyder chan_err(chan, "%s\n", msg_ld_oom); 7722e077f8eSIra Snyder goto fail; 773173acc7cSZhang Wei } 774173acc7cSZhang Wei 77556822843SZhang 
Wei copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); 776173acc7cSZhang Wei 777a1c03319SIra Snyder set_desc_cnt(chan, &new->hw, copy); 778a1c03319SIra Snyder set_desc_src(chan, &new->hw, dma_src); 779a1c03319SIra Snyder set_desc_dst(chan, &new->hw, dma_dst); 780173acc7cSZhang Wei 781173acc7cSZhang Wei if (!first) 782173acc7cSZhang Wei first = new; 783173acc7cSZhang Wei else 784a1c03319SIra Snyder set_desc_next(chan, &prev->hw, new->async_tx.phys); 785173acc7cSZhang Wei 786173acc7cSZhang Wei new->async_tx.cookie = 0; 787636bdeaaSDan Williams async_tx_ack(&new->async_tx); 788173acc7cSZhang Wei 789173acc7cSZhang Wei prev = new; 790173acc7cSZhang Wei len -= copy; 791173acc7cSZhang Wei dma_src += copy; 792738f5f7eSIra Snyder dma_dst += copy; 793173acc7cSZhang Wei 794173acc7cSZhang Wei /* Insert the link descriptor to the LD ring */ 795eda34234SDan Williams list_add_tail(&new->node, &first->tx_list); 796173acc7cSZhang Wei } while (len); 797173acc7cSZhang Wei 798636bdeaaSDan Williams new->async_tx.flags = flags; /* client is in control of this ack */ 799173acc7cSZhang Wei new->async_tx.cookie = -EBUSY; 800173acc7cSZhang Wei 801173acc7cSZhang Wei /* Set End-of-link to the last link descriptor of new list */ 802a1c03319SIra Snyder set_ld_eol(chan, new); 803173acc7cSZhang Wei 8042e077f8eSIra Snyder return &first->async_tx; 8052e077f8eSIra Snyder 8062e077f8eSIra Snyder fail: 8072e077f8eSIra Snyder if (!first) 8082e077f8eSIra Snyder return NULL; 8092e077f8eSIra Snyder 8109c3a50b7SIra Snyder fsldma_free_desc_list_reverse(chan, &first->tx_list); 8112e077f8eSIra Snyder return NULL; 812173acc7cSZhang Wei } 813173acc7cSZhang Wei 814c1433041SIra Snyder static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan, 815c1433041SIra Snyder struct scatterlist *dst_sg, unsigned int dst_nents, 816c1433041SIra Snyder struct scatterlist *src_sg, unsigned int src_nents, 817c1433041SIra Snyder unsigned long flags) 818c1433041SIra Snyder { 819c1433041SIra Snyder struct fsl_desc_sw 
*first = NULL, *prev = NULL, *new = NULL; 820c1433041SIra Snyder struct fsldma_chan *chan = to_fsl_chan(dchan); 821c1433041SIra Snyder size_t dst_avail, src_avail; 822c1433041SIra Snyder dma_addr_t dst, src; 823c1433041SIra Snyder size_t len; 824c1433041SIra Snyder 825c1433041SIra Snyder /* basic sanity checks */ 826c1433041SIra Snyder if (dst_nents == 0 || src_nents == 0) 827c1433041SIra Snyder return NULL; 828c1433041SIra Snyder 829c1433041SIra Snyder if (dst_sg == NULL || src_sg == NULL) 830c1433041SIra Snyder return NULL; 831c1433041SIra Snyder 832c1433041SIra Snyder /* 833c1433041SIra Snyder * TODO: should we check that both scatterlists have the same 834c1433041SIra Snyder * TODO: number of bytes in total? Is that really an error? 835c1433041SIra Snyder */ 836c1433041SIra Snyder 837c1433041SIra Snyder /* get prepared for the loop */ 838c1433041SIra Snyder dst_avail = sg_dma_len(dst_sg); 839c1433041SIra Snyder src_avail = sg_dma_len(src_sg); 840c1433041SIra Snyder 841c1433041SIra Snyder /* run until we are out of scatterlist entries */ 842c1433041SIra Snyder while (true) { 843c1433041SIra Snyder 844c1433041SIra Snyder /* create the largest transaction possible */ 845c1433041SIra Snyder len = min_t(size_t, src_avail, dst_avail); 846c1433041SIra Snyder len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT); 847c1433041SIra Snyder if (len == 0) 848c1433041SIra Snyder goto fetch; 849c1433041SIra Snyder 850c1433041SIra Snyder dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail; 851c1433041SIra Snyder src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail; 852c1433041SIra Snyder 853c1433041SIra Snyder /* allocate and populate the descriptor */ 854c1433041SIra Snyder new = fsl_dma_alloc_descriptor(chan); 855c1433041SIra Snyder if (!new) { 856b158471eSIra Snyder chan_err(chan, "%s\n", msg_ld_oom); 857c1433041SIra Snyder goto fail; 858c1433041SIra Snyder } 859c1433041SIra Snyder 860c1433041SIra Snyder set_desc_cnt(chan, &new->hw, len); 861c1433041SIra Snyder 
set_desc_src(chan, &new->hw, src); 862c1433041SIra Snyder set_desc_dst(chan, &new->hw, dst); 863c1433041SIra Snyder 864c1433041SIra Snyder if (!first) 865c1433041SIra Snyder first = new; 866c1433041SIra Snyder else 867c1433041SIra Snyder set_desc_next(chan, &prev->hw, new->async_tx.phys); 868c1433041SIra Snyder 869c1433041SIra Snyder new->async_tx.cookie = 0; 870c1433041SIra Snyder async_tx_ack(&new->async_tx); 871c1433041SIra Snyder prev = new; 872c1433041SIra Snyder 873c1433041SIra Snyder /* Insert the link descriptor to the LD ring */ 874c1433041SIra Snyder list_add_tail(&new->node, &first->tx_list); 875c1433041SIra Snyder 876c1433041SIra Snyder /* update metadata */ 877c1433041SIra Snyder dst_avail -= len; 878c1433041SIra Snyder src_avail -= len; 879c1433041SIra Snyder 880c1433041SIra Snyder fetch: 881c1433041SIra Snyder /* fetch the next dst scatterlist entry */ 882c1433041SIra Snyder if (dst_avail == 0) { 883c1433041SIra Snyder 884c1433041SIra Snyder /* no more entries: we're done */ 885c1433041SIra Snyder if (dst_nents == 0) 886c1433041SIra Snyder break; 887c1433041SIra Snyder 888c1433041SIra Snyder /* fetch the next entry: if there are no more: done */ 889c1433041SIra Snyder dst_sg = sg_next(dst_sg); 890c1433041SIra Snyder if (dst_sg == NULL) 891c1433041SIra Snyder break; 892c1433041SIra Snyder 893c1433041SIra Snyder dst_nents--; 894c1433041SIra Snyder dst_avail = sg_dma_len(dst_sg); 895c1433041SIra Snyder } 896c1433041SIra Snyder 897c1433041SIra Snyder /* fetch the next src scatterlist entry */ 898c1433041SIra Snyder if (src_avail == 0) { 899c1433041SIra Snyder 900c1433041SIra Snyder /* no more entries: we're done */ 901c1433041SIra Snyder if (src_nents == 0) 902c1433041SIra Snyder break; 903c1433041SIra Snyder 904c1433041SIra Snyder /* fetch the next entry: if there are no more: done */ 905c1433041SIra Snyder src_sg = sg_next(src_sg); 906c1433041SIra Snyder if (src_sg == NULL) 907c1433041SIra Snyder break; 908c1433041SIra Snyder 909c1433041SIra Snyder 
src_nents--; 910c1433041SIra Snyder src_avail = sg_dma_len(src_sg); 911c1433041SIra Snyder } 912c1433041SIra Snyder } 913c1433041SIra Snyder 914c1433041SIra Snyder new->async_tx.flags = flags; /* client is in control of this ack */ 915c1433041SIra Snyder new->async_tx.cookie = -EBUSY; 916c1433041SIra Snyder 917c1433041SIra Snyder /* Set End-of-link to the last link descriptor of new list */ 918c1433041SIra Snyder set_ld_eol(chan, new); 919c1433041SIra Snyder 920c1433041SIra Snyder return &first->async_tx; 921c1433041SIra Snyder 922c1433041SIra Snyder fail: 923c1433041SIra Snyder if (!first) 924c1433041SIra Snyder return NULL; 925c1433041SIra Snyder 926c1433041SIra Snyder fsldma_free_desc_list_reverse(chan, &first->tx_list); 927c1433041SIra Snyder return NULL; 928c1433041SIra Snyder } 929c1433041SIra Snyder 930173acc7cSZhang Wei /** 931bbea0b6eSIra Snyder * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction 932bbea0b6eSIra Snyder * @chan: DMA channel 933bbea0b6eSIra Snyder * @sgl: scatterlist to transfer to/from 934bbea0b6eSIra Snyder * @sg_len: number of entries in @scatterlist 935bbea0b6eSIra Snyder * @direction: DMA direction 936bbea0b6eSIra Snyder * @flags: DMAEngine flags 937185ecb5fSAlexandre Bounine * @context: transaction context (ignored) 938bbea0b6eSIra Snyder * 939bbea0b6eSIra Snyder * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the 940bbea0b6eSIra Snyder * DMA_SLAVE API, this gets the device-specific information from the 941bbea0b6eSIra Snyder * chan->private variable. 
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	/*
	 * This operation is not supported on the Freescale DMA controller
	 *
	 * However, we need to provide the function pointer to allow the
	 * device_control() method to work.
	 */
	return NULL;
}

/*
 * dmaengine device_control entry point.
 *
 * Supports DMA_TERMINATE_ALL (halt engine, drop all queued descriptors),
 * DMA_SLAVE_CONFIG (program the controller burst size from the slave
 * config), and the driver-private FSLDMA_EXTERNAL_START command.
 * Returns 0 on success, -EINVAL for a NULL channel, -ENXIO for an
 * unsupported command or a channel lacking the needed callback.
 */
static int fsl_dma_device_control(struct dma_chan *dchan,
				  enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_slave_config *config;
	struct fsldma_chan *chan;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_bh(&chan->desc_lock);

		/* Halt the DMA engine */
		dma_halt(chan);

		/* Remove and free all of the descriptors in the LD queue */
		fsldma_free_desc_list(chan, &chan->ld_pending);
		fsldma_free_desc_list(chan, &chan->ld_running);
		fsldma_free_desc_list(chan, &chan->ld_completed);
		chan->idle = true;

		spin_unlock_bh(&chan->desc_lock);
		return 0;

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;

		/* make sure the channel supports setting burst size */
		if (!chan->set_request_count)
			return -ENXIO;

		/* we set the controller burst size depending on direction */
		if (config->direction == DMA_MEM_TO_DEV)
			size = config->dst_addr_width * config->dst_maxburst;
		else
			size = config->src_addr_width * config->src_maxburst;

		chan->set_request_count(chan, size);
		return 0;

	case FSLDMA_EXTERNAL_START:

		/* make sure the channel supports external start */
		if (!chan->toggle_ext_start)
			return -ENXIO;

		chan->toggle_ext_start(chan, arg);
		return 0;

	default:
		return -ENXIO;
	}

	return 0;
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	spin_lock_bh(&chan->desc_lock);
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_bh(&chan->desc_lock);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	/* not complete yet: reap finished descriptors, then re-check */
	spin_lock_bh(&chan->desc_lock);
	fsldma_cleanup_descriptors(chan);
	spin_unlock_bh(&chan->desc_lock);

	/*
	 * second query is not redundant: fsldma_cleanup_descriptors() may
	 * have just advanced completed_cookie
	 */
	return dma_cookie_status(dchan, cookie, txstate);
}

/*----------------------------------------------------------------------------*/
/* Interrupt Handling */
/*----------------------------------------------------------------------------*/

/*
 * Per-channel interrupt handler. Reads and acknowledges the channel status
 * register, logs/clears each event bit, then defers all descriptor cleanup
 * to the tasklet. Returns IRQ_NONE if no event bit was set (shared IRQ).
 */
static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	/* check that this was really our device */
	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		stat &= ~FSL_DMA_SR_PE;
		/* a non-zero byte count means this was a real error */
		if (get_bcr(chan) != 0)
			chan_err(chan, "Programming Error!\n");
	}

	/*
	 * For MPC8349, EOCDI event need to update cookie
	 * and start the next transfer if it exist.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
	}

	/*
	 * If it current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit for
	 * prepare next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
	}

	/* check that the DMA controller is really idle */
	if (!dma_is_idle(chan))
		chan_err(chan, "irq: controller not idle!\n");

	/* check that we handled all of the bits */
	if (stat)
		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);
	chan_dbg(chan, "irq: Exit\n");
	return IRQ_HANDLED;
}

/*
 * Deferred half of the channel interrupt: marks the channel idle and runs
 * completion callbacks for every finished descriptor (which also restarts
 * any pending transactions).
 */
static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;

	chan_dbg(chan, "tasklet entry\n");

	spin_lock_bh(&chan->desc_lock);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/* Run all cleanup for descriptors which have been completed */
	fsldma_cleanup_descriptors(chan);

	spin_unlock_bh(&chan->desc_lock);

	chan_dbg(chan, "tasklet exit\n");
}
1142d3f620b2SIra Snyder static irqreturn_t fsldma_ctrl_irq(int irq, void *data) 1143d3f620b2SIra Snyder { 1144d3f620b2SIra Snyder struct fsldma_device *fdev = data; 1145d3f620b2SIra Snyder struct fsldma_chan *chan; 1146d3f620b2SIra Snyder unsigned int handled = 0; 1147d3f620b2SIra Snyder u32 gsr, mask; 1148d3f620b2SIra Snyder int i; 1149d3f620b2SIra Snyder 1150d3f620b2SIra Snyder gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs) 1151d3f620b2SIra Snyder : in_le32(fdev->regs); 1152d3f620b2SIra Snyder mask = 0xff000000; 1153d3f620b2SIra Snyder dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr); 1154d3f620b2SIra Snyder 1155d3f620b2SIra Snyder for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { 1156d3f620b2SIra Snyder chan = fdev->chan[i]; 1157d3f620b2SIra Snyder if (!chan) 1158d3f620b2SIra Snyder continue; 1159d3f620b2SIra Snyder 1160d3f620b2SIra Snyder if (gsr & mask) { 1161d3f620b2SIra Snyder dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id); 1162d3f620b2SIra Snyder fsldma_chan_irq(irq, chan); 1163d3f620b2SIra Snyder handled++; 1164d3f620b2SIra Snyder } 1165d3f620b2SIra Snyder 1166d3f620b2SIra Snyder gsr &= ~mask; 1167d3f620b2SIra Snyder mask >>= 8; 1168d3f620b2SIra Snyder } 1169d3f620b2SIra Snyder 1170d3f620b2SIra Snyder return IRQ_RETVAL(handled); 1171d3f620b2SIra Snyder } 1172d3f620b2SIra Snyder 1173d3f620b2SIra Snyder static void fsldma_free_irqs(struct fsldma_device *fdev) 1174d3f620b2SIra Snyder { 1175d3f620b2SIra Snyder struct fsldma_chan *chan; 1176d3f620b2SIra Snyder int i; 1177d3f620b2SIra Snyder 1178d3f620b2SIra Snyder if (fdev->irq != NO_IRQ) { 1179d3f620b2SIra Snyder dev_dbg(fdev->dev, "free per-controller IRQ\n"); 1180d3f620b2SIra Snyder free_irq(fdev->irq, fdev); 1181d3f620b2SIra Snyder return; 1182d3f620b2SIra Snyder } 1183d3f620b2SIra Snyder 1184d3f620b2SIra Snyder for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { 1185d3f620b2SIra Snyder chan = fdev->chan[i]; 1186d3f620b2SIra Snyder if (chan && chan->irq != NO_IRQ) { 1187b158471eSIra Snyder 
chan_dbg(chan, "free per-channel IRQ\n"); 1188d3f620b2SIra Snyder free_irq(chan->irq, chan); 1189d3f620b2SIra Snyder } 1190d3f620b2SIra Snyder } 1191d3f620b2SIra Snyder } 1192d3f620b2SIra Snyder 1193d3f620b2SIra Snyder static int fsldma_request_irqs(struct fsldma_device *fdev) 1194d3f620b2SIra Snyder { 1195d3f620b2SIra Snyder struct fsldma_chan *chan; 1196d3f620b2SIra Snyder int ret; 1197d3f620b2SIra Snyder int i; 1198d3f620b2SIra Snyder 1199d3f620b2SIra Snyder /* if we have a per-controller IRQ, use that */ 1200d3f620b2SIra Snyder if (fdev->irq != NO_IRQ) { 1201d3f620b2SIra Snyder dev_dbg(fdev->dev, "request per-controller IRQ\n"); 1202d3f620b2SIra Snyder ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED, 1203d3f620b2SIra Snyder "fsldma-controller", fdev); 1204d3f620b2SIra Snyder return ret; 1205d3f620b2SIra Snyder } 1206d3f620b2SIra Snyder 1207d3f620b2SIra Snyder /* no per-controller IRQ, use the per-channel IRQs */ 1208d3f620b2SIra Snyder for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { 1209d3f620b2SIra Snyder chan = fdev->chan[i]; 1210d3f620b2SIra Snyder if (!chan) 1211d3f620b2SIra Snyder continue; 1212d3f620b2SIra Snyder 1213d3f620b2SIra Snyder if (chan->irq == NO_IRQ) { 1214b158471eSIra Snyder chan_err(chan, "interrupts property missing in device tree\n"); 1215d3f620b2SIra Snyder ret = -ENODEV; 1216d3f620b2SIra Snyder goto out_unwind; 1217d3f620b2SIra Snyder } 1218d3f620b2SIra Snyder 1219b158471eSIra Snyder chan_dbg(chan, "request per-channel IRQ\n"); 1220d3f620b2SIra Snyder ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, 1221d3f620b2SIra Snyder "fsldma-chan", chan); 1222d3f620b2SIra Snyder if (ret) { 1223b158471eSIra Snyder chan_err(chan, "unable to request per-channel IRQ\n"); 1224d3f620b2SIra Snyder goto out_unwind; 1225d3f620b2SIra Snyder } 1226d3f620b2SIra Snyder } 1227d3f620b2SIra Snyder 1228d3f620b2SIra Snyder return 0; 1229d3f620b2SIra Snyder 1230d3f620b2SIra Snyder out_unwind: 1231d3f620b2SIra Snyder for (/* none */; i >= 0; 
i--) { 1232d3f620b2SIra Snyder chan = fdev->chan[i]; 1233d3f620b2SIra Snyder if (!chan) 1234d3f620b2SIra Snyder continue; 1235d3f620b2SIra Snyder 1236d3f620b2SIra Snyder if (chan->irq == NO_IRQ) 1237d3f620b2SIra Snyder continue; 1238d3f620b2SIra Snyder 1239d3f620b2SIra Snyder free_irq(chan->irq, chan); 1240d3f620b2SIra Snyder } 1241d3f620b2SIra Snyder 1242d3f620b2SIra Snyder return ret; 1243d3f620b2SIra Snyder } 1244d3f620b2SIra Snyder 1245a4f56d4bSIra Snyder /*----------------------------------------------------------------------------*/ 1246a4f56d4bSIra Snyder /* OpenFirmware Subsystem */ 1247a4f56d4bSIra Snyder /*----------------------------------------------------------------------------*/ 1248a4f56d4bSIra Snyder 1249463a1f8bSBill Pemberton static int fsl_dma_chan_probe(struct fsldma_device *fdev, 125077cd62e8STimur Tabi struct device_node *node, u32 feature, const char *compatible) 1251173acc7cSZhang Wei { 1252a1c03319SIra Snyder struct fsldma_chan *chan; 12534ce0e953SIra Snyder struct resource res; 1254173acc7cSZhang Wei int err; 1255173acc7cSZhang Wei 1256173acc7cSZhang Wei /* alloc channel */ 1257a1c03319SIra Snyder chan = kzalloc(sizeof(*chan), GFP_KERNEL); 1258a1c03319SIra Snyder if (!chan) { 1259e7a29151SIra Snyder dev_err(fdev->dev, "no free memory for DMA channels!\n"); 1260e7a29151SIra Snyder err = -ENOMEM; 1261e7a29151SIra Snyder goto out_return; 1262173acc7cSZhang Wei } 1263173acc7cSZhang Wei 1264e7a29151SIra Snyder /* ioremap registers for use */ 1265a1c03319SIra Snyder chan->regs = of_iomap(node, 0); 1266a1c03319SIra Snyder if (!chan->regs) { 1267e7a29151SIra Snyder dev_err(fdev->dev, "unable to ioremap registers\n"); 1268e7a29151SIra Snyder err = -ENOMEM; 1269a1c03319SIra Snyder goto out_free_chan; 1270e7a29151SIra Snyder } 1271e7a29151SIra Snyder 12724ce0e953SIra Snyder err = of_address_to_resource(node, 0, &res); 1273173acc7cSZhang Wei if (err) { 1274e7a29151SIra Snyder dev_err(fdev->dev, "unable to find 'reg' property\n"); 1275e7a29151SIra 
Snyder goto out_iounmap_regs; 1276173acc7cSZhang Wei } 1277173acc7cSZhang Wei 1278a1c03319SIra Snyder chan->feature = feature; 1279173acc7cSZhang Wei if (!fdev->feature) 1280a1c03319SIra Snyder fdev->feature = chan->feature; 1281173acc7cSZhang Wei 1282e7a29151SIra Snyder /* 1283e7a29151SIra Snyder * If the DMA device's feature is different than the feature 1284e7a29151SIra Snyder * of its channels, report the bug 1285173acc7cSZhang Wei */ 1286a1c03319SIra Snyder WARN_ON(fdev->feature != chan->feature); 1287173acc7cSZhang Wei 1288a1c03319SIra Snyder chan->dev = fdev->dev; 12898de7a7d9SHongbo Zhang chan->id = (res.start & 0xfff) < 0x300 ? 12908de7a7d9SHongbo Zhang ((res.start - 0x100) & 0xfff) >> 7 : 12918de7a7d9SHongbo Zhang ((res.start - 0x200) & 0xfff) >> 7; 1292a1c03319SIra Snyder if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { 1293e7a29151SIra Snyder dev_err(fdev->dev, "too many channels for device\n"); 1294173acc7cSZhang Wei err = -EINVAL; 1295e7a29151SIra Snyder goto out_iounmap_regs; 1296173acc7cSZhang Wei } 1297173acc7cSZhang Wei 1298a1c03319SIra Snyder fdev->chan[chan->id] = chan; 1299a1c03319SIra Snyder tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); 1300b158471eSIra Snyder snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id); 1301e7a29151SIra Snyder 1302e7a29151SIra Snyder /* Initialize the channel */ 1303a1c03319SIra Snyder dma_init(chan); 1304173acc7cSZhang Wei 1305173acc7cSZhang Wei /* Clear cdar registers */ 1306a1c03319SIra Snyder set_cdar(chan, 0); 1307173acc7cSZhang Wei 1308a1c03319SIra Snyder switch (chan->feature & FSL_DMA_IP_MASK) { 1309173acc7cSZhang Wei case FSL_DMA_IP_85XX: 1310a1c03319SIra Snyder chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; 1311173acc7cSZhang Wei case FSL_DMA_IP_83XX: 1312a1c03319SIra Snyder chan->toggle_ext_start = fsl_chan_toggle_ext_start; 1313a1c03319SIra Snyder chan->set_src_loop_size = fsl_chan_set_src_loop_size; 1314a1c03319SIra Snyder chan->set_dst_loop_size = 
fsl_chan_set_dst_loop_size; 1315a1c03319SIra Snyder chan->set_request_count = fsl_chan_set_request_count; 1316173acc7cSZhang Wei } 1317173acc7cSZhang Wei 1318a1c03319SIra Snyder spin_lock_init(&chan->desc_lock); 13199c3a50b7SIra Snyder INIT_LIST_HEAD(&chan->ld_pending); 13209c3a50b7SIra Snyder INIT_LIST_HEAD(&chan->ld_running); 132143452fadSHongbo Zhang INIT_LIST_HEAD(&chan->ld_completed); 1322f04cd407SIra Snyder chan->idle = true; 132314c6a333SHongbo Zhang #ifdef CONFIG_PM 132414c6a333SHongbo Zhang chan->pm_state = RUNNING; 132514c6a333SHongbo Zhang #endif 1326173acc7cSZhang Wei 1327a1c03319SIra Snyder chan->common.device = &fdev->common; 13288ac69546SRussell King - ARM Linux dma_cookie_init(&chan->common); 1329173acc7cSZhang Wei 1330d3f620b2SIra Snyder /* find the IRQ line, if it exists in the device tree */ 1331a1c03319SIra Snyder chan->irq = irq_of_parse_and_map(node, 0); 1332d3f620b2SIra Snyder 1333173acc7cSZhang Wei /* Add the channel to DMA device channel list */ 1334a1c03319SIra Snyder list_add_tail(&chan->common.device_node, &fdev->common.channels); 1335173acc7cSZhang Wei fdev->common.chancnt++; 1336173acc7cSZhang Wei 1337a1c03319SIra Snyder dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible, 1338a1c03319SIra Snyder chan->irq != NO_IRQ ? 
chan->irq : fdev->irq); 1339173acc7cSZhang Wei 1340173acc7cSZhang Wei return 0; 134151ee87f2SLi Yang 1342e7a29151SIra Snyder out_iounmap_regs: 1343a1c03319SIra Snyder iounmap(chan->regs); 1344a1c03319SIra Snyder out_free_chan: 1345a1c03319SIra Snyder kfree(chan); 1346e7a29151SIra Snyder out_return: 1347173acc7cSZhang Wei return err; 1348173acc7cSZhang Wei } 1349173acc7cSZhang Wei 1350a1c03319SIra Snyder static void fsl_dma_chan_remove(struct fsldma_chan *chan) 1351173acc7cSZhang Wei { 1352a1c03319SIra Snyder irq_dispose_mapping(chan->irq); 1353a1c03319SIra Snyder list_del(&chan->common.device_node); 1354a1c03319SIra Snyder iounmap(chan->regs); 1355a1c03319SIra Snyder kfree(chan); 1356173acc7cSZhang Wei } 1357173acc7cSZhang Wei 1358463a1f8bSBill Pemberton static int fsldma_of_probe(struct platform_device *op) 1359173acc7cSZhang Wei { 1360a4f56d4bSIra Snyder struct fsldma_device *fdev; 136177cd62e8STimur Tabi struct device_node *child; 1362e7a29151SIra Snyder int err; 1363173acc7cSZhang Wei 1364a4f56d4bSIra Snyder fdev = kzalloc(sizeof(*fdev), GFP_KERNEL); 1365173acc7cSZhang Wei if (!fdev) { 1366e7a29151SIra Snyder dev_err(&op->dev, "No enough memory for 'priv'\n"); 1367e7a29151SIra Snyder err = -ENOMEM; 1368e7a29151SIra Snyder goto out_return; 1369173acc7cSZhang Wei } 1370e7a29151SIra Snyder 1371e7a29151SIra Snyder fdev->dev = &op->dev; 1372173acc7cSZhang Wei INIT_LIST_HEAD(&fdev->common.channels); 1373173acc7cSZhang Wei 1374e7a29151SIra Snyder /* ioremap the registers for use */ 137561c7a080SGrant Likely fdev->regs = of_iomap(op->dev.of_node, 0); 1376e7a29151SIra Snyder if (!fdev->regs) { 1377e7a29151SIra Snyder dev_err(&op->dev, "unable to ioremap registers\n"); 1378e7a29151SIra Snyder err = -ENOMEM; 1379e7a29151SIra Snyder goto out_free_fdev; 1380173acc7cSZhang Wei } 1381173acc7cSZhang Wei 1382d3f620b2SIra Snyder /* map the channel IRQ if it exists, but don't hookup the handler yet */ 138361c7a080SGrant Likely fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0); 
1384d3f620b2SIra Snyder 1385173acc7cSZhang Wei dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); 1386c1433041SIra Snyder dma_cap_set(DMA_SG, fdev->common.cap_mask); 1387bbea0b6eSIra Snyder dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); 1388173acc7cSZhang Wei fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; 1389173acc7cSZhang Wei fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; 1390173acc7cSZhang Wei fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; 1391c1433041SIra Snyder fdev->common.device_prep_dma_sg = fsl_dma_prep_sg; 139207934481SLinus Walleij fdev->common.device_tx_status = fsl_tx_status; 1393173acc7cSZhang Wei fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; 1394bbea0b6eSIra Snyder fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; 1395c3635c78SLinus Walleij fdev->common.device_control = fsl_dma_device_control; 1396e7a29151SIra Snyder fdev->common.dev = &op->dev; 1397173acc7cSZhang Wei 1398e2c8e425SLi Yang dma_set_mask(&(op->dev), DMA_BIT_MASK(36)); 1399e2c8e425SLi Yang 1400dd3daca1SJingoo Han platform_set_drvdata(op, fdev); 140177cd62e8STimur Tabi 1402e7a29151SIra Snyder /* 1403e7a29151SIra Snyder * We cannot use of_platform_bus_probe() because there is no 1404e7a29151SIra Snyder * of_platform_bus_remove(). Instead, we manually instantiate every DMA 140577cd62e8STimur Tabi * channel object. 
140677cd62e8STimur Tabi */ 140761c7a080SGrant Likely for_each_child_of_node(op->dev.of_node, child) { 1408e7a29151SIra Snyder if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) { 140977cd62e8STimur Tabi fsl_dma_chan_probe(fdev, child, 141077cd62e8STimur Tabi FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN, 141177cd62e8STimur Tabi "fsl,eloplus-dma-channel"); 1412e7a29151SIra Snyder } 1413e7a29151SIra Snyder 1414e7a29151SIra Snyder if (of_device_is_compatible(child, "fsl,elo-dma-channel")) { 141577cd62e8STimur Tabi fsl_dma_chan_probe(fdev, child, 141677cd62e8STimur Tabi FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN, 141777cd62e8STimur Tabi "fsl,elo-dma-channel"); 141877cd62e8STimur Tabi } 1419e7a29151SIra Snyder } 1420173acc7cSZhang Wei 1421d3f620b2SIra Snyder /* 1422d3f620b2SIra Snyder * Hookup the IRQ handler(s) 1423d3f620b2SIra Snyder * 1424d3f620b2SIra Snyder * If we have a per-controller interrupt, we prefer that to the 1425d3f620b2SIra Snyder * per-channel interrupts to reduce the number of shared interrupt 1426d3f620b2SIra Snyder * handlers on the same IRQ line 1427d3f620b2SIra Snyder */ 1428d3f620b2SIra Snyder err = fsldma_request_irqs(fdev); 1429d3f620b2SIra Snyder if (err) { 1430d3f620b2SIra Snyder dev_err(fdev->dev, "unable to request IRQs\n"); 1431d3f620b2SIra Snyder goto out_free_fdev; 1432d3f620b2SIra Snyder } 1433d3f620b2SIra Snyder 1434173acc7cSZhang Wei dma_async_device_register(&fdev->common); 1435173acc7cSZhang Wei return 0; 1436173acc7cSZhang Wei 1437e7a29151SIra Snyder out_free_fdev: 1438d3f620b2SIra Snyder irq_dispose_mapping(fdev->irq); 1439173acc7cSZhang Wei kfree(fdev); 1440e7a29151SIra Snyder out_return: 1441173acc7cSZhang Wei return err; 1442173acc7cSZhang Wei } 1443173acc7cSZhang Wei 14442dc11581SGrant Likely static int fsldma_of_remove(struct platform_device *op) 144577cd62e8STimur Tabi { 1446a4f56d4bSIra Snyder struct fsldma_device *fdev; 144777cd62e8STimur Tabi unsigned int i; 144877cd62e8STimur Tabi 1449dd3daca1SJingoo Han fdev = 
platform_get_drvdata(op); 145077cd62e8STimur Tabi dma_async_device_unregister(&fdev->common); 145177cd62e8STimur Tabi 1452d3f620b2SIra Snyder fsldma_free_irqs(fdev); 1453d3f620b2SIra Snyder 1454e7a29151SIra Snyder for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { 145577cd62e8STimur Tabi if (fdev->chan[i]) 145677cd62e8STimur Tabi fsl_dma_chan_remove(fdev->chan[i]); 1457e7a29151SIra Snyder } 145877cd62e8STimur Tabi 1459e7a29151SIra Snyder iounmap(fdev->regs); 146077cd62e8STimur Tabi kfree(fdev); 146177cd62e8STimur Tabi 146277cd62e8STimur Tabi return 0; 146377cd62e8STimur Tabi } 146477cd62e8STimur Tabi 146514c6a333SHongbo Zhang #ifdef CONFIG_PM 146614c6a333SHongbo Zhang static int fsldma_suspend_late(struct device *dev) 146714c6a333SHongbo Zhang { 146814c6a333SHongbo Zhang struct platform_device *pdev = to_platform_device(dev); 146914c6a333SHongbo Zhang struct fsldma_device *fdev = platform_get_drvdata(pdev); 147014c6a333SHongbo Zhang struct fsldma_chan *chan; 147114c6a333SHongbo Zhang int i; 147214c6a333SHongbo Zhang 147314c6a333SHongbo Zhang for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { 147414c6a333SHongbo Zhang chan = fdev->chan[i]; 147514c6a333SHongbo Zhang if (!chan) 147614c6a333SHongbo Zhang continue; 147714c6a333SHongbo Zhang 147814c6a333SHongbo Zhang spin_lock_bh(&chan->desc_lock); 147914c6a333SHongbo Zhang if (unlikely(!chan->idle)) 148014c6a333SHongbo Zhang goto out; 148114c6a333SHongbo Zhang chan->regs_save.mr = get_mr(chan); 148214c6a333SHongbo Zhang chan->pm_state = SUSPENDED; 148314c6a333SHongbo Zhang spin_unlock_bh(&chan->desc_lock); 148414c6a333SHongbo Zhang } 148514c6a333SHongbo Zhang return 0; 148614c6a333SHongbo Zhang 148714c6a333SHongbo Zhang out: 148814c6a333SHongbo Zhang for (; i >= 0; i--) { 148914c6a333SHongbo Zhang chan = fdev->chan[i]; 149014c6a333SHongbo Zhang if (!chan) 149114c6a333SHongbo Zhang continue; 149214c6a333SHongbo Zhang chan->pm_state = RUNNING; 149314c6a333SHongbo Zhang spin_unlock_bh(&chan->desc_lock); 
149414c6a333SHongbo Zhang } 149514c6a333SHongbo Zhang return -EBUSY; 149614c6a333SHongbo Zhang } 149714c6a333SHongbo Zhang 149814c6a333SHongbo Zhang static int fsldma_resume_early(struct device *dev) 149914c6a333SHongbo Zhang { 150014c6a333SHongbo Zhang struct platform_device *pdev = to_platform_device(dev); 150114c6a333SHongbo Zhang struct fsldma_device *fdev = platform_get_drvdata(pdev); 150214c6a333SHongbo Zhang struct fsldma_chan *chan; 150314c6a333SHongbo Zhang u32 mode; 150414c6a333SHongbo Zhang int i; 150514c6a333SHongbo Zhang 150614c6a333SHongbo Zhang for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { 150714c6a333SHongbo Zhang chan = fdev->chan[i]; 150814c6a333SHongbo Zhang if (!chan) 150914c6a333SHongbo Zhang continue; 151014c6a333SHongbo Zhang 151114c6a333SHongbo Zhang spin_lock_bh(&chan->desc_lock); 151214c6a333SHongbo Zhang mode = chan->regs_save.mr 151314c6a333SHongbo Zhang & ~FSL_DMA_MR_CS & ~FSL_DMA_MR_CC & ~FSL_DMA_MR_CA; 151414c6a333SHongbo Zhang set_mr(chan, mode); 151514c6a333SHongbo Zhang chan->pm_state = RUNNING; 151614c6a333SHongbo Zhang spin_unlock_bh(&chan->desc_lock); 151714c6a333SHongbo Zhang } 151814c6a333SHongbo Zhang 151914c6a333SHongbo Zhang return 0; 152014c6a333SHongbo Zhang } 152114c6a333SHongbo Zhang 152214c6a333SHongbo Zhang static const struct dev_pm_ops fsldma_pm_ops = { 152314c6a333SHongbo Zhang .suspend_late = fsldma_suspend_late, 152414c6a333SHongbo Zhang .resume_early = fsldma_resume_early, 152514c6a333SHongbo Zhang }; 152614c6a333SHongbo Zhang #endif 152714c6a333SHongbo Zhang 15284b1cf1faSMárton Németh static const struct of_device_id fsldma_of_ids[] = { 15298de7a7d9SHongbo Zhang { .compatible = "fsl,elo3-dma", }, 1530049c9d45SKumar Gala { .compatible = "fsl,eloplus-dma", }, 1531049c9d45SKumar Gala { .compatible = "fsl,elo-dma", }, 1532173acc7cSZhang Wei {} 1533173acc7cSZhang Wei }; 1534173acc7cSZhang Wei 15358faa7cf8SIra W. 
Snyder static struct platform_driver fsldma_of_driver = { 15364018294bSGrant Likely .driver = { 153777cd62e8STimur Tabi .name = "fsl-elo-dma", 15384018294bSGrant Likely .owner = THIS_MODULE, 15394018294bSGrant Likely .of_match_table = fsldma_of_ids, 154014c6a333SHongbo Zhang #ifdef CONFIG_PM 154114c6a333SHongbo Zhang .pm = &fsldma_pm_ops, 154214c6a333SHongbo Zhang #endif 15434018294bSGrant Likely }, 1544a4f56d4bSIra Snyder .probe = fsldma_of_probe, 1545a4f56d4bSIra Snyder .remove = fsldma_of_remove, 1546173acc7cSZhang Wei }; 1547173acc7cSZhang Wei 1548a4f56d4bSIra Snyder /*----------------------------------------------------------------------------*/ 1549a4f56d4bSIra Snyder /* Module Init / Exit */ 1550a4f56d4bSIra Snyder /*----------------------------------------------------------------------------*/ 1551a4f56d4bSIra Snyder 1552a4f56d4bSIra Snyder static __init int fsldma_init(void) 1553173acc7cSZhang Wei { 15548de7a7d9SHongbo Zhang pr_info("Freescale Elo series DMA driver\n"); 155500006124SGrant Likely return platform_driver_register(&fsldma_of_driver); 1556173acc7cSZhang Wei } 1557173acc7cSZhang Wei 1558a4f56d4bSIra Snyder static void __exit fsldma_exit(void) 155977cd62e8STimur Tabi { 156000006124SGrant Likely platform_driver_unregister(&fsldma_of_driver); 156177cd62e8STimur Tabi } 156277cd62e8STimur Tabi 1563a4f56d4bSIra Snyder subsys_initcall(fsldma_init); 1564a4f56d4bSIra Snyder module_exit(fsldma_exit); 156577cd62e8STimur Tabi 15668de7a7d9SHongbo Zhang MODULE_DESCRIPTION("Freescale Elo series DMA driver"); 156777cd62e8STimur Tabi MODULE_LICENSE("GPL"); 1568