1*31650d64SMarek Vasut /* 2*31650d64SMarek Vasut * Freescale i.MX28 APBH DMA driver 3*31650d64SMarek Vasut * 4*31650d64SMarek Vasut * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com> 5*31650d64SMarek Vasut * on behalf of DENX Software Engineering GmbH 6*31650d64SMarek Vasut * 7*31650d64SMarek Vasut * Based on code from LTIB: 8*31650d64SMarek Vasut * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved. 9*31650d64SMarek Vasut * 10*31650d64SMarek Vasut * This program is free software; you can redistribute it and/or modify 11*31650d64SMarek Vasut * it under the terms of the GNU General Public License as published by 12*31650d64SMarek Vasut * the Free Software Foundation; either version 2 of the License, or 13*31650d64SMarek Vasut * (at your option) any later version. 14*31650d64SMarek Vasut * 15*31650d64SMarek Vasut * This program is distributed in the hope that it will be useful, 16*31650d64SMarek Vasut * but WITHOUT ANY WARRANTY; without even the implied warranty of 17*31650d64SMarek Vasut * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18*31650d64SMarek Vasut * GNU General Public License for more details. 19*31650d64SMarek Vasut * 20*31650d64SMarek Vasut * You should have received a copy of the GNU General Public License along 21*31650d64SMarek Vasut * with this program; if not, write to the Free Software Foundation, Inc., 22*31650d64SMarek Vasut * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
 */

#include <linux/list.h>

#include <common.h>
#include <malloc.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/arch/sys_proto.h>
#include <asm/arch/dma.h>

/* Per-channel software state for every APBH DMA channel. */
static struct mxs_dma_chan mxs_dma_channels[MXS_MAX_DMA_CHANNELS];

/*
 * Test if the DMA channel is a valid, allocated channel.
 *
 * Returns 0 when the channel index is in range and the channel has been
 * reserved via mxs_dma_request(), -EINVAL otherwise.
 */
int mxs_dma_validate_chan(int channel)
{
	struct mxs_dma_chan *pchan;

	/* Reject out-of-range channel numbers. */
	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
		return -EINVAL;

	/* The channel must have been reserved by mxs_dma_request(). */
	pchan = mxs_dma_channels + channel;
	if (!(pchan->flags & MXS_DMA_FLAGS_ALLOCATED))
		return -EINVAL;

	return 0;
}

/*
 * Enable a DMA channel.
 *
 * If the given channel has any DMA descriptors on its active list, this
 * function causes the DMA hardware to begin processing them.
 *
 * This function marks the DMA channel as "busy," whether or not there are any
 * descriptors to process.
 */
int mxs_dma_enable(int channel)
{
	struct mx28_apbh_regs *apbh_regs =
		(struct mx28_apbh_regs *)MXS_APBH_BASE;
	unsigned int sem;
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	/* Nothing pending: just mark the channel busy and return. */
	if (pchan->pending_num == 0) {
		pchan->flags |= MXS_DMA_FLAGS_BUSY;
		return 0;
	}

	pdesc = list_first_entry(&pchan->active, struct mxs_dma_desc, node);
	if (pdesc == NULL)
		return -EFAULT;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY) {
		/*
		 * Channel is already running: new work can only be handed
		 * over if the first active descriptor chains onward.
		 */
		if (!(pdesc->cmd.data & MXS_DMA_DESC_CHAIN))
			return 0;

		/* Hardware already drained everything — nothing to extend. */
		sem = mxs_dma_read_semaphore(channel);
		if (sem == 0)
			return 0;

		if (sem == 1) {
			/*
			 * Exactly one command still in flight: point the
			 * next-command register at the following descriptor
			 * so the hardware continues into the new work.
			 */
			pdesc = list_entry(pdesc->node.next,
					   struct mxs_dma_desc, node);
			writel(mxs_dma_cmd_address(pdesc),
				&apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		}
		/* Credit the hardware semaphore with the new commands. */
		writel(pchan->pending_num,
			&apbh_regs->ch[channel].hw_apbh_ch_sema);
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
	} else {
		/*
		 * Channel is idle: load the first command address, set the
		 * semaphore, then clear the channel's CLKGATE bit to let the
		 * hardware run.
		 */
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
		writel(mxs_dma_cmd_address(pdesc),
			&apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		writel(pchan->active_num,
			&apbh_regs->ch[channel].hw_apbh_ch_sema);
		writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
			&apbh_regs->hw_apbh_ctrl0_clr);
	}

	pchan->flags |= MXS_DMA_FLAGS_BUSY;
	return 0;
}

/*
 * Disable a DMA channel.
 *
 * This function shuts down a DMA channel and marks it as "not busy." Any
 * descriptors on the active list are immediately moved to the head of the
 * "done" list, whether or not they have actually been processed by the
 * hardware. The "ready" flags of these descriptors are NOT cleared, so they
 * still appear to be active.
 *
 * This function immediately shuts down a DMA channel's hardware, aborting any
 * I/O that may be in progress, potentially leaving I/O hardware in an undefined
 * state. It is unwise to call this function if there is ANY chance the hardware
 * is still processing a command.
134*31650d64SMarek Vasut */ 135*31650d64SMarek Vasut int mxs_dma_disable(int channel) 136*31650d64SMarek Vasut { 137*31650d64SMarek Vasut struct mxs_dma_chan *pchan; 138*31650d64SMarek Vasut struct mx28_apbh_regs *apbh_regs = 139*31650d64SMarek Vasut (struct mx28_apbh_regs *)MXS_APBH_BASE; 140*31650d64SMarek Vasut int ret; 141*31650d64SMarek Vasut 142*31650d64SMarek Vasut ret = mxs_dma_validate_chan(channel); 143*31650d64SMarek Vasut if (ret) 144*31650d64SMarek Vasut return ret; 145*31650d64SMarek Vasut 146*31650d64SMarek Vasut pchan = mxs_dma_channels + channel; 147*31650d64SMarek Vasut 148*31650d64SMarek Vasut if (!(pchan->flags & MXS_DMA_FLAGS_BUSY)) 149*31650d64SMarek Vasut return -EINVAL; 150*31650d64SMarek Vasut 151*31650d64SMarek Vasut writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET), 152*31650d64SMarek Vasut &apbh_regs->hw_apbh_ctrl0_set); 153*31650d64SMarek Vasut 154*31650d64SMarek Vasut pchan->flags &= ~MXS_DMA_FLAGS_BUSY; 155*31650d64SMarek Vasut pchan->active_num = 0; 156*31650d64SMarek Vasut pchan->pending_num = 0; 157*31650d64SMarek Vasut list_splice_init(&pchan->active, &pchan->done); 158*31650d64SMarek Vasut 159*31650d64SMarek Vasut return 0; 160*31650d64SMarek Vasut } 161*31650d64SMarek Vasut 162*31650d64SMarek Vasut /* 163*31650d64SMarek Vasut * Resets the DMA channel hardware. 
164*31650d64SMarek Vasut */ 165*31650d64SMarek Vasut int mxs_dma_reset(int channel) 166*31650d64SMarek Vasut { 167*31650d64SMarek Vasut struct mx28_apbh_regs *apbh_regs = 168*31650d64SMarek Vasut (struct mx28_apbh_regs *)MXS_APBH_BASE; 169*31650d64SMarek Vasut int ret; 170*31650d64SMarek Vasut 171*31650d64SMarek Vasut ret = mxs_dma_validate_chan(channel); 172*31650d64SMarek Vasut if (ret) 173*31650d64SMarek Vasut return ret; 174*31650d64SMarek Vasut 175*31650d64SMarek Vasut writel(1 << (channel + APBH_CHANNEL_CTRL_RESET_CHANNEL_OFFSET), 176*31650d64SMarek Vasut &apbh_regs->hw_apbh_channel_ctrl_set); 177*31650d64SMarek Vasut 178*31650d64SMarek Vasut return 0; 179*31650d64SMarek Vasut } 180*31650d64SMarek Vasut 181*31650d64SMarek Vasut /* 182*31650d64SMarek Vasut * Freeze a DMA channel. 183*31650d64SMarek Vasut * 184*31650d64SMarek Vasut * This function causes the channel to continuously fail arbitration for bus 185*31650d64SMarek Vasut * access, which halts all forward progress without losing any state. A call to 186*31650d64SMarek Vasut * mxs_dma_unfreeze() will cause the channel to continue its current operation 187*31650d64SMarek Vasut * with no ill effect. 188*31650d64SMarek Vasut */ 189*31650d64SMarek Vasut int mxs_dma_freeze(int channel) 190*31650d64SMarek Vasut { 191*31650d64SMarek Vasut struct mx28_apbh_regs *apbh_regs = 192*31650d64SMarek Vasut (struct mx28_apbh_regs *)MXS_APBH_BASE; 193*31650d64SMarek Vasut int ret; 194*31650d64SMarek Vasut 195*31650d64SMarek Vasut ret = mxs_dma_validate_chan(channel); 196*31650d64SMarek Vasut if (ret) 197*31650d64SMarek Vasut return ret; 198*31650d64SMarek Vasut 199*31650d64SMarek Vasut writel(1 << (channel + APBH_CHANNEL_CTRL_FREEZE_CHANNEL_OFFSET), 200*31650d64SMarek Vasut &apbh_regs->hw_apbh_channel_ctrl_set); 201*31650d64SMarek Vasut 202*31650d64SMarek Vasut return 0; 203*31650d64SMarek Vasut } 204*31650d64SMarek Vasut 205*31650d64SMarek Vasut /* 206*31650d64SMarek Vasut * Unfreeze a DMA channel. 
207*31650d64SMarek Vasut * 208*31650d64SMarek Vasut * This function reverses the effect of mxs_dma_freeze(), enabling the DMA 209*31650d64SMarek Vasut * channel to continue from where it was frozen. 210*31650d64SMarek Vasut */ 211*31650d64SMarek Vasut int mxs_dma_unfreeze(int channel) 212*31650d64SMarek Vasut { 213*31650d64SMarek Vasut struct mx28_apbh_regs *apbh_regs = 214*31650d64SMarek Vasut (struct mx28_apbh_regs *)MXS_APBH_BASE; 215*31650d64SMarek Vasut int ret; 216*31650d64SMarek Vasut 217*31650d64SMarek Vasut ret = mxs_dma_validate_chan(channel); 218*31650d64SMarek Vasut if (ret) 219*31650d64SMarek Vasut return ret; 220*31650d64SMarek Vasut 221*31650d64SMarek Vasut writel(1 << (channel + APBH_CHANNEL_CTRL_FREEZE_CHANNEL_OFFSET), 222*31650d64SMarek Vasut &apbh_regs->hw_apbh_channel_ctrl_clr); 223*31650d64SMarek Vasut 224*31650d64SMarek Vasut return 0; 225*31650d64SMarek Vasut } 226*31650d64SMarek Vasut 227*31650d64SMarek Vasut /* 228*31650d64SMarek Vasut * Read a DMA channel's hardware semaphore. 229*31650d64SMarek Vasut * 230*31650d64SMarek Vasut * As used by the MXS platform's DMA software, the DMA channel's hardware 231*31650d64SMarek Vasut * semaphore reflects the number of DMA commands the hardware will process, but 232*31650d64SMarek Vasut * has not yet finished. This is a volatile value read directly from hardware, 233*31650d64SMarek Vasut * so it must be be viewed as immediately stale. 234*31650d64SMarek Vasut * 235*31650d64SMarek Vasut * If the channel is not marked busy, or has finished processing all its 236*31650d64SMarek Vasut * commands, this value should be zero. 237*31650d64SMarek Vasut * 238*31650d64SMarek Vasut * See mxs_dma_append() for details on how DMA command blocks must be configured 239*31650d64SMarek Vasut * to maintain the expected behavior of the semaphore's value. 
240*31650d64SMarek Vasut */ 241*31650d64SMarek Vasut int mxs_dma_read_semaphore(int channel) 242*31650d64SMarek Vasut { 243*31650d64SMarek Vasut struct mx28_apbh_regs *apbh_regs = 244*31650d64SMarek Vasut (struct mx28_apbh_regs *)MXS_APBH_BASE; 245*31650d64SMarek Vasut uint32_t tmp; 246*31650d64SMarek Vasut int ret; 247*31650d64SMarek Vasut 248*31650d64SMarek Vasut ret = mxs_dma_validate_chan(channel); 249*31650d64SMarek Vasut if (ret) 250*31650d64SMarek Vasut return ret; 251*31650d64SMarek Vasut 252*31650d64SMarek Vasut tmp = readl(&apbh_regs->ch[channel].hw_apbh_ch_sema); 253*31650d64SMarek Vasut 254*31650d64SMarek Vasut tmp &= APBH_CHn_SEMA_PHORE_MASK; 255*31650d64SMarek Vasut tmp >>= APBH_CHn_SEMA_PHORE_OFFSET; 256*31650d64SMarek Vasut 257*31650d64SMarek Vasut return tmp; 258*31650d64SMarek Vasut } 259*31650d64SMarek Vasut 260*31650d64SMarek Vasut /* 261*31650d64SMarek Vasut * Enable or disable DMA interrupt. 262*31650d64SMarek Vasut * 263*31650d64SMarek Vasut * This function enables the given DMA channel to interrupt the CPU. 
264*31650d64SMarek Vasut */ 265*31650d64SMarek Vasut int mxs_dma_enable_irq(int channel, int enable) 266*31650d64SMarek Vasut { 267*31650d64SMarek Vasut struct mx28_apbh_regs *apbh_regs = 268*31650d64SMarek Vasut (struct mx28_apbh_regs *)MXS_APBH_BASE; 269*31650d64SMarek Vasut int ret; 270*31650d64SMarek Vasut 271*31650d64SMarek Vasut ret = mxs_dma_validate_chan(channel); 272*31650d64SMarek Vasut if (ret) 273*31650d64SMarek Vasut return ret; 274*31650d64SMarek Vasut 275*31650d64SMarek Vasut if (enable) 276*31650d64SMarek Vasut writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET), 277*31650d64SMarek Vasut &apbh_regs->hw_apbh_ctrl1_set); 278*31650d64SMarek Vasut else 279*31650d64SMarek Vasut writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET), 280*31650d64SMarek Vasut &apbh_regs->hw_apbh_ctrl1_clr); 281*31650d64SMarek Vasut 282*31650d64SMarek Vasut return 0; 283*31650d64SMarek Vasut } 284*31650d64SMarek Vasut 285*31650d64SMarek Vasut /* 286*31650d64SMarek Vasut * Check if a DMA interrupt is pending. 287*31650d64SMarek Vasut */ 288*31650d64SMarek Vasut int mxs_dma_irq_is_pending(int channel) 289*31650d64SMarek Vasut { 290*31650d64SMarek Vasut struct mx28_apbh_regs *apbh_regs = 291*31650d64SMarek Vasut (struct mx28_apbh_regs *)MXS_APBH_BASE; 292*31650d64SMarek Vasut uint32_t tmp; 293*31650d64SMarek Vasut int ret; 294*31650d64SMarek Vasut 295*31650d64SMarek Vasut ret = mxs_dma_validate_chan(channel); 296*31650d64SMarek Vasut if (ret) 297*31650d64SMarek Vasut return ret; 298*31650d64SMarek Vasut 299*31650d64SMarek Vasut tmp = readl(&apbh_regs->hw_apbh_ctrl1); 300*31650d64SMarek Vasut tmp |= readl(&apbh_regs->hw_apbh_ctrl2); 301*31650d64SMarek Vasut 302*31650d64SMarek Vasut return (tmp >> channel) & 1; 303*31650d64SMarek Vasut } 304*31650d64SMarek Vasut 305*31650d64SMarek Vasut /* 306*31650d64SMarek Vasut * Clear DMA interrupt. 
307*31650d64SMarek Vasut * 308*31650d64SMarek Vasut * The software that is using the DMA channel must register to receive its 309*31650d64SMarek Vasut * interrupts and, when they arrive, must call this function to clear them. 310*31650d64SMarek Vasut */ 311*31650d64SMarek Vasut int mxs_dma_ack_irq(int channel) 312*31650d64SMarek Vasut { 313*31650d64SMarek Vasut struct mx28_apbh_regs *apbh_regs = 314*31650d64SMarek Vasut (struct mx28_apbh_regs *)MXS_APBH_BASE; 315*31650d64SMarek Vasut int ret; 316*31650d64SMarek Vasut 317*31650d64SMarek Vasut ret = mxs_dma_validate_chan(channel); 318*31650d64SMarek Vasut if (ret) 319*31650d64SMarek Vasut return ret; 320*31650d64SMarek Vasut 321*31650d64SMarek Vasut writel(1 << channel, &apbh_regs->hw_apbh_ctrl1_clr); 322*31650d64SMarek Vasut writel(1 << channel, &apbh_regs->hw_apbh_ctrl2_clr); 323*31650d64SMarek Vasut 324*31650d64SMarek Vasut return 0; 325*31650d64SMarek Vasut } 326*31650d64SMarek Vasut 327*31650d64SMarek Vasut /* 328*31650d64SMarek Vasut * Request to reserve a DMA channel 329*31650d64SMarek Vasut */ 330*31650d64SMarek Vasut int mxs_dma_request(int channel) 331*31650d64SMarek Vasut { 332*31650d64SMarek Vasut struct mxs_dma_chan *pchan; 333*31650d64SMarek Vasut 334*31650d64SMarek Vasut if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS)) 335*31650d64SMarek Vasut return -EINVAL; 336*31650d64SMarek Vasut 337*31650d64SMarek Vasut pchan = mxs_dma_channels + channel; 338*31650d64SMarek Vasut if ((pchan->flags & MXS_DMA_FLAGS_VALID) != MXS_DMA_FLAGS_VALID) 339*31650d64SMarek Vasut return -ENODEV; 340*31650d64SMarek Vasut 341*31650d64SMarek Vasut if (pchan->flags & MXS_DMA_FLAGS_ALLOCATED) 342*31650d64SMarek Vasut return -EBUSY; 343*31650d64SMarek Vasut 344*31650d64SMarek Vasut pchan->flags |= MXS_DMA_FLAGS_ALLOCATED; 345*31650d64SMarek Vasut pchan->active_num = 0; 346*31650d64SMarek Vasut pchan->pending_num = 0; 347*31650d64SMarek Vasut 348*31650d64SMarek Vasut INIT_LIST_HEAD(&pchan->active); 349*31650d64SMarek Vasut 
INIT_LIST_HEAD(&pchan->done); 350*31650d64SMarek Vasut 351*31650d64SMarek Vasut return 0; 352*31650d64SMarek Vasut } 353*31650d64SMarek Vasut 354*31650d64SMarek Vasut /* 355*31650d64SMarek Vasut * Release a DMA channel. 356*31650d64SMarek Vasut * 357*31650d64SMarek Vasut * This function releases a DMA channel from its current owner. 358*31650d64SMarek Vasut * 359*31650d64SMarek Vasut * The channel will NOT be released if it's marked "busy" (see 360*31650d64SMarek Vasut * mxs_dma_enable()). 361*31650d64SMarek Vasut */ 362*31650d64SMarek Vasut int mxs_dma_release(int channel) 363*31650d64SMarek Vasut { 364*31650d64SMarek Vasut struct mxs_dma_chan *pchan; 365*31650d64SMarek Vasut int ret; 366*31650d64SMarek Vasut 367*31650d64SMarek Vasut ret = mxs_dma_validate_chan(channel); 368*31650d64SMarek Vasut if (ret) 369*31650d64SMarek Vasut return ret; 370*31650d64SMarek Vasut 371*31650d64SMarek Vasut pchan = mxs_dma_channels + channel; 372*31650d64SMarek Vasut 373*31650d64SMarek Vasut if (pchan->flags & MXS_DMA_FLAGS_BUSY) 374*31650d64SMarek Vasut return -EBUSY; 375*31650d64SMarek Vasut 376*31650d64SMarek Vasut pchan->dev = 0; 377*31650d64SMarek Vasut pchan->active_num = 0; 378*31650d64SMarek Vasut pchan->pending_num = 0; 379*31650d64SMarek Vasut pchan->flags &= ~MXS_DMA_FLAGS_ALLOCATED; 380*31650d64SMarek Vasut 381*31650d64SMarek Vasut return 0; 382*31650d64SMarek Vasut } 383*31650d64SMarek Vasut 384*31650d64SMarek Vasut /* 385*31650d64SMarek Vasut * Allocate DMA descriptor 386*31650d64SMarek Vasut */ 387*31650d64SMarek Vasut struct mxs_dma_desc *mxs_dma_desc_alloc(void) 388*31650d64SMarek Vasut { 389*31650d64SMarek Vasut struct mxs_dma_desc *pdesc; 390*31650d64SMarek Vasut 391*31650d64SMarek Vasut pdesc = memalign(MXS_DMA_ALIGNMENT, sizeof(struct mxs_dma_desc)); 392*31650d64SMarek Vasut 393*31650d64SMarek Vasut if (pdesc == NULL) 394*31650d64SMarek Vasut return NULL; 395*31650d64SMarek Vasut 396*31650d64SMarek Vasut memset(pdesc, 0, sizeof(*pdesc)); 397*31650d64SMarek 
Vasut pdesc->address = (dma_addr_t)pdesc; 398*31650d64SMarek Vasut 399*31650d64SMarek Vasut return pdesc; 400*31650d64SMarek Vasut }; 401*31650d64SMarek Vasut 402*31650d64SMarek Vasut /* 403*31650d64SMarek Vasut * Free DMA descriptor 404*31650d64SMarek Vasut */ 405*31650d64SMarek Vasut void mxs_dma_desc_free(struct mxs_dma_desc *pdesc) 406*31650d64SMarek Vasut { 407*31650d64SMarek Vasut if (pdesc == NULL) 408*31650d64SMarek Vasut return; 409*31650d64SMarek Vasut 410*31650d64SMarek Vasut free(pdesc); 411*31650d64SMarek Vasut } 412*31650d64SMarek Vasut 413*31650d64SMarek Vasut /* 414*31650d64SMarek Vasut * Return the address of the command within a descriptor. 415*31650d64SMarek Vasut */ 416*31650d64SMarek Vasut unsigned int mxs_dma_cmd_address(struct mxs_dma_desc *desc) 417*31650d64SMarek Vasut { 418*31650d64SMarek Vasut return desc->address + offsetof(struct mxs_dma_desc, cmd); 419*31650d64SMarek Vasut } 420*31650d64SMarek Vasut 421*31650d64SMarek Vasut /* 422*31650d64SMarek Vasut * Check if descriptor is on a channel's active list. 423*31650d64SMarek Vasut * 424*31650d64SMarek Vasut * This function returns the state of a descriptor's "ready" flag. This flag is 425*31650d64SMarek Vasut * usually set only if the descriptor appears on a channel's active list. The 426*31650d64SMarek Vasut * descriptor may or may not have already been processed by the hardware. 427*31650d64SMarek Vasut * 428*31650d64SMarek Vasut * The "ready" flag is set when the descriptor is submitted to a channel by a 429*31650d64SMarek Vasut * call to mxs_dma_append() or mxs_dma_append_list(). The "ready" flag is 430*31650d64SMarek Vasut * cleared when a processed descriptor is moved off the active list by a call 431*31650d64SMarek Vasut * to mxs_dma_finish(). The "ready" flag is NOT cleared if the descriptor is 432*31650d64SMarek Vasut * aborted by a call to mxs_dma_disable(). 
 */
int mxs_dma_desc_pending(struct mxs_dma_desc *pdesc)
{
	return pdesc->flags & MXS_DMA_DESC_READY;
}

/*
 * Add a DMA descriptor to a channel.
 *
 * If the descriptor list for this channel is not empty, this function sets the
 * CHAIN bit and the NEXTCMD_ADDR fields in the last descriptor's DMA command so
 * it will chain to the new descriptor's command.
 *
 * Then, this function marks the new descriptor as "ready," adds it to the end
 * of the active descriptor list, and increments the count of pending
 * descriptors.
 *
 * The MXS platform DMA software imposes some rules on DMA commands to maintain
 * important invariants. These rules are NOT checked, but they must be carefully
 * applied by software that uses MXS DMA channels.
 *
 * Invariant:
 *     The DMA channel's hardware semaphore must reflect the number of DMA
 *     commands the hardware will process, but has not yet finished.
 *
 * Explanation:
 *     A DMA channel begins processing commands when its hardware semaphore is
 *     written with a value greater than zero, and it stops processing commands
 *     when the semaphore returns to zero.
 *
 *     When a channel finishes a DMA command, it will decrement its semaphore if
 *     the DECREMENT_SEMAPHORE bit is set in that command's flags bits.
 *
 *     In principle, it's not necessary for the DECREMENT_SEMAPHORE to be set,
 *     unless it suits the purposes of the software. For example, one could
 *     construct a series of five DMA commands, with the DECREMENT_SEMAPHORE
 *     bit set only in the last one. Then, setting the DMA channel's hardware
 *     semaphore to one would cause the entire series of five commands to be
 *     processed. However, this example would violate the invariant given above.
 *
 * Rule:
 *     ALL DMA commands MUST have the DECREMENT_SEMAPHORE bit set so that the
 *     DMA channel's hardware semaphore will be decremented EVERY time a command
 *     is processed.
 */
int mxs_dma_desc_append(int channel, struct mxs_dma_desc *pdesc)
{
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *last;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	/* Start the new descriptor as a self-contained (FIRST+LAST) command
	 * whose next-command pointer refers back to itself. */
	pdesc->cmd.next = mxs_dma_cmd_address(pdesc);
	pdesc->flags |= MXS_DMA_DESC_FIRST | MXS_DMA_DESC_LAST;

	if (!list_empty(&pchan->active)) {
		/* Chain the previous tail descriptor onto the new one. */
		last = list_entry(pchan->active.prev, struct mxs_dma_desc,
					node);

		pdesc->flags &= ~MXS_DMA_DESC_FIRST;
		last->flags &= ~MXS_DMA_DESC_LAST;

		last->cmd.next = mxs_dma_cmd_address(pdesc);
		last->cmd.data |= MXS_DMA_DESC_CHAIN;
	}
	pdesc->flags |= MXS_DMA_DESC_READY;
	/* Only a chain head (FIRST) counts as a new pending command group. */
	if (pdesc->flags & MXS_DMA_DESC_FIRST)
		pchan->pending_num++;
	list_add_tail(&pdesc->node, &pchan->active);

	return ret;
}

/*
 * Retrieve processed DMA descriptors.
 *
 * This function moves all the descriptors from the DMA channel's "done" list to
 * the head of the given list.
 */
int mxs_dma_get_finished(int channel, struct list_head *head)
{
	struct mxs_dma_chan *pchan;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	/* Without a destination list there is nothing to hand back. */
	if (head == NULL)
		return 0;

	pchan = mxs_dma_channels + channel;

	list_splice(&pchan->done, head);

	return 0;
}

/*
 * Clean up processed DMA descriptors.
 *
 * This function removes processed DMA descriptors from the "active" list. Pass
 * in a non-NULL list head to get the descriptors moved to your list. Pass NULL
 * to get the descriptors moved to the channel's "done" list. Descriptors on
 * the "done" list can be retrieved with mxs_dma_get_finished().
 *
 * This function marks the DMA channel as "not busy" if no unprocessed
 * descriptors remain on the "active" list.
546*31650d64SMarek Vasut */ 547*31650d64SMarek Vasut int mxs_dma_finish(int channel, struct list_head *head) 548*31650d64SMarek Vasut { 549*31650d64SMarek Vasut int sem; 550*31650d64SMarek Vasut struct mxs_dma_chan *pchan; 551*31650d64SMarek Vasut struct list_head *p, *q; 552*31650d64SMarek Vasut struct mxs_dma_desc *pdesc; 553*31650d64SMarek Vasut int ret; 554*31650d64SMarek Vasut 555*31650d64SMarek Vasut ret = mxs_dma_validate_chan(channel); 556*31650d64SMarek Vasut if (ret) 557*31650d64SMarek Vasut return ret; 558*31650d64SMarek Vasut 559*31650d64SMarek Vasut pchan = mxs_dma_channels + channel; 560*31650d64SMarek Vasut 561*31650d64SMarek Vasut sem = mxs_dma_read_semaphore(channel); 562*31650d64SMarek Vasut if (sem < 0) 563*31650d64SMarek Vasut return sem; 564*31650d64SMarek Vasut 565*31650d64SMarek Vasut if (sem == pchan->active_num) 566*31650d64SMarek Vasut return 0; 567*31650d64SMarek Vasut 568*31650d64SMarek Vasut list_for_each_safe(p, q, &pchan->active) { 569*31650d64SMarek Vasut if ((pchan->active_num) <= sem) 570*31650d64SMarek Vasut break; 571*31650d64SMarek Vasut 572*31650d64SMarek Vasut pdesc = list_entry(p, struct mxs_dma_desc, node); 573*31650d64SMarek Vasut pdesc->flags &= ~MXS_DMA_DESC_READY; 574*31650d64SMarek Vasut 575*31650d64SMarek Vasut if (head) 576*31650d64SMarek Vasut list_move_tail(p, head); 577*31650d64SMarek Vasut else 578*31650d64SMarek Vasut list_move_tail(p, &pchan->done); 579*31650d64SMarek Vasut 580*31650d64SMarek Vasut if (pdesc->flags & MXS_DMA_DESC_LAST) 581*31650d64SMarek Vasut pchan->active_num--; 582*31650d64SMarek Vasut } 583*31650d64SMarek Vasut 584*31650d64SMarek Vasut if (sem == 0) 585*31650d64SMarek Vasut pchan->flags &= ~MXS_DMA_FLAGS_BUSY; 586*31650d64SMarek Vasut 587*31650d64SMarek Vasut return 0; 588*31650d64SMarek Vasut } 589*31650d64SMarek Vasut 590*31650d64SMarek Vasut /* 591*31650d64SMarek Vasut * Wait for DMA channel to complete 592*31650d64SMarek Vasut */ 593*31650d64SMarek Vasut int 
mxs_dma_wait_complete(uint32_t timeout, unsigned int chan) 594*31650d64SMarek Vasut { 595*31650d64SMarek Vasut struct mx28_apbh_regs *apbh_regs = 596*31650d64SMarek Vasut (struct mx28_apbh_regs *)MXS_APBH_BASE; 597*31650d64SMarek Vasut int ret; 598*31650d64SMarek Vasut 599*31650d64SMarek Vasut ret = mxs_dma_validate_chan(chan); 600*31650d64SMarek Vasut if (ret) 601*31650d64SMarek Vasut return ret; 602*31650d64SMarek Vasut 603*31650d64SMarek Vasut if (mx28_wait_mask_set(&apbh_regs->hw_apbh_ctrl1_reg, 604*31650d64SMarek Vasut 1 << chan, timeout)) { 605*31650d64SMarek Vasut ret = -ETIMEDOUT; 606*31650d64SMarek Vasut mxs_dma_reset(chan); 607*31650d64SMarek Vasut } 608*31650d64SMarek Vasut 609*31650d64SMarek Vasut return 0; 610*31650d64SMarek Vasut } 611*31650d64SMarek Vasut 612*31650d64SMarek Vasut /* 613*31650d64SMarek Vasut * Execute the DMA channel 614*31650d64SMarek Vasut */ 615*31650d64SMarek Vasut int mxs_dma_go(int chan) 616*31650d64SMarek Vasut { 617*31650d64SMarek Vasut uint32_t timeout = 10000; 618*31650d64SMarek Vasut int ret; 619*31650d64SMarek Vasut 620*31650d64SMarek Vasut LIST_HEAD(tmp_desc_list); 621*31650d64SMarek Vasut 622*31650d64SMarek Vasut mxs_dma_enable_irq(chan, 1); 623*31650d64SMarek Vasut mxs_dma_enable(chan); 624*31650d64SMarek Vasut 625*31650d64SMarek Vasut /* Wait for DMA to finish. */ 626*31650d64SMarek Vasut ret = mxs_dma_wait_complete(timeout, chan); 627*31650d64SMarek Vasut 628*31650d64SMarek Vasut /* Clear out the descriptors we just ran. */ 629*31650d64SMarek Vasut mxs_dma_finish(chan, &tmp_desc_list); 630*31650d64SMarek Vasut 631*31650d64SMarek Vasut /* Shut the DMA channel down. 
*/ 632*31650d64SMarek Vasut mxs_dma_ack_irq(chan); 633*31650d64SMarek Vasut mxs_dma_reset(chan); 634*31650d64SMarek Vasut mxs_dma_enable_irq(chan, 0); 635*31650d64SMarek Vasut mxs_dma_disable(chan); 636*31650d64SMarek Vasut 637*31650d64SMarek Vasut return ret; 638*31650d64SMarek Vasut } 639*31650d64SMarek Vasut 640*31650d64SMarek Vasut /* 641*31650d64SMarek Vasut * Initialize the DMA hardware 642*31650d64SMarek Vasut */ 643*31650d64SMarek Vasut int mxs_dma_init(void) 644*31650d64SMarek Vasut { 645*31650d64SMarek Vasut struct mx28_apbh_regs *apbh_regs = 646*31650d64SMarek Vasut (struct mx28_apbh_regs *)MXS_APBH_BASE; 647*31650d64SMarek Vasut struct mxs_dma_chan *pchan; 648*31650d64SMarek Vasut int ret, channel; 649*31650d64SMarek Vasut 650*31650d64SMarek Vasut mx28_reset_block(&apbh_regs->hw_apbh_ctrl0_reg); 651*31650d64SMarek Vasut 652*31650d64SMarek Vasut #ifdef CONFIG_APBH_DMA_BURST8 653*31650d64SMarek Vasut writel(APBH_CTRL0_AHB_BURST8_EN, 654*31650d64SMarek Vasut &apbh_regs->hw_apbh_ctrl0_set); 655*31650d64SMarek Vasut #else 656*31650d64SMarek Vasut writel(APBH_CTRL0_AHB_BURST8_EN, 657*31650d64SMarek Vasut &apbh_regs->hw_apbh_ctrl0_clr); 658*31650d64SMarek Vasut #endif 659*31650d64SMarek Vasut 660*31650d64SMarek Vasut #ifdef CONFIG_APBH_DMA_BURST 661*31650d64SMarek Vasut writel(APBH_CTRL0_APB_BURST_EN, 662*31650d64SMarek Vasut &apbh_regs->hw_apbh_ctrl0_set); 663*31650d64SMarek Vasut #else 664*31650d64SMarek Vasut writel(APBH_CTRL0_APB_BURST_EN, 665*31650d64SMarek Vasut &apbh_regs->hw_apbh_ctrl0_clr); 666*31650d64SMarek Vasut #endif 667*31650d64SMarek Vasut 668*31650d64SMarek Vasut for (channel = 0; channel < MXS_MAX_DMA_CHANNELS; channel++) { 669*31650d64SMarek Vasut pchan = mxs_dma_channels + channel; 670*31650d64SMarek Vasut pchan->flags = MXS_DMA_FLAGS_VALID; 671*31650d64SMarek Vasut 672*31650d64SMarek Vasut ret = mxs_dma_request(channel); 673*31650d64SMarek Vasut 674*31650d64SMarek Vasut if (ret) { 675*31650d64SMarek Vasut printf("MXS DMA: Can't acquire DMA 
channel %i\n", 676*31650d64SMarek Vasut channel); 677*31650d64SMarek Vasut 678*31650d64SMarek Vasut goto err; 679*31650d64SMarek Vasut } 680*31650d64SMarek Vasut 681*31650d64SMarek Vasut mxs_dma_reset(channel); 682*31650d64SMarek Vasut mxs_dma_ack_irq(channel); 683*31650d64SMarek Vasut } 684*31650d64SMarek Vasut 685*31650d64SMarek Vasut return 0; 686*31650d64SMarek Vasut 687*31650d64SMarek Vasut err: 688*31650d64SMarek Vasut while (--channel >= 0) 689*31650d64SMarek Vasut mxs_dma_release(channel); 690*31650d64SMarek Vasut return ret; 691*31650d64SMarek Vasut } 692