10fb6f739SPiotr Ziecik /* 20fb6f739SPiotr Ziecik * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008. 30fb6f739SPiotr Ziecik * Copyright (C) Semihalf 2009 40fb6f739SPiotr Ziecik * 50fb6f739SPiotr Ziecik * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description 60fb6f739SPiotr Ziecik * (defines, structures and comments) was taken from MPC5121 DMA driver 70fb6f739SPiotr Ziecik * written by Hongjun Chen <hong-jun.chen@freescale.com>. 80fb6f739SPiotr Ziecik * 90fb6f739SPiotr Ziecik * Approved as OSADL project by a majority of OSADL members and funded 100fb6f739SPiotr Ziecik * by OSADL membership fees in 2009; for details see www.osadl.org. 110fb6f739SPiotr Ziecik * 120fb6f739SPiotr Ziecik * This program is free software; you can redistribute it and/or modify it 130fb6f739SPiotr Ziecik * under the terms of the GNU General Public License as published by the Free 140fb6f739SPiotr Ziecik * Software Foundation; either version 2 of the License, or (at your option) 150fb6f739SPiotr Ziecik * any later version. 160fb6f739SPiotr Ziecik * 170fb6f739SPiotr Ziecik * This program is distributed in the hope that it will be useful, but WITHOUT 180fb6f739SPiotr Ziecik * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 190fb6f739SPiotr Ziecik * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 200fb6f739SPiotr Ziecik * more details. 210fb6f739SPiotr Ziecik * 220fb6f739SPiotr Ziecik * You should have received a copy of the GNU General Public License along with 230fb6f739SPiotr Ziecik * this program; if not, write to the Free Software Foundation, Inc., 59 240fb6f739SPiotr Ziecik * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 250fb6f739SPiotr Ziecik * 260fb6f739SPiotr Ziecik * The full GNU General Public License is included in this distribution in the 270fb6f739SPiotr Ziecik * file called COPYING. 
 */

/*
 * This is initial version of MPC5121 DMA driver. Only memory to memory
 * transfers are supported (tested using dmatest module).
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

#include <linux/random.h>

/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Macro definitions */
#define MPC_DMA_CHANNELS	64
#define MPC_DMA_TCD_OFFSET	0x1000

/* Arbitration mode of group and channel (DMACR bits) */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)

/* Error codes (DMAES bits) */
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
/* Extract the number of the channel that last errored */
#define MPC_DMA_DMAES_ERRCHN(err) \
				(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)

/* Encoded transfer sizes for the TCD ssize/dsize fields */
#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05

/*
 * MPC5121 DMA engine registers.
 * Layout mirrors the device's register map, hence the packed attribute;
 * field order and widths must not be changed.
 */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x00 */
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */
	/* 0x08 */
	u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
	u32 dmaerql;		/* DMA enable request low(channels 31~0) */
	u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
	u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */
	/* 0x18 */
	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */
	/* 0x1c */
	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */
	/* 0x20 */
	u32 dmainth;		/* DMA interrupt request high(ch63~32) */
	u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
	u32 dmaerrh;		/* DMA error high(ch63~32) */
	u32 dmaerrl;		/* DMA error low(ch31~0) */
	/* 0x30 */
	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
	u32 dmaihsa;		/* DMA interrupt high select AXE(ch63~32) */
	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
	/* 0x40 ~ 0xff */
	u32 reserve0[48];	/* Reserved */
	/* 0x100 */
	u8 dchpri[MPC_DMA_CHANNELS];
				/* DMA channels(0~63) priority */
};

/*
 * Transfer Control Descriptor (TCD).
 * Hardware-defined layout: bitfield order is assumed to match the
 * device's big-endian register image — do not reorder fields.
 */
struct __attribute__ ((__packed__)) mpc_dma_tcd {
	/* 0x00 */
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	/* 0x08 */
	u32 nbytes;		/* Inner "minor" byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	/* 0x14 */
	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current "major" iteration count */
	u32 doff:16;		/* Signed destination address offset */

	/* 0x18 */
	u32 dlast_sga;		/* Last Destination address adjustment/scatter
				 * gather address
				 */

	/* 0x1c */
	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 biter_linkch:6;	/* Link channel for major loop complete */
	u32 biter:9;		/* Beginning "major" iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};

/* Software descriptor: one dmaengine transaction backed by one TCD */
struct mpc_dma_desc {
	struct dma_async_tx_descriptor desc;	/* generic dmaengine part */
	struct mpc_dma_tcd *tcd;		/* CPU view of the TCD */
	dma_addr_t tcd_paddr;			/* DMA address of the TCD */
	int error;				/* set to -EIO on hw error */
	struct list_head node;			/* link in channel lists */
};

/*
 * Per-channel state. Descriptors travel
 * free -> prepared -> queued -> active -> completed -> free.
 */
struct mpc_dma_chan {
	struct
dma_chan chan;			/* generic dmaengine channel */
	struct list_head free;		/* unused descriptors */
	struct list_head prepared;	/* prepared, not yet submitted */
	struct list_head queued;	/* submitted, waiting for hardware */
	struct list_head active;	/* currently running in hardware */
	struct list_head completed;	/* done, awaiting the tasklet */
	struct mpc_dma_tcd *tcd;	/* coherent TCD area (CPU view) */
	dma_addr_t tcd_paddr;		/* coherent TCD area (DMA view) */
	dma_cookie_t completed_cookie;	/* last completed cookie */

	/* Lock for this structure */
	spinlock_t lock;
};

/* Per-controller state */
struct mpc_dma {
	struct dma_device dma;
	struct tasklet_struct tasklet;
	struct mpc_dma_chan channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem *regs;
	struct mpc_dma_tcd __iomem *tcd;	/* hw TCDs, at regs + MPC_DMA_TCD_OFFSET */
	int irq;
	uint error_status;	/* first DMAES value seen; consumed by tasklet */

	/* Lock for error_status field in this structure */
	spinlock_t error_status_lock;
};

#define DRV_NAME "mpc512x_dma"

/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct mpc_dma_chan, chan);
}

/* Convert struct dma_chan to struct mpc_dma (the owning controller) */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);
	return container_of(mchan, struct mpc_dma,
channels[c->chan_id]); 2160fb6f739SPiotr Ziecik } 2170fb6f739SPiotr Ziecik 2180fb6f739SPiotr Ziecik /* 2190fb6f739SPiotr Ziecik * Execute all queued DMA descriptors. 2200fb6f739SPiotr Ziecik * 2210fb6f739SPiotr Ziecik * Following requirements must be met while calling mpc_dma_execute(): 2220fb6f739SPiotr Ziecik * a) mchan->lock is acquired, 2230fb6f739SPiotr Ziecik * b) mchan->active list is empty, 2240fb6f739SPiotr Ziecik * c) mchan->queued list contains at least one entry. 2250fb6f739SPiotr Ziecik */ 2260fb6f739SPiotr Ziecik static void mpc_dma_execute(struct mpc_dma_chan *mchan) 2270fb6f739SPiotr Ziecik { 2280fb6f739SPiotr Ziecik struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan); 2290fb6f739SPiotr Ziecik struct mpc_dma_desc *first = NULL; 2300fb6f739SPiotr Ziecik struct mpc_dma_desc *prev = NULL; 2310fb6f739SPiotr Ziecik struct mpc_dma_desc *mdesc; 2320fb6f739SPiotr Ziecik int cid = mchan->chan.chan_id; 2330fb6f739SPiotr Ziecik 2340fb6f739SPiotr Ziecik /* Move all queued descriptors to active list */ 2350fb6f739SPiotr Ziecik list_splice_tail_init(&mchan->queued, &mchan->active); 2360fb6f739SPiotr Ziecik 2370fb6f739SPiotr Ziecik /* Chain descriptors into one transaction */ 2380fb6f739SPiotr Ziecik list_for_each_entry(mdesc, &mchan->active, node) { 2390fb6f739SPiotr Ziecik if (!first) 2400fb6f739SPiotr Ziecik first = mdesc; 2410fb6f739SPiotr Ziecik 2420fb6f739SPiotr Ziecik if (!prev) { 2430fb6f739SPiotr Ziecik prev = mdesc; 2440fb6f739SPiotr Ziecik continue; 2450fb6f739SPiotr Ziecik } 2460fb6f739SPiotr Ziecik 2470fb6f739SPiotr Ziecik prev->tcd->dlast_sga = mdesc->tcd_paddr; 2480fb6f739SPiotr Ziecik prev->tcd->e_sg = 1; 2490fb6f739SPiotr Ziecik mdesc->tcd->start = 1; 2500fb6f739SPiotr Ziecik 2510fb6f739SPiotr Ziecik prev = mdesc; 2520fb6f739SPiotr Ziecik } 2530fb6f739SPiotr Ziecik 2540fb6f739SPiotr Ziecik prev->tcd->start = 0; 2550fb6f739SPiotr Ziecik prev->tcd->int_maj = 1; 2560fb6f739SPiotr Ziecik 2570fb6f739SPiotr Ziecik /* Send first descriptor in chain 
into hardware */ 2580fb6f739SPiotr Ziecik memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd)); 2590fb6f739SPiotr Ziecik out_8(&mdma->regs->dmassrt, cid); 2600fb6f739SPiotr Ziecik } 2610fb6f739SPiotr Ziecik 2620fb6f739SPiotr Ziecik /* Handle interrupt on one half of DMA controller (32 channels) */ 2630fb6f739SPiotr Ziecik static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off) 2640fb6f739SPiotr Ziecik { 2650fb6f739SPiotr Ziecik struct mpc_dma_chan *mchan; 2660fb6f739SPiotr Ziecik struct mpc_dma_desc *mdesc; 2670fb6f739SPiotr Ziecik u32 status = is | es; 2680fb6f739SPiotr Ziecik int ch; 2690fb6f739SPiotr Ziecik 2700fb6f739SPiotr Ziecik while ((ch = fls(status) - 1) >= 0) { 2710fb6f739SPiotr Ziecik status &= ~(1 << ch); 2720fb6f739SPiotr Ziecik mchan = &mdma->channels[ch + off]; 2730fb6f739SPiotr Ziecik 2740fb6f739SPiotr Ziecik spin_lock(&mchan->lock); 2750fb6f739SPiotr Ziecik 2760fb6f739SPiotr Ziecik /* Check error status */ 2770fb6f739SPiotr Ziecik if (es & (1 << ch)) 2780fb6f739SPiotr Ziecik list_for_each_entry(mdesc, &mchan->active, node) 2790fb6f739SPiotr Ziecik mdesc->error = -EIO; 2800fb6f739SPiotr Ziecik 2810fb6f739SPiotr Ziecik /* Execute queued descriptors */ 2820fb6f739SPiotr Ziecik list_splice_tail_init(&mchan->active, &mchan->completed); 2830fb6f739SPiotr Ziecik if (!list_empty(&mchan->queued)) 2840fb6f739SPiotr Ziecik mpc_dma_execute(mchan); 2850fb6f739SPiotr Ziecik 2860fb6f739SPiotr Ziecik spin_unlock(&mchan->lock); 2870fb6f739SPiotr Ziecik } 2880fb6f739SPiotr Ziecik } 2890fb6f739SPiotr Ziecik 2900fb6f739SPiotr Ziecik /* Interrupt handler */ 2910fb6f739SPiotr Ziecik static irqreturn_t mpc_dma_irq(int irq, void *data) 2920fb6f739SPiotr Ziecik { 2930fb6f739SPiotr Ziecik struct mpc_dma *mdma = data; 2940fb6f739SPiotr Ziecik uint es; 2950fb6f739SPiotr Ziecik 2960fb6f739SPiotr Ziecik /* Save error status register */ 2970fb6f739SPiotr Ziecik es = in_be32(&mdma->regs->dmaes); 2980fb6f739SPiotr Ziecik 
spin_lock(&mdma->error_status_lock);
	/* Keep only the first error seen; the tasklet clears it later */
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Ack interrupt on all channels */
	out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

	/* Schedule tasklet to run completion callbacks outside hard-irq */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}

/*
 * DMA Tasklet: report any hardware error saved by the interrupt handler,
 * then run completion callbacks and dependencies for all completed
 * descriptors and return them to their channels' free lists.
 */
static void mpc_dma_tasklet(unsigned long data)
{
	struct mpc_dma *mdma = (void *)data;
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	uint es;
	int i;

	/* Take and clear the saved error status */
	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);

	/* Print nice error report */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported following error(s) on channel %u:\n",
						      MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev, "- Source Offset"
						" Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev, "- Destination Address"
								" Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev, "- Destination Offset"
						" Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev, "- NBytes/Citter"
						" Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev, "- Scatter/Gather"
						" Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies (lock dropped) */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			if (desc->callback)
				desc->callback(desc->callback_param);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}

/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct
mpc_dma_desc, desc);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie; skip non-positive values on wrap-around */
	cookie = mchan->chan.cookie + 1;
	if (cookie <= 0)
		cookie = 1;

	mchan->chan.cookie = cookie;
	mdesc->desc.cookie = cookie;

	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}

/*
 * Alloc channel resources: one coherent TCD array plus up to
 * MPC_DMA_DESCRIPTORS software descriptors, each pointing at its TCD.
 * Returns 0 on success (even if only some descriptors could be
 * allocated), -ENOMEM when nothing could be allocated.
 */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
							&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev, "Memory allocation error. "
					"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
								tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}

/*
 * Free channel resources. The channel must be idle: all descriptors
 * must have been returned to the free list before this is called.
 */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data out under the lock; free outside of it */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
								tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}

/* Send all pending
descriptor to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
	 */
}

/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	       struct dma_tx_state *txstate)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	unsigned long flags;
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	/* Snapshot both cookies atomically under the channel lock */
	spin_lock_irqsave(&mchan->lock, flags);
	last_used = mchan->chan.cookie;
	last_complete = mchan->completed_cookie;
	spin_unlock_irqrestore(&mchan->lock, flags);

	dma_set_tx_state(txstate, last_complete, last_used, 0);
	return dma_async_is_complete(cookie, last_complete, last_used);
}

/*
 * Prepare descriptor for memory to memory copy.
 * Returns NULL when no free descriptor is available.
 */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
					size_t len, unsigned long flags)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
									node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc)
		return NULL;

	mdesc->error = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));

	/* Pick the widest transfer size src, dst and len are all aligned to */
	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (IS_ALIGNED(src | dst | len, 16)) {
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
tcd->ssize = MPC_DMA_TSIZE_1; 6100fb6f739SPiotr Ziecik tcd->dsize = MPC_DMA_TSIZE_1; 6110fb6f739SPiotr Ziecik tcd->soff = 1; 6120fb6f739SPiotr Ziecik tcd->doff = 1; 6130fb6f739SPiotr Ziecik } 6140fb6f739SPiotr Ziecik 6150fb6f739SPiotr Ziecik tcd->saddr = src; 6160fb6f739SPiotr Ziecik tcd->daddr = dst; 6170fb6f739SPiotr Ziecik tcd->nbytes = len; 6180fb6f739SPiotr Ziecik tcd->biter = 1; 6190fb6f739SPiotr Ziecik tcd->citer = 1; 6200fb6f739SPiotr Ziecik 6210fb6f739SPiotr Ziecik /* Place descriptor in prepared list */ 6220fb6f739SPiotr Ziecik spin_lock_irqsave(&mchan->lock, iflags); 6230fb6f739SPiotr Ziecik list_add_tail(&mdesc->node, &mchan->prepared); 6240fb6f739SPiotr Ziecik spin_unlock_irqrestore(&mchan->lock, iflags); 6250fb6f739SPiotr Ziecik 6260fb6f739SPiotr Ziecik return &mdesc->desc; 6270fb6f739SPiotr Ziecik } 6280fb6f739SPiotr Ziecik 6290fb6f739SPiotr Ziecik static int __devinit mpc_dma_probe(struct of_device *op, 6300fb6f739SPiotr Ziecik const struct of_device_id *match) 6310fb6f739SPiotr Ziecik { 6320fb6f739SPiotr Ziecik struct device_node *dn = op->node; 6330fb6f739SPiotr Ziecik struct device *dev = &op->dev; 6340fb6f739SPiotr Ziecik struct dma_device *dma; 6350fb6f739SPiotr Ziecik struct mpc_dma *mdma; 6360fb6f739SPiotr Ziecik struct mpc_dma_chan *mchan; 6370fb6f739SPiotr Ziecik struct resource res; 6380fb6f739SPiotr Ziecik ulong regs_start, regs_size; 6390fb6f739SPiotr Ziecik int retval, i; 6400fb6f739SPiotr Ziecik 6410fb6f739SPiotr Ziecik mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL); 6420fb6f739SPiotr Ziecik if (!mdma) { 6430fb6f739SPiotr Ziecik dev_err(dev, "Memory exhausted!\n"); 6440fb6f739SPiotr Ziecik return -ENOMEM; 6450fb6f739SPiotr Ziecik } 6460fb6f739SPiotr Ziecik 6470fb6f739SPiotr Ziecik mdma->irq = irq_of_parse_and_map(dn, 0); 6480fb6f739SPiotr Ziecik if (mdma->irq == NO_IRQ) { 6490fb6f739SPiotr Ziecik dev_err(dev, "Error mapping IRQ!\n"); 6500fb6f739SPiotr Ziecik return -EINVAL; 6510fb6f739SPiotr Ziecik } 6520fb6f739SPiotr 
Ziecik 6530fb6f739SPiotr Ziecik retval = of_address_to_resource(dn, 0, &res); 6540fb6f739SPiotr Ziecik if (retval) { 6550fb6f739SPiotr Ziecik dev_err(dev, "Error parsing memory region!\n"); 6560fb6f739SPiotr Ziecik return retval; 6570fb6f739SPiotr Ziecik } 6580fb6f739SPiotr Ziecik 6590fb6f739SPiotr Ziecik regs_start = res.start; 6608381fc35STobias Klauser regs_size = resource_size(&res); 6610fb6f739SPiotr Ziecik 6620fb6f739SPiotr Ziecik if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) { 6630fb6f739SPiotr Ziecik dev_err(dev, "Error requesting memory region!\n"); 6640fb6f739SPiotr Ziecik return -EBUSY; 6650fb6f739SPiotr Ziecik } 6660fb6f739SPiotr Ziecik 6670fb6f739SPiotr Ziecik mdma->regs = devm_ioremap(dev, regs_start, regs_size); 6680fb6f739SPiotr Ziecik if (!mdma->regs) { 6690fb6f739SPiotr Ziecik dev_err(dev, "Error mapping memory region!\n"); 6700fb6f739SPiotr Ziecik return -ENOMEM; 6710fb6f739SPiotr Ziecik } 6720fb6f739SPiotr Ziecik 6730fb6f739SPiotr Ziecik mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs) 6740fb6f739SPiotr Ziecik + MPC_DMA_TCD_OFFSET); 6750fb6f739SPiotr Ziecik 6760fb6f739SPiotr Ziecik retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME, 6770fb6f739SPiotr Ziecik mdma); 6780fb6f739SPiotr Ziecik if (retval) { 6790fb6f739SPiotr Ziecik dev_err(dev, "Error requesting IRQ!\n"); 6800fb6f739SPiotr Ziecik return -EINVAL; 6810fb6f739SPiotr Ziecik } 6820fb6f739SPiotr Ziecik 6830fb6f739SPiotr Ziecik spin_lock_init(&mdma->error_status_lock); 6840fb6f739SPiotr Ziecik 6850fb6f739SPiotr Ziecik dma = &mdma->dma; 6860fb6f739SPiotr Ziecik dma->dev = dev; 6870fb6f739SPiotr Ziecik dma->chancnt = MPC_DMA_CHANNELS; 6880fb6f739SPiotr Ziecik dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources; 6890fb6f739SPiotr Ziecik dma->device_free_chan_resources = mpc_dma_free_chan_resources; 6900fb6f739SPiotr Ziecik dma->device_issue_pending = mpc_dma_issue_pending; 69107934481SLinus Walleij dma->device_tx_status = 
mpc_dma_tx_status; 6920fb6f739SPiotr Ziecik dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy; 6930fb6f739SPiotr Ziecik 6940fb6f739SPiotr Ziecik INIT_LIST_HEAD(&dma->channels); 6950fb6f739SPiotr Ziecik dma_cap_set(DMA_MEMCPY, dma->cap_mask); 6960fb6f739SPiotr Ziecik 6970fb6f739SPiotr Ziecik for (i = 0; i < dma->chancnt; i++) { 6980fb6f739SPiotr Ziecik mchan = &mdma->channels[i]; 6990fb6f739SPiotr Ziecik 7000fb6f739SPiotr Ziecik mchan->chan.device = dma; 7010fb6f739SPiotr Ziecik mchan->chan.chan_id = i; 7020fb6f739SPiotr Ziecik mchan->chan.cookie = 1; 7030fb6f739SPiotr Ziecik mchan->completed_cookie = mchan->chan.cookie; 7040fb6f739SPiotr Ziecik 7050fb6f739SPiotr Ziecik INIT_LIST_HEAD(&mchan->free); 7060fb6f739SPiotr Ziecik INIT_LIST_HEAD(&mchan->prepared); 7070fb6f739SPiotr Ziecik INIT_LIST_HEAD(&mchan->queued); 7080fb6f739SPiotr Ziecik INIT_LIST_HEAD(&mchan->active); 7090fb6f739SPiotr Ziecik INIT_LIST_HEAD(&mchan->completed); 7100fb6f739SPiotr Ziecik 7110fb6f739SPiotr Ziecik spin_lock_init(&mchan->lock); 7120fb6f739SPiotr Ziecik list_add_tail(&mchan->chan.device_node, &dma->channels); 7130fb6f739SPiotr Ziecik } 7140fb6f739SPiotr Ziecik 7150fb6f739SPiotr Ziecik tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma); 7160fb6f739SPiotr Ziecik 7170fb6f739SPiotr Ziecik /* 7180fb6f739SPiotr Ziecik * Configure DMA Engine: 7190fb6f739SPiotr Ziecik * - Dynamic clock, 7200fb6f739SPiotr Ziecik * - Round-robin group arbitration, 7210fb6f739SPiotr Ziecik * - Round-robin channel arbitration. 
7220fb6f739SPiotr Ziecik */ 7230fb6f739SPiotr Ziecik out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG | 7240fb6f739SPiotr Ziecik MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA); 7250fb6f739SPiotr Ziecik 7260fb6f739SPiotr Ziecik /* Disable hardware DMA requests */ 7270fb6f739SPiotr Ziecik out_be32(&mdma->regs->dmaerqh, 0); 7280fb6f739SPiotr Ziecik out_be32(&mdma->regs->dmaerql, 0); 7290fb6f739SPiotr Ziecik 7300fb6f739SPiotr Ziecik /* Disable error interrupts */ 7310fb6f739SPiotr Ziecik out_be32(&mdma->regs->dmaeeih, 0); 7320fb6f739SPiotr Ziecik out_be32(&mdma->regs->dmaeeil, 0); 7330fb6f739SPiotr Ziecik 7340fb6f739SPiotr Ziecik /* Clear interrupts status */ 7350fb6f739SPiotr Ziecik out_be32(&mdma->regs->dmainth, 0xFFFFFFFF); 7360fb6f739SPiotr Ziecik out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF); 7370fb6f739SPiotr Ziecik out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF); 7380fb6f739SPiotr Ziecik out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF); 7390fb6f739SPiotr Ziecik 7400fb6f739SPiotr Ziecik /* Route interrupts to IPIC */ 7410fb6f739SPiotr Ziecik out_be32(&mdma->regs->dmaihsa, 0); 7420fb6f739SPiotr Ziecik out_be32(&mdma->regs->dmailsa, 0); 7430fb6f739SPiotr Ziecik 7440fb6f739SPiotr Ziecik /* Register DMA engine */ 7450fb6f739SPiotr Ziecik dev_set_drvdata(dev, mdma); 7460fb6f739SPiotr Ziecik retval = dma_async_device_register(dma); 7470fb6f739SPiotr Ziecik if (retval) { 7480fb6f739SPiotr Ziecik devm_free_irq(dev, mdma->irq, mdma); 7490fb6f739SPiotr Ziecik irq_dispose_mapping(mdma->irq); 7500fb6f739SPiotr Ziecik } 7510fb6f739SPiotr Ziecik 7520fb6f739SPiotr Ziecik return retval; 7530fb6f739SPiotr Ziecik } 7540fb6f739SPiotr Ziecik 7550fb6f739SPiotr Ziecik static int __devexit mpc_dma_remove(struct of_device *op) 7560fb6f739SPiotr Ziecik { 7570fb6f739SPiotr Ziecik struct device *dev = &op->dev; 7580fb6f739SPiotr Ziecik struct mpc_dma *mdma = dev_get_drvdata(dev); 7590fb6f739SPiotr Ziecik 7600fb6f739SPiotr Ziecik dma_async_device_unregister(&mdma->dma); 7610fb6f739SPiotr Ziecik 
devm_free_irq(dev, mdma->irq, mdma); 7620fb6f739SPiotr Ziecik irq_dispose_mapping(mdma->irq); 7630fb6f739SPiotr Ziecik 7640fb6f739SPiotr Ziecik return 0; 7650fb6f739SPiotr Ziecik } 7660fb6f739SPiotr Ziecik 7670fb6f739SPiotr Ziecik static struct of_device_id mpc_dma_match[] = { 7680fb6f739SPiotr Ziecik { .compatible = "fsl,mpc5121-dma", }, 7690fb6f739SPiotr Ziecik {}, 7700fb6f739SPiotr Ziecik }; 7710fb6f739SPiotr Ziecik 7720fb6f739SPiotr Ziecik static struct of_platform_driver mpc_dma_driver = { 7730fb6f739SPiotr Ziecik .match_table = mpc_dma_match, 7740fb6f739SPiotr Ziecik .probe = mpc_dma_probe, 7750fb6f739SPiotr Ziecik .remove = __devexit_p(mpc_dma_remove), 7760fb6f739SPiotr Ziecik .driver = { 7770fb6f739SPiotr Ziecik .name = DRV_NAME, 7780fb6f739SPiotr Ziecik .owner = THIS_MODULE, 7790fb6f739SPiotr Ziecik }, 7800fb6f739SPiotr Ziecik }; 7810fb6f739SPiotr Ziecik 7820fb6f739SPiotr Ziecik static int __init mpc_dma_init(void) 7830fb6f739SPiotr Ziecik { 7840fb6f739SPiotr Ziecik return of_register_platform_driver(&mpc_dma_driver); 7850fb6f739SPiotr Ziecik } 7860fb6f739SPiotr Ziecik module_init(mpc_dma_init); 7870fb6f739SPiotr Ziecik 7880fb6f739SPiotr Ziecik static void __exit mpc_dma_exit(void) 7890fb6f739SPiotr Ziecik { 7900fb6f739SPiotr Ziecik of_unregister_platform_driver(&mpc_dma_driver); 7910fb6f739SPiotr Ziecik } 7920fb6f739SPiotr Ziecik module_exit(mpc_dma_exit); 7930fb6f739SPiotr Ziecik 7940fb6f739SPiotr Ziecik MODULE_LICENSE("GPL"); 7950fb6f739SPiotr Ziecik MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>"); 796