11fe20f1bSEugeniy Paltsev // SPDX-License-Identifier: GPL-2.0
21fe20f1bSEugeniy Paltsev // (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)
31fe20f1bSEugeniy Paltsev
41fe20f1bSEugeniy Paltsev /*
51fe20f1bSEugeniy Paltsev * Synopsys DesignWare AXI DMA Controller driver.
61fe20f1bSEugeniy Paltsev *
71fe20f1bSEugeniy Paltsev * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
81fe20f1bSEugeniy Paltsev */
91fe20f1bSEugeniy Paltsev
101fe20f1bSEugeniy Paltsev #include <linux/bitops.h>
111fe20f1bSEugeniy Paltsev #include <linux/delay.h>
121fe20f1bSEugeniy Paltsev #include <linux/device.h>
131fe20f1bSEugeniy Paltsev #include <linux/dmaengine.h>
141fe20f1bSEugeniy Paltsev #include <linux/dmapool.h>
1578a90a1eSSia Jee Heng #include <linux/dma-mapping.h>
161fe20f1bSEugeniy Paltsev #include <linux/err.h>
171fe20f1bSEugeniy Paltsev #include <linux/interrupt.h>
181fe20f1bSEugeniy Paltsev #include <linux/io.h>
191deb96c0SSia Jee Heng #include <linux/iopoll.h>
201deb96c0SSia Jee Heng #include <linux/io-64-nonatomic-lo-hi.h>
211fe20f1bSEugeniy Paltsev #include <linux/kernel.h>
221fe20f1bSEugeniy Paltsev #include <linux/module.h>
231fe20f1bSEugeniy Paltsev #include <linux/of.h>
24b428c6faSSia Jee Heng #include <linux/of_dma.h>
251fe20f1bSEugeniy Paltsev #include <linux/platform_device.h>
261fe20f1bSEugeniy Paltsev #include <linux/pm_runtime.h>
271fe20f1bSEugeniy Paltsev #include <linux/property.h>
28790f3c8bSWalker Chen #include <linux/reset.h>
29ef6fb2d6SSia Jee Heng #include <linux/slab.h>
301fe20f1bSEugeniy Paltsev #include <linux/types.h>
311fe20f1bSEugeniy Paltsev
321fe20f1bSEugeniy Paltsev #include "dw-axi-dmac.h"
331fe20f1bSEugeniy Paltsev #include "../dmaengine.h"
341fe20f1bSEugeniy Paltsev #include "../virt-dma.h"
351fe20f1bSEugeniy Paltsev
361fe20f1bSEugeniy Paltsev /*
371fe20f1bSEugeniy Paltsev * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
381fe20f1bSEugeniy Paltsev * master data bus width up to 512 bits (for both AXI master interfaces), but
39e7c7a016STom Rix * it depends on IP block configuration.
401fe20f1bSEugeniy Paltsev */
411fe20f1bSEugeniy Paltsev #define AXI_DMA_BUSWIDTHS \
421fe20f1bSEugeniy Paltsev (DMA_SLAVE_BUSWIDTH_1_BYTE | \
431fe20f1bSEugeniy Paltsev DMA_SLAVE_BUSWIDTH_2_BYTES | \
441fe20f1bSEugeniy Paltsev DMA_SLAVE_BUSWIDTH_4_BYTES | \
451fe20f1bSEugeniy Paltsev DMA_SLAVE_BUSWIDTH_8_BYTES | \
461fe20f1bSEugeniy Paltsev DMA_SLAVE_BUSWIDTH_16_BYTES | \
471fe20f1bSEugeniy Paltsev DMA_SLAVE_BUSWIDTH_32_BYTES | \
481fe20f1bSEugeniy Paltsev DMA_SLAVE_BUSWIDTH_64_BYTES)
491fe20f1bSEugeniy Paltsev
50790f3c8bSWalker Chen #define AXI_DMA_FLAG_HAS_APB_REGS BIT(0)
51790f3c8bSWalker Chen #define AXI_DMA_FLAG_HAS_RESETS BIT(1)
52790f3c8bSWalker Chen #define AXI_DMA_FLAG_USE_CFG2 BIT(2)
53790f3c8bSWalker Chen
/* Write a 32-bit value to a common (chip-wide, non-channel) DMAC register. */
static inline void
axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
{
	iowrite32(val, chip->regs + reg);
}
591fe20f1bSEugeniy Paltsev
/* Read a 32-bit value from a common (chip-wide, non-channel) DMAC register. */
static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
{
	return ioread32(chip->regs + reg);
}
641fe20f1bSEugeniy Paltsev
/* Write a 32-bit value to a per-channel register of @chan. */
static inline void
axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
{
	iowrite32(val, chan->chan_regs + reg);
}
701fe20f1bSEugeniy Paltsev
/* Read a 32-bit value from a per-channel register of @chan. */
static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
{
	return ioread32(chan->chan_regs + reg);
}
751fe20f1bSEugeniy Paltsev
/* Write a 64-bit value to a per-channel register pair at @reg/@reg+4. */
static inline void
axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
{
	/*
	 * We split one 64 bit write for two 32 bit write as some HW doesn't
	 * support 64 bit access.  Low word first, high word at @reg + 4.
	 */
	iowrite32(lower_32_bits(val), chan->chan_regs + reg);
	iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
}
861fe20f1bSEugeniy Paltsev
/*
 * Program the channel configuration registers (CH_CFG_L/CH_CFG_H) from
 * @config.  Two register layouts exist: the legacy 8-channel map without
 * the CFG2 quirk keeps handshake interface numbers and priority in CFG_H;
 * newer controllers (CFG2 layout) move src/dst handshake numbers into
 * CFG_L and use different CFG_H field positions.
 */
static inline void axi_chan_config_write(struct axi_dma_chan *chan,
					 struct axi_dma_chan_config *config)
{
	u32 cfg_lo, cfg_hi;

	cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS |
		  config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
	if (chan->chip->dw->hdata->reg_map_8_channels &&
	    !chan->chip->dw->hdata->use_cfg2) {
		/* Legacy layout: everything else lives in CFG_H */
		cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS |
			 config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS |
			 config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS |
			 config->src_per << CH_CFG_H_SRC_PER_POS |
			 config->dst_per << CH_CFG_H_DST_PER_POS |
			 config->prior << CH_CFG_H_PRIORITY_POS;
	} else {
		/* CFG2 layout: handshake interface numbers move to CFG_L */
		cfg_lo |= config->src_per << CH_CFG2_L_SRC_PER_POS |
			  config->dst_per << CH_CFG2_L_DST_PER_POS;
		cfg_hi = config->tt_fc << CH_CFG2_H_TT_FC_POS |
			 config->hs_sel_src << CH_CFG2_H_HS_SEL_SRC_POS |
			 config->hs_sel_dst << CH_CFG2_H_HS_SEL_DST_POS |
			 config->prior << CH_CFG2_H_PRIORITY_POS;
	}
	axi_chan_iowrite32(chan, CH_CFG_L, cfg_lo);
	axi_chan_iowrite32(chan, CH_CFG_H, cfg_hi);
}
11382435166SPandith N
axi_dma_disable(struct axi_dma_chip * chip)1141fe20f1bSEugeniy Paltsev static inline void axi_dma_disable(struct axi_dma_chip *chip)
1151fe20f1bSEugeniy Paltsev {
1161fe20f1bSEugeniy Paltsev u32 val;
1171fe20f1bSEugeniy Paltsev
1181fe20f1bSEugeniy Paltsev val = axi_dma_ioread32(chip, DMAC_CFG);
1191fe20f1bSEugeniy Paltsev val &= ~DMAC_EN_MASK;
1201fe20f1bSEugeniy Paltsev axi_dma_iowrite32(chip, DMAC_CFG, val);
1211fe20f1bSEugeniy Paltsev }
1221fe20f1bSEugeniy Paltsev
axi_dma_enable(struct axi_dma_chip * chip)1231fe20f1bSEugeniy Paltsev static inline void axi_dma_enable(struct axi_dma_chip *chip)
1241fe20f1bSEugeniy Paltsev {
1251fe20f1bSEugeniy Paltsev u32 val;
1261fe20f1bSEugeniy Paltsev
1271fe20f1bSEugeniy Paltsev val = axi_dma_ioread32(chip, DMAC_CFG);
1281fe20f1bSEugeniy Paltsev val |= DMAC_EN_MASK;
1291fe20f1bSEugeniy Paltsev axi_dma_iowrite32(chip, DMAC_CFG, val);
1301fe20f1bSEugeniy Paltsev }
1311fe20f1bSEugeniy Paltsev
axi_dma_irq_disable(struct axi_dma_chip * chip)1321fe20f1bSEugeniy Paltsev static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
1331fe20f1bSEugeniy Paltsev {
1341fe20f1bSEugeniy Paltsev u32 val;
1351fe20f1bSEugeniy Paltsev
1361fe20f1bSEugeniy Paltsev val = axi_dma_ioread32(chip, DMAC_CFG);
1371fe20f1bSEugeniy Paltsev val &= ~INT_EN_MASK;
1381fe20f1bSEugeniy Paltsev axi_dma_iowrite32(chip, DMAC_CFG, val);
1391fe20f1bSEugeniy Paltsev }
1401fe20f1bSEugeniy Paltsev
axi_dma_irq_enable(struct axi_dma_chip * chip)1411fe20f1bSEugeniy Paltsev static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
1421fe20f1bSEugeniy Paltsev {
1431fe20f1bSEugeniy Paltsev u32 val;
1441fe20f1bSEugeniy Paltsev
1451fe20f1bSEugeniy Paltsev val = axi_dma_ioread32(chip, DMAC_CFG);
1461fe20f1bSEugeniy Paltsev val |= INT_EN_MASK;
1471fe20f1bSEugeniy Paltsev axi_dma_iowrite32(chip, DMAC_CFG, val);
1481fe20f1bSEugeniy Paltsev }
1491fe20f1bSEugeniy Paltsev
/* Disable the interrupts in @irq_mask for @chan via CH_INTSTATUS_ENA. */
static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
{
	u32 ena;

	if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
		/* Common case: one write clears every enable bit */
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
		return;
	}

	/* Partial mask: read-modify-write the enable register */
	ena = axi_chan_ioread32(chan, CH_INTSTATUS_ENA) & ~irq_mask;
	axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, ena);
}
1621fe20f1bSEugeniy Paltsev
/* Set the channel interrupt-status enables to exactly @irq_mask. */
static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
}
1671fe20f1bSEugeniy Paltsev
/* Select which channel interrupt sources assert the irq signal line. */
static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
}
1721fe20f1bSEugeniy Paltsev
/* Acknowledge (clear) the pending channel interrupts in @irq_mask. */
static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
}
1771fe20f1bSEugeniy Paltsev
/* Return the channel's pending interrupt status bits. */
static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
{
	return axi_chan_ioread32(chan, CH_INTSTATUS);
}
1821fe20f1bSEugeniy Paltsev
axi_chan_disable(struct axi_dma_chan * chan)1831fe20f1bSEugeniy Paltsev static inline void axi_chan_disable(struct axi_dma_chan *chan)
1841fe20f1bSEugeniy Paltsev {
1851fe20f1bSEugeniy Paltsev u32 val;
1861fe20f1bSEugeniy Paltsev
1871fe20f1bSEugeniy Paltsev val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
1881fe20f1bSEugeniy Paltsev val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
18982435166SPandith N if (chan->chip->dw->hdata->reg_map_8_channels)
1901fe20f1bSEugeniy Paltsev val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
19182435166SPandith N else
19282435166SPandith N val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
1931fe20f1bSEugeniy Paltsev axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
1941fe20f1bSEugeniy Paltsev }
1951fe20f1bSEugeniy Paltsev
axi_chan_enable(struct axi_dma_chan * chan)1961fe20f1bSEugeniy Paltsev static inline void axi_chan_enable(struct axi_dma_chan *chan)
1971fe20f1bSEugeniy Paltsev {
1981fe20f1bSEugeniy Paltsev u32 val;
1991fe20f1bSEugeniy Paltsev
2001fe20f1bSEugeniy Paltsev val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
20182435166SPandith N if (chan->chip->dw->hdata->reg_map_8_channels)
2021fe20f1bSEugeniy Paltsev val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
2031fe20f1bSEugeniy Paltsev BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
20482435166SPandith N else
20582435166SPandith N val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
20682435166SPandith N BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
2071fe20f1bSEugeniy Paltsev axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
2081fe20f1bSEugeniy Paltsev }
2091fe20f1bSEugeniy Paltsev
axi_chan_is_hw_enable(struct axi_dma_chan * chan)2101fe20f1bSEugeniy Paltsev static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
2111fe20f1bSEugeniy Paltsev {
2121fe20f1bSEugeniy Paltsev u32 val;
2131fe20f1bSEugeniy Paltsev
2141fe20f1bSEugeniy Paltsev val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
2151fe20f1bSEugeniy Paltsev
2161fe20f1bSEugeniy Paltsev return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
2171fe20f1bSEugeniy Paltsev }
2181fe20f1bSEugeniy Paltsev
axi_dma_hw_init(struct axi_dma_chip * chip)2191fe20f1bSEugeniy Paltsev static void axi_dma_hw_init(struct axi_dma_chip *chip)
2201fe20f1bSEugeniy Paltsev {
2212d0f07f8SPandith N int ret;
2221fe20f1bSEugeniy Paltsev u32 i;
2231fe20f1bSEugeniy Paltsev
2241fe20f1bSEugeniy Paltsev for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
2251fe20f1bSEugeniy Paltsev axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
2261fe20f1bSEugeniy Paltsev axi_chan_disable(&chip->dw->chan[i]);
2271fe20f1bSEugeniy Paltsev }
2282d0f07f8SPandith N ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(64));
2292d0f07f8SPandith N if (ret)
2302d0f07f8SPandith N dev_warn(chip->dev, "Unable to set coherent mask\n");
2311fe20f1bSEugeniy Paltsev }
2321fe20f1bSEugeniy Paltsev
/*
 * Compute the widest usable transfer width, encoded as log2(bytes).
 * OR-ing src, dst and len and taking __ffs() yields the lowest set bit,
 * i.e. the strictest alignment among the addresses and the length;
 * BIT(max_width) caps the result at the controller's master data width.
 */
static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
				   dma_addr_t dst, size_t len)
{
	u32 max_width = chan->chip->dw->hdata->m_data_width;

	return __ffs(src | dst | len | BIT(max_width));
}
2401fe20f1bSEugeniy Paltsev
/* Return the dmaengine name of @chan's virtual channel, for log messages. */
static inline const char *axi_chan_name(struct axi_dma_chan *chan)
{
	return dma_chan_name(&chan->vc.chan);
}
2451fe20f1bSEugeniy Paltsev
/*
 * Allocate a software descriptor with room for @num hardware descriptors.
 * Returns NULL on allocation failure.  GFP_NOWAIT is used because this
 * runs from prep callbacks which may be in atomic context.
 */
static struct axi_dma_desc *axi_desc_alloc(u32 num)
{
	struct axi_dma_desc *desc = kzalloc(sizeof(*desc), GFP_NOWAIT);

	if (!desc)
		return NULL;

	desc->hw_desc = kcalloc(num, sizeof(*desc->hw_desc), GFP_NOWAIT);
	if (!desc->hw_desc) {
		kfree(desc);
		return NULL;
	}
	desc->nr_hw_descs = num;

	return desc;
}
263ef6fb2d6SSia Jee Heng
/*
 * Grab one zeroed hardware LLI from the channel's DMA pool.  On success
 * the LLI's bus address is stored in *@addr and the channel's allocation
 * counter is bumped; on failure NULL is returned.
 */
static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
					dma_addr_t *addr)
{
	struct axi_dma_lli *lli;
	dma_addr_t bus_addr;

	lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &bus_addr);
	if (unlikely(!lli)) {
		dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
			axi_chan_name(chan));
		return NULL;
	}

	atomic_inc(&chan->descs_allocated);
	*addr = bus_addr;

	return lli;
}
2821fe20f1bSEugeniy Paltsev
axi_desc_put(struct axi_dma_desc * desc)2831fe20f1bSEugeniy Paltsev static void axi_desc_put(struct axi_dma_desc *desc)
2841fe20f1bSEugeniy Paltsev {
2851fe20f1bSEugeniy Paltsev struct axi_dma_chan *chan = desc->chan;
286*e151ae1eSJoao Pinto int count = desc->nr_hw_descs;
287ef6fb2d6SSia Jee Heng struct axi_dma_hw_desc *hw_desc;
288ef6fb2d6SSia Jee Heng int descs_put;
2891fe20f1bSEugeniy Paltsev
290ef6fb2d6SSia Jee Heng for (descs_put = 0; descs_put < count; descs_put++) {
291ef6fb2d6SSia Jee Heng hw_desc = &desc->hw_desc[descs_put];
2920b9d2fb3SSia Jee Heng dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);
2931fe20f1bSEugeniy Paltsev }
2941fe20f1bSEugeniy Paltsev
295ef6fb2d6SSia Jee Heng kfree(desc->hw_desc);
296ef6fb2d6SSia Jee Heng kfree(desc);
2971fe20f1bSEugeniy Paltsev atomic_sub(descs_put, &chan->descs_allocated);
2981fe20f1bSEugeniy Paltsev dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
2991fe20f1bSEugeniy Paltsev axi_chan_name(chan), descs_put,
3001fe20f1bSEugeniy Paltsev atomic_read(&chan->descs_allocated));
3011fe20f1bSEugeniy Paltsev }
3021fe20f1bSEugeniy Paltsev
/* virt-dma callback: free a completed virtual descriptor. */
static void vchan_desc_put(struct virt_dma_desc *vdesc)
{
	axi_desc_put(vd_to_axi_desc(vdesc));
}
3071fe20f1bSEugeniy Paltsev
3081fe20f1bSEugeniy Paltsev static enum dma_status
dma_chan_tx_status(struct dma_chan * dchan,dma_cookie_t cookie,struct dma_tx_state * txstate)3091fe20f1bSEugeniy Paltsev dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
3101fe20f1bSEugeniy Paltsev struct dma_tx_state *txstate)
3111fe20f1bSEugeniy Paltsev {
3121fe20f1bSEugeniy Paltsev struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
3138e55444dSSia Jee Heng struct virt_dma_desc *vdesc;
3148e55444dSSia Jee Heng enum dma_status status;
3158e55444dSSia Jee Heng u32 completed_length;
3168e55444dSSia Jee Heng unsigned long flags;
3178e55444dSSia Jee Heng u32 completed_blocks;
3188e55444dSSia Jee Heng size_t bytes = 0;
3198e55444dSSia Jee Heng u32 length;
3208e55444dSSia Jee Heng u32 len;
3211fe20f1bSEugeniy Paltsev
3228e55444dSSia Jee Heng status = dma_cookie_status(dchan, cookie, txstate);
3238e55444dSSia Jee Heng if (status == DMA_COMPLETE || !txstate)
3248e55444dSSia Jee Heng return status;
3251fe20f1bSEugeniy Paltsev
3268e55444dSSia Jee Heng spin_lock_irqsave(&chan->vc.lock, flags);
3271fe20f1bSEugeniy Paltsev
3288e55444dSSia Jee Heng vdesc = vchan_find_desc(&chan->vc, cookie);
3298e55444dSSia Jee Heng if (vdesc) {
3308e55444dSSia Jee Heng length = vd_to_axi_desc(vdesc)->length;
3318e55444dSSia Jee Heng completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks;
3328e55444dSSia Jee Heng len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
3338e55444dSSia Jee Heng completed_length = completed_blocks * len;
3348e55444dSSia Jee Heng bytes = length - completed_length;
3358e55444dSSia Jee Heng }
3368e55444dSSia Jee Heng
3378e55444dSSia Jee Heng spin_unlock_irqrestore(&chan->vc.lock, flags);
3388e55444dSSia Jee Heng dma_set_residue(txstate, bytes);
3398e55444dSSia Jee Heng
3408e55444dSSia Jee Heng return status;
3411fe20f1bSEugeniy Paltsev }
3421fe20f1bSEugeniy Paltsev
/* Store the next-LLI pointer (little-endian) into a hardware descriptor. */
static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->llp = cpu_to_le64(adr);
}
3471fe20f1bSEugeniy Paltsev
/* Program the channel's CH_LLP register with the first LLI address. */
static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
{
	axi_chan_iowrite64(chan, CH_LLP, adr);
}
3521fe20f1bSEugeniy Paltsev
/*
 * Enable or disable sub-word (byte/halfword) APB writes for @chan.  The
 * register to touch depends on the configured destination address width:
 * 16-bit destinations use the halfword enable, everything else the byte
 * enable.  Silently does nothing when no APB register block is mapped.
 */
static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)
{
	u32 offset, val;

	if (!chan->chip->apb_regs) {
		dev_dbg(chan->chip->dev, "apb_regs not initialized\n");
		return;
	}

	if (__ffs(chan->config.dst_addr_width) == DWAXIDMAC_TRANS_WIDTH_16)
		offset = DMAC_APB_HALFWORD_WR_CH_EN;
	else
		offset = DMAC_APB_BYTE_WR_CH_EN;

	val = ioread32(chan->chip->apb_regs + offset);
	if (set)
		val |= BIT(chan->id);
	else
		val &= ~BIT(chan->id);
	iowrite32(val, chan->chip->apb_regs + offset);
}
/*
 * Kick off the transfer described by @first on @chan: enable the
 * controller, program the channel configuration (multi-block linked-list
 * mode, flow control per transfer direction, handshake interface),
 * point CH_LLP at the first LLI, arm the interrupts and enable the
 * channel.  Called in chan locked context.
 */
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
				      struct axi_dma_desc *first)
{
	u32 priority = chan->chip->dw->hdata->priority[chan->id];
	struct axi_dma_chan_config config = {};
	u32 irq_mask;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));

		return;
	}

	axi_dma_enable(chan->chip);

	/* Defaults: linked-list multi-block, mem-to-mem, HW handshaking */
	config.dst_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
	config.src_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
	config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
	config.prior = priority;
	config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
	config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;
	/*
	 * For peripheral transfers pick the flow controller (device or
	 * DMAC) and the handshake interface number: channel id when the
	 * APB mux registers exist, otherwise the DT-provided handshake.
	 */
	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		dw_axi_dma_set_byte_halfword(chan, true);
		config.tt_fc = chan->config.device_fc ?
				DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
				DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC;
		if (chan->chip->apb_regs)
			config.dst_per = chan->id;
		else
			config.dst_per = chan->hw_handshake_num;
		break;
	case DMA_DEV_TO_MEM:
		config.tt_fc = chan->config.device_fc ?
				DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
				DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC;
		if (chan->chip->apb_regs)
			config.src_per = chan->id;
		else
			config.src_per = chan->hw_handshake_num;
		break;
	default:
		break;
	}
	axi_chan_config_write(chan, &config);

	write_chan_llp(chan, first->hw_desc[0].llp | lms);

	irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
	axi_chan_irq_sig_set(chan, irq_mask);

	/* Generate 'suspend' status but don't generate interrupt */
	irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
	axi_chan_irq_set(chan, irq_mask);

	axi_chan_enable(chan);
}
4361fe20f1bSEugeniy Paltsev
axi_chan_start_first_queued(struct axi_dma_chan * chan)4371fe20f1bSEugeniy Paltsev static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
4381fe20f1bSEugeniy Paltsev {
4391fe20f1bSEugeniy Paltsev struct axi_dma_desc *desc;
4401fe20f1bSEugeniy Paltsev struct virt_dma_desc *vd;
4411fe20f1bSEugeniy Paltsev
4421fe20f1bSEugeniy Paltsev vd = vchan_next_desc(&chan->vc);
4431fe20f1bSEugeniy Paltsev if (!vd)
4441fe20f1bSEugeniy Paltsev return;
4451fe20f1bSEugeniy Paltsev
4461fe20f1bSEugeniy Paltsev desc = vd_to_axi_desc(vd);
4471fe20f1bSEugeniy Paltsev dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
4481fe20f1bSEugeniy Paltsev vd->tx.cookie);
4491fe20f1bSEugeniy Paltsev axi_chan_block_xfer_start(chan, desc);
4501fe20f1bSEugeniy Paltsev }
4511fe20f1bSEugeniy Paltsev
dma_chan_issue_pending(struct dma_chan * dchan)4521fe20f1bSEugeniy Paltsev static void dma_chan_issue_pending(struct dma_chan *dchan)
4531fe20f1bSEugeniy Paltsev {
4541fe20f1bSEugeniy Paltsev struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
4551fe20f1bSEugeniy Paltsev unsigned long flags;
4561fe20f1bSEugeniy Paltsev
4571fe20f1bSEugeniy Paltsev spin_lock_irqsave(&chan->vc.lock, flags);
4581fe20f1bSEugeniy Paltsev if (vchan_issue_pending(&chan->vc))
4591fe20f1bSEugeniy Paltsev axi_chan_start_first_queued(chan);
4601fe20f1bSEugeniy Paltsev spin_unlock_irqrestore(&chan->vc.lock, flags);
4611fe20f1bSEugeniy Paltsev }
4621fe20f1bSEugeniy Paltsev
/* dmaengine .device_synchronize callback: wait out descriptor callbacks. */
static void dw_axi_dma_synchronize(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	vchan_synchronize(&chan->vc);
}
46967b2e39fSSia Jee Heng
dma_chan_alloc_chan_resources(struct dma_chan * dchan)4701fe20f1bSEugeniy Paltsev static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
4711fe20f1bSEugeniy Paltsev {
4721fe20f1bSEugeniy Paltsev struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
4731fe20f1bSEugeniy Paltsev
4741fe20f1bSEugeniy Paltsev /* ASSERT: channel is idle */
4751fe20f1bSEugeniy Paltsev if (axi_chan_is_hw_enable(chan)) {
4761fe20f1bSEugeniy Paltsev dev_err(chan2dev(chan), "%s is non-idle!\n",
4771fe20f1bSEugeniy Paltsev axi_chan_name(chan));
4781fe20f1bSEugeniy Paltsev return -EBUSY;
4791fe20f1bSEugeniy Paltsev }
4801fe20f1bSEugeniy Paltsev
4810b9d2fb3SSia Jee Heng /* LLI address must be aligned to a 64-byte boundary */
4820b9d2fb3SSia Jee Heng chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
4830b9d2fb3SSia Jee Heng chan->chip->dev,
4840b9d2fb3SSia Jee Heng sizeof(struct axi_dma_lli),
4850b9d2fb3SSia Jee Heng 64, 0);
4860b9d2fb3SSia Jee Heng if (!chan->desc_pool) {
4870b9d2fb3SSia Jee Heng dev_err(chan2dev(chan), "No memory for descriptors\n");
4880b9d2fb3SSia Jee Heng return -ENOMEM;
4890b9d2fb3SSia Jee Heng }
4901fe20f1bSEugeniy Paltsev dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));
4911fe20f1bSEugeniy Paltsev
4921fe20f1bSEugeniy Paltsev pm_runtime_get(chan->chip->dev);
4931fe20f1bSEugeniy Paltsev
4941fe20f1bSEugeniy Paltsev return 0;
4951fe20f1bSEugeniy Paltsev }
4961fe20f1bSEugeniy Paltsev
dma_chan_free_chan_resources(struct dma_chan * dchan)4971fe20f1bSEugeniy Paltsev static void dma_chan_free_chan_resources(struct dma_chan *dchan)
4981fe20f1bSEugeniy Paltsev {
4991fe20f1bSEugeniy Paltsev struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
5001fe20f1bSEugeniy Paltsev
5011fe20f1bSEugeniy Paltsev /* ASSERT: channel is idle */
5021fe20f1bSEugeniy Paltsev if (axi_chan_is_hw_enable(chan))
5031fe20f1bSEugeniy Paltsev dev_err(dchan2dev(dchan), "%s is non-idle!\n",
5041fe20f1bSEugeniy Paltsev axi_chan_name(chan));
5051fe20f1bSEugeniy Paltsev
5061fe20f1bSEugeniy Paltsev axi_chan_disable(chan);
5071fe20f1bSEugeniy Paltsev axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);
5081fe20f1bSEugeniy Paltsev
5091fe20f1bSEugeniy Paltsev vchan_free_chan_resources(&chan->vc);
5101fe20f1bSEugeniy Paltsev
5110b9d2fb3SSia Jee Heng dma_pool_destroy(chan->desc_pool);
5120b9d2fb3SSia Jee Heng chan->desc_pool = NULL;
5131fe20f1bSEugeniy Paltsev dev_vdbg(dchan2dev(dchan),
5141fe20f1bSEugeniy Paltsev "%s: free resources, descriptor still allocated: %u\n",
5151fe20f1bSEugeniy Paltsev axi_chan_name(chan), atomic_read(&chan->descs_allocated));
5161fe20f1bSEugeniy Paltsev
5171fe20f1bSEugeniy Paltsev pm_runtime_put(chan->chip->dev);
5181fe20f1bSEugeniy Paltsev }
5191fe20f1bSEugeniy Paltsev
/*
 * Lock or unlock the hardware handshake mux for @chan in the APB
 * DMAC_APB_HW_HS_SEL register pair.  @set == true assigns the channel's
 * handshake number; @set == false parks the slot at UNUSED_CHANNEL (0x3F).
 *
 * The mux register is 64 bits wide (8 channels x 8-bit fields), accessed
 * with lo_hi_readq()/lo_hi_writeq().  The locals must therefore be u64:
 * 'unsigned long' is only 32 bits on 32-bit platforms and would truncate
 * the fields of channels 4-7.
 */
static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)
{
	struct axi_dma_chip *chip = chan->chip;
	u64 reg_value, val;

	if (!chip->apb_regs) {
		dev_err(chip->dev, "apb_regs not initialized\n");
		return;
	}

	/*
	 * An unused DMA channel has a default value of 0x3F.
	 * Lock the DMA channel by assign a handshake number to the channel.
	 * Unlock the DMA channel by assign 0x3F to the channel.
	 */
	if (set)
		val = chan->hw_handshake_num;
	else
		val = UNUSED_CHANNEL;

	reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);

	/* Replace only this channel's 8-bit field in the 64-bit mux word */
	reg_value &= ~(DMA_APB_HS_SEL_MASK <<
			(chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
}
552425c8a53SSia Jee Heng
5531fe20f1bSEugeniy Paltsev /*
5541fe20f1bSEugeniy Paltsev * If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched LLI
5551fe20f1bSEugeniy Paltsev * as 1, it understands that the current block is the final block in the
5561fe20f1bSEugeniy Paltsev * transfer and completes the DMA transfer operation at the end of current
5571fe20f1bSEugeniy Paltsev * block transfer.
5581fe20f1bSEugeniy Paltsev */
set_desc_last(struct axi_dma_hw_desc * desc)559ef6fb2d6SSia Jee Heng static void set_desc_last(struct axi_dma_hw_desc *desc)
5601fe20f1bSEugeniy Paltsev {
5611fe20f1bSEugeniy Paltsev u32 val;
5621fe20f1bSEugeniy Paltsev
563ef6fb2d6SSia Jee Heng val = le32_to_cpu(desc->lli->ctl_hi);
5641fe20f1bSEugeniy Paltsev val |= CH_CTL_H_LLI_LAST;
565ef6fb2d6SSia Jee Heng desc->lli->ctl_hi = cpu_to_le32(val);
5661fe20f1bSEugeniy Paltsev }
5671fe20f1bSEugeniy Paltsev
/* Store the source address (little-endian) into a hardware descriptor. */
static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->sar = cpu_to_le64(adr);
}
5721fe20f1bSEugeniy Paltsev
/* Store the destination address (little-endian) into a hardware descriptor. */
static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->dar = cpu_to_le64(adr);
}
5771fe20f1bSEugeniy Paltsev
set_desc_src_master(struct axi_dma_hw_desc * desc)578ef6fb2d6SSia Jee Heng static void set_desc_src_master(struct axi_dma_hw_desc *desc)
5791fe20f1bSEugeniy Paltsev {
5801fe20f1bSEugeniy Paltsev u32 val;
5811fe20f1bSEugeniy Paltsev
5821fe20f1bSEugeniy Paltsev /* Select AXI0 for source master */
583ef6fb2d6SSia Jee Heng val = le32_to_cpu(desc->lli->ctl_lo);
5841fe20f1bSEugeniy Paltsev val &= ~CH_CTL_L_SRC_MAST;
585ef6fb2d6SSia Jee Heng desc->lli->ctl_lo = cpu_to_le32(val);
5861fe20f1bSEugeniy Paltsev }
5871fe20f1bSEugeniy Paltsev
set_desc_dest_master(struct axi_dma_hw_desc * hw_desc,struct axi_dma_desc * desc)588ef6fb2d6SSia Jee Heng static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc,
589ef6fb2d6SSia Jee Heng struct axi_dma_desc *desc)
5901fe20f1bSEugeniy Paltsev {
5911fe20f1bSEugeniy Paltsev u32 val;
5921fe20f1bSEugeniy Paltsev
5931fe20f1bSEugeniy Paltsev /* Select AXI1 for source master if available */
594ef6fb2d6SSia Jee Heng val = le32_to_cpu(hw_desc->lli->ctl_lo);
5951fe20f1bSEugeniy Paltsev if (desc->chan->chip->dw->hdata->nr_masters > 1)
5961fe20f1bSEugeniy Paltsev val |= CH_CTL_L_DST_MAST;
5971fe20f1bSEugeniy Paltsev else
5981fe20f1bSEugeniy Paltsev val &= ~CH_CTL_L_DST_MAST;
5991fe20f1bSEugeniy Paltsev
600ef6fb2d6SSia Jee Heng hw_desc->lli->ctl_lo = cpu_to_le32(val);
6011fe20f1bSEugeniy Paltsev }
6021fe20f1bSEugeniy Paltsev
/*
 * Fill in one hardware LLI for a slave (MEM_TO_DEV / DEV_TO_MEM) segment.
 *
 * @chan:	channel the transfer runs on (chan->direction/config must be set)
 * @hw_desc:	hardware descriptor to populate; its LLI is allocated here
 * @mem_addr:	memory-side address of this segment
 * @len:	segment length in bytes
 *
 * Returns 0 on success, -EINVAL for a misaligned buffer, unsupported
 * direction or a segment exceeding the hardware block size, and -ENOMEM
 * when no LLI can be obtained from the descriptor pool.
 */
static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,
				  struct axi_dma_hw_desc *hw_desc,
				  dma_addr_t mem_addr, size_t len)
{
	unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width);
	unsigned int reg_width;
	unsigned int mem_width;
	dma_addr_t device_addr;
	size_t axi_block_ts;
	size_t block_ts;
	u32 ctllo, ctlhi;
	u32 burst_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	/* Widest memory transfer width that bus width, address and length allow */
	mem_width = __ffs(data_width | mem_addr | len);
	if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
		mem_width = DWAXIDMAC_TRANS_WIDTH_32;

	if (!IS_ALIGNED(mem_addr, 4)) {
		dev_err(chan->chip->dev, "invalid buffer alignment\n");
		return -EINVAL;
	}

	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		/* Device side is a fixed address (NOINC); memory side increments */
		reg_width = __ffs(chan->config.dst_addr_width);
		device_addr = chan->config.dst_addr;
		ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS |
			mem_width << CH_CTL_L_SRC_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		device_addr = chan->config.src_addr;
		ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS |
			mem_width << CH_CTL_L_DST_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> reg_width;
		break;
	default:
		return -EINVAL;
	}

	/* A single LLI cannot exceed the hardware block transfer size */
	if (block_ts > axi_block_ts)
		return -EINVAL;

	hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
	if (unlikely(!hw_desc->lli))
		return -ENOMEM;

	ctlhi = CH_CTL_H_LLI_VALID;

	/* Clamp AXI read/write burst lengths when the IP requires it */
	if (chan->chip->dw->hdata->restrict_axi_burst_len) {
		burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
		ctlhi |= CH_CTL_H_ARLEN_EN | CH_CTL_H_AWLEN_EN |
			 burst_len << CH_CTL_H_ARLEN_POS |
			 burst_len << CH_CTL_H_AWLEN_POS;
	}

	hw_desc->lli->ctl_hi = cpu_to_le32(ctlhi);

	if (chan->direction == DMA_MEM_TO_DEV) {
		write_desc_sar(hw_desc, mem_addr);
		write_desc_dar(hw_desc, device_addr);
	} else {
		write_desc_sar(hw_desc, device_addr);
		write_desc_dar(hw_desc, mem_addr);
	}

	/* BLOCK_TS register holds (number of data items - 1) */
	hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

	ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		 DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS;
	hw_desc->lli->ctl_lo = cpu_to_le32(ctllo);

	set_desc_src_master(hw_desc);

	hw_desc->len = len;
	return 0;
}
687eec91760SSia Jee Heng
/*
 * Return the maximum number of bytes one hardware block transfer can
 * carry for the given direction, or 0 for an unsupported direction.
 */
static size_t calculate_block_len(struct axi_dma_chan *chan,
				  dma_addr_t dma_addr, size_t buf_len,
				  enum dma_transfer_direction direction)
{
	size_t max_ts = chan->chip->dw->hdata->block_size[chan->id];
	u32 bus_width, width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		/* Memory-side width is limited by bus width and alignment */
		bus_width = BIT(chan->chip->dw->hdata->m_data_width);
		width = __ffs(bus_width | dma_addr | buf_len);
		if (width > DWAXIDMAC_TRANS_WIDTH_32)
			width = DWAXIDMAC_TRANS_WIDTH_32;
		return max_ts << width;
	case DMA_DEV_TO_MEM:
		/* Device-side width comes from the slave configuration */
		return max_ts << __ffs(chan->config.src_addr_width);
	default:
		return 0;
	}
}
716f80f7c96SSia Jee Heng
/*
 * Prepare a cyclic (circular) transfer of @buf_len bytes split into
 * @buf_len / @period_len periods.  Each period is further split into
 * segments no larger than one hardware block; the LLI chain is closed
 * into a ring so the transfer repeats until terminated.
 */
static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
			    size_t buf_len, size_t period_len,
			    enum dma_transfer_direction direction,
			    unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	dma_addr_t src_addr = dma_addr;
	u32 num_periods, num_segments;
	size_t axi_block_len;
	u32 total_segments;
	u32 segment_len;
	unsigned int i;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	num_periods = buf_len / period_len;

	axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction);
	if (axi_block_len == 0)
		return NULL;

	/* Evenly sized segments, each fitting within one HW block */
	num_segments = DIV_ROUND_UP(period_len, axi_block_len);
	segment_len = DIV_ROUND_UP(period_len, num_segments);

	total_segments = num_periods * num_segments;

	desc = axi_desc_alloc(total_segments);
	if (unlikely(!desc))
		goto err_desc_get;

	chan->direction = direction;
	desc->chan = chan;
	chan->cyclic = true;
	desc->length = 0;
	desc->period_len = period_len;

	for (i = 0; i < total_segments; i++) {
		hw_desc = &desc->hw_desc[i];

		status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr,
						segment_len);
		if (status < 0)
			goto err_desc_get;

		desc->length += hw_desc->len;
		/* Set end-of-link to the linked descriptor, so that cyclic
		 * callback function can be triggered during interrupt.
		 */
		set_desc_last(hw_desc);

		src_addr += segment_len;
	}

	llp = desc->hw_desc[0].llp;

	/* Managed transfer list: link back-to-front; last LLI wraps to first */
	do {
		hw_desc = &desc->hw_desc[--total_segments];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (total_segments);

	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	/* Releases any LLIs already obtained for this descriptor */
	if (desc)
		axi_desc_put(desc);

	return NULL;
}
7931deb96c0SSia Jee Heng
/*
 * Prepare a slave scatter-gather transfer: each SG entry is split into
 * segments that fit within one hardware block, and all segments are
 * chained into a single LLI list.
 */
static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			      unsigned int sg_len,
			      enum dma_transfer_direction direction,
			      unsigned long flags, void *context)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 num_segments, segment_len;
	unsigned int loop = 0;
	struct scatterlist *sg;
	size_t axi_block_len;
	u32 len, num_sgs = 0;
	unsigned int i;
	dma_addr_t mem;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	/* NOTE(review): block length is derived from the first SG entry only */
	mem = sg_dma_address(sgl);
	len = sg_dma_len(sgl);

	axi_block_len = calculate_block_len(chan, mem, len, direction);
	if (axi_block_len == 0)
		return NULL;

	/* Total number of hardware descriptors needed across all SG entries */
	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);

	desc = axi_desc_alloc(num_sgs);
	if (unlikely(!desc))
		goto err_desc_get;

	desc->chan = chan;
	desc->length = 0;
	chan->direction = direction;

	for_each_sg(sgl, sg, sg_len, i) {
		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);
		/* Split this entry into equal segments that fit a HW block */
		num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
		segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments);

		do {
			hw_desc = &desc->hw_desc[loop++];
			status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem, segment_len);
			if (status < 0)
				goto err_desc_get;

			desc->length += hw_desc->len;
			len -= segment_len;
			mem += segment_len;
		} while (len >= segment_len);
	}

	/* Set end-of-link to the last link descriptor of list */
	set_desc_last(&desc->hw_desc[num_sgs - 1]);

	/* Managed transfer list: link the LLIs back-to-front */
	do {
		hw_desc = &desc->hw_desc[--num_sgs];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num_sgs);

	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	/* Releases any LLIs already obtained for this descriptor */
	if (desc)
		axi_desc_put(desc);

	return NULL;
}
873eec91760SSia Jee Heng
/*
 * Prepare a memory-to-memory copy: @len is split into chunks bounded by
 * the hardware block size, one LLI per chunk, all linked into a single
 * transfer list.
 */
static struct dma_async_tx_descriptor *
dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
			 dma_addr_t src_adr, size_t len, unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	size_t block_ts, max_block_ts, xfer_len;
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 xfer_width, reg, num;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
		axi_chan_name(chan), &src_adr, &dst_adr, len, flags);

	max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
	/* Worst-case descriptor count, sized with the overall transfer width */
	xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);
	num = DIV_ROUND_UP(len, max_block_ts << xfer_width);
	desc = axi_desc_alloc(num);
	if (unlikely(!desc))
		goto err_desc_get;

	desc->chan = chan;
	num = 0;
	desc->length = 0;
	while (len) {
		xfer_len = len;

		hw_desc = &desc->hw_desc[num];
		/*
		 * Take care for the alignment.
		 * Actually source and destination widths can be different, but
		 * make them same to be simpler.
		 */
		xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);

		/*
		 * block_ts indicates the total number of data of width
		 * to be transferred in a DMA block transfer.
		 * BLOCK_TS register should be set to block_ts - 1
		 */
		block_ts = xfer_len >> xfer_width;
		if (block_ts > max_block_ts) {
			/* Clamp this chunk to one full hardware block */
			block_ts = max_block_ts;
			xfer_len = max_block_ts << xfer_width;
		}

		hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
		if (unlikely(!hw_desc->lli))
			goto err_desc_get;

		write_desc_sar(hw_desc, src_adr);
		write_desc_dar(hw_desc, dst_adr);
		hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

		reg = CH_CTL_H_LLI_VALID;
		/* Clamp AXI burst lengths when the IP requires it */
		if (chan->chip->dw->hdata->restrict_axi_burst_len) {
			u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;

			reg |= (CH_CTL_H_ARLEN_EN |
				burst_len << CH_CTL_H_ARLEN_POS |
				CH_CTL_H_AWLEN_EN |
				burst_len << CH_CTL_H_AWLEN_POS);
		}
		hw_desc->lli->ctl_hi = cpu_to_le32(reg);

		/* Both sides increment; same width and 4-beat bursts each way */
		reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		       DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
		       xfer_width << CH_CTL_L_DST_WIDTH_POS |
		       xfer_width << CH_CTL_L_SRC_WIDTH_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
		hw_desc->lli->ctl_lo = cpu_to_le32(reg);

		set_desc_src_master(hw_desc);
		set_desc_dest_master(hw_desc, desc);

		hw_desc->len = xfer_len;
		desc->length += hw_desc->len;
		/* update the length and addresses for the next loop cycle */
		len -= xfer_len;
		dst_adr += xfer_len;
		src_adr += xfer_len;
		num++;
	}

	/* Set end-of-link to the last link descriptor of list */
	set_desc_last(&desc->hw_desc[num - 1]);
	/* Managed transfer list: link the LLIs back-to-front */
	do {
		hw_desc = &desc->hw_desc[--num];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	/* Releases any LLIs already obtained for this descriptor */
	if (desc)
		axi_desc_put(desc);
	return NULL;
}
9761fe20f1bSEugeniy Paltsev
dw_axi_dma_chan_slave_config(struct dma_chan * dchan,struct dma_slave_config * config)97766c6c945SSia Jee Heng static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
97866c6c945SSia Jee Heng struct dma_slave_config *config)
97966c6c945SSia Jee Heng {
98066c6c945SSia Jee Heng struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
98166c6c945SSia Jee Heng
98266c6c945SSia Jee Heng memcpy(&chan->config, config, sizeof(*config));
98366c6c945SSia Jee Heng
98466c6c945SSia Jee Heng return 0;
98566c6c945SSia Jee Heng }
98666c6c945SSia Jee Heng
axi_chan_dump_lli(struct axi_dma_chan * chan,struct axi_dma_hw_desc * desc)9871fe20f1bSEugeniy Paltsev static void axi_chan_dump_lli(struct axi_dma_chan *chan,
988ef6fb2d6SSia Jee Heng struct axi_dma_hw_desc *desc)
9891fe20f1bSEugeniy Paltsev {
99086cb0defSBen Dooks if (!desc->lli) {
99186cb0defSBen Dooks dev_err(dchan2dev(&chan->vc.chan), "NULL LLI\n");
99286cb0defSBen Dooks return;
99386cb0defSBen Dooks }
99486cb0defSBen Dooks
9951fe20f1bSEugeniy Paltsev dev_err(dchan2dev(&chan->vc.chan),
9961fe20f1bSEugeniy Paltsev "SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
997ef6fb2d6SSia Jee Heng le64_to_cpu(desc->lli->sar),
998ef6fb2d6SSia Jee Heng le64_to_cpu(desc->lli->dar),
999ef6fb2d6SSia Jee Heng le64_to_cpu(desc->lli->llp),
1000ef6fb2d6SSia Jee Heng le32_to_cpu(desc->lli->block_ts_lo),
1001ef6fb2d6SSia Jee Heng le32_to_cpu(desc->lli->ctl_hi),
1002ef6fb2d6SSia Jee Heng le32_to_cpu(desc->lli->ctl_lo));
10031fe20f1bSEugeniy Paltsev }
10041fe20f1bSEugeniy Paltsev
axi_chan_list_dump_lli(struct axi_dma_chan * chan,struct axi_dma_desc * desc_head)10051fe20f1bSEugeniy Paltsev static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
10061fe20f1bSEugeniy Paltsev struct axi_dma_desc *desc_head)
10071fe20f1bSEugeniy Paltsev {
1008ef6fb2d6SSia Jee Heng int count = atomic_read(&chan->descs_allocated);
1009ef6fb2d6SSia Jee Heng int i;
10101fe20f1bSEugeniy Paltsev
1011ef6fb2d6SSia Jee Heng for (i = 0; i < count; i++)
1012ef6fb2d6SSia Jee Heng axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
10131fe20f1bSEugeniy Paltsev }
10141fe20f1bSEugeniy Paltsev
/*
 * Error-interrupt handler for one channel: stop the channel, report and
 * complete the bad descriptor, then try to restart with the next queued
 * descriptor.  Called from the interrupt handler with @status being the
 * raw channel IRQ status.
 */
static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	axi_chan_disable(chan);

	/* The bad descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);
	if (!vd) {
		/* Spurious error IRQ: nothing was issued on this channel */
		dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
			axi_chan_name(chan));
		goto out;
	}
	/* Remove the completed descriptor from issued list */
	list_del(&vd->node);

	/* WARN about bad descriptor */
	dev_err(chan2dev(chan),
		"Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
		axi_chan_name(chan), vd->tx.cookie, status);
	axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));

	vchan_cookie_complete(vd);

	/* Try to restart the controller */
	axi_chan_start_first_queued(chan);

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
10481fe20f1bSEugeniy Paltsev
/*
 * Block-transfer-complete handler for one channel.  For cyclic transfers
 * the current LLI is re-armed and the channel restarted; for one-shot
 * transfers the head descriptor is completed.
 */
static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
{
	int count = atomic_read(&chan->descs_allocated);
	struct axi_dma_hw_desc *hw_desc;
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	u64 llp;
	int i;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (unlikely(axi_chan_is_hw_enable(chan))) {
		/* The channel should be idle when its TRF IRQ fires */
		dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
			axi_chan_name(chan));
		axi_chan_disable(chan);
	}

	/* The completed descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);
	if (!vd) {
		dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
			axi_chan_name(chan));
		goto out;
	}

	if (chan->cyclic) {
		desc = vd_to_axi_desc(vd);
		if (desc) {
			/* Find the LLI matching the hardware's current LLP */
			llp = lo_hi_readq(chan->chan_regs + CH_LLP);
			for (i = 0; i < count; i++) {
				hw_desc = &desc->hw_desc[i];
				if (hw_desc->llp == llp) {
					axi_chan_irq_clear(chan, hw_desc->lli->status_lo);
					/* Re-validate the LLI so the ring keeps running */
					hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID;
					desc->completed_blocks = i;

					/* Notify the client once per completed period */
					if (((hw_desc->len * (i + 1)) % desc->period_len) == 0)
						vchan_cyclic_callback(vd);
					break;
				}
			}

			axi_chan_enable(chan);
		}
	} else {
		/* Remove the completed descriptor from issued list before completing */
		list_del(&vd->node);
		vchan_cookie_complete(vd);
	}

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
11021fe20f1bSEugeniy Paltsev
dw_axi_dma_interrupt(int irq,void * dev_id)11031fe20f1bSEugeniy Paltsev static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
11041fe20f1bSEugeniy Paltsev {
11051fe20f1bSEugeniy Paltsev struct axi_dma_chip *chip = dev_id;
11061fe20f1bSEugeniy Paltsev struct dw_axi_dma *dw = chip->dw;
11071fe20f1bSEugeniy Paltsev struct axi_dma_chan *chan;
11081fe20f1bSEugeniy Paltsev
11091fe20f1bSEugeniy Paltsev u32 status, i;
11101fe20f1bSEugeniy Paltsev
1111e7c7a016STom Rix /* Disable DMAC interrupts. We'll enable them after processing channels */
11121fe20f1bSEugeniy Paltsev axi_dma_irq_disable(chip);
11131fe20f1bSEugeniy Paltsev
1114e7c7a016STom Rix /* Poll, clear and process every channel interrupt status */
11151fe20f1bSEugeniy Paltsev for (i = 0; i < dw->hdata->nr_channels; i++) {
11161fe20f1bSEugeniy Paltsev chan = &dw->chan[i];
11171fe20f1bSEugeniy Paltsev status = axi_chan_irq_read(chan);
11181fe20f1bSEugeniy Paltsev axi_chan_irq_clear(chan, status);
11191fe20f1bSEugeniy Paltsev
11201fe20f1bSEugeniy Paltsev dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
11211fe20f1bSEugeniy Paltsev axi_chan_name(chan), i, status);
11221fe20f1bSEugeniy Paltsev
11231fe20f1bSEugeniy Paltsev if (status & DWAXIDMAC_IRQ_ALL_ERR)
11241fe20f1bSEugeniy Paltsev axi_chan_handle_err(chan, status);
11251fe20f1bSEugeniy Paltsev else if (status & DWAXIDMAC_IRQ_DMA_TRF)
11261fe20f1bSEugeniy Paltsev axi_chan_block_xfer_complete(chan);
11271fe20f1bSEugeniy Paltsev }
11281fe20f1bSEugeniy Paltsev
11291fe20f1bSEugeniy Paltsev /* Re-enable interrupts */
11301fe20f1bSEugeniy Paltsev axi_dma_irq_enable(chip);
11311fe20f1bSEugeniy Paltsev
11321fe20f1bSEugeniy Paltsev return IRQ_HANDLED;
11331fe20f1bSEugeniy Paltsev }
11341fe20f1bSEugeniy Paltsev
/*
 * dmaengine terminate_all: disable the channel, wait for the hardware to
 * report it idle, tear down the handshake routing for slave transfers,
 * and free every queued descriptor.
 */
static int dma_chan_terminate_all(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
	unsigned long flags;
	u32 val;
	int ret;
	LIST_HEAD(head);

	axi_chan_disable(chan);

	/* Poll (up to 50 ms) for the hardware to clear the channel-enable bit */
	ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
					!(val & chan_active), 1000, 50000);
	if (ret == -ETIMEDOUT)
		dev_warn(dchan2dev(dchan),
			 "%s failed to stop\n", axi_chan_name(chan));

	/* Slave transfers programmed a hardware handshake; undo it */
	if (chan->direction != DMA_MEM_TO_MEM)
		dw_axi_dma_set_hw_channel(chan, false);
	if (chan->direction == DMA_MEM_TO_DEV)
		dw_axi_dma_set_byte_halfword(chan, false);

	spin_lock_irqsave(&chan->vc.lock, flags);

	vchan_get_all_descriptors(&chan->vc, &head);

	chan->cyclic = false;
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	/* Free the collected descriptors outside the channel lock */
	vchan_dma_desc_free_list(&chan->vc, &head);

	dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));

	return 0;
}
11701fe20f1bSEugeniy Paltsev
/*
 * dmaengine pause: request channel suspend and poll for the SUSPENDED
 * interrupt status.  Returns 0 on success, -EAGAIN if the hardware did
 * not report suspension within the polling window.
 */
static int dma_chan_pause(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;
	unsigned int timeout = 20; /* timeout iterations */
	u32 val;

	spin_lock_irqsave(&chan->vc.lock, flags);

	/*
	 * The 8-channel register map keeps the suspend bits inside DMAC_CHEN;
	 * other maps use the dedicated DMAC_CHSUSPREG register.
	 */
	if (chan->chip->dw->hdata->reg_map_8_channels) {
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
		val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
		       BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
		axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
	} else {
		val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
		val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
		       BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
	}

	/* Busy-wait (up to 20 * 2 us) for the SUSPENDED status bit */
	do {
		if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
			break;

		udelay(2);
	} while (--timeout);

	axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);

	chan->is_paused = true;

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return timeout ? 0 : -EAGAIN;
}
12071fe20f1bSEugeniy Paltsev
12081fe20f1bSEugeniy Paltsev /* Called in chan locked context */
axi_chan_resume(struct axi_dma_chan * chan)12091fe20f1bSEugeniy Paltsev static inline void axi_chan_resume(struct axi_dma_chan *chan)
12101fe20f1bSEugeniy Paltsev {
12111fe20f1bSEugeniy Paltsev u32 val;
12121fe20f1bSEugeniy Paltsev
121382435166SPandith N if (chan->chip->dw->hdata->reg_map_8_channels) {
121449db68d4SEmil Renner Berthing val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
12151fe20f1bSEugeniy Paltsev val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
12161fe20f1bSEugeniy Paltsev val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
12171fe20f1bSEugeniy Paltsev axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
121882435166SPandith N } else {
121949db68d4SEmil Renner Berthing val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
122082435166SPandith N val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
122182435166SPandith N val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
122282435166SPandith N axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
122382435166SPandith N }
12241fe20f1bSEugeniy Paltsev
12251fe20f1bSEugeniy Paltsev chan->is_paused = false;
12261fe20f1bSEugeniy Paltsev }
12271fe20f1bSEugeniy Paltsev
dma_chan_resume(struct dma_chan * dchan)12281fe20f1bSEugeniy Paltsev static int dma_chan_resume(struct dma_chan *dchan)
12291fe20f1bSEugeniy Paltsev {
12301fe20f1bSEugeniy Paltsev struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
12311fe20f1bSEugeniy Paltsev unsigned long flags;
12321fe20f1bSEugeniy Paltsev
12331fe20f1bSEugeniy Paltsev spin_lock_irqsave(&chan->vc.lock, flags);
12341fe20f1bSEugeniy Paltsev
12351fe20f1bSEugeniy Paltsev if (chan->is_paused)
12361fe20f1bSEugeniy Paltsev axi_chan_resume(chan);
12371fe20f1bSEugeniy Paltsev
12381fe20f1bSEugeniy Paltsev spin_unlock_irqrestore(&chan->vc.lock, flags);
12391fe20f1bSEugeniy Paltsev
12401fe20f1bSEugeniy Paltsev return 0;
12411fe20f1bSEugeniy Paltsev }
12421fe20f1bSEugeniy Paltsev
/*
 * Quiesce the controller: mask interrupts, disable the DMAC, then gate
 * both clocks. Counterpart of axi_dma_resume(); also used as the
 * runtime-PM suspend path.
 */
static int axi_dma_suspend(struct axi_dma_chip *chip)
{
	axi_dma_irq_disable(chip);
	axi_dma_disable(chip);

	clk_disable_unprepare(chip->core_clk);
	clk_disable_unprepare(chip->cfgr_clk);

	return 0;
}
12531fe20f1bSEugeniy Paltsev
axi_dma_resume(struct axi_dma_chip * chip)12541fe20f1bSEugeniy Paltsev static int axi_dma_resume(struct axi_dma_chip *chip)
12551fe20f1bSEugeniy Paltsev {
12561fe20f1bSEugeniy Paltsev int ret;
12571fe20f1bSEugeniy Paltsev
12581fe20f1bSEugeniy Paltsev ret = clk_prepare_enable(chip->cfgr_clk);
12591fe20f1bSEugeniy Paltsev if (ret < 0)
12601fe20f1bSEugeniy Paltsev return ret;
12611fe20f1bSEugeniy Paltsev
12621fe20f1bSEugeniy Paltsev ret = clk_prepare_enable(chip->core_clk);
12631fe20f1bSEugeniy Paltsev if (ret < 0)
12641fe20f1bSEugeniy Paltsev return ret;
12651fe20f1bSEugeniy Paltsev
12661fe20f1bSEugeniy Paltsev axi_dma_enable(chip);
12671fe20f1bSEugeniy Paltsev axi_dma_irq_enable(chip);
12681fe20f1bSEugeniy Paltsev
12691fe20f1bSEugeniy Paltsev return 0;
12701fe20f1bSEugeniy Paltsev }
12711fe20f1bSEugeniy Paltsev
axi_dma_runtime_suspend(struct device * dev)12721fe20f1bSEugeniy Paltsev static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
12731fe20f1bSEugeniy Paltsev {
12741fe20f1bSEugeniy Paltsev struct axi_dma_chip *chip = dev_get_drvdata(dev);
12751fe20f1bSEugeniy Paltsev
12761fe20f1bSEugeniy Paltsev return axi_dma_suspend(chip);
12771fe20f1bSEugeniy Paltsev }
12781fe20f1bSEugeniy Paltsev
axi_dma_runtime_resume(struct device * dev)12791fe20f1bSEugeniy Paltsev static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
12801fe20f1bSEugeniy Paltsev {
12811fe20f1bSEugeniy Paltsev struct axi_dma_chip *chip = dev_get_drvdata(dev);
12821fe20f1bSEugeniy Paltsev
12831fe20f1bSEugeniy Paltsev return axi_dma_resume(chip);
12841fe20f1bSEugeniy Paltsev }
12851fe20f1bSEugeniy Paltsev
dw_axi_dma_of_xlate(struct of_phandle_args * dma_spec,struct of_dma * ofdma)1286b428c6faSSia Jee Heng static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,
1287b428c6faSSia Jee Heng struct of_dma *ofdma)
1288b428c6faSSia Jee Heng {
1289b428c6faSSia Jee Heng struct dw_axi_dma *dw = ofdma->of_dma_data;
1290b428c6faSSia Jee Heng struct axi_dma_chan *chan;
1291b428c6faSSia Jee Heng struct dma_chan *dchan;
1292b428c6faSSia Jee Heng
1293b428c6faSSia Jee Heng dchan = dma_get_any_slave_channel(&dw->dma);
1294b428c6faSSia Jee Heng if (!dchan)
1295b428c6faSSia Jee Heng return NULL;
1296b428c6faSSia Jee Heng
1297b428c6faSSia Jee Heng chan = dchan_to_axi_dma_chan(dchan);
1298b428c6faSSia Jee Heng chan->hw_handshake_num = dma_spec->args[0];
1299b428c6faSSia Jee Heng return dchan;
1300b428c6faSSia Jee Heng }
1301b428c6faSSia Jee Heng
/*
 * Read the controller configuration from device properties into hdata.
 *
 * Mandatory properties: "dma-channels", "snps,dma-masters",
 * "snps,data-width", plus per-channel arrays "snps,block-size" and
 * "snps,priority". "snps,axi-max-burst-len" is optional and, when
 * present, caps the AXI read/write burst length.
 *
 * Returns 0 on success or a negative errno for a missing property or an
 * out-of-range value.
 */
static int parse_device_properties(struct axi_dma_chip *chip)
{
	struct device *dev = chip->dev;
	u32 tmp, carr[DMAC_MAX_CHANNELS];
	int ret;

	ret = device_property_read_u32(dev, "dma-channels", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
		return -EINVAL;

	chip->dw->hdata->nr_channels = tmp;
	/* Small-channel-count controllers use the legacy 8-channel reg map */
	if (tmp <= DMA_REG_MAP_CH_REF)
		chip->dw->hdata->reg_map_8_channels = true;

	ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
		return -EINVAL;

	chip->dw->hdata->nr_masters = tmp;

	ret = device_property_read_u32(dev, "snps,data-width", &tmp);
	if (ret)
		return ret;
	if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
		return -EINVAL;

	chip->dw->hdata->m_data_width = tmp;

	/* One maximum-block-size entry per channel, each in (0, MAX] */
	ret = device_property_read_u32_array(dev, "snps,block-size", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
			return -EINVAL;

		chip->dw->hdata->block_size[tmp] = carr[tmp];
	}

	ret = device_property_read_u32_array(dev, "snps,priority", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	/* Priority value must be programmed within [0:nr_channels-1] range */
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] >= chip->dw->hdata->nr_channels)
			return -EINVAL;

		chip->dw->hdata->priority[tmp] = carr[tmp];
	}

	/* axi-max-burst-len is optional property */
	ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
	if (!ret) {
		/* Property is burst length in beats: valid [ARWLEN_MIN+1, ARWLEN_MAX+1] */
		if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
			return -EINVAL;
		if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
			return -EINVAL;

		chip->dw->hdata->restrict_axi_burst_len = true;
		chip->dw->hdata->axi_rw_burst_len = tmp;
	}

	return 0;
}
13711fe20f1bSEugeniy Paltsev
/*
 * Platform probe: allocate driver state, map registers, apply
 * per-variant quirks, parse the DT configuration, initialize the
 * virtual channels, and register the dmaengine device plus the OF
 * translation helper. Most resources are devm-managed, so early error
 * paths simply return.
 */
static int dw_probe(struct platform_device *pdev)
{
	struct axi_dma_chip *chip;
	struct dw_axi_dma *dw;
	struct dw_axi_dma_hcfg *hdata;
	struct reset_control *resets;
	unsigned int flags;
	u32 i;
	int ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
	if (!hdata)
		return -ENOMEM;

	chip->dw = dw;
	chip->dev = &pdev->dev;
	chip->dw->hdata = hdata;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	chip->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	/* Per-variant quirks are encoded as flag bits in the OF match data */
	flags = (uintptr_t)of_device_get_match_data(&pdev->dev);
	if (flags & AXI_DMA_FLAG_HAS_APB_REGS) {
		/* Extra APB register block at MEM resource index 1 */
		chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(chip->apb_regs))
			return PTR_ERR(chip->apb_regs);
	}

	if (flags & AXI_DMA_FLAG_HAS_RESETS) {
		resets = devm_reset_control_array_get_exclusive(&pdev->dev);
		if (IS_ERR(resets))
			return PTR_ERR(resets);

		ret = reset_control_deassert(resets);
		if (ret)
			return ret;
	}

	chip->dw->hdata->use_cfg2 = !!(flags & AXI_DMA_FLAG_USE_CFG2);

	chip->core_clk = devm_clk_get(chip->dev, "core-clk");
	if (IS_ERR(chip->core_clk))
		return PTR_ERR(chip->core_clk);

	chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
	if (IS_ERR(chip->cfgr_clk))
		return PTR_ERR(chip->cfgr_clk);

	ret = parse_device_properties(chip);
	if (ret)
		return ret;

	dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	/* Single shared IRQ line for all channels */
	ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
			       IRQF_SHARED, KBUILD_MODNAME, chip);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < hdata->nr_channels; i++) {
		struct axi_dma_chan *chan = &dw->chan[i];

		chan->chip = chip;
		chan->id = i;
		/* Channel register windows follow the common block, fixed stride */
		chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
		atomic_set(&chan->descs_allocated, 0);

		chan->vc.desc_free = vchan_desc_put;
		vchan_init(&chan->vc, &dw->dma);
	}

	/* Set capabilities */
	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);

	/* DMA capabilities */
	dw->dma.max_burst = hdata->axi_rw_burst_len;
	dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_MEM_TO_MEM);
	dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	dw->dma.dev = chip->dev;
	dw->dma.device_tx_status = dma_chan_tx_status;
	dw->dma.device_issue_pending = dma_chan_issue_pending;
	dw->dma.device_terminate_all = dma_chan_terminate_all;
	dw->dma.device_pause = dma_chan_pause;
	dw->dma.device_resume = dma_chan_resume;

	dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
	dw->dma.device_synchronize = dw_axi_dma_synchronize;
	dw->dma.device_config = dw_axi_dma_chan_slave_config;
	dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg;
	dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic;

	/*
	 * Synopsis DesignWare AxiDMA datasheet mentioned Maximum
	 * supported blocks is 1024. Device register width is 4 bytes.
	 * Therefore, set constraint to 1024 * 4.
	 */
	dw->dma.dev->dma_parms = &dw->dma_parms;
	dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE);
	platform_set_drvdata(pdev, chip);

	pm_runtime_enable(chip->dev);

	/*
	 * We can't just call pm_runtime_get here instead of
	 * pm_runtime_get_noresume + axi_dma_resume because we need
	 * driver to work also without Runtime PM.
	 */
	pm_runtime_get_noresume(chip->dev);
	ret = axi_dma_resume(chip);
	if (ret < 0)
		goto err_pm_disable;

	axi_dma_hw_init(chip);

	pm_runtime_put(chip->dev);

	/* Managed registration: device is unregistered automatically on detach */
	ret = dmaenginem_async_device_register(&dw->dma);
	if (ret)
		goto err_pm_disable;

	/* Register with OF helpers for DMA lookups */
	ret = of_dma_controller_register(pdev->dev.of_node,
					 dw_axi_dma_of_xlate, dw);
	if (ret < 0)
		dev_warn(&pdev->dev,
			 "Failed to register OF DMA controller, fallback to MEM_TO_MEM mode\n");

	dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
		 dw->hdata->nr_channels);

	return 0;

err_pm_disable:
	pm_runtime_disable(chip->dev);

	return ret;
}
15351fe20f1bSEugeniy Paltsev
/*
 * Platform remove: quiesce the hardware, release PM/IRQ/OF resources
 * and tear down the virtual channels. Clocks are enabled up front
 * because register access requires them; axi_dma_suspend() gates them
 * again afterwards.
 */
static int dw_remove(struct platform_device *pdev)
{
	struct axi_dma_chip *chip = platform_get_drvdata(pdev);
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan, *_chan;
	u32 i;

	/* Enable clk before accessing to registers */
	clk_prepare_enable(chip->cfgr_clk);
	clk_prepare_enable(chip->core_clk);
	axi_dma_irq_disable(chip);
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		axi_chan_disable(&chip->dw->chan[i]);
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
	}
	axi_dma_disable(chip);

	pm_runtime_disable(chip->dev);
	axi_dma_suspend(chip);

	/* Free the IRQ before killing tasklets so no new work is scheduled */
	devm_free_irq(chip->dev, chip->irq, chip);

	of_dma_controller_free(chip->dev->of_node);

	list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
			vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
	}

	return 0;
}
15681fe20f1bSEugeniy Paltsev
/* Only runtime-PM callbacks are provided; no system-sleep ops are set. */
static const struct dev_pm_ops dw_axi_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
};
15721fe20f1bSEugeniy Paltsev
/* Match table; .data carries per-variant AXI_DMA_FLAG_* quirk bits. */
static const struct of_device_id dw_dma_of_id_table[] = {
	{
		/* Generic Synopsys controller: no quirk flags */
		.compatible = "snps,axi-dma-1.01a"
	}, {
		/* Intel Keem Bay: extra APB register block (MEM resource 1) */
		.compatible = "intel,kmb-axi-dma",
		.data = (void *)AXI_DMA_FLAG_HAS_APB_REGS,
	}, {
		/* StarFive JH7110: reset lines and CFG2 register layout */
		.compatible = "starfive,jh7110-axi-dma",
		.data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2),
	},
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
15861fe20f1bSEugeniy Paltsev
/* Platform driver glue; PM routes through the runtime-PM ops above. */
static struct platform_driver dw_driver = {
	.probe = dw_probe,
	.remove = dw_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = dw_dma_of_id_table,
		.pm = &dw_axi_dma_pm_ops,
	},
};
module_platform_driver(dw_driver);
15971fe20f1bSEugeniy Paltsev
15981fe20f1bSEugeniy Paltsev MODULE_LICENSE("GPL v2");
15991fe20f1bSEugeniy Paltsev MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
16001fe20f1bSEugeniy Paltsev MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");
1601