// SPDX-License-Identifier: GPL-2.0
// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)

/*
 * Synopsys DesignWare AXI DMA Controller driver.
 *
 * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "dw-axi-dmac.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

/*
 * The set of bus widths supported by the DMA controller. The DW AXI DMAC
 * supports a master data bus width of up to 512 bits (for both AXI master
 * interfaces), but the actual width depends on the IP block configuration.
 */
#define AXI_DMA_BUSWIDTHS		  \
	(DMA_SLAVE_BUSWIDTH_1_BYTE	| \
	DMA_SLAVE_BUSWIDTH_2_BYTES	| \
	DMA_SLAVE_BUSWIDTH_4_BYTES	| \
	DMA_SLAVE_BUSWIDTH_8_BYTES	| \
	DMA_SLAVE_BUSWIDTH_16_BYTES	| \
	DMA_SLAVE_BUSWIDTH_32_BYTES	| \
	DMA_SLAVE_BUSWIDTH_64_BYTES)
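
/*
 * Editor's note (illustrative sketch, not a quote of the probe code): this
 * mask is typically advertised to dmaengine clients through the dma_device
 * capability fields during probe, roughly as:
 *
 *	dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
 *	dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
 */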

static inline void
axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
{
	iowrite32(val, chip->regs + reg);
}

static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
{
	return ioread32(chip->regs + reg);
}

static inline void
axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
{
	iowrite32(val, chan->chan_regs + reg);
}

static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
{
	return ioread32(chan->chan_regs + reg);
}

static inline void
axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
{
	/*
	 * We split one 64-bit write into two 32-bit writes, as some hardware
	 * doesn't support 64-bit access.
	 */
	iowrite32(lower_32_bits(val), chan->chan_regs + reg);
	iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
}

static inline void axi_chan_config_write(struct axi_dma_chan *chan,
					 struct axi_dma_chan_config *config)
{
	u32 cfg_lo, cfg_hi;

	cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS |
		  config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
	if (chan->chip->dw->hdata->reg_map_8_channels) {
		cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS |
			 config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS |
			 config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS |
			 config->src_per << CH_CFG_H_SRC_PER_POS |
			 config->dst_per << CH_CFG_H_DST_PER_POS |
			 config->prior << CH_CFG_H_PRIORITY_POS;
	} else {
		cfg_lo |= config->src_per << CH_CFG2_L_SRC_PER_POS |
			  config->dst_per << CH_CFG2_L_DST_PER_POS;
		cfg_hi = config->tt_fc << CH_CFG2_H_TT_FC_POS |
			 config->hs_sel_src << CH_CFG2_H_HS_SEL_SRC_POS |
			 config->hs_sel_dst << CH_CFG2_H_HS_SEL_DST_POS |
			 config->prior << CH_CFG2_H_PRIORITY_POS;
	}
	axi_chan_iowrite32(chan, CH_CFG_L, cfg_lo);
	axi_chan_iowrite32(chan, CH_CFG_H, cfg_hi);
}
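
/*
 * Layout note (editor's summary; dw-axi-dmac.h holds the authoritative
 * field definitions): on IP configured with the legacy <= 8-channel
 * register map the handshake and priority fields live in CH_CFG_H, while
 * newer register maps move the per-channel handshake numbers into the
 * upper half of CH_CFG_L (the CH_CFG2_* positions used above).
 */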

static inline void axi_dma_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
{
	u32 val;

	if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
	} else {
		val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
		val &= ~irq_mask;
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
	}
}

static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
}

static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
}

static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
}

static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
{
	return axi_chan_ioread32(chan, CH_INTSTATUS);
}

static inline void axi_chan_disable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
	if (chan->chip->dw->hdata->reg_map_8_channels)
		val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
	else
		val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}

static inline void axi_chan_enable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	if (chan->chip->dw->hdata->reg_map_8_channels)
		val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
	else
		val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}

static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);

	return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
}

static void axi_dma_hw_init(struct axi_dma_chip *chip)
{
	int ret;
	u32 i;

	for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
		axi_chan_disable(&chip->dw->chan[i]);
	}
	ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(64));
	if (ret)
		dev_warn(chip->dev, "Unable to set coherent mask\n");
}

static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
				   dma_addr_t dst, size_t len)
{
	u32 max_width = chan->chip->dw->hdata->m_data_width;

	return __ffs(src | dst | len | BIT(max_width));
}
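
/*
 * Worked example (illustrative): with src = 0x1000, dst = 0x2004,
 * len = 0x40 and m_data_width = 5 (e.g. a 256-bit master data bus,
 * BIT(5) = 0x20), __ffs(0x1000 | 0x2004 | 0x40 | 0x20) = __ffs(0x3064) = 2,
 * i.e. a 32-bit transfer width: the widest size that every address and the
 * length are aligned to, capped by the bus width via the BIT(max_width) term.
 */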

static inline const char *axi_chan_name(struct axi_dma_chan *chan)
{
	return dma_chan_name(&chan->vc.chan);
}

static struct axi_dma_desc *axi_desc_alloc(u32 num)
{
	struct axi_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->hw_desc = kcalloc(num, sizeof(*desc->hw_desc), GFP_NOWAIT);
	if (!desc->hw_desc) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
					dma_addr_t *addr)
{
	struct axi_dma_lli *lli;
	dma_addr_t phys;

	lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
	if (unlikely(!lli)) {
		dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
			axi_chan_name(chan));
		return NULL;
	}

	atomic_inc(&chan->descs_allocated);
	*addr = phys;

	return lli;
}

static void axi_desc_put(struct axi_dma_desc *desc)
{
	struct axi_dma_chan *chan = desc->chan;
	int count = atomic_read(&chan->descs_allocated);
	struct axi_dma_hw_desc *hw_desc;
	int descs_put;

	for (descs_put = 0; descs_put < count; descs_put++) {
		hw_desc = &desc->hw_desc[descs_put];
		dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);
	}

	kfree(desc->hw_desc);
	kfree(desc);
	atomic_sub(descs_put, &chan->descs_allocated);
	dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
		axi_chan_name(chan), descs_put,
		atomic_read(&chan->descs_allocated));
}

static void vchan_desc_put(struct virt_dma_desc *vdesc)
{
	axi_desc_put(vd_to_axi_desc(vdesc));
}

static enum dma_status
dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
		  struct dma_tx_state *txstate)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	u32 completed_length;
	unsigned long flags;
	u32 completed_blocks;
	size_t bytes = 0;
	u32 length;
	u32 len;

	status = dma_cookie_status(dchan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vdesc = vchan_find_desc(&chan->vc, cookie);
	if (vdesc) {
		length = vd_to_axi_desc(vdesc)->length;
		completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks;
		len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
		completed_length = completed_blocks * len;
		bytes = length - completed_length;
	}
	/* If the descriptor is no longer on the vc lists, residue stays 0 */

	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dma_set_residue(txstate, bytes);

	return status;
}
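
/*
 * Residue example (illustrative): for a transfer of total length 4096
 * split into equal hardware blocks of hw_desc[0].len = 1024, having
 * completed_blocks = 2 gives a residue of 4096 - 2 * 1024 = 2048 bytes
 * still to be transferred.
 */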

static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->llp = cpu_to_le64(adr);
}

static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
{
	axi_chan_iowrite64(chan, CH_LLP, adr);
}

static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)
{
	u32 offset = DMAC_APB_BYTE_WR_CH_EN;
	u32 reg_width, val;

	if (!chan->chip->apb_regs) {
		dev_dbg(chan->chip->dev, "apb_regs not initialized\n");
		return;
	}

	reg_width = __ffs(chan->config.dst_addr_width);
	if (reg_width == DWAXIDMAC_TRANS_WIDTH_16)
		offset = DMAC_APB_HALFWORD_WR_CH_EN;

	val = ioread32(chan->chip->apb_regs + offset);

	if (set)
		val |= BIT(chan->id);
	else
		val &= ~BIT(chan->id);

	iowrite32(val, chan->chip->apb_regs + offset);
}

/* Called with the channel's vc.lock held */
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
				      struct axi_dma_desc *first)
{
	u32 priority = chan->chip->dw->hdata->priority[chan->id];
	struct axi_dma_chan_config config = {};
	u32 irq_mask;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));

		return;
	}

	axi_dma_enable(chan->chip);

	config.dst_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
	config.src_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
	config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
	config.prior = priority;
	config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
	config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;
	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		dw_axi_dma_set_byte_halfword(chan, true);
		config.tt_fc = chan->config.device_fc ?
				DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
				DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC;
		if (chan->chip->apb_regs)
			config.dst_per = chan->id;
		else
			config.dst_per = chan->hw_handshake_num;
		break;
	case DMA_DEV_TO_MEM:
		config.tt_fc = chan->config.device_fc ?
				DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
				DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC;
		if (chan->chip->apb_regs)
			config.src_per = chan->id;
		else
			config.src_per = chan->hw_handshake_num;
		break;
	default:
		break;
	}
	axi_chan_config_write(chan, &config);

	write_chan_llp(chan, first->hw_desc[0].llp | lms);

	irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
	axi_chan_irq_sig_set(chan, irq_mask);

	/* Generate 'suspend' status but don't generate interrupt */
	irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
	axi_chan_irq_set(chan, irq_mask);

	axi_chan_enable(chan);
}

static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
{
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd_to_axi_desc(vd);
	dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
		vd->tx.cookie);
	axi_chan_block_xfer_start(chan, desc);
}

static void dma_chan_issue_pending(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (vchan_issue_pending(&chan->vc))
		axi_chan_start_first_queued(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
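
/*
 * For context, a minimal sketch of how a dmaengine client would drive a
 * slave channel of this controller. Illustrative only, not part of the
 * driver: dev, "tx" (the dma-names entry), fifo_addr, buf, len and
 * my_callback are made-up placeholders.
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(dev, "tx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
 *					  DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = my_callback;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);	(ends up in dma_chan_issue_pending())
 */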
4571fe20f1bSEugeniy Paltsev 
45867b2e39fSSia Jee Heng static void dw_axi_dma_synchronize(struct dma_chan *dchan)
45967b2e39fSSia Jee Heng {
46067b2e39fSSia Jee Heng 	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
46167b2e39fSSia Jee Heng 
46267b2e39fSSia Jee Heng 	vchan_synchronize(&chan->vc);
46367b2e39fSSia Jee Heng }
46467b2e39fSSia Jee Heng 
4651fe20f1bSEugeniy Paltsev static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
4661fe20f1bSEugeniy Paltsev {
4671fe20f1bSEugeniy Paltsev 	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
4681fe20f1bSEugeniy Paltsev 
4691fe20f1bSEugeniy Paltsev 	/* ASSERT: channel is idle */
4701fe20f1bSEugeniy Paltsev 	if (axi_chan_is_hw_enable(chan)) {
4711fe20f1bSEugeniy Paltsev 		dev_err(chan2dev(chan), "%s is non-idle!\n",
4721fe20f1bSEugeniy Paltsev 			axi_chan_name(chan));
4731fe20f1bSEugeniy Paltsev 		return -EBUSY;
4741fe20f1bSEugeniy Paltsev 	}
4751fe20f1bSEugeniy Paltsev 
4760b9d2fb3SSia Jee Heng 	/* LLI address must be aligned to a 64-byte boundary */
4770b9d2fb3SSia Jee Heng 	chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
4780b9d2fb3SSia Jee Heng 					  chan->chip->dev,
4790b9d2fb3SSia Jee Heng 					  sizeof(struct axi_dma_lli),
4800b9d2fb3SSia Jee Heng 					  64, 0);
4810b9d2fb3SSia Jee Heng 	if (!chan->desc_pool) {
4820b9d2fb3SSia Jee Heng 		dev_err(chan2dev(chan), "No memory for descriptors\n");
4830b9d2fb3SSia Jee Heng 		return -ENOMEM;
4840b9d2fb3SSia Jee Heng 	}
4851fe20f1bSEugeniy Paltsev 	dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));
4861fe20f1bSEugeniy Paltsev 
4871fe20f1bSEugeniy Paltsev 	pm_runtime_get(chan->chip->dev);
4881fe20f1bSEugeniy Paltsev 
4891fe20f1bSEugeniy Paltsev 	return 0;
4901fe20f1bSEugeniy Paltsev }
4911fe20f1bSEugeniy Paltsev 
4921fe20f1bSEugeniy Paltsev static void dma_chan_free_chan_resources(struct dma_chan *dchan)
4931fe20f1bSEugeniy Paltsev {
4941fe20f1bSEugeniy Paltsev 	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
4951fe20f1bSEugeniy Paltsev 
4961fe20f1bSEugeniy Paltsev 	/* ASSERT: channel is idle */
4971fe20f1bSEugeniy Paltsev 	if (axi_chan_is_hw_enable(chan))
4981fe20f1bSEugeniy Paltsev 		dev_err(dchan2dev(dchan), "%s is non-idle!\n",
4991fe20f1bSEugeniy Paltsev 			axi_chan_name(chan));
5001fe20f1bSEugeniy Paltsev 
5011fe20f1bSEugeniy Paltsev 	axi_chan_disable(chan);
5021fe20f1bSEugeniy Paltsev 	axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);
5031fe20f1bSEugeniy Paltsev 
5041fe20f1bSEugeniy Paltsev 	vchan_free_chan_resources(&chan->vc);
5051fe20f1bSEugeniy Paltsev 
5060b9d2fb3SSia Jee Heng 	dma_pool_destroy(chan->desc_pool);
5070b9d2fb3SSia Jee Heng 	chan->desc_pool = NULL;
5081fe20f1bSEugeniy Paltsev 	dev_vdbg(dchan2dev(dchan),
5091fe20f1bSEugeniy Paltsev 		 "%s: free resources, descriptor still allocated: %u\n",
5101fe20f1bSEugeniy Paltsev 		 axi_chan_name(chan), atomic_read(&chan->descs_allocated));
5111fe20f1bSEugeniy Paltsev 
5121fe20f1bSEugeniy Paltsev 	pm_runtime_put(chan->chip->dev);
5131fe20f1bSEugeniy Paltsev }
5141fe20f1bSEugeniy Paltsev 
51532286e27SPandith N static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)
516425c8a53SSia Jee Heng {
51732286e27SPandith N 	struct axi_dma_chip *chip = chan->chip;
51832286e27SPandith N 	unsigned long reg_value, val;
519425c8a53SSia Jee Heng 
520425c8a53SSia Jee Heng 	if (!chip->apb_regs) {
52132286e27SPandith N 		dev_err(chip->dev, "apb_regs not initialized\n");
522425c8a53SSia Jee Heng 		return;
523425c8a53SSia Jee Heng 	}
524425c8a53SSia Jee Heng 
525425c8a53SSia Jee Heng 	/*
526425c8a53SSia Jee Heng 	 * An unused DMA channel has a default value of 0x3F.
527425c8a53SSia Jee Heng 	 * Lock the DMA channel by assign a handshake number to the channel.
528425c8a53SSia Jee Heng 	 * Unlock the DMA channel by assign 0x3F to the channel.
529425c8a53SSia Jee Heng 	 */
53032286e27SPandith N 	if (set)
53132286e27SPandith N 		val = chan->hw_handshake_num;
53232286e27SPandith N 	else
533425c8a53SSia Jee Heng 		val = UNUSED_CHANNEL;
534425c8a53SSia Jee Heng 
535425c8a53SSia Jee Heng 	reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
536425c8a53SSia Jee Heng 
53732286e27SPandith N 	/* Channel is already allocated, set handshake as per channel ID */
53832286e27SPandith N 	/* 64 bit write should handle for 8 channels */
53932286e27SPandith N 
54032286e27SPandith N 	reg_value &= ~(DMA_APB_HS_SEL_MASK <<
54132286e27SPandith N 			(chan->id * DMA_APB_HS_SEL_BIT_SIZE));
54232286e27SPandith N 	reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
54332286e27SPandith N 	lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
54432286e27SPandith N 
54532286e27SPandith N 	return;
546425c8a53SSia Jee Heng }
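
/*
 * Illustrative field math, assuming DMA_APB_HS_SEL_BIT_SIZE is 8 and
 * DMA_APB_HS_SEL_MASK is 0x3F (the actual values live in dw-axi-dmac.h):
 * for chan->id = 2 the channel's handshake field occupies bits [21:16] of
 * DMAC_APB_HW_HS_SEL_0, so the mask cleared is 0x3F << 16 and a handshake
 * number of 5 is written as 5 << 16.
 */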

/*
 * If DW_axi_dmac sees the CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched
 * LLI set to 1, it knows the current block is the final block in the
 * transfer and completes the DMA operation at the end of that block.
 */
static void set_desc_last(struct axi_dma_hw_desc *desc)
{
	u32 val;

	val = le32_to_cpu(desc->lli->ctl_hi);
	val |= CH_CTL_H_LLI_LAST;
	desc->lli->ctl_hi = cpu_to_le32(val);
}

static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->sar = cpu_to_le64(adr);
}

static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->dar = cpu_to_le64(adr);
}

static void set_desc_src_master(struct axi_dma_hw_desc *desc)
{
	u32 val;

	/* Select AXI0 for source master */
	val = le32_to_cpu(desc->lli->ctl_lo);
	val &= ~CH_CTL_L_SRC_MAST;
	desc->lli->ctl_lo = cpu_to_le32(val);
}

static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc,
				 struct axi_dma_desc *desc)
{
	u32 val;

	/* Select AXI1 for destination master if available */
	val = le32_to_cpu(hw_desc->lli->ctl_lo);
	if (desc->chan->chip->dw->hdata->nr_masters > 1)
		val |= CH_CTL_L_DST_MAST;
	else
		val &= ~CH_CTL_L_DST_MAST;

	hw_desc->lli->ctl_lo = cpu_to_le32(val);
}

static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,
				  struct axi_dma_hw_desc *hw_desc,
				  dma_addr_t mem_addr, size_t len)
{
	unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width);
	unsigned int reg_width;
	unsigned int mem_width;
	dma_addr_t device_addr;
	size_t axi_block_ts;
	size_t block_ts;
	u32 ctllo, ctlhi;
	u32 burst_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	mem_width = __ffs(data_width | mem_addr | len);
	if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
		mem_width = DWAXIDMAC_TRANS_WIDTH_32;

	if (!IS_ALIGNED(mem_addr, 4)) {
		dev_err(chan->chip->dev, "invalid buffer alignment\n");
		return -EINVAL;
	}

	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(chan->config.dst_addr_width);
		device_addr = chan->config.dst_addr;
		ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS |
			mem_width << CH_CTL_L_SRC_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		device_addr = chan->config.src_addr;
		ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS |
			mem_width << CH_CTL_L_DST_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> reg_width;
		break;
	default:
		return -EINVAL;
	}

	if (block_ts > axi_block_ts)
		return -EINVAL;

	hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
	if (unlikely(!hw_desc->lli))
		return -ENOMEM;

	ctlhi = CH_CTL_H_LLI_VALID;

	if (chan->chip->dw->hdata->restrict_axi_burst_len) {
		burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
		ctlhi |= CH_CTL_H_ARLEN_EN | CH_CTL_H_AWLEN_EN |
			 burst_len << CH_CTL_H_ARLEN_POS |
			 burst_len << CH_CTL_H_AWLEN_POS;
	}

	hw_desc->lli->ctl_hi = cpu_to_le32(ctlhi);

	if (chan->direction == DMA_MEM_TO_DEV) {
		write_desc_sar(hw_desc, mem_addr);
		write_desc_dar(hw_desc, device_addr);
	} else {
		write_desc_sar(hw_desc, device_addr);
		write_desc_dar(hw_desc, mem_addr);
	}

	hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

	ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		 DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS;
	hw_desc->lli->ctl_lo = cpu_to_le32(ctllo);

	set_desc_src_master(hw_desc);

	hw_desc->len = len;
	return 0;
}

static size_t calculate_block_len(struct axi_dma_chan *chan,
				  dma_addr_t dma_addr, size_t buf_len,
				  enum dma_transfer_direction direction)
{
	u32 data_width, reg_width, mem_width;
	size_t axi_block_ts, block_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	switch (direction) {
	case DMA_MEM_TO_DEV:
		data_width = BIT(chan->chip->dw->hdata->m_data_width);
		mem_width = __ffs(data_width | dma_addr | buf_len);
		if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
			mem_width = DWAXIDMAC_TRANS_WIDTH_32;

		block_len = axi_block_ts << mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		block_len = axi_block_ts << reg_width;
		break;
	default:
		block_len = 0;
	}

	return block_len;
}
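
/*
 * Worked example (illustrative): with a per-channel block_size limit of
 * axi_block_ts = 1024 data items and a 32-bit memory-side width
 * (mem_width = 2), one hardware block can move at most
 * 1024 << 2 = 4096 bytes, so a 10000-byte buffer needs
 * DIV_ROUND_UP(10000, 4096) = 3 segments.
 */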

static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
			    size_t buf_len, size_t period_len,
			    enum dma_transfer_direction direction,
			    unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	dma_addr_t src_addr = dma_addr;
	u32 num_periods, num_segments;
	size_t axi_block_len;
	u32 total_segments;
	u32 segment_len;
	unsigned int i;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	num_periods = buf_len / period_len;

	axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction);
	if (axi_block_len == 0)
		return NULL;

	num_segments = DIV_ROUND_UP(period_len, axi_block_len);
	segment_len = DIV_ROUND_UP(period_len, num_segments);

	total_segments = num_periods * num_segments;

	desc = axi_desc_alloc(total_segments);
	if (unlikely(!desc))
		goto err_desc_get;

	chan->direction = direction;
	desc->chan = chan;
	chan->cyclic = true;
	desc->length = 0;
	desc->period_len = period_len;

	for (i = 0; i < total_segments; i++) {
		hw_desc = &desc->hw_desc[i];

		status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr,
						segment_len);
		if (status < 0)
			goto err_desc_get;

		desc->length += hw_desc->len;
		/*
		 * Set end-of-link on every linked descriptor, so that the
		 * cyclic callback can be triggered from the interrupt handler.
		 */
		set_desc_last(hw_desc);

		src_addr += segment_len;
	}

	llp = desc->hw_desc[0].llp;

	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--total_segments];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (total_segments);

	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);

	return NULL;
}
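
/*
 * Illustrative client usage (not part of the driver): an audio-style ring
 * buffer of four 4096-byte periods could be set up against this prep
 * callback as follows; chan, buf and period_cb are made-up placeholders.
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf, 4 * 4096, 4096,
 *					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = period_cb;	(invoked once per completed period)
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */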

static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			      unsigned int sg_len,
			      enum dma_transfer_direction direction,
			      unsigned long flags, void *context)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 num_segments, segment_len;
	unsigned int loop = 0;
	struct scatterlist *sg;
	size_t axi_block_len;
	u32 len, num_sgs = 0;
	unsigned int i;
	dma_addr_t mem;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	mem = sg_dma_address(sgl);
	len = sg_dma_len(sgl);

	axi_block_len = calculate_block_len(chan, mem, len, direction);
	if (axi_block_len == 0)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);

	desc = axi_desc_alloc(num_sgs);
	if (unlikely(!desc))
		goto err_desc_get;

	desc->chan = chan;
	desc->length = 0;
	chan->direction = direction;

	for_each_sg(sgl, sg, sg_len, i) {
		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);
		num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
		segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments);

		do {
			hw_desc = &desc->hw_desc[loop++];
			status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem, segment_len);
			if (status < 0)
				goto err_desc_get;

			desc->length += hw_desc->len;
			len -= segment_len;
			mem += segment_len;
		} while (len >= segment_len);
	}

	/* Set end-of-link to the last link descriptor of list */
	set_desc_last(&desc->hw_desc[num_sgs - 1]);

	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--num_sgs];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num_sgs);

	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);

	return NULL;
}
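
/*
 * Segmentation example (illustrative): an SG entry of 8192 bytes with
 * axi_block_len = 4096 yields num_segments = 2 and
 * segment_len = DIV_ROUND_UP(8192, 2) = 4096, so the inner loop above
 * programs two LLIs of 4096 bytes each for that entry.
 */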

static struct dma_async_tx_descriptor *
dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
			 dma_addr_t src_adr, size_t len, unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	size_t block_ts, max_block_ts, xfer_len;
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 xfer_width, reg, num;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
		axi_chan_name(chan), &src_adr, &dst_adr, len, flags);

	max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
	xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);
	num = DIV_ROUND_UP(len, max_block_ts << xfer_width);
	desc = axi_desc_alloc(num);
	if (unlikely(!desc))
		goto err_desc_get;

	desc->chan = chan;
	num = 0;
	desc->length = 0;
	while (len) {
		xfer_len = len;

		hw_desc = &desc->hw_desc[num];
		/*
		 * Take care of alignment. Source and destination widths can
		 * actually differ, but we keep them the same for simplicity.
		 */
		xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);

		/*
		 * block_ts is the number of data items of the given width
		 * transferred in one DMA block; the BLOCK_TS register must
		 * be programmed with block_ts - 1.
		 */
		block_ts = xfer_len >> xfer_width;
		if (block_ts > max_block_ts) {
			block_ts = max_block_ts;
			xfer_len = max_block_ts << xfer_width;
		}

		hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
		if (unlikely(!hw_desc->lli))
			goto err_desc_get;

		write_desc_sar(hw_desc, src_adr);
		write_desc_dar(hw_desc, dst_adr);
		hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

		reg = CH_CTL_H_LLI_VALID;
		if (chan->chip->dw->hdata->restrict_axi_burst_len) {
			u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;

			reg |= (CH_CTL_H_ARLEN_EN |
				burst_len << CH_CTL_H_ARLEN_POS |
				CH_CTL_H_AWLEN_EN |
				burst_len << CH_CTL_H_AWLEN_POS);
		}
		hw_desc->lli->ctl_hi = cpu_to_le32(reg);

		reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		       DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
		       xfer_width << CH_CTL_L_DST_WIDTH_POS |
		       xfer_width << CH_CTL_L_SRC_WIDTH_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
		hw_desc->lli->ctl_lo = cpu_to_le32(reg);

		set_desc_src_master(hw_desc);
		set_desc_dest_master(hw_desc, desc);

		hw_desc->len = xfer_len;
		desc->length += hw_desc->len;
		/* Update the length and addresses for the next loop cycle */
		len -= xfer_len;
		dst_adr += xfer_len;
		src_adr += xfer_len;
		num++;
	}

	/* Set end-of-link to the last link descriptor of list */
	set_desc_last(&desc->hw_desc[num - 1]);
	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--num];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);
	return NULL;
}
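
/*
 * Illustrative client usage (not part of the driver): a memcpy offload
 * through the generic dmaengine API; dst, src, len and done_cb are
 * made-up placeholders and both addresses must already be DMA-mapped.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (!chan)
 *		return -ENODEV;
 *	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = done_cb;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */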

static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
					struct dma_slave_config *config)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));

	return 0;
}

static void axi_chan_dump_lli(struct axi_dma_chan *chan,
			      struct axi_dma_hw_desc *desc)
{
	if (!desc->lli) {
		dev_err(dchan2dev(&chan->vc.chan), "NULL LLI\n");
		return;
	}

	dev_err(dchan2dev(&chan->vc.chan),
		"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
		le64_to_cpu(desc->lli->sar),
		le64_to_cpu(desc->lli->dar),
		le64_to_cpu(desc->lli->llp),
		le32_to_cpu(desc->lli->block_ts_lo),
		le32_to_cpu(desc->lli->ctl_hi),
		le32_to_cpu(desc->lli->ctl_lo));
}

static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
				   struct axi_dma_desc *desc_head)
{
	int count = atomic_read(&chan->descs_allocated);
	int i;

	for (i = 0; i < count; i++)
		axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
}

static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	axi_chan_disable(chan);

	/* The bad descriptor is currently at the head of the vc list */
	vd = vchan_next_desc(&chan->vc);
	/* Remove the bad descriptor from the issued list */
	list_del(&vd->node);

	/* WARN about bad descriptor */
	dev_err(chan2dev(chan),
		"Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
		axi_chan_name(chan), vd->tx.cookie, status);
	axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));

	vchan_cookie_complete(vd);

	/* Try to restart the controller */
	axi_chan_start_first_queued(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
{
	int count = atomic_read(&chan->descs_allocated);
	struct axi_dma_hw_desc *hw_desc;
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	u64 llp;
	int i;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
			axi_chan_name(chan));
		axi_chan_disable(chan);
	}

	/* The completed descriptor is currently at the head of the vc list */
	vd = vchan_next_desc(&chan->vc);
	if (!vd) {
		dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
			axi_chan_name(chan));
		goto out;
	}

	if (chan->cyclic) {
		desc = vd_to_axi_desc(vd);
		if (desc) {
			llp = lo_hi_readq(chan->chan_regs + CH_LLP);
			for (i = 0; i < count; i++) {
				hw_desc = &desc->hw_desc[i];
				if (hw_desc->llp == llp) {
					axi_chan_irq_clear(chan, hw_desc->lli->status_lo);
					hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID;
					desc->completed_blocks = i;

					if (((hw_desc->len * (i + 1)) % desc->period_len) == 0)
						vchan_cyclic_callback(vd);
					break;
				}
			}

			axi_chan_enable(chan);
		}
	} else {
		/* Remove the completed descriptor from issued list before completing */
		list_del(&vd->node);
		vchan_cookie_complete(vd);

		/* Submit queued descriptors after processing the completed ones */
		axi_chan_start_first_queued(chan);
	}

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
10941fe20f1bSEugeniy Paltsev 
10951fe20f1bSEugeniy Paltsev static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
10961fe20f1bSEugeniy Paltsev {
10971fe20f1bSEugeniy Paltsev 	struct axi_dma_chip *chip = dev_id;
10981fe20f1bSEugeniy Paltsev 	struct dw_axi_dma *dw = chip->dw;
10991fe20f1bSEugeniy Paltsev 	struct axi_dma_chan *chan;
11001fe20f1bSEugeniy Paltsev 
11011fe20f1bSEugeniy Paltsev 	u32 status, i;
11021fe20f1bSEugeniy Paltsev 
1103e7c7a016STom Rix 	/* Disable DMAC interrupts; we'll re-enable them after processing the channels */
11041fe20f1bSEugeniy Paltsev 	axi_dma_irq_disable(chip);
11051fe20f1bSEugeniy Paltsev 
1106e7c7a016STom Rix 	/* Poll, clear and process every channel interrupt status */
11071fe20f1bSEugeniy Paltsev 	for (i = 0; i < dw->hdata->nr_channels; i++) {
11081fe20f1bSEugeniy Paltsev 		chan = &dw->chan[i];
11091fe20f1bSEugeniy Paltsev 		status = axi_chan_irq_read(chan);
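		/*
		 * Clear only the status bits just read; events that arrive
		 * while this handler runs stay pending and are picked up
		 * once interrupts are re-enabled below.
		 */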
11101fe20f1bSEugeniy Paltsev 		axi_chan_irq_clear(chan, status);
11111fe20f1bSEugeniy Paltsev 
11121fe20f1bSEugeniy Paltsev 		dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
11131fe20f1bSEugeniy Paltsev 			axi_chan_name(chan), i, status);
11141fe20f1bSEugeniy Paltsev 
11151fe20f1bSEugeniy Paltsev 		if (status & DWAXIDMAC_IRQ_ALL_ERR)
11161fe20f1bSEugeniy Paltsev 			axi_chan_handle_err(chan, status);
11171fe20f1bSEugeniy Paltsev 		else if (status & DWAXIDMAC_IRQ_DMA_TRF)
11181fe20f1bSEugeniy Paltsev 			axi_chan_block_xfer_complete(chan);
11191fe20f1bSEugeniy Paltsev 	}
11201fe20f1bSEugeniy Paltsev 
11211fe20f1bSEugeniy Paltsev 	/* Re-enable interrupts */
11221fe20f1bSEugeniy Paltsev 	axi_dma_irq_enable(chip);
11231fe20f1bSEugeniy Paltsev 
11241fe20f1bSEugeniy Paltsev 	return IRQ_HANDLED;
11251fe20f1bSEugeniy Paltsev }
11261fe20f1bSEugeniy Paltsev 
11271fe20f1bSEugeniy Paltsev static int dma_chan_terminate_all(struct dma_chan *dchan)
11281fe20f1bSEugeniy Paltsev {
11291fe20f1bSEugeniy Paltsev 	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
11301deb96c0SSia Jee Heng 	u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
11311fe20f1bSEugeniy Paltsev 	unsigned long flags;
11321deb96c0SSia Jee Heng 	u32 val;
11331deb96c0SSia Jee Heng 	int ret;
11341fe20f1bSEugeniy Paltsev 	LIST_HEAD(head);
11351fe20f1bSEugeniy Paltsev 
11361fe20f1bSEugeniy Paltsev 	axi_chan_disable(chan);
11371fe20f1bSEugeniy Paltsev 
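	/*
	 * Disabling takes effect asynchronously: poll DMAC_CHEN (in 1 ms
	 * steps, for up to 10 ms) until the hardware clears the channel's
	 * enable bit before tearing the descriptors down.
	 */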
11381deb96c0SSia Jee Heng 	ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
11391deb96c0SSia Jee Heng 					!(val & chan_active), 1000, 10000);
11401deb96c0SSia Jee Heng 	if (ret == -ETIMEDOUT)
11411deb96c0SSia Jee Heng 		dev_warn(dchan2dev(dchan),
11421deb96c0SSia Jee Heng 			 "%s failed to stop\n", axi_chan_name(chan));
11431deb96c0SSia Jee Heng 
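	/* Non-memcpy channels release their hardware handshake interface here */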
1144425c8a53SSia Jee Heng 	if (chan->direction != DMA_MEM_TO_MEM)
114532286e27SPandith N 		dw_axi_dma_set_hw_channel(chan, false);
1146f74b3025SSia Jee Heng 	if (chan->direction == DMA_MEM_TO_DEV)
1147f74b3025SSia Jee Heng 		dw_axi_dma_set_byte_halfword(chan, false);
1148425c8a53SSia Jee Heng 
11491deb96c0SSia Jee Heng 	spin_lock_irqsave(&chan->vc.lock, flags);
11501deb96c0SSia Jee Heng 
11511fe20f1bSEugeniy Paltsev 	vchan_get_all_descriptors(&chan->vc, &head);
11521fe20f1bSEugeniy Paltsev 
11531deb96c0SSia Jee Heng 	chan->cyclic = false;
11541fe20f1bSEugeniy Paltsev 	spin_unlock_irqrestore(&chan->vc.lock, flags);
11551fe20f1bSEugeniy Paltsev 
115651fe9cd2SSascha Hauer 	vchan_dma_desc_free_list(&chan->vc, &head);
115751fe9cd2SSascha Hauer 
11581fe20f1bSEugeniy Paltsev 	dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));
11591fe20f1bSEugeniy Paltsev 
11601fe20f1bSEugeniy Paltsev 	return 0;
11611fe20f1bSEugeniy Paltsev }
11621fe20f1bSEugeniy Paltsev 
11631fe20f1bSEugeniy Paltsev static int dma_chan_pause(struct dma_chan *dchan)
11641fe20f1bSEugeniy Paltsev {
11651fe20f1bSEugeniy Paltsev 	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
11661fe20f1bSEugeniy Paltsev 	unsigned long flags;
11671fe20f1bSEugeniy Paltsev 	unsigned int timeout = 20; /* timeout iterations */
11681fe20f1bSEugeniy Paltsev 	u32 val;
11691fe20f1bSEugeniy Paltsev 
11701fe20f1bSEugeniy Paltsev 	spin_lock_irqsave(&chan->vc.lock, flags);
11711fe20f1bSEugeniy Paltsev 
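	/*
	 * Two register layouts exist: configurations with up to 8 channels
	 * fold the suspend request/write-enable bits into DMAC_CHEN, while
	 * larger configurations use the dedicated DMAC_CHSUSPREG register.
	 */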
117282435166SPandith N 	if (chan->chip->dw->hdata->reg_map_8_channels) {
11731fe20f1bSEugeniy Paltsev 		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
11741fe20f1bSEugeniy Paltsev 		val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
11751fe20f1bSEugeniy Paltsev 			BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
11761fe20f1bSEugeniy Paltsev 		axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
117782435166SPandith N 	} else {
117849db68d4SEmil Renner Berthing 		val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
117949db68d4SEmil Renner Berthing 		val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
118082435166SPandith N 			BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
118182435166SPandith N 		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
118282435166SPandith N 	}
11831fe20f1bSEugeniy Paltsev 
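	/*
	 * Busy-wait for the hardware to report SUSPENDED; give up after
	 * 20 polls of 2 us each, in which case the caller gets -EAGAIN.
	 */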
11841fe20f1bSEugeniy Paltsev 	do {
11851fe20f1bSEugeniy Paltsev 		if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
11861fe20f1bSEugeniy Paltsev 			break;
11871fe20f1bSEugeniy Paltsev 
11881fe20f1bSEugeniy Paltsev 		udelay(2);
11891fe20f1bSEugeniy Paltsev 	} while (--timeout);
11901fe20f1bSEugeniy Paltsev 
11911fe20f1bSEugeniy Paltsev 	axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);
11921fe20f1bSEugeniy Paltsev 
11931fe20f1bSEugeniy Paltsev 	chan->is_paused = true;
11941fe20f1bSEugeniy Paltsev 
11951fe20f1bSEugeniy Paltsev 	spin_unlock_irqrestore(&chan->vc.lock, flags);
11961fe20f1bSEugeniy Paltsev 
11971fe20f1bSEugeniy Paltsev 	return timeout ? 0 : -EAGAIN;
11981fe20f1bSEugeniy Paltsev }
11991fe20f1bSEugeniy Paltsev 
12001fe20f1bSEugeniy Paltsev /* Called with the channel's vc.lock held */
12011fe20f1bSEugeniy Paltsev static inline void axi_chan_resume(struct axi_dma_chan *chan)
12021fe20f1bSEugeniy Paltsev {
12031fe20f1bSEugeniy Paltsev 	u32 val;
12041fe20f1bSEugeniy Paltsev 
120582435166SPandith N 	if (chan->chip->dw->hdata->reg_map_8_channels) {
120649db68d4SEmil Renner Berthing 		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
12071fe20f1bSEugeniy Paltsev 		val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
12081fe20f1bSEugeniy Paltsev 		val |=  (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
12091fe20f1bSEugeniy Paltsev 		axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
121082435166SPandith N 	} else {
121149db68d4SEmil Renner Berthing 		val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
121282435166SPandith N 		val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
121382435166SPandith N 		val |=  (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
121482435166SPandith N 		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
121582435166SPandith N 	}
12161fe20f1bSEugeniy Paltsev 
12171fe20f1bSEugeniy Paltsev 	chan->is_paused = false;
12181fe20f1bSEugeniy Paltsev }
12191fe20f1bSEugeniy Paltsev 
12201fe20f1bSEugeniy Paltsev static int dma_chan_resume(struct dma_chan *dchan)
12211fe20f1bSEugeniy Paltsev {
12221fe20f1bSEugeniy Paltsev 	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
12231fe20f1bSEugeniy Paltsev 	unsigned long flags;
12241fe20f1bSEugeniy Paltsev 
12251fe20f1bSEugeniy Paltsev 	spin_lock_irqsave(&chan->vc.lock, flags);
12261fe20f1bSEugeniy Paltsev 
12271fe20f1bSEugeniy Paltsev 	if (chan->is_paused)
12281fe20f1bSEugeniy Paltsev 		axi_chan_resume(chan);
12291fe20f1bSEugeniy Paltsev 
12301fe20f1bSEugeniy Paltsev 	spin_unlock_irqrestore(&chan->vc.lock, flags);
12311fe20f1bSEugeniy Paltsev 
12321fe20f1bSEugeniy Paltsev 	return 0;
12331fe20f1bSEugeniy Paltsev }
12341fe20f1bSEugeniy Paltsev 
12351fe20f1bSEugeniy Paltsev static int axi_dma_suspend(struct axi_dma_chip *chip)
12361fe20f1bSEugeniy Paltsev {
12371fe20f1bSEugeniy Paltsev 	axi_dma_irq_disable(chip);
12381fe20f1bSEugeniy Paltsev 	axi_dma_disable(chip);
12391fe20f1bSEugeniy Paltsev 
12401fe20f1bSEugeniy Paltsev 	clk_disable_unprepare(chip->core_clk);
12411fe20f1bSEugeniy Paltsev 	clk_disable_unprepare(chip->cfgr_clk);
12421fe20f1bSEugeniy Paltsev 
12431fe20f1bSEugeniy Paltsev 	return 0;
12441fe20f1bSEugeniy Paltsev }
12451fe20f1bSEugeniy Paltsev 
12461fe20f1bSEugeniy Paltsev static int axi_dma_resume(struct axi_dma_chip *chip)
12471fe20f1bSEugeniy Paltsev {
12481fe20f1bSEugeniy Paltsev 	int ret;
12491fe20f1bSEugeniy Paltsev 
12501fe20f1bSEugeniy Paltsev 	ret = clk_prepare_enable(chip->cfgr_clk);
12511fe20f1bSEugeniy Paltsev 	if (ret < 0)
12521fe20f1bSEugeniy Paltsev 		return ret;
12531fe20f1bSEugeniy Paltsev 
12541fe20f1bSEugeniy Paltsev 	ret = clk_prepare_enable(chip->core_clk);
12551fe20f1bSEugeniy Paltsev 	if (ret < 0) {
		/* Undo the cfgr clock enable above so it isn't leaked */
		clk_disable_unprepare(chip->cfgr_clk);
12561fe20f1bSEugeniy Paltsev 		return ret;
	}
12571fe20f1bSEugeniy Paltsev 
12581fe20f1bSEugeniy Paltsev 	axi_dma_enable(chip);
12591fe20f1bSEugeniy Paltsev 	axi_dma_irq_enable(chip);
12601fe20f1bSEugeniy Paltsev 
12611fe20f1bSEugeniy Paltsev 	return 0;
12621fe20f1bSEugeniy Paltsev }
12631fe20f1bSEugeniy Paltsev 
12641fe20f1bSEugeniy Paltsev static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
12651fe20f1bSEugeniy Paltsev {
12661fe20f1bSEugeniy Paltsev 	struct axi_dma_chip *chip = dev_get_drvdata(dev);
12671fe20f1bSEugeniy Paltsev 
12681fe20f1bSEugeniy Paltsev 	return axi_dma_suspend(chip);
12691fe20f1bSEugeniy Paltsev }
12701fe20f1bSEugeniy Paltsev 
12711fe20f1bSEugeniy Paltsev static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
12721fe20f1bSEugeniy Paltsev {
12731fe20f1bSEugeniy Paltsev 	struct axi_dma_chip *chip = dev_get_drvdata(dev);
12741fe20f1bSEugeniy Paltsev 
12751fe20f1bSEugeniy Paltsev 	return axi_dma_resume(chip);
12761fe20f1bSEugeniy Paltsev }
12771fe20f1bSEugeniy Paltsev 
1278b428c6faSSia Jee Heng static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,
1279b428c6faSSia Jee Heng 					    struct of_dma *ofdma)
1280b428c6faSSia Jee Heng {
1281b428c6faSSia Jee Heng 	struct dw_axi_dma *dw = ofdma->of_dma_data;
1282b428c6faSSia Jee Heng 	struct axi_dma_chan *chan;
1283b428c6faSSia Jee Heng 	struct dma_chan *dchan;
1284b428c6faSSia Jee Heng 
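	/* The first cell of the DMA specifier selects the hardware handshake interface */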
1285b428c6faSSia Jee Heng 	dchan = dma_get_any_slave_channel(&dw->dma);
1286b428c6faSSia Jee Heng 	if (!dchan)
1287b428c6faSSia Jee Heng 		return NULL;
1288b428c6faSSia Jee Heng 
1289b428c6faSSia Jee Heng 	chan = dchan_to_axi_dma_chan(dchan);
1290b428c6faSSia Jee Heng 	chan->hw_handshake_num = dma_spec->args[0];
1291b428c6faSSia Jee Heng 	return dchan;
1292b428c6faSSia Jee Heng }
1293b428c6faSSia Jee Heng 
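/*
 * For reference, a hypothetical device tree node wiring up this driver.
 * The compatible string, clock names and property names match what the
 * driver looks up, but the example values (addresses, interrupt number,
 * channel count, sizes) are illustrative only and depend on the
 * synthesis-time configuration of the IP:
 *
 *	dmac: dma-controller@80000 {
 *		compatible = "snps,axi-dma-1.01a";
 *		reg = <0x80000 0x400>;
 *		clocks = <&core_clk>, <&cfgr_clk>;
 *		clock-names = "core-clk", "cfgr-clk";
 *		interrupts = <27>;
 *		#dma-cells = <1>;
 *		dma-channels = <4>;
 *		snps,dma-masters = <2>;
 *		snps,data-width = <3>;
 *		snps,block-size = <4096 4096 4096 4096>;
 *		snps,priority = <0 1 2 3>;
 *	};
 */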
12941fe20f1bSEugeniy Paltsev static int parse_device_properties(struct axi_dma_chip *chip)
12951fe20f1bSEugeniy Paltsev {
12961fe20f1bSEugeniy Paltsev 	struct device *dev = chip->dev;
12971fe20f1bSEugeniy Paltsev 	u32 tmp, carr[DMAC_MAX_CHANNELS];
12981fe20f1bSEugeniy Paltsev 	int ret;
12991fe20f1bSEugeniy Paltsev 
13001fe20f1bSEugeniy Paltsev 	ret = device_property_read_u32(dev, "dma-channels", &tmp);
13011fe20f1bSEugeniy Paltsev 	if (ret)
13021fe20f1bSEugeniy Paltsev 		return ret;
13031fe20f1bSEugeniy Paltsev 	if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
13041fe20f1bSEugeniy Paltsev 		return -EINVAL;
13051fe20f1bSEugeniy Paltsev 
13061fe20f1bSEugeniy Paltsev 	chip->dw->hdata->nr_channels = tmp;
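	/*
	 * Configurations with at most DMA_REG_MAP_CH_REF channels use the
	 * legacy register map, where channel suspend control is folded
	 * into DMAC_CHEN (see dma_chan_pause()).
	 */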
130782435166SPandith N 	if (tmp <= DMA_REG_MAP_CH_REF)
130882435166SPandith N 		chip->dw->hdata->reg_map_8_channels = true;
13091fe20f1bSEugeniy Paltsev 
13101fe20f1bSEugeniy Paltsev 	ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
13111fe20f1bSEugeniy Paltsev 	if (ret)
13121fe20f1bSEugeniy Paltsev 		return ret;
13131fe20f1bSEugeniy Paltsev 	if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
13141fe20f1bSEugeniy Paltsev 		return -EINVAL;
13151fe20f1bSEugeniy Paltsev 
13161fe20f1bSEugeniy Paltsev 	chip->dw->hdata->nr_masters = tmp;
13171fe20f1bSEugeniy Paltsev 
13181fe20f1bSEugeniy Paltsev 	ret = device_property_read_u32(dev, "snps,data-width", &tmp);
13191fe20f1bSEugeniy Paltsev 	if (ret)
13201fe20f1bSEugeniy Paltsev 		return ret;
13211fe20f1bSEugeniy Paltsev 	if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
13221fe20f1bSEugeniy Paltsev 		return -EINVAL;
13231fe20f1bSEugeniy Paltsev 
13241fe20f1bSEugeniy Paltsev 	chip->dw->hdata->m_data_width = tmp;
13251fe20f1bSEugeniy Paltsev 
13261fe20f1bSEugeniy Paltsev 	ret = device_property_read_u32_array(dev, "snps,block-size", carr,
13271fe20f1bSEugeniy Paltsev 					     chip->dw->hdata->nr_channels);
13281fe20f1bSEugeniy Paltsev 	if (ret)
13291fe20f1bSEugeniy Paltsev 		return ret;
13301fe20f1bSEugeniy Paltsev 	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
13311fe20f1bSEugeniy Paltsev 		if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
13321fe20f1bSEugeniy Paltsev 			return -EINVAL;
13331fe20f1bSEugeniy Paltsev 
13341fe20f1bSEugeniy Paltsev 		chip->dw->hdata->block_size[tmp] = carr[tmp];
13351fe20f1bSEugeniy Paltsev 	}
13361fe20f1bSEugeniy Paltsev 
13371fe20f1bSEugeniy Paltsev 	ret = device_property_read_u32_array(dev, "snps,priority", carr,
13381fe20f1bSEugeniy Paltsev 					     chip->dw->hdata->nr_channels);
13391fe20f1bSEugeniy Paltsev 	if (ret)
13401fe20f1bSEugeniy Paltsev 		return ret;
13411fe20f1bSEugeniy Paltsev 	/* Priority values must be programmed within the [0:nr_channels-1] range */
13421fe20f1bSEugeniy Paltsev 	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
13431fe20f1bSEugeniy Paltsev 		if (carr[tmp] >= chip->dw->hdata->nr_channels)
13441fe20f1bSEugeniy Paltsev 			return -EINVAL;
13451fe20f1bSEugeniy Paltsev 
13461fe20f1bSEugeniy Paltsev 		chip->dw->hdata->priority[tmp] = carr[tmp];
13471fe20f1bSEugeniy Paltsev 	}
13481fe20f1bSEugeniy Paltsev 
13491fe20f1bSEugeniy Paltsev 	/* snps,axi-max-burst-len is an optional property */
13501fe20f1bSEugeniy Paltsev 	ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
13511fe20f1bSEugeniy Paltsev 	if (!ret) {
13521fe20f1bSEugeniy Paltsev 		if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
13531fe20f1bSEugeniy Paltsev 			return -EINVAL;
13541fe20f1bSEugeniy Paltsev 		if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
13551fe20f1bSEugeniy Paltsev 			return -EINVAL;
13561fe20f1bSEugeniy Paltsev 
13571fe20f1bSEugeniy Paltsev 		chip->dw->hdata->restrict_axi_burst_len = true;
1358c454d16aSPandith N 		chip->dw->hdata->axi_rw_burst_len = tmp;
13591fe20f1bSEugeniy Paltsev 	}
13601fe20f1bSEugeniy Paltsev 
13611fe20f1bSEugeniy Paltsev 	return 0;
13621fe20f1bSEugeniy Paltsev }
13631fe20f1bSEugeniy Paltsev 
13641fe20f1bSEugeniy Paltsev static int dw_probe(struct platform_device *pdev)
13651fe20f1bSEugeniy Paltsev {
13663df2d81fSSia Jee Heng 	struct device_node *node = pdev->dev.of_node;
13671fe20f1bSEugeniy Paltsev 	struct axi_dma_chip *chip;
13681fe20f1bSEugeniy Paltsev 	struct dw_axi_dma *dw;
13691fe20f1bSEugeniy Paltsev 	struct dw_axi_dma_hcfg *hdata;
13701fe20f1bSEugeniy Paltsev 	u32 i;
13711fe20f1bSEugeniy Paltsev 	int ret;
13721fe20f1bSEugeniy Paltsev 
13731fe20f1bSEugeniy Paltsev 	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
13741fe20f1bSEugeniy Paltsev 	if (!chip)
13751fe20f1bSEugeniy Paltsev 		return -ENOMEM;
13761fe20f1bSEugeniy Paltsev 
13771fe20f1bSEugeniy Paltsev 	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
13781fe20f1bSEugeniy Paltsev 	if (!dw)
13791fe20f1bSEugeniy Paltsev 		return -ENOMEM;
13801fe20f1bSEugeniy Paltsev 
13811fe20f1bSEugeniy Paltsev 	hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
13821fe20f1bSEugeniy Paltsev 	if (!hdata)
13831fe20f1bSEugeniy Paltsev 		return -ENOMEM;
13841fe20f1bSEugeniy Paltsev 
13851fe20f1bSEugeniy Paltsev 	chip->dw = dw;
13861fe20f1bSEugeniy Paltsev 	chip->dev = &pdev->dev;
13871fe20f1bSEugeniy Paltsev 	chip->dw->hdata = hdata;
13881fe20f1bSEugeniy Paltsev 
13891fe20f1bSEugeniy Paltsev 	chip->irq = platform_get_irq(pdev, 0);
13901fe20f1bSEugeniy Paltsev 	if (chip->irq < 0)
13911fe20f1bSEugeniy Paltsev 		return chip->irq;
13921fe20f1bSEugeniy Paltsev 
1393*4b23603aSTudor Ambarus 	chip->regs = devm_platform_ioremap_resource(pdev, 0);
13941fe20f1bSEugeniy Paltsev 	if (IS_ERR(chip->regs))
13951fe20f1bSEugeniy Paltsev 		return PTR_ERR(chip->regs);
13961fe20f1bSEugeniy Paltsev 
13973df2d81fSSia Jee Heng 	if (of_device_is_compatible(node, "intel,kmb-axi-dma")) {
13983df2d81fSSia Jee Heng 		chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
13993df2d81fSSia Jee Heng 		if (IS_ERR(chip->apb_regs))
14003df2d81fSSia Jee Heng 			return PTR_ERR(chip->apb_regs);
14013df2d81fSSia Jee Heng 	}
14023df2d81fSSia Jee Heng 
14031fe20f1bSEugeniy Paltsev 	chip->core_clk = devm_clk_get(chip->dev, "core-clk");
14041fe20f1bSEugeniy Paltsev 	if (IS_ERR(chip->core_clk))
14051fe20f1bSEugeniy Paltsev 		return PTR_ERR(chip->core_clk);
14061fe20f1bSEugeniy Paltsev 
14071fe20f1bSEugeniy Paltsev 	chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
14081fe20f1bSEugeniy Paltsev 	if (IS_ERR(chip->cfgr_clk))
14091fe20f1bSEugeniy Paltsev 		return PTR_ERR(chip->cfgr_clk);
14101fe20f1bSEugeniy Paltsev 
14111fe20f1bSEugeniy Paltsev 	ret = parse_device_properties(chip);
14121fe20f1bSEugeniy Paltsev 	if (ret)
14131fe20f1bSEugeniy Paltsev 		return ret;
14141fe20f1bSEugeniy Paltsev 
14151fe20f1bSEugeniy Paltsev 	dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
14161fe20f1bSEugeniy Paltsev 				sizeof(*dw->chan), GFP_KERNEL);
14171fe20f1bSEugeniy Paltsev 	if (!dw->chan)
14181fe20f1bSEugeniy Paltsev 		return -ENOMEM;
14191fe20f1bSEugeniy Paltsev 
14201fe20f1bSEugeniy Paltsev 	ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
14211fe20f1bSEugeniy Paltsev 			       IRQF_SHARED, KBUILD_MODNAME, chip);
14221fe20f1bSEugeniy Paltsev 	if (ret)
14231fe20f1bSEugeniy Paltsev 		return ret;
14241fe20f1bSEugeniy Paltsev 
14251fe20f1bSEugeniy Paltsev 	INIT_LIST_HEAD(&dw->dma.channels);
14261fe20f1bSEugeniy Paltsev 	for (i = 0; i < hdata->nr_channels; i++) {
14271fe20f1bSEugeniy Paltsev 		struct axi_dma_chan *chan = &dw->chan[i];
14281fe20f1bSEugeniy Paltsev 
14291fe20f1bSEugeniy Paltsev 		chan->chip = chip;
14301fe20f1bSEugeniy Paltsev 		chan->id = i;
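		/* Channel register blocks follow the common registers at a fixed stride */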
14311fe20f1bSEugeniy Paltsev 		chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
14321fe20f1bSEugeniy Paltsev 		atomic_set(&chan->descs_allocated, 0);
14331fe20f1bSEugeniy Paltsev 
14341fe20f1bSEugeniy Paltsev 		chan->vc.desc_free = vchan_desc_put;
14351fe20f1bSEugeniy Paltsev 		vchan_init(&chan->vc, &dw->dma);
14361fe20f1bSEugeniy Paltsev 	}
14371fe20f1bSEugeniy Paltsev 
14381fe20f1bSEugeniy Paltsev 	/* Set capabilities */
14391fe20f1bSEugeniy Paltsev 	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1440eec91760SSia Jee Heng 	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
14411deb96c0SSia Jee Heng 	dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);
14421fe20f1bSEugeniy Paltsev 
14431fe20f1bSEugeniy Paltsev 	/* DMA capabilities */
14441fe20f1bSEugeniy Paltsev 	dw->dma.chancnt = hdata->nr_channels;
1445c454d16aSPandith N 	dw->dma.max_burst = hdata->axi_rw_burst_len;
14461fe20f1bSEugeniy Paltsev 	dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
14471fe20f1bSEugeniy Paltsev 	dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
14481fe20f1bSEugeniy Paltsev 	dw->dma.directions = BIT(DMA_MEM_TO_MEM);
1449eec91760SSia Jee Heng 	dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
14508e55444dSSia Jee Heng 	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
14511fe20f1bSEugeniy Paltsev 
14521fe20f1bSEugeniy Paltsev 	dw->dma.dev = chip->dev;
14531fe20f1bSEugeniy Paltsev 	dw->dma.device_tx_status = dma_chan_tx_status;
14541fe20f1bSEugeniy Paltsev 	dw->dma.device_issue_pending = dma_chan_issue_pending;
14551fe20f1bSEugeniy Paltsev 	dw->dma.device_terminate_all = dma_chan_terminate_all;
14561fe20f1bSEugeniy Paltsev 	dw->dma.device_pause = dma_chan_pause;
14571fe20f1bSEugeniy Paltsev 	dw->dma.device_resume = dma_chan_resume;
14581fe20f1bSEugeniy Paltsev 
14591fe20f1bSEugeniy Paltsev 	dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
14601fe20f1bSEugeniy Paltsev 	dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;
14611fe20f1bSEugeniy Paltsev 
14621fe20f1bSEugeniy Paltsev 	dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
146367b2e39fSSia Jee Heng 	dw->dma.device_synchronize = dw_axi_dma_synchronize;
146466c6c945SSia Jee Heng 	dw->dma.device_config = dw_axi_dma_chan_slave_config;
1465eec91760SSia Jee Heng 	dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg;
14661deb96c0SSia Jee Heng 	dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic;
14671fe20f1bSEugeniy Paltsev 
146878a90a1eSSia Jee Heng 	/*
146978a90a1eSSia Jee Heng 	 * The Synopsys DesignWare AXI DMA datasheet states that the
147078a90a1eSSia Jee Heng 	 * maximum number of supported blocks is 1024 and that the device
147178a90a1eSSia Jee Heng 	 * register width is 4 bytes. Therefore, constrain the maximum
147278a90a1eSSia Jee Heng 	 * segment size to 1024 * 4.
	 */
147378a90a1eSSia Jee Heng 	dw->dma.dev->dma_parms = &dw->dma_parms;
147478a90a1eSSia Jee Heng 	dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE);
14751fe20f1bSEugeniy Paltsev 	platform_set_drvdata(pdev, chip);
14761fe20f1bSEugeniy Paltsev 
14771fe20f1bSEugeniy Paltsev 	pm_runtime_enable(chip->dev);
14781fe20f1bSEugeniy Paltsev 
14791fe20f1bSEugeniy Paltsev 	/*
14801fe20f1bSEugeniy Paltsev 	 * We can't just call pm_runtime_get here instead of
14811fe20f1bSEugeniy Paltsev 	 * pm_runtime_get_noresume + axi_dma_resume because we need
14821fe20f1bSEugeniy Paltsev 	 * driver to work also without Runtime PM.
14831fe20f1bSEugeniy Paltsev 	 */
14841fe20f1bSEugeniy Paltsev 	pm_runtime_get_noresume(chip->dev);
14851fe20f1bSEugeniy Paltsev 	ret = axi_dma_resume(chip);
14861fe20f1bSEugeniy Paltsev 	if (ret < 0)
14871fe20f1bSEugeniy Paltsev 		goto err_pm_disable;
14881fe20f1bSEugeniy Paltsev 
14891fe20f1bSEugeniy Paltsev 	axi_dma_hw_init(chip);
14901fe20f1bSEugeniy Paltsev 
14911fe20f1bSEugeniy Paltsev 	pm_runtime_put(chip->dev);
14921fe20f1bSEugeniy Paltsev 
1493c88c2d46SHuang Shijie 	ret = dmaenginem_async_device_register(&dw->dma);
14941fe20f1bSEugeniy Paltsev 	if (ret)
14951fe20f1bSEugeniy Paltsev 		goto err_pm_disable;
14961fe20f1bSEugeniy Paltsev 
1497b428c6faSSia Jee Heng 	/* Register with OF helpers for DMA lookups */
1498b428c6faSSia Jee Heng 	ret = of_dma_controller_register(pdev->dev.of_node,
1499b428c6faSSia Jee Heng 					 dw_axi_dma_of_xlate, dw);
1500b428c6faSSia Jee Heng 	if (ret < 0)
1501b428c6faSSia Jee Heng 		dev_warn(&pdev->dev,
1502b428c6faSSia Jee Heng 			 "Failed to register OF DMA controller, falling back to MEM_TO_MEM mode\n");
1503b428c6faSSia Jee Heng 
15041fe20f1bSEugeniy Paltsev 	dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
15051fe20f1bSEugeniy Paltsev 		 dw->hdata->nr_channels);
15061fe20f1bSEugeniy Paltsev 
15071fe20f1bSEugeniy Paltsev 	return 0;
15081fe20f1bSEugeniy Paltsev 
15091fe20f1bSEugeniy Paltsev err_pm_disable:
15101fe20f1bSEugeniy Paltsev 	pm_runtime_disable(chip->dev);
15111fe20f1bSEugeniy Paltsev 
15121fe20f1bSEugeniy Paltsev 	return ret;
15131fe20f1bSEugeniy Paltsev }
15141fe20f1bSEugeniy Paltsev 
15151fe20f1bSEugeniy Paltsev static int dw_remove(struct platform_device *pdev)
15161fe20f1bSEugeniy Paltsev {
15171fe20f1bSEugeniy Paltsev 	struct axi_dma_chip *chip = platform_get_drvdata(pdev);
15181fe20f1bSEugeniy Paltsev 	struct dw_axi_dma *dw = chip->dw;
15191fe20f1bSEugeniy Paltsev 	struct axi_dma_chan *chan, *_chan;
15201fe20f1bSEugeniy Paltsev 	u32 i;
15211fe20f1bSEugeniy Paltsev 
15221fe20f1bSEugeniy Paltsev 	/* Enable the clocks before accessing the registers */
15231fe20f1bSEugeniy Paltsev 	clk_prepare_enable(chip->cfgr_clk);
15241fe20f1bSEugeniy Paltsev 	clk_prepare_enable(chip->core_clk);
15251fe20f1bSEugeniy Paltsev 	axi_dma_irq_disable(chip);
15261fe20f1bSEugeniy Paltsev 	for (i = 0; i < dw->hdata->nr_channels; i++) {
15271fe20f1bSEugeniy Paltsev 		axi_chan_disable(&chip->dw->chan[i]);
15281fe20f1bSEugeniy Paltsev 		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
15291fe20f1bSEugeniy Paltsev 	}
15301fe20f1bSEugeniy Paltsev 	axi_dma_disable(chip);
15311fe20f1bSEugeniy Paltsev 
15321fe20f1bSEugeniy Paltsev 	pm_runtime_disable(chip->dev);
15331fe20f1bSEugeniy Paltsev 	axi_dma_suspend(chip);
15341fe20f1bSEugeniy Paltsev 
15351fe20f1bSEugeniy Paltsev 	devm_free_irq(chip->dev, chip->irq, chip);
15361fe20f1bSEugeniy Paltsev 
1537b428c6faSSia Jee Heng 	of_dma_controller_free(chip->dev->of_node);
1538b428c6faSSia Jee Heng 
15391fe20f1bSEugeniy Paltsev 	list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
15401fe20f1bSEugeniy Paltsev 			vc.chan.device_node) {
15411fe20f1bSEugeniy Paltsev 		list_del(&chan->vc.chan.device_node);
15421fe20f1bSEugeniy Paltsev 		tasklet_kill(&chan->vc.task);
15431fe20f1bSEugeniy Paltsev 	}
15441fe20f1bSEugeniy Paltsev 
15451fe20f1bSEugeniy Paltsev 	return 0;
15461fe20f1bSEugeniy Paltsev }
15471fe20f1bSEugeniy Paltsev 
15481fe20f1bSEugeniy Paltsev static const struct dev_pm_ops dw_axi_dma_pm_ops = {
15491fe20f1bSEugeniy Paltsev 	SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
15501fe20f1bSEugeniy Paltsev };
15511fe20f1bSEugeniy Paltsev 
15521fe20f1bSEugeniy Paltsev static const struct of_device_id dw_dma_of_id_table[] = {
15531fe20f1bSEugeniy Paltsev 	{ .compatible = "snps,axi-dma-1.01a" },
15543df2d81fSSia Jee Heng 	{ .compatible = "intel,kmb-axi-dma" },
15551fe20f1bSEugeniy Paltsev 	{}
15561fe20f1bSEugeniy Paltsev };
15571fe20f1bSEugeniy Paltsev MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
15581fe20f1bSEugeniy Paltsev 
15591fe20f1bSEugeniy Paltsev static struct platform_driver dw_driver = {
15601fe20f1bSEugeniy Paltsev 	.probe		= dw_probe,
15611fe20f1bSEugeniy Paltsev 	.remove		= dw_remove,
15621fe20f1bSEugeniy Paltsev 	.driver = {
15631fe20f1bSEugeniy Paltsev 		.name	= KBUILD_MODNAME,
156460b6122eSKrzysztof Kozlowski 		.of_match_table = dw_dma_of_id_table,
15651fe20f1bSEugeniy Paltsev 		.pm = &dw_axi_dma_pm_ops,
15661fe20f1bSEugeniy Paltsev 	},
15671fe20f1bSEugeniy Paltsev };
15681fe20f1bSEugeniy Paltsev module_platform_driver(dw_driver);
15691fe20f1bSEugeniy Paltsev 
15701fe20f1bSEugeniy Paltsev MODULE_LICENSE("GPL v2");
15711fe20f1bSEugeniy Paltsev MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
15721fe20f1bSEugeniy Paltsev MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");