// SPDX-License-Identifier: GPL-2.0-only
/*
 * Topcliff PCH DMA controller driver
 * Copyright (c) 2010 Intel Corporation
 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>

#include "dmaengine.h"

#define DRV_NAME "pch-dma"

#define DMA_CTL0_DISABLE		0x0
#define DMA_CTL0_SG			0x1
#define DMA_CTL0_ONESHOT		0x2
#define DMA_CTL0_MODE_MASK_BITS		0x3
#define DMA_CTL0_DIR_SHIFT_BITS		2
#define DMA_CTL0_BITS_PER_CH		4

#define DMA_CTL2_START_SHIFT_BITS	8
#define DMA_CTL2_IRQ_ENABLE_MASK	((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)

#define DMA_STATUS_IDLE			0x0
#define DMA_STATUS_DESC_READ		0x1
#define DMA_STATUS_WAIT			0x2
#define DMA_STATUS_ACCESS		0x3
#define DMA_STATUS_BITS_PER_CH		2
#define DMA_STATUS_MASK_BITS		0x3
#define DMA_STATUS_SHIFT_BITS		16
#define DMA_STATUS_IRQ(x)		(0x1 << (x))
#define DMA_STATUS0_ERR(x)		(0x1 << ((x) + 8))
#define DMA_STATUS2_ERR(x)		(0x1 << (x))

#define DMA_DESC_WIDTH_SHIFT_BITS	12
#define DMA_DESC_WIDTH_1_BYTE		(0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES		(0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES		(0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE	0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES	0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES	0x7FF
#define DMA_DESC_END_WITHOUT_IRQ	0x0
#define DMA_DESC_END_WITH_IRQ		0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ	0x2
#define DMA_DESC_FOLLOW_WITH_IRQ	0x3

#define MAX_CHAN_NR			12

#define DMA_MASK_CTL0_MODE	0x33333333
#define DMA_MASK_CTL2_MODE	0x00003333

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

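/*
 * Usage sketch (illustrative only, not lifted from an in-tree client):
 * a peripheral driver requests a DMA_SLAVE channel, hangs a struct
 * pch_dma_slave describing its FIFO registers and transfer width off
 * chan->private, then prepares and submits a scatter-gather transaction.
 * PERIPH_TX_FIFO, PERIPH_RX_FIFO and filter_fn are hypothetical, and
 * variable declarations are elided:
 *
 *	struct pch_dma_slave pd_slave = {
 *		.tx_reg = io_base + PERIPH_TX_FIFO,
 *		.rx_reg = io_base + PERIPH_RX_FIFO,
 *		.width	= PCH_DMA_WIDTH_4_BYTES,
 *	};
 *
 *	chan = dma_request_channel(mask, filter_fn, &pd_slave);
 *	chan->private = &pd_slave;
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */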
struct pch_dma_desc_regs {
	u32	dev_addr;
	u32	mem_addr;
	u32	size;
	u32	next;
};

struct pch_dma_regs {
	u32	dma_ctl0;
	u32	dma_ctl1;
	u32	dma_ctl2;
	u32	dma_ctl3;
	u32	dma_sts0;
	u32	dma_sts1;
	u32	dma_sts2;
	u32	reserved3;
	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};

struct pch_dma_desc {
	struct pch_dma_desc_regs regs;
	struct dma_async_tx_descriptor txd;
	struct list_head	desc_node;
	struct list_head	tx_list;
};

struct pch_dma_chan {
	struct dma_chan		chan;
	void __iomem *membase;
	enum dma_transfer_direction dir;
	struct tasklet_struct	tasklet;
	unsigned long		err_status;

	spinlock_t		lock;

	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		descs_allocated;
};

#define PDC_DEV_ADDR	0x00
#define PDC_MEM_ADDR	0x04
#define PDC_SIZE	0x08
#define PDC_NEXT	0x0C

#define channel_readl(pdc, name) \
	readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
	writel((val), (pdc)->membase + PDC_##name)

struct pch_dma {
	struct dma_device	dma;
	void __iomem *membase;
	struct dma_pool		*pool;
	struct pch_dma_regs	regs;
	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
	struct pch_dma_chan	channels[MAX_CHAN_NR];
};

#define PCH_DMA_CTL0	0x00
#define PCH_DMA_CTL1	0x04
#define PCH_DMA_CTL2	0x08
#define PCH_DMA_CTL3	0x0C
#define PCH_DMA_STS0	0x10
#define PCH_DMA_STS1	0x14
#define PCH_DMA_STS2	0x18

#define dma_readl(pd, name) \
	readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
	writel((val), (pd)->membase + PCH_DMA_##name)

static inline
struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct pch_dma_desc, txd);
}

static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pch_dma_chan, chan);
}

static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
	return container_of(ddev, struct pch_dma, dma);
}

static inline struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static inline
struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->active_list,
				struct pch_dma_desc, desc_node);
}

static inline
struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->queue,
				struct pch_dma_desc, desc_node);
}

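/*
 * Enable or disable the completion interrupt for one channel in CTL2.
 * Channels 0-7 use enable bits 0-7; the "+ 8" skip places channels 8-11
 * at bits 16-19, past what DMA_CTL2_START_SHIFT_BITS suggests are the
 * start bits occupying bits 8-15.
 */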
static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	int pos;

	if (chan->chan_id < 8)
		pos = chan->chan_id;
	else
		pos = chan->chan_id + 8;

	val = dma_readl(pd, CTL2);

	if (enable)
		val |= 0x1 << pos;
	else
		val &= ~(0x1 << pos);

	dma_writel(pd, CTL2, val);

	dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
		chan->chan_id, val);
}

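/*
 * Program the direction bit for pd_chan->dir into the channel's 4-bit
 * control field in CTL0 (channels 0-7) or CTL3 (channels 8-11).  Every
 * other channel's mode field is written back as 0b11 via the
 * DMA_MASK_CTL*_MODE constants, which (going by this driver's
 * read-modify-write convention) the hardware treats as "leave the mode
 * unchanged", so the update does not disturb concurrent channels.
 */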
static void pdc_set_dir(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_mode;
	u32 mask_ctl;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
					(DMA_CTL0_BITS_PER_CH * chan->chan_id);
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
				       (DMA_CTL0_BITS_PER_CH * chan->chan_id));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
					 DMA_CTL0_DIR_SHIFT_BITS));

		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11-->3 */
		val = dma_readl(pd, CTL3);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
						(DMA_CTL0_BITS_PER_CH * ch);
		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
						 (DMA_CTL0_BITS_PER_CH * ch));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
					 DMA_CTL0_DIR_SHIFT_BITS));
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
		chan->chan_id, val);
}

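/*
 * Program a new 2-bit mode (DISABLE/SG/ONESHOT) for the channel while
 * preserving its direction bit, using the same "write 0b11 to leave
 * the other channels' modes alone" convention as pdc_set_dir() above.
 */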
static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_ctl;
	u32 mask_dir;

	if (chan->chan_id < 8) {
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
			   (DMA_CTL0_BITS_PER_CH * chan->chan_id));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL0);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11-->3 */
		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
						 (DMA_CTL0_BITS_PER_CH * ch));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL3);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
		chan->chan_id, val);
}

static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS0);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}

static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS2);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
}

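/*
 * A channel is idle when its 2-bit status field in STS0 (channels 0-7)
 * or STS2 (channels 8-11) reads DMA_STATUS_IDLE.
 */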
static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
	u32 sts;

	if (pd_chan->chan.chan_id < 8)
		sts = pdc_get_status0(pd_chan);
	else
		sts = pdc_get_status2(pd_chan);

	return sts == DMA_STATUS_IDLE;
}

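/*
 * Kick off a transfer on an idle channel.  A single-segment descriptor
 * (empty tx_list) is written straight into the channel registers and
 * run in one-shot mode; a chain is started in scatter-gather mode by
 * pointing NEXT at the first hardware descriptor in memory.
 */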
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
{
	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.dev_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.mem_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
		pd_chan->chan.chan_id, desc->regs.size);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
		pd_chan->chan.chan_id, desc->regs.next);

	if (list_empty(&desc->tx_list)) {
		channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
		channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
		channel_writel(pd_chan, SIZE, desc->regs.size);
		channel_writel(pd_chan, NEXT, desc->regs.next);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
	} else {
		channel_writel(pd_chan, NEXT, desc->txd.phys);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
	}
}

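/*
 * Retire a completed descriptor chain: capture the callback first,
 * return the chain to the free list (after which it may be reused),
 * then invoke the callback.
 */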
static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
			       struct pch_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dmaengine_desc_callback cb;

	dmaengine_desc_get_callback(txd, &cb);
	list_splice_init(&desc->tx_list, &pd_chan->free_list);
	list_move(&desc->desc_node, &pd_chan->free_list);

	dmaengine_desc_callback_invoke(&cb, NULL);
}

static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!pdc_is_idle(pd_chan));

	if (!list_empty(&pd_chan->queue))
		pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &pd_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);
}

static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *bad_desc;

	bad_desc = pdc_first_active(pd_chan);
	list_del(&bad_desc->desc_node);

	list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

	if (!list_empty(&pd_chan->active_list))
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));

	dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
	dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
		 bad_desc->txd.cookie);

	pdc_chain_complete(pd_chan, bad_desc);
}

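/*
 * Called with the channel lock held and the channel idle: either
 * everything active has finished (complete it all and promote the
 * queue), or retire the head descriptor and restart with the next.
 */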
static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
	if (list_empty(&pd_chan->active_list) ||
	    list_is_singular(&pd_chan->active_list)) {
		pdc_complete_all(pd_chan);
	} else {
		pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));
	}
}

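/*
 * dmaengine ->tx_submit() hook: start the descriptor immediately if
 * the channel has nothing in flight, otherwise append it to the queue
 * for pdc_advance_work() to pick up later.
 */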
static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct pch_dma_desc *desc = to_pd_desc(txd);
	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);

	spin_lock(&pd_chan->lock);

	if (list_empty(&pd_chan->active_list)) {
		list_add_tail(&desc->desc_node, &pd_chan->active_list);
		pdc_dostart(pd_chan, desc);
	} else {
		list_add_tail(&desc->desc_node, &pd_chan->queue);
	}

	spin_unlock(&pd_chan->lock);
	return 0;
}

static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
{
	struct pch_dma_desc *desc = NULL;
	struct pch_dma *pd = to_pd(chan->device);
	dma_addr_t addr;

	desc = dma_pool_zalloc(pd->pool, flags, &addr);
	if (desc) {
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = pd_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = addr;
	}

	return desc;
}

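/*
 * Grab a reusable (ACKed) descriptor from the channel's free list,
 * falling back to a fresh GFP_ATOMIC allocation from the DMA pool
 * when none is available.
 */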
static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	struct pch_dma_desc *ret = NULL;
	int i = 0;

	spin_lock(&pd_chan->lock);
	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock(&pd_chan->lock);
	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

	if (!ret) {
		ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
		if (ret) {
			spin_lock(&pd_chan->lock);
			pd_chan->descs_allocated++;
			spin_unlock(&pd_chan->lock);
		} else {
			dev_err(chan2dev(&pd_chan->chan),
				"failed to alloc desc\n");
		}
	}

	return ret;
}

static void pdc_desc_put(struct pch_dma_chan *pd_chan,
			 struct pch_dma_desc *desc)
{
	if (desc) {
		spin_lock(&pd_chan->lock);
		list_splice_init(&desc->tx_list, &pd_chan->free_list);
		list_add(&desc->desc_node, &pd_chan->free_list);
		spin_unlock(&pd_chan->lock);
	}
}

static int pd_alloc_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc;
	LIST_HEAD(tmp_list);
	int i;

	if (!pdc_is_idle(pd_chan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	if (!list_empty(&pd_chan->free_list))
		return pd_chan->descs_allocated;

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = pdc_alloc_desc(chan, GFP_KERNEL);

		if (!desc) {
			dev_warn(chan2dev(chan),
				 "Only allocated %d initial descriptors\n", i);
			break;
		}

		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irq(&pd_chan->lock);
	list_splice(&tmp_list, &pd_chan->free_list);
	pd_chan->descs_allocated = i;
	dma_cookie_init(chan);
	spin_unlock_irq(&pd_chan->lock);

	pdc_enable_irq(chan, 1);

	return pd_chan->descs_allocated;
}

static void pd_free_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(tmp_list);

	BUG_ON(!pdc_is_idle(pd_chan));
	BUG_ON(!list_empty(&pd_chan->active_list));
	BUG_ON(!list_empty(&pd_chan->queue));

	spin_lock_irq(&pd_chan->lock);
	list_splice_init(&pd_chan->free_list, &tmp_list);
	pd_chan->descs_allocated = 0;
	spin_unlock_irq(&pd_chan->lock);

	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
		dma_pool_free(pd->pool, desc, desc->txd.phys);

	pdc_enable_irq(chan, 0);
}

static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static void pd_issue_pending(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);

	if (pdc_is_idle(pd_chan)) {
		spin_lock(&pd_chan->lock);
		pdc_advance_work(pd_chan);
		spin_unlock(&pd_chan->lock);
	}
}

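/*
 * Build one hardware descriptor per scatterlist entry and link them
 * through the NEXT field.  A segment whose sg_dma_len() exceeds the
 * width-dependent maximum count fails the whole prep rather than
 * being split into smaller descriptors.
 */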
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction direction, unsigned long flags,
			void *context)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_slave *pd_slave = chan->private;
	struct pch_dma_desc *first = NULL;
	struct pch_dma_desc *prev = NULL;
	struct pch_dma_desc *desc = NULL;
	struct scatterlist *sg;
	dma_addr_t reg;
	int i;

	if (unlikely(!sg_len)) {
		dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
		return NULL;
	}

	if (direction == DMA_DEV_TO_MEM)
		reg = pd_slave->rx_reg;
	else if (direction == DMA_MEM_TO_DEV)
		reg = pd_slave->tx_reg;
	else
		return NULL;

	pd_chan->dir = direction;
	pdc_set_dir(chan);

	for_each_sg(sgl, sg, sg_len, i) {
		desc = pdc_desc_get(pd_chan);

		if (!desc)
			goto err_desc_get;

		desc->regs.dev_addr = reg;
		desc->regs.mem_addr = sg_dma_address(sg);
		desc->regs.size = sg_dma_len(sg);
		desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;

		switch (pd_slave->width) {
		case PCH_DMA_WIDTH_1_BYTE:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
			break;
		case PCH_DMA_WIDTH_2_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
			break;
		case PCH_DMA_WIDTH_4_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
			break;
		default:
			goto err_desc_get;
		}

		if (!first) {
			first = desc;
		} else {
			prev->regs.next |= desc->txd.phys;
			list_add_tail(&desc->desc_node, &first->tx_list);
		}

		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		desc->regs.next = DMA_DESC_END_WITH_IRQ;
	else
		desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
	pdc_desc_put(pd_chan, first);
	return NULL;
}

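/*
 * dmaengine ->device_terminate_all() hook: disable the channel, then
 * retire every active and queued descriptor so their resources return
 * to the free list.
 */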
static int pd_device_terminate_all(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	spin_lock_irq(&pd_chan->lock);

	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);

	spin_unlock_irq(&pd_chan->lock);

	return 0;
}

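/*
 * Per-channel bottom half, scheduled from pd_irq(): handle a flagged
 * transfer error or advance to the next queued descriptor.
 */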
static void pdc_tasklet(struct tasklet_struct *t)
{
	struct pch_dma_chan *pd_chan = from_tasklet(pd_chan, t, tasklet);
	unsigned long flags;

	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: handle non-idle channel in tasklet\n");
		return;
	}

	spin_lock_irqsave(&pd_chan->lock, flags);
	if (test_and_clear_bit(0, &pd_chan->err_status))
		pdc_handle_error(pd_chan);
	else
		pdc_advance_work(pd_chan);
	spin_unlock_irqrestore(&pd_chan->lock, flags);
}

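/*
 * Shared interrupt handler.  STS0 carries IRQ and error bits for
 * channels 0-7, STS2 for channels 8-11; writing the read value back
 * acknowledges the serviced bits.  Actual completion work is deferred
 * to the per-channel tasklet.
 */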
static irqreturn_t pd_irq(int irq, void *devid)
{
	struct pch_dma *pd = (struct pch_dma *)devid;
	struct pch_dma_chan *pd_chan;
	u32 sts0;
	u32 sts2;
	int i;
	int ret0 = IRQ_NONE;
	int ret2 = IRQ_NONE;

	sts0 = dma_readl(pd, STS0);
	sts2 = dma_readl(pd, STS2);

	dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);

	for (i = 0; i < pd->dma.chancnt; i++) {
		pd_chan = &pd->channels[i];

		if (i < 8) {
			if (sts0 & DMA_STATUS_IRQ(i)) {
				if (sts0 & DMA_STATUS0_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret0 = IRQ_HANDLED;
			}
		} else {
			if (sts2 & DMA_STATUS_IRQ(i - 8)) {
				if (sts2 & DMA_STATUS2_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret2 = IRQ_HANDLED;
			}
		}
	}

	/* clear interrupt bits in status register */
	if (ret0)
		dma_writel(pd, STS0, sts0);
	if (ret2)
		dma_writel(pd, STS2, sts2);

	return ret0 | ret2;
}

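/*
 * Suspend/resume helpers: snapshot the global control registers and
 * each channel's descriptor registers into pd->regs/pd->ch_regs, and
 * write them back on resume.
 */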
static void __maybe_unused pch_dma_save_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
	pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
	pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
	pd->regs.dma_ctl3 = dma_readl(pd, CTL3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
		pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
		pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
		pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);

		i++;
	}
}

static void __maybe_unused pch_dma_restore_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
	dma_writel(pd, CTL2, pd->regs.dma_ctl2);
	dma_writel(pd, CTL3, pd->regs.dma_ctl3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
		channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
		channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
		channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

		i++;
	}
}

static int __maybe_unused pch_dma_suspend(struct device *dev)
{
	struct pch_dma *pd = dev_get_drvdata(dev);

	if (pd)
		pch_dma_save_regs(pd);

	return 0;
}

static int __maybe_unused pch_dma_resume(struct device *dev)
{
	struct pch_dma *pd = dev_get_drvdata(dev);

	if (pd)
		pch_dma_restore_regs(pd);

	return 0;
}

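/*
 * Probe: BAR 1 holds the controller's register block (per-channel
 * descriptor registers follow the global ones, matching struct
 * pch_dma_regs).  The number of channels to expose comes from the
 * driver_data field of the matched PCI id.
 */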
static int pch_dma_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct pch_dma *pd;
	struct pch_dma_regs *regs;
	unsigned int nr_channels;
	int err;
	int i;

	nr_channels = id->driver_data;
	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pci_set_drvdata(pdev, pd);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device\n");
		goto err_free_mem;
	}

	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper base address\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA config\n");
		goto err_free_res;
	}

	regs = pd->membase = pci_iomap(pdev, 1, 0);
	if (!pd->membase) {
		dev_err(&pdev->dev, "Cannot map MMIO registers\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	pci_set_master(pdev);
	pd->dma.dev = &pdev->dev;

	err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_iounmap;
	}

	pd->pool = dma_pool_create("pch_dma_desc_pool", &pdev->dev,
				   sizeof(struct pch_dma_desc), 4, 0);
	if (!pd->pool) {
		dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
		err = -ENOMEM;
		goto err_free_irq;
	}

	INIT_LIST_HEAD(&pd->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct pch_dma_chan *pd_chan = &pd->channels[i];

		pd_chan->chan.device = &pd->dma;
		dma_cookie_init(&pd_chan->chan);

		pd_chan->membase = &regs->desc[i];

		spin_lock_init(&pd_chan->lock);

		INIT_LIST_HEAD(&pd_chan->active_list);
		INIT_LIST_HEAD(&pd_chan->queue);
		INIT_LIST_HEAD(&pd_chan->free_list);

		tasklet_setup(&pd_chan->tasklet, pdc_tasklet);
		list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
	}

	dma_cap_zero(pd->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

	pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
	pd->dma.device_free_chan_resources = pd_free_chan_resources;
	pd->dma.device_tx_status = pd_tx_status;
	pd->dma.device_issue_pending = pd_issue_pending;
	pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
	pd->dma.device_terminate_all = pd_device_terminate_all;

	err = dma_async_device_register(&pd->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register DMA device\n");
		goto err_free_pool;
	}

	return 0;

err_free_pool:
	dma_pool_destroy(pd->pool);
err_free_irq:
	free_irq(pdev->irq, pd);
err_iounmap:
	pci_iounmap(pdev, pd->membase);
err_free_res:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_free_mem:
	kfree(pd);
	return err;
}

static void pch_dma_remove(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;

	if (pd) {
		dma_async_device_unregister(&pd->dma);

		free_irq(pdev->irq, pd);

		list_for_each_entry_safe(chan, _c, &pd->dma.channels,
					 device_node) {
			pd_chan = to_pd_chan(chan);

			tasklet_kill(&pd_chan->tasklet);
		}

		dma_pool_destroy(pd->pool);
		pci_iounmap(pdev, pd->membase);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		kfree(pd);
	}
}

/* PCI Device ID of DMA device */
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH	0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH	0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH	0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH	0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH	0x8034
#define PCI_DEVICE_ID_ML7213_DMA4_12CH	0x8032
#define PCI_DEVICE_ID_ML7223_DMA1_4CH	0x800B
#define PCI_DEVICE_ID_ML7223_DMA2_4CH	0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH	0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH	0x803B
#define PCI_DEVICE_ID_ML7831_DMA1_8CH	0x8810
#define PCI_DEVICE_ID_ML7831_DMA2_4CH	0x8815

static const struct pci_device_id pch_dma_id_table[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8 }, /* UART Video */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8 }, /* PCMIF SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4 }, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12 }, /* I2S */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4 }, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4 }, /* Video SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4 }, /* Security */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4 }, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8 }, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4 }, /* SPI */
	{ 0, },
};

static SIMPLE_DEV_PM_OPS(pch_dma_pm_ops, pch_dma_suspend, pch_dma_resume);

static struct pci_driver pch_dma_driver = {
	.name		= DRV_NAME,
	.id_table	= pch_dma_id_table,
	.probe		= pch_dma_probe,
	.remove		= pch_dma_remove,
	.driver.pm	= &pch_dma_pm_ops,
};

module_pci_driver(pch_dma_driver);

MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
		   "DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, pch_dma_id_table);