xref: /openbmc/linux/drivers/dma/plx_dma.c (revision c2dbcaa8c672d7ecc96c7a62fd6a34c9d3821bdb)
1905ca51eSLogan Gunthorpe // SPDX-License-Identifier: GPL-2.0
2905ca51eSLogan Gunthorpe /*
 * PLX ExpressLane PEX PCIe Switch DMA Engine Driver
4905ca51eSLogan Gunthorpe  * Copyright (c) 2019, Logan Gunthorpe <logang@deltatee.com>
5905ca51eSLogan Gunthorpe  * Copyright (c) 2019, GigaIO Networks, Inc
6905ca51eSLogan Gunthorpe  */
7905ca51eSLogan Gunthorpe 
8905ca51eSLogan Gunthorpe #include "dmaengine.h"
9905ca51eSLogan Gunthorpe 
10905ca51eSLogan Gunthorpe #include <linux/dmaengine.h>
11905ca51eSLogan Gunthorpe #include <linux/kref.h>
12905ca51eSLogan Gunthorpe #include <linux/list.h>
13905ca51eSLogan Gunthorpe #include <linux/module.h>
14905ca51eSLogan Gunthorpe #include <linux/pci.h>
15905ca51eSLogan Gunthorpe 
16905ca51eSLogan Gunthorpe MODULE_DESCRIPTION("PLX ExpressLane PEX PCI Switch DMA Engine");
17905ca51eSLogan Gunthorpe MODULE_VERSION("0.1");
18905ca51eSLogan Gunthorpe MODULE_LICENSE("GPL");
19905ca51eSLogan Gunthorpe MODULE_AUTHOR("Logan Gunthorpe");
20905ca51eSLogan Gunthorpe 
/*
 * DMA channel register offsets, relative to the start of BAR 0.
 * Layout per the PLX/Broadcom PEX switch data book — TODO confirm exact
 * chapter/revision against hardware documentation.
 */
#define PLX_REG_DESC_RING_ADDR			0x214
#define PLX_REG_DESC_RING_ADDR_HI		0x218
#define PLX_REG_DESC_RING_NEXT_ADDR		0x21C
#define PLX_REG_DESC_RING_COUNT			0x220
#define PLX_REG_DESC_RING_LAST_ADDR		0x224
#define PLX_REG_DESC_RING_LAST_SIZE		0x228
#define PLX_REG_PREF_LIMIT			0x234
#define PLX_REG_CTRL				0x238
#define PLX_REG_CTRL2				0x23A
#define PLX_REG_INTR_CTRL			0x23C
#define PLX_REG_INTR_STATUS			0x23E

/* Prefetch-limit encoding: prefetch four descriptors at a time */
#define PLX_REG_PREF_LIMIT_PREF_FOUR		8

/*
 * PLX_REG_CTRL bits.  Bits 0-7 are commands/modes written by the driver;
 * bits 8-12 look like status bits (presumably write-1-to-clear, since the
 * driver writes them back in PLX_REG_CTRL_RESET_VAL — confirm with the
 * data book).
 */
#define PLX_REG_CTRL_GRACEFUL_PAUSE		BIT(0)
#define PLX_REG_CTRL_ABORT			BIT(1)
#define PLX_REG_CTRL_WRITE_BACK_EN		BIT(2)
#define PLX_REG_CTRL_START			BIT(3)
#define PLX_REG_CTRL_RING_STOP_MODE		BIT(4)
#define PLX_REG_CTRL_DESC_MODE_BLOCK		(0 << 5)
#define PLX_REG_CTRL_DESC_MODE_ON_CHIP		(1 << 5)
#define PLX_REG_CTRL_DESC_MODE_OFF_CHIP		(2 << 5)
#define PLX_REG_CTRL_DESC_INVALID		BIT(8)
#define PLX_REG_CTRL_GRACEFUL_PAUSE_DONE	BIT(9)
#define PLX_REG_CTRL_ABORT_DONE			BIT(10)
#define PLX_REG_CTRL_IMM_PAUSE_DONE		BIT(12)
#define PLX_REG_CTRL_IN_PROGRESS		BIT(30)

/* All the *_DONE/INVALID status bits: written back to acknowledge them */
#define PLX_REG_CTRL_RESET_VAL	(PLX_REG_CTRL_DESC_INVALID | \
				 PLX_REG_CTRL_GRACEFUL_PAUSE_DONE | \
				 PLX_REG_CTRL_ABORT_DONE | \
				 PLX_REG_CTRL_IMM_PAUSE_DONE)

/*
 * Start value: clear stale status, enable completion write-back, use the
 * host-memory (off-chip) descriptor ring and start the engine.
 */
#define PLX_REG_CTRL_START_VAL	(PLX_REG_CTRL_WRITE_BACK_EN | \
				 PLX_REG_CTRL_DESC_MODE_OFF_CHIP | \
				 PLX_REG_CTRL_START | \
				 PLX_REG_CTRL_RESET_VAL)

/* PLX_REG_CTRL2 maximum-transfer-size encodings */
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_64B		0
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_128B	1
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_256B	2
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_512B	3
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_1KB		4
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_2KB		5
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_4B		7

/*
 * PLX_REG_INTR_CTRL enable bits.  Note: "CRTL" misspelling is preserved
 * as-is to avoid churning identifiers.
 */
#define PLX_REG_INTR_CRTL_ERROR_EN		BIT(0)
#define PLX_REG_INTR_CRTL_INV_DESC_EN		BIT(1)
#define PLX_REG_INTR_CRTL_ABORT_DONE_EN		BIT(3)
#define PLX_REG_INTR_CRTL_PAUSE_DONE_EN		BIT(4)
#define PLX_REG_INTR_CRTL_IMM_PAUSE_DONE_EN	BIT(5)

/* PLX_REG_INTR_STATUS bits (acked by writing them back in the ISR) */
#define PLX_REG_INTR_STATUS_ERROR		BIT(0)
#define PLX_REG_INTR_STATUS_INV_DESC		BIT(1)
#define PLX_REG_INTR_STATUS_DESC_DONE		BIT(2)
#define PLX_REG_INTR_CRTL_ABORT_DONE		BIT(3)
77*c2dbcaa8SLogan Gunthorpe 
/*
 * Hardware "standard" descriptor as read by the engine from the
 * host-resident (off-chip) ring.  All fields are little-endian.
 * flags_and_size carries PLX_DESC_FLAG_* in the top bits and the transfer
 * size in the low 27 bits; on completion the engine overwrites it with
 * PLX_DESC_WB_* status and the remaining size (see plx_dma_process_desc()).
 */
struct plx_dma_hw_std_desc {
	__le32 flags_and_size;
	__le16 dst_addr_hi;
	__le16 src_addr_hi;
	__le32 dst_addr_lo;
	__le32 src_addr_lo;
};
85*c2dbcaa8SLogan Gunthorpe 
/* flags_and_size layout: low 27 bits are the transfer size */
#define PLX_DESC_SIZE_MASK		0x7ffffff
/* Set by the driver on submit; cleared by hardware when the desc is done */
#define PLX_DESC_FLAG_VALID		BIT(31)
#define PLX_DESC_FLAG_INT_WHEN_DONE	BIT(30)

/* Write-back status bits the hardware leaves in flags_and_size */
#define PLX_DESC_WB_SUCCESS		BIT(30)
#define PLX_DESC_WB_RD_FAIL		BIT(29)
#define PLX_DESC_WB_WR_FAIL		BIT(28)

/* Ring size; must stay a power of two (index masking in plx_dma_get_desc) */
#define PLX_DMA_RING_COUNT		2048
95*c2dbcaa8SLogan Gunthorpe 
/*
 * Driver-side wrapper for one ring slot.
 * @txd:       dmaengine transaction descriptor handed back to clients
 * @hw:        the matching entry in the coherent hardware ring
 * @orig_size: requested transfer size; used to compute the residue from
 *             the remaining size the hardware writes back
 */
struct plx_dma_desc {
	struct dma_async_tx_descriptor txd;
	struct plx_dma_hw_std_desc *hw;
	u32 orig_size;
};
101*c2dbcaa8SLogan Gunthorpe 
/*
 * Per-device state.  The device exposes exactly one DMA channel, so the
 * channel is embedded directly rather than allocated separately.
 *
 * @pdev is RCU-protected: plx_dma_remove() NULLs it and synchronizes so
 * paths that touch @bar can detect that the PCI device is gone.
 * @ring_lock protects @head/@tail and ring contents; @head/@tail are
 * free-running indices masked by plx_dma_get_desc().
 */
struct plx_dma_dev {
	struct dma_device dma_dev;
	struct dma_chan dma_chan;
	struct pci_dev __rcu *pdev;
	void __iomem *bar;		/* BAR 0 registers (pcim-managed) */
	struct tasklet_struct desc_task;	/* completion processing */

	spinlock_t ring_lock;
	bool ring_active;	/* cleared on free/remove to stop the tasklet */
	int head;		/* next slot to submit */
	int tail;		/* oldest in-flight slot */
	struct plx_dma_hw_std_desc *hw_ring;	/* coherent DMA ring */
	dma_addr_t hw_ring_dma;
	struct plx_dma_desc **desc_ring;	/* software shadow of hw_ring */
};
117905ca51eSLogan Gunthorpe 
/* Map a dma_chan back to its owning device (the channel is embedded). */
static struct plx_dma_dev *chan_to_plx_dma_dev(struct dma_chan *c)
{
	return container_of(c, struct plx_dma_dev, dma_chan);
}
122*c2dbcaa8SLogan Gunthorpe 
123*c2dbcaa8SLogan Gunthorpe static struct plx_dma_desc *plx_dma_get_desc(struct plx_dma_dev *plxdev, int i)
124*c2dbcaa8SLogan Gunthorpe {
125*c2dbcaa8SLogan Gunthorpe 	return plxdev->desc_ring[i & (PLX_DMA_RING_COUNT - 1)];
126*c2dbcaa8SLogan Gunthorpe }
127*c2dbcaa8SLogan Gunthorpe 
/*
 * Reap completed descriptors from the tail of the ring.
 *
 * The hardware clears PLX_DESC_FLAG_VALID and writes PLX_DESC_WB_* status
 * plus the remaining size back into flags_and_size when a descriptor
 * finishes, so we walk tail..head and stop at the first still-valid
 * (in-flight) entry.  Runs from the tasklet and from the abort path;
 * ring_lock orders us against submission.
 */
static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
{
	struct dmaengine_result res;
	struct plx_dma_desc *desc;
	u32 flags;

	spin_lock_bh(&plxdev->ring_lock);

	while (plxdev->tail != plxdev->head) {
		desc = plx_dma_get_desc(plxdev, plxdev->tail);

		/* READ_ONCE: the hardware updates this field concurrently */
		flags = le32_to_cpu(READ_ONCE(desc->hw->flags_and_size));

		if (flags & PLX_DESC_FLAG_VALID)
			break;

		/* Residue = requested size minus what the engine reported */
		res.residue = desc->orig_size - (flags & PLX_DESC_SIZE_MASK);

		if (flags & PLX_DESC_WB_SUCCESS)
			res.result = DMA_TRANS_NOERROR;
		else if (flags & PLX_DESC_WB_WR_FAIL)
			res.result = DMA_TRANS_WRITE_FAILED;
		else
			res.result = DMA_TRANS_READ_FAILED;

		dma_cookie_complete(&desc->txd);
		dma_descriptor_unmap(&desc->txd);
		dmaengine_desc_get_callback_invoke(&desc->txd, &res);
		/* Clear callbacks so a recycled slot can't fire them again */
		desc->txd.callback = NULL;
		desc->txd.callback_result = NULL;

		plxdev->tail++;
	}

	spin_unlock_bh(&plxdev->ring_lock);
}
164*c2dbcaa8SLogan Gunthorpe 
/*
 * Complete whatever the hardware actually finished, then fail everything
 * still outstanding with DMA_TRANS_ABORTED and full residue.  Called on
 * channel teardown/remove after the engine has been stopped.
 */
static void plx_dma_abort_desc(struct plx_dma_dev *plxdev)
{
	struct dmaengine_result res;
	struct plx_dma_desc *desc;

	/* First reap anything that genuinely completed */
	plx_dma_process_desc(plxdev);

	spin_lock_bh(&plxdev->ring_lock);

	while (plxdev->tail != plxdev->head) {
		desc = plx_dma_get_desc(plxdev, plxdev->tail);

		/* Nothing transferred: report the whole size as residue */
		res.residue = desc->orig_size;
		res.result = DMA_TRANS_ABORTED;

		dma_cookie_complete(&desc->txd);
		dma_descriptor_unmap(&desc->txd);
		dmaengine_desc_get_callback_invoke(&desc->txd, &res);
		desc->txd.callback = NULL;
		desc->txd.callback_result = NULL;

		plxdev->tail++;
	}

	spin_unlock_bh(&plxdev->ring_lock);
}
191*c2dbcaa8SLogan Gunthorpe 
/*
 * Quiesce the engine via a graceful pause and clear the ring registers.
 * Busy-waits up to 1s for PLX_REG_CTRL_GRACEFUL_PAUSE_DONE.
 *
 * Caller must guarantee the BAR mapping is still valid — either hold the
 * RCU read lock with a live pdev (plx_dma_stop()) or be on the remove
 * path before the mapping is torn down.
 */
static void __plx_dma_stop(struct plx_dma_dev *plxdev)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	u32 val;

	val = readl(plxdev->bar + PLX_REG_CTRL);
	/* Already idle: nothing set beyond (possibly) the pause request bit */
	if (!(val & ~PLX_REG_CTRL_GRACEFUL_PAUSE))
		return;

	/* Ack stale status and request a graceful pause */
	writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
	       plxdev->bar + PLX_REG_CTRL);

	while (!time_after(jiffies, timeout)) {
		val = readl(plxdev->bar + PLX_REG_CTRL);
		if (val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE)
			break;

		cpu_relax();
	}

	if (!(val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE))
		dev_err(plxdev->dma_dev.dev,
			"Timeout waiting for graceful pause!\n");

	/*
	 * Write the status bits back (presumably write-1-to-clear — confirm
	 * with the data book) while keeping the pause asserted.
	 */
	writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
	       plxdev->bar + PLX_REG_CTRL);

	/* Detach the (soon to be freed) descriptor ring from the hardware */
	writel(0, plxdev->bar + PLX_REG_DESC_RING_COUNT);
	writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR);
	writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
	writel(0, plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
}
224*c2dbcaa8SLogan Gunthorpe 
225*c2dbcaa8SLogan Gunthorpe static void plx_dma_stop(struct plx_dma_dev *plxdev)
226*c2dbcaa8SLogan Gunthorpe {
227*c2dbcaa8SLogan Gunthorpe 	rcu_read_lock();
228*c2dbcaa8SLogan Gunthorpe 	if (!rcu_dereference(plxdev->pdev)) {
229*c2dbcaa8SLogan Gunthorpe 		rcu_read_unlock();
230*c2dbcaa8SLogan Gunthorpe 		return;
231*c2dbcaa8SLogan Gunthorpe 	}
232*c2dbcaa8SLogan Gunthorpe 
233*c2dbcaa8SLogan Gunthorpe 	__plx_dma_stop(plxdev);
234*c2dbcaa8SLogan Gunthorpe 
235*c2dbcaa8SLogan Gunthorpe 	rcu_read_unlock();
236*c2dbcaa8SLogan Gunthorpe }
237*c2dbcaa8SLogan Gunthorpe 
/* Tasklet body: reap completed descriptors outside hard-IRQ context. */
static void plx_dma_desc_task(unsigned long data)
{
	struct plx_dma_dev *plxdev = (void *)data;

	plx_dma_process_desc(plxdev);
}
244*c2dbcaa8SLogan Gunthorpe 
/*
 * Interrupt handler: acknowledge the status register and defer
 * completion processing to the tasklet.
 */
static irqreturn_t plx_dma_isr(int irq, void *devid)
{
	struct plx_dma_dev *plxdev = devid;
	u32 status;

	status = readw(plxdev->bar + PLX_REG_INTR_STATUS);

	if (!status)
		return IRQ_NONE;	/* shared/spurious interrupt */

	/*
	 * ring_active is read without the lock: the teardown path clears it,
	 * then synchronize_irq() + tasklet_kill() flush any racing schedule.
	 */
	if (status & PLX_REG_INTR_STATUS_DESC_DONE && plxdev->ring_active)
		tasklet_schedule(&plxdev->desc_task);

	/* Write the bits back to ack them (appears to be write-1-to-clear) */
	writew(status, plxdev->bar + PLX_REG_INTR_STATUS);

	return IRQ_HANDLED;
}
262*c2dbcaa8SLogan Gunthorpe 
/*
 * Allocate the software descriptor ring and bind each entry to its slot
 * in the (already allocated) coherent hardware ring.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated here
 * is freed and plxdev->desc_ring is left dangling (callers must not use
 * it after an error).
 */
static int plx_dma_alloc_desc(struct plx_dma_dev *plxdev)
{
	struct plx_dma_desc *desc;
	int i;

	plxdev->desc_ring = kcalloc(PLX_DMA_RING_COUNT,
				    sizeof(*plxdev->desc_ring), GFP_KERNEL);
	if (!plxdev->desc_ring)
		return -ENOMEM;

	for (i = 0; i < PLX_DMA_RING_COUNT; i++) {
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			goto free_and_exit;

		dma_async_tx_descriptor_init(&desc->txd, &plxdev->dma_chan);
		desc->hw = &plxdev->hw_ring[i];
		plxdev->desc_ring[i] = desc;
	}

	return 0;

free_and_exit:
	/* kcalloc() zeroed the array, so kfree(NULL) on untouched slots is fine */
	for (i = 0; i < PLX_DMA_RING_COUNT; i++)
		kfree(plxdev->desc_ring[i]);
	kfree(plxdev->desc_ring);
	return -ENOMEM;
}
291*c2dbcaa8SLogan Gunthorpe 
292*c2dbcaa8SLogan Gunthorpe static int plx_dma_alloc_chan_resources(struct dma_chan *chan)
293*c2dbcaa8SLogan Gunthorpe {
294*c2dbcaa8SLogan Gunthorpe 	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
295*c2dbcaa8SLogan Gunthorpe 	size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
296*c2dbcaa8SLogan Gunthorpe 	int rc;
297*c2dbcaa8SLogan Gunthorpe 
298*c2dbcaa8SLogan Gunthorpe 	plxdev->head = plxdev->tail = 0;
299*c2dbcaa8SLogan Gunthorpe 	plxdev->hw_ring = dma_alloc_coherent(plxdev->dma_dev.dev, ring_sz,
300*c2dbcaa8SLogan Gunthorpe 					     &plxdev->hw_ring_dma, GFP_KERNEL);
301*c2dbcaa8SLogan Gunthorpe 	if (!plxdev->hw_ring)
302*c2dbcaa8SLogan Gunthorpe 		return -ENOMEM;
303*c2dbcaa8SLogan Gunthorpe 
304*c2dbcaa8SLogan Gunthorpe 	rc = plx_dma_alloc_desc(plxdev);
305*c2dbcaa8SLogan Gunthorpe 	if (rc)
306*c2dbcaa8SLogan Gunthorpe 		goto out_free_hw_ring;
307*c2dbcaa8SLogan Gunthorpe 
308*c2dbcaa8SLogan Gunthorpe 	rcu_read_lock();
309*c2dbcaa8SLogan Gunthorpe 	if (!rcu_dereference(plxdev->pdev)) {
310*c2dbcaa8SLogan Gunthorpe 		rcu_read_unlock();
311*c2dbcaa8SLogan Gunthorpe 		rc = -ENODEV;
312*c2dbcaa8SLogan Gunthorpe 		goto out_free_hw_ring;
313*c2dbcaa8SLogan Gunthorpe 	}
314*c2dbcaa8SLogan Gunthorpe 
315*c2dbcaa8SLogan Gunthorpe 	writel(PLX_REG_CTRL_RESET_VAL, plxdev->bar + PLX_REG_CTRL);
316*c2dbcaa8SLogan Gunthorpe 	writel(lower_32_bits(plxdev->hw_ring_dma),
317*c2dbcaa8SLogan Gunthorpe 	       plxdev->bar + PLX_REG_DESC_RING_ADDR);
318*c2dbcaa8SLogan Gunthorpe 	writel(upper_32_bits(plxdev->hw_ring_dma),
319*c2dbcaa8SLogan Gunthorpe 	       plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
320*c2dbcaa8SLogan Gunthorpe 	writel(lower_32_bits(plxdev->hw_ring_dma),
321*c2dbcaa8SLogan Gunthorpe 	       plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
322*c2dbcaa8SLogan Gunthorpe 	writel(PLX_DMA_RING_COUNT, plxdev->bar + PLX_REG_DESC_RING_COUNT);
323*c2dbcaa8SLogan Gunthorpe 	writel(PLX_REG_PREF_LIMIT_PREF_FOUR, plxdev->bar + PLX_REG_PREF_LIMIT);
324*c2dbcaa8SLogan Gunthorpe 
325*c2dbcaa8SLogan Gunthorpe 	plxdev->ring_active = true;
326*c2dbcaa8SLogan Gunthorpe 
327*c2dbcaa8SLogan Gunthorpe 	rcu_read_unlock();
328*c2dbcaa8SLogan Gunthorpe 
329*c2dbcaa8SLogan Gunthorpe 	return PLX_DMA_RING_COUNT;
330*c2dbcaa8SLogan Gunthorpe 
331*c2dbcaa8SLogan Gunthorpe out_free_hw_ring:
332*c2dbcaa8SLogan Gunthorpe 	dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
333*c2dbcaa8SLogan Gunthorpe 			  plxdev->hw_ring_dma);
334*c2dbcaa8SLogan Gunthorpe 	return rc;
335*c2dbcaa8SLogan Gunthorpe }
336*c2dbcaa8SLogan Gunthorpe 
/*
 * dmaengine .device_free_chan_resources callback.
 *
 * Teardown ordering matters: first make ring_active false so the ISR
 * stops scheduling the tasklet, then stop the hardware, flush any
 * in-flight ISR and tasklet, abort remaining descriptors, and finally
 * free both rings.
 */
static void plx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
	size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
	struct pci_dev *pdev;
	int irq = -1;	/* sentinel: no valid vector if the device is gone */
	int i;

	spin_lock_bh(&plxdev->ring_lock);
	plxdev->ring_active = false;
	spin_unlock_bh(&plxdev->ring_lock);

	plx_dma_stop(plxdev);

	/* Only look up the vector while the pdev is guaranteed alive */
	rcu_read_lock();
	pdev = rcu_dereference(plxdev->pdev);
	if (pdev)
		irq = pci_irq_vector(pdev, 0);
	rcu_read_unlock();

	if (irq > 0)
		synchronize_irq(irq);

	tasklet_kill(&plxdev->desc_task);

	/* Completes finished work, aborts the rest with DMA_TRANS_ABORTED */
	plx_dma_abort_desc(plxdev);

	for (i = 0; i < PLX_DMA_RING_COUNT; i++)
		kfree(plxdev->desc_ring[i]);

	kfree(plxdev->desc_ring);
	dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
			  plxdev->hw_ring_dma);

}
372*c2dbcaa8SLogan Gunthorpe 
/*
 * dmaengine .device_release callback: final teardown once the dmaengine
 * core has dropped its last reference.  Releases the struct-device
 * reference taken by get_device() in plx_dma_create() and frees the
 * driver state.
 */
static void plx_dma_release(struct dma_device *dma_dev)
{
	struct plx_dma_dev *plxdev =
		container_of(dma_dev, struct plx_dma_dev, dma_dev);

	put_device(dma_dev->dev);
	kfree(plxdev);
}
381905ca51eSLogan Gunthorpe 
382905ca51eSLogan Gunthorpe static int plx_dma_create(struct pci_dev *pdev)
383905ca51eSLogan Gunthorpe {
384905ca51eSLogan Gunthorpe 	struct plx_dma_dev *plxdev;
385905ca51eSLogan Gunthorpe 	struct dma_device *dma;
386905ca51eSLogan Gunthorpe 	struct dma_chan *chan;
387905ca51eSLogan Gunthorpe 	int rc;
388905ca51eSLogan Gunthorpe 
389905ca51eSLogan Gunthorpe 	plxdev = kzalloc(sizeof(*plxdev), GFP_KERNEL);
390905ca51eSLogan Gunthorpe 	if (!plxdev)
391905ca51eSLogan Gunthorpe 		return -ENOMEM;
392905ca51eSLogan Gunthorpe 
393*c2dbcaa8SLogan Gunthorpe 	rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
394*c2dbcaa8SLogan Gunthorpe 			 KBUILD_MODNAME, plxdev);
395*c2dbcaa8SLogan Gunthorpe 	if (rc) {
396*c2dbcaa8SLogan Gunthorpe 		kfree(plxdev);
397*c2dbcaa8SLogan Gunthorpe 		return rc;
398*c2dbcaa8SLogan Gunthorpe 	}
399*c2dbcaa8SLogan Gunthorpe 
400*c2dbcaa8SLogan Gunthorpe 	spin_lock_init(&plxdev->ring_lock);
401*c2dbcaa8SLogan Gunthorpe 	tasklet_init(&plxdev->desc_task, plx_dma_desc_task,
402*c2dbcaa8SLogan Gunthorpe 		     (unsigned long)plxdev);
403*c2dbcaa8SLogan Gunthorpe 
404*c2dbcaa8SLogan Gunthorpe 	RCU_INIT_POINTER(plxdev->pdev, pdev);
405905ca51eSLogan Gunthorpe 	plxdev->bar = pcim_iomap_table(pdev)[0];
406905ca51eSLogan Gunthorpe 
407905ca51eSLogan Gunthorpe 	dma = &plxdev->dma_dev;
408905ca51eSLogan Gunthorpe 	dma->chancnt = 1;
409905ca51eSLogan Gunthorpe 	INIT_LIST_HEAD(&dma->channels);
410905ca51eSLogan Gunthorpe 	dma->copy_align = DMAENGINE_ALIGN_1_BYTE;
411905ca51eSLogan Gunthorpe 	dma->dev = get_device(&pdev->dev);
412905ca51eSLogan Gunthorpe 
413*c2dbcaa8SLogan Gunthorpe 	dma->device_alloc_chan_resources = plx_dma_alloc_chan_resources;
414*c2dbcaa8SLogan Gunthorpe 	dma->device_free_chan_resources = plx_dma_free_chan_resources;
415905ca51eSLogan Gunthorpe 	dma->device_release = plx_dma_release;
416905ca51eSLogan Gunthorpe 
417905ca51eSLogan Gunthorpe 	chan = &plxdev->dma_chan;
418905ca51eSLogan Gunthorpe 	chan->device = dma;
419905ca51eSLogan Gunthorpe 	dma_cookie_init(chan);
420905ca51eSLogan Gunthorpe 	list_add_tail(&chan->device_node, &dma->channels);
421905ca51eSLogan Gunthorpe 
422905ca51eSLogan Gunthorpe 	rc = dma_async_device_register(dma);
423905ca51eSLogan Gunthorpe 	if (rc) {
424905ca51eSLogan Gunthorpe 		pci_err(pdev, "Failed to register dma device: %d\n", rc);
425905ca51eSLogan Gunthorpe 		free_irq(pci_irq_vector(pdev, 0),  plxdev);
426905ca51eSLogan Gunthorpe 		kfree(plxdev);
427905ca51eSLogan Gunthorpe 		return rc;
428905ca51eSLogan Gunthorpe 	}
429905ca51eSLogan Gunthorpe 
430905ca51eSLogan Gunthorpe 	pci_set_drvdata(pdev, plxdev);
431905ca51eSLogan Gunthorpe 
432905ca51eSLogan Gunthorpe 	return 0;
433905ca51eSLogan Gunthorpe }
434905ca51eSLogan Gunthorpe 
/*
 * PCI probe: enable the device (managed), set DMA masks, map BAR 0 and
 * allocate one interrupt vector, then register the DMA engine.
 *
 * The 48-bit mask presumably matches the engine's addressing capability
 * (descriptors split addresses into 32 low + 16 high bits) — TODO confirm
 * against the hardware documentation.  Falls back to 32-bit if 48 is not
 * supported.
 */
static int plx_dma_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (rc)
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (rc)
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	/* Map BAR 0 only (bitmask 1); released automatically by devres */
	rc = pcim_iomap_regions(pdev, 1, KBUILD_MODNAME);
	if (rc)
		return rc;

	rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (rc <= 0)
		return rc;

	pci_set_master(pdev);

	rc = plx_dma_create(pdev);
	if (rc)
		goto err_free_irq_vectors;

	pci_info(pdev, "PLX DMA Channel Registered\n");

	return 0;

err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
	return rc;
}
478905ca51eSLogan Gunthorpe 
/*
 * PCI remove: free the IRQ, then publish (via RCU) that the pdev is gone
 * so plx_dma_stop()/alloc paths stop touching the BAR, stop the engine,
 * fail outstanding work, and unregister from the dmaengine core.  Final
 * memory release happens in plx_dma_release() via the core.
 */
static void plx_dma_remove(struct pci_dev *pdev)
{
	struct plx_dma_dev *plxdev = pci_get_drvdata(pdev);

	free_irq(pci_irq_vector(pdev, 0),  plxdev);

	/* After this grace period no reader can see a live pdev */
	rcu_assign_pointer(plxdev->pdev, NULL);
	synchronize_rcu();

	spin_lock_bh(&plxdev->ring_lock);
	plxdev->ring_active = false;
	spin_unlock_bh(&plxdev->ring_lock);

	/* IRQ already freed, so the BAR is safe to touch directly here */
	__plx_dma_stop(plxdev);
	plx_dma_abort_desc(plxdev);

	plxdev->bar = NULL;
	dma_async_device_unregister(&plxdev->dma_dev);

	pci_free_irq_vectors(pdev);
}
500905ca51eSLogan Gunthorpe 
/* Match the PLX 87D0 DMA function (system-other class) only */
static const struct pci_device_id plx_dma_pci_tbl[] = {
	{
		.vendor		= PCI_VENDOR_ID_PLX,
		.device		= 0x87D0,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= PCI_CLASS_SYSTEM_OTHER << 8,
		.class_mask	= 0xFFFFFFFF,
	},
	{0}
};
MODULE_DEVICE_TABLE(pci, plx_dma_pci_tbl);
513905ca51eSLogan Gunthorpe 
/* PCI driver glue; module_pci_driver() generates module init/exit */
static struct pci_driver plx_dma_pci_driver = {
	.name           = KBUILD_MODNAME,
	.id_table       = plx_dma_pci_tbl,
	.probe          = plx_dma_probe,
	.remove		= plx_dma_remove,
};
module_pci_driver(plx_dma_pci_driver);
521