// SPDX-License-Identifier: GPL-2.0
/*
 * PLX ExpressLane PEX PCI Switch DMA Engine
 * Copyright (c) 2019, Logan Gunthorpe <logang@deltatee.com>
 * Copyright (c) 2019, GigaIO Networks, Inc
 */

#include "dmaengine.h"

#include <linux/circ_buf.h>
#include <linux/dmaengine.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

MODULE_DESCRIPTION("PLX ExpressLane PEX PCI Switch DMA Engine");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Logan Gunthorpe");

#define PLX_REG_DESC_RING_ADDR			0x214
#define PLX_REG_DESC_RING_ADDR_HI		0x218
#define PLX_REG_DESC_RING_NEXT_ADDR		0x21C
#define PLX_REG_DESC_RING_COUNT			0x220
#define PLX_REG_DESC_RING_LAST_ADDR		0x224
#define PLX_REG_DESC_RING_LAST_SIZE		0x228
#define PLX_REG_PREF_LIMIT			0x234
#define PLX_REG_CTRL				0x238
#define PLX_REG_CTRL2				0x23A
#define PLX_REG_INTR_CTRL			0x23C
#define PLX_REG_INTR_STATUS			0x23E

#define PLX_REG_PREF_LIMIT_PREF_FOUR		8

#define PLX_REG_CTRL_GRACEFUL_PAUSE		BIT(0)
#define PLX_REG_CTRL_ABORT			BIT(1)
#define PLX_REG_CTRL_WRITE_BACK_EN		BIT(2)
#define PLX_REG_CTRL_START			BIT(3)
#define PLX_REG_CTRL_RING_STOP_MODE		BIT(4)
#define PLX_REG_CTRL_DESC_MODE_BLOCK		(0 << 5)
#define PLX_REG_CTRL_DESC_MODE_ON_CHIP		(1 << 5)
#define PLX_REG_CTRL_DESC_MODE_OFF_CHIP		(2 << 5)
#define PLX_REG_CTRL_DESC_INVALID		BIT(8)
#define PLX_REG_CTRL_GRACEFUL_PAUSE_DONE	BIT(9)
#define PLX_REG_CTRL_ABORT_DONE			BIT(10)
#define PLX_REG_CTRL_IMM_PAUSE_DONE		BIT(12)
#define PLX_REG_CTRL_IN_PROGRESS		BIT(30)

#define PLX_REG_CTRL_RESET_VAL	(PLX_REG_CTRL_DESC_INVALID | \
				 PLX_REG_CTRL_GRACEFUL_PAUSE_DONE | \
				 PLX_REG_CTRL_ABORT_DONE | \
				 PLX_REG_CTRL_IMM_PAUSE_DONE)

#define PLX_REG_CTRL_START_VAL	(PLX_REG_CTRL_WRITE_BACK_EN | \
				 PLX_REG_CTRL_DESC_MODE_OFF_CHIP | \
				 PLX_REG_CTRL_START | \
				 PLX_REG_CTRL_RESET_VAL)

#define PLX_REG_CTRL2_MAX_TXFR_SIZE_64B		0
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_128B	1
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_256B	2
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_512B	3
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_1KB		4
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_2KB		5
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_4B		7

#define PLX_REG_INTR_CTRL_ERROR_EN		BIT(0)
#define PLX_REG_INTR_CTRL_INV_DESC_EN		BIT(1)
#define PLX_REG_INTR_CTRL_ABORT_DONE_EN		BIT(3)
#define PLX_REG_INTR_CTRL_PAUSE_DONE_EN		BIT(4)
#define PLX_REG_INTR_CTRL_IMM_PAUSE_DONE_EN	BIT(5)

#define PLX_REG_INTR_STATUS_ERROR		BIT(0)
#define PLX_REG_INTR_STATUS_INV_DESC		BIT(1)
#define PLX_REG_INTR_STATUS_DESC_DONE		BIT(2)
#define PLX_REG_INTR_STATUS_ABORT_DONE		BIT(3)

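/*
 * Hardware descriptor layout (off-chip standard mode): a 32-bit
 * flags/transfer-size word followed by the destination and source
 * addresses, each split into a 16-bit high half and a 32-bit low half,
 * so the engine addresses at most 48 bits.
 */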
struct plx_dma_hw_std_desc {
	__le32 flags_and_size;
	__le16 dst_addr_hi;
	__le16 src_addr_hi;
	__le32 dst_addr_lo;
	__le32 src_addr_lo;
};

#define PLX_DESC_SIZE_MASK		0x7ffffff
#define PLX_DESC_FLAG_VALID		BIT(31)
#define PLX_DESC_FLAG_INT_WHEN_DONE	BIT(30)

#define PLX_DESC_WB_SUCCESS		BIT(30)
#define PLX_DESC_WB_RD_FAIL		BIT(29)
#define PLX_DESC_WB_WR_FAIL		BIT(28)

#define PLX_DMA_RING_COUNT		2048

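/*
 * Software descriptor: pairs the generic dmaengine descriptor with its
 * slot in the hardware ring. orig_size records the requested transfer
 * length so the residue can be derived from the hardware write-back.
 */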
struct plx_dma_desc {
	struct dma_async_tx_descriptor txd;
	struct plx_dma_hw_std_desc *hw;
	u32 orig_size;
};

struct plx_dma_dev {
	struct dma_device dma_dev;
	struct dma_chan dma_chan;
	struct pci_dev __rcu *pdev;
	void __iomem *bar;
	struct tasklet_struct desc_task;

	spinlock_t ring_lock;
	bool ring_active;
	int head;
	int tail;
	struct plx_dma_hw_std_desc *hw_ring;
	dma_addr_t hw_ring_dma;
	struct plx_dma_desc **desc_ring;
};

static struct plx_dma_dev *chan_to_plx_dma_dev(struct dma_chan *c)
{
	return container_of(c, struct plx_dma_dev, dma_chan);
}

static struct plx_dma_desc *to_plx_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct plx_dma_desc, txd);
}

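/*
 * head and tail are free-running counters; PLX_DMA_RING_COUNT must be a
 * power of two so that the masking here (and CIRC_SPACE() in
 * plx_dma_prep_memcpy()) reduces them to valid ring indices.
 */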
static struct plx_dma_desc *plx_dma_get_desc(struct plx_dma_dev *plxdev, int i)
{
	return plxdev->desc_ring[i & (PLX_DMA_RING_COUNT - 1)];
}

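/*
 * Reap completed descriptors: walk from tail to head, stopping at the
 * first descriptor whose valid flag is still set (one the hardware has
 * not yet written back), and invoke each completion callback with the
 * result and residue taken from the write-back flags.
 */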
static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
{
	struct dmaengine_result res;
	struct plx_dma_desc *desc;
	u32 flags;

	spin_lock_bh(&plxdev->ring_lock);

	while (plxdev->tail != plxdev->head) {
		desc = plx_dma_get_desc(plxdev, plxdev->tail);

		flags = le32_to_cpu(READ_ONCE(desc->hw->flags_and_size));

		if (flags & PLX_DESC_FLAG_VALID)
			break;

		res.residue = desc->orig_size - (flags & PLX_DESC_SIZE_MASK);

		if (flags & PLX_DESC_WB_SUCCESS)
			res.result = DMA_TRANS_NOERROR;
		else if (flags & PLX_DESC_WB_WR_FAIL)
			res.result = DMA_TRANS_WRITE_FAILED;
		else
			res.result = DMA_TRANS_READ_FAILED;

		dma_cookie_complete(&desc->txd);
		dma_descriptor_unmap(&desc->txd);
		dmaengine_desc_get_callback_invoke(&desc->txd, &res);
		desc->txd.callback = NULL;
		desc->txd.callback_result = NULL;

		plxdev->tail++;
	}

	spin_unlock_bh(&plxdev->ring_lock);
}

static void plx_dma_abort_desc(struct plx_dma_dev *plxdev)
{
	struct dmaengine_result res;
	struct plx_dma_desc *desc;

	plx_dma_process_desc(plxdev);

	spin_lock_bh(&plxdev->ring_lock);

	while (plxdev->tail != plxdev->head) {
		desc = plx_dma_get_desc(plxdev, plxdev->tail);

		res.residue = desc->orig_size;
		res.result = DMA_TRANS_ABORTED;

		dma_cookie_complete(&desc->txd);
		dma_descriptor_unmap(&desc->txd);
		dmaengine_desc_get_callback_invoke(&desc->txd, &res);
		desc->txd.callback = NULL;
		desc->txd.callback_result = NULL;

		plxdev->tail++;
	}

	spin_unlock_bh(&plxdev->ring_lock);
}

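/*
 * Gracefully pause the engine and clear the ring registers, polling up
 * to one second for the hardware to acknowledge the pause.
 */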
static void __plx_dma_stop(struct plx_dma_dev *plxdev)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	u32 val;

	val = readl(plxdev->bar + PLX_REG_CTRL);
	if (!(val & ~PLX_REG_CTRL_GRACEFUL_PAUSE))
		return;

	writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
	       plxdev->bar + PLX_REG_CTRL);

	while (!time_after(jiffies, timeout)) {
		val = readl(plxdev->bar + PLX_REG_CTRL);
		if (val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE)
			break;

		cpu_relax();
	}

	if (!(val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE))
		dev_err(plxdev->dma_dev.dev,
			"Timeout waiting for graceful pause!\n");

	writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
	       plxdev->bar + PLX_REG_CTRL);

	writel(0, plxdev->bar + PLX_REG_DESC_RING_COUNT);
	writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR);
	writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
	writel(0, plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
}

static void plx_dma_stop(struct plx_dma_dev *plxdev)
{
	rcu_read_lock();
	if (!rcu_dereference(plxdev->pdev)) {
		rcu_read_unlock();
		return;
	}

	__plx_dma_stop(plxdev);

	rcu_read_unlock();
}

static void plx_dma_desc_task(unsigned long data)
{
	struct plx_dma_dev *plxdev = (void *)data;

	plx_dma_process_desc(plxdev);
}

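/*
 * Note the locking pattern below: plx_dma_prep_memcpy() returns with
 * ring_lock held and plx_dma_tx_submit() releases it, so a prepared
 * descriptor cannot be reaped or reused before it is submitted.
 *
 * A client drives this channel through the generic dmaengine API; a
 * minimal sketch, assuming a previously requested channel "chan":
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */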
static struct dma_async_tx_descriptor *plx_dma_prep_memcpy(struct dma_chan *c,
		dma_addr_t dma_dst, dma_addr_t dma_src, size_t len,
		unsigned long flags)
	__acquires(plxdev->ring_lock)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(c);
	struct plx_dma_desc *plxdesc;

	spin_lock_bh(&plxdev->ring_lock);
	if (!plxdev->ring_active)
		goto err_unlock;

	if (!CIRC_SPACE(plxdev->head, plxdev->tail, PLX_DMA_RING_COUNT))
		goto err_unlock;

	if (len > PLX_DESC_SIZE_MASK)
		goto err_unlock;

	plxdesc = plx_dma_get_desc(plxdev, plxdev->head);
	plxdev->head++;

	plxdesc->hw->dst_addr_lo = cpu_to_le32(lower_32_bits(dma_dst));
	plxdesc->hw->dst_addr_hi = cpu_to_le16(upper_32_bits(dma_dst));
	plxdesc->hw->src_addr_lo = cpu_to_le32(lower_32_bits(dma_src));
	plxdesc->hw->src_addr_hi = cpu_to_le16(upper_32_bits(dma_src));

	plxdesc->orig_size = len;

	if (flags & DMA_PREP_INTERRUPT)
		len |= PLX_DESC_FLAG_INT_WHEN_DONE;

	plxdesc->hw->flags_and_size = cpu_to_le32(len);
	plxdesc->txd.flags = flags;

	/* return with the lock held, it will be released in tx_submit */

	return &plxdesc->txd;

err_unlock:
	/*
	 * Keep sparse happy by restoring an even lock count on
	 * this lock.
	 */
	__acquire(plxdev->ring_lock);

	spin_unlock_bh(&plxdev->ring_lock);
	return NULL;
}

static dma_cookie_t plx_dma_tx_submit(struct dma_async_tx_descriptor *desc)
	__releases(plxdev->ring_lock)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(desc->chan);
	struct plx_dma_desc *plxdesc = to_plx_desc(desc);
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(desc);

	/*
	 * Ensure the descriptor updates are visible to the dma device
	 * before setting the valid bit.
	 */
	wmb();

	plxdesc->hw->flags_and_size |= cpu_to_le32(PLX_DESC_FLAG_VALID);

	spin_unlock_bh(&plxdev->ring_lock);

	return cookie;
}

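/*
 * dma_cookie_status() is cheap, so try it first; only scan the ring for
 * newly completed descriptors when the cookie isn't already complete.
 */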
static enum dma_status plx_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	plx_dma_process_desc(plxdev);

	return dma_cookie_status(chan, cookie, txstate);
}

static void plx_dma_issue_pending(struct dma_chan *chan)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);

	rcu_read_lock();
	if (!rcu_dereference(plxdev->pdev)) {
		rcu_read_unlock();
		return;
	}

	/*
	 * Ensure the valid bits are visible before starting the
	 * DMA engine.
	 */
	wmb();

	writew(PLX_REG_CTRL_START_VAL, plxdev->bar + PLX_REG_CTRL);

	rcu_read_unlock();
}

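/*
 * With write-back enabled (PLX_REG_CTRL_WRITE_BACK_EN) the hardware
 * clears each descriptor's valid flag as it completes, so the ISR only
 * needs to acknowledge the interrupt and kick the tasklet, which reaps
 * via plx_dma_process_desc().
 */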
static irqreturn_t plx_dma_isr(int irq, void *devid)
{
	struct plx_dma_dev *plxdev = devid;
	u32 status;

	status = readw(plxdev->bar + PLX_REG_INTR_STATUS);

	if (!status)
		return IRQ_NONE;

	if (status & PLX_REG_INTR_STATUS_DESC_DONE && plxdev->ring_active)
		tasklet_schedule(&plxdev->desc_task);

	writew(status, plxdev->bar + PLX_REG_INTR_STATUS);

	return IRQ_HANDLED;
}

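/*
 * Allocate the software descriptor ring and bind each entry to its
 * hardware descriptor. kfree(NULL) is a no-op, so the error path can
 * simply walk the whole zero-initialized array.
 */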
static int plx_dma_alloc_desc(struct plx_dma_dev *plxdev)
{
	struct plx_dma_desc *desc;
	int i;

	plxdev->desc_ring = kcalloc(PLX_DMA_RING_COUNT,
				    sizeof(*plxdev->desc_ring), GFP_KERNEL);
	if (!plxdev->desc_ring)
		return -ENOMEM;

	for (i = 0; i < PLX_DMA_RING_COUNT; i++) {
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			goto free_and_exit;

		dma_async_tx_descriptor_init(&desc->txd, &plxdev->dma_chan);
		desc->txd.tx_submit = plx_dma_tx_submit;
		desc->hw = &plxdev->hw_ring[i];

		plxdev->desc_ring[i] = desc;
	}

	return 0;

free_and_exit:
	for (i = 0; i < PLX_DMA_RING_COUNT; i++)
		kfree(plxdev->desc_ring[i]);
	kfree(plxdev->desc_ring);
	return -ENOMEM;
}

static int plx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
	size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
	int rc;
	int i;

	plxdev->head = plxdev->tail = 0;
	plxdev->hw_ring = dma_alloc_coherent(plxdev->dma_dev.dev, ring_sz,
					     &plxdev->hw_ring_dma, GFP_KERNEL);
	if (!plxdev->hw_ring)
		return -ENOMEM;

	rc = plx_dma_alloc_desc(plxdev);
	if (rc)
		goto out_free_hw_ring;

	rcu_read_lock();
	if (!rcu_dereference(plxdev->pdev)) {
		rcu_read_unlock();
		rc = -ENODEV;
		goto out_free_desc_ring;
	}

	writel(PLX_REG_CTRL_RESET_VAL, plxdev->bar + PLX_REG_CTRL);
	writel(lower_32_bits(plxdev->hw_ring_dma),
	       plxdev->bar + PLX_REG_DESC_RING_ADDR);
	writel(upper_32_bits(plxdev->hw_ring_dma),
	       plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
	writel(lower_32_bits(plxdev->hw_ring_dma),
	       plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
	writel(PLX_DMA_RING_COUNT, plxdev->bar + PLX_REG_DESC_RING_COUNT);
	writel(PLX_REG_PREF_LIMIT_PREF_FOUR, plxdev->bar + PLX_REG_PREF_LIMIT);

	plxdev->ring_active = true;

	rcu_read_unlock();

	return PLX_DMA_RING_COUNT;

out_free_desc_ring:
	for (i = 0; i < PLX_DMA_RING_COUNT; i++)
		kfree(plxdev->desc_ring[i]);
	kfree(plxdev->desc_ring);
out_free_hw_ring:
	dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
			  plxdev->hw_ring_dma);
	return rc;
}

static void plx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
	size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
	struct pci_dev *pdev;
	int irq = -1;
	int i;

	spin_lock_bh(&plxdev->ring_lock);
	plxdev->ring_active = false;
	spin_unlock_bh(&plxdev->ring_lock);

	plx_dma_stop(plxdev);

	rcu_read_lock();
	pdev = rcu_dereference(plxdev->pdev);
	if (pdev)
		irq = pci_irq_vector(pdev, 0);
	rcu_read_unlock();

	if (irq > 0)
		synchronize_irq(irq);

	tasklet_kill(&plxdev->desc_task);

	plx_dma_abort_desc(plxdev);

	for (i = 0; i < PLX_DMA_RING_COUNT; i++)
		kfree(plxdev->desc_ring[i]);

	kfree(plxdev->desc_ring);
	dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
			  plxdev->hw_ring_dma);
}

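/*
 * Called by the dmaengine core when the last reference to the dma_device
 * is dropped; balances the get_device() taken in plx_dma_create().
 */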
static void plx_dma_release(struct dma_device *dma_dev)
{
	struct plx_dma_dev *plxdev =
		container_of(dma_dev, struct plx_dma_dev, dma_dev);

	put_device(dma_dev->dev);
	kfree(plxdev);
}

static int plx_dma_create(struct pci_dev *pdev)
{
	struct plx_dma_dev *plxdev;
	struct dma_device *dma;
	struct dma_chan *chan;
	int rc;

	plxdev = kzalloc(sizeof(*plxdev), GFP_KERNEL);
	if (!plxdev)
		return -ENOMEM;

	rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
			 KBUILD_MODNAME, plxdev);
	if (rc) {
		kfree(plxdev);
		return rc;
	}

	spin_lock_init(&plxdev->ring_lock);
	tasklet_init(&plxdev->desc_task, plx_dma_desc_task,
		     (unsigned long)plxdev);

	RCU_INIT_POINTER(plxdev->pdev, pdev);
	plxdev->bar = pcim_iomap_table(pdev)[0];

	dma = &plxdev->dma_dev;
	dma->chancnt = 1;
	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->copy_align = DMAENGINE_ALIGN_1_BYTE;
	dma->dev = get_device(&pdev->dev);

	dma->device_alloc_chan_resources = plx_dma_alloc_chan_resources;
	dma->device_free_chan_resources = plx_dma_free_chan_resources;
	dma->device_prep_dma_memcpy = plx_dma_prep_memcpy;
	dma->device_issue_pending = plx_dma_issue_pending;
	dma->device_tx_status = plx_dma_tx_status;
	dma->device_release = plx_dma_release;

	chan = &plxdev->dma_chan;
	chan->device = dma;
	dma_cookie_init(chan);
	list_add_tail(&chan->device_node, &dma->channels);

	rc = dma_async_device_register(dma);
	if (rc) {
		pci_err(pdev, "Failed to register dma device: %d\n", rc);
		free_irq(pci_irq_vector(pdev, 0), plxdev);
		put_device(dma->dev);
		kfree(plxdev);
		return rc;
	}

	pci_set_drvdata(pdev, plxdev);

	return 0;
}

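/*
 * The hardware descriptor carries only 16 high address bits, so probing
 * prefers a 48-bit DMA mask and falls back to 32 bits.
 */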
static int plx_dma_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (rc)
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (rc)
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1, KBUILD_MODNAME);
	if (rc)
		return rc;

	rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (rc <= 0)
		return rc;

	pci_set_master(pdev);

	rc = plx_dma_create(pdev);
	if (rc)
		goto err_free_irq_vectors;

	pci_info(pdev, "PLX DMA Channel Registered\n");

	return 0;

err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
	return rc;
}

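/*
 * Teardown order matters: free the IRQ first, then clear the pdev
 * pointer under RCU so that paths such as plx_dma_issue_pending() stop
 * touching the hardware before the engine is stopped and outstanding
 * descriptors are aborted.
 */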
static void plx_dma_remove(struct pci_dev *pdev)
{
	struct plx_dma_dev *plxdev = pci_get_drvdata(pdev);

	free_irq(pci_irq_vector(pdev, 0), plxdev);

	rcu_assign_pointer(plxdev->pdev, NULL);
	synchronize_rcu();

	spin_lock_bh(&plxdev->ring_lock);
	plxdev->ring_active = false;
	spin_unlock_bh(&plxdev->ring_lock);

	__plx_dma_stop(plxdev);
	plx_dma_abort_desc(plxdev);

	plxdev->bar = NULL;
	dma_async_device_unregister(&plxdev->dma_dev);

	pci_free_irq_vectors(pdev);
}

static const struct pci_device_id plx_dma_pci_tbl[] = {
	{
		.vendor		= PCI_VENDOR_ID_PLX,
		.device		= 0x87D0,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= PCI_CLASS_SYSTEM_OTHER << 8,
		.class_mask	= 0xFFFFFFFF,
	},
	{0}
};
MODULE_DEVICE_TABLE(pci, plx_dma_pci_tbl);

static struct pci_driver plx_dma_pci_driver = {
	.name           = KBUILD_MODNAME,
	.id_table       = plx_dma_pci_tbl,
	.probe          = plx_dma_probe,
	.remove		= plx_dma_remove,
};
module_pci_driver(plx_dma_pci_driver);