1905ca51eSLogan Gunthorpe // SPDX-License-Identifier: GPL-2.0
2905ca51eSLogan Gunthorpe /*
3905ca51eSLogan Gunthorpe * Microsemi Switchtec(tm) PCIe Management Driver
4905ca51eSLogan Gunthorpe * Copyright (c) 2019, Logan Gunthorpe <logang@deltatee.com>
5905ca51eSLogan Gunthorpe * Copyright (c) 2019, GigaIO Networks, Inc
6905ca51eSLogan Gunthorpe */
7905ca51eSLogan Gunthorpe
8905ca51eSLogan Gunthorpe #include "dmaengine.h"
9905ca51eSLogan Gunthorpe
104d3df168SLogan Gunthorpe #include <linux/circ_buf.h>
11905ca51eSLogan Gunthorpe #include <linux/dmaengine.h>
12905ca51eSLogan Gunthorpe #include <linux/kref.h>
13905ca51eSLogan Gunthorpe #include <linux/list.h>
14905ca51eSLogan Gunthorpe #include <linux/module.h>
15905ca51eSLogan Gunthorpe #include <linux/pci.h>
16905ca51eSLogan Gunthorpe
17905ca51eSLogan Gunthorpe MODULE_DESCRIPTION("PLX ExpressLane PEX PCI Switch DMA Engine");
18905ca51eSLogan Gunthorpe MODULE_VERSION("0.1");
19905ca51eSLogan Gunthorpe MODULE_LICENSE("GPL");
20905ca51eSLogan Gunthorpe MODULE_AUTHOR("Logan Gunthorpe");
21905ca51eSLogan Gunthorpe
/* DMA channel register offsets within BAR 0 */
#define PLX_REG_DESC_RING_ADDR			0x214
#define PLX_REG_DESC_RING_ADDR_HI		0x218
#define PLX_REG_DESC_RING_NEXT_ADDR		0x21C
#define PLX_REG_DESC_RING_COUNT			0x220
#define PLX_REG_DESC_RING_LAST_ADDR		0x224
#define PLX_REG_DESC_RING_LAST_SIZE		0x228
#define PLX_REG_PREF_LIMIT			0x234
#define PLX_REG_CTRL				0x238
#define PLX_REG_CTRL2				0x23A
#define PLX_REG_INTR_CTRL			0x23C
#define PLX_REG_INTR_STATUS			0x23E

/* Descriptor prefetch limit: fetch four descriptors at a time */
#define PLX_REG_PREF_LIMIT_PREF_FOUR		8

/* PLX_REG_CTRL bits */
#define PLX_REG_CTRL_GRACEFUL_PAUSE		BIT(0)
#define PLX_REG_CTRL_ABORT			BIT(1)
#define PLX_REG_CTRL_WRITE_BACK_EN		BIT(2)
#define PLX_REG_CTRL_START			BIT(3)
#define PLX_REG_CTRL_RING_STOP_MODE		BIT(4)
#define PLX_REG_CTRL_DESC_MODE_BLOCK		(0 << 5)
#define PLX_REG_CTRL_DESC_MODE_ON_CHIP		(1 << 5)
#define PLX_REG_CTRL_DESC_MODE_OFF_CHIP		(2 << 5)
#define PLX_REG_CTRL_DESC_INVALID		BIT(8)
#define PLX_REG_CTRL_GRACEFUL_PAUSE_DONE	BIT(9)
#define PLX_REG_CTRL_ABORT_DONE			BIT(10)
#define PLX_REG_CTRL_IMM_PAUSE_DONE		BIT(12)
#define PLX_REG_CTRL_IN_PROGRESS		BIT(30)

/*
 * Writing the *_DONE status bits back clears them; PLX_REG_CTRL_RESET_VAL
 * is therefore used both to acknowledge stale status and as the base for
 * the start value below.
 */
#define PLX_REG_CTRL_RESET_VAL	(PLX_REG_CTRL_DESC_INVALID | \
				 PLX_REG_CTRL_GRACEFUL_PAUSE_DONE | \
				 PLX_REG_CTRL_ABORT_DONE | \
				 PLX_REG_CTRL_IMM_PAUSE_DONE)

#define PLX_REG_CTRL_START_VAL	(PLX_REG_CTRL_WRITE_BACK_EN | \
				 PLX_REG_CTRL_DESC_MODE_OFF_CHIP | \
				 PLX_REG_CTRL_START | \
				 PLX_REG_CTRL_RESET_VAL)

/* PLX_REG_CTRL2 maximum transfer size encodings (unused by this driver) */
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_64B		0
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_128B	1
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_256B	2
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_512B	3
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_1KB		4
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_2KB		5
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_4B		7

/* PLX_REG_INTR_CTRL enable bits */
#define PLX_REG_INTR_CRTL_ERROR_EN		BIT(0)
#define PLX_REG_INTR_CRTL_INV_DESC_EN		BIT(1)
#define PLX_REG_INTR_CRTL_ABORT_DONE_EN		BIT(3)
#define PLX_REG_INTR_CRTL_PAUSE_DONE_EN		BIT(4)
#define PLX_REG_INTR_CRTL_IMM_PAUSE_DONE_EN	BIT(5)

/* PLX_REG_INTR_STATUS bits */
#define PLX_REG_INTR_STATUS_ERROR		BIT(0)
#define PLX_REG_INTR_STATUS_INV_DESC		BIT(1)
#define PLX_REG_INTR_STATUS_DESC_DONE		BIT(2)
#define PLX_REG_INTR_CRTL_ABORT_DONE		BIT(3)
78c2dbcaa8SLogan Gunthorpe
/*
 * Hardware descriptor layout shared with the DMA engine; all fields are
 * little-endian as consumed by the device.
 */
struct plx_dma_hw_std_desc {
	__le32 flags_and_size;	/* valid/irq flags + transfer size; write-back status on completion */
	__le16 dst_addr_hi;
	__le16 src_addr_hi;
	__le32 dst_addr_lo;
	__le32 src_addr_lo;
};

#define PLX_DESC_SIZE_MASK		0x7ffffff
#define PLX_DESC_FLAG_VALID		BIT(31)
#define PLX_DESC_FLAG_INT_WHEN_DONE	BIT(30)

/* Status bits written back into flags_and_size once a descriptor completes */
#define PLX_DESC_WB_SUCCESS		BIT(30)
#define PLX_DESC_WB_RD_FAIL		BIT(29)
#define PLX_DESC_WB_WR_FAIL		BIT(28)

/* Must remain a power of two: ring indices are wrapped by masking */
#define PLX_DMA_RING_COUNT		2048

struct plx_dma_desc {
	struct dma_async_tx_descriptor txd;	/* dmaengine cookie/callback bookkeeping */
	struct plx_dma_hw_std_desc *hw;		/* backing entry in the hardware ring */
	u32 orig_size;				/* requested length, used to compute residue */
};
102c2dbcaa8SLogan Gunthorpe
/* Per-device state; this driver exposes exactly one DMA channel per device. */
struct plx_dma_dev {
	struct dma_device dma_dev;
	struct dma_chan dma_chan;
	struct pci_dev __rcu *pdev;	/* NULLed in plx_dma_remove(); readers use RCU */
	void __iomem *bar;		/* BAR 0 register space */
	struct tasklet_struct desc_task;	/* bottom half reaping completed descriptors */

	spinlock_t ring_lock;		/* protects head/tail and ring contents */
	bool ring_active;
	int head;			/* next free slot (producer index) */
	int tail;			/* oldest outstanding slot (consumer index) */
	struct plx_dma_hw_std_desc *hw_ring;	/* coherent DMA ring shared with hardware */
	dma_addr_t hw_ring_dma;
	struct plx_dma_desc **desc_ring;	/* software descriptors, parallel to hw_ring */
};
118905ca51eSLogan Gunthorpe
/* Map a generic dma_chan back to its containing plx_dma_dev (one chan per dev). */
static struct plx_dma_dev *chan_to_plx_dma_dev(struct dma_chan *c)
{
	return container_of(c, struct plx_dma_dev, dma_chan);
}
123c2dbcaa8SLogan Gunthorpe
/* Map a generic tx descriptor back to its containing plx_dma_desc. */
static struct plx_dma_desc *to_plx_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct plx_dma_desc, txd);
}
1284d3df168SLogan Gunthorpe
plx_dma_get_desc(struct plx_dma_dev * plxdev,int i)129c2dbcaa8SLogan Gunthorpe static struct plx_dma_desc *plx_dma_get_desc(struct plx_dma_dev *plxdev, int i)
130c2dbcaa8SLogan Gunthorpe {
131c2dbcaa8SLogan Gunthorpe return plxdev->desc_ring[i & (PLX_DMA_RING_COUNT - 1)];
132c2dbcaa8SLogan Gunthorpe }
133c2dbcaa8SLogan Gunthorpe
/*
 * Reap completed descriptors between tail and head: translate each
 * descriptor's hardware write-back status into a dmaengine_result,
 * complete its cookie and invoke the client callback.  Stops at the
 * first descriptor the hardware still owns (VALID bit still set).
 * Called from the tasklet, from plx_dma_tx_status() and from
 * plx_dma_abort_desc().
 */
static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
{
	struct dmaengine_result res;
	struct plx_dma_desc *desc;
	u32 flags;

	spin_lock(&plxdev->ring_lock);

	while (plxdev->tail != plxdev->head) {
		desc = plx_dma_get_desc(plxdev, plxdev->tail);

		/* READ_ONCE: the hardware updates this field as it completes */
		flags = le32_to_cpu(READ_ONCE(desc->hw->flags_and_size));

		if (flags & PLX_DESC_FLAG_VALID)
			break;

		/* size field now holds the untransferred remainder */
		res.residue = desc->orig_size - (flags & PLX_DESC_SIZE_MASK);

		if (flags & PLX_DESC_WB_SUCCESS)
			res.result = DMA_TRANS_NOERROR;
		else if (flags & PLX_DESC_WB_WR_FAIL)
			res.result = DMA_TRANS_WRITE_FAILED;
		else
			res.result = DMA_TRANS_READ_FAILED;

		dma_cookie_complete(&desc->txd);
		dma_descriptor_unmap(&desc->txd);
		dmaengine_desc_get_callback_invoke(&desc->txd, &res);
		/* clear callbacks so the recycled descriptor cannot fire them again */
		desc->txd.callback = NULL;
		desc->txd.callback_result = NULL;

		plxdev->tail++;
	}

	spin_unlock(&plxdev->ring_lock);
}
170c2dbcaa8SLogan Gunthorpe
/*
 * Fail every still-outstanding descriptor with DMA_TRANS_ABORTED.
 * Completed descriptors are reaped normally first so that work the
 * hardware actually finished is not reported as aborted.  Callers must
 * have stopped the engine (or marked the ring inactive) beforehand.
 */
static void plx_dma_abort_desc(struct plx_dma_dev *plxdev)
{
	struct dmaengine_result res;
	struct plx_dma_desc *desc;

	plx_dma_process_desc(plxdev);

	spin_lock_bh(&plxdev->ring_lock);

	while (plxdev->tail != plxdev->head) {
		desc = plx_dma_get_desc(plxdev, plxdev->tail);

		/* nothing was transferred: full residue, aborted result */
		res.residue = desc->orig_size;
		res.result = DMA_TRANS_ABORTED;

		dma_cookie_complete(&desc->txd);
		dma_descriptor_unmap(&desc->txd);
		dmaengine_desc_get_callback_invoke(&desc->txd, &res);
		desc->txd.callback = NULL;
		desc->txd.callback_result = NULL;

		plxdev->tail++;
	}

	spin_unlock_bh(&plxdev->ring_lock);
}
197c2dbcaa8SLogan Gunthorpe
/*
 * Gracefully pause the DMA engine and tear down the hardware ring
 * registers.  Polls up to one second for the pause to complete; on
 * timeout it logs an error but still clears the ring registers.
 * Caller must guarantee the device is still present (see plx_dma_stop()).
 */
static void __plx_dma_stop(struct plx_dma_dev *plxdev)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	u32 val;

	val = readl(plxdev->bar + PLX_REG_CTRL);
	/* nothing besides (possibly) the pause bit set: engine already idle */
	if (!(val & ~PLX_REG_CTRL_GRACEFUL_PAUSE))
		return;

	writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
	       plxdev->bar + PLX_REG_CTRL);

	while (!time_after(jiffies, timeout)) {
		val = readl(plxdev->bar + PLX_REG_CTRL);
		if (val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE)
			break;

		cpu_relax();
	}

	if (!(val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE))
		dev_err(plxdev->dma_dev.dev,
			"Timeout waiting for graceful pause!\n");

	/* second write acknowledges the pause-done status bits */
	writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
	       plxdev->bar + PLX_REG_CTRL);

	writel(0, plxdev->bar + PLX_REG_DESC_RING_COUNT);
	writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR);
	writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
	writel(0, plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
}
230c2dbcaa8SLogan Gunthorpe
plx_dma_stop(struct plx_dma_dev * plxdev)231c2dbcaa8SLogan Gunthorpe static void plx_dma_stop(struct plx_dma_dev *plxdev)
232c2dbcaa8SLogan Gunthorpe {
233c2dbcaa8SLogan Gunthorpe rcu_read_lock();
234c2dbcaa8SLogan Gunthorpe if (!rcu_dereference(plxdev->pdev)) {
235c2dbcaa8SLogan Gunthorpe rcu_read_unlock();
236c2dbcaa8SLogan Gunthorpe return;
237c2dbcaa8SLogan Gunthorpe }
238c2dbcaa8SLogan Gunthorpe
239c2dbcaa8SLogan Gunthorpe __plx_dma_stop(plxdev);
240c2dbcaa8SLogan Gunthorpe
241c2dbcaa8SLogan Gunthorpe rcu_read_unlock();
242c2dbcaa8SLogan Gunthorpe }
243c2dbcaa8SLogan Gunthorpe
/* Tasklet body: reap completed descriptors outside of hard-irq context. */
static void plx_dma_desc_task(struct tasklet_struct *t)
{
	struct plx_dma_dev *plxdev = from_tasklet(plxdev, t, desc_task);

	plx_dma_process_desc(plxdev);
}
250c2dbcaa8SLogan Gunthorpe
/*
 * dmaengine ->device_prep_dma_memcpy: fill in the next hardware
 * descriptor for a @len byte copy from @dma_src to @dma_dst.
 *
 * On success this returns with ring_lock held (annotated for sparse);
 * the lock is released in plx_dma_tx_submit().  This keeps the
 * prep/submit pair atomic with respect to the ring indices.  Returns
 * NULL (with the lock released) when the ring is inactive or full, or
 * when @len exceeds the descriptor size field.
 */
static struct dma_async_tx_descriptor *plx_dma_prep_memcpy(struct dma_chan *c,
		dma_addr_t dma_dst, dma_addr_t dma_src, size_t len,
		unsigned long flags)
	__acquires(plxdev->ring_lock)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(c);
	struct plx_dma_desc *plxdesc;

	spin_lock_bh(&plxdev->ring_lock);
	if (!plxdev->ring_active)
		goto err_unlock;

	if (!CIRC_SPACE(plxdev->head, plxdev->tail, PLX_DMA_RING_COUNT))
		goto err_unlock;

	if (len > PLX_DESC_SIZE_MASK)
		goto err_unlock;

	plxdesc = plx_dma_get_desc(plxdev, plxdev->head);
	plxdev->head++;

	plxdesc->hw->dst_addr_lo = cpu_to_le32(lower_32_bits(dma_dst));
	plxdesc->hw->dst_addr_hi = cpu_to_le16(upper_32_bits(dma_dst));
	plxdesc->hw->src_addr_lo = cpu_to_le32(lower_32_bits(dma_src));
	plxdesc->hw->src_addr_hi = cpu_to_le16(upper_32_bits(dma_src));

	plxdesc->orig_size = len;

	if (flags & DMA_PREP_INTERRUPT)
		len |= PLX_DESC_FLAG_INT_WHEN_DONE;

	/* VALID is deliberately not set here; tx_submit hands it to hardware */
	plxdesc->hw->flags_and_size = cpu_to_le32(len);
	plxdesc->txd.flags = flags;

	/* return with the lock held, it will be released in tx_submit */

	return &plxdesc->txd;

err_unlock:
	/*
	 * Keep sparse happy by restoring an even lock count on
	 * this lock.
	 */
	__acquire(plxdev->ring_lock);

	spin_unlock_bh(&plxdev->ring_lock);
	return NULL;
}
2994d3df168SLogan Gunthorpe
/*
 * dmaengine ->tx_submit: assign the cookie and hand the descriptor to
 * the hardware by setting its VALID bit.  Releases the ring_lock that
 * plx_dma_prep_memcpy() returned holding (annotated for sparse).
 */
static dma_cookie_t plx_dma_tx_submit(struct dma_async_tx_descriptor *desc)
	__releases(plxdev->ring_lock)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(desc->chan);
	struct plx_dma_desc *plxdesc = to_plx_desc(desc);
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(desc);

	/*
	 * Ensure the descriptor updates are visible to the dma device
	 * before setting the valid bit.
	 */
	wmb();

	plxdesc->hw->flags_and_size |= cpu_to_le32(PLX_DESC_FLAG_VALID);

	spin_unlock_bh(&plxdev->ring_lock);

	return cookie;
}
3214d3df168SLogan Gunthorpe
/*
 * dmaengine ->device_tx_status: report a cookie's progress.  If it is
 * not already complete, reap finished descriptors and check again so
 * polling clients observe completions without waiting for the tasklet.
 */
static enum dma_status plx_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
	enum dma_status status;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status != DMA_COMPLETE) {
		plx_dma_process_desc(plxdev);
		status = dma_cookie_status(chan, cookie, txstate);
	}

	return status;
}
3364d3df168SLogan Gunthorpe
/*
 * dmaengine ->device_issue_pending: kick the engine to process newly
 * submitted descriptors.  Skipped entirely if the pci_dev has already
 * been unpublished (device going away); RCU pins it for the register
 * write.
 */
static void plx_dma_issue_pending(struct dma_chan *chan)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);

	rcu_read_lock();
	if (!rcu_dereference(plxdev->pdev)) {
		rcu_read_unlock();
		return;
	}

	/*
	 * Ensure the valid bits are visible before starting the
	 * DMA engine.
	 */
	wmb();

	writew(PLX_REG_CTRL_START_VAL, plxdev->bar + PLX_REG_CTRL);

	rcu_read_unlock();
}
3574d3df168SLogan Gunthorpe
/*
 * Hard interrupt handler: schedule the reaping tasklet on descriptor
 * completion and acknowledge the interrupt status.
 */
static irqreturn_t plx_dma_isr(int irq, void *devid)
{
	struct plx_dma_dev *plxdev = devid;
	u32 status;

	status = readw(plxdev->bar + PLX_REG_INTR_STATUS);

	if (!status)
		return IRQ_NONE;

	if (status & PLX_REG_INTR_STATUS_DESC_DONE && plxdev->ring_active)
		tasklet_schedule(&plxdev->desc_task);

	/* write the status bits back to acknowledge/clear them */
	writew(status, plxdev->bar + PLX_REG_INTR_STATUS);

	return IRQ_HANDLED;
}
375c2dbcaa8SLogan Gunthorpe
/*
 * Allocate the software descriptor ring and bind each entry to its
 * hardware ring slot.  Returns 0 or -ENOMEM.  On failure everything
 * allocated so far is freed; the cleanup loop walks the whole ring and
 * relies on kcalloc() zeroing plus kfree(NULL) being a no-op.
 */
static int plx_dma_alloc_desc(struct plx_dma_dev *plxdev)
{
	struct plx_dma_desc *desc;
	int i;

	plxdev->desc_ring = kcalloc(PLX_DMA_RING_COUNT,
				    sizeof(*plxdev->desc_ring), GFP_KERNEL);
	if (!plxdev->desc_ring)
		return -ENOMEM;

	for (i = 0; i < PLX_DMA_RING_COUNT; i++) {
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			goto free_and_exit;

		dma_async_tx_descriptor_init(&desc->txd, &plxdev->dma_chan);
		desc->txd.tx_submit = plx_dma_tx_submit;
		desc->hw = &plxdev->hw_ring[i];

		plxdev->desc_ring[i] = desc;
	}

	return 0;

free_and_exit:
	for (i = 0; i < PLX_DMA_RING_COUNT; i++)
		kfree(plxdev->desc_ring[i]);
	kfree(plxdev->desc_ring);
	return -ENOMEM;
}
406c2dbcaa8SLogan Gunthorpe
plx_dma_alloc_chan_resources(struct dma_chan * chan)407c2dbcaa8SLogan Gunthorpe static int plx_dma_alloc_chan_resources(struct dma_chan *chan)
408c2dbcaa8SLogan Gunthorpe {
409c2dbcaa8SLogan Gunthorpe struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
410c2dbcaa8SLogan Gunthorpe size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
411c2dbcaa8SLogan Gunthorpe int rc;
412c2dbcaa8SLogan Gunthorpe
413c2dbcaa8SLogan Gunthorpe plxdev->head = plxdev->tail = 0;
414c2dbcaa8SLogan Gunthorpe plxdev->hw_ring = dma_alloc_coherent(plxdev->dma_dev.dev, ring_sz,
415c2dbcaa8SLogan Gunthorpe &plxdev->hw_ring_dma, GFP_KERNEL);
416c2dbcaa8SLogan Gunthorpe if (!plxdev->hw_ring)
417c2dbcaa8SLogan Gunthorpe return -ENOMEM;
418c2dbcaa8SLogan Gunthorpe
419c2dbcaa8SLogan Gunthorpe rc = plx_dma_alloc_desc(plxdev);
420c2dbcaa8SLogan Gunthorpe if (rc)
421c2dbcaa8SLogan Gunthorpe goto out_free_hw_ring;
422c2dbcaa8SLogan Gunthorpe
423c2dbcaa8SLogan Gunthorpe rcu_read_lock();
424c2dbcaa8SLogan Gunthorpe if (!rcu_dereference(plxdev->pdev)) {
425c2dbcaa8SLogan Gunthorpe rcu_read_unlock();
426c2dbcaa8SLogan Gunthorpe rc = -ENODEV;
427c2dbcaa8SLogan Gunthorpe goto out_free_hw_ring;
428c2dbcaa8SLogan Gunthorpe }
429c2dbcaa8SLogan Gunthorpe
430c2dbcaa8SLogan Gunthorpe writel(PLX_REG_CTRL_RESET_VAL, plxdev->bar + PLX_REG_CTRL);
431c2dbcaa8SLogan Gunthorpe writel(lower_32_bits(plxdev->hw_ring_dma),
432c2dbcaa8SLogan Gunthorpe plxdev->bar + PLX_REG_DESC_RING_ADDR);
433c2dbcaa8SLogan Gunthorpe writel(upper_32_bits(plxdev->hw_ring_dma),
434c2dbcaa8SLogan Gunthorpe plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
435c2dbcaa8SLogan Gunthorpe writel(lower_32_bits(plxdev->hw_ring_dma),
436c2dbcaa8SLogan Gunthorpe plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
437c2dbcaa8SLogan Gunthorpe writel(PLX_DMA_RING_COUNT, plxdev->bar + PLX_REG_DESC_RING_COUNT);
438c2dbcaa8SLogan Gunthorpe writel(PLX_REG_PREF_LIMIT_PREF_FOUR, plxdev->bar + PLX_REG_PREF_LIMIT);
439c2dbcaa8SLogan Gunthorpe
440c2dbcaa8SLogan Gunthorpe plxdev->ring_active = true;
441c2dbcaa8SLogan Gunthorpe
442c2dbcaa8SLogan Gunthorpe rcu_read_unlock();
443c2dbcaa8SLogan Gunthorpe
444c2dbcaa8SLogan Gunthorpe return PLX_DMA_RING_COUNT;
445c2dbcaa8SLogan Gunthorpe
446c2dbcaa8SLogan Gunthorpe out_free_hw_ring:
447c2dbcaa8SLogan Gunthorpe dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
448c2dbcaa8SLogan Gunthorpe plxdev->hw_ring_dma);
449c2dbcaa8SLogan Gunthorpe return rc;
450c2dbcaa8SLogan Gunthorpe }
451c2dbcaa8SLogan Gunthorpe
/*
 * dmaengine ->device_free_chan_resources: tear down in strict order --
 * mark the ring inactive (so the ISR stops scheduling the tasklet and
 * prep refuses new work), pause the hardware, wait out any in-flight
 * interrupt, kill the tasklet, abort outstanding descriptors, then
 * free both rings.
 */
static void plx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
	size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
	struct pci_dev *pdev;
	int irq = -1;
	int i;

	spin_lock_bh(&plxdev->ring_lock);
	plxdev->ring_active = false;
	spin_unlock_bh(&plxdev->ring_lock);

	plx_dma_stop(plxdev);

	/* the pci_dev may already be unpublished; only sync a live vector */
	rcu_read_lock();
	pdev = rcu_dereference(plxdev->pdev);
	if (pdev)
		irq = pci_irq_vector(pdev, 0);
	rcu_read_unlock();

	if (irq > 0)
		synchronize_irq(irq);

	tasklet_kill(&plxdev->desc_task);

	plx_dma_abort_desc(plxdev);

	for (i = 0; i < PLX_DMA_RING_COUNT; i++)
		kfree(plxdev->desc_ring[i]);

	kfree(plxdev->desc_ring);
	dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
			  plxdev->hw_ring_dma);
}
487c2dbcaa8SLogan Gunthorpe
plx_dma_release(struct dma_device * dma_dev)488905ca51eSLogan Gunthorpe static void plx_dma_release(struct dma_device *dma_dev)
489905ca51eSLogan Gunthorpe {
490905ca51eSLogan Gunthorpe struct plx_dma_dev *plxdev =
491905ca51eSLogan Gunthorpe container_of(dma_dev, struct plx_dma_dev, dma_dev);
492905ca51eSLogan Gunthorpe
493905ca51eSLogan Gunthorpe put_device(dma_dev->dev);
494905ca51eSLogan Gunthorpe kfree(plxdev);
495905ca51eSLogan Gunthorpe }
496905ca51eSLogan Gunthorpe
/*
 * Allocate and register the dma_device/dma_chan pair for @pdev.
 * Requests the (already allocated) interrupt vector, publishes the
 * pci_dev pointer for RCU readers, and registers with the dmaengine
 * core.  Returns 0 or a negative errno with everything unwound.
 */
static int plx_dma_create(struct pci_dev *pdev)
{
	struct plx_dma_dev *plxdev;
	struct dma_device *dma;
	struct dma_chan *chan;
	int rc;

	plxdev = kzalloc(sizeof(*plxdev), GFP_KERNEL);
	if (!plxdev)
		return -ENOMEM;

	rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
			 KBUILD_MODNAME, plxdev);
	if (rc)
		goto free_plx;

	spin_lock_init(&plxdev->ring_lock);
	tasklet_setup(&plxdev->desc_task, plx_dma_desc_task);

	RCU_INIT_POINTER(plxdev->pdev, pdev);
	plxdev->bar = pcim_iomap_table(pdev)[0];

	dma = &plxdev->dma_dev;
	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->copy_align = DMAENGINE_ALIGN_1_BYTE;
	/* hold a reference on the PCI device; released in plx_dma_release() */
	dma->dev = get_device(&pdev->dev);

	dma->device_alloc_chan_resources = plx_dma_alloc_chan_resources;
	dma->device_free_chan_resources = plx_dma_free_chan_resources;
	dma->device_prep_dma_memcpy = plx_dma_prep_memcpy;
	dma->device_issue_pending = plx_dma_issue_pending;
	dma->device_tx_status = plx_dma_tx_status;
	dma->device_release = plx_dma_release;

	chan = &plxdev->dma_chan;
	chan->device = dma;
	dma_cookie_init(chan);
	list_add_tail(&chan->device_node, &dma->channels);

	rc = dma_async_device_register(dma);
	if (rc) {
		pci_err(pdev, "Failed to register dma device: %d\n", rc);
		goto put_device;
	}

	pci_set_drvdata(pdev, plxdev);

	return 0;

put_device:
	put_device(&pdev->dev);
	free_irq(pci_irq_vector(pdev, 0), plxdev);
free_plx:
	kfree(plxdev);

	return rc;
}
555905ca51eSLogan Gunthorpe
/*
 * PCI probe: enable the (managed) device, set a 48-bit DMA mask with a
 * 32-bit fallback, map BAR 0, allocate a single interrupt vector and
 * create the DMA engine.  Managed (pcim_*) resources unwind
 * automatically on failure.
 */
static int plx_dma_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1, KBUILD_MODNAME);
	if (rc)
		return rc;

	rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (rc <= 0)
		return rc;

	pci_set_master(pdev);

	rc = plx_dma_create(pdev);
	if (rc)
		goto err_free_irq_vectors;

	pci_info(pdev, "PLX DMA Channel Registered\n");

	return 0;

err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
	return rc;
}
593905ca51eSLogan Gunthorpe
/*
 * PCI remove: free the irq first, then unpublish the pci_dev pointer
 * and wait for all RCU readers (stop/issue_pending paths) before
 * touching hardware for the last time.  The plx_dma_dev itself is
 * freed later by plx_dma_release() once the dmaengine core drops its
 * reference.
 */
static void plx_dma_remove(struct pci_dev *pdev)
{
	struct plx_dma_dev *plxdev = pci_get_drvdata(pdev);

	free_irq(pci_irq_vector(pdev, 0), plxdev);

	rcu_assign_pointer(plxdev->pdev, NULL);
	synchronize_rcu();

	spin_lock_bh(&plxdev->ring_lock);
	plxdev->ring_active = false;
	spin_unlock_bh(&plxdev->ring_lock);

	/* safe to touch registers directly: no more RCU readers can race us */
	__plx_dma_stop(plxdev);
	plx_dma_abort_desc(plxdev);

	plxdev->bar = NULL;
	dma_async_device_unregister(&plxdev->dma_dev);

	pci_free_irq_vectors(pdev);
}
615905ca51eSLogan Gunthorpe
/* Match the PLX 87D0 switch DMA function (class "other system peripheral") */
static const struct pci_device_id plx_dma_pci_tbl[] = {
	{
		.vendor	= PCI_VENDOR_ID_PLX,
		.device = 0x87D0,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = PCI_CLASS_SYSTEM_OTHER << 8,
		.class_mask = 0xFFFFFFFF,
	},
	{0}
};
MODULE_DEVICE_TABLE(pci, plx_dma_pci_tbl);
628905ca51eSLogan Gunthorpe
static struct pci_driver plx_dma_pci_driver = {
	.name           = KBUILD_MODNAME,
	.id_table       = plx_dma_pci_tbl,
	.probe          = plx_dma_probe,
	.remove		= plx_dma_remove,
};
module_pci_driver(plx_dma_pci_driver);
636