// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2019-2022 HiSilicon Limited. */

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include "virt-dma.h"

/* HiSilicon DMA register common field define */
#define HISI_DMA_Q_SQ_BASE_L			0x0
#define HISI_DMA_Q_SQ_BASE_H			0x4
#define HISI_DMA_Q_SQ_DEPTH			0x8
#define HISI_DMA_Q_SQ_TAIL_PTR			0xc
#define HISI_DMA_Q_CQ_BASE_L			0x10
#define HISI_DMA_Q_CQ_BASE_H			0x14
#define HISI_DMA_Q_CQ_DEPTH			0x18
#define HISI_DMA_Q_CQ_HEAD_PTR			0x1c
#define HISI_DMA_Q_CTRL0			0x20
#define HISI_DMA_Q_CTRL0_QUEUE_EN		BIT(0)
#define HISI_DMA_Q_CTRL0_QUEUE_PAUSE		BIT(4)
#define HISI_DMA_Q_CTRL1			0x24
#define HISI_DMA_Q_CTRL1_QUEUE_RESET		BIT(0)
#define HISI_DMA_Q_FSM_STS			0x30
#define HISI_DMA_Q_FSM_STS_MASK			GENMASK(3, 0)
#define HISI_DMA_Q_ERR_INT_NUM0			0x84
#define HISI_DMA_Q_ERR_INT_NUM1			0x88
#define HISI_DMA_Q_ERR_INT_NUM2			0x8c

/* HiSilicon IP08 DMA register and field define */
#define HISI_DMA_HIP08_MODE			0x217C
#define HISI_DMA_HIP08_Q_BASE			0x0
#define HISI_DMA_HIP08_Q_CTRL0_ERR_ABORT_EN	BIT(2)
#define HISI_DMA_HIP08_Q_INT_STS		0x40
#define HISI_DMA_HIP08_Q_INT_MSK		0x44
#define HISI_DMA_HIP08_Q_INT_STS_MASK		GENMASK(14, 0)
#define HISI_DMA_HIP08_Q_ERR_INT_NUM3		0x90
#define HISI_DMA_HIP08_Q_ERR_INT_NUM4		0x94
#define HISI_DMA_HIP08_Q_ERR_INT_NUM5		0x98
#define HISI_DMA_HIP08_Q_ERR_INT_NUM6		0x48
#define HISI_DMA_HIP08_Q_CTRL0_SQCQ_DRCT	BIT(24)

/* HiSilicon IP09 DMA register and field define */
#define HISI_DMA_HIP09_DMA_FLR_DISABLE		0xA00
#define HISI_DMA_HIP09_DMA_FLR_DISABLE_B	BIT(0)
#define HISI_DMA_HIP09_Q_BASE			0x2000
#define HISI_DMA_HIP09_Q_CTRL0_ERR_ABORT_EN	GENMASK(31, 28)
#define HISI_DMA_HIP09_Q_CTRL0_SQ_DRCT		BIT(26)
#define HISI_DMA_HIP09_Q_CTRL0_CQ_DRCT		BIT(27)
#define HISI_DMA_HIP09_Q_CTRL1_VA_ENABLE	BIT(2)
#define HISI_DMA_HIP09_Q_INT_STS		0x40
#define HISI_DMA_HIP09_Q_INT_MSK		0x44
#define HISI_DMA_HIP09_Q_INT_STS_MASK		0x1
#define HISI_DMA_HIP09_Q_ERR_INT_STS		0x48
#define HISI_DMA_HIP09_Q_ERR_INT_MSK		0x4C
#define HISI_DMA_HIP09_Q_ERR_INT_STS_MASK	GENMASK(18, 1)
#define HISI_DMA_HIP09_PORT_CFG_REG(port_id)	(0x800 + \
						 (port_id) * 0x20)
#define HISI_DMA_HIP09_PORT_CFG_LINK_DOWN_MASK_B	BIT(16)

#define HISI_DMA_HIP09_MAX_PORT_NUM		16

#define HISI_DMA_HIP08_MSI_NUM			32
#define HISI_DMA_HIP08_CHAN_NUM			30
#define HISI_DMA_HIP09_MSI_NUM			4
#define HISI_DMA_HIP09_CHAN_NUM			4
#define HISI_DMA_REVISION_HIP08B		0x21
#define HISI_DMA_REVISION_HIP09A		0x30

#define HISI_DMA_Q_OFFSET			0x100
#define HISI_DMA_Q_DEPTH_VAL			1024

#define PCI_BAR_2				2

#define HISI_DMA_POLL_Q_STS_DELAY_US		10
#define HISI_DMA_POLL_Q_STS_TIME_OUT_US		1000

#define HISI_DMA_MAX_DIR_NAME_LEN		128

/*
 * The HIP08B (HiSilicon IP08) and HIP09A (HiSilicon IP09) are DMA iEPs.
 * They share the same PCI device ID but have different PCI revisions.
 * Unfortunately, they also have different register layouts, so two layout
 * enumerations are defined.
 */
enum hisi_dma_reg_layout {
	HISI_DMA_REG_LAYOUT_INVALID = 0,
	HISI_DMA_REG_LAYOUT_HIP08,
	HISI_DMA_REG_LAYOUT_HIP09
};

enum hisi_dma_mode {
	EP = 0,
	RC,
};

enum hisi_dma_chan_status {
	DISABLE = -1,
	IDLE = 0,
	RUN,
	CPL,
	PAUSE,
	HALT,
	ABORT,
	WAIT,
	BUFFCLR,
};

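/*
 * Hardware submission queue entry (SQE): dw0 carries the opcode and the
 * per-descriptor interrupt enable, while length, src_addr and dst_addr are
 * stored little-endian for the DMA engine.
 */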
struct hisi_dma_sqe {
	__le32 dw0;
#define OPCODE_MASK		GENMASK(3, 0)
#define OPCODE_SMALL_PACKAGE	0x1
#define OPCODE_M2M		0x4
#define LOCAL_IRQ_EN		BIT(8)
#define ATTR_SRC_MASK		GENMASK(14, 12)
	__le32 dw1;
	__le32 dw2;
#define ATTR_DST_MASK		GENMASK(26, 24)
	__le32 length;
	__le64 src_addr;
	__le64 dst_addr;
};

struct hisi_dma_cqe {
	__le32 rsv0;
	__le32 rsv1;
	__le16 sq_head;
	__le16 rsv2;
	__le16 rsv3;
	__le16 w0;
#define STATUS_MASK		GENMASK(15, 1)
#define STATUS_SUCC		0x0
#define VALID_BIT		BIT(0)
};

struct hisi_dma_desc {
	struct virt_dma_desc vd;
	struct hisi_dma_sqe sqe;
};

struct hisi_dma_chan {
	struct virt_dma_chan vc;
	struct hisi_dma_dev *hdma_dev;
	struct hisi_dma_sqe *sq;
	struct hisi_dma_cqe *cq;
	dma_addr_t sq_dma;
	dma_addr_t cq_dma;
	u32 sq_tail;
	u32 cq_head;
	u32 qp_num;
	enum hisi_dma_chan_status status;
	struct hisi_dma_desc *desc;
};

struct hisi_dma_dev {
	struct pci_dev *pdev;
	void __iomem *base;
	struct dma_device dma_dev;
	u32 chan_num;
	u32 chan_depth;
	enum hisi_dma_reg_layout reg_layout;
	void __iomem *queue_base; /* start of the queue register region */
	struct hisi_dma_chan chan[];
};

#ifdef CONFIG_DEBUG_FS

static const struct debugfs_reg32 hisi_dma_comm_chan_regs[] = {
	{"DMA_QUEUE_SQ_DEPTH ", 0x0008ull},
	{"DMA_QUEUE_SQ_TAIL_PTR ", 0x000Cull},
	{"DMA_QUEUE_CQ_DEPTH ", 0x0018ull},
	{"DMA_QUEUE_CQ_HEAD_PTR ", 0x001Cull},
	{"DMA_QUEUE_CTRL0 ", 0x0020ull},
	{"DMA_QUEUE_CTRL1 ", 0x0024ull},
	{"DMA_QUEUE_FSM_STS ", 0x0030ull},
	{"DMA_QUEUE_SQ_STS ", 0x0034ull},
	{"DMA_QUEUE_CQ_TAIL_PTR ", 0x003Cull},
	{"DMA_QUEUE_INT_STS ", 0x0040ull},
	{"DMA_QUEUE_INT_MSK ", 0x0044ull},
	{"DMA_QUEUE_INT_RO ", 0x006Cull},
};

static const struct debugfs_reg32 hisi_dma_hip08_chan_regs[] = {
	{"DMA_QUEUE_BYTE_CNT ", 0x0038ull},
	{"DMA_ERR_INT_NUM6 ", 0x0048ull},
	{"DMA_QUEUE_DESP0 ", 0x0050ull},
	{"DMA_QUEUE_DESP1 ", 0x0054ull},
	{"DMA_QUEUE_DESP2 ", 0x0058ull},
	{"DMA_QUEUE_DESP3 ", 0x005Cull},
	{"DMA_QUEUE_DESP4 ", 0x0074ull},
	{"DMA_QUEUE_DESP5 ", 0x0078ull},
	{"DMA_QUEUE_DESP6 ", 0x007Cull},
	{"DMA_QUEUE_DESP7 ", 0x0080ull},
	{"DMA_ERR_INT_NUM0 ", 0x0084ull},
	{"DMA_ERR_INT_NUM1 ", 0x0088ull},
	{"DMA_ERR_INT_NUM2 ", 0x008Cull},
	{"DMA_ERR_INT_NUM3 ", 0x0090ull},
	{"DMA_ERR_INT_NUM4 ", 0x0094ull},
	{"DMA_ERR_INT_NUM5 ", 0x0098ull},
	{"DMA_QUEUE_SQ_STS2 ", 0x00A4ull},
};

static const struct debugfs_reg32 hisi_dma_hip09_chan_regs[] = {
	{"DMA_QUEUE_ERR_INT_STS ", 0x0048ull},
	{"DMA_QUEUE_ERR_INT_MSK ", 0x004Cull},
	{"DFX_SQ_READ_ERR_PTR ", 0x0068ull},
	{"DFX_DMA_ERR_INT_NUM0 ", 0x0084ull},
	{"DFX_DMA_ERR_INT_NUM1 ", 0x0088ull},
	{"DFX_DMA_ERR_INT_NUM2 ", 0x008Cull},
	{"DFX_DMA_QUEUE_SQ_STS2 ", 0x00A4ull},
};

static const struct debugfs_reg32 hisi_dma_hip08_comm_regs[] = {
	{"DMA_ECC_ERR_ADDR ", 0x2004ull},
	{"DMA_ECC_ECC_CNT ", 0x2014ull},
	{"COMMON_AND_CH_ERR_STS ", 0x2030ull},
	{"LOCAL_CPL_ID_STS_0 ", 0x20E0ull},
	{"LOCAL_CPL_ID_STS_1 ", 0x20E4ull},
	{"LOCAL_CPL_ID_STS_2 ", 0x20E8ull},
	{"LOCAL_CPL_ID_STS_3 ", 0x20ECull},
	{"LOCAL_TLP_NUM ", 0x2158ull},
	{"SQCQ_TLP_NUM ", 0x2164ull},
	{"CPL_NUM ", 0x2168ull},
	{"INF_BACK_PRESS_STS ", 0x2170ull},
	{"DMA_CH_RAS_LEVEL ", 0x2184ull},
	{"DMA_CM_RAS_LEVEL ", 0x2188ull},
	{"DMA_CH_ERR_STS ", 0x2190ull},
	{"DMA_CH_DONE_STS ", 0x2194ull},
	{"DMA_SQ_TAG_STS_0 ", 0x21A0ull},
	{"DMA_SQ_TAG_STS_1 ", 0x21A4ull},
	{"DMA_SQ_TAG_STS_2 ", 0x21A8ull},
	{"DMA_SQ_TAG_STS_3 ", 0x21ACull},
	{"LOCAL_P_ID_STS_0 ", 0x21B0ull},
	{"LOCAL_P_ID_STS_1 ", 0x21B4ull},
	{"LOCAL_P_ID_STS_2 ", 0x21B8ull},
	{"LOCAL_P_ID_STS_3 ", 0x21BCull},
	{"DMA_PREBUFF_INFO_0 ", 0x2200ull},
	{"DMA_CM_TABLE_INFO_0 ", 0x2220ull},
	{"DMA_CM_CE_RO ", 0x2244ull},
	{"DMA_CM_NFE_RO ", 0x2248ull},
	{"DMA_CM_FE_RO ", 0x224Cull},
};

static const struct debugfs_reg32 hisi_dma_hip09_comm_regs[] = {
	{"COMMON_AND_CH_ERR_STS ", 0x0030ull},
	{"DMA_PORT_IDLE_STS ", 0x0150ull},
	{"DMA_CH_RAS_LEVEL ", 0x0184ull},
	{"DMA_CM_RAS_LEVEL ", 0x0188ull},
	{"DMA_CM_CE_RO ", 0x0244ull},
	{"DMA_CM_NFE_RO ", 0x0248ull},
	{"DMA_CM_FE_RO ", 0x024Cull},
	{"DFX_INF_BACK_PRESS_STS0 ", 0x1A40ull},
	{"DFX_INF_BACK_PRESS_STS1 ", 0x1A44ull},
	{"DFX_INF_BACK_PRESS_STS2 ", 0x1A48ull},
	{"DFX_DMA_WRR_DISABLE ", 0x1A4Cull},
	{"DFX_PA_REQ_TLP_NUM ", 0x1C00ull},
	{"DFX_PA_BACK_TLP_NUM ", 0x1C04ull},
	{"DFX_PA_RETRY_TLP_NUM ", 0x1C08ull},
	{"DFX_LOCAL_NP_TLP_NUM ", 0x1C0Cull},
	{"DFX_LOCAL_CPL_HEAD_TLP_NUM ", 0x1C10ull},
	{"DFX_LOCAL_CPL_DATA_TLP_NUM ", 0x1C14ull},
	{"DFX_LOCAL_CPL_EXT_DATA_TLP_NUM ", 0x1C18ull},
	{"DFX_LOCAL_P_HEAD_TLP_NUM ", 0x1C1Cull},
	{"DFX_LOCAL_P_ACK_TLP_NUM ", 0x1C20ull},
	{"DFX_BUF_ALOC_PORT_REQ_NUM ", 0x1C24ull},
	{"DFX_BUF_ALOC_PORT_RESULT_NUM ", 0x1C28ull},
	{"DFX_BUF_FAIL_SIZE_NUM ", 0x1C2Cull},
	{"DFX_BUF_ALOC_SIZE_NUM ", 0x1C30ull},
	{"DFX_BUF_NP_RELEASE_SIZE_NUM ", 0x1C34ull},
	{"DFX_BUF_P_RELEASE_SIZE_NUM ", 0x1C38ull},
	{"DFX_BUF_PORT_RELEASE_SIZE_NUM ", 0x1C3Cull},
	{"DFX_DMA_PREBUF_MEM0_ECC_ERR_ADDR ", 0x1CA8ull},
	{"DFX_DMA_PREBUF_MEM0_ECC_CNT ", 0x1CACull},
	{"DFX_DMA_LOC_NP_OSTB_ECC_ERR_ADDR ", 0x1CB0ull},
	{"DFX_DMA_LOC_NP_OSTB_ECC_CNT ", 0x1CB4ull},
	{"DFX_DMA_PREBUF_MEM1_ECC_ERR_ADDR ", 0x1CC0ull},
	{"DFX_DMA_PREBUF_MEM1_ECC_CNT ", 0x1CC4ull},
	{"DMA_CH_DONE_STS ", 0x02E0ull},
	{"DMA_CH_ERR_STS ", 0x0320ull},
};
#endif /* CONFIG_DEBUG_FS */

static enum hisi_dma_reg_layout hisi_dma_get_reg_layout(struct pci_dev *pdev)
{
	if (pdev->revision == HISI_DMA_REVISION_HIP08B)
		return HISI_DMA_REG_LAYOUT_HIP08;
	else if (pdev->revision >= HISI_DMA_REVISION_HIP09A)
		return HISI_DMA_REG_LAYOUT_HIP09;

	return HISI_DMA_REG_LAYOUT_INVALID;
}

static u32 hisi_dma_get_chan_num(struct pci_dev *pdev)
{
	if (pdev->revision == HISI_DMA_REVISION_HIP08B)
		return HISI_DMA_HIP08_CHAN_NUM;

	return HISI_DMA_HIP09_CHAN_NUM;
}

static u32 hisi_dma_get_msi_num(struct pci_dev *pdev)
{
	if (pdev->revision == HISI_DMA_REVISION_HIP08B)
		return HISI_DMA_HIP08_MSI_NUM;

	return HISI_DMA_HIP09_MSI_NUM;
}

static u32 hisi_dma_get_queue_base(struct pci_dev *pdev)
{
	if (pdev->revision == HISI_DMA_REVISION_HIP08B)
		return HISI_DMA_HIP08_Q_BASE;

	return HISI_DMA_HIP09_Q_BASE;
}

static inline struct hisi_dma_chan *to_hisi_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct hisi_dma_chan, vc.chan);
}

static inline struct hisi_dma_desc *to_hisi_dma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct hisi_dma_desc, vd);
}

static inline void hisi_dma_chan_write(void __iomem *base, u32 reg, u32 index,
				       u32 val)
{
	writel_relaxed(val, base + reg + index * HISI_DMA_Q_OFFSET);
}

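/* Read-modify-write helper: set or clear the bit(s) given by @pos at @addr. */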
static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val)
{
	u32 tmp;

	tmp = readl_relaxed(addr);
	tmp = val ? tmp | pos : tmp & ~pos;
	writel_relaxed(tmp, addr);
}

static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index,
			       bool pause)
{
	void __iomem *addr;

	addr = hdma_dev->queue_base + HISI_DMA_Q_CTRL0 +
	       index * HISI_DMA_Q_OFFSET;
	hisi_dma_update_bit(addr, HISI_DMA_Q_CTRL0_QUEUE_PAUSE, pause);
}

static void hisi_dma_enable_dma(struct hisi_dma_dev *hdma_dev, u32 index,
				bool enable)
{
	void __iomem *addr;

	addr = hdma_dev->queue_base + HISI_DMA_Q_CTRL0 +
	       index * HISI_DMA_Q_OFFSET;
	hisi_dma_update_bit(addr, HISI_DMA_Q_CTRL0_QUEUE_EN, enable);
}

static void hisi_dma_mask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
	void __iomem *q_base = hdma_dev->queue_base;

	if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08)
		hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_INT_MSK,
				    qp_index, HISI_DMA_HIP08_Q_INT_STS_MASK);
	else {
		hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_INT_MSK,
				    qp_index, HISI_DMA_HIP09_Q_INT_STS_MASK);
		hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_ERR_INT_MSK,
				    qp_index,
				    HISI_DMA_HIP09_Q_ERR_INT_STS_MASK);
	}
}

static void hisi_dma_unmask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
	void __iomem *q_base = hdma_dev->queue_base;

	if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) {
		hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_INT_STS,
				    qp_index, HISI_DMA_HIP08_Q_INT_STS_MASK);
		hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_INT_MSK,
				    qp_index, 0);
	} else {
		hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_INT_STS,
				    qp_index, HISI_DMA_HIP09_Q_INT_STS_MASK);
		hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_ERR_INT_STS,
				    qp_index,
				    HISI_DMA_HIP09_Q_ERR_INT_STS_MASK);
		hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_INT_MSK,
				    qp_index, 0);
		hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_ERR_INT_MSK,
				    qp_index, 0);
	}
}

static void hisi_dma_do_reset(struct hisi_dma_dev *hdma_dev, u32 index)
{
	void __iomem *addr;

	addr = hdma_dev->queue_base +
	       HISI_DMA_Q_CTRL1 + index * HISI_DMA_Q_OFFSET;
	hisi_dma_update_bit(addr, HISI_DMA_Q_CTRL1_QUEUE_RESET, 1);
}

static void hisi_dma_reset_qp_point(struct hisi_dma_dev *hdma_dev, u32 index)
{
	void __iomem *q_base = hdma_dev->queue_base;

	hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_TAIL_PTR, index, 0);
	hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_HEAD_PTR, index, 0);
}

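/*
 * Quiesce a hardware channel: pause and disable the queue, mask its
 * interrupts, wait for the queue FSM to leave the RUN state, then reset the
 * queue and its pointers. Unless @disable is set, the queue is re-enabled
 * and its interrupts are unmasked again afterwards.
 */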
static void hisi_dma_reset_or_disable_hw_chan(struct hisi_dma_chan *chan,
					      bool disable)
{
	struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
	u32 index = chan->qp_num, tmp;
	void __iomem *addr;
	int ret;

	hisi_dma_pause_dma(hdma_dev, index, true);
	hisi_dma_enable_dma(hdma_dev, index, false);
	hisi_dma_mask_irq(hdma_dev, index);

	addr = hdma_dev->queue_base +
	       HISI_DMA_Q_FSM_STS + index * HISI_DMA_Q_OFFSET;

	ret = readl_relaxed_poll_timeout(addr, tmp,
		FIELD_GET(HISI_DMA_Q_FSM_STS_MASK, tmp) != RUN,
		HISI_DMA_POLL_Q_STS_DELAY_US, HISI_DMA_POLL_Q_STS_TIME_OUT_US);
	if (ret) {
		dev_err(&hdma_dev->pdev->dev, "disable channel timeout!\n");
		WARN_ON(1);
	}

	hisi_dma_do_reset(hdma_dev, index);
	hisi_dma_reset_qp_point(hdma_dev, index);
	hisi_dma_pause_dma(hdma_dev, index, false);

	if (!disable) {
		hisi_dma_enable_dma(hdma_dev, index, true);
		hisi_dma_unmask_irq(hdma_dev, index);
	}

	ret = readl_relaxed_poll_timeout(addr, tmp,
		FIELD_GET(HISI_DMA_Q_FSM_STS_MASK, tmp) == IDLE,
		HISI_DMA_POLL_Q_STS_DELAY_US, HISI_DMA_POLL_Q_STS_TIME_OUT_US);
	if (ret) {
		dev_err(&hdma_dev->pdev->dev, "reset channel timeout!\n");
		WARN_ON(1);
	}
}

static void hisi_dma_free_chan_resources(struct dma_chan *c)
{
	struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
	struct hisi_dma_dev *hdma_dev = chan->hdma_dev;

	hisi_dma_reset_or_disable_hw_chan(chan, false);
	vchan_free_chan_resources(&chan->vc);

	memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth);
	memset(chan->cq, 0, sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth);
	chan->sq_tail = 0;
	chan->cq_head = 0;
	chan->status = DISABLE;
}

static void hisi_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(to_hisi_dma_desc(vd));
}

static struct dma_async_tx_descriptor *
hisi_dma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dst, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
	struct hisi_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->sqe.length = cpu_to_le32(len);
	desc->sqe.src_addr = cpu_to_le64(src);
	desc->sqe.dst_addr = cpu_to_le64(dst);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);
}

static enum dma_status
hisi_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	return dma_cookie_status(c, cookie, txstate);
}

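/*
 * Pick the next pending virt-dma descriptor, copy its SQE into the
 * submission queue and ring the doorbell by writing the new SQ tail.
 * Called with the channel's vc.lock held.
 */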
static void hisi_dma_start_transfer(struct hisi_dma_chan *chan)
{
	struct hisi_dma_sqe *sqe = chan->sq + chan->sq_tail;
	struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
	struct hisi_dma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd) {
		chan->desc = NULL;
		return;
	}
	list_del(&vd->node);
	desc = to_hisi_dma_desc(vd);
	chan->desc = desc;

	memcpy(sqe, &desc->sqe, sizeof(struct hisi_dma_sqe));

	/* update the other fields in the sqe */
	sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M));
	sqe->dw0 |= cpu_to_le32(LOCAL_IRQ_EN);

	/* make sure the data has been updated in the sqe */
	wmb();

	/* update sq tail, pointing to the new sqe position */
	chan->sq_tail = (chan->sq_tail + 1) % hdma_dev->chan_depth;

	/* update sq_tail to trigger a new task */
	hisi_dma_chan_write(hdma_dev->queue_base, HISI_DMA_Q_SQ_TAIL_PTR,
			    chan->qp_num, chan->sq_tail);
}

static void hisi_dma_issue_pending(struct dma_chan *c)
{
	struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (vchan_issue_pending(&chan->vc) && !chan->desc)
		hisi_dma_start_transfer(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static int hisi_dma_terminate_all(struct dma_chan *c)
{
	struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vc.lock, flags);

	hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, true);
	if (chan->desc) {
		vchan_terminate_vdesc(&chan->desc->vd);
		chan->desc = NULL;
	}

	vchan_get_all_descriptors(&chan->vc, &head);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);
	hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, false);

	return 0;
}

static void hisi_dma_synchronize(struct dma_chan *c)
{
	struct hisi_dma_chan *chan = to_hisi_dma_chan(c);

	vchan_synchronize(&chan->vc);
}

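/*
 * Allocate DMA-coherent submission and completion queues for every channel.
 * The memory is device-managed and released automatically on driver detach.
 */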
static int hisi_dma_alloc_qps_mem(struct hisi_dma_dev *hdma_dev)
{
	size_t sq_size = sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth;
	size_t cq_size = sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth;
	struct device *dev = &hdma_dev->pdev->dev;
	struct hisi_dma_chan *chan;
	int i;

	for (i = 0; i < hdma_dev->chan_num; i++) {
		chan = &hdma_dev->chan[i];
		chan->sq = dmam_alloc_coherent(dev, sq_size, &chan->sq_dma,
					       GFP_KERNEL);
		if (!chan->sq)
			return -ENOMEM;

		chan->cq = dmam_alloc_coherent(dev, cq_size, &chan->cq_dma,
					       GFP_KERNEL);
		if (!chan->cq)
			return -ENOMEM;
	}

	return 0;
}

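/*
 * Program the per-queue hardware registers: SQ/CQ base addresses and depths,
 * initial pointers, error interrupt counters and the layout-specific control
 * bits (SQ/CQ direction, error abort and FLR handling).
 */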
static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index)
{
	struct hisi_dma_chan *chan = &hdma_dev->chan[index];
	void __iomem *q_base = hdma_dev->queue_base;
	u32 hw_depth = hdma_dev->chan_depth - 1;
	void __iomem *addr;
	u32 tmp;

	/* set sq, cq base */
	hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_BASE_L, index,
			    lower_32_bits(chan->sq_dma));
	hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_BASE_H, index,
			    upper_32_bits(chan->sq_dma));
	hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_BASE_L, index,
			    lower_32_bits(chan->cq_dma));
	hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_BASE_H, index,
			    upper_32_bits(chan->cq_dma));

	/* set sq, cq depth */
	hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_DEPTH, index, hw_depth);
	hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_DEPTH, index, hw_depth);

	/* init sq tail and cq head */
	hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_TAIL_PTR, index, 0);
	hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_HEAD_PTR, index, 0);

	/* init error interrupt statistics */
	hisi_dma_chan_write(q_base, HISI_DMA_Q_ERR_INT_NUM0, index, 0);
	hisi_dma_chan_write(q_base, HISI_DMA_Q_ERR_INT_NUM1, index, 0);
	hisi_dma_chan_write(q_base, HISI_DMA_Q_ERR_INT_NUM2, index, 0);

	if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) {
		hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_ERR_INT_NUM3,
				    index, 0);
		hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_ERR_INT_NUM4,
				    index, 0);
		hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_ERR_INT_NUM5,
				    index, 0);
		hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_ERR_INT_NUM6,
				    index, 0);
		/*
		 * init SQ/CQ direction selecting register.
		 * "0" is to local side and "1" is to remote side.
		 */
		addr = q_base + HISI_DMA_Q_CTRL0 + index * HISI_DMA_Q_OFFSET;
		hisi_dma_update_bit(addr, HISI_DMA_HIP08_Q_CTRL0_SQCQ_DRCT, 0);

		/*
		 * 0 - Continue to the next descriptor if an error occurs.
		 * 1 - Abort the DMA queue if an error occurs.
		 */
		hisi_dma_update_bit(addr,
				    HISI_DMA_HIP08_Q_CTRL0_ERR_ABORT_EN, 0);
	} else {
		addr = q_base + HISI_DMA_Q_CTRL0 + index * HISI_DMA_Q_OFFSET;

		/*
		 * init SQ/CQ direction selecting register.
		 * "0" is to local side and "1" is to remote side.
		 */
		hisi_dma_update_bit(addr, HISI_DMA_HIP09_Q_CTRL0_SQ_DRCT, 0);
		hisi_dma_update_bit(addr, HISI_DMA_HIP09_Q_CTRL0_CQ_DRCT, 0);

		/*
		 * 0 - Continue to the next descriptor if an error occurs.
		 * 1 - Abort the DMA queue if an error occurs.
		 */

		tmp = readl_relaxed(addr);
		tmp &= ~HISI_DMA_HIP09_Q_CTRL0_ERR_ABORT_EN;
		writel_relaxed(tmp, addr);

		/*
		 * 0 - DMA processes FLR together with the CPU.
		 * 1 - DMA does not process FLR; only the CPU processes FLR.
		 */
		addr = q_base + HISI_DMA_HIP09_DMA_FLR_DISABLE +
		       index * HISI_DMA_Q_OFFSET;
		hisi_dma_update_bit(addr, HISI_DMA_HIP09_DMA_FLR_DISABLE_B, 0);

		addr = q_base + HISI_DMA_Q_CTRL1 + index * HISI_DMA_Q_OFFSET;
		hisi_dma_update_bit(addr, HISI_DMA_HIP09_Q_CTRL1_VA_ENABLE, 1);
	}
}

static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
	hisi_dma_init_hw_qp(hdma_dev, qp_index);
	hisi_dma_unmask_irq(hdma_dev, qp_index);
	hisi_dma_enable_dma(hdma_dev, qp_index, true);
}

static void hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
	hisi_dma_reset_or_disable_hw_chan(&hdma_dev->chan[qp_index], true);
}

static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev)
{
	int i;

	for (i = 0; i < hdma_dev->chan_num; i++) {
		hdma_dev->chan[i].qp_num = i;
		hdma_dev->chan[i].hdma_dev = hdma_dev;
		hdma_dev->chan[i].vc.desc_free = hisi_dma_desc_free;
		vchan_init(&hdma_dev->chan[i].vc, &hdma_dev->dma_dev);
		hisi_dma_enable_qp(hdma_dev, i);
	}
}

static void hisi_dma_disable_qps(struct hisi_dma_dev *hdma_dev)
{
	int i;

	for (i = 0; i < hdma_dev->chan_num; i++) {
		hisi_dma_disable_qp(hdma_dev, i);
		tasklet_kill(&hdma_dev->chan[i].vc.task);
	}
}

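/*
 * Per-queue interrupt handler: consume the current completion queue entry,
 * advance the CQ head and, on success, complete the cookie and kick off the
 * next pending descriptor.
 */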
static irqreturn_t hisi_dma_irq(int irq, void *data)
{
	struct hisi_dma_chan *chan = data;
	struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
	struct hisi_dma_desc *desc;
	struct hisi_dma_cqe *cqe;
	void __iomem *q_base;

	spin_lock(&chan->vc.lock);

	desc = chan->desc;
	cqe = chan->cq + chan->cq_head;
	q_base = hdma_dev->queue_base;
	if (desc) {
		chan->cq_head = (chan->cq_head + 1) % hdma_dev->chan_depth;
		hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_HEAD_PTR,
				    chan->qp_num, chan->cq_head);
		if (FIELD_GET(STATUS_MASK, cqe->w0) == STATUS_SUCC) {
			vchan_cookie_complete(&desc->vd);
			hisi_dma_start_transfer(chan);
		} else {
			dev_err(&hdma_dev->pdev->dev, "task error!\n");
		}
	}

	spin_unlock(&chan->vc.lock);

	return IRQ_HANDLED;
}

static int hisi_dma_request_qps_irq(struct hisi_dma_dev *hdma_dev)
{
	struct pci_dev *pdev = hdma_dev->pdev;
	int i, ret;

	for (i = 0; i < hdma_dev->chan_num; i++) {
		ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
				       hisi_dma_irq, IRQF_SHARED, "hisi_dma",
				       &hdma_dev->chan[i]);
		if (ret)
			return ret;
	}

	return 0;
}

/* This function enables all hw channels in a device */
static int hisi_dma_enable_hw_channels(struct hisi_dma_dev *hdma_dev)
{
	int ret;

	ret = hisi_dma_alloc_qps_mem(hdma_dev);
	if (ret) {
		dev_err(&hdma_dev->pdev->dev, "fail to allocate qp memory!\n");
		return ret;
	}

	ret = hisi_dma_request_qps_irq(hdma_dev);
	if (ret) {
		dev_err(&hdma_dev->pdev->dev, "fail to request qp irq!\n");
		return ret;
	}

	hisi_dma_enable_qps(hdma_dev);

	return 0;
}

static void hisi_dma_disable_hw_channels(void *data)
{
	hisi_dma_disable_qps(data);
}

static void hisi_dma_set_mode(struct hisi_dma_dev *hdma_dev,
			      enum hisi_dma_mode mode)
{
	if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08)
		writel_relaxed(mode == RC ? 1 : 0,
			       hdma_dev->base + HISI_DMA_HIP08_MODE);
}

static void hisi_dma_init_hw(struct hisi_dma_dev *hdma_dev)
{
	void __iomem *addr;
	int i;

	if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP09) {
		for (i = 0; i < HISI_DMA_HIP09_MAX_PORT_NUM; i++) {
			addr = hdma_dev->base + HISI_DMA_HIP09_PORT_CFG_REG(i);
			hisi_dma_update_bit(addr,
				HISI_DMA_HIP09_PORT_CFG_LINK_DOWN_MASK_B, 1);
		}
	}
}

static void hisi_dma_init_dma_dev(struct hisi_dma_dev *hdma_dev)
{
	struct dma_device *dma_dev;

	dma_dev = &hdma_dev->dma_dev;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = hisi_dma_prep_dma_memcpy;
	dma_dev->device_tx_status = hisi_dma_tx_status;
	dma_dev->device_issue_pending = hisi_dma_issue_pending;
	dma_dev->device_terminate_all = hisi_dma_terminate_all;
	dma_dev->device_synchronize = hisi_dma_synchronize;
	dma_dev->directions = BIT(DMA_MEM_TO_MEM);
	dma_dev->dev = &hdma_dev->pdev->dev;
	INIT_LIST_HEAD(&dma_dev->channels);
}

/* --- debugfs implementation --- */
#ifdef CONFIG_DEBUG_FS
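/*
 * Build the per-channel debugfs register list: the common queue registers
 * plus the HIP08- or HIP09-specific ones, depending on the register layout.
 */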
static struct debugfs_reg32 *hisi_dma_get_ch_regs(struct hisi_dma_dev *hdma_dev,
						  u32 *regs_sz)
{
	struct device *dev = &hdma_dev->pdev->dev;
	struct debugfs_reg32 *regs;
	u32 regs_sz_comm;

	regs_sz_comm = ARRAY_SIZE(hisi_dma_comm_chan_regs);

	if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08)
		*regs_sz = regs_sz_comm + ARRAY_SIZE(hisi_dma_hip08_chan_regs);
	else
		*regs_sz = regs_sz_comm + ARRAY_SIZE(hisi_dma_hip09_chan_regs);

	regs = devm_kcalloc(dev, *regs_sz, sizeof(struct debugfs_reg32),
			    GFP_KERNEL);
	if (!regs)
		return NULL;
	memcpy(regs, hisi_dma_comm_chan_regs, sizeof(hisi_dma_comm_chan_regs));

	if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08)
		memcpy(regs + regs_sz_comm, hisi_dma_hip08_chan_regs,
		       sizeof(hisi_dma_hip08_chan_regs));
	else
		memcpy(regs + regs_sz_comm, hisi_dma_hip09_chan_regs,
		       sizeof(hisi_dma_hip09_chan_regs));

	return regs;
}

static int hisi_dma_create_chan_dir(struct hisi_dma_dev *hdma_dev)
{
	char dir_name[HISI_DMA_MAX_DIR_NAME_LEN];
	struct debugfs_regset32 *regsets;
	struct debugfs_reg32 *regs;
	struct dentry *chan_dir;
	struct device *dev;
	u32 regs_sz;
	int ret;
	int i;

	dev = &hdma_dev->pdev->dev;

	regsets = devm_kcalloc(dev, hdma_dev->chan_num,
			       sizeof(*regsets), GFP_KERNEL);
	if (!regsets)
		return -ENOMEM;

	regs = hisi_dma_get_ch_regs(hdma_dev, &regs_sz);
	if (!regs)
		return -ENOMEM;

	for (i = 0; i < hdma_dev->chan_num; i++) {
		regsets[i].regs = regs;
		regsets[i].nregs = regs_sz;
		regsets[i].base = hdma_dev->queue_base + i * HISI_DMA_Q_OFFSET;
		regsets[i].dev = dev;

		memset(dir_name, 0, HISI_DMA_MAX_DIR_NAME_LEN);
		ret = sprintf(dir_name, "channel%d", i);
		if (ret < 0)
			return ret;

		chan_dir = debugfs_create_dir(dir_name,
					      hdma_dev->dma_dev.dbg_dev_root);
		debugfs_create_regset32("regs", 0444, chan_dir, &regsets[i]);
	}

	return 0;
}

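/*
 * Expose the common registers and one "regs" file per channel under the
 * dmaengine debugfs root of this device.
 */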
static void hisi_dma_create_debugfs(struct hisi_dma_dev *hdma_dev)
{
	struct debugfs_regset32 *regset;
	struct device *dev;
	int ret;

	dev = &hdma_dev->pdev->dev;

	if (hdma_dev->dma_dev.dbg_dev_root == NULL)
		return;

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return;

	if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) {
		regset->regs = hisi_dma_hip08_comm_regs;
		regset->nregs = ARRAY_SIZE(hisi_dma_hip08_comm_regs);
	} else {
		regset->regs = hisi_dma_hip09_comm_regs;
		regset->nregs = ARRAY_SIZE(hisi_dma_hip09_comm_regs);
	}
	regset->base = hdma_dev->base;
	regset->dev = dev;

	debugfs_create_regset32("regs", 0444,
				hdma_dev->dma_dev.dbg_dev_root, regset);

	ret = hisi_dma_create_chan_dir(hdma_dev);
	if (ret < 0)
		dev_info(&hdma_dev->pdev->dev, "fail to create debugfs for channels!\n");
}
#else
static void hisi_dma_create_debugfs(struct hisi_dma_dev *hdma_dev) { }
#endif /* CONFIG_DEBUG_FS */
/* --- debugfs implementation --- */

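/*
 * Probe: map BAR2, set the DMA mask, allocate the device structure and MSI
 * vectors, initialize the hardware and channels, register the dmaengine
 * device and create the debugfs entries.
 */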
static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	enum hisi_dma_reg_layout reg_layout;
	struct device *dev = &pdev->dev;
	struct hisi_dma_dev *hdma_dev;
	struct dma_device *dma_dev;
	u32 chan_num;
	u32 msi_num;
	int ret;

	reg_layout = hisi_dma_get_reg_layout(pdev);
	if (reg_layout == HISI_DMA_REG_LAYOUT_INVALID) {
		dev_err(dev, "unsupported device!\n");
		return -EINVAL;
	}

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "failed to enable device mem!\n");
		return ret;
	}

	ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_2, pci_name(pdev));
	if (ret) {
		dev_err(dev, "failed to remap I/O region!\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	chan_num = hisi_dma_get_chan_num(pdev);
	hdma_dev = devm_kzalloc(dev, struct_size(hdma_dev, chan, chan_num),
				GFP_KERNEL);
	if (!hdma_dev)
		return -ENOMEM;

	hdma_dev->base = pcim_iomap_table(pdev)[PCI_BAR_2];
	hdma_dev->pdev = pdev;
	hdma_dev->chan_depth = HISI_DMA_Q_DEPTH_VAL;
	hdma_dev->chan_num = chan_num;
	hdma_dev->reg_layout = reg_layout;
	hdma_dev->queue_base = hdma_dev->base + hisi_dma_get_queue_base(pdev);

	pci_set_drvdata(pdev, hdma_dev);
	pci_set_master(pdev);

	msi_num = hisi_dma_get_msi_num(pdev);

	/* This will be freed by 'pcim_release()'. See 'pcim_enable_device()' */
	ret = pci_alloc_irq_vectors(pdev, msi_num, msi_num, PCI_IRQ_MSI);
	if (ret < 0) {
		dev_err(dev, "Failed to allocate MSI vectors!\n");
		return ret;
	}

	hisi_dma_init_dma_dev(hdma_dev);

	hisi_dma_set_mode(hdma_dev, RC);

	hisi_dma_init_hw(hdma_dev);

	ret = hisi_dma_enable_hw_channels(hdma_dev);
	if (ret < 0) {
		dev_err(dev, "failed to enable hw channel!\n");
		return ret;
	}

	ret = devm_add_action_or_reset(dev, hisi_dma_disable_hw_channels,
				       hdma_dev);
	if (ret)
		return ret;

	dma_dev = &hdma_dev->dma_dev;
	ret = dmaenginem_async_device_register(dma_dev);
	if (ret < 0) {
		dev_err(dev, "failed to register device!\n");
		return ret;
	}

	hisi_dma_create_debugfs(hdma_dev);

	return 0;
}

static const struct pci_device_id hisi_dma_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa122) },
	{ 0, }
};

static struct pci_driver hisi_dma_pci_driver = {
	.name		= "hisi_dma",
	.id_table	= hisi_dma_pci_tbl,
	.probe		= hisi_dma_probe,
};

module_pci_driver(hisi_dma_pci_driver);

MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_AUTHOR("Zhenfa Qiu <qiuzhenfa@hisilicon.com>");
MODULE_DESCRIPTION("HiSilicon Kunpeng DMA controller driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, hisi_dma_pci_tbl);