1ee3a5b23SShameer Kolothum // SPDX-License-Identifier: GPL-2.0-only
2ee3a5b23SShameer Kolothum /*
3ee3a5b23SShameer Kolothum  * Copyright (c) 2021, HiSilicon Ltd.
4ee3a5b23SShameer Kolothum  */
5ee3a5b23SShameer Kolothum 
6ee3a5b23SShameer Kolothum #include <linux/device.h>
7ee3a5b23SShameer Kolothum #include <linux/eventfd.h>
8ee3a5b23SShameer Kolothum #include <linux/file.h>
9ee3a5b23SShameer Kolothum #include <linux/hisi_acc_qm.h>
10ee3a5b23SShameer Kolothum #include <linux/interrupt.h>
11ee3a5b23SShameer Kolothum #include <linux/module.h>
12ee3a5b23SShameer Kolothum #include <linux/pci.h>
13ee3a5b23SShameer Kolothum #include <linux/vfio.h>
14ee3a5b23SShameer Kolothum #include <linux/vfio_pci_core.h>
15b0eed085SLongfang Liu #include <linux/anon_inodes.h>
16b0eed085SLongfang Liu 
17b0eed085SLongfang Liu #include "hisi_acc_vfio_pci.h"
18b0eed085SLongfang Liu 
/*
 * Poll QM_VF_STATE until bit 0 clears (VF acc device has finished its
 * outstanding work). Returns 0 once the bit is clear, -ETIMEDOUT if it
 * stays set past MB_POLL_TIMEOUT_US.
 */
static int qm_wait_dev_not_ready(struct hisi_qm *qm)
{
	u32 val;

	return readl_relaxed_poll_timeout(qm->io_base + QM_VF_STATE,
				val, !(val & 0x1), MB_POLL_PERIOD_US,
				MB_POLL_TIMEOUT_US);
}
28b0eed085SLongfang Liu 
29b0eed085SLongfang Liu /*
30b0eed085SLongfang Liu  * Each state Reg is checked 100 times,
31b0eed085SLongfang Liu  * with a delay of 100 microseconds after each check
32b0eed085SLongfang Liu  */
33b0eed085SLongfang Liu static u32 qm_check_reg_state(struct hisi_qm *qm, u32 regs)
34b0eed085SLongfang Liu {
35b0eed085SLongfang Liu 	int check_times = 0;
36b0eed085SLongfang Liu 	u32 state;
37b0eed085SLongfang Liu 
38b0eed085SLongfang Liu 	state = readl(qm->io_base + regs);
39b0eed085SLongfang Liu 	while (state && check_times < ERROR_CHECK_TIMEOUT) {
40b0eed085SLongfang Liu 		udelay(CHECK_DELAY_TIME);
41b0eed085SLongfang Liu 		state = readl(qm->io_base + regs);
42b0eed085SLongfang Liu 		check_times++;
43b0eed085SLongfang Liu 	}
44b0eed085SLongfang Liu 
45b0eed085SLongfang Liu 	return state;
46b0eed085SLongfang Liu }
47b0eed085SLongfang Liu 
48b0eed085SLongfang Liu static int qm_read_regs(struct hisi_qm *qm, u32 reg_addr,
49b0eed085SLongfang Liu 			u32 *data, u8 nums)
50b0eed085SLongfang Liu {
51b0eed085SLongfang Liu 	int i;
52b0eed085SLongfang Liu 
53b0eed085SLongfang Liu 	if (nums < 1 || nums > QM_REGS_MAX_LEN)
54b0eed085SLongfang Liu 		return -EINVAL;
55b0eed085SLongfang Liu 
56b0eed085SLongfang Liu 	for (i = 0; i < nums; i++) {
57b0eed085SLongfang Liu 		data[i] = readl(qm->io_base + reg_addr);
58b0eed085SLongfang Liu 		reg_addr += QM_REG_ADDR_OFFSET;
59b0eed085SLongfang Liu 	}
60b0eed085SLongfang Liu 
61b0eed085SLongfang Liu 	return 0;
62b0eed085SLongfang Liu }
63b0eed085SLongfang Liu 
64b0eed085SLongfang Liu static int qm_write_regs(struct hisi_qm *qm, u32 reg,
65b0eed085SLongfang Liu 			 u32 *data, u8 nums)
66b0eed085SLongfang Liu {
67b0eed085SLongfang Liu 	int i;
68b0eed085SLongfang Liu 
69b0eed085SLongfang Liu 	if (nums < 1 || nums > QM_REGS_MAX_LEN)
70b0eed085SLongfang Liu 		return -EINVAL;
71b0eed085SLongfang Liu 
72b0eed085SLongfang Liu 	for (i = 0; i < nums; i++)
73b0eed085SLongfang Liu 		writel(data[i], qm->io_base + reg + i * QM_REG_ADDR_OFFSET);
74b0eed085SLongfang Liu 
75b0eed085SLongfang Liu 	return 0;
76b0eed085SLongfang Liu }
77b0eed085SLongfang Liu 
/*
 * Read this function's SQC VFT entry via the mailbox and decode it.
 * On success stores the queue base in *base and returns the queue pair
 * count (always >= 1 due to the +1 below); returns a negative errno if
 * the mailbox command fails.
 */
static int qm_get_vft(struct hisi_qm *qm, u32 *base)
{
	u64 sqc_vft;
	u32 qp_num;
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
	if (ret)
		return ret;

	/* The 64-bit VFT entry comes back split across the low/high
	 * mailbox data registers; reassemble it before decoding.
	 */
	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		  QM_XQC_ADDR_OFFSET);
	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
		  (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return qp_num;
}
97b0eed085SLongfang Liu 
/*
 * Fetch the SQC BT base address via the mailbox. On success the 64-bit
 * address (reassembled from the low/high mailbox data registers) is
 * stored in *addr; returns a negative errno if the mailbox fails.
 */
static int qm_get_sqc(struct hisi_qm *qm, u64 *addr)
{
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, 0, 0, 1);
	if (ret)
		return ret;

	*addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		  QM_XQC_ADDR_OFFSET);

	return 0;
}
112b0eed085SLongfang Liu 
/*
 * Fetch the CQC BT base address via the mailbox. On success the 64-bit
 * address (reassembled from the low/high mailbox data registers) is
 * stored in *addr; returns a negative errno if the mailbox fails.
 */
static int qm_get_cqc(struct hisi_qm *qm, u64 *addr)
{
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, 0, 0, 1);
	if (ret)
		return ret;

	*addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		  QM_XQC_ADDR_OFFSET);

	return 0;
}
127b0eed085SLongfang Liu 
/*
 * Snapshot the VF QM's migration-relevant registers into @vf_data.
 * Reads the interrupt masks/sources, page size, and the 7-register
 * EQC/AEQC blocks. Returns 0 on success or the first read error.
 */
static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	ret = qm_read_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_VF_AEQ_INT_MASK\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_VF_EQ_INT_MASK\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_IFC_INT_SOURCE_V,
			   &vf_data->ifc_int_source, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_SOURCE_V\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_MASK\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_SET_V\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_PAGE_SIZE\n");
		return ret;
	}

	/* QM_EQC_DW has 7 regs */
	ret = qm_read_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to read QM_EQC_DW\n");
		return ret;
	}

	/* QM_AEQC_DW has 7 regs */
	ret = qm_read_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to read QM_AEQC_DW\n");
		return ret;
	}

	return 0;
}
186b0eed085SLongfang Liu 
187b0eed085SLongfang Liu static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
188b0eed085SLongfang Liu {
189b0eed085SLongfang Liu 	struct device *dev = &qm->pdev->dev;
190b0eed085SLongfang Liu 	int ret;
191b0eed085SLongfang Liu 
19242e1d1eeSLongfang Liu 	/* Check VF state */
193b0eed085SLongfang Liu 	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
194b0eed085SLongfang Liu 		dev_err(&qm->pdev->dev, "QM device is not ready to write\n");
195b0eed085SLongfang Liu 		return -EBUSY;
196b0eed085SLongfang Liu 	}
197b0eed085SLongfang Liu 
198b0eed085SLongfang Liu 	ret = qm_write_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
199b0eed085SLongfang Liu 	if (ret) {
200b0eed085SLongfang Liu 		dev_err(dev, "failed to write QM_VF_AEQ_INT_MASK\n");
201b0eed085SLongfang Liu 		return ret;
202b0eed085SLongfang Liu 	}
203b0eed085SLongfang Liu 
204b0eed085SLongfang Liu 	ret = qm_write_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
205b0eed085SLongfang Liu 	if (ret) {
206b0eed085SLongfang Liu 		dev_err(dev, "failed to write QM_VF_EQ_INT_MASK\n");
207b0eed085SLongfang Liu 		return ret;
208b0eed085SLongfang Liu 	}
209b0eed085SLongfang Liu 
210b0eed085SLongfang Liu 	ret = qm_write_regs(qm, QM_IFC_INT_SOURCE_V,
211b0eed085SLongfang Liu 			    &vf_data->ifc_int_source, 1);
212b0eed085SLongfang Liu 	if (ret) {
213b0eed085SLongfang Liu 		dev_err(dev, "failed to write QM_IFC_INT_SOURCE_V\n");
214b0eed085SLongfang Liu 		return ret;
215b0eed085SLongfang Liu 	}
216b0eed085SLongfang Liu 
217b0eed085SLongfang Liu 	ret = qm_write_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
218b0eed085SLongfang Liu 	if (ret) {
219b0eed085SLongfang Liu 		dev_err(dev, "failed to write QM_IFC_INT_MASK\n");
220b0eed085SLongfang Liu 		return ret;
221b0eed085SLongfang Liu 	}
222b0eed085SLongfang Liu 
223b0eed085SLongfang Liu 	ret = qm_write_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
224b0eed085SLongfang Liu 	if (ret) {
225b0eed085SLongfang Liu 		dev_err(dev, "failed to write QM_IFC_INT_SET_V\n");
226b0eed085SLongfang Liu 		return ret;
227b0eed085SLongfang Liu 	}
228b0eed085SLongfang Liu 
229b0eed085SLongfang Liu 	ret = qm_write_regs(qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
230b0eed085SLongfang Liu 	if (ret) {
231b0eed085SLongfang Liu 		dev_err(dev, "failed to write QM_QUE_ISO_CFG_V\n");
232b0eed085SLongfang Liu 		return ret;
233b0eed085SLongfang Liu 	}
234b0eed085SLongfang Liu 
235b0eed085SLongfang Liu 	ret = qm_write_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
236b0eed085SLongfang Liu 	if (ret) {
237b0eed085SLongfang Liu 		dev_err(dev, "failed to write QM_PAGE_SIZE\n");
238b0eed085SLongfang Liu 		return ret;
239b0eed085SLongfang Liu 	}
240b0eed085SLongfang Liu 
241b0eed085SLongfang Liu 	/* QM_EQC_DW has 7 regs */
242b0eed085SLongfang Liu 	ret = qm_write_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
243b0eed085SLongfang Liu 	if (ret) {
244b0eed085SLongfang Liu 		dev_err(dev, "failed to write QM_EQC_DW\n");
245b0eed085SLongfang Liu 		return ret;
246b0eed085SLongfang Liu 	}
247b0eed085SLongfang Liu 
248b0eed085SLongfang Liu 	/* QM_AEQC_DW has 7 regs */
249b0eed085SLongfang Liu 	ret = qm_write_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
250b0eed085SLongfang Liu 	if (ret) {
251b0eed085SLongfang Liu 		dev_err(dev, "failed to write QM_AEQC_DW\n");
252b0eed085SLongfang Liu 		return ret;
253b0eed085SLongfang Liu 	}
254b0eed085SLongfang Liu 
255b0eed085SLongfang Liu 	return 0;
256b0eed085SLongfang Liu }
257b0eed085SLongfang Liu 
/*
 * Ring a V2-layout doorbell for queue @qn. @cmd selects the doorbell
 * type (SQ/CQ doorbells use one base offset, EQ/AEQ another), @index
 * goes into the doorbell index field, and @priority into the priority
 * field. The 64-bit doorbell word is written in a single writeq.
 */
static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd,
		  u16 index, u8 priority)
{
	u64 doorbell;
	u64 dbase;
	u16 randata = 0;	/* random field intentionally left zero */

	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V2)	 |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

	writeq(doorbell, qm->io_base + dbase);
}
277b0eed085SLongfang Liu 
/*
 * From the PF, read VF @vf_id's SQC VFT entry through the VFT CFG
 * window. The sequence is fixed by hardware: wait for RDY, program the
 * read op/type/function, clear RDY and pulse OP_ENABLE, wait for RDY
 * again, then read the result. Stores the queue base in *rbase and
 * returns the queue pair count, or a negative errno on poll timeout.
 */
static int pf_qm_get_qp_num(struct hisi_qm *qm, int vf_id, u32 *rbase)
{
	unsigned int val;
	u64 sqc_vft;
	u32 qp_num;
	int ret;

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), MB_POLL_PERIOD_US,
					 MB_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
	/* 0 mean SQC VFT */
	writel(0x0, qm->io_base + QM_VFT_CFG_TYPE);
	writel(vf_id, qm->io_base + QM_VFT_CFG);

	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), MB_POLL_PERIOD_US,
					 MB_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	/* Reassemble the 64-bit VFT entry from the low/high data regs. */
	sqc_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
		  ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) <<
		  QM_XQC_ADDR_OFFSET);
	*rbase = QM_SQC_VFT_BASE_MASK_V2 &
		  (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
		  (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return qp_num;
}
315b0eed085SLongfang Liu 
/* Re-arm PF<->VF communication: clear stale IFC status, unmask the IFC
 * interrupt.
 */
static void qm_dev_cmd_init(struct hisi_qm *qm)
{
	/* Clear VF communication status registers. */
	writel(0x1, qm->io_base + QM_IFC_INT_SOURCE_V);

	/* Enable pf and vf communication. */
	writel(0x0, qm->io_base + QM_IFC_INT_MASK);
}
324b0eed085SLongfang Liu 
/*
 * Trigger a VF QM cache writeback and wait for the DONE bit. Must
 * complete before saving state so that SQC contents are in memory.
 * Returns 0 on success, -EINVAL if the writeback never completes.
 */
static int vf_qm_cache_wb(struct hisi_qm *qm)
{
	unsigned int val;

	writel(0x1, qm->io_base + QM_CACHE_WB_START);
	if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
				       val, val & BIT(0), MB_POLL_PERIOD_US,
				       MB_POLL_TIMEOUT_US)) {
		dev_err(&qm->pdev->dev, "vf QM writeback sqc cache fail\n");
		return -EINVAL;
	}

	return 0;
}
339b0eed085SLongfang Liu 
340af72f53cSLongfang Liu static void vf_qm_fun_reset(struct hisi_qm *qm)
341b0eed085SLongfang Liu {
342b0eed085SLongfang Liu 	int i;
343b0eed085SLongfang Liu 
344b0eed085SLongfang Liu 	for (i = 0; i < qm->qp_num; i++)
345b0eed085SLongfang Liu 		qm_db(qm, i, QM_DOORBELL_CMD_SQ, 0, 1);
346b0eed085SLongfang Liu }
347b0eed085SLongfang Liu 
/* Ask the VF QM to pause via the mailbox (no payload, no queue). */
static int vf_qm_func_stop(struct hisi_qm *qm)
{
	return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
}
352b0eed085SLongfang Liu 
/*
 * Validate that incoming migration data matches this VF: magic value,
 * device ID, queue pair count, and queue isolation configuration must
 * all agree. On a successful match the saved QM state is written to
 * QM_VF_STATE and match_done is latched so the check runs only once
 * per resume. Returns 0 if matched (or nothing to check yet), -EINVAL
 * on mismatch, or a register access error.
 */
static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			     struct hisi_acc_vf_migration_file *migf)
{
	struct acc_vf_data *vf_data = &migf->vf_data;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
	struct device *dev = &vf_qm->pdev->dev;
	u32 que_iso_state;
	int ret;

	/* Not enough data received yet, or already matched: nothing to do. */
	if (migf->total_length < QM_MATCH_SIZE || hisi_acc_vdev->match_done)
		return 0;

	if (vf_data->acc_magic != ACC_DEV_MAGIC) {
		dev_err(dev, "failed to match ACC_DEV_MAGIC\n");
		return -EINVAL;
	}

	if (vf_data->dev_id != hisi_acc_vdev->vf_dev->device) {
		dev_err(dev, "failed to match VF devices\n");
		return -EINVAL;
	}

	/* VF qp num check */
	ret = qm_get_vft(vf_qm, &vf_qm->qp_base);
	if (ret <= 0) {
		dev_err(dev, "failed to get vft qp nums\n");
		return -EINVAL;
	}

	if (ret != vf_data->qp_num) {
		dev_err(dev, "failed to match VF qp num\n");
		return -EINVAL;
	}

	vf_qm->qp_num = ret;

	/* VF isolation state check */
	ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &que_iso_state, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_QUE_ISO_CFG_V\n");
		return ret;
	}

	if (vf_data->que_iso_cfg != que_iso_state) {
		dev_err(dev, "failed to match isolation state\n");
		return -EINVAL;
	}

	ret = qm_write_regs(vf_qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_VF_STATE\n");
		return ret;
	}

	hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
	hisi_acc_vdev->match_done = true;
	return 0;
}
412b0eed085SLongfang Liu 
/*
 * Fill @vf_data with the identity fields the destination will match
 * against (see vf_qm_check_match()): magic, VF device ID, queue base
 * and count (read from the PF's VFT), and the queue isolation config.
 * Returns 0 on success or a negative errno.
 */
static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
				struct acc_vf_data *vf_data)
{
	struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
	struct device *dev = &pf_qm->pdev->dev;
	int vf_id = hisi_acc_vdev->vf_id;
	int ret;

	vf_data->acc_magic = ACC_DEV_MAGIC;
	/* Save device id */
	vf_data->dev_id = hisi_acc_vdev->vf_dev->device;

	/* VF qp num save from PF */
	ret = pf_qm_get_qp_num(pf_qm, vf_id, &vf_data->qp_base);
	if (ret <= 0) {
		dev_err(dev, "failed to get vft qp nums!\n");
		return -EINVAL;
	}

	vf_data->qp_num = ret;

	/* VF isolation state save from PF */
	ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_QUE_ISO_CFG_V!\n");
		return ret;
	}

	return 0;
}
443b0eed085SLongfang Liu 
/*
 * Load saved migration state into the VF hardware: restore the DMA
 * addresses and queue layout, write back the register snapshot, then
 * re-program the SQC/CQC base tables via the mailbox and re-arm
 * PF<->VF communication. A transfer containing only the match header
 * (QM_MATCH_SIZE) means the source VF was not running — nothing to
 * restore. Returns 0 on success or a negative errno.
 */
static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			   struct hisi_acc_vf_migration_file *migf)
{
	struct hisi_qm *qm = &hisi_acc_vdev->vf_qm;
	struct device *dev = &qm->pdev->dev;
	struct acc_vf_data *vf_data = &migf->vf_data;
	int ret;

	/* Return if only match data was transferred */
	if (migf->total_length == QM_MATCH_SIZE)
		return 0;

	if (migf->total_length < sizeof(struct acc_vf_data))
		return -EINVAL;

	qm->eqe_dma = vf_data->eqe_dma;
	qm->aeqe_dma = vf_data->aeqe_dma;
	qm->sqc_dma = vf_data->sqc_dma;
	qm->cqc_dma = vf_data->cqc_dma;

	qm->qp_base = vf_data->qp_base;
	qm->qp_num = vf_data->qp_num;

	ret = qm_set_regs(qm, vf_data);
	if (ret) {
		dev_err(dev, "set VF regs failed\n");
		return ret;
	}

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
	if (ret) {
		dev_err(dev, "set sqc failed\n");
		return ret;
	}

	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
	if (ret) {
		dev_err(dev, "set cqc failed\n");
		return ret;
	}

	qm_dev_cmd_init(qm);
	return 0;
}
488b0eed085SLongfang Liu 
/*
 * Save the VF QM state into @migf. If the device never becomes idle
 * (QM_VF_STATE poll times out) only the match header is saved and the
 * state is recorded as QM_NOT_READY — the destination then skips the
 * full restore. Otherwise: write back caches, snapshot registers,
 * reconstruct the 64-bit EQE/AEQE DMA addresses from the 32-bit
 * EQC/AEQC register pairs, and fetch the SQC/CQC base addresses.
 * Returns 0 on success or a negative errno.
 */
static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			    struct hisi_acc_vf_migration_file *migf)
{
	struct acc_vf_data *vf_data = &migf->vf_data;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	struct device *dev = &vf_qm->pdev->dev;
	int ret;

	if (unlikely(qm_wait_dev_not_ready(vf_qm))) {
		/* Update state and return with match data */
		vf_data->vf_qm_state = QM_NOT_READY;
		hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
		migf->total_length = QM_MATCH_SIZE;
		return 0;
	}

	vf_data->vf_qm_state = QM_READY;
	hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;

	ret = vf_qm_cache_wb(vf_qm);
	if (ret) {
		dev_err(dev, "failed to writeback QM Cache!\n");
		return ret;
	}

	ret = qm_get_regs(vf_qm, vf_data);
	if (ret)
		return -EINVAL;

	/* Every reg is 32 bit, the dma address is 64 bit. */
	vf_data->eqe_dma = vf_data->qm_eqc_dw[1];
	vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
	vf_data->eqe_dma |= vf_data->qm_eqc_dw[0];
	vf_data->aeqe_dma = vf_data->qm_aeqc_dw[1];
	vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
	vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[0];

	/* Through SQC_BT/CQC_BT to get sqc and cqc address */
	ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma);
	if (ret) {
		dev_err(dev, "failed to read SQC addr!\n");
		return -EINVAL;
	}

	ret = qm_get_cqc(vf_qm, &vf_data->cqc_dma);
	if (ret) {
		dev_err(dev, "failed to read CQC addr!\n");
		return -EINVAL;
	}

	migf->total_length = sizeof(struct acc_vf_data);
	return 0;
}
542b0eed085SLongfang Liu 
543245898ebSShameer Kolothum static struct hisi_acc_vf_core_device *hisi_acc_drvdata(struct pci_dev *pdev)
544245898ebSShameer Kolothum {
545245898ebSShameer Kolothum 	struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);
546245898ebSShameer Kolothum 
547245898ebSShameer Kolothum 	return container_of(core_device, struct hisi_acc_vf_core_device,
548245898ebSShameer Kolothum 			    core_device);
549245898ebSShameer Kolothum }
550245898ebSShameer Kolothum 
/*
 * Check the PF's RAS state and Function INT state. Each check polls the
 * relevant status register via qm_check_reg_state(); a persistent
 * non-zero value fails with -EBUSY. The final per-module check depends
 * on which accelerator type (SEC/HPRE/ZIP) the VF belongs to.
 */
static int
hisi_acc_check_int_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_qm *vfqm = &hisi_acc_vdev->vf_qm;
	struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
	struct pci_dev *vf_pdev = hisi_acc_vdev->vf_dev;
	struct device *dev = &qm->pdev->dev;
	u32 state;

	/* Check RAS state */
	state = qm_check_reg_state(qm, QM_ABNORMAL_INT_STATUS);
	if (state) {
		dev_err(dev, "failed to check QM RAS state!\n");
		return -EBUSY;
	}

	/* Check Function Communication state between PF and VF */
	state = qm_check_reg_state(vfqm, QM_IFC_INT_STATUS);
	if (state) {
		dev_err(dev, "failed to check QM IFC INT state!\n");
		return -EBUSY;
	}
	state = qm_check_reg_state(vfqm, QM_IFC_INT_SET_V);
	if (state) {
		dev_err(dev, "failed to check QM IFC INT SET state!\n");
		return -EBUSY;
	}

	/* Check submodule task state */
	switch (vf_pdev->device) {
	case PCI_DEVICE_ID_HUAWEI_SEC_VF:
		state = qm_check_reg_state(qm, SEC_CORE_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM SEC Core INT state!\n");
			return -EBUSY;
		}
		return 0;
	case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
		state = qm_check_reg_state(qm, HPRE_HAC_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM HPRE HAC INT state!\n");
			return -EBUSY;
		}
		return 0;
	case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
		state = qm_check_reg_state(qm, HZIP_CORE_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM ZIP Core INT state!\n");
			return -EBUSY;
		}
		return 0;
	default:
		dev_err(dev, "failed to detect acc module type!\n");
		return -EINVAL;
	}
}
608b0eed085SLongfang Liu 
/*
 * Mark a migration file as disabled and reset its transfer position so
 * later read/write on the fd fails cleanly. Serialized against users
 * of the file by migf->lock.
 */
static void hisi_acc_vf_disable_fd(struct hisi_acc_vf_migration_file *migf)
{
	mutex_lock(&migf->lock);
	migf->disabled = true;
	migf->total_length = 0;
	migf->filp->f_pos = 0;
	mutex_unlock(&migf->lock);
}
617b0eed085SLongfang Liu 
/*
 * Disable and drop both migration file references (resuming and
 * saving), if present. fput() releases the driver's hold; the files
 * are already flagged disabled for any remaining fd holders.
 */
static void hisi_acc_vf_disable_fds(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	if (hisi_acc_vdev->resuming_migf) {
		hisi_acc_vf_disable_fd(hisi_acc_vdev->resuming_migf);
		fput(hisi_acc_vdev->resuming_migf->filp);
		hisi_acc_vdev->resuming_migf = NULL;
	}

	if (hisi_acc_vdev->saving_migf) {
		hisi_acc_vf_disable_fd(hisi_acc_vdev->saving_migf);
		fput(hisi_acc_vdev->saving_migf->filp);
		hisi_acc_vdev->saving_migf = NULL;
	}
}
632b0eed085SLongfang Liu 
/*
 * This function is called in all state_mutex unlock cases to
 * handle a 'deferred_reset' if exists.
 *
 * The reset handler may run while state_mutex is held elsewhere and
 * then sets deferred_reset under reset_lock; here we consume that flag
 * (possibly repeatedly, hence the loop) before finally releasing
 * state_mutex while still holding reset_lock, so a reset cannot sneak
 * in between the flag check and the unlock.
 */
static void
hisi_acc_vf_state_mutex_unlock(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
again:
	spin_lock(&hisi_acc_vdev->reset_lock);
	if (hisi_acc_vdev->deferred_reset) {
		hisi_acc_vdev->deferred_reset = false;
		/* Drop the spinlock before the non-atomic cleanup below. */
		spin_unlock(&hisi_acc_vdev->reset_lock);
		hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
		hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
		hisi_acc_vf_disable_fds(hisi_acc_vdev);
		goto again;
	}
	mutex_unlock(&hisi_acc_vdev->state_mutex);
	spin_unlock(&hisi_acc_vdev->reset_lock);
}
6534406f46cSShameer Kolothum 
654b0eed085SLongfang Liu static void hisi_acc_vf_start_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
655b0eed085SLongfang Liu {
656b0eed085SLongfang Liu 	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
657b0eed085SLongfang Liu 
658b0eed085SLongfang Liu 	if (hisi_acc_vdev->vf_qm_state != QM_READY)
659b0eed085SLongfang Liu 		return;
660b0eed085SLongfang Liu 
661af72f53cSLongfang Liu 	/* Make sure the device is enabled */
662af72f53cSLongfang Liu 	qm_dev_cmd_init(vf_qm);
663af72f53cSLongfang Liu 
664af72f53cSLongfang Liu 	vf_qm_fun_reset(vf_qm);
665b0eed085SLongfang Liu }
666b0eed085SLongfang Liu 
667b0eed085SLongfang Liu static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
668b0eed085SLongfang Liu {
669b0eed085SLongfang Liu 	struct device *dev = &hisi_acc_vdev->vf_dev->dev;
670b0eed085SLongfang Liu 	struct hisi_acc_vf_migration_file *migf = hisi_acc_vdev->resuming_migf;
671b0eed085SLongfang Liu 	int ret;
672b0eed085SLongfang Liu 
673b0eed085SLongfang Liu 	/* Recover data to VF */
674b0eed085SLongfang Liu 	ret = vf_qm_load_data(hisi_acc_vdev, migf);
675b0eed085SLongfang Liu 	if (ret) {
676b0eed085SLongfang Liu 		dev_err(dev, "failed to recover the VF!\n");
677b0eed085SLongfang Liu 		return ret;
678b0eed085SLongfang Liu 	}
679b0eed085SLongfang Liu 
680b0eed085SLongfang Liu 	return 0;
681b0eed085SLongfang Liu }
682b0eed085SLongfang Liu 
683b0eed085SLongfang Liu static int hisi_acc_vf_release_file(struct inode *inode, struct file *filp)
684b0eed085SLongfang Liu {
685b0eed085SLongfang Liu 	struct hisi_acc_vf_migration_file *migf = filp->private_data;
686b0eed085SLongfang Liu 
687b0eed085SLongfang Liu 	hisi_acc_vf_disable_fd(migf);
688b0eed085SLongfang Liu 	mutex_destroy(&migf->lock);
689b0eed085SLongfang Liu 	kfree(migf);
690b0eed085SLongfang Liu 	return 0;
691b0eed085SLongfang Liu }
692b0eed085SLongfang Liu 
693b0eed085SLongfang Liu static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *buf,
694b0eed085SLongfang Liu 					size_t len, loff_t *pos)
695b0eed085SLongfang Liu {
696b0eed085SLongfang Liu 	struct hisi_acc_vf_migration_file *migf = filp->private_data;
697b0eed085SLongfang Liu 	loff_t requested_length;
698b0eed085SLongfang Liu 	ssize_t done = 0;
699b0eed085SLongfang Liu 	int ret;
700b0eed085SLongfang Liu 
701b0eed085SLongfang Liu 	if (pos)
702b0eed085SLongfang Liu 		return -ESPIPE;
703b0eed085SLongfang Liu 	pos = &filp->f_pos;
704b0eed085SLongfang Liu 
705b0eed085SLongfang Liu 	if (*pos < 0 ||
706b0eed085SLongfang Liu 	    check_add_overflow((loff_t)len, *pos, &requested_length))
707b0eed085SLongfang Liu 		return -EINVAL;
708b0eed085SLongfang Liu 
709b0eed085SLongfang Liu 	if (requested_length > sizeof(struct acc_vf_data))
710b0eed085SLongfang Liu 		return -ENOMEM;
711b0eed085SLongfang Liu 
712b0eed085SLongfang Liu 	mutex_lock(&migf->lock);
713b0eed085SLongfang Liu 	if (migf->disabled) {
714b0eed085SLongfang Liu 		done = -ENODEV;
715b0eed085SLongfang Liu 		goto out_unlock;
716b0eed085SLongfang Liu 	}
717b0eed085SLongfang Liu 
718b0eed085SLongfang Liu 	ret = copy_from_user(&migf->vf_data, buf, len);
719b0eed085SLongfang Liu 	if (ret) {
720b0eed085SLongfang Liu 		done = -EFAULT;
721b0eed085SLongfang Liu 		goto out_unlock;
722b0eed085SLongfang Liu 	}
723b0eed085SLongfang Liu 	*pos += len;
724b0eed085SLongfang Liu 	done = len;
725b0eed085SLongfang Liu 	migf->total_length += len;
726*190125adSShameer Kolothum 
727*190125adSShameer Kolothum 	ret = vf_qm_check_match(migf->hisi_acc_vdev, migf);
728*190125adSShameer Kolothum 	if (ret)
729*190125adSShameer Kolothum 		done = -EFAULT;
730b0eed085SLongfang Liu out_unlock:
731b0eed085SLongfang Liu 	mutex_unlock(&migf->lock);
732b0eed085SLongfang Liu 	return done;
733b0eed085SLongfang Liu }
734b0eed085SLongfang Liu 
735b0eed085SLongfang Liu static const struct file_operations hisi_acc_vf_resume_fops = {
736b0eed085SLongfang Liu 	.owner = THIS_MODULE,
737b0eed085SLongfang Liu 	.write = hisi_acc_vf_resume_write,
738b0eed085SLongfang Liu 	.release = hisi_acc_vf_release_file,
739b0eed085SLongfang Liu 	.llseek = no_llseek,
740b0eed085SLongfang Liu };
741b0eed085SLongfang Liu 
742b0eed085SLongfang Liu static struct hisi_acc_vf_migration_file *
743b0eed085SLongfang Liu hisi_acc_vf_pci_resume(struct hisi_acc_vf_core_device *hisi_acc_vdev)
744b0eed085SLongfang Liu {
745b0eed085SLongfang Liu 	struct hisi_acc_vf_migration_file *migf;
746b0eed085SLongfang Liu 
747b0eed085SLongfang Liu 	migf = kzalloc(sizeof(*migf), GFP_KERNEL);
748b0eed085SLongfang Liu 	if (!migf)
749b0eed085SLongfang Liu 		return ERR_PTR(-ENOMEM);
750b0eed085SLongfang Liu 
751b0eed085SLongfang Liu 	migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_resume_fops, migf,
752b0eed085SLongfang Liu 					O_WRONLY);
753b0eed085SLongfang Liu 	if (IS_ERR(migf->filp)) {
754b0eed085SLongfang Liu 		int err = PTR_ERR(migf->filp);
755b0eed085SLongfang Liu 
756b0eed085SLongfang Liu 		kfree(migf);
757b0eed085SLongfang Liu 		return ERR_PTR(err);
758b0eed085SLongfang Liu 	}
759b0eed085SLongfang Liu 
760b0eed085SLongfang Liu 	stream_open(migf->filp->f_inode, migf->filp);
761b0eed085SLongfang Liu 	mutex_init(&migf->lock);
76264ffbbb1SShameer Kolothum 	migf->hisi_acc_vdev = hisi_acc_vdev;
763b0eed085SLongfang Liu 	return migf;
764b0eed085SLongfang Liu }
765b0eed085SLongfang Liu 
76664ffbbb1SShameer Kolothum static long hisi_acc_vf_precopy_ioctl(struct file *filp,
76764ffbbb1SShameer Kolothum 				      unsigned int cmd, unsigned long arg)
76864ffbbb1SShameer Kolothum {
76964ffbbb1SShameer Kolothum 	struct hisi_acc_vf_migration_file *migf = filp->private_data;
77064ffbbb1SShameer Kolothum 	struct hisi_acc_vf_core_device *hisi_acc_vdev = migf->hisi_acc_vdev;
77164ffbbb1SShameer Kolothum 	loff_t *pos = &filp->f_pos;
77264ffbbb1SShameer Kolothum 	struct vfio_precopy_info info;
77364ffbbb1SShameer Kolothum 	unsigned long minsz;
77464ffbbb1SShameer Kolothum 	int ret;
77564ffbbb1SShameer Kolothum 
77664ffbbb1SShameer Kolothum 	if (cmd != VFIO_MIG_GET_PRECOPY_INFO)
77764ffbbb1SShameer Kolothum 		return -ENOTTY;
77864ffbbb1SShameer Kolothum 
77964ffbbb1SShameer Kolothum 	minsz = offsetofend(struct vfio_precopy_info, dirty_bytes);
78064ffbbb1SShameer Kolothum 
78164ffbbb1SShameer Kolothum 	if (copy_from_user(&info, (void __user *)arg, minsz))
78264ffbbb1SShameer Kolothum 		return -EFAULT;
78364ffbbb1SShameer Kolothum 	if (info.argsz < minsz)
78464ffbbb1SShameer Kolothum 		return -EINVAL;
78564ffbbb1SShameer Kolothum 
78664ffbbb1SShameer Kolothum 	mutex_lock(&hisi_acc_vdev->state_mutex);
78764ffbbb1SShameer Kolothum 	if (hisi_acc_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY) {
78864ffbbb1SShameer Kolothum 		mutex_unlock(&hisi_acc_vdev->state_mutex);
78964ffbbb1SShameer Kolothum 		return -EINVAL;
79064ffbbb1SShameer Kolothum 	}
79164ffbbb1SShameer Kolothum 
79264ffbbb1SShameer Kolothum 	mutex_lock(&migf->lock);
79364ffbbb1SShameer Kolothum 
79464ffbbb1SShameer Kolothum 	if (migf->disabled) {
79564ffbbb1SShameer Kolothum 		ret = -ENODEV;
79664ffbbb1SShameer Kolothum 		goto out;
79764ffbbb1SShameer Kolothum 	}
79864ffbbb1SShameer Kolothum 
79964ffbbb1SShameer Kolothum 	if (*pos > migf->total_length) {
80064ffbbb1SShameer Kolothum 		ret = -EINVAL;
80164ffbbb1SShameer Kolothum 		goto out;
80264ffbbb1SShameer Kolothum 	}
80364ffbbb1SShameer Kolothum 
80464ffbbb1SShameer Kolothum 	info.dirty_bytes = 0;
80564ffbbb1SShameer Kolothum 	info.initial_bytes = migf->total_length - *pos;
80664ffbbb1SShameer Kolothum 
80764ffbbb1SShameer Kolothum 	ret = copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
80864ffbbb1SShameer Kolothum out:
80964ffbbb1SShameer Kolothum 	mutex_unlock(&migf->lock);
81064ffbbb1SShameer Kolothum 	mutex_unlock(&hisi_acc_vdev->state_mutex);
81164ffbbb1SShameer Kolothum 	return ret;
81264ffbbb1SShameer Kolothum }
81364ffbbb1SShameer Kolothum 
814b0eed085SLongfang Liu static ssize_t hisi_acc_vf_save_read(struct file *filp, char __user *buf, size_t len,
815b0eed085SLongfang Liu 				     loff_t *pos)
816b0eed085SLongfang Liu {
817b0eed085SLongfang Liu 	struct hisi_acc_vf_migration_file *migf = filp->private_data;
818b0eed085SLongfang Liu 	ssize_t done = 0;
819b0eed085SLongfang Liu 	int ret;
820b0eed085SLongfang Liu 
821b0eed085SLongfang Liu 	if (pos)
822b0eed085SLongfang Liu 		return -ESPIPE;
823b0eed085SLongfang Liu 	pos = &filp->f_pos;
824b0eed085SLongfang Liu 
825b0eed085SLongfang Liu 	mutex_lock(&migf->lock);
826b0eed085SLongfang Liu 	if (*pos > migf->total_length) {
827b0eed085SLongfang Liu 		done = -EINVAL;
828b0eed085SLongfang Liu 		goto out_unlock;
829b0eed085SLongfang Liu 	}
830b0eed085SLongfang Liu 
831b0eed085SLongfang Liu 	if (migf->disabled) {
832b0eed085SLongfang Liu 		done = -ENODEV;
833b0eed085SLongfang Liu 		goto out_unlock;
834b0eed085SLongfang Liu 	}
835b0eed085SLongfang Liu 
836b0eed085SLongfang Liu 	len = min_t(size_t, migf->total_length - *pos, len);
837b0eed085SLongfang Liu 	if (len) {
838b0eed085SLongfang Liu 		ret = copy_to_user(buf, &migf->vf_data, len);
839b0eed085SLongfang Liu 		if (ret) {
840b0eed085SLongfang Liu 			done = -EFAULT;
841b0eed085SLongfang Liu 			goto out_unlock;
842b0eed085SLongfang Liu 		}
843b0eed085SLongfang Liu 		*pos += len;
844b0eed085SLongfang Liu 		done = len;
845b0eed085SLongfang Liu 	}
846b0eed085SLongfang Liu out_unlock:
847b0eed085SLongfang Liu 	mutex_unlock(&migf->lock);
848b0eed085SLongfang Liu 	return done;
849b0eed085SLongfang Liu }
850b0eed085SLongfang Liu 
851b0eed085SLongfang Liu static const struct file_operations hisi_acc_vf_save_fops = {
852b0eed085SLongfang Liu 	.owner = THIS_MODULE,
853b0eed085SLongfang Liu 	.read = hisi_acc_vf_save_read,
85464ffbbb1SShameer Kolothum 	.unlocked_ioctl = hisi_acc_vf_precopy_ioctl,
85564ffbbb1SShameer Kolothum 	.compat_ioctl = compat_ptr_ioctl,
856b0eed085SLongfang Liu 	.release = hisi_acc_vf_release_file,
857b0eed085SLongfang Liu 	.llseek = no_llseek,
858b0eed085SLongfang Liu };
859b0eed085SLongfang Liu 
860b0eed085SLongfang Liu static struct hisi_acc_vf_migration_file *
861d9a871e4SShameer Kolothum hisi_acc_open_saving_migf(struct hisi_acc_vf_core_device *hisi_acc_vdev)
862b0eed085SLongfang Liu {
863b0eed085SLongfang Liu 	struct hisi_acc_vf_migration_file *migf;
864b0eed085SLongfang Liu 	int ret;
865b0eed085SLongfang Liu 
866b0eed085SLongfang Liu 	migf = kzalloc(sizeof(*migf), GFP_KERNEL);
867b0eed085SLongfang Liu 	if (!migf)
868b0eed085SLongfang Liu 		return ERR_PTR(-ENOMEM);
869b0eed085SLongfang Liu 
870b0eed085SLongfang Liu 	migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_save_fops, migf,
871b0eed085SLongfang Liu 					O_RDONLY);
872b0eed085SLongfang Liu 	if (IS_ERR(migf->filp)) {
873b0eed085SLongfang Liu 		int err = PTR_ERR(migf->filp);
874b0eed085SLongfang Liu 
875b0eed085SLongfang Liu 		kfree(migf);
876b0eed085SLongfang Liu 		return ERR_PTR(err);
877b0eed085SLongfang Liu 	}
878b0eed085SLongfang Liu 
879b0eed085SLongfang Liu 	stream_open(migf->filp->f_inode, migf->filp);
880b0eed085SLongfang Liu 	mutex_init(&migf->lock);
88164ffbbb1SShameer Kolothum 	migf->hisi_acc_vdev = hisi_acc_vdev;
882b0eed085SLongfang Liu 
883d9a871e4SShameer Kolothum 	ret = vf_qm_get_match_data(hisi_acc_vdev, &migf->vf_data);
884b0eed085SLongfang Liu 	if (ret) {
885b0eed085SLongfang Liu 		fput(migf->filp);
886b0eed085SLongfang Liu 		return ERR_PTR(ret);
887b0eed085SLongfang Liu 	}
888b0eed085SLongfang Liu 
889b0eed085SLongfang Liu 	return migf;
890b0eed085SLongfang Liu }
891b0eed085SLongfang Liu 
892d9a871e4SShameer Kolothum static struct hisi_acc_vf_migration_file *
893d9a871e4SShameer Kolothum hisi_acc_vf_pre_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev)
894d9a871e4SShameer Kolothum {
895d9a871e4SShameer Kolothum 	struct hisi_acc_vf_migration_file *migf;
896d9a871e4SShameer Kolothum 
897d9a871e4SShameer Kolothum 	migf = hisi_acc_open_saving_migf(hisi_acc_vdev);
898d9a871e4SShameer Kolothum 	if (IS_ERR(migf))
899d9a871e4SShameer Kolothum 		return migf;
900d9a871e4SShameer Kolothum 
901d9a871e4SShameer Kolothum 	migf->total_length = QM_MATCH_SIZE;
902d9a871e4SShameer Kolothum 	return migf;
903d9a871e4SShameer Kolothum }
904d9a871e4SShameer Kolothum 
905d9a871e4SShameer Kolothum static struct hisi_acc_vf_migration_file *
906d9a871e4SShameer Kolothum hisi_acc_vf_stop_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev, bool open)
907d9a871e4SShameer Kolothum {
908d9a871e4SShameer Kolothum 	int ret;
909d9a871e4SShameer Kolothum 	struct hisi_acc_vf_migration_file *migf = NULL;
910d9a871e4SShameer Kolothum 
911d9a871e4SShameer Kolothum 	if (open) {
912d9a871e4SShameer Kolothum 		/*
913d9a871e4SShameer Kolothum 		 * Userspace didn't use PRECOPY support. Hence saving_migf
914d9a871e4SShameer Kolothum 		 * is not opened yet.
915d9a871e4SShameer Kolothum 		 */
916d9a871e4SShameer Kolothum 		migf = hisi_acc_open_saving_migf(hisi_acc_vdev);
917d9a871e4SShameer Kolothum 		if (IS_ERR(migf))
918d9a871e4SShameer Kolothum 			return migf;
919d9a871e4SShameer Kolothum 	} else {
920d9a871e4SShameer Kolothum 		migf = hisi_acc_vdev->saving_migf;
921d9a871e4SShameer Kolothum 	}
922d9a871e4SShameer Kolothum 
923d9a871e4SShameer Kolothum 	ret = vf_qm_state_save(hisi_acc_vdev, migf);
924d9a871e4SShameer Kolothum 	if (ret)
925d9a871e4SShameer Kolothum 		return ERR_PTR(ret);
926d9a871e4SShameer Kolothum 
927d9a871e4SShameer Kolothum 	return open ? migf : NULL;
928d9a871e4SShameer Kolothum }
929d9a871e4SShameer Kolothum 
930b0eed085SLongfang Liu static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
931b0eed085SLongfang Liu {
932b0eed085SLongfang Liu 	struct device *dev = &hisi_acc_vdev->vf_dev->dev;
933b0eed085SLongfang Liu 	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
934b0eed085SLongfang Liu 	int ret;
935b0eed085SLongfang Liu 
936b0eed085SLongfang Liu 	ret = vf_qm_func_stop(vf_qm);
937b0eed085SLongfang Liu 	if (ret) {
938b0eed085SLongfang Liu 		dev_err(dev, "failed to stop QM VF function!\n");
939b0eed085SLongfang Liu 		return ret;
940b0eed085SLongfang Liu 	}
941b0eed085SLongfang Liu 
942b0eed085SLongfang Liu 	ret = hisi_acc_check_int_state(hisi_acc_vdev);
943b0eed085SLongfang Liu 	if (ret) {
944b0eed085SLongfang Liu 		dev_err(dev, "failed to check QM INT state!\n");
945b0eed085SLongfang Liu 		return ret;
946b0eed085SLongfang Liu 	}
947b0eed085SLongfang Liu 	return 0;
948b0eed085SLongfang Liu }
949b0eed085SLongfang Liu 
950b0eed085SLongfang Liu static struct file *
951b0eed085SLongfang Liu hisi_acc_vf_set_device_state(struct hisi_acc_vf_core_device *hisi_acc_vdev,
952b0eed085SLongfang Liu 			     u32 new)
953b0eed085SLongfang Liu {
954b0eed085SLongfang Liu 	u32 cur = hisi_acc_vdev->mig_state;
955b0eed085SLongfang Liu 	int ret;
956b0eed085SLongfang Liu 
957d9a871e4SShameer Kolothum 	if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) {
958d9a871e4SShameer Kolothum 		struct hisi_acc_vf_migration_file *migf;
959d9a871e4SShameer Kolothum 
960d9a871e4SShameer Kolothum 		migf = hisi_acc_vf_pre_copy(hisi_acc_vdev);
961d9a871e4SShameer Kolothum 		if (IS_ERR(migf))
962d9a871e4SShameer Kolothum 			return ERR_CAST(migf);
963d9a871e4SShameer Kolothum 		get_file(migf->filp);
964d9a871e4SShameer Kolothum 		hisi_acc_vdev->saving_migf = migf;
965d9a871e4SShameer Kolothum 		return migf->filp;
966d9a871e4SShameer Kolothum 	}
967d9a871e4SShameer Kolothum 
968d9a871e4SShameer Kolothum 	if (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_STOP_COPY) {
969d9a871e4SShameer Kolothum 		struct hisi_acc_vf_migration_file *migf;
970d9a871e4SShameer Kolothum 
971d9a871e4SShameer Kolothum 		ret = hisi_acc_vf_stop_device(hisi_acc_vdev);
972d9a871e4SShameer Kolothum 		if (ret)
973d9a871e4SShameer Kolothum 			return ERR_PTR(ret);
974d9a871e4SShameer Kolothum 
975d9a871e4SShameer Kolothum 		migf = hisi_acc_vf_stop_copy(hisi_acc_vdev, false);
976d9a871e4SShameer Kolothum 		if (IS_ERR(migf))
977d9a871e4SShameer Kolothum 			return ERR_CAST(migf);
978d9a871e4SShameer Kolothum 
979d9a871e4SShameer Kolothum 		return NULL;
980d9a871e4SShameer Kolothum 	}
981d9a871e4SShameer Kolothum 
982b0eed085SLongfang Liu 	if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_STOP) {
983b0eed085SLongfang Liu 		ret = hisi_acc_vf_stop_device(hisi_acc_vdev);
984b0eed085SLongfang Liu 		if (ret)
985b0eed085SLongfang Liu 			return ERR_PTR(ret);
986b0eed085SLongfang Liu 		return NULL;
987b0eed085SLongfang Liu 	}
988b0eed085SLongfang Liu 
989b0eed085SLongfang Liu 	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
990b0eed085SLongfang Liu 		struct hisi_acc_vf_migration_file *migf;
991b0eed085SLongfang Liu 
992d9a871e4SShameer Kolothum 		migf = hisi_acc_vf_stop_copy(hisi_acc_vdev, true);
993b0eed085SLongfang Liu 		if (IS_ERR(migf))
994b0eed085SLongfang Liu 			return ERR_CAST(migf);
995b0eed085SLongfang Liu 		get_file(migf->filp);
996b0eed085SLongfang Liu 		hisi_acc_vdev->saving_migf = migf;
997b0eed085SLongfang Liu 		return migf->filp;
998b0eed085SLongfang Liu 	}
999b0eed085SLongfang Liu 
1000b0eed085SLongfang Liu 	if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP)) {
1001b0eed085SLongfang Liu 		hisi_acc_vf_disable_fds(hisi_acc_vdev);
1002b0eed085SLongfang Liu 		return NULL;
1003b0eed085SLongfang Liu 	}
1004b0eed085SLongfang Liu 
1005b0eed085SLongfang Liu 	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
1006b0eed085SLongfang Liu 		struct hisi_acc_vf_migration_file *migf;
1007b0eed085SLongfang Liu 
1008b0eed085SLongfang Liu 		migf = hisi_acc_vf_pci_resume(hisi_acc_vdev);
1009b0eed085SLongfang Liu 		if (IS_ERR(migf))
1010b0eed085SLongfang Liu 			return ERR_CAST(migf);
1011b0eed085SLongfang Liu 		get_file(migf->filp);
1012b0eed085SLongfang Liu 		hisi_acc_vdev->resuming_migf = migf;
1013b0eed085SLongfang Liu 		return migf->filp;
1014b0eed085SLongfang Liu 	}
1015b0eed085SLongfang Liu 
1016b0eed085SLongfang Liu 	if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
1017b0eed085SLongfang Liu 		ret = hisi_acc_vf_load_state(hisi_acc_vdev);
1018b0eed085SLongfang Liu 		if (ret)
1019b0eed085SLongfang Liu 			return ERR_PTR(ret);
1020b0eed085SLongfang Liu 		hisi_acc_vf_disable_fds(hisi_acc_vdev);
1021b0eed085SLongfang Liu 		return NULL;
1022b0eed085SLongfang Liu 	}
1023b0eed085SLongfang Liu 
1024d9a871e4SShameer Kolothum 	if (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) {
1025d9a871e4SShameer Kolothum 		hisi_acc_vf_disable_fds(hisi_acc_vdev);
1026d9a871e4SShameer Kolothum 		return NULL;
1027d9a871e4SShameer Kolothum 	}
1028d9a871e4SShameer Kolothum 
1029b0eed085SLongfang Liu 	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING) {
1030b0eed085SLongfang Liu 		hisi_acc_vf_start_device(hisi_acc_vdev);
1031b0eed085SLongfang Liu 		return NULL;
1032b0eed085SLongfang Liu 	}
1033b0eed085SLongfang Liu 
1034b0eed085SLongfang Liu 	/*
1035b0eed085SLongfang Liu 	 * vfio_mig_get_next_state() does not use arcs other than the above
1036b0eed085SLongfang Liu 	 */
1037b0eed085SLongfang Liu 	WARN_ON(true);
1038b0eed085SLongfang Liu 	return ERR_PTR(-EINVAL);
1039b0eed085SLongfang Liu }
1040b0eed085SLongfang Liu 
1041b0eed085SLongfang Liu static struct file *
1042b0eed085SLongfang Liu hisi_acc_vfio_pci_set_device_state(struct vfio_device *vdev,
1043b0eed085SLongfang Liu 				   enum vfio_device_mig_state new_state)
1044b0eed085SLongfang Liu {
1045b0eed085SLongfang Liu 	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev,
1046b0eed085SLongfang Liu 			struct hisi_acc_vf_core_device, core_device.vdev);
1047b0eed085SLongfang Liu 	enum vfio_device_mig_state next_state;
1048b0eed085SLongfang Liu 	struct file *res = NULL;
1049b0eed085SLongfang Liu 	int ret;
1050b0eed085SLongfang Liu 
1051b0eed085SLongfang Liu 	mutex_lock(&hisi_acc_vdev->state_mutex);
1052b0eed085SLongfang Liu 	while (new_state != hisi_acc_vdev->mig_state) {
1053b0eed085SLongfang Liu 		ret = vfio_mig_get_next_state(vdev,
1054b0eed085SLongfang Liu 					      hisi_acc_vdev->mig_state,
1055b0eed085SLongfang Liu 					      new_state, &next_state);
1056b0eed085SLongfang Liu 		if (ret) {
1057b0eed085SLongfang Liu 			res = ERR_PTR(-EINVAL);
1058b0eed085SLongfang Liu 			break;
1059b0eed085SLongfang Liu 		}
1060b0eed085SLongfang Liu 
1061b0eed085SLongfang Liu 		res = hisi_acc_vf_set_device_state(hisi_acc_vdev, next_state);
1062b0eed085SLongfang Liu 		if (IS_ERR(res))
1063b0eed085SLongfang Liu 			break;
1064b0eed085SLongfang Liu 		hisi_acc_vdev->mig_state = next_state;
1065b0eed085SLongfang Liu 		if (WARN_ON(res && new_state != hisi_acc_vdev->mig_state)) {
1066b0eed085SLongfang Liu 			fput(res);
1067b0eed085SLongfang Liu 			res = ERR_PTR(-EINVAL);
1068b0eed085SLongfang Liu 			break;
1069b0eed085SLongfang Liu 		}
1070b0eed085SLongfang Liu 	}
10714406f46cSShameer Kolothum 	hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
1072b0eed085SLongfang Liu 	return res;
1073b0eed085SLongfang Liu }
1074b0eed085SLongfang Liu 
1075b0eed085SLongfang Liu static int
10764e016f96SYishai Hadas hisi_acc_vfio_pci_get_data_size(struct vfio_device *vdev,
10774e016f96SYishai Hadas 				unsigned long *stop_copy_length)
10784e016f96SYishai Hadas {
10794e016f96SYishai Hadas 	*stop_copy_length = sizeof(struct acc_vf_data);
10804e016f96SYishai Hadas 	return 0;
10814e016f96SYishai Hadas }
10824e016f96SYishai Hadas 
10834e016f96SYishai Hadas static int
1084b0eed085SLongfang Liu hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
1085b0eed085SLongfang Liu 				   enum vfio_device_mig_state *curr_state)
1086b0eed085SLongfang Liu {
1087b0eed085SLongfang Liu 	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev,
1088b0eed085SLongfang Liu 			struct hisi_acc_vf_core_device, core_device.vdev);
1089b0eed085SLongfang Liu 
1090b0eed085SLongfang Liu 	mutex_lock(&hisi_acc_vdev->state_mutex);
1091b0eed085SLongfang Liu 	*curr_state = hisi_acc_vdev->mig_state;
10924406f46cSShameer Kolothum 	hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
1093b0eed085SLongfang Liu 	return 0;
1094b0eed085SLongfang Liu }
1095b0eed085SLongfang Liu 
10964406f46cSShameer Kolothum static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
10974406f46cSShameer Kolothum {
1098245898ebSShameer Kolothum 	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
10994406f46cSShameer Kolothum 
11004406f46cSShameer Kolothum 	if (hisi_acc_vdev->core_device.vdev.migration_flags !=
11014406f46cSShameer Kolothum 				VFIO_MIGRATION_STOP_COPY)
11024406f46cSShameer Kolothum 		return;
11034406f46cSShameer Kolothum 
11044406f46cSShameer Kolothum 	/*
11054406f46cSShameer Kolothum 	 * As the higher VFIO layers are holding locks across reset and using
11064406f46cSShameer Kolothum 	 * those same locks with the mm_lock we need to prevent ABBA deadlock
11074406f46cSShameer Kolothum 	 * with the state_mutex and mm_lock.
11084406f46cSShameer Kolothum 	 * In case the state_mutex was taken already we defer the cleanup work
11094406f46cSShameer Kolothum 	 * to the unlock flow of the other running context.
11104406f46cSShameer Kolothum 	 */
11114406f46cSShameer Kolothum 	spin_lock(&hisi_acc_vdev->reset_lock);
11124406f46cSShameer Kolothum 	hisi_acc_vdev->deferred_reset = true;
11134406f46cSShameer Kolothum 	if (!mutex_trylock(&hisi_acc_vdev->state_mutex)) {
11144406f46cSShameer Kolothum 		spin_unlock(&hisi_acc_vdev->reset_lock);
11154406f46cSShameer Kolothum 		return;
11164406f46cSShameer Kolothum 	}
11174406f46cSShameer Kolothum 	spin_unlock(&hisi_acc_vdev->reset_lock);
11184406f46cSShameer Kolothum 	hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
11194406f46cSShameer Kolothum }
11204406f46cSShameer Kolothum 
1121b0eed085SLongfang Liu static int hisi_acc_vf_qm_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
1122b0eed085SLongfang Liu {
1123b0eed085SLongfang Liu 	struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
1124b0eed085SLongfang Liu 	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
1125b0eed085SLongfang Liu 	struct pci_dev *vf_dev = vdev->pdev;
1126b0eed085SLongfang Liu 
1127b0eed085SLongfang Liu 	/*
1128b0eed085SLongfang Liu 	 * ACC VF dev BAR2 region consists of both functional register space
1129b0eed085SLongfang Liu 	 * and migration control register space. For migration to work, we
1130b0eed085SLongfang Liu 	 * need access to both. Hence, we map the entire BAR2 region here.
1131b0eed085SLongfang Liu 	 * But unnecessarily exposing the migration BAR region to the Guest
1132b0eed085SLongfang Liu 	 * has the potential to prevent/corrupt the Guest migration. Hence,
1133b0eed085SLongfang Liu 	 * we restrict access to the migration control space from
1134b0eed085SLongfang Liu 	 * Guest(Please see mmap/ioctl/read/write override functions).
1135b0eed085SLongfang Liu 	 *
1136b0eed085SLongfang Liu 	 * Please note that it is OK to expose the entire VF BAR if migration
1137b0eed085SLongfang Liu 	 * is not supported or required as this cannot affect the ACC PF
1138b0eed085SLongfang Liu 	 * configurations.
1139b0eed085SLongfang Liu 	 *
1140b0eed085SLongfang Liu 	 * Also the HiSilicon ACC VF devices supported by this driver on
1141b0eed085SLongfang Liu 	 * HiSilicon hardware platforms are integrated end point devices
1142b0eed085SLongfang Liu 	 * and the platform lacks the capability to perform any PCIe P2P
1143b0eed085SLongfang Liu 	 * between these devices.
1144b0eed085SLongfang Liu 	 */
1145b0eed085SLongfang Liu 
1146b0eed085SLongfang Liu 	vf_qm->io_base =
1147b0eed085SLongfang Liu 		ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
1148b0eed085SLongfang Liu 			pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
1149b0eed085SLongfang Liu 	if (!vf_qm->io_base)
1150b0eed085SLongfang Liu 		return -EIO;
1151b0eed085SLongfang Liu 
1152b0eed085SLongfang Liu 	vf_qm->fun_type = QM_HW_VF;
1153b0eed085SLongfang Liu 	vf_qm->pdev = vf_dev;
1154b0eed085SLongfang Liu 	mutex_init(&vf_qm->mailbox_lock);
1155b0eed085SLongfang Liu 
1156b0eed085SLongfang Liu 	return 0;
1157b0eed085SLongfang Liu }
1158b0eed085SLongfang Liu 
1159b0eed085SLongfang Liu static struct hisi_qm *hisi_acc_get_pf_qm(struct pci_dev *pdev)
1160b0eed085SLongfang Liu {
1161b0eed085SLongfang Liu 	struct hisi_qm	*pf_qm;
1162b0eed085SLongfang Liu 	struct pci_driver *pf_driver;
1163b0eed085SLongfang Liu 
1164b0eed085SLongfang Liu 	if (!pdev->is_virtfn)
1165b0eed085SLongfang Liu 		return NULL;
1166b0eed085SLongfang Liu 
1167b0eed085SLongfang Liu 	switch (pdev->device) {
1168b0eed085SLongfang Liu 	case PCI_DEVICE_ID_HUAWEI_SEC_VF:
1169b0eed085SLongfang Liu 		pf_driver = hisi_sec_get_pf_driver();
1170b0eed085SLongfang Liu 		break;
1171b0eed085SLongfang Liu 	case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
1172b0eed085SLongfang Liu 		pf_driver = hisi_hpre_get_pf_driver();
1173b0eed085SLongfang Liu 		break;
1174b0eed085SLongfang Liu 	case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
1175b0eed085SLongfang Liu 		pf_driver = hisi_zip_get_pf_driver();
1176b0eed085SLongfang Liu 		break;
1177b0eed085SLongfang Liu 	default:
1178b0eed085SLongfang Liu 		return NULL;
1179b0eed085SLongfang Liu 	}
1180b0eed085SLongfang Liu 
1181b0eed085SLongfang Liu 	if (!pf_driver)
1182b0eed085SLongfang Liu 		return NULL;
1183b0eed085SLongfang Liu 
1184b0eed085SLongfang Liu 	pf_qm = pci_iov_get_pf_drvdata(pdev, pf_driver);
1185b0eed085SLongfang Liu 
1186b0eed085SLongfang Liu 	return !IS_ERR(pf_qm) ? pf_qm : NULL;
1187b0eed085SLongfang Liu }
1188ee3a5b23SShameer Kolothum 
11896abdce51SShameer Kolothum static int hisi_acc_pci_rw_access_check(struct vfio_device *core_vdev,
11906abdce51SShameer Kolothum 					size_t count, loff_t *ppos,
11916abdce51SShameer Kolothum 					size_t *new_count)
11926abdce51SShameer Kolothum {
11936abdce51SShameer Kolothum 	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
11946abdce51SShameer Kolothum 	struct vfio_pci_core_device *vdev =
11956abdce51SShameer Kolothum 		container_of(core_vdev, struct vfio_pci_core_device, vdev);
11966abdce51SShameer Kolothum 
11976abdce51SShameer Kolothum 	if (index == VFIO_PCI_BAR2_REGION_INDEX) {
11986abdce51SShameer Kolothum 		loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
11996abdce51SShameer Kolothum 		resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;
12006abdce51SShameer Kolothum 
12016abdce51SShameer Kolothum 		/* Check if access is for migration control region */
12026abdce51SShameer Kolothum 		if (pos >= end)
12036abdce51SShameer Kolothum 			return -EINVAL;
12046abdce51SShameer Kolothum 
12056abdce51SShameer Kolothum 		*new_count = min(count, (size_t)(end - pos));
12066abdce51SShameer Kolothum 	}
12076abdce51SShameer Kolothum 
12086abdce51SShameer Kolothum 	return 0;
12096abdce51SShameer Kolothum }
12106abdce51SShameer Kolothum 
12116abdce51SShameer Kolothum static int hisi_acc_vfio_pci_mmap(struct vfio_device *core_vdev,
12126abdce51SShameer Kolothum 				  struct vm_area_struct *vma)
12136abdce51SShameer Kolothum {
12146abdce51SShameer Kolothum 	struct vfio_pci_core_device *vdev =
12156abdce51SShameer Kolothum 		container_of(core_vdev, struct vfio_pci_core_device, vdev);
12166abdce51SShameer Kolothum 	unsigned int index;
12176abdce51SShameer Kolothum 
12186abdce51SShameer Kolothum 	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
12196abdce51SShameer Kolothum 	if (index == VFIO_PCI_BAR2_REGION_INDEX) {
12206abdce51SShameer Kolothum 		u64 req_len, pgoff, req_start;
12216abdce51SShameer Kolothum 		resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;
12226abdce51SShameer Kolothum 
12236abdce51SShameer Kolothum 		req_len = vma->vm_end - vma->vm_start;
12246abdce51SShameer Kolothum 		pgoff = vma->vm_pgoff &
12256abdce51SShameer Kolothum 			((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
12266abdce51SShameer Kolothum 		req_start = pgoff << PAGE_SHIFT;
12276abdce51SShameer Kolothum 
12286abdce51SShameer Kolothum 		if (req_start + req_len > end)
12296abdce51SShameer Kolothum 			return -EINVAL;
12306abdce51SShameer Kolothum 	}
12316abdce51SShameer Kolothum 
12326abdce51SShameer Kolothum 	return vfio_pci_core_mmap(core_vdev, vma);
12336abdce51SShameer Kolothum }
12346abdce51SShameer Kolothum 
12356abdce51SShameer Kolothum static ssize_t hisi_acc_vfio_pci_write(struct vfio_device *core_vdev,
12366abdce51SShameer Kolothum 				       const char __user *buf, size_t count,
12376abdce51SShameer Kolothum 				       loff_t *ppos)
12386abdce51SShameer Kolothum {
12396abdce51SShameer Kolothum 	size_t new_count = count;
12406abdce51SShameer Kolothum 	int ret;
12416abdce51SShameer Kolothum 
12426abdce51SShameer Kolothum 	ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
12436abdce51SShameer Kolothum 	if (ret)
12446abdce51SShameer Kolothum 		return ret;
12456abdce51SShameer Kolothum 
12466abdce51SShameer Kolothum 	return vfio_pci_core_write(core_vdev, buf, new_count, ppos);
12476abdce51SShameer Kolothum }
12486abdce51SShameer Kolothum 
12496abdce51SShameer Kolothum static ssize_t hisi_acc_vfio_pci_read(struct vfio_device *core_vdev,
12506abdce51SShameer Kolothum 				      char __user *buf, size_t count,
12516abdce51SShameer Kolothum 				      loff_t *ppos)
12526abdce51SShameer Kolothum {
12536abdce51SShameer Kolothum 	size_t new_count = count;
12546abdce51SShameer Kolothum 	int ret;
12556abdce51SShameer Kolothum 
12566abdce51SShameer Kolothum 	ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
12576abdce51SShameer Kolothum 	if (ret)
12586abdce51SShameer Kolothum 		return ret;
12596abdce51SShameer Kolothum 
12606abdce51SShameer Kolothum 	return vfio_pci_core_read(core_vdev, buf, new_count, ppos);
12616abdce51SShameer Kolothum }
12626abdce51SShameer Kolothum 
12636abdce51SShameer Kolothum static long hisi_acc_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
12646abdce51SShameer Kolothum 				    unsigned long arg)
12656abdce51SShameer Kolothum {
12666abdce51SShameer Kolothum 	if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
12676abdce51SShameer Kolothum 		struct vfio_pci_core_device *vdev =
12686abdce51SShameer Kolothum 			container_of(core_vdev, struct vfio_pci_core_device, vdev);
12696abdce51SShameer Kolothum 		struct pci_dev *pdev = vdev->pdev;
12706abdce51SShameer Kolothum 		struct vfio_region_info info;
12716abdce51SShameer Kolothum 		unsigned long minsz;
12726abdce51SShameer Kolothum 
12736abdce51SShameer Kolothum 		minsz = offsetofend(struct vfio_region_info, offset);
12746abdce51SShameer Kolothum 
12756abdce51SShameer Kolothum 		if (copy_from_user(&info, (void __user *)arg, minsz))
12766abdce51SShameer Kolothum 			return -EFAULT;
12776abdce51SShameer Kolothum 
12786abdce51SShameer Kolothum 		if (info.argsz < minsz)
12796abdce51SShameer Kolothum 			return -EINVAL;
12806abdce51SShameer Kolothum 
12816abdce51SShameer Kolothum 		if (info.index == VFIO_PCI_BAR2_REGION_INDEX) {
12826abdce51SShameer Kolothum 			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
12836abdce51SShameer Kolothum 
12846abdce51SShameer Kolothum 			/*
12856abdce51SShameer Kolothum 			 * ACC VF dev BAR2 region consists of both functional
12866abdce51SShameer Kolothum 			 * register space and migration control register space.
12876abdce51SShameer Kolothum 			 * Report only the functional region to Guest.
12886abdce51SShameer Kolothum 			 */
12896abdce51SShameer Kolothum 			info.size = pci_resource_len(pdev, info.index) / 2;
12906abdce51SShameer Kolothum 
12916abdce51SShameer Kolothum 			info.flags = VFIO_REGION_INFO_FLAG_READ |
12926abdce51SShameer Kolothum 					VFIO_REGION_INFO_FLAG_WRITE |
12936abdce51SShameer Kolothum 					VFIO_REGION_INFO_FLAG_MMAP;
12946abdce51SShameer Kolothum 
12956abdce51SShameer Kolothum 			return copy_to_user((void __user *)arg, &info, minsz) ?
12966abdce51SShameer Kolothum 					    -EFAULT : 0;
12976abdce51SShameer Kolothum 		}
12986abdce51SShameer Kolothum 	}
12996abdce51SShameer Kolothum 	return vfio_pci_core_ioctl(core_vdev, cmd, arg);
13006abdce51SShameer Kolothum }
13016abdce51SShameer Kolothum 
1302ee3a5b23SShameer Kolothum static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev)
1303ee3a5b23SShameer Kolothum {
1304b0eed085SLongfang Liu 	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
1305b0eed085SLongfang Liu 			struct hisi_acc_vf_core_device, core_device.vdev);
1306b0eed085SLongfang Liu 	struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
1307ee3a5b23SShameer Kolothum 	int ret;
1308ee3a5b23SShameer Kolothum 
1309ee3a5b23SShameer Kolothum 	ret = vfio_pci_core_enable(vdev);
1310ee3a5b23SShameer Kolothum 	if (ret)
1311ee3a5b23SShameer Kolothum 		return ret;
1312ee3a5b23SShameer Kolothum 
13136e97eba8SYishai Hadas 	if (core_vdev->mig_ops) {
1314b0eed085SLongfang Liu 		ret = hisi_acc_vf_qm_init(hisi_acc_vdev);
1315b0eed085SLongfang Liu 		if (ret) {
1316b0eed085SLongfang Liu 			vfio_pci_core_disable(vdev);
1317b0eed085SLongfang Liu 			return ret;
1318b0eed085SLongfang Liu 		}
1319b0eed085SLongfang Liu 		hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
1320b0eed085SLongfang Liu 	}
1321ee3a5b23SShameer Kolothum 
1322b0eed085SLongfang Liu 	vfio_pci_core_finish_enable(vdev);
1323ee3a5b23SShameer Kolothum 	return 0;
1324ee3a5b23SShameer Kolothum }
1325ee3a5b23SShameer Kolothum 
static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
			struct hisi_acc_vf_core_device, core_device.vdev);
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;

	/*
	 * Undo the VF QM MMIO mapping set up by hisi_acc_vf_qm_init() in
	 * open_device. This close op is only wired into the migration ops,
	 * where open_device always performs the mapping before success.
	 */
	iounmap(vf_qm->io_base);
	vfio_pci_core_close_device(core_vdev);
}
1335b0eed085SLongfang Liu 
/* Callbacks implementing the VFIO live migration state machine for ACC VFs */
static const struct vfio_migration_ops hisi_acc_vfio_pci_migrn_state_ops = {
	.migration_set_state = hisi_acc_vfio_pci_set_device_state,
	.migration_get_state = hisi_acc_vfio_pci_get_device_state,
	.migration_get_data_size = hisi_acc_vfio_pci_get_data_size,
};
13416e97eba8SYishai Hadas 
134227aeb915SYi Liu static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev)
134327aeb915SYi Liu {
134427aeb915SYi Liu 	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
134527aeb915SYi Liu 			struct hisi_acc_vf_core_device, core_device.vdev);
134627aeb915SYi Liu 	struct pci_dev *pdev = to_pci_dev(core_vdev->dev);
134727aeb915SYi Liu 	struct hisi_qm *pf_qm = hisi_acc_get_pf_qm(pdev);
134827aeb915SYi Liu 
134927aeb915SYi Liu 	hisi_acc_vdev->vf_id = pci_iov_vf_id(pdev) + 1;
135027aeb915SYi Liu 	hisi_acc_vdev->pf_qm = pf_qm;
135127aeb915SYi Liu 	hisi_acc_vdev->vf_dev = pdev;
135227aeb915SYi Liu 	mutex_init(&hisi_acc_vdev->state_mutex);
135327aeb915SYi Liu 
135427aeb915SYi Liu 	core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY;
135527aeb915SYi Liu 	core_vdev->mig_ops = &hisi_acc_vfio_pci_migrn_state_ops;
135627aeb915SYi Liu 
135727aeb915SYi Liu 	return vfio_pci_core_init_dev(core_vdev);
135827aeb915SYi Liu }
135927aeb915SYi Liu 
/*
 * Device ops for migration-capable VFs: BAR2 accesses are filtered so the
 * guest only sees the functional half of the region, and open/close manage
 * the VF QM mapping needed for migration.
 */
static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
	.name = "hisi-acc-vfio-pci-migration",
	.init = hisi_acc_vfio_pci_migrn_init_dev,
	.release = vfio_pci_core_release_dev,
	.open_device = hisi_acc_vfio_pci_open_device,
	.close_device = hisi_acc_vfio_pci_close_device,
	.ioctl = hisi_acc_vfio_pci_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = hisi_acc_vfio_pci_read,
	.write = hisi_acc_vfio_pci_write,
	.mmap = hisi_acc_vfio_pci_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
};
13746abdce51SShameer Kolothum 
/*
 * Generic (non-migration) device ops: everything defers to vfio-pci-core
 * except open_device, which skips QM init because mig_ops is unset.
 */
static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
	.name = "hisi-acc-vfio-pci",
	.init = vfio_pci_core_init_dev,
	.release = vfio_pci_core_release_dev,
	.open_device = hisi_acc_vfio_pci_open_device,
	.close_device = vfio_pci_core_close_device,
	.ioctl = vfio_pci_core_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = vfio_pci_core_read,
	.write = vfio_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
};
1389ee3a5b23SShameer Kolothum 
1390ee3a5b23SShameer Kolothum static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1391ee3a5b23SShameer Kolothum {
1392b0eed085SLongfang Liu 	struct hisi_acc_vf_core_device *hisi_acc_vdev;
139327aeb915SYi Liu 	const struct vfio_device_ops *ops = &hisi_acc_vfio_pci_ops;
1394b0eed085SLongfang Liu 	struct hisi_qm *pf_qm;
139527aeb915SYi Liu 	int vf_id;
1396ee3a5b23SShameer Kolothum 	int ret;
1397ee3a5b23SShameer Kolothum 
1398b0eed085SLongfang Liu 	pf_qm = hisi_acc_get_pf_qm(pdev);
1399b0eed085SLongfang Liu 	if (pf_qm && pf_qm->ver >= QM_HW_V3) {
140027aeb915SYi Liu 		vf_id = pci_iov_vf_id(pdev);
140127aeb915SYi Liu 		if (vf_id >= 0)
140227aeb915SYi Liu 			ops = &hisi_acc_vfio_pci_migrn_ops;
140327aeb915SYi Liu 		else
1404b0eed085SLongfang Liu 			pci_warn(pdev, "migration support failed, continue with generic interface\n");
1405b0eed085SLongfang Liu 	}
140627aeb915SYi Liu 
140727aeb915SYi Liu 	hisi_acc_vdev = vfio_alloc_device(hisi_acc_vf_core_device,
140827aeb915SYi Liu 					  core_device.vdev, &pdev->dev, ops);
140927aeb915SYi Liu 	if (IS_ERR(hisi_acc_vdev))
141027aeb915SYi Liu 		return PTR_ERR(hisi_acc_vdev);
1411ee3a5b23SShameer Kolothum 
141291be0bd6SJason Gunthorpe 	dev_set_drvdata(&pdev->dev, &hisi_acc_vdev->core_device);
1413b0eed085SLongfang Liu 	ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device);
1414ee3a5b23SShameer Kolothum 	if (ret)
141527aeb915SYi Liu 		goto out_put_vdev;
1416ee3a5b23SShameer Kolothum 	return 0;
1417ee3a5b23SShameer Kolothum 
141827aeb915SYi Liu out_put_vdev:
141927aeb915SYi Liu 	vfio_put_device(&hisi_acc_vdev->core_device.vdev);
1420ee3a5b23SShameer Kolothum 	return ret;
1421ee3a5b23SShameer Kolothum }
1422ee3a5b23SShameer Kolothum 
static void hisi_acc_vfio_pci_remove(struct pci_dev *pdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);

	/* Unregister first, then drop the reference taken in probe(). */
	vfio_pci_core_unregister_device(&hisi_acc_vdev->core_device);
	vfio_put_device(&hisi_acc_vdev->core_device.vdev);
}
1430ee3a5b23SShameer Kolothum 
/* HiSilicon ACC VF devices (SEC, HPRE, ZIP) bound via driver_override */
static const struct pci_device_id hisi_acc_vfio_pci_table[] = {
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) },
	{ }
};

MODULE_DEVICE_TABLE(pci, hisi_acc_vfio_pci_table);
1439ee3a5b23SShameer Kolothum 
/* AER handlers: driver-specific reset_done, core handling for detection */
static const struct pci_error_handlers hisi_acc_vf_err_handlers = {
	.reset_done = hisi_acc_vf_pci_aer_reset_done,
	.error_detected = vfio_pci_core_aer_err_detected,
};
14444406f46cSShameer Kolothum 
static struct pci_driver hisi_acc_vfio_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = hisi_acc_vfio_pci_table,
	.probe = hisi_acc_vfio_pci_probe,
	.remove = hisi_acc_vfio_pci_remove,
	.err_handler = &hisi_acc_vf_err_handlers,
	/* DMA is managed by the VFIO framework, not by this driver. */
	.driver_managed_dma = true,
};
1453ee3a5b23SShameer Kolothum 
/* Standard module registration boilerplate for the PCI driver above. */
module_pci_driver(hisi_acc_vfio_pci_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Liu Longfang <liulongfang@huawei.com>");
MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
MODULE_DESCRIPTION("HiSilicon VFIO PCI - VFIO PCI driver with live migration support for HiSilicon ACC device family");
1460