// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, HiSilicon Ltd.
 */

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/hisi_acc_qm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vfio.h>
#include <linux/vfio_pci_core.h>
#include <linux/anon_inodes.h>

#include "hisi_acc_vfio_pci.h"

/* Return 0 when the VM's acc device is ready, -ETIMEDOUT on hardware timeout */
static int qm_wait_dev_not_ready(struct hisi_qm *qm)
{
        u32 val;

        return readl_relaxed_poll_timeout(qm->io_base + QM_VF_STATE,
                                          val, !(val & 0x1), MB_POLL_PERIOD_US,
                                          MB_POLL_TIMEOUT_US);
}

/*
 * Each state register is checked up to 100 times,
 * with a delay of 100 microseconds after each check.
 */
static u32 qm_check_reg_state(struct hisi_qm *qm, u32 regs)
{
        int check_times = 0;
        u32 state;

        state = readl(qm->io_base + regs);
        while (state && check_times < ERROR_CHECK_TIMEOUT) {
                udelay(CHECK_DELAY_TIME);
                state = readl(qm->io_base + regs);
                check_times++;
        }

        return state;
}

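/*
 * Read @nums consecutive 32-bit registers, starting at @reg_addr and
 * spaced QM_REG_ADDR_OFFSET apart, into @data.
 */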
static int qm_read_regs(struct hisi_qm *qm, u32 reg_addr,
                        u32 *data, u8 nums)
{
        int i;

        if (nums < 1 || nums > QM_REGS_MAX_LEN)
                return -EINVAL;

        for (i = 0; i < nums; i++) {
                data[i] = readl(qm->io_base + reg_addr);
                reg_addr += QM_REG_ADDR_OFFSET;
        }

        return 0;
}

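/*
 * Write @nums consecutive 32-bit registers from @data, starting at
 * @reg and spaced QM_REG_ADDR_OFFSET apart.
 */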
static int qm_write_regs(struct hisi_qm *qm, u32 reg,
                         u32 *data, u8 nums)
{
        int i;

        if (nums < 1 || nums > QM_REGS_MAX_LEN)
                return -EINVAL;

        for (i = 0; i < nums; i++)
                writel(data[i], qm->io_base + reg + i * QM_REG_ADDR_OFFSET);

        return 0;
}

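/*
 * Query the VF's SQC VFT entry via the mailbox. On success, store the
 * queue base in @base and return the queue pair count; otherwise
 * return a negative error code.
 */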
static int qm_get_vft(struct hisi_qm *qm, u32 *base)
{
        u64 sqc_vft;
        u32 qp_num;
        int ret;

        ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
        if (ret)
                return ret;

        sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
                  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
                  QM_XQC_ADDR_OFFSET);
        *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
        qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
                  (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

        return qp_num;
}

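/*
 * Fetch the SQC BT base address via the mailbox and store it in @addr.
 * qm_get_cqc() below is the CQC counterpart.
 */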
static int qm_get_sqc(struct hisi_qm *qm, u64 *addr)
{
        int ret;

        ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, 0, 0, 1);
        if (ret)
                return ret;

        *addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
                ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
                QM_XQC_ADDR_OFFSET);

        return 0;
}

static int qm_get_cqc(struct hisi_qm *qm, u64 *addr)
{
        int ret;

        ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, 0, 0, 1);
        if (ret)
                return ret;

        *addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
                ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
                QM_XQC_ADDR_OFFSET);

        return 0;
}

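/*
 * Capture the VF QM register state (interrupt masks, page size and the
 * EQC/AEQC dwords) into @vf_data for migration.
 */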
static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
{
        struct device *dev = &qm->pdev->dev;
        int ret;

        ret = qm_read_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
        if (ret) {
                dev_err(dev, "failed to read QM_VF_AEQ_INT_MASK\n");
                return ret;
        }

        ret = qm_read_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
        if (ret) {
                dev_err(dev, "failed to read QM_VF_EQ_INT_MASK\n");
                return ret;
        }

        ret = qm_read_regs(qm, QM_IFC_INT_SOURCE_V,
                           &vf_data->ifc_int_source, 1);
        if (ret) {
                dev_err(dev, "failed to read QM_IFC_INT_SOURCE_V\n");
                return ret;
        }

        ret = qm_read_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
        if (ret) {
                dev_err(dev, "failed to read QM_IFC_INT_MASK\n");
                return ret;
        }

        ret = qm_read_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
        if (ret) {
                dev_err(dev, "failed to read QM_IFC_INT_SET_V\n");
                return ret;
        }

        ret = qm_read_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
        if (ret) {
                dev_err(dev, "failed to read QM_PAGE_SIZE\n");
                return ret;
        }

        /* QM_EQC_DW has 7 regs */
        ret = qm_read_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
        if (ret) {
                dev_err(dev, "failed to read QM_EQC_DW\n");
                return ret;
        }

        /* QM_AEQC_DW has 7 regs */
        ret = qm_read_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
        if (ret) {
                dev_err(dev, "failed to read QM_AEQC_DW\n");
                return ret;
        }

        return 0;
}

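/*
 * Restore the VF QM register state saved by qm_get_regs(). Fails with
 * -EBUSY if the QM mailbox is not ready to accept writes.
 */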
static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
{
        struct device *dev = &qm->pdev->dev;
        int ret;

        /* Check VF state */
        if (unlikely(hisi_qm_wait_mb_ready(qm))) {
                dev_err(&qm->pdev->dev, "QM device is not ready to write\n");
                return -EBUSY;
        }

        ret = qm_write_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
        if (ret) {
                dev_err(dev, "failed to write QM_VF_AEQ_INT_MASK\n");
                return ret;
        }

        ret = qm_write_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
        if (ret) {
                dev_err(dev, "failed to write QM_VF_EQ_INT_MASK\n");
                return ret;
        }

        ret = qm_write_regs(qm, QM_IFC_INT_SOURCE_V,
                            &vf_data->ifc_int_source, 1);
        if (ret) {
                dev_err(dev, "failed to write QM_IFC_INT_SOURCE_V\n");
                return ret;
        }

        ret = qm_write_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
        if (ret) {
                dev_err(dev, "failed to write QM_IFC_INT_MASK\n");
                return ret;
        }

        ret = qm_write_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
        if (ret) {
                dev_err(dev, "failed to write QM_IFC_INT_SET_V\n");
                return ret;
        }

        ret = qm_write_regs(qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
        if (ret) {
                dev_err(dev, "failed to write QM_QUE_ISO_CFG_V\n");
                return ret;
        }

        ret = qm_write_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
        if (ret) {
                dev_err(dev, "failed to write QM_PAGE_SIZE\n");
                return ret;
        }

        /* QM_EQC_DW has 7 regs */
        ret = qm_write_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
        if (ret) {
                dev_err(dev, "failed to write QM_EQC_DW\n");
                return ret;
        }

        /* QM_AEQC_DW has 7 regs */
        ret = qm_write_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
        if (ret) {
                dev_err(dev, "failed to write QM_AEQC_DW\n");
                return ret;
        }

        return 0;
}

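/*
 * Ring a doorbell: pack the queue number, command, index and priority
 * into a single 64-bit write to the SQ/CQ or EQ/AEQ doorbell base.
 */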
static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd,
                  u16 index, u8 priority)
{
        u64 doorbell;
        u64 dbase;
        u16 randata = 0;

        if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
                dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
        else
                dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;

        doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
                   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
                   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
                   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

        writeq(doorbell, qm->io_base + dbase);
}

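/*
 * Look up the SQC VFT entry for @vf_id through the PF's VFT config
 * interface. On success, store the queue base in @rbase and return the
 * VF's queue pair count.
 */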
static int pf_qm_get_qp_num(struct hisi_qm *qm, int vf_id, u32 *rbase)
{
        unsigned int val;
        u64 sqc_vft;
        u32 qp_num;
        int ret;

        ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
                                         val & BIT(0), MB_POLL_PERIOD_US,
                                         MB_POLL_TIMEOUT_US);
        if (ret)
                return ret;

        writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
        /* 0 means SQC VFT */
        writel(0x0, qm->io_base + QM_VFT_CFG_TYPE);
        writel(vf_id, qm->io_base + QM_VFT_CFG);

        writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
        writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);

        ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
                                         val & BIT(0), MB_POLL_PERIOD_US,
                                         MB_POLL_TIMEOUT_US);
        if (ret)
                return ret;

        sqc_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
                  ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) <<
                  QM_XQC_ADDR_OFFSET);
        *rbase = QM_SQC_VFT_BASE_MASK_V2 &
                 (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
        qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
                  (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

        return qp_num;
}

static void qm_dev_cmd_init(struct hisi_qm *qm)
{
        /* Clear VF communication status registers. */
        writel(0x1, qm->io_base + QM_IFC_INT_SOURCE_V);

        /* Enable PF and VF communication. */
        writel(0x0, qm->io_base + QM_IFC_INT_MASK);
}

static int vf_qm_cache_wb(struct hisi_qm *qm)
{
        unsigned int val;

        writel(0x1, qm->io_base + QM_CACHE_WB_START);
        if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
                                       val, val & BIT(0), MB_POLL_PERIOD_US,
                                       MB_POLL_TIMEOUT_US)) {
                dev_err(&qm->pdev->dev, "vf QM writeback sqc cache fail\n");
                return -EINVAL;
        }

        return 0;
}

static void vf_qm_fun_reset(struct hisi_qm *qm)
{
        int i;

        for (i = 0; i < qm->qp_num; i++)
                qm_db(qm, i, QM_DOORBELL_CMD_SQ, 0, 1);
}

static int vf_qm_func_stop(struct hisi_qm *qm)
{
        return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
}

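/*
 * Check the incoming match data (magic, device id, qp number and
 * isolation state) against this VF before accepting a state load.
 * Runs once, as soon as the first QM_MATCH_SIZE bytes have arrived.
 */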
static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
                             struct hisi_acc_vf_migration_file *migf)
{
        struct acc_vf_data *vf_data = &migf->vf_data;
        struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
        struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
        struct device *dev = &vf_qm->pdev->dev;
        u32 que_iso_state;
        int ret;

        if (migf->total_length < QM_MATCH_SIZE || hisi_acc_vdev->match_done)
                return 0;

        if (vf_data->acc_magic != ACC_DEV_MAGIC) {
                dev_err(dev, "failed to match ACC_DEV_MAGIC\n");
                return -EINVAL;
        }

        if (vf_data->dev_id != hisi_acc_vdev->vf_dev->device) {
                dev_err(dev, "failed to match VF devices\n");
                return -EINVAL;
        }

        /* VF qp num check */
        ret = qm_get_vft(vf_qm, &vf_qm->qp_base);
        if (ret <= 0) {
                dev_err(dev, "failed to get vft qp nums\n");
                return -EINVAL;
        }

        if (ret != vf_data->qp_num) {
                dev_err(dev, "failed to match VF qp num\n");
                return -EINVAL;
        }

        vf_qm->qp_num = ret;

        /* VF isolation state check */
        ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &que_iso_state, 1);
        if (ret) {
                dev_err(dev, "failed to read QM_QUE_ISO_CFG_V\n");
                return ret;
        }

        if (vf_data->que_iso_cfg != que_iso_state) {
                dev_err(dev, "failed to match isolation state\n");
                return -EINVAL;
        }

        ret = qm_write_regs(vf_qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
        if (ret) {
                dev_err(dev, "failed to write QM_VF_STATE\n");
                return ret;
        }

        hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
        hisi_acc_vdev->match_done = true;
        return 0;
}

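/*
 * Fill @vf_data with the source-side match data: the magic, the VF
 * device id, the qp base/count read from the PF and the isolation
 * state.
 */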
static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
                                struct acc_vf_data *vf_data)
{
        struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
        struct device *dev = &pf_qm->pdev->dev;
        int vf_id = hisi_acc_vdev->vf_id;
        int ret;

        vf_data->acc_magic = ACC_DEV_MAGIC;
        /* Save device id */
        vf_data->dev_id = hisi_acc_vdev->vf_dev->device;

        /* Save the VF qp num from the PF */
        ret = pf_qm_get_qp_num(pf_qm, vf_id, &vf_data->qp_base);
        if (ret <= 0) {
                dev_err(dev, "failed to get vft qp nums!\n");
                return -EINVAL;
        }

        vf_data->qp_num = ret;

        /* Save the VF isolation state from the PF */
        ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
        if (ret) {
                dev_err(dev, "failed to read QM_QUE_ISO_CFG_V!\n");
                return ret;
        }

        return 0;
}

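/*
 * Restore the full device state from @migf: copy the saved DMA
 * addresses and qp configuration into the VF QM, write back the
 * migration registers and reprogram the SQC/CQC BT via the mailbox.
 */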
static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
                           struct hisi_acc_vf_migration_file *migf)
{
        struct hisi_qm *qm = &hisi_acc_vdev->vf_qm;
        struct device *dev = &qm->pdev->dev;
        struct acc_vf_data *vf_data = &migf->vf_data;
        int ret;

        /* Return if only match data was transferred */
        if (migf->total_length == QM_MATCH_SIZE)
                return 0;

        if (migf->total_length < sizeof(struct acc_vf_data))
                return -EINVAL;

        qm->eqe_dma = vf_data->eqe_dma;
        qm->aeqe_dma = vf_data->aeqe_dma;
        qm->sqc_dma = vf_data->sqc_dma;
        qm->cqc_dma = vf_data->cqc_dma;

        qm->qp_base = vf_data->qp_base;
        qm->qp_num = vf_data->qp_num;

        ret = qm_set_regs(qm, vf_data);
        if (ret) {
                dev_err(dev, "set VF regs failed\n");
                return ret;
        }

        ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
        if (ret) {
                dev_err(dev, "set sqc failed\n");
                return ret;
        }

        ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
        if (ret) {
                dev_err(dev, "set cqc failed\n");
                return ret;
        }

        qm_dev_cmd_init(qm);
        return 0;
}

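/*
 * Save the VF QM state into @migf. If the device is not ready, record
 * QM_NOT_READY and save only the match data; otherwise write back the
 * QM cache and capture the migration registers plus the EQE/AEQE, SQC
 * and CQC DMA addresses.
 */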
static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
                            struct hisi_acc_vf_migration_file *migf)
{
        struct acc_vf_data *vf_data = &migf->vf_data;
        struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
        struct device *dev = &vf_qm->pdev->dev;
        int ret;

        if (unlikely(qm_wait_dev_not_ready(vf_qm))) {
                /* Update state and return with match data */
                vf_data->vf_qm_state = QM_NOT_READY;
                hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
                migf->total_length = QM_MATCH_SIZE;
                return 0;
        }

        vf_data->vf_qm_state = QM_READY;
        hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;

        ret = vf_qm_cache_wb(vf_qm);
        if (ret) {
                dev_err(dev, "failed to writeback QM Cache!\n");
                return ret;
        }

        ret = qm_get_regs(vf_qm, vf_data);
        if (ret)
                return -EINVAL;

        /* Each register is 32 bits; the DMA addresses are 64 bits. */
        vf_data->eqe_dma = vf_data->qm_eqc_dw[1];
        vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
        vf_data->eqe_dma |= vf_data->qm_eqc_dw[0];
        vf_data->aeqe_dma = vf_data->qm_aeqc_dw[1];
        vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
        vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[0];

        /* Get the SQC and CQC addresses through SQC_BT/CQC_BT */
        ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma);
        if (ret) {
                dev_err(dev, "failed to read SQC addr!\n");
                return -EINVAL;
        }

        ret = qm_get_cqc(vf_qm, &vf_data->cqc_dma);
        if (ret) {
                dev_err(dev, "failed to read CQC addr!\n");
                return -EINVAL;
        }

        migf->total_length = sizeof(struct acc_vf_data);
        return 0;
}

static struct hisi_acc_vf_core_device *hisi_acc_drvdata(struct pci_dev *pdev)
{
        struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);

        return container_of(core_device, struct hisi_acc_vf_core_device,
                            core_device);
}

/* Check the PF's RAS state and Function INT state */
static int
hisi_acc_check_int_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
        struct hisi_qm *vfqm = &hisi_acc_vdev->vf_qm;
        struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
        struct pci_dev *vf_pdev = hisi_acc_vdev->vf_dev;
        struct device *dev = &qm->pdev->dev;
        u32 state;

        /* Check RAS state */
        state = qm_check_reg_state(qm, QM_ABNORMAL_INT_STATUS);
        if (state) {
                dev_err(dev, "failed to check QM RAS state!\n");
                return -EBUSY;
        }

        /* Check Function Communication state between PF and VF */
        state = qm_check_reg_state(vfqm, QM_IFC_INT_STATUS);
        if (state) {
                dev_err(dev, "failed to check QM IFC INT state!\n");
                return -EBUSY;
        }
        state = qm_check_reg_state(vfqm, QM_IFC_INT_SET_V);
        if (state) {
                dev_err(dev, "failed to check QM IFC INT SET state!\n");
                return -EBUSY;
        }

        /* Check submodule task state */
        switch (vf_pdev->device) {
        case PCI_DEVICE_ID_HUAWEI_SEC_VF:
                state = qm_check_reg_state(qm, SEC_CORE_INT_STATUS);
                if (state) {
                        dev_err(dev, "failed to check QM SEC Core INT state!\n");
                        return -EBUSY;
                }
                return 0;
        case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
                state = qm_check_reg_state(qm, HPRE_HAC_INT_STATUS);
                if (state) {
                        dev_err(dev, "failed to check QM HPRE HAC INT state!\n");
                        return -EBUSY;
                }
                return 0;
        case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
                state = qm_check_reg_state(qm, HZIP_CORE_INT_STATUS);
                if (state) {
                        dev_err(dev, "failed to check QM ZIP Core INT state!\n");
                        return -EBUSY;
                }
                return 0;
        default:
                dev_err(dev, "failed to detect acc module type!\n");
                return -EINVAL;
        }
}

static void hisi_acc_vf_disable_fd(struct hisi_acc_vf_migration_file *migf)
{
        mutex_lock(&migf->lock);
        migf->disabled = true;
        migf->total_length = 0;
        migf->filp->f_pos = 0;
        mutex_unlock(&migf->lock);
}

static void hisi_acc_vf_disable_fds(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
        if (hisi_acc_vdev->resuming_migf) {
                hisi_acc_vf_disable_fd(hisi_acc_vdev->resuming_migf);
                fput(hisi_acc_vdev->resuming_migf->filp);
                hisi_acc_vdev->resuming_migf = NULL;
        }

        if (hisi_acc_vdev->saving_migf) {
                hisi_acc_vf_disable_fd(hisi_acc_vdev->saving_migf);
                fput(hisi_acc_vdev->saving_migf->filp);
                hisi_acc_vdev->saving_migf = NULL;
        }
}

/*
 * This function is called in all state_mutex unlock cases to
 * handle a 'deferred_reset' if one exists.
 */
static void
hisi_acc_vf_state_mutex_unlock(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
again:
        spin_lock(&hisi_acc_vdev->reset_lock);
        if (hisi_acc_vdev->deferred_reset) {
                hisi_acc_vdev->deferred_reset = false;
                spin_unlock(&hisi_acc_vdev->reset_lock);
                hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
                hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
                hisi_acc_vf_disable_fds(hisi_acc_vdev);
                goto again;
        }
        mutex_unlock(&hisi_acc_vdev->state_mutex);
        spin_unlock(&hisi_acc_vdev->reset_lock);
}

static void hisi_acc_vf_start_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
        struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;

        if (hisi_acc_vdev->vf_qm_state != QM_READY)
                return;

        /* Make sure the device is enabled */
        qm_dev_cmd_init(vf_qm);

        vf_qm_fun_reset(vf_qm);
}

static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
        struct device *dev = &hisi_acc_vdev->vf_dev->dev;
        struct hisi_acc_vf_migration_file *migf = hisi_acc_vdev->resuming_migf;
        int ret;

        /* Recover data to VF */
        ret = vf_qm_load_data(hisi_acc_vdev, migf);
        if (ret) {
                dev_err(dev, "failed to recover the VF!\n");
                return ret;
        }

        return 0;
}

static int hisi_acc_vf_release_file(struct inode *inode, struct file *filp)
{
        struct hisi_acc_vf_migration_file *migf = filp->private_data;

        hisi_acc_vf_disable_fd(migf);
        mutex_destroy(&migf->lock);
        kfree(migf);
        return 0;
}

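/*
 * write() handler for the resuming migration file: copy the device
 * state from userspace into migf->vf_data and validate the match data
 * once enough bytes have arrived.
 */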
static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *buf,
                                        size_t len, loff_t *pos)
{
        struct hisi_acc_vf_migration_file *migf = filp->private_data;
        u8 *vf_data = (u8 *)&migf->vf_data;
        loff_t requested_length;
        ssize_t done = 0;
        int ret;

        if (pos)
                return -ESPIPE;
        pos = &filp->f_pos;

        if (*pos < 0 ||
            check_add_overflow((loff_t)len, *pos, &requested_length))
                return -EINVAL;

        if (requested_length > sizeof(struct acc_vf_data))
                return -ENOMEM;

        mutex_lock(&migf->lock);
        if (migf->disabled) {
                done = -ENODEV;
                goto out_unlock;
        }

        ret = copy_from_user(vf_data + *pos, buf, len);
        if (ret) {
                done = -EFAULT;
                goto out_unlock;
        }
        *pos += len;
        done = len;
        migf->total_length += len;

        ret = vf_qm_check_match(migf->hisi_acc_vdev, migf);
        if (ret)
                done = -EFAULT;
out_unlock:
        mutex_unlock(&migf->lock);
        return done;
}

static const struct file_operations hisi_acc_vf_resume_fops = {
        .owner = THIS_MODULE,
        .write = hisi_acc_vf_resume_write,
        .release = hisi_acc_vf_release_file,
        .llseek = no_llseek,
};

static struct hisi_acc_vf_migration_file *
hisi_acc_vf_pci_resume(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
        struct hisi_acc_vf_migration_file *migf;

        migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
        if (!migf)
                return ERR_PTR(-ENOMEM);

        migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_resume_fops, migf,
                                        O_WRONLY);
        if (IS_ERR(migf->filp)) {
                int err = PTR_ERR(migf->filp);

                kfree(migf);
                return ERR_PTR(err);
        }

        stream_open(migf->filp->f_inode, migf->filp);
        mutex_init(&migf->lock);
        migf->hisi_acc_vdev = hisi_acc_vdev;
        return migf;
}

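/*
 * VFIO_MIG_GET_PRECOPY_INFO handler: report how many of the initial
 * (match data) bytes remain to be read while in the PRE_COPY state.
 */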
static long hisi_acc_vf_precopy_ioctl(struct file *filp,
                                      unsigned int cmd, unsigned long arg)
{
        struct hisi_acc_vf_migration_file *migf = filp->private_data;
        struct hisi_acc_vf_core_device *hisi_acc_vdev = migf->hisi_acc_vdev;
        loff_t *pos = &filp->f_pos;
        struct vfio_precopy_info info;
        unsigned long minsz;
        int ret;

        if (cmd != VFIO_MIG_GET_PRECOPY_INFO)
                return -ENOTTY;

        minsz = offsetofend(struct vfio_precopy_info, dirty_bytes);

        if (copy_from_user(&info, (void __user *)arg, minsz))
                return -EFAULT;
        if (info.argsz < minsz)
                return -EINVAL;

        mutex_lock(&hisi_acc_vdev->state_mutex);
        if (hisi_acc_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY) {
                mutex_unlock(&hisi_acc_vdev->state_mutex);
                return -EINVAL;
        }

        mutex_lock(&migf->lock);

        if (migf->disabled) {
                ret = -ENODEV;
                goto out;
        }

        if (*pos > migf->total_length) {
                ret = -EINVAL;
                goto out;
        }

        info.dirty_bytes = 0;
        info.initial_bytes = migf->total_length - *pos;

        ret = copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
out:
        mutex_unlock(&migf->lock);
        mutex_unlock(&hisi_acc_vdev->state_mutex);
        return ret;
}

static ssize_t hisi_acc_vf_save_read(struct file *filp, char __user *buf, size_t len,
                                     loff_t *pos)
{
        struct hisi_acc_vf_migration_file *migf = filp->private_data;
        ssize_t done = 0;
        int ret;

        if (pos)
                return -ESPIPE;
        pos = &filp->f_pos;

        mutex_lock(&migf->lock);
        if (*pos > migf->total_length) {
                done = -EINVAL;
                goto out_unlock;
        }

        if (migf->disabled) {
                done = -ENODEV;
                goto out_unlock;
        }

        len = min_t(size_t, migf->total_length - *pos, len);
        if (len) {
                u8 *vf_data = (u8 *)&migf->vf_data;

                ret = copy_to_user(buf, vf_data + *pos, len);
                if (ret) {
                        done = -EFAULT;
                        goto out_unlock;
                }
                *pos += len;
                done = len;
        }
out_unlock:
        mutex_unlock(&migf->lock);
        return done;
}

static const struct file_operations hisi_acc_vf_save_fops = {
        .owner = THIS_MODULE,
        .read = hisi_acc_vf_save_read,
        .unlocked_ioctl = hisi_acc_vf_precopy_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
        .release = hisi_acc_vf_release_file,
        .llseek = no_llseek,
};

static struct hisi_acc_vf_migration_file *
hisi_acc_open_saving_migf(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
        struct hisi_acc_vf_migration_file *migf;
        int ret;

        migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
        if (!migf)
                return ERR_PTR(-ENOMEM);

        migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_save_fops, migf,
                                        O_RDONLY);
        if (IS_ERR(migf->filp)) {
                int err = PTR_ERR(migf->filp);

                kfree(migf);
                return ERR_PTR(err);
        }

        stream_open(migf->filp->f_inode, migf->filp);
        mutex_init(&migf->lock);
        migf->hisi_acc_vdev = hisi_acc_vdev;

        ret = vf_qm_get_match_data(hisi_acc_vdev, &migf->vf_data);
        if (ret) {
                fput(migf->filp);
                return ERR_PTR(ret);
        }

        return migf;
}

static struct hisi_acc_vf_migration_file *
hisi_acc_vf_pre_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
        struct hisi_acc_vf_migration_file *migf;

        migf = hisi_acc_open_saving_migf(hisi_acc_vdev);
        if (IS_ERR(migf))
                return migf;

        migf->total_length = QM_MATCH_SIZE;
        return migf;
}

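/*
 * Enter STOP_COPY: save the full device state. @open indicates whether
 * a saving migration file still has to be created (userspace skipped
 * PRE_COPY) or whether the existing one can be reused.
 */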
static struct hisi_acc_vf_migration_file *
hisi_acc_vf_stop_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev, bool open)
{
        int ret;
        struct hisi_acc_vf_migration_file *migf = NULL;

        if (open) {
                /*
                 * Userspace didn't use PRECOPY support. Hence saving_migf
                 * is not opened yet.
                 */
                migf = hisi_acc_open_saving_migf(hisi_acc_vdev);
                if (IS_ERR(migf))
                        return migf;
        } else {
                migf = hisi_acc_vdev->saving_migf;
        }

        ret = vf_qm_state_save(hisi_acc_vdev, migf);
        if (ret)
                return ERR_PTR(ret);

        return open ? migf : NULL;
}

static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
        struct device *dev = &hisi_acc_vdev->vf_dev->dev;
        struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
        int ret;

        ret = vf_qm_func_stop(vf_qm);
        if (ret) {
                dev_err(dev, "failed to stop QM VF function!\n");
                return ret;
        }

        ret = hisi_acc_check_int_state(hisi_acc_vdev);
        if (ret) {
                dev_err(dev, "failed to check QM INT state!\n");
                return ret;
        }
        return 0;
}

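/*
 * Execute a single arc of the VFIO migration state machine and return
 * the migration file for the arcs that produce one.
 */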
static struct file *
hisi_acc_vf_set_device_state(struct hisi_acc_vf_core_device *hisi_acc_vdev,
                             u32 new)
{
        u32 cur = hisi_acc_vdev->mig_state;
        int ret;

        if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) {
                struct hisi_acc_vf_migration_file *migf;

                migf = hisi_acc_vf_pre_copy(hisi_acc_vdev);
                if (IS_ERR(migf))
                        return ERR_CAST(migf);
                get_file(migf->filp);
                hisi_acc_vdev->saving_migf = migf;
                return migf->filp;
        }

        if (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_STOP_COPY) {
                struct hisi_acc_vf_migration_file *migf;

                ret = hisi_acc_vf_stop_device(hisi_acc_vdev);
                if (ret)
                        return ERR_PTR(ret);

                migf = hisi_acc_vf_stop_copy(hisi_acc_vdev, false);
                if (IS_ERR(migf))
                        return ERR_CAST(migf);

                return NULL;
        }

        if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_STOP) {
                ret = hisi_acc_vf_stop_device(hisi_acc_vdev);
                if (ret)
                        return ERR_PTR(ret);
                return NULL;
        }

        if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
                struct hisi_acc_vf_migration_file *migf;

                migf = hisi_acc_vf_stop_copy(hisi_acc_vdev, true);
                if (IS_ERR(migf))
                        return ERR_CAST(migf);
                get_file(migf->filp);
                hisi_acc_vdev->saving_migf = migf;
                return migf->filp;
        }

        if (cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) {
                hisi_acc_vf_disable_fds(hisi_acc_vdev);
                return NULL;
        }

        if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
                struct hisi_acc_vf_migration_file *migf;

                migf = hisi_acc_vf_pci_resume(hisi_acc_vdev);
                if (IS_ERR(migf))
                        return ERR_CAST(migf);
                get_file(migf->filp);
                hisi_acc_vdev->resuming_migf = migf;
                return migf->filp;
        }

        if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
                ret = hisi_acc_vf_load_state(hisi_acc_vdev);
                if (ret)
                        return ERR_PTR(ret);
                hisi_acc_vf_disable_fds(hisi_acc_vdev);
                return NULL;
        }

        if (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) {
                hisi_acc_vf_disable_fds(hisi_acc_vdev);
                return NULL;
        }

        if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING) {
                hisi_acc_vf_start_device(hisi_acc_vdev);
                return NULL;
        }

        /*
         * vfio_mig_get_next_state() does not use arcs other than the above
         */
        WARN_ON(true);
        return ERR_PTR(-EINVAL);
}

static struct file *
hisi_acc_vfio_pci_set_device_state(struct vfio_device *vdev,
                                   enum vfio_device_mig_state new_state)
{
        struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev,
                        struct hisi_acc_vf_core_device, core_device.vdev);
        enum vfio_device_mig_state next_state;
        struct file *res = NULL;
        int ret;

        mutex_lock(&hisi_acc_vdev->state_mutex);
        while (new_state != hisi_acc_vdev->mig_state) {
                ret = vfio_mig_get_next_state(vdev,
                                              hisi_acc_vdev->mig_state,
                                              new_state, &next_state);
                if (ret) {
                        res = ERR_PTR(-EINVAL);
                        break;
                }

                res = hisi_acc_vf_set_device_state(hisi_acc_vdev, next_state);
                if (IS_ERR(res))
                        break;
                hisi_acc_vdev->mig_state = next_state;
                if (WARN_ON(res && new_state != hisi_acc_vdev->mig_state)) {
                        fput(res);
                        res = ERR_PTR(-EINVAL);
                        break;
                }
        }
        hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
        return res;
}

static int
hisi_acc_vfio_pci_get_data_size(struct vfio_device *vdev,
                                unsigned long *stop_copy_length)
{
        *stop_copy_length = sizeof(struct acc_vf_data);
        return 0;
}

static int
hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
                                   enum vfio_device_mig_state *curr_state)
{
        struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev,
                        struct hisi_acc_vf_core_device, core_device.vdev);

        mutex_lock(&hisi_acc_vdev->state_mutex);
        *curr_state = hisi_acc_vdev->mig_state;
        hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
        return 0;
}

static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
{
        struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);

        if (hisi_acc_vdev->core_device.vdev.migration_flags !=
                                VFIO_MIGRATION_STOP_COPY)
                return;

        /*
         * As the higher VFIO layers are holding locks across reset and using
         * those same locks with the mm_lock we need to prevent ABBA deadlock
         * with the state_mutex and mm_lock.
         * In case the state_mutex was taken already we defer the cleanup work
         * to the unlock flow of the other running context.
         */
        spin_lock(&hisi_acc_vdev->reset_lock);
        hisi_acc_vdev->deferred_reset = true;
        if (!mutex_trylock(&hisi_acc_vdev->state_mutex)) {
                spin_unlock(&hisi_acc_vdev->reset_lock);
                return;
        }
        spin_unlock(&hisi_acc_vdev->reset_lock);
        hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
}

static int hisi_acc_vf_qm_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
        struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
        struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
        struct pci_dev *vf_dev = vdev->pdev;

        /*
         * The ACC VF dev BAR2 region consists of both functional register
         * space and migration control register space. For migration to work,
         * we need access to both. Hence, we map the entire BAR2 region here.
         * But unnecessarily exposing the migration BAR region to the Guest
         * has the potential to prevent/corrupt the Guest migration. Hence,
         * we restrict access to the migration control space from the Guest
         * (please see the mmap/ioctl/read/write override functions).
         *
         * Please note that it is OK to expose the entire VF BAR if migration
         * is not supported or required, as this cannot affect the ACC PF
         * configurations.
         *
         * Also, the HiSilicon ACC VF devices supported by this driver on
         * HiSilicon hardware platforms are integrated end point devices
         * and the platform lacks the capability to perform any PCIe P2P
         * between these devices.
         */

        vf_qm->io_base =
                ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
                        pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
        if (!vf_qm->io_base)
                return -EIO;

        vf_qm->fun_type = QM_HW_VF;
        vf_qm->pdev = vf_dev;
        mutex_init(&vf_qm->mailbox_lock);

        return 0;
}

static struct hisi_qm *hisi_acc_get_pf_qm(struct pci_dev *pdev)
{
        struct hisi_qm *pf_qm;
        struct pci_driver *pf_driver;

        if (!pdev->is_virtfn)
                return NULL;

        switch (pdev->device) {
        case PCI_DEVICE_ID_HUAWEI_SEC_VF:
                pf_driver = hisi_sec_get_pf_driver();
                break;
        case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
                pf_driver = hisi_hpre_get_pf_driver();
                break;
        case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
                pf_driver = hisi_zip_get_pf_driver();
                break;
        default:
                return NULL;
        }

        if (!pf_driver)
                return NULL;

        pf_qm = pci_iov_get_pf_drvdata(pdev, pf_driver);

        return !IS_ERR(pf_qm) ? pf_qm : NULL;
}

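/*
 * For BAR2 accesses, reject reads/writes that fall in the migration
 * control half of the BAR and clamp @new_count to the functional half.
 */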
static int hisi_acc_pci_rw_access_check(struct vfio_device *core_vdev,
                                        size_t count, loff_t *ppos,
                                        size_t *new_count)
{
        unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
        struct vfio_pci_core_device *vdev =
                container_of(core_vdev, struct vfio_pci_core_device, vdev);

        if (index == VFIO_PCI_BAR2_REGION_INDEX) {
                loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
                resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;

                /* Check if access is for migration control region */
                if (pos >= end)
                        return -EINVAL;

                *new_count = min(count, (size_t)(end - pos));
        }

        return 0;
}

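/*
 * mmap() override: for BAR2, only allow mappings that fall entirely
 * within the functional (first) half of the BAR.
 */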
static int hisi_acc_vfio_pci_mmap(struct vfio_device *core_vdev,
                                  struct vm_area_struct *vma)
{
        struct vfio_pci_core_device *vdev =
                container_of(core_vdev, struct vfio_pci_core_device, vdev);
        unsigned int index;

        index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
        if (index == VFIO_PCI_BAR2_REGION_INDEX) {
                u64 req_len, pgoff, req_start;
                resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;

                req_len = vma->vm_end - vma->vm_start;
                pgoff = vma->vm_pgoff &
                        ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
                req_start = pgoff << PAGE_SHIFT;

                if (req_start + req_len > end)
                        return -EINVAL;
        }

        return vfio_pci_core_mmap(core_vdev, vma);
}

static ssize_t hisi_acc_vfio_pci_write(struct vfio_device *core_vdev,
                                       const char __user *buf, size_t count,
                                       loff_t *ppos)
{
        size_t new_count = count;
        int ret;

        ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
        if (ret)
                return ret;

        return vfio_pci_core_write(core_vdev, buf, new_count, ppos);
}

static ssize_t hisi_acc_vfio_pci_read(struct vfio_device *core_vdev,
                                      char __user *buf, size_t count,
                                      loff_t *ppos)
{
        size_t new_count = count;
        int ret;

        ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
        if (ret)
                return ret;

        return vfio_pci_core_read(core_vdev, buf, new_count, ppos);
}

static long hisi_acc_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
                                    unsigned long arg)
{
        if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
                struct vfio_pci_core_device *vdev =
                        container_of(core_vdev, struct vfio_pci_core_device, vdev);
                struct pci_dev *pdev = vdev->pdev;
                struct vfio_region_info info;
                unsigned long minsz;

                minsz = offsetofend(struct vfio_region_info, offset);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                if (info.index == VFIO_PCI_BAR2_REGION_INDEX) {
                        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);

                        /*
                         * The ACC VF dev BAR2 region consists of both
                         * functional register space and migration control
                         * register space. Report only the functional region
                         * to the Guest.
                         */
                        info.size = pci_resource_len(pdev, info.index) / 2;

                        info.flags = VFIO_REGION_INFO_FLAG_READ |
                                     VFIO_REGION_INFO_FLAG_WRITE |
                                     VFIO_REGION_INFO_FLAG_MMAP;

                        return copy_to_user((void __user *)arg, &info, minsz) ?
                                            -EFAULT : 0;
                }
        }
        return vfio_pci_core_ioctl(core_vdev, cmd, arg);
}

static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev)
{
        struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
                        struct hisi_acc_vf_core_device, core_device.vdev);
        struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
        int ret;

        ret = vfio_pci_core_enable(vdev);
        if (ret)
                return ret;

        if (core_vdev->mig_ops) {
                ret = hisi_acc_vf_qm_init(hisi_acc_vdev);
                if (ret) {
                        vfio_pci_core_disable(vdev);
                        return ret;
                }
                hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
        }

        vfio_pci_core_finish_enable(vdev);
        return 0;
}

static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
{
        struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
                        struct hisi_acc_vf_core_device, core_device.vdev);
        struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;

        iounmap(vf_qm->io_base);
        vfio_pci_core_close_device(core_vdev);
}

static const struct vfio_migration_ops hisi_acc_vfio_pci_migrn_state_ops = {
        .migration_set_state = hisi_acc_vfio_pci_set_device_state,
        .migration_get_state = hisi_acc_vfio_pci_get_device_state,
        .migration_get_data_size = hisi_acc_vfio_pci_get_data_size,
};

static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev)
{
        struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
                        struct hisi_acc_vf_core_device, core_device.vdev);
        struct pci_dev *pdev = to_pci_dev(core_vdev->dev);
        struct hisi_qm *pf_qm = hisi_acc_get_pf_qm(pdev);

        hisi_acc_vdev->vf_id = pci_iov_vf_id(pdev) + 1;
        hisi_acc_vdev->pf_qm = pf_qm;
        hisi_acc_vdev->vf_dev = pdev;
        mutex_init(&hisi_acc_vdev->state_mutex);

        core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY;
        core_vdev->mig_ops = &hisi_acc_vfio_pci_migrn_state_ops;

        return vfio_pci_core_init_dev(core_vdev);
}

static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
        .name = "hisi-acc-vfio-pci-migration",
        .init = hisi_acc_vfio_pci_migrn_init_dev,
        .release = vfio_pci_core_release_dev,
        .open_device = hisi_acc_vfio_pci_open_device,
        .close_device = hisi_acc_vfio_pci_close_device,
        .ioctl = hisi_acc_vfio_pci_ioctl,
        .device_feature = vfio_pci_core_ioctl_feature,
        .read = hisi_acc_vfio_pci_read,
        .write = hisi_acc_vfio_pci_write,
        .mmap = hisi_acc_vfio_pci_mmap,
        .request = vfio_pci_core_request,
        .match = vfio_pci_core_match,
        .bind_iommufd = vfio_iommufd_physical_bind,
        .unbind_iommufd = vfio_iommufd_physical_unbind,
        .attach_ioas = vfio_iommufd_physical_attach_ioas,
        .detach_ioas = vfio_iommufd_physical_detach_ioas,
};

static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
        .name = "hisi-acc-vfio-pci",
        .init = vfio_pci_core_init_dev,
        .release = vfio_pci_core_release_dev,
        .open_device = hisi_acc_vfio_pci_open_device,
        .close_device = vfio_pci_core_close_device,
        .ioctl = vfio_pci_core_ioctl,
        .device_feature = vfio_pci_core_ioctl_feature,
        .read = vfio_pci_core_read,
        .write = vfio_pci_core_write,
        .mmap = vfio_pci_core_mmap,
        .request = vfio_pci_core_request,
        .match = vfio_pci_core_match,
        .bind_iommufd = vfio_iommufd_physical_bind,
        .unbind_iommufd = vfio_iommufd_physical_unbind,
        .attach_ioas = vfio_iommufd_physical_attach_ioas,
        .detach_ioas = vfio_iommufd_physical_detach_ioas,
};

static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct hisi_acc_vf_core_device *hisi_acc_vdev;
        const struct vfio_device_ops *ops = &hisi_acc_vfio_pci_ops;
        struct hisi_qm *pf_qm;
        int vf_id;
        int ret;

        pf_qm = hisi_acc_get_pf_qm(pdev);
        if (pf_qm && pf_qm->ver >= QM_HW_V3) {
                vf_id = pci_iov_vf_id(pdev);
                if (vf_id >= 0)
                        ops = &hisi_acc_vfio_pci_migrn_ops;
                else
                        pci_warn(pdev, "migration support failed, continue with generic interface\n");
        }

        hisi_acc_vdev = vfio_alloc_device(hisi_acc_vf_core_device,
                                          core_device.vdev, &pdev->dev, ops);
        if (IS_ERR(hisi_acc_vdev))
                return PTR_ERR(hisi_acc_vdev);

        dev_set_drvdata(&pdev->dev, &hisi_acc_vdev->core_device);
        ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device);
        if (ret)
                goto out_put_vdev;
        return 0;

out_put_vdev:
        vfio_put_device(&hisi_acc_vdev->core_device.vdev);
        return ret;
}

static void hisi_acc_vfio_pci_remove(struct pci_dev *pdev)
{
        struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);

        vfio_pci_core_unregister_device(&hisi_acc_vdev->core_device);
        vfio_put_device(&hisi_acc_vdev->core_device.vdev);
}

static const struct pci_device_id hisi_acc_vfio_pci_table[] = {
        { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
        { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
        { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) },
        { }
};

MODULE_DEVICE_TABLE(pci, hisi_acc_vfio_pci_table);

static const struct pci_error_handlers hisi_acc_vf_err_handlers = {
        .reset_done = hisi_acc_vf_pci_aer_reset_done,
        .error_detected = vfio_pci_core_aer_err_detected,
};

static struct pci_driver hisi_acc_vfio_pci_driver = {
        .name = KBUILD_MODNAME,
        .id_table = hisi_acc_vfio_pci_table,
        .probe = hisi_acc_vfio_pci_probe,
        .remove = hisi_acc_vfio_pci_remove,
        .err_handler = &hisi_acc_vf_err_handlers,
        .driver_managed_dma = true,
};

module_pci_driver(hisi_acc_vfio_pci_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Liu Longfang <liulongfang@huawei.com>");
MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
MODULE_DESCRIPTION("HiSilicon VFIO PCI - VFIO PCI driver with live migration support for HiSilicon ACC device family");