1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * SolidRun DPU driver for control plane
4 *
5 * Copyright (C) 2022-2023 SolidRun
6 *
7 * Author: Alvaro Karsz <alvaro.karsz@solid-run.com>
8 *
9 */
10 #include <linux/iopoll.h>
11
12 #include "snet_vdpa.h"
13
14 /* SNET DPU device ID */
15 #define SNET_DEVICE_ID 0x1000
16 /* SNET signature */
17 #define SNET_SIGNATURE 0xD0D06363
18 /* Max. config version that we can work with */
19 #define SNET_CFG_VERSION 0x2
20 /* Queue align */
21 #define SNET_QUEUE_ALIGNMENT PAGE_SIZE
22 /* Kick value to notify that new data is available */
23 #define SNET_KICK_VAL 0x1
24 #define SNET_CONFIG_OFF 0x0
25 /* How long we are willing to wait for a SNET device */
26 #define SNET_DETECT_TIMEOUT 5000000
27 /* How long should we wait for the DPU to read our config */
28 #define SNET_READ_CFG_TIMEOUT 3000000
29 /* Size of configs written to the DPU */
30 #define SNET_GENERAL_CFG_LEN 36
31 #define SNET_GENERAL_CFG_VQ_LEN 40
32
vdpa_to_snet(struct vdpa_device * vdpa)33 static struct snet *vdpa_to_snet(struct vdpa_device *vdpa)
34 {
35 return container_of(vdpa, struct snet, vdpa);
36 }
37
snet_cfg_irq_hndlr(int irq,void * data)38 static irqreturn_t snet_cfg_irq_hndlr(int irq, void *data)
39 {
40 struct snet *snet = data;
41 /* Call callback if any */
42 if (likely(snet->cb.callback))
43 return snet->cb.callback(snet->cb.private);
44
45 return IRQ_HANDLED;
46 }
47
snet_vq_irq_hndlr(int irq,void * data)48 static irqreturn_t snet_vq_irq_hndlr(int irq, void *data)
49 {
50 struct snet_vq *vq = data;
51 /* Call callback if any */
52 if (likely(vq->cb.callback))
53 return vq->cb.callback(vq->cb.private);
54
55 return IRQ_HANDLED;
56 }
57
snet_free_irqs(struct snet * snet)58 static void snet_free_irqs(struct snet *snet)
59 {
60 struct psnet *psnet = snet->psnet;
61 struct pci_dev *pdev;
62 u32 i;
63
64 /* Which Device allcoated the IRQs? */
65 if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF))
66 pdev = snet->pdev->physfn;
67 else
68 pdev = snet->pdev;
69
70 /* Free config's IRQ */
71 if (snet->cfg_irq != -1) {
72 devm_free_irq(&pdev->dev, snet->cfg_irq, snet);
73 snet->cfg_irq = -1;
74 }
75 /* Free VQ IRQs */
76 for (i = 0; i < snet->cfg->vq_num; i++) {
77 if (snet->vqs[i] && snet->vqs[i]->irq != -1) {
78 devm_free_irq(&pdev->dev, snet->vqs[i]->irq, snet->vqs[i]);
79 snet->vqs[i]->irq = -1;
80 }
81 }
82
83 /* IRQ vectors are freed when the pci remove callback is called */
84 }
85
/* Record the DMA addresses of a VQ's rings in the vqueue struct; they are
 * written to the DPU later by snet_write_conf().
 */
static int snet_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_area,
			       u64 driver_area, u64 device_area)
{
	struct snet_vq *vq = vdpa_to_snet(vdev)->vqs[idx];

	vq->desc_area = desc_area;
	vq->driver_area = driver_area;
	vq->device_area = device_area;

	return 0;
}
97
/* Cache the negotiated queue size for VQ @idx. */
static void snet_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
{
	vdpa_to_snet(vdev)->vqs[idx]->num = num;
}
104
/* Notify the DPU that new data is available on VQ @idx. */
static void snet_kick_vq(struct vdpa_device *vdev, u16 idx)
{
	struct snet_vq *vq = vdpa_to_snet(vdev)->vqs[idx];

	/* Kicks are meaningless until the VQ is marked ready */
	if (unlikely(!vq->ready))
		return;

	iowrite32(SNET_KICK_VAL, vq->kick_ptr);
}
114
/* Kick a VQ with an extra 16-bit payload. The low 16 bits of @data select
 * the VQ; the high 16 bits are forwarded to the DPU along with the kick.
 */
static void snet_kick_vq_with_data(struct vdpa_device *vdev, u32 data)
{
	u16 idx = data & 0xFFFF;
	struct snet_vq *vq = vdpa_to_snet(vdev)->vqs[idx];

	/* Kicks are meaningless until the VQ is marked ready */
	if (unlikely(!vq->ready))
		return;

	iowrite32((data & 0xFFFF0000) | SNET_KICK_VAL, vq->kick_ptr);
}
126
/* Register the callback invoked from the VQ's IRQ handler. */
static void snet_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_callback *cb)
{
	struct snet_vq *vq = vdpa_to_snet(vdev)->vqs[idx];

	vq->cb.callback = cb->callback;
	vq->cb.private = cb->private;
}
134
/* Mark VQ @idx as ready (or not); kicks are ignored while not ready. */
static void snet_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready)
{
	vdpa_to_snet(vdev)->vqs[idx]->ready = ready;
}
141
snet_get_vq_ready(struct vdpa_device * vdev,u16 idx)142 static bool snet_get_vq_ready(struct vdpa_device *vdev, u16 idx)
143 {
144 struct snet *snet = vdpa_to_snet(vdev);
145
146 return snet->vqs[idx]->ready;
147 }
148
snet_vq_state_is_initial(struct snet * snet,const struct vdpa_vq_state * state)149 static bool snet_vq_state_is_initial(struct snet *snet, const struct vdpa_vq_state *state)
150 {
151 if (SNET_HAS_FEATURE(snet, VIRTIO_F_RING_PACKED)) {
152 const struct vdpa_vq_state_packed *p = &state->packed;
153
154 if (p->last_avail_counter == 1 && p->last_used_counter == 1 &&
155 p->last_avail_idx == 0 && p->last_used_idx == 0)
156 return true;
157 } else {
158 const struct vdpa_vq_state_split *s = &state->split;
159
160 if (s->avail_index == 0)
161 return true;
162 }
163
164 return false;
165 }
166
/* Save a VQ state to be sent to the DPU.
 * Config version 2+ accepts any state; older configs can only start from
 * the DPU's initial state, so anything else is rejected.
 */
static int snet_set_vq_state(struct vdpa_device *vdev, u16 idx, const struct vdpa_vq_state *state)
{
	struct snet *snet = vdpa_to_snet(vdev);

	if (SNET_CFG_VER(snet, 2)) {
		memcpy(&snet->vqs[idx]->vq_state, state, sizeof(*state));
		return 0;
	}

	return snet_vq_state_is_initial(snet, state) ? 0 : -EOPNOTSUPP;
}
185
/* The live VQ state is owned by the DPU; fetch it over the control channel. */
static int snet_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa_vq_state *state)
{
	return snet_read_vq_state(vdpa_to_snet(vdev), idx, state);
}
192
/* Return the IRQ number of VQ @idx, or -1 if none is requested. */
static int snet_get_vq_irq(struct vdpa_device *vdev, u16 idx)
{
	return vdpa_to_snet(vdev)->vqs[idx]->irq;
}
199
snet_get_vq_align(struct vdpa_device * vdev)200 static u32 snet_get_vq_align(struct vdpa_device *vdev)
201 {
202 return (u32)SNET_QUEUE_ALIGNMENT;
203 }
204
/* Tear the device down: destroy the DPU-side instance if it was started,
 * clear all per-VQ software state and callbacks, free the IRQs and zero
 * the virtio status. Always returns 0 so the vDPA reset succeeds; an
 * incomplete DPU teardown is only logged as a warning.
 */
static int snet_reset_dev(struct snet *snet)
{
	struct pci_dev *pdev = snet->pdev;
	int ret = 0;
	u32 i;

	/* If status is 0, nothing to do */
	if (!snet->status)
		return 0;

	/* If DPU started, destroy it */
	if (snet->status & VIRTIO_CONFIG_S_DRIVER_OK)
		ret = snet_destroy_dev(snet);

	/* Clear VQs */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		if (!snet->vqs[i])
			continue;
		snet->vqs[i]->cb.callback = NULL;
		snet->vqs[i]->cb.private = NULL;
		snet->vqs[i]->desc_area = 0;
		snet->vqs[i]->device_area = 0;
		snet->vqs[i]->driver_area = 0;
		snet->vqs[i]->ready = false;
	}

	/* Clear config callback */
	snet->cb.callback = NULL;
	snet->cb.private = NULL;
	/* Free IRQs */
	snet_free_irqs(snet);
	/* Reset status; the config must be re-written before the next start */
	snet->status = 0;
	snet->dpu_ready = false;

	if (ret)
		SNET_WARN(pdev, "Incomplete reset to SNET[%u] device, err: %d\n", snet->sid, ret);
	else
		SNET_DBG(pdev, "Reset SNET[%u] device\n", snet->sid);

	return 0;
}
247
/* vDPA reset op - delegates to the common reset helper. */
static int snet_reset(struct vdpa_device *vdev)
{
	return snet_reset_dev(vdpa_to_snet(vdev));
}
254
snet_get_config_size(struct vdpa_device * vdev)255 static size_t snet_get_config_size(struct vdpa_device *vdev)
256 {
257 struct snet *snet = vdpa_to_snet(vdev);
258
259 return (size_t)snet->cfg->cfg_size;
260 }
261
snet_get_features(struct vdpa_device * vdev)262 static u64 snet_get_features(struct vdpa_device *vdev)
263 {
264 struct snet *snet = vdpa_to_snet(vdev);
265
266 return snet->cfg->features;
267 }
268
/* Negotiate features: only bits supported by both driver and DPU are kept. */
static int snet_set_drv_features(struct vdpa_device *vdev, u64 features)
{
	struct snet *snet = vdpa_to_snet(vdev);

	snet->negotiated_features = snet->cfg->features & features;

	return 0;
}
276
snet_get_drv_features(struct vdpa_device * vdev)277 static u64 snet_get_drv_features(struct vdpa_device *vdev)
278 {
279 struct snet *snet = vdpa_to_snet(vdev);
280
281 return snet->negotiated_features;
282 }
283
snet_get_vq_num_max(struct vdpa_device * vdev)284 static u16 snet_get_vq_num_max(struct vdpa_device *vdev)
285 {
286 struct snet *snet = vdpa_to_snet(vdev);
287
288 return (u16)snet->cfg->vq_size;
289 }
290
snet_set_config_cb(struct vdpa_device * vdev,struct vdpa_callback * cb)291 static void snet_set_config_cb(struct vdpa_device *vdev, struct vdpa_callback *cb)
292 {
293 struct snet *snet = vdpa_to_snet(vdev);
294
295 snet->cb.callback = cb->callback;
296 snet->cb.private = cb->private;
297 }
298
snet_get_device_id(struct vdpa_device * vdev)299 static u32 snet_get_device_id(struct vdpa_device *vdev)
300 {
301 struct snet *snet = vdpa_to_snet(vdev);
302
303 return snet->cfg->virtio_id;
304 }
305
snet_get_vendor_id(struct vdpa_device * vdev)306 static u32 snet_get_vendor_id(struct vdpa_device *vdev)
307 {
308 return (u32)PCI_VENDOR_ID_SOLIDRUN;
309 }
310
snet_get_status(struct vdpa_device * vdev)311 static u8 snet_get_status(struct vdpa_device *vdev)
312 {
313 struct snet *snet = vdpa_to_snet(vdev);
314
315 return snet->status;
316 }
317
/* Write this device's configuration to the DPU over the shared BAR and
 * wait for the DPU to acknowledge it.
 * Returns true on success and false on timeout — note the int return type
 * is only ever consumed as a boolean by the caller (snet_set_status()).
 */
static int snet_write_conf(struct snet *snet)
{
	u32 off, i, tmp;
	int ret;

	/* No need to write the config twice */
	if (snet->dpu_ready)
		return true;

	/* Snet data layout:
	 *
	 * General data: SNET_GENERAL_CFG_LEN bytes long
	 * 0              0x4       0x8        0xC                0x10      0x14       0x1C   0x24
	 * | MAGIC NUMBER | CFG VER | SNET SID | NUMBER OF QUEUES | IRQ IDX | FEATURES | RSVD |
	 *
	 * For every VQ: SNET_GENERAL_CFG_VQ_LEN bytes long
	 * 0                        0x4         0x8
	 * | VQ SID AND QUEUE SIZE  | IRQ Index |
	 * |        DESC AREA                   |
	 * |        DEVICE AREA                 |
	 * |        DRIVER AREA                 |
	 * | VQ STATE (CFG 2+)      | RSVD      |
	 *
	 * The magic number must be written last; it is the DPU's indication
	 * that the data is ready.
	 */

	/* Init offset */
	off = snet->psnet->cfg.host_cfg_off;

	/* Skip the magic number slot for now; it is written last */
	off += 4;
	snet_write32(snet, off, snet->psnet->negotiated_cfg_ver);
	off += 4;
	snet_write32(snet, off, snet->sid);
	off += 4;
	snet_write32(snet, off, snet->cfg->vq_num);
	off += 4;
	snet_write32(snet, off, snet->cfg_irq_idx);
	off += 4;
	snet_write64(snet, off, snet->negotiated_features);
	off += 8;
	/* Ignore reserved */
	off += 8;
	/* Write VQs */
	for (i = 0 ; i < snet->cfg->vq_num ; i++) {
		/* High 16 bits: VQ serial ID, low 16 bits: queue size */
		tmp = (i << 16) | (snet->vqs[i]->num & 0xFFFF);
		snet_write32(snet, off, tmp);
		off += 4;
		snet_write32(snet, off, snet->vqs[i]->irq_idx);
		off += 4;
		snet_write64(snet, off, snet->vqs[i]->desc_area);
		off += 8;
		snet_write64(snet, off, snet->vqs[i]->device_area);
		off += 8;
		snet_write64(snet, off, snet->vqs[i]->driver_area);
		off += 8;
		/* Write VQ state if config version is 2+ */
		if (SNET_CFG_VER(snet, 2))
			snet_write32(snet, off, *(u32 *)&snet->vqs[i]->vq_state);
		/* The state slot is part of the layout even for older configs */
		off += 4;

		/* Ignore reserved */
		off += 4;
	}

	/* Write magic number - data is ready */
	snet_write32(snet, snet->psnet->cfg.host_cfg_off, SNET_SIGNATURE);

	/* The DPU will ACK the config by clearing the signature */
	ret = readx_poll_timeout(ioread32, snet->bar + snet->psnet->cfg.host_cfg_off,
				 tmp, !tmp, 10, SNET_READ_CFG_TIMEOUT);
	if (ret) {
		SNET_ERR(snet->pdev, "Timeout waiting for the DPU to read the config\n");
		return false;
	}

	/* set DPU flag */
	snet->dpu_ready = true;

	return true;
}
399
/* Request the config-change IRQ and one IRQ per VQ from @pdev's MSI-X
 * vectors (@pdev is either the PF or this VF, depending on the DPU flags).
 * On partial failure, IRQs requested so far remain recorded in the snet
 * struct and are released by snet_free_irqs() in the caller's error path.
 */
static int snet_request_irqs(struct pci_dev *pdev, struct snet *snet)
{
	int ret, i, irq;

	/* Request config IRQ */
	irq = pci_irq_vector(pdev, snet->cfg_irq_idx);
	ret = devm_request_irq(&pdev->dev, irq, snet_cfg_irq_hndlr, 0,
			       snet->cfg_irq_name, snet);
	if (ret) {
		SNET_ERR(pdev, "Failed to request IRQ\n");
		return ret;
	}
	snet->cfg_irq = irq;

	/* Request IRQ for every VQ */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		irq = pci_irq_vector(pdev, snet->vqs[i]->irq_idx);
		ret = devm_request_irq(&pdev->dev, irq, snet_vq_irq_hndlr, 0,
				       snet->vqs[i]->irq_name, snet->vqs[i]);
		if (ret) {
			SNET_ERR(pdev, "Failed to request IRQ\n");
			return ret;
		}
		snet->vqs[i]->irq = irq;
	}
	return 0;
}
427
/* vDPA set_status op. On the transition into DRIVER_OK this requests the
 * IRQs and writes the device config to the DPU, which actually creates the
 * device. On failure the FAILED bit is set in the saved status instead.
 */
static void snet_set_status(struct vdpa_device *vdev, u8 status)
{
	struct snet *snet = vdpa_to_snet(vdev);
	struct psnet *psnet = snet->psnet;
	struct pci_dev *pdev = snet->pdev;
	int ret;
	bool pf_irqs;

	/* Nothing changed - nothing to do */
	if (status == snet->status)
		return;

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(snet->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		/* Request IRQs from whichever function owns the MSI-X vectors */
		pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF);
		ret = snet_request_irqs(pf_irqs ? pdev->physfn : pdev, snet);
		if (ret)
			goto set_err;

		/* Write config to the DPU; on failure drop the IRQs again */
		if (snet_write_conf(snet)) {
			SNET_INFO(pdev, "Create SNET[%u] device\n", snet->sid);
		} else {
			snet_free_irqs(snet);
			goto set_err;
		}
	}

	/* Save the new status */
	snet->status = status;
	return;

set_err:
	snet->status |= VIRTIO_CONFIG_S_FAILED;
}
463
snet_get_config(struct vdpa_device * vdev,unsigned int offset,void * buf,unsigned int len)464 static void snet_get_config(struct vdpa_device *vdev, unsigned int offset,
465 void *buf, unsigned int len)
466 {
467 struct snet *snet = vdpa_to_snet(vdev);
468 void __iomem *cfg_ptr = snet->cfg->virtio_cfg + offset;
469 u8 *buf_ptr = buf;
470 u32 i;
471
472 /* check for offset error */
473 if (offset + len > snet->cfg->cfg_size)
474 return;
475
476 /* Write into buffer */
477 for (i = 0; i < len; i++)
478 *buf_ptr++ = ioread8(cfg_ptr + i);
479 }
480
snet_set_config(struct vdpa_device * vdev,unsigned int offset,const void * buf,unsigned int len)481 static void snet_set_config(struct vdpa_device *vdev, unsigned int offset,
482 const void *buf, unsigned int len)
483 {
484 struct snet *snet = vdpa_to_snet(vdev);
485 void __iomem *cfg_ptr = snet->cfg->virtio_cfg + offset;
486 const u8 *buf_ptr = buf;
487 u32 i;
488
489 /* check for offset error */
490 if (offset + len > snet->cfg->cfg_size)
491 return;
492
493 /* Write into PCI BAR */
494 for (i = 0; i < len; i++)
495 iowrite8(*buf_ptr++, cfg_ptr + i);
496 }
497
snet_suspend(struct vdpa_device * vdev)498 static int snet_suspend(struct vdpa_device *vdev)
499 {
500 struct snet *snet = vdpa_to_snet(vdev);
501 int ret;
502
503 ret = snet_suspend_dev(snet);
504 if (ret)
505 SNET_ERR(snet->pdev, "SNET[%u] suspend failed, err: %d\n", snet->sid, ret);
506 else
507 SNET_DBG(snet->pdev, "Suspend SNET[%u] device\n", snet->sid);
508
509 return ret;
510 }
511
snet_resume(struct vdpa_device * vdev)512 static int snet_resume(struct vdpa_device *vdev)
513 {
514 struct snet *snet = vdpa_to_snet(vdev);
515 int ret;
516
517 ret = snet_resume_dev(snet);
518 if (ret)
519 SNET_ERR(snet->pdev, "SNET[%u] resume failed, err: %d\n", snet->sid, ret);
520 else
521 SNET_DBG(snet->pdev, "Resume SNET[%u] device\n", snet->sid);
522
523 return ret;
524 }
525
/* vDPA ops table registered with the vDPA core for every snet VF device. */
static const struct vdpa_config_ops snet_config_ops = {
	.set_vq_address = snet_set_vq_address,
	.set_vq_num = snet_set_vq_num,
	.kick_vq = snet_kick_vq,
	.kick_vq_with_data = snet_kick_vq_with_data,
	.set_vq_cb = snet_set_vq_cb,
	.set_vq_ready = snet_set_vq_ready,
	.get_vq_ready = snet_get_vq_ready,
	.set_vq_state = snet_set_vq_state,
	.get_vq_state = snet_get_vq_state,
	.get_vq_irq = snet_get_vq_irq,
	.get_vq_align = snet_get_vq_align,
	.reset = snet_reset,
	.get_config_size = snet_get_config_size,
	.get_device_features = snet_get_features,
	.set_driver_features = snet_set_drv_features,
	.get_driver_features = snet_get_drv_features,
	/* Queue size is fixed on the DPU, so min and max report the same value */
	.get_vq_num_min = snet_get_vq_num_max,
	.get_vq_num_max = snet_get_vq_num_max,
	.set_config_cb = snet_set_config_cb,
	.get_device_id = snet_get_device_id,
	.get_vendor_id = snet_get_vendor_id,
	.get_status = snet_get_status,
	.set_status = snet_set_status,
	.get_config = snet_get_config,
	.set_config = snet_set_config,
	.suspend = snet_suspend,
	.resume = snet_resume,
};
555
psnet_open_pf_bar(struct pci_dev * pdev,struct psnet * psnet)556 static int psnet_open_pf_bar(struct pci_dev *pdev, struct psnet *psnet)
557 {
558 char name[50];
559 int ret, i, mask = 0;
560 /* We don't know which BAR will be used to communicate..
561 * We will map every bar with len > 0.
562 *
563 * Later, we will discover the BAR and unmap all other BARs.
564 */
565 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
566 if (pci_resource_len(pdev, i))
567 mask |= (1 << i);
568 }
569
570 /* No BAR can be used.. */
571 if (!mask) {
572 SNET_ERR(pdev, "Failed to find a PCI BAR\n");
573 return -ENODEV;
574 }
575
576 snprintf(name, sizeof(name), "psnet[%s]-bars", pci_name(pdev));
577 ret = pcim_iomap_regions(pdev, mask, name);
578 if (ret) {
579 SNET_ERR(pdev, "Failed to request and map PCI BARs\n");
580 return ret;
581 }
582
583 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
584 if (mask & (1 << i))
585 psnet->bars[i] = pcim_iomap_table(pdev)[i];
586 }
587
588 return 0;
589 }
590
snet_open_vf_bar(struct pci_dev * pdev,struct snet * snet)591 static int snet_open_vf_bar(struct pci_dev *pdev, struct snet *snet)
592 {
593 char name[50];
594 int ret;
595
596 snprintf(name, sizeof(name), "snet[%s]-bar", pci_name(pdev));
597 /* Request and map BAR */
598 ret = pcim_iomap_regions(pdev, BIT(snet->psnet->cfg.vf_bar), name);
599 if (ret) {
600 SNET_ERR(pdev, "Failed to request and map PCI BAR for a VF\n");
601 return ret;
602 }
603
604 snet->bar = pcim_iomap_table(pdev)[snet->psnet->cfg.vf_bar];
605
606 return 0;
607 }
608
snet_free_cfg(struct snet_cfg * cfg)609 static void snet_free_cfg(struct snet_cfg *cfg)
610 {
611 u32 i;
612
613 if (!cfg->devs)
614 return;
615
616 /* Free devices */
617 for (i = 0; i < cfg->devices_num; i++) {
618 if (!cfg->devs[i])
619 break;
620
621 kfree(cfg->devs[i]);
622 }
623 /* Free pointers to devices */
624 kfree(cfg->devs);
625 }
626
627 /* Detect which BAR is used for communication with the device. */
psnet_detect_bar(struct psnet * psnet,u32 off)628 static int psnet_detect_bar(struct psnet *psnet, u32 off)
629 {
630 unsigned long exit_time;
631 int i;
632
633 exit_time = jiffies + usecs_to_jiffies(SNET_DETECT_TIMEOUT);
634
635 /* SNET DPU will write SNET's signature when the config is ready. */
636 while (time_before(jiffies, exit_time)) {
637 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
638 /* Is this BAR mapped? */
639 if (!psnet->bars[i])
640 continue;
641
642 if (ioread32(psnet->bars[i] + off) == SNET_SIGNATURE)
643 return i;
644 }
645 usleep_range(1000, 10000);
646 }
647
648 return -ENODEV;
649 }
650
psnet_unmap_unused_bars(struct pci_dev * pdev,struct psnet * psnet)651 static void psnet_unmap_unused_bars(struct pci_dev *pdev, struct psnet *psnet)
652 {
653 int i, mask = 0;
654
655 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
656 if (psnet->bars[i] && i != psnet->barno)
657 mask |= (1 << i);
658 }
659
660 if (mask)
661 pcim_iounmap_regions(pdev, mask);
662 }
663
/* Read SNET config from PCI BAR.
 * Detects which BAR the DPU uses, negotiates a config version, then loads
 * the global config followed by one per-device config for every VF. The
 * sequence of reads below mirrors the DPU's config layout exactly, so the
 * order must not change.
 */
static int psnet_read_cfg(struct pci_dev *pdev, struct psnet *psnet)
{
	struct snet_cfg *cfg = &psnet->cfg;
	u32 i, off;
	int barno;

	/* Move to where the config starts */
	off = SNET_CONFIG_OFF;

	/* Find BAR used for communication */
	barno = psnet_detect_bar(psnet, off);
	if (barno < 0) {
		SNET_ERR(pdev, "SNET config is not ready.\n");
		return barno;
	}

	/* Save used BAR number and unmap all other BARs */
	psnet->barno = barno;
	SNET_DBG(pdev, "Using BAR number %d\n", barno);

	psnet_unmap_unused_bars(pdev, psnet);

	/* load config from BAR */
	cfg->key = psnet_read32(psnet, off);
	off += 4;
	cfg->cfg_size = psnet_read32(psnet, off);
	off += 4;
	cfg->cfg_ver = psnet_read32(psnet, off);
	off += 4;
	/* The negotiated config version is the lower one between this driver's config
	 * and the DPU's.
	 */
	psnet->negotiated_cfg_ver = min_t(u32, cfg->cfg_ver, SNET_CFG_VERSION);
	SNET_DBG(pdev, "SNET config version %u\n", psnet->negotiated_cfg_ver);

	cfg->vf_num = psnet_read32(psnet, off);
	off += 4;
	cfg->vf_bar = psnet_read32(psnet, off);
	off += 4;
	cfg->host_cfg_off = psnet_read32(psnet, off);
	off += 4;
	cfg->max_size_host_cfg = psnet_read32(psnet, off);
	off += 4;
	cfg->virtio_cfg_off = psnet_read32(psnet, off);
	off += 4;
	cfg->kick_off = psnet_read32(psnet, off);
	off += 4;
	cfg->hwmon_off = psnet_read32(psnet, off);
	off += 4;
	cfg->ctrl_off = psnet_read32(psnet, off);
	off += 4;
	cfg->flags = psnet_read32(psnet, off);
	off += 4;
	/* Ignore Reserved */
	off += sizeof(cfg->rsvd);

	cfg->devices_num = psnet_read32(psnet, off);
	off += 4;
	/* Allocate memory to hold pointer to the devices */
	cfg->devs = kcalloc(cfg->devices_num, sizeof(void *), GFP_KERNEL);
	if (!cfg->devs)
		return -ENOMEM;

	/* Load device configuration from BAR */
	for (i = 0; i < cfg->devices_num; i++) {
		cfg->devs[i] = kzalloc(sizeof(*cfg->devs[i]), GFP_KERNEL);
		if (!cfg->devs[i]) {
			snet_free_cfg(cfg);
			return -ENOMEM;
		}
		/* Read device config */
		cfg->devs[i]->virtio_id = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vq_num = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vq_size = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vfid = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->features = psnet_read64(psnet, off);
		off += 8;
		/* Ignore Reserved */
		off += sizeof(cfg->devs[i]->rsvd);

		cfg->devs[i]->cfg_size = psnet_read32(psnet, off);
		off += 4;

		/* Is the config written to the DPU going to be too big? */
		if (SNET_GENERAL_CFG_LEN + SNET_GENERAL_CFG_VQ_LEN * cfg->devs[i]->vq_num >
		    cfg->max_size_host_cfg) {
			SNET_ERR(pdev, "Failed to read SNET config, the config is too big..\n");
			snet_free_cfg(cfg);
			return -EINVAL;
		}
	}
	return 0;
}
762
psnet_alloc_irq_vector(struct pci_dev * pdev,struct psnet * psnet)763 static int psnet_alloc_irq_vector(struct pci_dev *pdev, struct psnet *psnet)
764 {
765 int ret = 0;
766 u32 i, irq_num = 0;
767
768 /* Let's count how many IRQs we need, 1 for every VQ + 1 for config change */
769 for (i = 0; i < psnet->cfg.devices_num; i++)
770 irq_num += psnet->cfg.devs[i]->vq_num + 1;
771
772 ret = pci_alloc_irq_vectors(pdev, irq_num, irq_num, PCI_IRQ_MSIX);
773 if (ret != irq_num) {
774 SNET_ERR(pdev, "Failed to allocate IRQ vectors\n");
775 return ret;
776 }
777 SNET_DBG(pdev, "Allocated %u IRQ vectors from physical function\n", irq_num);
778
779 return 0;
780 }
781
snet_alloc_irq_vector(struct pci_dev * pdev,struct snet_dev_cfg * snet_cfg)782 static int snet_alloc_irq_vector(struct pci_dev *pdev, struct snet_dev_cfg *snet_cfg)
783 {
784 int ret = 0;
785 u32 irq_num;
786
787 /* We want 1 IRQ for every VQ + 1 for config change events */
788 irq_num = snet_cfg->vq_num + 1;
789
790 ret = pci_alloc_irq_vectors(pdev, irq_num, irq_num, PCI_IRQ_MSIX);
791 if (ret <= 0) {
792 SNET_ERR(pdev, "Failed to allocate IRQ vectors\n");
793 return ret;
794 }
795
796 return 0;
797 }
798
snet_free_vqs(struct snet * snet)799 static void snet_free_vqs(struct snet *snet)
800 {
801 u32 i;
802
803 if (!snet->vqs)
804 return;
805
806 for (i = 0 ; i < snet->cfg->vq_num ; i++) {
807 if (!snet->vqs[i])
808 break;
809
810 kfree(snet->vqs[i]);
811 }
812 kfree(snet->vqs);
813 }
814
snet_build_vqs(struct snet * snet)815 static int snet_build_vqs(struct snet *snet)
816 {
817 u32 i;
818 /* Allocate the VQ pointers array */
819 snet->vqs = kcalloc(snet->cfg->vq_num, sizeof(void *), GFP_KERNEL);
820 if (!snet->vqs)
821 return -ENOMEM;
822
823 /* Allocate the VQs */
824 for (i = 0; i < snet->cfg->vq_num; i++) {
825 snet->vqs[i] = kzalloc(sizeof(*snet->vqs[i]), GFP_KERNEL);
826 if (!snet->vqs[i]) {
827 snet_free_vqs(snet);
828 return -ENOMEM;
829 }
830 /* Reset IRQ num */
831 snet->vqs[i]->irq = -1;
832 /* VQ serial ID */
833 snet->vqs[i]->sid = i;
834 /* Kick address - every VQ gets 4B */
835 snet->vqs[i]->kick_ptr = snet->bar + snet->psnet->cfg.kick_off +
836 snet->vqs[i]->sid * 4;
837 /* Clear kick address for this VQ */
838 iowrite32(0, snet->vqs[i]->kick_ptr);
839 }
840 return 0;
841 }
842
psnet_get_next_irq_num(struct psnet * psnet)843 static int psnet_get_next_irq_num(struct psnet *psnet)
844 {
845 int irq;
846
847 spin_lock(&psnet->lock);
848 irq = psnet->next_irq++;
849 spin_unlock(&psnet->lock);
850
851 return irq;
852 }
853
snet_reserve_irq_idx(struct pci_dev * pdev,struct snet * snet)854 static void snet_reserve_irq_idx(struct pci_dev *pdev, struct snet *snet)
855 {
856 struct psnet *psnet = snet->psnet;
857 int i;
858
859 /* one IRQ for every VQ, and one for config changes */
860 snet->cfg_irq_idx = psnet_get_next_irq_num(psnet);
861 snprintf(snet->cfg_irq_name, SNET_NAME_SIZE, "snet[%s]-cfg[%d]",
862 pci_name(pdev), snet->cfg_irq_idx);
863
864 for (i = 0; i < snet->cfg->vq_num; i++) {
865 /* Get next free IRQ ID */
866 snet->vqs[i]->irq_idx = psnet_get_next_irq_num(psnet);
867 /* Write IRQ name */
868 snprintf(snet->vqs[i]->irq_name, SNET_NAME_SIZE, "snet[%s]-vq[%d]",
869 pci_name(pdev), snet->vqs[i]->irq_idx);
870 }
871 }
872
873 /* Find a device config based on virtual function id */
snet_find_dev_cfg(struct snet_cfg * cfg,u32 vfid)874 static struct snet_dev_cfg *snet_find_dev_cfg(struct snet_cfg *cfg, u32 vfid)
875 {
876 u32 i;
877
878 for (i = 0; i < cfg->devices_num; i++) {
879 if (cfg->devs[i]->vfid == vfid)
880 return cfg->devs[i];
881 }
882 /* Oppss.. no config found.. */
883 return NULL;
884 }
885
/* Probe function for a physical PCI function.
 * Maps every BAR, reads the DPU configuration, optionally allocates the
 * shared MSI-X vectors on the PF, enables SR-IOV so the VFs get probed,
 * and creates the HW monitor device if the DPU advertises support.
 */
static int snet_vdpa_probe_pf(struct pci_dev *pdev)
{
	struct psnet *psnet;
	int ret = 0;
	bool pf_irqs = false;

	ret = pcim_enable_device(pdev);
	if (ret) {
		SNET_ERR(pdev, "Failed to enable PCI device\n");
		return ret;
	}

	/* Allocate a PCI physical function device */
	psnet = kzalloc(sizeof(*psnet), GFP_KERNEL);
	if (!psnet)
		return -ENOMEM;

	/* Init PSNET spinlock (protects the IRQ index allocator) */
	spin_lock_init(&psnet->lock);

	pci_set_master(pdev);
	pci_set_drvdata(pdev, psnet);

	/* Open SNET MAIN BAR */
	ret = psnet_open_pf_bar(pdev, psnet);
	if (ret)
		goto free_psnet;

	/* Try to read SNET's config from PCI BAR */
	ret = psnet_read_cfg(pdev, psnet);
	if (ret)
		goto free_psnet;

	/* If SNET_CFG_FLAG_IRQ_PF flag is set, we should use
	 * PF MSI-X vectors
	 */
	pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF);

	if (pf_irqs) {
		ret = psnet_alloc_irq_vector(pdev, psnet);
		if (ret)
			goto free_cfg;
	}

	SNET_DBG(pdev, "Enable %u virtual functions\n", psnet->cfg.vf_num);
	ret = pci_enable_sriov(pdev, psnet->cfg.vf_num);
	if (ret) {
		SNET_ERR(pdev, "Failed to enable SR-IOV\n");
		goto free_irq;
	}

	/* Create HW monitor device */
	if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_HWMON)) {
#if IS_ENABLED(CONFIG_HWMON)
		psnet_create_hwmon(pdev);
#else
		SNET_WARN(pdev, "Can't start HWMON, CONFIG_HWMON is not enabled\n");
#endif
	}

	return 0;

free_irq:
	if (pf_irqs)
		pci_free_irq_vectors(pdev);
free_cfg:
	snet_free_cfg(&psnet->cfg);
free_psnet:
	kfree(psnet);
	return ret;
}
958
/* Probe function for a virtual PCI function.
 * Each VF backs exactly one vDPA device: find its config entry by VF id,
 * allocate IRQ vectors (unless the PF owns them), map the VF BAR, build
 * the VQs, reserve stable IRQ indexes and register the vDPA device.
 */
static int snet_vdpa_probe_vf(struct pci_dev *pdev)
{
	struct pci_dev *pdev_pf = pdev->physfn;
	struct psnet *psnet = pci_get_drvdata(pdev_pf);
	struct snet_dev_cfg *dev_cfg;
	struct snet *snet;
	u32 vfid;
	int ret;
	bool pf_irqs = false;

	/* Get virtual function id.
	 * (the DPU counts the VFs from 1)
	 */
	ret = pci_iov_vf_id(pdev);
	if (ret < 0) {
		SNET_ERR(pdev, "Failed to find a VF id\n");
		return ret;
	}
	vfid = ret + 1;

	/* Find the snet_dev_cfg based on vfid */
	dev_cfg = snet_find_dev_cfg(&psnet->cfg, vfid);
	if (!dev_cfg) {
		SNET_WARN(pdev, "Failed to find a VF config..\n");
		return -ENODEV;
	}

	/* Which PCI device should allocate the IRQs?
	 * If the SNET_CFG_FLAG_IRQ_PF flag set, the PF device allocates the IRQs
	 */
	pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF);

	ret = pcim_enable_device(pdev);
	if (ret) {
		SNET_ERR(pdev, "Failed to enable PCI VF device\n");
		return ret;
	}

	/* Request for MSI-X IRQs */
	if (!pf_irqs) {
		ret = snet_alloc_irq_vector(pdev, dev_cfg);
		if (ret)
			return ret;
	}

	/* Allocate vdpa device */
	snet = vdpa_alloc_device(struct snet, vdpa, &pdev->dev, &snet_config_ops, 1, 1, NULL,
				 false);
	if (!snet) {
		SNET_ERR(pdev, "Failed to allocate a vdpa device\n");
		ret = -ENOMEM;
		goto free_irqs;
	}

	/* Init control mutex and spinlock */
	mutex_init(&snet->ctrl_lock);
	spin_lock_init(&snet->ctrl_spinlock);

	/* Save pci device pointer */
	snet->pdev = pdev;
	snet->psnet = psnet;
	snet->cfg = dev_cfg;
	snet->dpu_ready = false;
	snet->sid = vfid;
	/* Reset IRQ value */
	snet->cfg_irq = -1;

	ret = snet_open_vf_bar(pdev, snet);
	if (ret)
		goto put_device;

	/* Create a VirtIO config pointer */
	snet->cfg->virtio_cfg = snet->bar + snet->psnet->cfg.virtio_cfg_off;

	/* Clear control registers */
	snet_ctrl_clear(snet);

	pci_set_master(pdev);
	pci_set_drvdata(pdev, snet);

	ret = snet_build_vqs(snet);
	if (ret)
		goto put_device;

	/* Reserve IRQ indexes,
	 * The IRQs may be requested and freed multiple times,
	 * but the indexes won't change.
	 */
	snet_reserve_irq_idx(pf_irqs ? pdev_pf : pdev, snet);

	/* Set the DMA device */
	snet->vdpa.dma_dev = &pdev->dev;

	/* Register VDPA device */
	ret = vdpa_register_device(&snet->vdpa, snet->cfg->vq_num);
	if (ret) {
		SNET_ERR(pdev, "Failed to register vdpa device\n");
		goto free_vqs;
	}

	return 0;

free_vqs:
	snet_free_vqs(snet);
put_device:
	put_device(&snet->vdpa.dev);
free_irqs:
	if (!pf_irqs)
		pci_free_irq_vectors(pdev);
	return ret;
}
1071
snet_vdpa_probe(struct pci_dev * pdev,const struct pci_device_id * id)1072 static int snet_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1073 {
1074 if (pdev->is_virtfn)
1075 return snet_vdpa_probe_vf(pdev);
1076 else
1077 return snet_vdpa_probe_pf(pdev);
1078 }
1079
snet_vdpa_remove_pf(struct pci_dev * pdev)1080 static void snet_vdpa_remove_pf(struct pci_dev *pdev)
1081 {
1082 struct psnet *psnet = pci_get_drvdata(pdev);
1083
1084 pci_disable_sriov(pdev);
1085 /* If IRQs are allocated from the PF, we should free the IRQs */
1086 if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF))
1087 pci_free_irq_vectors(pdev);
1088
1089 snet_free_cfg(&psnet->cfg);
1090 kfree(psnet);
1091 }
1092
snet_vdpa_remove_vf(struct pci_dev * pdev)1093 static void snet_vdpa_remove_vf(struct pci_dev *pdev)
1094 {
1095 struct snet *snet = pci_get_drvdata(pdev);
1096 struct psnet *psnet = snet->psnet;
1097
1098 vdpa_unregister_device(&snet->vdpa);
1099 snet_free_vqs(snet);
1100 /* If IRQs are allocated from the VF, we should free the IRQs */
1101 if (!PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF))
1102 pci_free_irq_vectors(pdev);
1103 }
1104
snet_vdpa_remove(struct pci_dev * pdev)1105 static void snet_vdpa_remove(struct pci_dev *pdev)
1106 {
1107 if (pdev->is_virtfn)
1108 snet_vdpa_remove_vf(pdev);
1109 else
1110 snet_vdpa_remove_pf(pdev);
1111 }
1112
/* Match SolidRun DPUs by vendor/device ID, including subsystem IDs */
static struct pci_device_id snet_driver_pci_ids[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_SOLIDRUN, SNET_DEVICE_ID,
			 PCI_VENDOR_ID_SOLIDRUN, SNET_DEVICE_ID) },
	{ 0 },
};

MODULE_DEVICE_TABLE(pci, snet_driver_pci_ids);

/* Single PCI driver serving both PF and VF; probe/remove dispatch internally */
static struct pci_driver snet_vdpa_driver = {
	.name = "snet-vdpa-driver",
	.id_table = snet_driver_pci_ids,
	.probe = snet_vdpa_probe,
	.remove = snet_vdpa_remove,
};

module_pci_driver(snet_vdpa_driver);

MODULE_AUTHOR("Alvaro Karsz <alvaro.karsz@solid-run.com>");
MODULE_DESCRIPTION("SolidRun vDPA driver");
MODULE_LICENSE("GPL v2");
1133