// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include "otx2_cpt_common.h"
#include "otx2_cptvf.h"
#include "otx2_cptlf.h"
#include "otx2_cptvf_algs.h"
#include <rvu_reg.h>

#define OTX2_CPTVF_DRV_NAME "octeontx2-cptvf"

/*
 * Enable delivery of PF->VF mailbox interrupts to this VF.
 * Pending state is cleared first so a stale interrupt does not fire
 * as soon as the enable bit is set.
 */
static void cptvf_enable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
{
	/* Clear interrupt if any */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
			 0x1ULL);

	/* Enable PF-VF interrupt (W1S register: write 1 to set enable) */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
			 OTX2_RVU_VF_INT_ENA_W1S, 0x1ULL);
}

/*
 * Disable PF->VF mailbox interrupt delivery and clear any interrupt
 * that may have become pending before the disable took effect.
 */
static void cptvf_disable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
{
	/* Disable PF-VF interrupt (W1C register: write 1 to clear enable) */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
			 OTX2_RVU_VF_INT_ENA_W1C, 0x1ULL);

	/* Clear interrupt if any */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
			 0x1ULL);
}

/*
 * Allocate all MSI-X vectors for the VF, hook up the PF<=>VF mailbox
 * interrupt handler and perform the initial READY handshake with the PF.
 *
 * Returns 0 on success, a negative errno on failure.  If the PF does not
 * answer the READY message, returns -EPROBE_DEFER so the probe is retried
 * later (the PF driver may simply not be fully up yet).
 *
 * NOTE(review): the IRQ is devm-managed and the device was enabled with
 * pcim_enable_device() in probe, so vectors/IRQ are presumably released
 * by devres on detach — no explicit free on the error paths here.
 */
static int cptvf_register_interrupts(struct otx2_cptvf_dev *cptvf)
{
	int ret, irq;
	int num_vec;

	num_vec = pci_msix_vec_count(cptvf->pdev);
	if (num_vec <= 0)
		return -EINVAL;

	/* Enable MSI-X; require exactly num_vec vectors (min == max) */
	ret = pci_alloc_irq_vectors(cptvf->pdev, num_vec, num_vec,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(&cptvf->pdev->dev,
			"Request for %d msix vectors failed\n", num_vec);
		return ret;
	}
	irq = pci_irq_vector(cptvf->pdev, OTX2_CPT_VF_INT_VEC_E_MBOX);
	/* Register VF<=>PF mailbox interrupt handler */
	ret = devm_request_irq(&cptvf->pdev->dev, irq,
			       otx2_cptvf_pfvf_mbox_intr, 0,
			       "CPTPFVF Mbox", cptvf);
	if (ret)
		return ret;
	/* Enable PF-VF mailbox interrupts */
	cptvf_enable_pfvf_mbox_intrs(cptvf);

	/* Handshake: tell the PF we are up; failure => PF not ready yet */
	ret = otx2_cpt_send_ready_msg(&cptvf->pfvf_mbox, cptvf->pdev);
	if (ret) {
		dev_warn(&cptvf->pdev->dev,
			 "PF not responding to mailbox, deferring probe\n");
		cptvf_disable_pfvf_mbox_intrs(cptvf);
		return -EPROBE_DEFER;
	}
	return 0;
}

/*
 * Set up the PF<=>VF mailbox: a dedicated single-threaded workqueue to
 * process PF replies outside interrupt context, the mbox state itself
 * (VF->PF direction, one channel), and the work item dispatched by the
 * mailbox IRQ handler.
 */
static int cptvf_pfvf_mbox_init(struct otx2_cptvf_dev *cptvf)
{
	int ret;

	/*
	 * WQ_MEM_RECLAIM: mailbox traffic may be needed while the system
	 * is reclaiming memory; WQ_HIGHPRI keeps handshake latency low.
	 */
	cptvf->pfvf_mbox_wq = alloc_workqueue("cpt_pfvf_mailbox",
					      WQ_UNBOUND | WQ_HIGHPRI |
					      WQ_MEM_RECLAIM, 1);
	if (!cptvf->pfvf_mbox_wq)
		return -ENOMEM;

	ret = otx2_mbox_init(&cptvf->pfvf_mbox, cptvf->pfvf_mbox_base,
			     cptvf->pdev, cptvf->reg_base, MBOX_DIR_VFPF, 1);
	if (ret)
		goto free_wqe;

	INIT_WORK(&cptvf->pfvf_mbox_work, otx2_cptvf_pfvf_mbox_handler);
	return 0;

free_wqe:
	destroy_workqueue(cptvf->pfvf_mbox_wq);
	return ret;
}

/* Tear down the PF<=>VF mailbox created by cptvf_pfvf_mbox_init(). */
static void cptvf_pfvf_mbox_destroy(struct otx2_cptvf_dev *cptvf)
{
	/* Flushes and destroys the workqueue before freeing mbox state */
	destroy_workqueue(cptvf->pfvf_mbox_wq);
	otx2_mbox_destroy(&cptvf->pfvf_mbox);
}

/* Tasklet body: post-process completed CPT instructions for one LF. */
static void cptlf_work_handler(unsigned long data)
{
	otx2_cpt_post_process((struct otx2_cptlf_wqe *) data);
}

/*
 * Kill and free the per-LF completion tasklets.  Safe to call on a
 * partially initialized array: entries without a wqe are skipped,
 * and freed slots are NULLed to avoid double free on a repeat call.
 */
static void cleanup_tasklet_work(struct otx2_cptlfs_info *lfs)
{
	int i;

	for (i = 0; i < lfs->lfs_num; i++) {
		if (!lfs->lf[i].wqe)
			continue;

		tasklet_kill(&lfs->lf[i].wqe->work);
		kfree(lfs->lf[i].wqe);
		lfs->lf[i].wqe = NULL;
	}
}

/*
 * Allocate one tasklet work element per LF and bind it to
 * cptlf_work_handler.  On allocation failure, everything allocated so
 * far is released via cleanup_tasklet_work().
 */
static int init_tasklet_work(struct otx2_cptlfs_info *lfs)
{
	struct otx2_cptlf_wqe *wqe;
	int i, ret = 0;

	for (i = 0; i < lfs->lfs_num; i++) {
		wqe = kzalloc(sizeof(struct otx2_cptlf_wqe), GFP_KERNEL);
		if (!wqe) {
			ret = -ENOMEM;
			goto cleanup_tasklet;
		}

		/* The wqe pointer itself is the tasklet's data argument */
		tasklet_init(&wqe->work, cptlf_work_handler, (u64) wqe);
		wqe->lfs = lfs;
		wqe->lf_num = i;
		lfs->lf[i].wqe = wqe;
	}
	return 0;

cleanup_tasklet:
	cleanup_tasklet_work(lfs);
	return ret;
}

/*
 * Free the per-LF pending (in-flight request) queues.  NULLing head
 * makes this idempotent and safe after a partial allocation.
 */
static void free_pending_queues(struct otx2_cptlfs_info *lfs)
{
	int i;

	for (i = 0; i < lfs->lfs_num; i++) {
		kfree(lfs->lf[i].pqueue.head);
		lfs->lf[i].pqueue.head = NULL;
	}
}

/*
 * Allocate a pending-entry queue of OTX2_CPT_INST_QLEN_MSGS entries for
 * each LF and initialize its lock.  On any failure, queues allocated so
 * far are freed and a negative errno is returned.
 */
static int alloc_pending_queues(struct otx2_cptlfs_info *lfs)
{
	int size, ret, i;

	if (!lfs->lfs_num)
		return -EINVAL;

	for (i = 0; i < lfs->lfs_num; i++) {
		lfs->lf[i].pqueue.qlen = OTX2_CPT_INST_QLEN_MSGS;
		size = lfs->lf[i].pqueue.qlen *
		       sizeof(struct otx2_cpt_pending_entry);

		lfs->lf[i].pqueue.head = kzalloc(size, GFP_KERNEL);
		if (!lfs->lf[i].pqueue.head) {
			ret = -ENOMEM;
			goto error;
		}

		/* Initialize spin lock */
		spin_lock_init(&lfs->lf[i].pqueue.lock);
	}
	return 0;

error:
	free_pending_queues(lfs);
	return ret;
}

/* Undo lf_sw_init(): tasklets first, then the pending queues they use. */
static void lf_sw_cleanup(struct otx2_cptlfs_info *lfs)
{
	cleanup_tasklet_work(lfs);
	free_pending_queues(lfs);
}

/*
 * Software-side LF setup: pending queues plus completion tasklets.
 * Returns 0 on success; on failure nothing is left allocated.
 */
static int lf_sw_init(struct otx2_cptlfs_info *lfs)
{
	int ret;

	ret = alloc_pending_queues(lfs);
	if (ret) {
		dev_err(&lfs->pdev->dev,
			"Allocating pending queues failed\n");
		return ret;
	}
	ret = init_tasklet_work(lfs);
	if (ret) {
		dev_err(&lfs->pdev->dev,
			"Tasklet work init failed\n");
		goto pending_queues_free;
	}
	return 0;

pending_queues_free:
	free_pending_queues(lfs);
	return ret;
}

/*
 * Full LF teardown, mirroring cptvf_lf_init() in reverse: mark the LFs
 * as resetting first so no new work is issued, then peel back IRQ
 * affinity, instruction queues, crypto registration, interrupts and
 * software state before asking the PF to detach the LF resources.
 */
static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs)
{
	atomic_set(&lfs->state, OTX2_CPTLF_IN_RESET);

	/* Remove interrupts affinity */
	otx2_cptlf_free_irqs_affinity(lfs);
	/* Disable instruction queue */
	otx2_cptlf_disable_iqueues(lfs);
	/* Unregister crypto algorithms */
	otx2_cpt_crypto_exit(lfs->pdev, THIS_MODULE);
	/* Unregister LFs interrupts */
	otx2_cptlf_unregister_interrupts(lfs);
	/* Cleanup LFs software side */
	lf_sw_cleanup(lfs);
	/* Send request to detach LFs */
	otx2_cpt_detach_rsrcs_msg(lfs);
}

/*
 * Attach and bring up the CPT LFs for this VF:
 *  - query the PF (over the mailbox) for the symmetric-crypto engine
 *    group and the per-VF LF limit,
 *  - attach one LF per kvf_limits (or per online CPU if no limit),
 *  - set up MSI-X offsets, software queues/tasklets, interrupts and
 *    IRQ affinity,
 *  - finally register the kernel crypto algorithms.
 *
 * Returns 0 on success; on failure everything set up so far is undone.
 */
static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
{
	struct otx2_cptlfs_info *lfs = &cptvf->lfs;
	struct device *dev = &cptvf->pdev->dev;
	int ret, lfs_num;
	u8 eng_grp_msk;

	/* Get engine group number for symmetric crypto */
	cptvf->lfs.kcrypto_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
	ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_SE_TYPES);
	if (ret)
		return ret;

	/* The mailbox reply handler fills in kcrypto_eng_grp_num */
	if (cptvf->lfs.kcrypto_eng_grp_num == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
		dev_err(dev, "Engine group for kernel crypto not available\n");
		ret = -ENOENT;
		return ret;
	}
	eng_grp_msk = 1 << cptvf->lfs.kcrypto_eng_grp_num;

	ret = otx2_cptvf_send_kvf_limits_msg(cptvf);
	if (ret)
		return ret;

	lfs->reg_base = cptvf->reg_base;
	lfs->pdev = cptvf->pdev;
	lfs->mbox = &cptvf->pfvf_mbox;

	/* No PF-imposed limit => one LF per online CPU */
	lfs_num = cptvf->lfs.kvf_limits ? cptvf->lfs.kvf_limits :
		  num_online_cpus();
	ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
			      lfs_num);
	if (ret)
		return ret;

	/* Get msix offsets for attached LFs */
	ret = otx2_cpt_msix_offset_msg(lfs);
	if (ret)
		goto cleanup_lf;

	/* Initialize LFs software side */
	ret = lf_sw_init(lfs);
	if (ret)
		goto cleanup_lf;

	/* Register LFs interrupts */
	ret = otx2_cptlf_register_interrupts(lfs);
	if (ret)
		goto cleanup_lf_sw;

	/* Set interrupts affinity */
	ret = otx2_cptlf_set_irqs_affinity(lfs);
	if (ret)
		goto unregister_intr;

	atomic_set(&lfs->state, OTX2_CPTLF_STARTED);
	/* Register crypto algorithms */
	ret = otx2_cpt_crypto_init(lfs->pdev, THIS_MODULE, lfs_num, 1);
	if (ret) {
		dev_err(&lfs->pdev->dev, "algorithms registration failed\n");
		goto disable_irqs;
	}
	return 0;

disable_irqs:
	otx2_cptlf_free_irqs_affinity(lfs);
unregister_intr:
	otx2_cptlf_unregister_interrupts(lfs);
cleanup_lf_sw:
	lf_sw_cleanup(lfs);
cleanup_lf:
	otx2_cptlf_shutdown(lfs);

	return ret;
}

/*
 * PCI probe: enable the device, map the register BAR and the PF<=>VF
 * mailbox BAR, initialize the mailbox, register interrupts (including
 * the READY handshake with the PF) and finally attach/start the LFs.
 * Teardown on failure is in reverse order of setup.
 */
static int otx2_cptvf_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	resource_size_t offset, size;
	struct otx2_cptvf_dev *cptvf;
	int ret;

	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
	if (!cptvf)
		return -ENOMEM;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}

	/* CPT hardware addresses memory with 48-bit DMA */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto clear_drvdata;
	}
	/* Map VF's configuration registers */
	ret = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
					     OTX2_CPTVF_DRV_NAME);
	if (ret) {
		dev_err(dev, "Couldn't get PCI resources 0x%x\n", ret);
		goto clear_drvdata;
	}
	pci_set_master(pdev);
	pci_set_drvdata(pdev, cptvf);
	cptvf->pdev = pdev;

	cptvf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];

	offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
	size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
	/* Map PF-VF mailbox memory (write-combining is fine for mbox RAM) */
	cptvf->pfvf_mbox_base = devm_ioremap_wc(dev, offset, size);
	if (!cptvf->pfvf_mbox_base) {
		dev_err(&pdev->dev, "Unable to map BAR4\n");
		ret = -ENODEV;
		goto clear_drvdata;
	}
	/* Initialize PF<=>VF mailbox */
	ret = cptvf_pfvf_mbox_init(cptvf);
	if (ret)
		goto clear_drvdata;

	/* Register interrupts */
	ret = cptvf_register_interrupts(cptvf);
	if (ret)
		goto destroy_pfvf_mbox;

	/* Initialize CPT LFs */
	ret = cptvf_lf_init(cptvf);
	if (ret)
		goto unregister_interrupts;

	return 0;

unregister_interrupts:
	cptvf_disable_pfvf_mbox_intrs(cptvf);
destroy_pfvf_mbox:
	cptvf_pfvf_mbox_destroy(cptvf);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);

	return ret;
}

/*
 * PCI remove: shut the LFs down first (they use the mailbox), then
 * silence and destroy the mailbox itself.
 */
static void otx2_cptvf_remove(struct pci_dev *pdev)
{
	struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);

	if (!cptvf) {
		dev_err(&pdev->dev, "Invalid CPT VF device.\n");
		return;
	}
	cptvf_lf_shutdown(&cptvf->lfs);
	/* Disable PF-VF mailbox interrupt */
	cptvf_disable_pfvf_mbox_intrs(cptvf);
	/* Destroy PF-VF mbox */
	cptvf_pfvf_mbox_destroy(cptvf);
	pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id otx2_cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, OTX2_CPT_PCI_VF_DEVICE_ID), 0},
	{ 0, } /* end of table */
};

static struct pci_driver otx2_cptvf_pci_driver = {
	.name = OTX2_CPTVF_DRV_NAME,
	.id_table = otx2_cptvf_id_table,
	.probe = otx2_cptvf_probe,
	.remove = otx2_cptvf_remove,
};

module_pci_driver(otx2_cptvf_pci_driver);

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell OcteonTX2 CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cptvf_id_table);