1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell OcteonTX CPT driver 3 * 4 * Copyright (C) 2019 Marvell International Ltd. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 11 #include <linux/interrupt.h> 12 #include <linux/module.h> 13 #include "otx_cptvf.h" 14 #include "otx_cptvf_algs.h" 15 #include "otx_cptvf_reqmgr.h" 16 17 #define DRV_NAME "octeontx-cptvf" 18 #define DRV_VERSION "1.0" 19 20 static void vq_work_handler(unsigned long data) 21 { 22 struct otx_cptvf_wqe_info *cwqe_info = 23 (struct otx_cptvf_wqe_info *) data; 24 25 otx_cpt_post_process(&cwqe_info->vq_wqe[0]); 26 } 27 28 static int init_worker_threads(struct otx_cptvf *cptvf) 29 { 30 struct pci_dev *pdev = cptvf->pdev; 31 struct otx_cptvf_wqe_info *cwqe_info; 32 int i; 33 34 cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL); 35 if (!cwqe_info) 36 return -ENOMEM; 37 38 if (cptvf->num_queues) { 39 dev_dbg(&pdev->dev, "Creating VQ worker threads (%d)\n", 40 cptvf->num_queues); 41 } 42 43 for (i = 0; i < cptvf->num_queues; i++) { 44 tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler, 45 (u64)cwqe_info); 46 cwqe_info->vq_wqe[i].cptvf = cptvf; 47 } 48 cptvf->wqe_info = cwqe_info; 49 50 return 0; 51 } 52 53 static void cleanup_worker_threads(struct otx_cptvf *cptvf) 54 { 55 struct pci_dev *pdev = cptvf->pdev; 56 struct otx_cptvf_wqe_info *cwqe_info; 57 int i; 58 59 cwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info; 60 if (!cwqe_info) 61 return; 62 63 if (cptvf->num_queues) { 64 dev_dbg(&pdev->dev, "Cleaning VQ worker threads (%u)\n", 65 cptvf->num_queues); 66 } 67 68 for (i = 0; i < cptvf->num_queues; i++) 69 tasklet_kill(&cwqe_info->vq_wqe[i].twork); 70 71 kfree_sensitive(cwqe_info); 72 cptvf->wqe_info = NULL; 73 } 74 75 static void free_pending_queues(struct otx_cpt_pending_qinfo *pqinfo) 76 { 77 struct otx_cpt_pending_queue *queue; 78 int i; 79 80 for_each_pending_queue(pqinfo, queue, i) { 81 if (!queue->head) 82 continue; 83 84 /* free single queue */ 85 kfree_sensitive((queue->head)); 86 queue->front = 0; 87 queue->rear = 0; 88 queue->qlen = 0; 89 } 90 pqinfo->num_queues = 0; 91 } 92 93 static int alloc_pending_queues(struct otx_cpt_pending_qinfo *pqinfo, u32 qlen, 94 u32 num_queues) 95 { 96 struct otx_cpt_pending_queue *queue = NULL; 97 int ret; 98 u32 i; 99 100 pqinfo->num_queues = num_queues; 101 102 for_each_pending_queue(pqinfo, queue, i) { 103 queue->head = kcalloc(qlen, sizeof(*queue->head), GFP_KERNEL); 104 if (!queue->head) { 105 ret = -ENOMEM; 106 goto pending_qfail; 107 } 108 109 queue->pending_count = 0; 110 queue->front = 0; 111 queue->rear = 0; 112 queue->qlen = qlen; 113 114 /* init queue spin lock */ 115 spin_lock_init(&queue->lock); 116 } 117 return 0; 118 119 pending_qfail: 120 free_pending_queues(pqinfo); 121 122 return ret; 123 } 124 125 static int init_pending_queues(struct otx_cptvf *cptvf, u32 qlen, 126 u32 num_queues) 127 { 128 struct pci_dev *pdev = cptvf->pdev; 129 int ret; 130 131 if (!num_queues) 132 return 0; 133 134 ret = alloc_pending_queues(&cptvf->pqinfo, qlen, num_queues); 135 if (ret) { 136 dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n", 137 num_queues); 138 return ret; 139 } 140 return 0; 141 } 142 143 static void cleanup_pending_queues(struct otx_cptvf *cptvf) 144 { 145 struct pci_dev *pdev = cptvf->pdev; 146 147 if (!cptvf->num_queues) 148 return; 149 150 dev_dbg(&pdev->dev, "Cleaning VQ pending queue (%u)\n", 151 cptvf->num_queues); 152 free_pending_queues(&cptvf->pqinfo); 153 } 154 155 static void free_command_queues(struct otx_cptvf *cptvf, 156 struct otx_cpt_cmd_qinfo *cqinfo) 157 { 158 struct otx_cpt_cmd_queue *queue = NULL; 159 struct otx_cpt_cmd_chunk *chunk = NULL; 160 struct pci_dev *pdev = cptvf->pdev; 161 int i; 162 163 /* clean up for each queue */ 164 for (i = 0; i < cptvf->num_queues; i++) { 165 queue = &cqinfo->queue[i]; 166 167 while (!list_empty(&cqinfo->queue[i].chead)) { 168 chunk = list_first_entry(&cqinfo->queue[i].chead, 169 struct otx_cpt_cmd_chunk, nextchunk); 170 171 dma_free_coherent(&pdev->dev, chunk->size, 172 chunk->head, 173 chunk->dma_addr); 174 chunk->head = NULL; 175 chunk->dma_addr = 0; 176 list_del(&chunk->nextchunk); 177 kfree_sensitive(chunk); 178 } 179 queue->num_chunks = 0; 180 queue->idx = 0; 181 182 } 183 } 184 185 static int alloc_command_queues(struct otx_cptvf *cptvf, 186 struct otx_cpt_cmd_qinfo *cqinfo, 187 u32 qlen) 188 { 189 struct otx_cpt_cmd_chunk *curr, *first, *last; 190 struct otx_cpt_cmd_queue *queue = NULL; 191 struct pci_dev *pdev = cptvf->pdev; 192 size_t q_size, c_size, rem_q_size; 193 u32 qcsize_bytes; 194 int i; 195 196 197 /* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */ 198 cptvf->qsize = min(qlen, cqinfo->qchunksize) * 199 OTX_CPT_NEXT_CHUNK_PTR_SIZE + 1; 200 /* Qsize in bytes to create space for alignment */ 201 q_size = qlen * OTX_CPT_INST_SIZE; 202 203 qcsize_bytes = cqinfo->qchunksize * OTX_CPT_INST_SIZE; 204 205 /* per queue initialization */ 206 for (i = 0; i < cptvf->num_queues; i++) { 207 c_size = 0; 208 rem_q_size = q_size; 209 first = NULL; 210 last = NULL; 211 212 queue = &cqinfo->queue[i]; 213 INIT_LIST_HEAD(&queue->chead); 214 do { 215 curr = kzalloc(sizeof(*curr), GFP_KERNEL); 216 if (!curr) 217 goto cmd_qfail; 218 219 c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes : 220 rem_q_size; 221 curr->head = dma_alloc_coherent(&pdev->dev, 222 c_size + OTX_CPT_NEXT_CHUNK_PTR_SIZE, 223 &curr->dma_addr, GFP_KERNEL); 224 if (!curr->head) { 225 dev_err(&pdev->dev, 226 "Command Q (%d) chunk (%d) allocation failed\n", 227 i, queue->num_chunks); 228 goto free_curr; 229 } 230 curr->size = c_size; 231 232 if (queue->num_chunks == 0) { 233 first = curr; 234 queue->base = first; 235 } 236 list_add_tail(&curr->nextchunk, 237 &cqinfo->queue[i].chead); 238 239 queue->num_chunks++; 240 rem_q_size -= c_size; 241 if (last) 242 *((u64 *)(&last->head[last->size])) = 243 (u64)curr->dma_addr; 244 245 last = curr; 246 } while (rem_q_size); 247 248 /* 249 * Make the queue circular, tie back last chunk entry to head 250 */ 251 curr = first; 252 *((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr; 253 queue->qhead = curr; 254 } 255 return 0; 256 free_curr: 257 kfree(curr); 258 cmd_qfail: 259 free_command_queues(cptvf, cqinfo); 260 return -ENOMEM; 261 } 262 263 static int init_command_queues(struct otx_cptvf *cptvf, u32 qlen) 264 { 265 struct pci_dev *pdev = cptvf->pdev; 266 int ret; 267 268 /* setup command queues */ 269 ret = alloc_command_queues(cptvf, &cptvf->cqinfo, qlen); 270 if (ret) { 271 dev_err(&pdev->dev, "Failed to allocate command queues (%u)\n", 272 cptvf->num_queues); 273 return ret; 274 } 275 return ret; 276 } 277 278 static void cleanup_command_queues(struct otx_cptvf *cptvf) 279 { 280 struct pci_dev *pdev = cptvf->pdev; 281 282 if (!cptvf->num_queues) 283 return; 284 285 dev_dbg(&pdev->dev, "Cleaning VQ command queue (%u)\n", 286 cptvf->num_queues); 287 free_command_queues(cptvf, &cptvf->cqinfo); 288 } 289 290 static void cptvf_sw_cleanup(struct otx_cptvf *cptvf) 291 { 292 cleanup_worker_threads(cptvf); 293 cleanup_pending_queues(cptvf); 294 cleanup_command_queues(cptvf); 295 } 296 297 static int cptvf_sw_init(struct otx_cptvf *cptvf, u32 qlen, u32 num_queues) 298 { 299 struct pci_dev *pdev = cptvf->pdev; 300 u32 max_dev_queues = 0; 301 int ret; 302 303 max_dev_queues = OTX_CPT_NUM_QS_PER_VF; 304 /* possible cpus */ 305 num_queues = min_t(u32, num_queues, max_dev_queues); 306 cptvf->num_queues = num_queues; 307 308 ret = init_command_queues(cptvf, qlen); 309 if (ret) { 310 dev_err(&pdev->dev, "Failed to setup command queues (%u)\n", 311 num_queues); 312 return ret; 313 } 314 315 ret = init_pending_queues(cptvf, qlen, num_queues); 316 if (ret) { 317 dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n", 318 num_queues); 319 goto setup_pqfail; 320 } 321 322 /* Create worker threads for BH processing */ 323 ret = init_worker_threads(cptvf); 324 if (ret) { 325 dev_err(&pdev->dev, "Failed to setup worker threads\n"); 326 goto init_work_fail; 327 } 328 return 0; 329 330 init_work_fail: 331 cleanup_worker_threads(cptvf); 332 cleanup_pending_queues(cptvf); 333 334 setup_pqfail: 335 cleanup_command_queues(cptvf); 336 337 return ret; 338 } 339 340 static void cptvf_free_irq_affinity(struct otx_cptvf *cptvf, int vec) 341 { 342 irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL); 343 free_cpumask_var(cptvf->affinity_mask[vec]); 344 } 345 346 static void cptvf_write_vq_ctl(struct otx_cptvf *cptvf, bool val) 347 { 348 union otx_cptx_vqx_ctl vqx_ctl; 349 350 vqx_ctl.u = readq(cptvf->reg_base + OTX_CPT_VQX_CTL(0)); 351 vqx_ctl.s.ena = val; 352 writeq(vqx_ctl.u, cptvf->reg_base + OTX_CPT_VQX_CTL(0)); 353 } 354 355 void otx_cptvf_write_vq_doorbell(struct otx_cptvf *cptvf, u32 val) 356 { 357 union otx_cptx_vqx_doorbell vqx_dbell; 358 359 vqx_dbell.u = readq(cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0)); 360 vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */ 361 writeq(vqx_dbell.u, cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0)); 362 } 363 364 static void cptvf_write_vq_inprog(struct otx_cptvf *cptvf, u8 val) 365 { 366 union otx_cptx_vqx_inprog vqx_inprg; 367 368 vqx_inprg.u = readq(cptvf->reg_base + OTX_CPT_VQX_INPROG(0)); 369 vqx_inprg.s.inflight = val; 370 writeq(vqx_inprg.u, cptvf->reg_base + OTX_CPT_VQX_INPROG(0)); 371 } 372 373 static void cptvf_write_vq_done_numwait(struct otx_cptvf *cptvf, u32 val) 374 { 375 union otx_cptx_vqx_done_wait vqx_dwait; 376 377 vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0)); 378 vqx_dwait.s.num_wait = val; 379 writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0)); 380 } 381 382 static u32 cptvf_read_vq_done_numwait(struct otx_cptvf *cptvf) 383 { 384 union otx_cptx_vqx_done_wait vqx_dwait; 385 386 vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0)); 387 return vqx_dwait.s.num_wait; 388 } 389 390 static void cptvf_write_vq_done_timewait(struct otx_cptvf *cptvf, u16 time) 391 { 392 union otx_cptx_vqx_done_wait vqx_dwait; 393 394 vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0)); 395 vqx_dwait.s.time_wait = time; 396 writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0)); 397 } 398 399 400 static u16 cptvf_read_vq_done_timewait(struct otx_cptvf *cptvf) 401 { 402 union otx_cptx_vqx_done_wait vqx_dwait; 403 404 vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0)); 405 return vqx_dwait.s.time_wait; 406 } 407 408 static void cptvf_enable_swerr_interrupts(struct otx_cptvf *cptvf) 409 { 410 union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena; 411 412 vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0)); 413 /* Enable SWERR interrupts for the requested VF */ 414 vqx_misc_ena.s.swerr = 1; 415 writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0)); 416 } 417 418 static void cptvf_enable_mbox_interrupts(struct otx_cptvf *cptvf) 419 { 420 union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena; 421 422 vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0)); 423 /* Enable MBOX interrupt for the requested VF */ 424 vqx_misc_ena.s.mbox = 1; 425 writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0)); 426 } 427 428 static void cptvf_enable_done_interrupts(struct otx_cptvf *cptvf) 429 { 430 union otx_cptx_vqx_done_ena_w1s vqx_done_ena; 431 432 vqx_done_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0)); 433 /* Enable DONE interrupt for the requested VF */ 434 vqx_done_ena.s.done = 1; 435 writeq(vqx_done_ena.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0)); 436 } 437 438 static void cptvf_clear_dovf_intr(struct otx_cptvf *cptvf) 439 { 440 union otx_cptx_vqx_misc_int vqx_misc_int; 441 442 vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0)); 443 /* W1C for the VF */ 444 vqx_misc_int.s.dovf = 1; 445 writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0)); 446 } 447 448 static void cptvf_clear_irde_intr(struct otx_cptvf *cptvf) 449 { 450 union otx_cptx_vqx_misc_int vqx_misc_int; 451 452 vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0)); 453 /* W1C for the VF */ 454 vqx_misc_int.s.irde = 1; 455 writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0)); 456 } 457 458 static void cptvf_clear_nwrp_intr(struct otx_cptvf *cptvf) 459 { 460 union otx_cptx_vqx_misc_int vqx_misc_int; 461 462 vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0)); 463 /* W1C for the VF */ 464 vqx_misc_int.s.nwrp = 1; 465 writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0)); 466 } 467 468 static void cptvf_clear_mbox_intr(struct otx_cptvf *cptvf) 469 { 470 union otx_cptx_vqx_misc_int vqx_misc_int; 471 472 vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0)); 473 /* W1C for the VF */ 474 vqx_misc_int.s.mbox = 1; 475 writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0)); 476 } 477 478 static void cptvf_clear_swerr_intr(struct otx_cptvf *cptvf) 479 { 480 union otx_cptx_vqx_misc_int vqx_misc_int; 481 482 vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0)); 483 /* W1C for the VF */ 484 vqx_misc_int.s.swerr = 1; 485 writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0)); 486 } 487 488 static u64 cptvf_read_vf_misc_intr_status(struct otx_cptvf *cptvf) 489 { 490 return readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0)); 491 } 492 493 static irqreturn_t cptvf_misc_intr_handler(int __always_unused irq, 494 void *arg) 495 { 496 struct otx_cptvf *cptvf = arg; 497 struct pci_dev *pdev = cptvf->pdev; 498 u64 intr; 499 500 intr = cptvf_read_vf_misc_intr_status(cptvf); 501 /* Check for MISC interrupt types */ 502 if (likely(intr & OTX_CPT_VF_INTR_MBOX_MASK)) { 503 dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n", 504 intr, cptvf->vfid); 505 otx_cptvf_handle_mbox_intr(cptvf); 506 cptvf_clear_mbox_intr(cptvf); 507 } else if (unlikely(intr & OTX_CPT_VF_INTR_DOVF_MASK)) { 508 cptvf_clear_dovf_intr(cptvf); 509 /* Clear doorbell count */ 510 otx_cptvf_write_vq_doorbell(cptvf, 0); 511 dev_err(&pdev->dev, 512 "Doorbell overflow error interrupt 0x%llx on CPT VF %d\n", 513 intr, cptvf->vfid); 514 } else if (unlikely(intr & OTX_CPT_VF_INTR_IRDE_MASK)) { 515 cptvf_clear_irde_intr(cptvf); 516 dev_err(&pdev->dev, 517 "Instruction NCB read error interrupt 0x%llx on CPT VF %d\n", 518 intr, cptvf->vfid); 519 } else if (unlikely(intr & OTX_CPT_VF_INTR_NWRP_MASK)) { 520 cptvf_clear_nwrp_intr(cptvf); 521 dev_err(&pdev->dev, 522 "NCB response write error interrupt 0x%llx on CPT VF %d\n", 523 intr, cptvf->vfid); 524 } else if (unlikely(intr & OTX_CPT_VF_INTR_SERR_MASK)) { 525 cptvf_clear_swerr_intr(cptvf); 526 dev_err(&pdev->dev, 527 "Software error interrupt 0x%llx on CPT VF %d\n", 528 intr, cptvf->vfid); 529 } else { 530 dev_err(&pdev->dev, "Unhandled interrupt in OTX_CPT VF %d\n", 531 cptvf->vfid); 532 } 533 534 return IRQ_HANDLED; 535 } 536 537 static inline struct otx_cptvf_wqe *get_cptvf_vq_wqe(struct otx_cptvf *cptvf, 538 int qno) 539 { 540 struct otx_cptvf_wqe_info *nwqe_info; 541 542 if (unlikely(qno >= cptvf->num_queues)) 543 return NULL; 544 nwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info; 545 546 return &nwqe_info->vq_wqe[qno]; 547 } 548 549 static inline u32 cptvf_read_vq_done_count(struct otx_cptvf *cptvf) 550 { 551 union otx_cptx_vqx_done vqx_done; 552 553 vqx_done.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE(0)); 554 return vqx_done.s.done; 555 } 556 557 static inline void cptvf_write_vq_done_ack(struct otx_cptvf *cptvf, 558 u32 ackcnt) 559 { 560 union otx_cptx_vqx_done_ack vqx_dack_cnt; 561 562 vqx_dack_cnt.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0)); 563 vqx_dack_cnt.s.done_ack = ackcnt; 564 writeq(vqx_dack_cnt.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0)); 565 } 566 567 static irqreturn_t cptvf_done_intr_handler(int __always_unused irq, 568 void *cptvf_dev) 569 { 570 struct otx_cptvf *cptvf = (struct otx_cptvf *)cptvf_dev; 571 struct pci_dev *pdev = cptvf->pdev; 572 /* Read the number of completions */ 573 u32 intr = cptvf_read_vq_done_count(cptvf); 574 575 if (intr) { 576 struct otx_cptvf_wqe *wqe; 577 578 /* 579 * Acknowledge the number of scheduled completions for 580 * processing 581 */ 582 cptvf_write_vq_done_ack(cptvf, intr); 583 wqe = get_cptvf_vq_wqe(cptvf, 0); 584 if (unlikely(!wqe)) { 585 dev_err(&pdev->dev, "No work to schedule for VF (%d)\n", 586 cptvf->vfid); 587 return IRQ_NONE; 588 } 589 tasklet_hi_schedule(&wqe->twork); 590 } 591 592 return IRQ_HANDLED; 593 } 594 595 static void cptvf_set_irq_affinity(struct otx_cptvf *cptvf, int vec) 596 { 597 struct pci_dev *pdev = cptvf->pdev; 598 int cpu; 599 600 if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec], 601 GFP_KERNEL)) { 602 dev_err(&pdev->dev, 603 "Allocation failed for affinity_mask for VF %d\n", 604 cptvf->vfid); 605 return; 606 } 607 608 cpu = cptvf->vfid % num_online_cpus(); 609 cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node), 610 cptvf->affinity_mask[vec]); 611 irq_set_affinity_hint(pci_irq_vector(pdev, vec), 612 cptvf->affinity_mask[vec]); 613 } 614 615 static void cptvf_write_vq_saddr(struct otx_cptvf *cptvf, u64 val) 616 { 617 union otx_cptx_vqx_saddr vqx_saddr; 618 619 vqx_saddr.u = val; 620 writeq(vqx_saddr.u, cptvf->reg_base + OTX_CPT_VQX_SADDR(0)); 621 } 622 623 static void cptvf_device_init(struct otx_cptvf *cptvf) 624 { 625 u64 base_addr = 0; 626 627 /* Disable the VQ */ 628 cptvf_write_vq_ctl(cptvf, 0); 629 /* Reset the doorbell */ 630 otx_cptvf_write_vq_doorbell(cptvf, 0); 631 /* Clear inflight */ 632 cptvf_write_vq_inprog(cptvf, 0); 633 /* Write VQ SADDR */ 634 base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr); 635 cptvf_write_vq_saddr(cptvf, base_addr); 636 /* Configure timerhold / coalescence */ 637 cptvf_write_vq_done_timewait(cptvf, OTX_CPT_TIMER_HOLD); 638 cptvf_write_vq_done_numwait(cptvf, OTX_CPT_COUNT_HOLD); 639 /* Enable the VQ */ 640 cptvf_write_vq_ctl(cptvf, 1); 641 /* Flag the VF ready */ 642 cptvf->flags |= OTX_CPT_FLAG_DEVICE_READY; 643 } 644 645 static ssize_t vf_type_show(struct device *dev, 646 struct device_attribute *attr, 647 char *buf) 648 { 649 struct otx_cptvf *cptvf = dev_get_drvdata(dev); 650 char *msg; 651 652 switch (cptvf->vftype) { 653 case OTX_CPT_AE_TYPES: 654 msg = "AE"; 655 break; 656 657 case OTX_CPT_SE_TYPES: 658 msg = "SE"; 659 break; 660 661 default: 662 msg = "Invalid"; 663 } 664 665 return scnprintf(buf, PAGE_SIZE, "%s\n", msg); 666 } 667 668 static ssize_t vf_engine_group_show(struct device *dev, 669 struct device_attribute *attr, 670 char *buf) 671 { 672 struct otx_cptvf *cptvf = dev_get_drvdata(dev); 673 674 return scnprintf(buf, PAGE_SIZE, "%d\n", cptvf->vfgrp); 675 } 676 677 static ssize_t vf_engine_group_store(struct device *dev, 678 struct device_attribute *attr, 679 const char *buf, size_t count) 680 { 681 struct otx_cptvf *cptvf = dev_get_drvdata(dev); 682 int val, ret; 683 684 ret = kstrtoint(buf, 10, &val); 685 if (ret) 686 return ret; 687 688 if (val < 0) 689 return -EINVAL; 690 691 if (val >= OTX_CPT_MAX_ENGINE_GROUPS) { 692 dev_err(dev, "Engine group >= than max available groups %d\n", 693 OTX_CPT_MAX_ENGINE_GROUPS); 694 return -EINVAL; 695 } 696 697 ret = otx_cptvf_send_vf_to_grp_msg(cptvf, val); 698 if (ret) 699 return ret; 700 701 return count; 702 } 703 704 static ssize_t vf_coalesc_time_wait_show(struct device *dev, 705 struct device_attribute *attr, 706 char *buf) 707 { 708 struct otx_cptvf *cptvf = dev_get_drvdata(dev); 709 710 return scnprintf(buf, PAGE_SIZE, "%d\n", 711 cptvf_read_vq_done_timewait(cptvf)); 712 } 713 714 static ssize_t vf_coalesc_num_wait_show(struct device *dev, 715 struct device_attribute *attr, 716 char *buf) 717 { 718 struct otx_cptvf *cptvf = dev_get_drvdata(dev); 719 720 return scnprintf(buf, PAGE_SIZE, "%d\n", 721 cptvf_read_vq_done_numwait(cptvf)); 722 } 723 724 static ssize_t vf_coalesc_time_wait_store(struct device *dev, 725 struct device_attribute *attr, 726 const char *buf, size_t count) 727 { 728 struct otx_cptvf *cptvf = dev_get_drvdata(dev); 729 long val; 730 int ret; 731 732 ret = kstrtol(buf, 10, &val); 733 if (ret != 0) 734 return ret; 735 736 if (val < OTX_CPT_COALESC_MIN_TIME_WAIT || 737 val > OTX_CPT_COALESC_MAX_TIME_WAIT) 738 return -EINVAL; 739 740 cptvf_write_vq_done_timewait(cptvf, val); 741 return count; 742 } 743 744 static ssize_t vf_coalesc_num_wait_store(struct device *dev, 745 struct device_attribute *attr, 746 const char *buf, size_t count) 747 { 748 struct otx_cptvf *cptvf = dev_get_drvdata(dev); 749 long val; 750 int ret; 751 752 ret = kstrtol(buf, 10, &val); 753 if (ret != 0) 754 return ret; 755 756 if (val < OTX_CPT_COALESC_MIN_NUM_WAIT || 757 val > OTX_CPT_COALESC_MAX_NUM_WAIT) 758 return -EINVAL; 759 760 cptvf_write_vq_done_numwait(cptvf, val); 761 return count; 762 } 763 764 static DEVICE_ATTR_RO(vf_type); 765 static DEVICE_ATTR_RW(vf_engine_group); 766 static DEVICE_ATTR_RW(vf_coalesc_time_wait); 767 static DEVICE_ATTR_RW(vf_coalesc_num_wait); 768 769 static struct attribute *otx_cptvf_attrs[] = { 770 &dev_attr_vf_type.attr, 771 &dev_attr_vf_engine_group.attr, 772 &dev_attr_vf_coalesc_time_wait.attr, 773 &dev_attr_vf_coalesc_num_wait.attr, 774 NULL 775 }; 776 777 static const struct attribute_group otx_cptvf_sysfs_group = { 778 .attrs = otx_cptvf_attrs, 779 }; 780 781 static int otx_cptvf_probe(struct pci_dev *pdev, 782 const struct pci_device_id *ent) 783 { 784 struct device *dev = &pdev->dev; 785 struct otx_cptvf *cptvf; 786 int err; 787 788 cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL); 789 if (!cptvf) 790 return -ENOMEM; 791 792 pci_set_drvdata(pdev, cptvf); 793 cptvf->pdev = pdev; 794 795 err = pci_enable_device(pdev); 796 if (err) { 797 dev_err(dev, "Failed to enable PCI device\n"); 798 goto clear_drvdata; 799 } 800 err = pci_request_regions(pdev, DRV_NAME); 801 if (err) { 802 dev_err(dev, "PCI request regions failed 0x%x\n", err); 803 goto disable_device; 804 } 805 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); 806 if (err) { 807 dev_err(dev, "Unable to get usable 48-bit DMA configuration\n"); 808 goto release_regions; 809 } 810 811 /* MAP PF's configuration registers */ 812 cptvf->reg_base = pci_iomap(pdev, OTX_CPT_VF_PCI_CFG_BAR, 0); 813 if (!cptvf->reg_base) { 814 dev_err(dev, "Cannot map config register space, aborting\n"); 815 err = -ENOMEM; 816 goto release_regions; 817 } 818 819 cptvf->node = dev_to_node(&pdev->dev); 820 err = pci_alloc_irq_vectors(pdev, OTX_CPT_VF_MSIX_VECTORS, 821 OTX_CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX); 822 if (err < 0) { 823 dev_err(dev, "Request for #%d msix vectors failed\n", 824 OTX_CPT_VF_MSIX_VECTORS); 825 goto unmap_region; 826 } 827 828 err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), 829 cptvf_misc_intr_handler, 0, "CPT VF misc intr", 830 cptvf); 831 if (err) { 832 dev_err(dev, "Failed to request misc irq\n"); 833 goto free_vectors; 834 } 835 836 /* Enable mailbox interrupt */ 837 cptvf_enable_mbox_interrupts(cptvf); 838 cptvf_enable_swerr_interrupts(cptvf); 839 840 /* Check cpt pf status, gets chip ID / device Id from PF if ready */ 841 err = otx_cptvf_check_pf_ready(cptvf); 842 if (err) 843 goto free_misc_irq; 844 845 /* CPT VF software resources initialization */ 846 cptvf->cqinfo.qchunksize = OTX_CPT_CMD_QCHUNK_SIZE; 847 err = cptvf_sw_init(cptvf, OTX_CPT_CMD_QLEN, OTX_CPT_NUM_QS_PER_VF); 848 if (err) { 849 dev_err(dev, "cptvf_sw_init() failed\n"); 850 goto free_misc_irq; 851 } 852 /* Convey VQ LEN to PF */ 853 err = otx_cptvf_send_vq_size_msg(cptvf); 854 if (err) 855 goto sw_cleanup; 856 857 /* CPT VF device initialization */ 858 cptvf_device_init(cptvf); 859 /* Send msg to PF to assign currnet Q to required group */ 860 err = otx_cptvf_send_vf_to_grp_msg(cptvf, cptvf->vfgrp); 861 if (err) 862 goto sw_cleanup; 863 864 cptvf->priority = 1; 865 err = otx_cptvf_send_vf_priority_msg(cptvf); 866 if (err) 867 goto sw_cleanup; 868 869 err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), 870 cptvf_done_intr_handler, 0, "CPT VF done intr", 871 cptvf); 872 if (err) { 873 dev_err(dev, "Failed to request done irq\n"); 874 goto free_done_irq; 875 } 876 877 /* Enable done interrupt */ 878 cptvf_enable_done_interrupts(cptvf); 879 880 /* Set irq affinity masks */ 881 cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC); 882 cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE); 883 884 err = otx_cptvf_send_vf_up(cptvf); 885 if (err) 886 goto free_irq_affinity; 887 888 /* Initialize algorithms and set ops */ 889 err = otx_cpt_crypto_init(pdev, THIS_MODULE, 890 cptvf->vftype == OTX_CPT_SE_TYPES ? OTX_CPT_SE : OTX_CPT_AE, 891 cptvf->vftype, 1, cptvf->num_vfs); 892 if (err) { 893 dev_err(dev, "Failed to register crypto algs\n"); 894 goto free_irq_affinity; 895 } 896 897 err = sysfs_create_group(&dev->kobj, &otx_cptvf_sysfs_group); 898 if (err) { 899 dev_err(dev, "Creating sysfs entries failed\n"); 900 goto crypto_exit; 901 } 902 903 return 0; 904 905 crypto_exit: 906 otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype); 907 free_irq_affinity: 908 cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE); 909 cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC); 910 free_done_irq: 911 free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf); 912 sw_cleanup: 913 cptvf_sw_cleanup(cptvf); 914 free_misc_irq: 915 free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf); 916 free_vectors: 917 pci_free_irq_vectors(cptvf->pdev); 918 unmap_region: 919 pci_iounmap(pdev, cptvf->reg_base); 920 release_regions: 921 pci_release_regions(pdev); 922 disable_device: 923 pci_disable_device(pdev); 924 clear_drvdata: 925 pci_set_drvdata(pdev, NULL); 926 927 return err; 928 } 929 930 static void otx_cptvf_remove(struct pci_dev *pdev) 931 { 932 struct otx_cptvf *cptvf = pci_get_drvdata(pdev); 933 934 if (!cptvf) { 935 dev_err(&pdev->dev, "Invalid CPT-VF device\n"); 936 return; 937 } 938 939 /* Convey DOWN to PF */ 940 if (otx_cptvf_send_vf_down(cptvf)) { 941 dev_err(&pdev->dev, "PF not responding to DOWN msg\n"); 942 } else { 943 sysfs_remove_group(&pdev->dev.kobj, &otx_cptvf_sysfs_group); 944 otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype); 945 cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE); 946 cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC); 947 free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf); 948 free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf); 949 cptvf_sw_cleanup(cptvf); 950 pci_free_irq_vectors(cptvf->pdev); 951 pci_iounmap(pdev, cptvf->reg_base); 952 pci_release_regions(pdev); 953 pci_disable_device(pdev); 954 pci_set_drvdata(pdev, NULL); 955 } 956 } 957 958 /* Supported devices */ 959 static const struct pci_device_id otx_cptvf_id_table[] = { 960 {PCI_VDEVICE(CAVIUM, OTX_CPT_PCI_VF_DEVICE_ID), 0}, 961 { 0, } /* end of table */ 962 }; 963 964 static struct pci_driver otx_cptvf_pci_driver = { 965 .name = DRV_NAME, 966 .id_table = otx_cptvf_id_table, 967 .probe = otx_cptvf_probe, 968 .remove = otx_cptvf_remove, 969 }; 970 971 module_pci_driver(otx_cptvf_pci_driver); 972 973 MODULE_AUTHOR("Marvell International Ltd."); 974 MODULE_DESCRIPTION("Marvell OcteonTX CPT Virtual Function Driver"); 975 MODULE_LICENSE("GPL v2"); 976 MODULE_VERSION(DRV_VERSION); 977 MODULE_DEVICE_TABLE(pci, otx_cptvf_id_table); 978