// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include "otx2_cpt_common.h"
#include "otx2_cptlf.h"
#include "rvu_reg.h"

#define CPT_TIMER_HOLD 0x03F
#define CPT_COUNT_HOLD 32

static void cptlf_do_set_done_time_wait(struct otx2_cptlf_info *lf,
					int time_wait)
{
	union otx2_cptx_lf_done_wait done_wait;

	done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
				      lf->slot, OTX2_CPT_LF_DONE_WAIT);
	done_wait.s.time_wait = time_wait;
	otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
			 OTX2_CPT_LF_DONE_WAIT, done_wait.u);
}

static void cptlf_do_set_done_num_wait(struct otx2_cptlf_info *lf, int num_wait)
{
	union otx2_cptx_lf_done_wait done_wait;

	done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
				      lf->slot, OTX2_CPT_LF_DONE_WAIT);
	done_wait.s.num_wait = num_wait;
	otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
			 OTX2_CPT_LF_DONE_WAIT, done_wait.u);
}

static void cptlf_set_done_time_wait(struct otx2_cptlfs_info *lfs,
				     int time_wait)
{
	int slot;

	for (slot = 0; slot < lfs->lfs_num; slot++)
		cptlf_do_set_done_time_wait(&lfs->lf[slot], time_wait);
}

static void cptlf_set_done_num_wait(struct otx2_cptlfs_info *lfs, int num_wait)
{
	int slot;

	for (slot = 0; slot < lfs->lfs_num; slot++)
		cptlf_do_set_done_num_wait(&lfs->lf[slot], num_wait);
}

static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
{
	struct otx2_cptlfs_info *lfs = lf->lfs;
	union otx2_cptx_af_lf_ctrl lf_ctrl;
	int ret;

	ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
				   CPT_AF_LFX_CTL(lf->slot),
				   &lf_ctrl.u, lfs->blkaddr);
	if (ret)
		return ret;

	lf_ctrl.s.pri = pri ? 1 : 0;

	ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
				    CPT_AF_LFX_CTL(lf->slot),
				    lf_ctrl.u, lfs->blkaddr);
	return ret;
}
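/*
 * Note: CPT_AF_LFX_CTL is an AF (admin function) register, which a PF/VF
 * cannot reach through its own BAR mapping. cptlf_set_pri() above and
 * cptlf_set_eng_grps_mask() below therefore do their read-modify-write via
 * the RVU mailbox helpers otx2_cpt_read_af_reg()/otx2_cpt_write_af_reg(),
 * unlike the per-LF registers elsewhere in this file which use plain MMIO.
 */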
static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
				   int eng_grps_mask)
{
	struct otx2_cptlfs_info *lfs = lf->lfs;
	union otx2_cptx_af_lf_ctrl lf_ctrl;
	int ret;

	ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
				   CPT_AF_LFX_CTL(lf->slot),
				   &lf_ctrl.u, lfs->blkaddr);
	if (ret)
		return ret;

	lf_ctrl.s.grp = eng_grps_mask;

	ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
				    CPT_AF_LFX_CTL(lf->slot),
				    lf_ctrl.u, lfs->blkaddr);
	return ret;
}

static int cptlf_set_grp_and_pri(struct otx2_cptlfs_info *lfs,
				 int eng_grp_mask, int pri)
{
	int slot, ret = 0;

	for (slot = 0; slot < lfs->lfs_num; slot++) {
		ret = cptlf_set_pri(&lfs->lf[slot], pri);
		if (ret)
			return ret;

		ret = cptlf_set_eng_grps_mask(&lfs->lf[slot], eng_grp_mask);
		if (ret)
			return ret;
	}
	return ret;
}

static void cptlf_hw_init(struct otx2_cptlfs_info *lfs)
{
	/* Disable instruction queues */
	otx2_cptlf_disable_iqueues(lfs);

	/* Set instruction queues base addresses */
	otx2_cptlf_set_iqueues_base_addr(lfs);

	/* Set instruction queues sizes */
	otx2_cptlf_set_iqueues_size(lfs);

	/* Set done interrupts time wait */
	cptlf_set_done_time_wait(lfs, CPT_TIMER_HOLD);

	/* Set done interrupts num wait */
	cptlf_set_done_num_wait(lfs, CPT_COUNT_HOLD);

	/* Enable instruction queues */
	otx2_cptlf_enable_iqueues(lfs);
}

static void cptlf_hw_cleanup(struct otx2_cptlfs_info *lfs)
{
	/* Disable instruction queues */
	otx2_cptlf_disable_iqueues(lfs);
}
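/*
 * RVU interrupt enables come as write-1-to-set/write-1-to-clear register
 * pairs: writing a 1 to a bit of the ENA_W1S copy enables that interrupt,
 * writing a 1 to the same bit of the ENA_W1C copy disables it, and zero
 * bits are left untouched. cptlf_set_misc_intrs() below picks the register
 * from the @enable flag and writes the same bit pattern either way.
 */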
static void cptlf_set_misc_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
{
	union otx2_cptx_lf_misc_int_ena_w1s irq_misc = { .u = 0x0 };
	u64 reg = enable ? OTX2_CPT_LF_MISC_INT_ENA_W1S :
			   OTX2_CPT_LF_MISC_INT_ENA_W1C;
	int slot;

	irq_misc.s.fault = 0x1;
	irq_misc.s.hwerr = 0x1;
	irq_misc.s.irde = 0x1;
	irq_misc.s.nqerr = 0x1;
	irq_misc.s.nwrp = 0x1;

	for (slot = 0; slot < lfs->lfs_num; slot++)
		otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot, reg,
				 irq_misc.u);
}

static void cptlf_enable_intrs(struct otx2_cptlfs_info *lfs)
{
	int slot;

	/* Enable done interrupts */
	for (slot = 0; slot < lfs->lfs_num; slot++)
		otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
				 OTX2_CPT_LF_DONE_INT_ENA_W1S, 0x1);
	/* Enable Misc interrupts */
	cptlf_set_misc_intrs(lfs, true);
}

static void cptlf_disable_intrs(struct otx2_cptlfs_info *lfs)
{
	int slot;

	for (slot = 0; slot < lfs->lfs_num; slot++)
		otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
				 OTX2_CPT_LF_DONE_INT_ENA_W1C, 0x1);
	cptlf_set_misc_intrs(lfs, false);
}

static inline int cptlf_read_done_cnt(struct otx2_cptlf_info *lf)
{
	union otx2_cptx_lf_done irq_cnt;

	irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
				    lf->slot, OTX2_CPT_LF_DONE);
	return irq_cnt.s.done;
}

static irqreturn_t cptlf_misc_intr_handler(int __always_unused irq, void *arg)
{
	union otx2_cptx_lf_misc_int irq_misc, irq_misc_ack;
	struct otx2_cptlf_info *lf = arg;
	struct device *dev;

	dev = &lf->lfs->pdev->dev;
	irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
				     lf->slot, OTX2_CPT_LF_MISC_INT);
	irq_misc_ack.u = 0x0;

	if (irq_misc.s.fault) {
		dev_err(dev, "Memory error detected while executing CPT_INST_S, LF %d.\n",
			lf->slot);
		irq_misc_ack.s.fault = 0x1;

	} else if (irq_misc.s.hwerr) {
		dev_err(dev, "HW error from an engine executing CPT_INST_S, LF %d.\n",
			lf->slot);
		irq_misc_ack.s.hwerr = 0x1;

	} else if (irq_misc.s.nwrp) {
		dev_err(dev, "SMMU fault while writing CPT_RES_S to CPT_INST_S[RES_ADDR], LF %d.\n",
			lf->slot);
		irq_misc_ack.s.nwrp = 0x1;

	} else if (irq_misc.s.irde) {
		dev_err(dev, "Memory error when accessing instruction memory queue CPT_LF_Q_BASE[ADDR].\n");
		irq_misc_ack.s.irde = 0x1;

	} else if (irq_misc.s.nqerr) {
		dev_err(dev, "Error enqueuing an instruction received at CPT_LF_NQ.\n");
		irq_misc_ack.s.nqerr = 0x1;

	} else {
		dev_err(dev, "Unhandled interrupt in CPT LF %d\n", lf->slot);
		return IRQ_NONE;
	}

	/* Acknowledge interrupts */
	otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
			 OTX2_CPT_LF_MISC_INT, irq_misc_ack.u);

	return IRQ_HANDLED;
}
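/*
 * Completion ("done") interrupts are coalesced: CPT_LF_DONE counts finished
 * instructions, and the interrupt fires once the CPT_LF_DONE_WAIT NUM_WAIT
 * count or TIME_WAIT timer threshold (CPT_COUNT_HOLD/CPT_TIMER_HOLD above)
 * is crossed. The handler below reads the pending count, acknowledges
 * exactly that many completions through CPT_LF_DONE_ACK, and writes the
 * saved CPT_LF_DONE_WAIT value back unchanged; the write-back restarts the
 * coalescing window before the completion tasklet is scheduled.
 */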
static irqreturn_t cptlf_done_intr_handler(int irq, void *arg)
{
	union otx2_cptx_lf_done_wait done_wait;
	struct otx2_cptlf_info *lf = arg;
	int irq_cnt;

	/* Read the number of completed requests */
	irq_cnt = cptlf_read_done_cnt(lf);
	if (irq_cnt) {
		done_wait.u = otx2_cpt_read64(lf->lfs->reg_base,
					      lf->lfs->blkaddr, lf->slot,
					      OTX2_CPT_LF_DONE_WAIT);
		/* Acknowledge the number of completed requests */
		otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
				 OTX2_CPT_LF_DONE_ACK, irq_cnt);

		otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
				 OTX2_CPT_LF_DONE_WAIT, done_wait.u);
		if (unlikely(!lf->wqe)) {
			dev_err(&lf->lfs->pdev->dev, "No work for LF %d\n",
				lf->slot);
			return IRQ_NONE;
		}

		/* Schedule processing of completed requests */
		tasklet_hi_schedule(&lf->wqe->work);
	}
	return IRQ_HANDLED;
}

void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs)
{
	int i, offs, vector;

	for (i = 0; i < lfs->lfs_num; i++) {
		for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
			if (!lfs->lf[i].is_irq_reg[offs])
				continue;

			vector = pci_irq_vector(lfs->pdev,
						lfs->lf[i].msix_offset + offs);
			free_irq(vector, &lfs->lf[i]);
			lfs->lf[i].is_irq_reg[offs] = false;
		}
	}
	cptlf_disable_intrs(lfs);
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_interrupts,
		     CRYPTO_DEV_OCTEONTX2_CPT);

static int cptlf_do_register_interrupts(struct otx2_cptlfs_info *lfs,
					int lf_num, int irq_offset,
					irq_handler_t handler)
{
	int ret, vector;

	vector = pci_irq_vector(lfs->pdev, lfs->lf[lf_num].msix_offset +
				irq_offset);
	ret = request_irq(vector, handler, 0,
			  lfs->lf[lf_num].irq_name[irq_offset],
			  &lfs->lf[lf_num]);
	if (ret)
		return ret;

	lfs->lf[lf_num].is_irq_reg[irq_offset] = true;

	return ret;
}

int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs)
{
	int irq_offs, ret, i;

	for (i = 0; i < lfs->lfs_num; i++) {
		irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
		snprintf(lfs->lf[i].irq_name[irq_offs], 32, "CPTLF Misc%d", i);
		ret = cptlf_do_register_interrupts(lfs, i, irq_offs,
						   cptlf_misc_intr_handler);
		if (ret)
			goto free_irq;

		irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
		snprintf(lfs->lf[i].irq_name[irq_offs], 32, "OTX2_CPTLF Done%d",
			 i);
		ret = cptlf_do_register_interrupts(lfs, i, irq_offs,
						   cptlf_done_intr_handler);
		if (ret)
			goto free_irq;
	}
	cptlf_enable_intrs(lfs);
	return 0;

free_irq:
	otx2_cptlf_unregister_interrupts(lfs);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_interrupts, CRYPTO_DEV_OCTEONTX2_CPT);

void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
	int slot, offs;

	for (slot = 0; slot < lfs->lfs_num; slot++) {
		for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++)
			irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
					      lfs->lf[slot].msix_offset +
					      offs), NULL);
		free_cpumask_var(lfs->lf[slot].affinity_mask);
	}
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_free_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);

int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
	struct otx2_cptlf_info *lf = lfs->lf;
	int slot, offs, ret;

	for (slot = 0; slot < lfs->lfs_num; slot++) {
		if (!zalloc_cpumask_var(&lf[slot].affinity_mask, GFP_KERNEL)) {
			dev_err(&lfs->pdev->dev,
				"cpumask allocation failed for LF %d\n", slot);
			ret = -ENOMEM;
			goto free_affinity_mask;
		}

		cpumask_set_cpu(cpumask_local_spread(slot,
				dev_to_node(&lfs->pdev->dev)),
				lf[slot].affinity_mask);

		for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
			ret = irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
						lf[slot].msix_offset + offs),
						lf[slot].affinity_mask);
			if (ret)
				goto free_affinity_mask;
		}
	}
	return 0;

free_affinity_mask:
	otx2_cptlf_free_irqs_affinity(lfs);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_set_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);
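/**
 * otx2_cptlf_init() - attach and initialize a set of CPT LFs
 * @lfs: LFs group descriptor; pdev and reg_base must already be set up
 *       by the caller
 * @eng_grp_mask: bitmask of engine groups the LFs may submit to
 * @pri: instruction queue priority, nonzero selects high priority
 * @lfs_num: number of LFs to attach
 *
 * Attaches @lfs_num LFs through the RVU mailbox, allocates their
 * instruction queues, programs queue base addresses, sizes and done
 * interrupt coalescing, and sets the engine group mask and priority of
 * every LF. On failure everything acquired so far is rolled back and
 * lfs_num is cleared.
 *
 * Return: 0 on success, negative error code otherwise.
 */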
int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
		    int lfs_num)
{
	int slot, ret;

	if (!lfs->pdev || !lfs->reg_base)
		return -EINVAL;

	lfs->lfs_num = lfs_num;
	for (slot = 0; slot < lfs->lfs_num; slot++) {
		lfs->lf[slot].lfs = lfs;
		lfs->lf[slot].slot = slot;
		if (lfs->lmt_base)
			lfs->lf[slot].lmtline = lfs->lmt_base +
						(slot * LMTLINE_SIZE);
		else
			lfs->lf[slot].lmtline = lfs->reg_base +
				OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
						OTX2_CPT_LMT_LF_LMTLINEX(0));

		lfs->lf[slot].ioreg = lfs->reg_base +
			OTX2_CPT_RVU_FUNC_ADDR_S(lfs->blkaddr, slot,
						 OTX2_CPT_LF_NQX(0));
	}
	/* Send request to attach LFs */
	ret = otx2_cpt_attach_rscrs_msg(lfs);
	if (ret)
		goto clear_lfs_num;

	ret = otx2_cpt_alloc_instruction_queues(lfs);
	if (ret) {
		dev_err(&lfs->pdev->dev,
			"Allocating instruction queues failed\n");
		goto detach_rsrcs;
	}
	cptlf_hw_init(lfs);
	/*
	 * Allow each LF to execute requests destined to any of 8 engine
	 * groups and set queue priority of each LF to high
	 */
	ret = cptlf_set_grp_and_pri(lfs, eng_grp_mask, pri);
	if (ret)
		goto free_iq;

	return 0;

free_iq:
	cptlf_hw_cleanup(lfs);
	otx2_cpt_free_instruction_queues(lfs);
detach_rsrcs:
	otx2_cpt_detach_rsrcs_msg(lfs);
clear_lfs_num:
	lfs->lfs_num = 0;
	return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_init, CRYPTO_DEV_OCTEONTX2_CPT);

void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
{
	/* Cleanup LFs hardware side */
	cptlf_hw_cleanup(lfs);
	/* Free instruction queues */
	otx2_cpt_free_instruction_queues(lfs);
	/* Send request to detach LFs */
	otx2_cpt_detach_rsrcs_msg(lfs);
	lfs->lfs_num = 0;
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_shutdown, CRYPTO_DEV_OCTEONTX2_CPT);

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell RVU CPT Common module");
MODULE_LICENSE("GPL");