Lines Matching +full:se +full:- +full:pos
1 // SPDX-License-Identifier: GPL-2.0-only
16 #define DRV_NAME "thunder-cpt"
21 MODULE_PARM_DESC(num_vfs, "Number of VFs to enable (1-16)");
32 struct device *dev = &cpt->pdev->dev; in cpt_disable_cores()
35 coremask = (coremask << cpt->max_se_cores); in cpt_disable_cores()
38 grpmask = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp)); in cpt_disable_cores()
39 cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), in cpt_disable_cores()
42 grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0)); in cpt_disable_cores()
45 grp = cpt_read_csr64(cpt->reg_base, in cpt_disable_cores()
47 if (!timeout--) in cpt_disable_cores()
54 pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0)); in cpt_disable_cores()
55 cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), in cpt_disable_cores()
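
The search shows only fragments of cpt_disable_cores(); below is a minimal sketch of the full quiesce sequence reconstructed from the matched lines. The signature is assumed from the call sites, and a u64 local is used for the busy poll because reusing the u8 grp parameter, as the matched lines suggest, would truncate the busy mask to the low eight cores:

static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask,
			      u8 type, u8 grp)
{
	struct device *dev = &cpt->pdev->dev;
	u64 pf_exe_ctl, grpmask, busy;
	u32 timeout = 100;

	/* AE cores sit above the SE cores in every core bitmap */
	if (type == AE_TYPES)
		coremask = (coremask << cpt->max_se_cores);

	/* Detach the cores from their engine group */
	grpmask = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
	cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
			grpmask & ~coremask);

	/* Poll EXEC_BUSY until the cores drain, with a bounded retry */
	busy = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
	while (busy & coremask) {
		dev_err(dev, "Cores still busy %llx", coremask);
		busy = cpt_read_csr64(cpt->reg_base,
				      CPTX_PF_EXEC_BUSY(0));
		if (!timeout--)
			break;
	}

	/* Finally power the cores down in EXE_CTL */
	pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
	cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
			pf_exe_ctl & ~coremask);
}
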
69 coremask = (coremask << cpt->max_se_cores); in cpt_enable_cores()
71 pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0)); in cpt_enable_cores()
72 cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), in cpt_enable_cores()
83 coremask = (coremask << cpt->max_se_cores); in cpt_configure_group()
85 pf_gx_en = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp)); in cpt_configure_group()
86 cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), in cpt_configure_group()
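
cpt_enable_cores() and cpt_configure_group() mirror the disable path: each is a read-modify-write that ORs the (AE-shifted) core mask into a control CSR. A condensed sketch, with signatures assumed from the call sites in do_cpt_init():

/* Turn the cores on: set their bits in PF_EXE_CTL */
static void cpt_enable_cores(struct cpt_device *cpt, u64 coremask, u8 type)
{
	u64 pf_exe_ctl;

	if (type == AE_TYPES)
		coremask = (coremask << cpt->max_se_cores);

	pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
	cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
			pf_exe_ctl | coremask);
}

/* Attach the cores to engine group 'grp': set their bits in PF_GX_EN */
static void cpt_configure_group(struct cpt_device *cpt, u8 grp,
				u64 coremask, u8 type)
{
	u64 pf_gx_en;

	if (type == AE_TYPES)
		coremask = (coremask << cpt->max_se_cores);

	pf_gx_en = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
	cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
			pf_gx_en | coremask);
}
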
94 cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1CX(0, 0), ~0ull); in cpt_disable_mbox_interrupts()
100 cpt_write_csr64(cpt->reg_base, CPTX_PF_ECC0_ENA_W1C(0), ~0ull); in cpt_disable_ecc_interrupts()
106 cpt_write_csr64(cpt->reg_base, CPTX_PF_EXEC_ENA_W1C(0), ~0ull); in cpt_disable_exec_interrupts()
119 cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1SX(0, 0), ~0ull); in cpt_enable_mbox_interrupts()
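
The _W1S/_W1C register pairs follow the Cavium write-one-to-set/write-one-to-clear idiom: only bits written as 1 change, so writing ~0ull clears (or sets) every enable bit without a read-modify-write. The same property allows masking a single VF's mailbox interrupt in one write; a one-line illustration (the vf index is hypothetical):

	cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1CX(0, 0),
			1ull << vf);	/* clears only this VF's enable bit */
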
126 struct device *dev = &cpt->pdev->dev; in cpt_load_microcode()
128 if (!mcode || !mcode->code) { in cpt_load_microcode()
130 return -EINVAL; in cpt_load_microcode()
133 if (mcode->code_size == 0) { in cpt_load_microcode()
135 return -EINVAL; in cpt_load_microcode()
138 /* Assumes 0-9 are SE cores for UCODE_BASE registers and AE core bases follow */ in cpt_load_microcode()
141 if (mcode->is_ae) { in cpt_load_microcode()
151 if (mcode->core_mask & (1 << shift)) { in cpt_load_microcode()
152 cpt_write_csr64(cpt->reg_base, in cpt_load_microcode()
154 (u64)mcode->phys_base); in cpt_load_microcode()
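
The matched lines hide the loop setup in cpt_load_microcode(). Per the comment above, SE cores occupy UCODE_BASE slots 0-9 and AE cores follow; a sketch of the elided core walk (the CPTX_PF_ENGX_UCODE_BASE register name and the CPT_MAX_SE_CORES/CPT_MAX_TOTAL_CORES bounds are assumptions):

	int core = 0, shift = 0, total_cores = 0;

	if (mcode->is_ae) {
		core = CPT_MAX_SE_CORES;	/* AE bases start at slot 10 */
		total_cores = CPT_MAX_TOTAL_CORES;
	} else {
		core = 0;			/* SE bases occupy slots 0-9 */
		total_cores = CPT_MAX_SE_CORES;
	}

	/* Point every selected core of the group at the DMA'd microcode */
	for (; core < total_cores; core++, shift++) {
		if (mcode->core_mask & (1 << shift))
			cpt_write_csr64(cpt->reg_base,
					CPTX_PF_ENGX_UCODE_BASE(0, core),
					(u64)mcode->phys_base);
	}
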
163 struct device *dev = &cpt->pdev->dev; in do_cpt_init()
166 cpt->flags &= ~CPT_FLAG_DEVICE_READY; in do_cpt_init()
170 if (mcode->is_ae) { in do_cpt_init()
171 if (mcode->num_cores > cpt->max_ae_cores) { in do_cpt_init()
173 ret = -EINVAL; in do_cpt_init()
177 if (cpt->next_group >= CPT_MAX_CORE_GROUPS) { in do_cpt_init()
179 return -ENFILE; in do_cpt_init()
182 mcode->group = cpt->next_group; in do_cpt_init()
184 mcode->core_mask = GENMASK(mcode->num_cores - 1, 0); in do_cpt_init()
185 cpt_disable_cores(cpt, mcode->core_mask, AE_TYPES, in do_cpt_init()
186 mcode->group); in do_cpt_init()
191 mcode->version); in do_cpt_init()
194 cpt->next_group++; in do_cpt_init()
196 cpt_configure_group(cpt, mcode->group, mcode->core_mask, in do_cpt_init()
199 cpt_enable_cores(cpt, mcode->core_mask, AE_TYPES); in do_cpt_init()
201 if (mcode->num_cores > cpt->max_se_cores) { in do_cpt_init()
202 dev_err(dev, "Requested more cores than available SE cores\n"); in do_cpt_init()
203 ret = -EINVAL; in do_cpt_init()
206 if (cpt->next_group >= CPT_MAX_CORE_GROUPS) { in do_cpt_init()
208 return -ENFILE; in do_cpt_init()
211 mcode->group = cpt->next_group; in do_cpt_init()
213 mcode->core_mask = GENMASK(mcode->num_cores - 1, 0); in do_cpt_init()
214 cpt_disable_cores(cpt, mcode->core_mask, SE_TYPES, in do_cpt_init()
215 mcode->group); in do_cpt_init()
216 /* Load microcode for SE engines */ in do_cpt_init()
220 mcode->version); in do_cpt_init()
223 cpt->next_group++; in do_cpt_init()
225 cpt_configure_group(cpt, mcode->group, mcode->core_mask, in do_cpt_init()
227 /* Enable SE cores for the group mask */ in do_cpt_init()
228 cpt_enable_cores(cpt, mcode->core_mask, SE_TYPES); in do_cpt_init()
233 cpt->flags |= CPT_FLAG_DEVICE_READY; in do_cpt_init()
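
The mask arithmetic is worth spelling out, since it is easy to get wrong: GENMASK(h, l) sets bits h through l inclusive, so an n-core mask is GENMASK(n - 1, 0), as fixed at lines 184 and 213 above. For the ten SE cores requested by cpt_ucode_load_fw():

	u64 se_mask = GENMASK(10 - 1, 0);	/* bits 9..0 == 0x3ff */
	/* GENMASK(10, 0) == 0x7ff would also claim bit 10, which is the
	 * first AE core's position in the core bitmap. */
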
254 struct device *dev = &cpt->pdev->dev; in cpt_ucode_load_fw()
264 ucode = (struct ucode_header *)fw_entry->data; in cpt_ucode_load_fw()
265 mcode = &cpt->mcode[cpt->next_mc_idx]; in cpt_ucode_load_fw()
266 memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ); in cpt_ucode_load_fw()
267 code_length = ntohl(ucode->code_length); in cpt_ucode_load_fw()
269 ret = -EINVAL; in cpt_ucode_load_fw()
272 mcode->code_size = code_length * 2; /* code_length is in 16-bit words */ in cpt_ucode_load_fw()
274 mcode->is_ae = is_ae; in cpt_ucode_load_fw()
275 mcode->core_mask = 0ULL; in cpt_ucode_load_fw()
276 mcode->num_cores = is_ae ? 6 : 10; /* 6 AE or 10 SE cores */ in cpt_ucode_load_fw()
279 mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size, in cpt_ucode_load_fw()
280 &mcode->phys_base, GFP_KERNEL); in cpt_ucode_load_fw()
281 if (!mcode->code) { in cpt_ucode_load_fw()
283 ret = -ENOMEM; in cpt_ucode_load_fw()
287 memcpy((void *)mcode->code, (void *)(fw_entry->data + sizeof(*ucode)), in cpt_ucode_load_fw()
288 mcode->code_size); in cpt_ucode_load_fw()
290 /* Byte swap 64-bit */ in cpt_ucode_load_fw()
291 for (j = 0; j < (mcode->code_size / 8); j++) in cpt_ucode_load_fw()
292 ((__be64 *)mcode->code)[j] = cpu_to_be64(((u64 *)mcode->code)[j]); in cpt_ucode_load_fw()
293 /* MC also needs a 16-bit swap, as microcode is a 16-bit entity */ in cpt_ucode_load_fw()
294 for (j = 0; j < (mcode->code_size / 2); j++) in cpt_ucode_load_fw()
295 ((__be16 *)mcode->code)[j] = cpu_to_be16(((u16 *)mcode->code)[j]); in cpt_ucode_load_fw()
297 dev_dbg(dev, "mcode->code_size = %u\n", mcode->code_size); in cpt_ucode_load_fw()
298 dev_dbg(dev, "mcode->is_ae = %u\n", mcode->is_ae); in cpt_ucode_load_fw()
299 dev_dbg(dev, "mcode->num_cores = %u\n", mcode->num_cores); in cpt_ucode_load_fw()
300 dev_dbg(dev, "mcode->code = %px\n", mcode->code); in cpt_ucode_load_fw()
301 dev_dbg(dev, "mcode->phys_base = %llx\n", mcode->phys_base); in cpt_ucode_load_fw()
305 dma_free_coherent(&cpt->pdev->dev, mcode->code_size, in cpt_ucode_load_fw()
306 mcode->code, mcode->phys_base); in cpt_ucode_load_fw()
311 dev_info(dev, "Microcode Loaded %s\n", mcode->version); in cpt_ucode_load_fw()
312 mcode->is_mc_valid = 1; in cpt_ucode_load_fw()
313 cpt->next_mc_idx++; in cpt_ucode_load_fw()
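
The two swap loops compose into one net transform on every 64-bit word: the 16-bit halfwords end up in reversed order while each halfword keeps its byte order. A self-contained user-space demonstration, using compiler builtins as stand-ins for the kernel's cpu_to_be64()/cpu_to_be16() on a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* One 64-bit microcode word with recognizable bytes */
	uint8_t buf[8] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
	uint64_t w;
	uint16_t h;
	int j;

	/* 64-bit pass, as in cpt_ucode_load_fw() */
	memcpy(&w, buf, 8);
	w = __builtin_bswap64(w);
	memcpy(buf, &w, 8);

	/* 16-bit pass over the same bytes */
	for (j = 0; j < 4; j++) {
		memcpy(&h, buf + 2 * j, 2);
		h = __builtin_bswap16(h);
		memcpy(buf + 2 * j, &h, 2);
	}

	/* Net effect: halfwords reversed in order, bytes within each kept:
	 * 00 11 22 33 44 55 66 77  ->  66 77 44 55 22 33 00 11 */
	for (j = 0; j < 8; j++)
		printf("%02x ", buf[j]);
	printf("\n");
	return 0;
}
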
324 struct device *dev = &cpt->pdev->dev; in cpt_ucode_load()
326 ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-ae.out", true); in cpt_ucode_load()
331 ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-se.out", false); in cpt_ucode_load()
333 dev_err(dev, "se:cpt_ucode_load failed with ret: %d\n", ret); in cpt_ucode_load()
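
Since both images are fetched by name through the firmware loader at probe time, declaring them lets initramfs tooling bundle the files with the module; a plausible addition, assuming the driver does not already declare them elsewhere:

MODULE_FIRMWARE("cpt8x-mc-ae.out");
MODULE_FIRMWARE("cpt8x-mc-se.out");
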
351 cpt_write_csr64(cpt->reg_base, CPTX_PF_RESET(0), 1); in cpt_reset()
358 pf_cnsts.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_CONSTANTS(0)); in cpt_find_max_enabled_cores()
359 cpt->max_se_cores = pf_cnsts.s.se; in cpt_find_max_enabled_cores()
360 cpt->max_ae_cores = pf_cnsts.s.ae; in cpt_find_max_enabled_cores()
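
cpt_find_max_enabled_cores() pulls the hardware-reported engine counts out of CPTX_PF_CONSTANTS through a register/bitfield union. A sketch of the shape such a union takes in this driver family; the field widths and ordering here are assumptions for illustration only:

union cptx_pf_constants {
	u64 u;				/* raw 64-bit CSR value */
	struct {
		u64 se:8;		/* count of SE (symmetric) engines */
		u64 sg:8;
		u64 ae:8;		/* count of AE (asymmetric) engines */
		u64 grps:8;
		u64 reserved_32_63:32;
	} s;				/* little-endian bit order assumed */
};
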
367 bist_sts.u = cpt_read_csr64(cpt->reg_base, in cpt_check_bist_status()
377 bist_sts.u = cpt_read_csr64(cpt->reg_base, in cpt_check_exe_bist_status()
386 struct device *dev = &cpt->pdev->dev; in cpt_disable_all_cores()
390 cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), 0); in cpt_disable_all_cores()
394 grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0)); in cpt_disable_all_cores()
397 grp = cpt_read_csr64(cpt->reg_base, in cpt_disable_all_cores()
399 if (!timeout--) in cpt_disable_all_cores()
405 cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), 0); in cpt_disable_all_cores()
419 struct microcode *mcode = &cpt->mcode[grp]; in cpt_unload_microcode()
421 if (cpt->mcode[grp].code) in cpt_unload_microcode()
422 dma_free_coherent(&cpt->pdev->dev, mcode->code_size, in cpt_unload_microcode()
423 mcode->code, mcode->phys_base); in cpt_unload_microcode()
424 mcode->code = NULL; in cpt_unload_microcode()
428 cpt_write_csr64(cpt->reg_base, in cpt_unload_microcode()
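
cpt_unload_microcode() frees each group's coherent buffer and then scrubs the engines' microcode pointers; a sketch of the per-engine clear behind the write at line 428 (register name and loop bound are assumptions consistent with cpt_load_microcode()):

	/* Drop every engine's pointer to the now-freed microcode */
	for (core = 0; core < CPT_MAX_TOTAL_CORES; core++)
		cpt_write_csr64(cpt->reg_base,
				CPTX_PF_ENGX_UCODE_BASE(0, core), 0ull);
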
435 struct device *dev = &cpt->pdev->dev; in cpt_device_init()
445 return -ENODEV; in cpt_device_init()
451 return -ENODEV; in cpt_device_init()
460 cpt->next_mc_idx = 0; in cpt_device_init()
461 cpt->next_group = 0; in cpt_device_init()
463 cpt->flags |= CPT_FLAG_DEVICE_READY; in cpt_device_init()
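
Pieced together from the fragments above, cpt_device_init() runs a fixed bring-up order; a condensed sketch of that sequence (the post-reset delay is an assumption):

	cpt_reset(cpt);				/* soft-reset the PF */
	msleep(100);				/* let the reset settle */

	if (cpt_check_bist_status(cpt))		/* RAM BIST */
		return -ENODEV;
	if (cpt_check_exe_bist_status(cpt))	/* per-engine BIST */
		return -ENODEV;

	cpt_find_max_enabled_cores(cpt);	/* read SE/AE counts */
	cpt_disable_all_cores(cpt);		/* start quiesced */

	cpt->next_mc_idx = 0;			/* reset ucode bookkeeping */
	cpt->next_group = 0;
	cpt->flags |= CPT_FLAG_DEVICE_READY;	/* PF is usable */
	return 0;
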
471 struct device *dev = &cpt->pdev->dev; in cpt_register_interrupts()
473 /* Enable MSI-X */ in cpt_register_interrupts()
474 ret = pci_alloc_irq_vectors(cpt->pdev, CPT_PF_MSIX_VECTORS, in cpt_register_interrupts()
477 dev_err(dev, "Request for %d MSI-X vectors failed\n", in cpt_register_interrupts()
483 ret = request_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)), in cpt_register_interrupts()
494 pci_free_irq_vectors(cpt->pdev); in cpt_register_interrupts()
500 free_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)), cpt); in cpt_unregister_interrupts()
501 pci_free_irq_vectors(cpt->pdev); in cpt_unregister_interrupts()
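
With pci_free_irq_vectors() substituted above (pci_disable_msix() does not pair with pci_alloc_irq_vectors()), allocation and teardown are symmetric; the pattern in miniature (the handler name and IRQ label are assumptions):

static int cpt_setup_irqs(struct cpt_device *cpt)
{
	int ret;

	ret = pci_alloc_irq_vectors(cpt->pdev, CPT_PF_MSIX_VECTORS,
				    CPT_PF_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (ret < 0)
		return ret;

	ret = request_irq(pci_irq_vector(cpt->pdev,
					 CPT_PF_INT_VEC_E_MBOXX(0)),
			  cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt);
	if (ret)
		pci_free_irq_vectors(cpt->pdev); /* undo the allocation */
	return ret;
}
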
506 int pos = 0; in cpt_sriov_init()
509 struct pci_dev *pdev = cpt->pdev; in cpt_sriov_init()
511 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); in cpt_sriov_init()
512 if (!pos) { in cpt_sriov_init()
513 dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n"); in cpt_sriov_init()
514 return -ENODEV; in cpt_sriov_init()
517 cpt->num_vf_en = num_vfs; /* User requested VFs */ in cpt_sriov_init()
518 pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt); in cpt_sriov_init()
519 if (total_vf_cnt < cpt->num_vf_en) in cpt_sriov_init()
520 cpt->num_vf_en = total_vf_cnt; in cpt_sriov_init()
526 err = pci_enable_sriov(pdev, cpt->num_vf_en); in cpt_sriov_init()
528 dev_err(&pdev->dev, "SRIOV enable failed for %d VFs\n", in cpt_sriov_init()
529 cpt->num_vf_en); in cpt_sriov_init()
530 cpt->num_vf_en = 0; in cpt_sriov_init()
536 dev_info(&pdev->dev, "SRIOV enabled, number of VFs available: %d\n", in cpt_sriov_init()
537 cpt->num_vf_en); in cpt_sriov_init()
539 cpt->flags |= CPT_FLAG_SRIOV_ENABLED; in cpt_sriov_init()
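
Walking the extended capability list by hand works, but the PCI core already exposes TotalVFs; an equivalent, shorter probe of the same config word:

	/* Returns TotalVFs, or 0 if the device has no SR-IOV capability,
	 * replacing pci_find_ext_capability() + pci_read_config_word(). */
	int total_vf_cnt = pci_sriov_get_totalvfs(pdev);
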
546 struct device *dev = &pdev->dev; in cpt_probe()
558 return -ENOMEM; in cpt_probe()
561 cpt->pdev = pdev; in cpt_probe()
575 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); in cpt_probe()
577 dev_err(dev, "Unable to get usable 48-bit DMA configuration\n"); in cpt_probe()
582 cpt->reg_base = pcim_iomap(pdev, 0, 0); in cpt_probe()
583 if (!cpt->reg_base) { in cpt_probe()
585 err = -ENOMEM; in cpt_probe()
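
The probe fragment leans on managed helpers, so the BAR mapping and device enable are released automatically on detach; the skeleton it follows (pcim_enable_device() for the elided enable step is an assumption consistent with the pcim_iomap() shown):

	err = pcim_enable_device(pdev);		/* managed PCI enable */
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (err)
		return err;			/* device does 48-bit DMA */

	cpt->reg_base = pcim_iomap(pdev, 0, 0);	/* map all of BAR 0 */
	if (!cpt->reg_base)
		return -ENOMEM;
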
622 /* Disengage SE and AE cores from all groups */ in cpt_remove()
640 dev_info(&pdev->dev, "Shutdown device %x:%x.\n", in cpt_shutdown()
641 (u32)pdev->vendor, (u32)pdev->device); in cpt_shutdown()