Lines matching "non-disruptive" in drivers/misc/cxl/pci.c

1 // SPDX-License-Identifier: GPL-2.0-or-later
18 #include <asm/pnv-pci.h>
89 #define AFUD_READ(afu, off) in_be64(afu->native->afu_desc_mmio + off)
90 #define AFUD_READ_LE(afu, off) in_le64(afu->native->afu_desc_mmio + off)
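
The AFU descriptor sits in device MMIO space and is read with the big-endian accessor (in_be64), with in_le64 reserved for the few little-endian fields. A minimal user-space sketch of the same decoding, assuming a raw 8-byte descriptor word already copied out of MMIO; be64toh/le64toh stand in for the powerpc accessors:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Pretend these 8 bytes were copied from the descriptor MMIO. */
	unsigned char raw[8] = { 0x00, 0x01, 0x00, 0x10, 0x00, 0x00, 0x00, 0x02 };
	uint64_t word;

	memcpy(&word, raw, sizeof(word));

	/* Most descriptor words are big-endian; convert to host order. */
	uint64_t be_view = be64toh(word);
	/* A few fields are little-endian instead. */
	uint64_t le_view = le64toh(word);

	printf("big-endian view:    %#018llx\n", (unsigned long long)be_view);
	printf("little-endian view: %#018llx\n", (unsigned long long)le_view);
	return 0;
}
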
161 dev_info(&dev->dev, "dump_cxl_config_space\n"); in dump_cxl_config_space()
164 dev_info(&dev->dev, "BAR0: %#.8x\n", val); in dump_cxl_config_space()
166 dev_info(&dev->dev, "BAR1: %#.8x\n", val); in dump_cxl_config_space()
168 dev_info(&dev->dev, "BAR2: %#.8x\n", val); in dump_cxl_config_space()
170 dev_info(&dev->dev, "BAR3: %#.8x\n", val); in dump_cxl_config_space()
172 dev_info(&dev->dev, "BAR4: %#.8x\n", val); in dump_cxl_config_space()
174 dev_info(&dev->dev, "BAR5: %#.8x\n", val); in dump_cxl_config_space()
176 dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n", in dump_cxl_config_space()
178 dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n", in dump_cxl_config_space()
180 dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n", in dump_cxl_config_space()
187 dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what) in dump_cxl_config_space()
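
The "p1 regs"/"p2 regs" lines above print 64-bit register windows whose bases come from the BAR pairs dumped just before them. A small sketch of how a 64-bit base is conventionally assembled from a low/high memory BAR pair; the values and the bar64_base() helper are illustrative, not the driver's p1_base()/p2_base():

#include <stdint.h>
#include <stdio.h>

/* Combine a 64-bit memory BAR pair: the low dword carries flag bits [3:0]. */
static uint64_t bar64_base(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | (lo & ~0xfu);
}

int main(void)
{
	uint32_t bar0 = 0x0000200c;	/* 64-bit, prefetchable memory BAR */
	uint32_t bar1 = 0x00002000;	/* upper 32 bits of the same BAR */

	printf("p1 regs: %#llx\n", (unsigned long long)bar64_base(bar0, bar1));
	return 0;
}
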
265 dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what) in dump_afu_descriptor()
282 show_reg("Reserved", (val >> (63-7)) & 0xff); in dump_afu_descriptor()
291 show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff); in dump_afu_descriptor()
298 show_reg("Reserved", (val >> (63-7)) & 0xff); in dump_afu_descriptor()
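
The (val >> (63-7)) & 0xff expressions above use the PowerPC/IBM bit-numbering convention, where bit 0 is the most significant bit of the 64-bit word, so bits 0..7 are the top byte. A stand-alone sketch of that extraction; the field positions and values are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Extract IBM-numbered bits [msb..lsb] of a 64-bit value (bit 0 = MSB). */
static uint64_t ppc_bits(uint64_t val, unsigned int msb, unsigned int lsb)
{
	unsigned int width = lsb - msb + 1;

	return (val >> (63 - lsb)) & ((1ULL << width) - 1);
}

int main(void)
{
	uint64_t info = 0xAB00000000000010ULL;

	/* Bits 0..7: same as (info >> (63-7)) & 0xff in the listing above. */
	printf("bits 0..7:   %#llx\n", (unsigned long long)ppc_bits(info, 0, 7));
	/* Bits 48..63: the low 16 bits in conventional numbering. */
	printf("bits 48..63: %#llx\n", (unsigned long long)ppc_bits(info, 48, 63));
	return 0;
}
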
319 if (of_property_read_u32(np, "ibm,phb-index", phb_index)) in get_phb_index()
320 return -ENODEV; in get_phb_index()
328 * - For chips other than POWER8NVL, we only have CAPP 0, in get_capp_unit_id()
330 * - For POWER8NVL, assume CAPP 0 is attached to PHB0 and in get_capp_unit_id()
347 * PEC1 (PHB1 - PHB2). No capi mode in get_capp_unit_id()
348 * PEC2 (PHB3 - PHB4 - PHB5): Capi mode on PHB3 only. Capp ID = CAPP1 (0b1110_0000) in get_capp_unit_id()
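
Per the comments above, the POWER9 PHB-to-CAPP mapping is fixed: only PHB0 (PEC0) and PHB3 (PEC2) can run in CAPI mode. A condensed sketch of that mapping; the CAPP1 value 0b1110_0000 is quoted in the excerpt, while the CAPP0 value 0b1100_0000 is an assumption taken from the matching upstream constant:

#include <stdint.h>
#include <stdio.h>

/* POWER9 PHB index -> CAPP unit id, following the comment block above. */
static uint64_t p9_capp_unit_id(unsigned int phb_index)
{
	switch (phb_index) {
	case 0:  return 0xC0;	/* PEC0 / PHB0 -> CAPP0 (assumed value) */
	case 3:  return 0xE0;	/* PEC2 / PHB3 -> CAPP1 (0b1110_0000)  */
	default: return 0;	/* no CAPI mode on this PHB */
	}
}

int main(void)
{
	for (unsigned int i = 0; i < 6; i++)
		printf("PHB%u -> capp_unit_id %#llx\n", i,
		       (unsigned long long)p9_capp_unit_id(i));
	return 0;
}
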
369 return -ENODEV; in cxl_calc_capp_routing()
371 while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL))) in cxl_calc_capp_routing()
374 return -ENODEV; in cxl_calc_capp_routing()
388 …pr_err("cxl: No capp unit found for PHB[%lld,%d]. Make sure the adapter is on a capi-compatible sl… in cxl_calc_capp_routing()
390 return -ENODEV; in cxl_calc_capp_routing()
409 return -ENODEV; in get_phb_indications()
412 prop = of_get_property(np, "ibm,phb-indications", NULL); in get_phb_indications()
438 * bit 61:60 MSI bits --> 0 in cxl_get_xsl9_dsnctl()
439 * bit 59 TVT selector --> 0 in cxl_get_xsl9_dsnctl()
442 return -ENODEV; in cxl_get_xsl9_dsnctl()
448 xsl_dsnctl = (capiind << (63-15)); /* Bit 57 */ in cxl_get_xsl9_dsnctl()
449 xsl_dsnctl |= (capp_unit_id << (63-15)); in cxl_get_xsl9_dsnctl()
452 xsl_dsnctl |= ((u64)0x09 << (63-28)); in cxl_get_xsl9_dsnctl()
456 * the Non-Blocking queues by the PHB. This field should match in cxl_get_xsl9_dsnctl()
461 xsl_dsnctl |= (nbwind << (63-55)); in cxl_get_xsl9_dsnctl()
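
XSL_DSNCTL is built by OR-ing fields shifted with the same "value << (63 - n)" idiom, which places the field's least-significant bit at IBM-numbered bit n. A sketch of assembling a register that way; the field values and positions below are illustrative, not the driver's exact layout:

#include <stdint.h>
#include <stdio.h>

/* Place 'val' so its least-significant bit sits at IBM bit 'n' (bit 0 = MSB). */
static uint64_t ppc_field(uint64_t val, unsigned int n)
{
	return val << (63 - n);
}

int main(void)
{
	uint64_t reg;

	reg  = ppc_field(0x01, 15);	/* e.g. a CAPI indicator */
	reg |= ppc_field(0x09, 28);	/* e.g. an nMMU id of 0b000001001 */
	reg |= ppc_field(0x03, 55);	/* e.g. a non-blocking window id */

	printf("assembled register: %#018llx\n", (unsigned long long)reg);
	return 0;
}
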
495 psl_fircntl = (0x2ULL << (63-3)); /* ce_report */ in init_implementation_adapter_regs_psl9()
496 psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */ in init_implementation_adapter_regs_psl9()
519 /* disable machines 31-47 and 20-27 for DMA */ in init_implementation_adapter_regs_psl9()
530 * Check if PSL has data-cache. We need to flush adapter datacache in init_implementation_adapter_regs_psl9()
535 dev_dbg(&dev->dev, "No data-cache present\n"); in init_implementation_adapter_regs_psl9()
536 adapter->native->no_data_cache = true; in init_implementation_adapter_regs_psl9()
555 psl_dsnctl |= (0x2ULL << (63-38)); /* MMIO hang pulse: 256 us */ in init_implementation_adapter_regs_psl8()
557 psl_dsnctl |= (chipid << (63-5)); in init_implementation_adapter_regs_psl8()
558 psl_dsnctl |= (capp_unit_id << (63-13)); in init_implementation_adapter_regs_psl8()
565 psl_fircntl = (0x2ULL << (63-3)); /* ce_report */ in init_implementation_adapter_regs_psl8()
566 psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */ in init_implementation_adapter_regs_psl8()
576 #define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3))
577 #define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
601 adapter->psl_timebase_synced = false; in cxl_setup_psl_timebase()
608 if (! of_get_property(np, "ibm,capp-timebase-sync", NULL)) { in cxl_setup_psl_timebase()
610 dev_info(&dev->dev, "PSL timebase inactive: OPAL support missing\n"); in cxl_setup_psl_timebase()
619 if (adapter->native->sl_ops->write_timebase_ctrl) in cxl_setup_psl_timebase()
620 adapter->native->sl_ops->write_timebase_ctrl(adapter); in cxl_setup_psl_timebase()
650 struct pci_dev *dev = to_pci_dev(adapter->dev.parent); in cxl_pci_setup_irq()
657 struct pci_dev *dev = to_pci_dev(adapter->dev.parent); in cxl_update_image_control()
663 dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n"); in cxl_update_image_control()
664 return -ENODEV; in cxl_update_image_control()
668 dev_err(&dev->dev, "failed to read image state: %i\n", rc); in cxl_update_image_control()
672 if (adapter->perst_loads_image) in cxl_update_image_control()
677 if (adapter->perst_select_user) in cxl_update_image_control()
683 dev_err(&dev->dev, "failed to update image control: %i\n", rc); in cxl_update_image_control()
692 struct pci_dev *dev = to_pci_dev(adapter->dev.parent); in cxl_pci_alloc_one_irq()
699 struct pci_dev *dev = to_pci_dev(adapter->dev.parent); in cxl_pci_release_one_irq()
707 struct pci_dev *dev = to_pci_dev(adapter->dev.parent); in cxl_pci_alloc_irq_ranges()
715 struct pci_dev *dev = to_pci_dev(adapter->dev.parent); in cxl_pci_release_irq_ranges()
725 dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n"); in setup_cxl_bars()
726 return -ENODEV; in setup_cxl_bars()
740 /* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */
747 dev_info(&dev->dev, "switch card to CXL\n"); in switch_card_to_cxl()
750 dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n"); in switch_card_to_cxl()
751 return -ENODEV; in switch_card_to_cxl()
755 dev_err(&dev->dev, "failed to read current mode control: %i", rc); in switch_card_to_cxl()
761 dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc); in switch_card_to_cxl()
765 * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states in switch_card_to_cxl()
780 p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size); in pci_map_slice_regs()
781 p2n_base = p2_base(dev) + (afu->slice * p2n_size); in pci_map_slice_regs()
782 afu->psn_phys = p2_base(dev) + (adapter->native->ps_off + (afu->slice * adapter->ps_size)); in pci_map_slice_regs()
783 …afu_desc = p2_base(dev) + adapter->native->afu_desc_off + (afu->slice * adapter->native->afu_desc_… in pci_map_slice_regs()
785 if (!(afu->native->p1n_mmio = ioremap(p1n_base, p1n_size))) in pci_map_slice_regs()
787 if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size))) in pci_map_slice_regs()
790 if (!(afu->native->afu_desc_mmio = ioremap(afu_desc, adapter->native->afu_desc_size))) in pci_map_slice_regs()
796 iounmap(afu->p2n_mmio); in pci_map_slice_regs()
798 iounmap(afu->native->p1n_mmio); in pci_map_slice_regs()
800 dev_err(&afu->dev, "Error mapping AFU MMIO regions\n"); in pci_map_slice_regs()
801 return -ENOMEM; in pci_map_slice_regs()
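
Each AFU slice gets its own windows carved out of the adapter's P1/P2 regions: the P1n/P2n register blocks, the problem-state area, and the AFU descriptor are all located by adding slice * size to a per-adapter base, and the goto chain above unwinds any mappings that succeeded before a failure. A user-space sketch of the address arithmetic with made-up bases and sizes (the real values come from the BARs and the VSEC):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up example values standing in for BAR bases and VSEC fields. */
	uint64_t p1_base = 0x3d0580000000ULL, p2_base = 0x3d0600000000ULL;
	uint64_t p1n_size = 0x10000, p2n_size = 0x10000;
	uint64_t ps_off = 0x200000, ps_size = 0x100000;
	uint64_t afu_desc_off = 0x80000, afu_desc_size = 0x1000;

	for (unsigned int slice = 0; slice < 2; slice++) {
		uint64_t p1n   = p1_base + 0x10000 + slice * p1n_size;
		uint64_t p2n   = p2_base + slice * p2n_size;
		uint64_t psn   = p2_base + ps_off + slice * ps_size;
		uint64_t adesc = p2_base + afu_desc_off + slice * afu_desc_size;

		printf("slice %u: p1n %#llx p2n %#llx psn %#llx desc %#llx\n",
		       slice, (unsigned long long)p1n, (unsigned long long)p2n,
		       (unsigned long long)psn, (unsigned long long)adesc);
	}
	return 0;
}
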
806 if (afu->p2n_mmio) { in pci_unmap_slice_regs()
807 iounmap(afu->p2n_mmio); in pci_unmap_slice_regs()
808 afu->p2n_mmio = NULL; in pci_unmap_slice_regs()
810 if (afu->native->p1n_mmio) { in pci_unmap_slice_regs()
811 iounmap(afu->native->p1n_mmio); in pci_unmap_slice_regs()
812 afu->native->p1n_mmio = NULL; in pci_unmap_slice_regs()
814 if (afu->native->afu_desc_mmio) { in pci_unmap_slice_regs()
815 iounmap(afu->native->afu_desc_mmio); in pci_unmap_slice_regs()
816 afu->native->afu_desc_mmio = NULL; in pci_unmap_slice_regs()
826 idr_destroy(&afu->contexts_idr); in cxl_pci_release_afu()
829 kfree(afu->native); in cxl_pci_release_afu()
839 afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val); in cxl_read_afu_descriptor()
840 afu->max_procs_virtualised = AFUD_NUM_PROCS(val); in cxl_read_afu_descriptor()
841 afu->crs_num = AFUD_NUM_CRS(val); in cxl_read_afu_descriptor()
844 afu->modes_supported |= CXL_MODE_DIRECTED; in cxl_read_afu_descriptor()
846 afu->modes_supported |= CXL_MODE_DEDICATED; in cxl_read_afu_descriptor()
848 afu->modes_supported |= CXL_MODE_TIME_SLICED; in cxl_read_afu_descriptor()
851 afu->pp_size = AFUD_PPPSA_LEN(val) * 4096; in cxl_read_afu_descriptor()
852 afu->psa = AFUD_PPPSA_PSA(val); in cxl_read_afu_descriptor()
853 if ((afu->pp_psa = AFUD_PPPSA_PP(val))) in cxl_read_afu_descriptor()
854 afu->native->pp_offset = AFUD_READ_PPPSA_OFF(afu); in cxl_read_afu_descriptor()
857 afu->crs_len = AFUD_CR_LEN(val) * 256; in cxl_read_afu_descriptor()
858 afu->crs_offset = AFUD_READ_CR_OFF(afu); in cxl_read_afu_descriptor()
862 afu->eb_len = AFUD_EB_LEN(AFUD_READ_EB(afu)) * 4096; in cxl_read_afu_descriptor()
863 afu->eb_offset = AFUD_READ_EB_OFF(afu); in cxl_read_afu_descriptor()
866 if (EXTRACT_PPC_BITS(afu->eb_offset, 0, 11) != 0) { in cxl_read_afu_descriptor()
867 dev_warn(&afu->dev, in cxl_read_afu_descriptor()
869 afu->eb_offset); in cxl_read_afu_descriptor()
870 dev_info(&afu->dev, in cxl_read_afu_descriptor()
873 afu->eb_len = 0; in cxl_read_afu_descriptor()
884 if (afu->psa && afu->adapter->ps_size < in cxl_afu_descriptor_looks_ok()
885 (afu->native->pp_offset + afu->pp_size*afu->max_procs_virtualised)) { in cxl_afu_descriptor_looks_ok()
886 dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n"); in cxl_afu_descriptor_looks_ok()
887 return -ENODEV; in cxl_afu_descriptor_looks_ok()
890 if (afu->pp_psa && (afu->pp_size < PAGE_SIZE)) in cxl_afu_descriptor_looks_ok()
891 dev_warn(&afu->dev, "AFU uses pp_size(%#016llx) < PAGE_SIZE per-process PSA!\n", afu->pp_size); in cxl_afu_descriptor_looks_ok()
893 for (i = 0; i < afu->crs_num; i++) { in cxl_afu_descriptor_looks_ok()
894 rc = cxl_ops->afu_cr_read32(afu, i, 0, &val); in cxl_afu_descriptor_looks_ok()
896 dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i); in cxl_afu_descriptor_looks_ok()
897 return -EINVAL; in cxl_afu_descriptor_looks_ok()
901 if ((afu->modes_supported & ~CXL_MODE_DEDICATED) && afu->max_procs_virtualised == 0) { in cxl_afu_descriptor_looks_ok()
912 dev_err(&afu->dev, "AFU does not support any processes\n"); in cxl_afu_descriptor_looks_ok()
913 return -EINVAL; in cxl_afu_descriptor_looks_ok()
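
The first sanity check above is pure arithmetic: all per-process problem-state areas (pp_size bytes per virtualised process, starting at pp_offset) must fit inside the adapter's problem-state window. A condensed sketch of that bound with illustrative numbers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if max_procs areas of pp_size bytes, starting at pp_offset,
 * fit inside a problem-state window of ps_size bytes.
 */
static bool pp_psa_fits(uint64_t ps_size, uint64_t pp_offset,
			uint64_t pp_size, uint64_t max_procs)
{
	return ps_size >= pp_offset + pp_size * max_procs;
}

int main(void)
{
	/* Example: 16 MiB window, 64 KiB per process, 64 processes -> fits. */
	printf("fits: %d\n", pp_psa_fits(16 << 20, 0, 64 << 10, 64));
	/* 256 processes at a non-zero offset would overrun the window. */
	printf("fits: %d\n", pp_psa_fits(16 << 20, 4096, 64 << 10, 256));
	return 0;
}
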
930 dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg); in sanitise_afu_regs_psl9()
931 if (cxl_ops->afu_reset(afu)) in sanitise_afu_regs_psl9()
932 return -EIO; in sanitise_afu_regs_psl9()
934 return -EIO; in sanitise_afu_regs_psl9()
936 return -EIO; in sanitise_afu_regs_psl9()
942 dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg); in sanitise_afu_regs_psl9()
948 if (afu->adapter->native->sl_ops->register_serr_irq) { in sanitise_afu_regs_psl9()
952 dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg); in sanitise_afu_regs_psl9()
958 dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg); in sanitise_afu_regs_psl9()
976 dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg); in sanitise_afu_regs_psl8()
977 if (cxl_ops->afu_reset(afu)) in sanitise_afu_regs_psl8()
978 return -EIO; in sanitise_afu_regs_psl8()
980 return -EIO; in sanitise_afu_regs_psl8()
982 return -EIO; in sanitise_afu_regs_psl8()
997 dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg); in sanitise_afu_regs_psl8()
1003 if (afu->adapter->native->sl_ops->register_serr_irq) { in sanitise_afu_regs_psl8()
1007 dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg); in sanitise_afu_regs_psl8()
1013 dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg); in sanitise_afu_regs_psl8()
1033 const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset; in cxl_pci_afu_read_err_buffer()
1035 if (count == 0 || off < 0 || (size_t)off >= afu->eb_len) in cxl_pci_afu_read_err_buffer()
1039 count = min((size_t)(afu->eb_len - off), count); in cxl_pci_afu_read_err_buffer()
1042 aligned_length = aligned_end - aligned_start; in cxl_pci_afu_read_err_buffer()
1047 count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7); in cxl_pci_afu_read_err_buffer()
1053 return -ENOMEM; in cxl_pci_afu_read_err_buffer()
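
The error-buffer read above clamps the request to the buffer length, then widens it to an 8-byte-aligned window so the descriptor space can be read in aligned 64-bit chunks, and finally copies the caller's exact byte range out of a bounce buffer. A user-space sketch of the same alignment arithmetic, with memcpy standing in for the MMIO copy:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN_DOWN8(x) ((x) & ~0x7ULL)
#define ALIGN_UP8(x)   (((x) + 7) & ~0x7ULL)

int main(void)
{
	/* Fake 64-byte "error buffer"; in the driver this is MMIO. */
	unsigned char ebuf[64], out[64], bounce[64 + 8];
	uint64_t eb_len = sizeof(ebuf);
	uint64_t off = 13, count = 10;

	for (unsigned int i = 0; i < sizeof(ebuf); i++)
		ebuf[i] = (unsigned char)i;

	if (off >= eb_len)
		return 1;
	if (count > eb_len - off)
		count = eb_len - off;

	uint64_t aligned_start = ALIGN_DOWN8(off);
	uint64_t aligned_end = ALIGN_UP8(off + count);
	uint64_t aligned_length = aligned_end - aligned_start;

	/* The driver fills the bounce buffer with aligned MMIO reads. */
	memcpy(bounce, ebuf + aligned_start, aligned_length);
	memcpy(out, bounce + (off - aligned_start), count);

	printf("aligned window [%llu, %llu), copied %llu bytes, first byte %u\n",
	       (unsigned long long)aligned_start,
	       (unsigned long long)aligned_end,
	       (unsigned long long)count, out[0]);
	return 0;
}
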
1071 if (adapter->native->sl_ops->sanitise_afu_regs) { in pci_configure_afu()
1072 rc = adapter->native->sl_ops->sanitise_afu_regs(afu); in pci_configure_afu()
1078 if ((rc = cxl_ops->afu_reset(afu))) in pci_configure_afu()
1090 if (adapter->native->sl_ops->afu_regs_init) in pci_configure_afu()
1091 if ((rc = adapter->native->sl_ops->afu_regs_init(afu))) in pci_configure_afu()
1094 if (adapter->native->sl_ops->register_serr_irq) in pci_configure_afu()
1095 if ((rc = adapter->native->sl_ops->register_serr_irq(afu))) in pci_configure_afu()
1101 atomic_set(&afu->configured_state, 0); in pci_configure_afu()
1105 if (adapter->native->sl_ops->release_serr_irq) in pci_configure_afu()
1106 adapter->native->sl_ops->release_serr_irq(afu); in pci_configure_afu()
1118 if (atomic_read(&afu->configured_state) != -1) { in pci_deconfigure_afu()
1119 while (atomic_cmpxchg(&afu->configured_state, 0, -1) != -1) in pci_deconfigure_afu()
1123 if (afu->adapter->native->sl_ops->release_serr_irq) in pci_deconfigure_afu()
1124 afu->adapter->native->sl_ops->release_serr_irq(afu); in pci_deconfigure_afu()
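
The configured_state atomic acts as a small latch: 0 means configured, -1 means torn down, and the cmpxchg(0, -1) loop above waits until nothing holds the AFU before latching -1 so it can never be re-taken. A user-space sketch of one plausible reading of that scheme, assuming the reader side (not shown in the excerpt) bumps the counter while it is non-negative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int configured_state = 0;	/* 0 = configured, -1 = torn down */

/* Assumed reader side: take a reference only while still configured. */
static bool afu_get(void)
{
	int old = atomic_load(&configured_state);

	while (old >= 0) {
		if (atomic_compare_exchange_weak(&configured_state, &old, old + 1))
			return true;
	}
	return false;
}

static void afu_put(void)
{
	atomic_fetch_sub(&configured_state, 1);
}

/* Deconfigure side, mirroring the cmpxchg(0, -1) loop in the listing. */
static void afu_deconfigure(void)
{
	int expected = 0;

	while (!atomic_compare_exchange_weak(&configured_state, &expected, -1)) {
		if (expected == -1)
			return;		/* already deconfigured */
		expected = 0;		/* a reader still holds it; retry */
	}
}

int main(void)
{
	printf("get: %d\n", afu_get());			/* 1: reference taken */
	afu_put();
	afu_deconfigure();
	printf("get after deconfigure: %d\n", afu_get());	/* 0 */
	return 0;
}
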
1131 int rc = -ENOMEM; in pci_init_afu()
1135 return -ENOMEM; in pci_init_afu()
1137 afu->native = kzalloc(sizeof(struct cxl_afu_native), GFP_KERNEL); in pci_init_afu()
1138 if (!afu->native) in pci_init_afu()
1141 mutex_init(&afu->native->spa_mutex); in pci_init_afu()
1143 rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice); in pci_init_afu()
1164 adapter->afu[afu->slice] = afu; in pci_init_afu()
1167 dev_info(&afu->dev, "Can't register vPHB\n"); in pci_init_afu()
1172 device_del(&afu->dev); in pci_init_afu()
1176 put_device(&afu->dev); in pci_init_afu()
1180 kfree(afu->native); in pci_init_afu()
1198 spin_lock(&afu->adapter->afu_list_lock); in cxl_pci_remove_afu()
1199 afu->adapter->afu[afu->slice] = NULL; in cxl_pci_remove_afu()
1200 spin_unlock(&afu->adapter->afu_list_lock); in cxl_pci_remove_afu()
1203 cxl_ops->afu_deactivate_mode(afu, afu->current_mode); in cxl_pci_remove_afu()
1206 device_unregister(&afu->dev); in cxl_pci_remove_afu()
1211 struct pci_dev *dev = to_pci_dev(adapter->dev.parent); in cxl_pci_reset()
1214 if (adapter->perst_same_image) { in cxl_pci_reset()
1215 dev_warn(&dev->dev, in cxl_pci_reset()
1217 return -EINVAL; in cxl_pci_reset()
1220 dev_info(&dev->dev, "CXL reset\n"); in cxl_pci_reset()
1231 dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n"); in cxl_pci_reset()
1248 if (!(adapter->native->p1_mmio = ioremap(p1_base(dev), p1_size(dev)))) in cxl_map_adapter_regs()
1251 if (!(adapter->native->p2_mmio = ioremap(p2_base(dev), p2_size(dev)))) in cxl_map_adapter_regs()
1257 iounmap(adapter->native->p1_mmio); in cxl_map_adapter_regs()
1258 adapter->native->p1_mmio = NULL; in cxl_map_adapter_regs()
1264 return -ENOMEM; in cxl_map_adapter_regs()
1269 if (adapter->native->p1_mmio) { in cxl_unmap_adapter_regs()
1270 iounmap(adapter->native->p1_mmio); in cxl_unmap_adapter_regs()
1271 adapter->native->p1_mmio = NULL; in cxl_unmap_adapter_regs()
1272 pci_release_region(to_pci_dev(adapter->dev.parent), 2); in cxl_unmap_adapter_regs()
1274 if (adapter->native->p2_mmio) { in cxl_unmap_adapter_regs()
1275 iounmap(adapter->native->p2_mmio); in cxl_unmap_adapter_regs()
1276 adapter->native->p2_mmio = NULL; in cxl_unmap_adapter_regs()
1277 pci_release_region(to_pci_dev(adapter->dev.parent), 0); in cxl_unmap_adapter_regs()
1290 dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n"); in cxl_read_vsec()
1291 return -ENODEV; in cxl_read_vsec()
1296 dev_err(&dev->dev, "ABORTING: CXL VSEC too short\n"); in cxl_read_vsec()
1297 return -EINVAL; in cxl_read_vsec()
1300 CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status); in cxl_read_vsec()
1301 CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev); in cxl_read_vsec()
1302 CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major); in cxl_read_vsec()
1303 CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor); in cxl_read_vsec()
1304 CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image); in cxl_read_vsec()
1306 adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED); in cxl_read_vsec()
1307 adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED); in cxl_read_vsec()
1308 adapter->perst_loads_image = !!(image_state & CXL_VSEC_PERST_LOADS_IMAGE); in cxl_read_vsec()
1310 CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices); in cxl_read_vsec()
1317 * code a month later and forget what units these are in ;-) */ in cxl_read_vsec()
1318 adapter->native->ps_off = ps_off * 64 * 1024; in cxl_read_vsec()
1319 adapter->ps_size = ps_size * 64 * 1024; in cxl_read_vsec()
1320 adapter->native->afu_desc_off = afu_desc_off * 64 * 1024; in cxl_read_vsec()
1321 adapter->native->afu_desc_size = afu_desc_size * 64 * 1024; in cxl_read_vsec()
1323 /* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */ in cxl_read_vsec()
1324 adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices; in cxl_read_vsec()
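
As the comment notes, the VSEC encodes the problem-state and descriptor geometry in units of 64 KiB, and the IRQ budget left for userspace is the adapter total minus one PSL error interrupt and two interrupts (slice error + DSI) per AFU. A small sketch of those conversions with illustrative raw values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative raw VSEC fields (in units of 64 KiB) and counts. */
	uint32_t ps_off_raw = 32, ps_size_raw = 256;
	uint32_t afu_desc_off_raw = 8, afu_desc_size_raw = 1;
	unsigned int total_irqs = 2048, slices = 2;

	uint64_t ps_off        = (uint64_t)ps_off_raw * 64 * 1024;
	uint64_t ps_size       = (uint64_t)ps_size_raw * 64 * 1024;
	uint64_t afu_desc_off  = (uint64_t)afu_desc_off_raw * 64 * 1024;
	uint64_t afu_desc_size = (uint64_t)afu_desc_size_raw * 64 * 1024;

	/* Total IRQs - 1 PSL error - (slice error + DSI) per AFU. */
	unsigned int user_irqs = total_irqs - 1 - 2 * slices;

	printf("ps_off=%#llx ps_size=%#llx desc_off=%#llx desc_size=%#llx\n",
	       (unsigned long long)ps_off, (unsigned long long)ps_size,
	       (unsigned long long)afu_desc_off,
	       (unsigned long long)afu_desc_size);
	printf("user-visible IRQs: %u\n", user_irqs);
	return 0;
}
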
1342 if (adapter->psl_rev & 0xf000) in cxl_fixup_malformed_tlp()
1357 if (cxl_is_power8() && (adapter->caia_major == 1)) in cxl_compatible_caia_version()
1360 if (cxl_is_power9() && (adapter->caia_major == 2)) in cxl_compatible_caia_version()
1368 if (adapter->vsec_status & CXL_STATUS_SECOND_PORT) in cxl_vsec_looks_ok()
1369 return -EBUSY; in cxl_vsec_looks_ok()
1371 if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) { in cxl_vsec_looks_ok()
1372 dev_err(&dev->dev, "ABORTING: CXL requires unsupported features\n"); in cxl_vsec_looks_ok()
1373 return -EINVAL; in cxl_vsec_looks_ok()
1377 dev_info(&dev->dev, "Ignoring card. PSL type is not supported (caia version: %d)\n", in cxl_vsec_looks_ok()
1378 adapter->caia_major); in cxl_vsec_looks_ok()
1379 return -ENODEV; in cxl_vsec_looks_ok()
1382 if (!adapter->slices) { in cxl_vsec_looks_ok()
1385 dev_err(&dev->dev, "ABORTING: Device has no AFUs\n"); in cxl_vsec_looks_ok()
1386 return -EINVAL; in cxl_vsec_looks_ok()
1389 if (!adapter->native->afu_desc_off || !adapter->native->afu_desc_size) { in cxl_vsec_looks_ok()
1390 dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n"); in cxl_vsec_looks_ok()
1391 return -EINVAL; in cxl_vsec_looks_ok()
1394 if (adapter->ps_size > p2_size(dev) - adapter->native->ps_off) { in cxl_vsec_looks_ok()
1395 dev_err(&dev->dev, "ABORTING: Problem state size larger than " in cxl_vsec_looks_ok()
1397 adapter->ps_size, p2_size(dev) - adapter->native->ps_off); in cxl_vsec_looks_ok()
1398 return -EINVAL; in cxl_vsec_looks_ok()
1406 return pci_read_vpd(to_pci_dev(adapter->dev.parent), 0, len, buf); in cxl_pci_read_adapter_vpd()
1417 kfree(adapter->native); in cxl_release_adapter()
1421 #define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))
1430 if (adapter->native->sl_ops->invalidate_all) { in sanitise_adapter_regs()
1432 if (cxl_is_power9() && (adapter->perst_loads_image)) in sanitise_adapter_regs()
1434 rc = adapter->native->sl_ops->invalidate_all(adapter); in sanitise_adapter_regs()
1447 adapter->dev.parent = &dev->dev; in cxl_configure_adapter()
1448 adapter->dev.release = cxl_release_adapter; in cxl_configure_adapter()
1453 dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc); in cxl_configure_adapter()
1480 if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev))) in cxl_configure_adapter()
1486 adapter->tunneled_ops_supported = false; in cxl_configure_adapter()
1490 dev_info(&dev->dev, "Tunneled operations unsupported\n"); in cxl_configure_adapter()
1492 adapter->tunneled_ops_supported = true; in cxl_configure_adapter()
1495 if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode))) in cxl_configure_adapter()
1499 * In the non-recovery case this has no effect */ in cxl_configure_adapter()
1519 struct pci_dev *pdev = to_pci_dev(adapter->dev.parent); in cxl_deconfigure_adapter()
1534 struct pci_dev *dev = to_pci_dev(adapter->dev.parent); in cxl_stop_trace_psl9()
1539 trace_mask = (0x3ULL << (62 - traceid * 2)); in cxl_stop_trace_psl9()
1540 trace_state = (trace_state & trace_mask) >> (62 - traceid * 2); in cxl_stop_trace_psl9()
1541 dev_dbg(&dev->dev, "cxl: Traceid-%d trace_state=0x%0llX\n", in cxl_stop_trace_psl9()
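
The PSL9 trace logic packs a 2-bit state for each trace ID into one 64-bit register, starting from the top of the word, which is what the 0x3 << (62 - traceid * 2) mask above selects. A stand-alone sketch of that extraction with an illustrative register value:

#include <stdint.h>
#include <stdio.h>

/* The 2-bit state for trace ID n starts at bit offset (62 - 2n). */
static unsigned int trace_state(uint64_t reg, unsigned int traceid)
{
	uint64_t mask = 0x3ULL << (62 - traceid * 2);

	return (unsigned int)((reg & mask) >> (62 - traceid * 2));
}

int main(void)
{
	/* Illustrative register: trace 0 = 0b10, trace 1 = 0b01, rest 0. */
	uint64_t reg = (0x2ULL << 62) | (0x1ULL << 60);

	for (unsigned int id = 0; id < 4; id++)
		printf("traceid %u: state %u\n", id, trace_state(reg, id));
	return 0;
}
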
1559 spin_lock(&adapter->afu_list_lock); in cxl_stop_trace_psl8()
1560 for (slice = 0; slice < adapter->slices; slice++) { in cxl_stop_trace_psl8()
1561 if (adapter->afu[slice]) in cxl_stop_trace_psl8()
1562 cxl_p1n_write(adapter->afu[slice], CXL_PSL_SLICE_TRACE, in cxl_stop_trace_psl8()
1565 spin_unlock(&adapter->afu_list_lock); in cxl_stop_trace_psl8()
1618 dev_info(&dev->dev, "Device uses a PSL8\n"); in set_sl_ops()
1619 adapter->native->sl_ops = &psl8_ops; in set_sl_ops()
1621 dev_info(&dev->dev, "Device uses a PSL9\n"); in set_sl_ops()
1622 adapter->native->sl_ops = &psl9_ops; in set_sl_ops()
1634 return ERR_PTR(-ENOMEM); in cxl_pci_init_adapter()
1636 adapter->native = kzalloc(sizeof(struct cxl_native), GFP_KERNEL); in cxl_pci_init_adapter()
1637 if (!adapter->native) { in cxl_pci_init_adapter()
1638 rc = -ENOMEM; in cxl_pci_init_adapter()
1647 adapter->perst_loads_image = true; in cxl_pci_init_adapter()
1648 adapter->perst_same_image = false; in cxl_pci_init_adapter()
1675 device_del(&adapter->dev); in cxl_pci_init_adapter()
1682 put_device(&adapter->dev); in cxl_pci_init_adapter()
1686 cxl_release_adapter(&adapter->dev); in cxl_pci_init_adapter()
1704 device_unregister(&adapter->dev); in cxl_pci_remove_adapter()
1716 return -ENODEV; in cxl_slot_is_switched()
1736 dev_dbg(&dev->dev, "cxl_init_adapter: Ignoring cxl vphb device\n"); in cxl_probe()
1737 return -ENODEV; in cxl_probe()
1741 dev_info(&dev->dev, "Ignoring card on incompatible PCI slot\n"); in cxl_probe()
1742 return -ENODEV; in cxl_probe()
1746 dev_info(&dev->dev, "Only Radix mode supported\n"); in cxl_probe()
1747 return -ENODEV; in cxl_probe()
1755 dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter)); in cxl_probe()
1759 for (slice = 0; slice < adapter->slices; slice++) { in cxl_probe()
1761 dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc); in cxl_probe()
1765 rc = cxl_afu_select_best_mode(adapter->afu[slice]); in cxl_probe()
1767 dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc); in cxl_probe()
1783 for (i = 0; i < adapter->slices; i++) { in cxl_remove()
1784 afu = adapter->afu[i]; in cxl_remove()
1802 if (afu == NULL || afu->phb == NULL) in cxl_vphb_error_detected()
1805 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { in cxl_vphb_error_detected()
1806 afu_drv = to_pci_driver(afu_dev->dev.driver); in cxl_vphb_error_detected()
1810 afu_dev->error_state = state; in cxl_vphb_error_detected()
1812 err_handler = afu_drv->err_handler; in cxl_vphb_error_detected()
1814 afu_result = err_handler->error_detected(afu_dev, in cxl_vphb_error_detected()
1843 spin_lock(&adapter->afu_list_lock); in cxl_pci_error_detected()
1844 for (i = 0; i < adapter->slices; i++) { in cxl_pci_error_detected()
1845 afu = adapter->afu[i]; in cxl_pci_error_detected()
1852 spin_unlock(&adapter->afu_list_lock); in cxl_pci_error_detected()
1859 * different, including a non-CAPI card. As such, by default in cxl_pci_error_detected()
1861 * the slot re-probed. (TODO: check EEH doesn't blindly rebind in cxl_pci_error_detected()
1866 * order to get back to a more reliable known-good state. in cxl_pci_error_detected()
1869 * trust that we'll come back the same - we could have a new in cxl_pci_error_detected()
1872 * back the same - for example a regular EEH event. in cxl_pci_error_detected()
1878 if (adapter->perst_loads_image && !adapter->perst_same_image) { in cxl_pci_error_detected()
1880 dev_info(&pdev->dev, "reflashing, so opting out of EEH!\n"); in cxl_pci_error_detected()
1889 * - We send the driver, if bound, an error_detected callback. in cxl_pci_error_detected()
1894 * - We detach all contexts associated with the AFU. This in cxl_pci_error_detected()
1900 * - We clean up our side: releasing and unmapping resources we hold in cxl_pci_error_detected()
1905 * - Any contexts you create in your kernel driver (except in cxl_pci_error_detected()
1910 * - We will take responsibility for re-initialising the in cxl_pci_error_detected()
1936 spin_lock(&adapter->afu_list_lock); in cxl_pci_error_detected()
1938 for (i = 0; i < adapter->slices; i++) { in cxl_pci_error_detected()
1939 afu = adapter->afu[i]; in cxl_pci_error_detected()
1946 cxl_ops->afu_deactivate_mode(afu, afu->current_mode); in cxl_pci_error_detected()
1956 spin_unlock(&adapter->afu_list_lock); in cxl_pci_error_detected()
1960 dev_warn(&adapter->dev, in cxl_pci_error_detected()
1961 "Couldn't take context lock with %d active-contexts\n", in cxl_pci_error_detected()
1962 atomic_read(&adapter->contexts_num)); in cxl_pci_error_detected()
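
The comment block above spells out the contract between the cxl core and AFU drivers during EEH: bound drivers get an error_detected() callback, their contexts are detached for them, and they are expected to release those contexts and rebuild state from slot_reset()/resume(). A hedged sketch of what an AFU driver's handler table might look like, using the standard struct pci_error_handlers; the function bodies are placeholders, not cxl API:

#include <linux/pci.h>

static pci_ers_result_t my_afu_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	/* Stop using contexts; the cxl core will detach them. */
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t my_afu_slot_reset(struct pci_dev *pdev)
{
	/* Release old contexts, reacquire and reconfigure the AFU here. */
	return PCI_ERS_RESULT_RECOVERED;
}

static void my_afu_resume(struct pci_dev *pdev)
{
	/* Restart I/O once the link is back. */
}

static const struct pci_error_handlers my_afu_err_handlers = {
	.error_detected	= my_afu_error_detected,
	.slot_reset	= my_afu_slot_reset,
	.resume		= my_afu_resume,
};
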
1991 spin_lock(&adapter->afu_list_lock); in cxl_pci_slot_reset()
1992 for (i = 0; i < adapter->slices; i++) { in cxl_pci_slot_reset()
1993 afu = adapter->afu[i]; in cxl_pci_slot_reset()
2004 if (afu->phb == NULL) in cxl_pci_slot_reset()
2007 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { in cxl_pci_slot_reset()
2009 * TODO: make this less disruptive in cxl_pci_slot_reset()
2020 afu_dev->dev.archdata.cxl_ctx = ctx; in cxl_pci_slot_reset()
2022 if (cxl_ops->afu_check_and_enable(afu)) in cxl_pci_slot_reset()
2025 afu_dev->error_state = pci_channel_io_normal; in cxl_pci_slot_reset()
2033 afu_drv = to_pci_driver(afu_dev->dev.driver); in cxl_pci_slot_reset()
2037 err_handler = afu_drv->err_handler; in cxl_pci_slot_reset()
2038 if (err_handler && err_handler->slot_reset) in cxl_pci_slot_reset()
2039 afu_result = err_handler->slot_reset(afu_dev); in cxl_pci_slot_reset()
2046 spin_unlock(&adapter->afu_list_lock); in cxl_pci_slot_reset()
2050 spin_unlock(&adapter->afu_list_lock); in cxl_pci_slot_reset()
2057 dev_err(&pdev->dev, "EEH recovery failed. Asking to be disconnected.\n"); in cxl_pci_slot_reset()
2074 spin_lock(&adapter->afu_list_lock); in cxl_pci_resume()
2075 for (i = 0; i < adapter->slices; i++) { in cxl_pci_resume()
2076 afu = adapter->afu[i]; in cxl_pci_resume()
2078 if (afu == NULL || afu->phb == NULL) in cxl_pci_resume()
2081 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { in cxl_pci_resume()
2082 afu_drv = to_pci_driver(afu_dev->dev.driver); in cxl_pci_resume()
2086 err_handler = afu_drv->err_handler; in cxl_pci_resume()
2087 if (err_handler && err_handler->resume) in cxl_pci_resume()
2088 err_handler->resume(afu_dev); in cxl_pci_resume()
2091 spin_unlock(&adapter->afu_list_lock); in cxl_pci_resume()
2101 .name = "cxl-pci",