/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <asm/opal.h>
#include <asm/msi_bitmap.h>
#include <asm/pci-bridge.h> /* for struct pci_controller */
#include <asm/pnv-pci.h>
#include <asm/io.h>

#include "cxl.h"


#define CXL_PCI_VSEC_ID	0x1280
#define CXL_VSEC_MIN_SIZE 0x80

#define CXL_READ_VSEC_LENGTH(dev, vsec, dest)			\
	{							\
		pci_read_config_word(dev, vsec + 0x6, dest);	\
		*dest >>= 4;					\
	}
#define CXL_READ_VSEC_NAFUS(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x8, dest)

#define CXL_READ_VSEC_STATUS(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x9, dest)
#define CXL_STATUS_SECOND_PORT  0x80
#define CXL_STATUS_MSI_X_FULL   0x40
#define CXL_STATUS_MSI_X_SINGLE 0x20
#define CXL_STATUS_FLASH_RW     0x08
#define CXL_STATUS_FLASH_RO     0x04
#define CXL_STATUS_LOADABLE_AFU 0x02
#define CXL_STATUS_LOADABLE_PSL 0x01
/* If we see these features we won't try to use the card */
#define CXL_UNSUPPORTED_FEATURES \
	(CXL_STATUS_MSI_X_FULL | CXL_STATUS_MSI_X_SINGLE)

#define CXL_READ_VSEC_MODE_CONTROL(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xa, dest)
#define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \
	pci_write_config_byte(dev, vsec + 0xa, val)
#define CXL_VSEC_PROTOCOL_MASK   0xe0
#define CXL_VSEC_PROTOCOL_1024TB 0x80
#define CXL_VSEC_PROTOCOL_512TB  0x40
#define CXL_VSEC_PROTOCOL_256TB  0x20 /* Power 8 uses this */
#define CXL_VSEC_PROTOCOL_ENABLE 0x01

#define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \
	pci_read_config_word(dev, vsec + 0xc, dest)
#define CXL_READ_VSEC_CAIA_MINOR(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xe, dest)
#define CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xf, dest)
#define CXL_READ_VSEC_BASE_IMAGE(dev, vsec, dest) \
	pci_read_config_word(dev, vsec + 0x10, dest)

#define CXL_READ_VSEC_IMAGE_STATE(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x13, dest)
#define CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, val) \
	pci_write_config_byte(dev, vsec + 0x13, val)
#define CXL_VSEC_USER_IMAGE_LOADED 0x80 /* RO */
#define CXL_VSEC_PERST_LOADS_IMAGE 0x20 /* RW */
#define CXL_VSEC_PERST_SELECT_USER 0x10 /* RW */

#define CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x20, dest)
#define CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x24, dest)
#define CXL_READ_VSEC_PS_OFF(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x28, dest)
#define CXL_READ_VSEC_PS_SIZE(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x2c, dest)


/* This works a little differently than the p1/p2 register accesses, to make
 * it easier to pull out individual fields */
#define AFUD_READ(afu, off)	in_be64(afu->afu_desc_mmio + off)
#define AFUD_READ_LE(afu, off)	in_le64(afu->afu_desc_mmio + off)
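/*
 * The AFU descriptor fields below are specified in IBM bit order: bit 0 is
 * the most significant bit of the 64-bit doubleword, which is why PPC_BIT()
 * and PPC_BITMASK() are used here instead of plain shifts.
 */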
#define EXTRACT_PPC_BIT(val, bit)	(!!(val & PPC_BIT(bit)))
#define EXTRACT_PPC_BITS(val, bs, be)	((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))

#define AFUD_READ_INFO(afu)		AFUD_READ(afu, 0x0)
#define AFUD_NUM_INTS_PER_PROC(val)	EXTRACT_PPC_BITS(val, 0, 15)
#define AFUD_NUM_PROCS(val)		EXTRACT_PPC_BITS(val, 16, 31)
#define AFUD_NUM_CRS(val)		EXTRACT_PPC_BITS(val, 32, 47)
#define AFUD_MULTIMODE(val)		EXTRACT_PPC_BIT(val, 48)
#define AFUD_PUSH_BLOCK_TRANSFER(val)	EXTRACT_PPC_BIT(val, 55)
#define AFUD_DEDICATED_PROCESS(val)	EXTRACT_PPC_BIT(val, 59)
#define AFUD_AFU_DIRECTED(val)		EXTRACT_PPC_BIT(val, 61)
#define AFUD_TIME_SLICED(val)		EXTRACT_PPC_BIT(val, 63)
#define AFUD_READ_CR(afu)		AFUD_READ(afu, 0x20)
#define AFUD_CR_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_CR_OFF(afu)		AFUD_READ(afu, 0x28)
#define AFUD_READ_PPPSA(afu)		AFUD_READ(afu, 0x30)
#define AFUD_PPPSA_PP(val)		EXTRACT_PPC_BIT(val, 6)
#define AFUD_PPPSA_PSA(val)		EXTRACT_PPC_BIT(val, 7)
#define AFUD_PPPSA_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_PPPSA_OFF(afu)	AFUD_READ(afu, 0x38)
#define AFUD_READ_EB(afu)		AFUD_READ(afu, 0x40)
#define AFUD_EB_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_EB_OFF(afu)		AFUD_READ(afu, 0x48)

u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;

	val = cxl_afu_cr_read32(afu, cr, aligned_off);
	return (val >> ((off & 0x2) * 8)) & 0xffff;
}

u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;

	val = cxl_afu_cr_read32(afu, cr, aligned_off);
	return (val >> ((off & 0x3) * 8)) & 0xff;
}

static DEFINE_PCI_DEVICE_TABLE(cxl_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
	{ PCI_DEVICE_CLASS(0x120000, ~0), },

	{ }
};
MODULE_DEVICE_TABLE(pci, cxl_pci_tbl);


/*
 * Mostly using these wrappers to avoid confusion:
 * priv 1 is BAR2, while priv 2 is BAR0
 */
static inline resource_size_t p1_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 2);
}

static inline resource_size_t p1_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 2);
}

static inline resource_size_t p2_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 0);
}

static inline resource_size_t p2_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 0);
}

static int find_cxl_vsec(struct pci_dev *dev)
{
	int vsec = 0;
	u16 val;

	while ((vsec = pci_find_next_ext_capability(dev, vsec, PCI_EXT_CAP_ID_VNDR))) {
		pci_read_config_word(dev, vsec + 0x4, &val);
		if (val == CXL_PCI_VSEC_ID)
			return vsec;
	}
	return 0;
}

static void dump_cxl_config_space(struct pci_dev *dev)
{
	int vsec;
	u32 val;

	dev_info(&dev->dev, "dump_cxl_config_space\n");

	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &val);
	dev_info(&dev->dev, "BAR0: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val);
	dev_info(&dev->dev, "BAR1: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &val);
	dev_info(&dev->dev, "BAR2: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_3, &val);
	dev_info(&dev->dev, "BAR3: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_4, &val);
	dev_info(&dev->dev, "BAR4: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_5, &val);
	dev_info(&dev->dev, "BAR5: %#.8x\n", val);

	dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n",
		p1_base(dev), p1_size(dev));
	dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n",
		p2_base(dev), p2_size(dev));
	dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n",
		pci_resource_start(dev, 4), pci_resource_len(dev, 4));

	if (!(vsec = find_cxl_vsec(dev)))
		return;

#define show_reg(name, what) \
	dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what)

	pci_read_config_dword(dev, vsec + 0x0, &val);
	show_reg("Cap ID", (val >> 0) & 0xffff);
	show_reg("Cap Ver", (val >> 16) & 0xf);
	show_reg("Next Cap Ptr", (val >> 20) & 0xfff);
	pci_read_config_dword(dev, vsec + 0x4, &val);
	show_reg("VSEC ID", (val >> 0) & 0xffff);
	show_reg("VSEC Rev", (val >> 16) & 0xf);
	show_reg("VSEC Length", (val >> 20) & 0xfff);
	pci_read_config_dword(dev, vsec + 0x8, &val);
	show_reg("Num AFUs", (val >> 0) & 0xff);
	show_reg("Status", (val >> 8) & 0xff);
	show_reg("Mode Control", (val >> 16) & 0xff);
	show_reg("Reserved", (val >> 24) & 0xff);
	pci_read_config_dword(dev, vsec + 0xc, &val);
	show_reg("PSL Rev", (val >> 0) & 0xffff);
	show_reg("CAIA Ver", (val >> 16) & 0xffff);
	pci_read_config_dword(dev, vsec + 0x10, &val);
	show_reg("Base Image Rev", (val >> 0) & 0xffff);
	show_reg("Reserved", (val >> 16) & 0x0fff);
	show_reg("Image Control", (val >> 28) & 0x3);
	show_reg("Reserved", (val >> 30) & 0x1);
	show_reg("Image Loaded", (val >> 31) & 0x1);

	pci_read_config_dword(dev, vsec + 0x14, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x18, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x1c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x20, &val);
	show_reg("AFU Descriptor Offset", val);
	pci_read_config_dword(dev, vsec + 0x24, &val);
	show_reg("AFU Descriptor Size", val);
	pci_read_config_dword(dev, vsec + 0x28, &val);
	show_reg("Problem State Offset", val);
	pci_read_config_dword(dev, vsec + 0x2c, &val);
	show_reg("Problem State Size", val);

	pci_read_config_dword(dev, vsec + 0x30, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x34, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x38, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x3c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x40, &val);
	show_reg("PSL Programming Port", val);
	pci_read_config_dword(dev, vsec + 0x44, &val);
	show_reg("PSL Programming Control", val);

	pci_read_config_dword(dev, vsec + 0x48, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x4c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x50, &val);
	show_reg("Flash Address Register", val);
	pci_read_config_dword(dev, vsec + 0x54, &val);
	show_reg("Flash Size Register", val);
	pci_read_config_dword(dev, vsec + 0x58, &val);
	show_reg("Flash Status/Control Register", val);
	pci_read_config_dword(dev, vsec + 0x5c, &val);
	show_reg("Flash Data Port", val);

#undef show_reg
}

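/*
 * The AFU descriptor is not in config space: it lives in the MMIO region
 * located by the VSEC "AFU Descriptor Offset/Size" fields and is read
 * big-endian (AFUD_READ), except for the AFU configuration record space,
 * which holds PCI-style data and is read little-endian (AFUD_READ_LE).
 */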
static void dump_afu_descriptor(struct cxl_afu *afu)
{
	u64 val, afu_cr_num, afu_cr_off, afu_cr_len;
	int i;

#define show_reg(name, what) \
	dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what)

	val = AFUD_READ_INFO(afu);
	show_reg("num_ints_per_process", AFUD_NUM_INTS_PER_PROC(val));
	show_reg("num_of_processes", AFUD_NUM_PROCS(val));
	show_reg("num_of_afu_CRs", AFUD_NUM_CRS(val));
	show_reg("req_prog_mode", val & 0xffffULL);
	afu_cr_num = AFUD_NUM_CRS(val);

	val = AFUD_READ(afu, 0x8);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x10);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x18);
	show_reg("Reserved", val);

	val = AFUD_READ_CR(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_CR_len", AFUD_CR_LEN(val));
	afu_cr_len = AFUD_CR_LEN(val) * 256;

	val = AFUD_READ_CR_OFF(afu);
	afu_cr_off = val;
	show_reg("AFU_CR_offset", val);

	val = AFUD_READ_PPPSA(afu);
	show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff);
	show_reg("PerProcessPSA Length", AFUD_PPPSA_LEN(val));

	val = AFUD_READ_PPPSA_OFF(afu);
	show_reg("PerProcessPSA_offset", val);

	val = AFUD_READ_EB(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_EB_len", AFUD_EB_LEN(val));

	val = AFUD_READ_EB_OFF(afu);
	show_reg("AFU_EB_offset", val);

	for (i = 0; i < afu_cr_num; i++) {
		val = AFUD_READ_LE(afu, afu_cr_off + i * afu_cr_len);
		show_reg("CR Vendor", val & 0xffff);
		show_reg("CR Device", (val >> 16) & 0xffff);
	}
#undef show_reg
}

static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
{
	struct device_node *np;
	const __be32 *prop;
	u64 psl_dsnctl;
	u64 chipid;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return -ENODEV;

	while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
		np = of_get_next_parent(np);
	if (!np)
		return -ENODEV;
	chipid = be32_to_cpup(prop);
	of_node_put(np);

	/* Tell PSL where to route data to */
	psl_dsnctl = 0x02E8900002000000ULL | (chipid << (63-5));
	cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl);
	cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
	/* snoop write mask */
	cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
	/* set fir_accum */
	cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, 0x0800000000000000ULL);
	/* for debugging with trace arrays */
	cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);

	return 0;
}

static int init_implementation_afu_regs(struct cxl_afu *afu)
{
	/* read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL);
	/* APC read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL);
	/* for debugging with trace arrays */
	cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL);
	cxl_p1n_write(afu, CXL_PSL_RXCTL_A, CXL_PSL_RXCTL_AFUHP_4S);

	return 0;
}

int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq,
		unsigned int virq)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
}

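/*
 * PERST behaviour is controlled through the VSEC image state byte:
 * adapter->perst_loads_image and adapter->perst_select_user (set elsewhere
 * in the driver, e.g. from sysfs) decide whether a PERST reloads the FPGA
 * image and whether the user or factory image is selected.
 */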
int cxl_update_image_control(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	int rc;
	int vsec;
	u8 image_state;

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) {
		dev_err(&dev->dev, "failed to read image state: %i\n", rc);
		return rc;
	}

	if (adapter->perst_loads_image)
		image_state |= CXL_VSEC_PERST_LOADS_IMAGE;
	else
		image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE;

	if (adapter->perst_select_user)
		image_state |= CXL_VSEC_PERST_SELECT_USER;
	else
		image_state &= ~CXL_VSEC_PERST_SELECT_USER;

	if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) {
		dev_err(&dev->dev, "failed to update image control: %i\n", rc);
		return rc;
	}

	return 0;
}

int cxl_alloc_one_irq(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_alloc_hwirqs(dev, 1);
}

void cxl_release_one_irq(struct cxl *adapter, int hwirq)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_release_hwirqs(dev, hwirq, 1);
}

int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num);
}

void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	pnv_cxl_release_hwirq_ranges(irqs, dev);
}

static int setup_cxl_bars(struct pci_dev *dev)
{
	/* Safety check in case we get backported to < 3.17 without M64 */
	if ((p1_base(dev) < 0x100000000ULL) ||
	    (p2_base(dev) < 0x100000000ULL)) {
		dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n");
		return -ENODEV;
	}

	/*
	 * BAR 4/5 has a special meaning for CXL and must be programmed with a
	 * special value corresponding to the CXL protocol address range.
	 * For POWER 8 that means bits 48:49 must be set to 10
	 */
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000);
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000);

	return 0;
}

/* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */
static int switch_card_to_cxl(struct pci_dev *dev)
{
	int vsec;
	u8 val;
	int rc;

	dev_info(&dev->dev, "switch card to CXL\n");

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) {
		dev_err(&dev->dev, "failed to read current mode control: %i", rc);
		return rc;
	}
	val &= ~CXL_VSEC_PROTOCOL_MASK;
	val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
	if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) {
		dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc);
		return rc;
	}
	/*
	 * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states
	 * we must wait 100ms after this mode switch before touching
	 * PCIe config space.
	 */
	msleep(100);

	return 0;
}

static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
	u64 p1n_base, p2n_base, afu_desc;
	const u64 p1n_size = 0x100;
	const u64 p2n_size = 0x1000;

	p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size);
	p2n_base = p2_base(dev) + (afu->slice * p2n_size);
	afu->psn_phys = p2_base(dev) + (adapter->ps_off + (afu->slice * adapter->ps_size));
	afu_desc = p2_base(dev) + adapter->afu_desc_off + (afu->slice * adapter->afu_desc_size);

	if (!(afu->p1n_mmio = ioremap(p1n_base, p1n_size)))
		goto err;
	if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size)))
		goto err1;
	if (afu_desc) {
		if (!(afu->afu_desc_mmio = ioremap(afu_desc, adapter->afu_desc_size)))
			goto err2;
	}

	return 0;
err2:
	iounmap(afu->p2n_mmio);
err1:
	iounmap(afu->p1n_mmio);
err:
	dev_err(&afu->dev, "Error mapping AFU MMIO regions\n");
	return -ENOMEM;
}

static void cxl_unmap_slice_regs(struct cxl_afu *afu)
{
	if (afu->p2n_mmio)
		iounmap(afu->p2n_mmio);
	if (afu->p1n_mmio)
		iounmap(afu->p1n_mmio);
}

static void cxl_release_afu(struct device *dev)
{
	struct cxl_afu *afu = to_cxl_afu(dev);

	pr_devel("cxl_release_afu\n");

	kfree(afu);
}

static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
{
	struct cxl_afu *afu;

	if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL)))
		return NULL;

	afu->adapter = adapter;
	afu->dev.parent = &adapter->dev;
	afu->dev.release = cxl_release_afu;
	afu->slice = slice;
	idr_init(&afu->contexts_idr);
	mutex_init(&afu->contexts_lock);
	spin_lock_init(&afu->afu_cntl_lock);
	mutex_init(&afu->spa_mutex);

	afu->prefault_mode = CXL_PREFAULT_NONE;
	afu->irqs_max = afu->adapter->user_irqs;

	return afu;
}

/* Expects AFU struct to have recently been zeroed out */
static int cxl_read_afu_descriptor(struct cxl_afu *afu)
{
	u64 val;

	val = AFUD_READ_INFO(afu);
	afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val);
	afu->max_procs_virtualised = AFUD_NUM_PROCS(val);
	afu->crs_num = AFUD_NUM_CRS(val);

	if (AFUD_AFU_DIRECTED(val))
		afu->modes_supported |= CXL_MODE_DIRECTED;
	if (AFUD_DEDICATED_PROCESS(val))
		afu->modes_supported |= CXL_MODE_DEDICATED;
	if (AFUD_TIME_SLICED(val))
		afu->modes_supported |= CXL_MODE_TIME_SLICED;

	val = AFUD_READ_PPPSA(afu);
	afu->pp_size = AFUD_PPPSA_LEN(val) * 4096;
	afu->psa = AFUD_PPPSA_PSA(val);
	if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
		afu->pp_offset = AFUD_READ_PPPSA_OFF(afu);

	val = AFUD_READ_CR(afu);
	afu->crs_len = AFUD_CR_LEN(val) * 256;
	afu->crs_offset = AFUD_READ_CR_OFF(afu);

	/* eb_len is in multiples of 4K */
	afu->eb_len = AFUD_EB_LEN(AFUD_READ_EB(afu)) * 4096;
	afu->eb_offset = AFUD_READ_EB_OFF(afu);

	/* eb_off is 4K aligned so lower 12 bits are always zero */
	if (EXTRACT_PPC_BITS(afu->eb_offset, 0, 11) != 0) {
		dev_warn(&afu->dev,
			 "Invalid AFU error buffer offset %Lx\n",
			 afu->eb_offset);
		dev_info(&afu->dev,
			 "Ignoring AFU error buffer in the descriptor\n");
		/* indicate that no afu buffer exists */
		afu->eb_len = 0;
	}

	return 0;
}

static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
{
	int i;

	if (afu->psa && afu->adapter->ps_size <
			(afu->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
		dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
		return -ENODEV;
	}

	if (afu->pp_psa && (afu->pp_size < PAGE_SIZE))
		dev_warn(&afu->dev, "AFU uses < PAGE_SIZE per-process PSA!");

	for (i = 0; i < afu->crs_num; i++) {
		if ((cxl_afu_cr_read32(afu, i, 0) == 0)) {
			dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i);
			return -EINVAL;
		}
	}

	return 0;
}

static int sanitise_afu_regs(struct cxl_afu *afu)
{
	u64 reg;

	/*
	 * Clear out any regs that contain either an IVTE or address or may be
	 * waiting on an acknowledgement to try to be a bit safer as we bring
	 * it online
	 */
	reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#.16llx\n", reg);
		if (__cxl_afu_reset(afu))
			return -EIO;
		if (cxl_afu_disable(afu))
			return -EIO;
		if (cxl_psl_purge(afu))
			return -EIO;
	}
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_SPOffset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_HAURP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_CSRP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP0_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);
	reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending DSISR: %#.16llx\n", reg);
		if (reg & CXL_PSL_DSISR_TRANS)
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		else
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
	}
	reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	if (reg) {
		if (reg & ~0xffff)
			dev_warn(&afu->dev, "AFU had pending SERR: %#.16llx\n", reg);
		cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
	}
	reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending error status: %#.16llx\n", reg);
		cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
	}

	return 0;
}

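/*
 * AFU error buffer: eb_offset/eb_len from the AFU descriptor (read above)
 * describe a region of the descriptor MMIO space that is exposed to
 * userspace via sysfs.  Worked example for the alignment logic below:
 * off = 5, count = 16 gives aligned_start = 0, aligned_end = 24,
 * aligned_length = 24, and the caller's bytes are copied from tbuf + 5.
 */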
#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
/*
 * afu_eb_read:
 * Called from sysfs and reads the afu error info buffer. The h/w only supports
 * 4/8 byte aligned access. So in case the requested offset/count aren't 8 byte
 * aligned the function uses a bounce buffer which can be max PAGE_SIZE.
 */
ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
				loff_t off, size_t count)
{
	loff_t aligned_start, aligned_end;
	size_t aligned_length;
	void *tbuf;
	const void __iomem *ebuf = afu->afu_desc_mmio + afu->eb_offset;

	if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
		return 0;

	/* calculate aligned read window */
	count = min((size_t)(afu->eb_len - off), count);
	aligned_start = round_down(off, 8);
	aligned_end = round_up(off + count, 8);
	aligned_length = aligned_end - aligned_start;

	/* max we can copy in one read is PAGE_SIZE */
	if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) {
		aligned_length = ERR_BUFF_MAX_COPY_SIZE;
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	}

	/* use bounce buffer for copy */
	tbuf = (void *)__get_free_page(GFP_TEMPORARY);
	if (!tbuf)
		return -ENOMEM;

	/* perform aligned read from the mmio region */
	memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length);
	memcpy(buf, tbuf + (off & 0x7), count);

	free_page((unsigned long)tbuf);

	return count;
}

static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
{
	struct cxl_afu *afu;
	bool free = true;
	int rc;

	if (!(afu = cxl_alloc_afu(adapter, slice)))
		return -ENOMEM;

	if ((rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice)))
		goto err1;

	if ((rc = cxl_map_slice_regs(afu, adapter, dev)))
		goto err1;

	if ((rc = sanitise_afu_regs(afu)))
		goto err2;

	/* We need to reset the AFU before we can read the AFU descriptor */
	if ((rc = __cxl_afu_reset(afu)))
		goto err2;

	if (cxl_verbose)
		dump_afu_descriptor(afu);

	if ((rc = cxl_read_afu_descriptor(afu)))
		goto err2;

	if ((rc = cxl_afu_descriptor_looks_ok(afu)))
		goto err2;

	if ((rc = init_implementation_afu_regs(afu)))
		goto err2;

	if ((rc = cxl_register_serr_irq(afu)))
		goto err2;

	if ((rc = cxl_register_psl_irq(afu)))
		goto err3;

	/* Don't care if this fails */
	cxl_debugfs_afu_add(afu);

	/*
	 * After we call this function we must not free the afu directly, even
	 * if it returns an error!
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	if ((rc = cxl_afu_select_best_mode(afu)))
		goto err_put2;

	adapter->afu[afu->slice] = afu;

	if ((rc = cxl_pci_vphb_add(afu)))
		dev_info(&afu->dev, "Can't register vPHB\n");

	return 0;

err_put2:
	cxl_sysfs_afu_remove(afu);
err_put1:
	device_unregister(&afu->dev);
	free = false;
	cxl_debugfs_afu_remove(afu);
	cxl_release_psl_irq(afu);
err3:
	cxl_release_serr_irq(afu);
err2:
	cxl_unmap_slice_regs(afu);
err1:
	if (free)
		kfree(afu);
	return rc;
}

static void cxl_remove_afu(struct cxl_afu *afu)
{
	pr_devel("cxl_remove_afu\n");

	if (!afu)
		return;

	cxl_sysfs_afu_remove(afu);
	cxl_debugfs_afu_remove(afu);

	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_afu_deactivate_mode(afu);

	cxl_release_psl_irq(afu);
	cxl_release_serr_irq(afu);
	cxl_unmap_slice_regs(afu);

	device_unregister(&afu->dev);
}

int cxl_reset(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	int rc;
	int i;
	u32 val;

	dev_info(&dev->dev, "CXL reset\n");

	for (i = 0; i < adapter->slices; i++) {
		cxl_pci_vphb_remove(adapter->afu[i]);
		cxl_remove_afu(adapter->afu[i]);
	}

	/* pcie_warm_reset requests a fundamental pci reset which includes a
	 * PERST assert/deassert. PERST triggers a loading of the image
	 * if "user" or "factory" is selected in sysfs */
	if ((rc = pci_set_pcie_reset_state(dev, pcie_warm_reset))) {
		dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n");
		return rc;
	}

	/* the PERST done above fences the PHB. So, reset depends on EEH
	 * to unbind the driver, tell Sapphire to reinit the PHB, and rebind
	 * the driver. Do an mmio read explicitly to ensure EEH notices the
	 * fenced PHB. Retry for a few seconds before giving up.
	 */
	i = 0;
	while (((val = mmio_read32be(adapter->p1_mmio)) != 0xffffffff) &&
	       (i < 5)) {
		msleep(500);
		i++;
	}

	if (val != 0xffffffff)
		dev_err(&dev->dev, "cxl: PERST failed to trigger EEH\n");

	return rc;
}

static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
{
	if (pci_request_region(dev, 2, "priv 2 regs"))
		goto err1;
	if (pci_request_region(dev, 0, "priv 1 regs"))
		goto err2;

	pr_devel("cxl_map_adapter_regs: p1: %#.16llx %#llx, p2: %#.16llx %#llx",
			p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));

	if (!(adapter->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
		goto err3;

	if (!(adapter->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
		goto err4;

	return 0;

err4:
	iounmap(adapter->p1_mmio);
	adapter->p1_mmio = NULL;
err3:
	pci_release_region(dev, 0);
err2:
	pci_release_region(dev, 2);
err1:
	return -ENOMEM;
}

static void cxl_unmap_adapter_regs(struct cxl *adapter)
{
	if (adapter->p1_mmio)
		iounmap(adapter->p1_mmio);
	if (adapter->p2_mmio)
		iounmap(adapter->p2_mmio);
}

static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
{
	int vsec;
	u32 afu_desc_off, afu_desc_size;
	u32 ps_off, ps_size;
	u16 vseclen;
	u8 image_state;

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen);
	if (vseclen < CXL_VSEC_MIN_SIZE) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC too short\n");
		return -EINVAL;
	}

	CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status);
	CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev);
	CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major);
	CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor);
	CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
	CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
	adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
	adapter->perst_loads_image = true;
	adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);

	CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
	CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
	CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, &afu_desc_size);
	CXL_READ_VSEC_PS_OFF(dev, vsec, &ps_off);
	CXL_READ_VSEC_PS_SIZE(dev, vsec, &ps_size);

	/* Convert everything to bytes, because there is NO WAY I'd look at the
	 * code a month later and forget what units these are in ;-) */
	adapter->ps_off = ps_off * 64 * 1024;
	adapter->ps_size = ps_size * 64 * 1024;
	adapter->afu_desc_off = afu_desc_off * 64 * 1024;
	adapter->afu_desc_size = afu_desc_size * 64 * 1024;

	/* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */
	adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices;

	return 0;
}

static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
{
	if (adapter->vsec_status & CXL_STATUS_SECOND_PORT)
		return -EBUSY;

	if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) {
		dev_err(&dev->dev, "ABORTING: CXL requires unsupported features\n");
		return -EINVAL;
	}

	if (!adapter->slices) {
		/* Once we support dynamic reprogramming we can use the card if
		 * it supports loadable AFUs */
		dev_err(&dev->dev, "ABORTING: Device has no AFUs\n");
		return -EINVAL;
	}

	if (!adapter->afu_desc_off || !adapter->afu_desc_size) {
		dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n");
		return -EINVAL;
	}

	if (adapter->ps_size > p2_size(dev) - adapter->ps_off) {
		dev_err(&dev->dev, "ABORTING: Problem state size larger than "
			"available in BAR2: 0x%llx > 0x%llx\n",
			adapter->ps_size, p2_size(dev) - adapter->ps_off);
		return -EINVAL;
	}

	return 0;
}

static void cxl_release_adapter(struct device *dev)
{
	struct cxl *adapter = to_cxl_adapter(dev);

	pr_devel("cxl_release_adapter\n");

	kfree(adapter);
}

static struct cxl *cxl_alloc_adapter(struct pci_dev *dev)
{
	struct cxl *adapter;

	if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
		return NULL;

	adapter->dev.parent = &dev->dev;
	adapter->dev.release = cxl_release_adapter;
	pci_set_drvdata(dev, adapter);
	spin_lock_init(&adapter->afu_list_lock);

	return adapter;
}

static int sanitise_adapter_regs(struct cxl *adapter)
{
	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
	return cxl_tlb_slb_invalidate(adapter);
}

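/*
 * Adapter bring-up order matters: the VSEC is read and validated first, then
 * BAR 4/5 is programmed and the card is switched into CXL mode, and only
 * after that are the p1/p2 MMIO regions mapped and the PHB put into CAPI
 * mode (with snooping enabled last).
 */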
static struct cxl *cxl_init_adapter(struct pci_dev *dev)
{
	struct cxl *adapter;
	bool free = true;
	int rc;

	if (!(adapter = cxl_alloc_adapter(dev)))
		return ERR_PTR(-ENOMEM);

	if ((rc = cxl_read_vsec(adapter, dev)))
		goto err1;

	if ((rc = cxl_vsec_looks_ok(adapter, dev)))
		goto err1;

	if ((rc = setup_cxl_bars(dev)))
		goto err1;

	if ((rc = switch_card_to_cxl(dev)))
		goto err1;

	if ((rc = cxl_alloc_adapter_nr(adapter)))
		goto err1;

	if ((rc = dev_set_name(&adapter->dev, "card%i", adapter->adapter_num)))
		goto err2;

	if ((rc = cxl_update_image_control(adapter)))
		goto err2;

	if ((rc = cxl_map_adapter_regs(adapter, dev)))
		goto err2;

	if ((rc = sanitise_adapter_regs(adapter)))
		goto err2;

	if ((rc = init_implementation_adapter_regs(adapter, dev)))
		goto err3;

	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI)))
		goto err3;

	/* If recovery happened, the last step is to turn on snooping.
	 * In the non-recovery case this has no effect */
	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON)))
		goto err3;

	if ((rc = cxl_register_psl_err_irq(adapter)))
		goto err3;

	/* Don't care if this one fails: */
	cxl_debugfs_adapter_add(adapter);

	/*
	 * After we call this function we must not free the adapter directly,
	 * even if it returns an error!
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	return adapter;

err_put1:
	device_unregister(&adapter->dev);
	free = false;
	cxl_debugfs_adapter_remove(adapter);
	cxl_release_psl_err_irq(adapter);
err3:
	cxl_unmap_adapter_regs(adapter);
err2:
	cxl_remove_adapter_nr(adapter);
err1:
	if (free)
		kfree(adapter);
	return ERR_PTR(rc);
}

static void cxl_remove_adapter(struct cxl *adapter)
{
	struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);

	pr_devel("cxl_remove_adapter\n");

	cxl_sysfs_adapter_remove(adapter);
	cxl_debugfs_adapter_remove(adapter);
	cxl_release_psl_err_irq(adapter);
	cxl_unmap_adapter_regs(adapter);
	cxl_remove_adapter_nr(adapter);

	device_unregister(&adapter->dev);

	pci_release_region(pdev, 0);
	pci_release_region(pdev, 2);
	pci_disable_device(pdev);
}

static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct cxl *adapter;
	int slice;
	int rc;

	pci_dev_get(dev);

	if (cxl_verbose)
		dump_cxl_config_space(dev);

	if ((rc = pci_enable_device(dev))) {
		dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
		return rc;
	}

	adapter = cxl_init_adapter(dev);
	if (IS_ERR(adapter)) {
		dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
		pci_disable_device(dev);
		return PTR_ERR(adapter);
	}

	for (slice = 0; slice < adapter->slices; slice++) {
		if ((rc = cxl_init_afu(adapter, slice, dev)))
			dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
	}

	return 0;
}

static void cxl_remove(struct pci_dev *dev)
{
	struct cxl *adapter = pci_get_drvdata(dev);
	struct cxl_afu *afu;
	int i;

	/*
	 * Lock to prevent someone grabbing a ref through the adapter list as
	 * we are removing it
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];
		cxl_pci_vphb_remove(afu);
		cxl_remove_afu(afu);
	}
	cxl_remove_adapter(adapter);
}

struct pci_driver cxl_pci_driver = {
	.name = "cxl-pci",
	.id_table = cxl_pci_tbl,
	.probe = cxl_probe,
	.remove = cxl_remove,
	.shutdown = cxl_remove,
};