// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Hauke Mehrtens <hauke@hauke-m.de>
 * Copyright (C) 2015 Broadcom Corporation
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/msi.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/mbus.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>

#include "pcie-iproc.h"

#define EP_PERST_SOURCE_SELECT_SHIFT	2
#define EP_PERST_SOURCE_SELECT		BIT(EP_PERST_SOURCE_SELECT_SHIFT)
#define EP_MODE_SURVIVE_PERST_SHIFT	1
#define EP_MODE_SURVIVE_PERST		BIT(EP_MODE_SURVIVE_PERST_SHIFT)
#define RC_PCIE_RST_OUTPUT_SHIFT	0
#define RC_PCIE_RST_OUTPUT		BIT(RC_PCIE_RST_OUTPUT_SHIFT)
#define PAXC_RESET_MASK			0x7f

#define GIC_V3_CFG_SHIFT		0
#define GIC_V3_CFG			BIT(GIC_V3_CFG_SHIFT)

#define MSI_ENABLE_CFG_SHIFT		0
#define MSI_ENABLE_CFG			BIT(MSI_ENABLE_CFG_SHIFT)

#define CFG_IND_ADDR_MASK		0x00001ffc

#define CFG_ADDR_REG_NUM_MASK		0x00000ffc
#define CFG_ADDR_CFG_TYPE_1		1

#define SYS_RC_INTX_MASK		0xf

#define PCIE_PHYLINKUP_SHIFT		3
#define PCIE_PHYLINKUP			BIT(PCIE_PHYLINKUP_SHIFT)
#define PCIE_DL_ACTIVE_SHIFT		2
#define PCIE_DL_ACTIVE			BIT(PCIE_DL_ACTIVE_SHIFT)

#define APB_ERR_EN_SHIFT		0
#define APB_ERR_EN			BIT(APB_ERR_EN_SHIFT)

#define CFG_RD_SUCCESS			0
#define CFG_RD_UR			1
#define CFG_RD_CRS			2
#define CFG_RD_CA			3
#define CFG_RETRY_STATUS		0xffff0001
#define CFG_RETRY_STATUS_TIMEOUT_US	500000 /* 500 milliseconds */

/* derive the enum index of the outbound/inbound mapping registers */
#define MAP_REG(base_reg, index)	((base_reg) + (index) * 2)
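
/*
 * For example, since the OARR/OMAP (and IARR/IMAP) enum entries below are
 * declared in interleaved pairs, MAP_REG(IPROC_PCIE_OARR0, 1) evaluates to
 * IPROC_PCIE_OARR0 + 2, i.e. IPROC_PCIE_OARR1, skipping over
 * IPROC_PCIE_OMAP0.
 */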

/*
 * Maximum number of outbound mapping window sizes that can be supported by any
 * OARR/OMAP mapping pair
 */
#define MAX_NUM_OB_WINDOW_SIZES		4

#define OARR_VALID_SHIFT		0
#define OARR_VALID			BIT(OARR_VALID_SHIFT)
#define OARR_SIZE_CFG_SHIFT		1

/*
 * Maximum number of inbound mapping region sizes that can be supported by an
 * IARR
 */
#define MAX_NUM_IB_REGION_SIZES		9

#define IMAP_VALID_SHIFT		0
#define IMAP_VALID			BIT(IMAP_VALID_SHIFT)

#define IPROC_PCI_PM_CAP		0x48
#define IPROC_PCI_PM_CAP_MASK		0xffff
#define IPROC_PCI_EXP_CAP		0xac

#define IPROC_PCIE_REG_INVALID		0xffff

/**
 * struct iproc_pcie_ob_map - iProc PCIe outbound mapping controller-specific
 * parameters
 * @window_sizes: list of supported outbound mapping window sizes in MB
 * @nr_sizes: number of supported outbound mapping window sizes
 */
struct iproc_pcie_ob_map {
	resource_size_t window_sizes[MAX_NUM_OB_WINDOW_SIZES];
	unsigned int nr_sizes;
};

static const struct iproc_pcie_ob_map paxb_ob_map[] = {
	{
		/* OARR0/OMAP0 */
		.window_sizes = { 128, 256 },
		.nr_sizes = 2,
	},
	{
		/* OARR1/OMAP1 */
		.window_sizes = { 128, 256 },
		.nr_sizes = 2,
	},
};

static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = {
	{
		/* OARR0/OMAP0 */
		.window_sizes = { 128, 256 },
		.nr_sizes = 2,
	},
	{
		/* OARR1/OMAP1 */
		.window_sizes = { 128, 256 },
		.nr_sizes = 2,
	},
	{
		/* OARR2/OMAP2 */
		.window_sizes = { 128, 256, 512, 1024 },
		.nr_sizes = 4,
	},
	{
		/* OARR3/OMAP3 */
		.window_sizes = { 128, 256, 512, 1024 },
		.nr_sizes = 4,
	},
};

/*
 * iProc PCIe inbound mapping type
 */
enum iproc_pcie_ib_map_type {
	/* for DDR memory */
	IPROC_PCIE_IB_MAP_MEM = 0,

	/* for device I/O memory */
	IPROC_PCIE_IB_MAP_IO,

	/* invalid or unused */
	IPROC_PCIE_IB_MAP_INVALID
};

/**
 * struct iproc_pcie_ib_map - iProc PCIe inbound mapping controller-specific
 * parameters
 * @type: inbound mapping region type
 * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or
 * SZ_1G
 * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or
 * GB, depending on the size unit
 * @nr_sizes: number of supported inbound mapping region sizes
 * @nr_windows: number of supported inbound mapping windows for the region
 * @imap_addr_offset: register offset between the upper and lower 32-bit
 * IMAP address registers
 * @imap_window_offset: register offset between each IMAP window
 */
struct iproc_pcie_ib_map {
	enum iproc_pcie_ib_map_type type;
	unsigned int size_unit;
	resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES];
	unsigned int nr_sizes;
	unsigned int nr_windows;
	u16 imap_addr_offset;
	u16 imap_window_offset;
};

static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = {
	{
		/* IARR0/IMAP0 */
		.type = IPROC_PCIE_IB_MAP_IO,
		.size_unit = SZ_1K,
		.region_sizes = { 32 },
		.nr_sizes = 1,
		.nr_windows = 8,
		.imap_addr_offset = 0x40,
		.imap_window_offset = 0x4,
	},
	{
		/* IARR1/IMAP1 */
		.type = IPROC_PCIE_IB_MAP_MEM,
		.size_unit = SZ_1M,
		.region_sizes = { 8 },
		.nr_sizes = 1,
		.nr_windows = 8,
		.imap_addr_offset = 0x4,
		.imap_window_offset = 0x8,
	},
	{
		/* IARR2/IMAP2 */
		.type = IPROC_PCIE_IB_MAP_MEM,
		.size_unit = SZ_1M,
		.region_sizes = { 64, 128, 256, 512, 1024, 2048, 4096, 8192,
				  16384 },
		.nr_sizes = 9,
		.nr_windows = 1,
		.imap_addr_offset = 0x4,
		.imap_window_offset = 0x8,
	},
	{
		/* IARR3/IMAP3 */
		.type = IPROC_PCIE_IB_MAP_MEM,
		.size_unit = SZ_1G,
		.region_sizes = { 1, 2, 4, 8, 16, 32 },
		.nr_sizes = 6,
		.nr_windows = 8,
		.imap_addr_offset = 0x4,
		.imap_window_offset = 0x8,
	},
	{
		/* IARR4/IMAP4 */
		.type = IPROC_PCIE_IB_MAP_MEM,
		.size_unit = SZ_1G,
		.region_sizes = { 32, 64, 128, 256, 512 },
		.nr_sizes = 5,
		.nr_windows = 8,
		.imap_addr_offset = 0x4,
		.imap_window_offset = 0x8,
	},
};

/*
 * iProc PCIe host registers
 */
enum iproc_pcie_reg {
	/* clock/reset signal control */
	IPROC_PCIE_CLK_CTRL = 0,

	/*
	 * To allow MSI to be steered to an external MSI controller (e.g., ARM
	 * GICv3 ITS)
	 */
	IPROC_PCIE_MSI_GIC_MODE,

	/*
	 * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the
	 * window where the MSI posted writes are written, for the writes to be
	 * interpreted as MSI writes.
	 */
	IPROC_PCIE_MSI_BASE_ADDR,
	IPROC_PCIE_MSI_WINDOW_SIZE,

	/*
	 * To hold the address of the register where the MSI writes are
	 * programmed.  When ARM GICv3 ITS is used, this should be programmed
	 * with the address of the GITS_TRANSLATER register.
	 */
	IPROC_PCIE_MSI_ADDR_LO,
	IPROC_PCIE_MSI_ADDR_HI,

	/* enable MSI */
	IPROC_PCIE_MSI_EN_CFG,

	/* allow access to root complex configuration space */
	IPROC_PCIE_CFG_IND_ADDR,
	IPROC_PCIE_CFG_IND_DATA,

	/* allow access to device configuration space */
	IPROC_PCIE_CFG_ADDR,
	IPROC_PCIE_CFG_DATA,

	/* enable INTx */
	IPROC_PCIE_INTX_EN,

	/* outbound address mapping */
	IPROC_PCIE_OARR0,
	IPROC_PCIE_OMAP0,
	IPROC_PCIE_OARR1,
	IPROC_PCIE_OMAP1,
	IPROC_PCIE_OARR2,
	IPROC_PCIE_OMAP2,
	IPROC_PCIE_OARR3,
	IPROC_PCIE_OMAP3,

	/* inbound address mapping */
	IPROC_PCIE_IARR0,
	IPROC_PCIE_IMAP0,
	IPROC_PCIE_IARR1,
	IPROC_PCIE_IMAP1,
	IPROC_PCIE_IARR2,
	IPROC_PCIE_IMAP2,
	IPROC_PCIE_IARR3,
	IPROC_PCIE_IMAP3,
	IPROC_PCIE_IARR4,
	IPROC_PCIE_IMAP4,

	/* config read status */
	IPROC_PCIE_CFG_RD_STATUS,

	/* link status */
	IPROC_PCIE_LINK_STATUS,

	/* enable APB error for unsupported requests */
	IPROC_PCIE_APB_ERR_EN,

	/* total number of core registers */
	IPROC_PCIE_MAX_NUM_REG,
};

/* iProc PCIe PAXB BCMA registers */
static const u16 iproc_pcie_reg_paxb_bcma[IPROC_PCIE_MAX_NUM_REG] = {
	[IPROC_PCIE_CLK_CTRL]		= 0x000,
	[IPROC_PCIE_CFG_IND_ADDR]	= 0x120,
	[IPROC_PCIE_CFG_IND_DATA]	= 0x124,
	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
	[IPROC_PCIE_INTX_EN]		= 0x330,
	[IPROC_PCIE_LINK_STATUS]	= 0xf0c,
};

/* iProc PCIe PAXB registers */
static const u16 iproc_pcie_reg_paxb[IPROC_PCIE_MAX_NUM_REG] = {
	[IPROC_PCIE_CLK_CTRL]		= 0x000,
	[IPROC_PCIE_CFG_IND_ADDR]	= 0x120,
	[IPROC_PCIE_CFG_IND_DATA]	= 0x124,
	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
	[IPROC_PCIE_INTX_EN]		= 0x330,
	[IPROC_PCIE_OARR0]		= 0xd20,
	[IPROC_PCIE_OMAP0]		= 0xd40,
	[IPROC_PCIE_OARR1]		= 0xd28,
	[IPROC_PCIE_OMAP1]		= 0xd48,
	[IPROC_PCIE_LINK_STATUS]	= 0xf0c,
	[IPROC_PCIE_APB_ERR_EN]		= 0xf40,
};

/* iProc PCIe PAXB v2 registers */
static const u16 iproc_pcie_reg_paxb_v2[IPROC_PCIE_MAX_NUM_REG] = {
	[IPROC_PCIE_CLK_CTRL]		= 0x000,
	[IPROC_PCIE_CFG_IND_ADDR]	= 0x120,
	[IPROC_PCIE_CFG_IND_DATA]	= 0x124,
	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
	[IPROC_PCIE_INTX_EN]		= 0x330,
	[IPROC_PCIE_OARR0]		= 0xd20,
	[IPROC_PCIE_OMAP0]		= 0xd40,
	[IPROC_PCIE_OARR1]		= 0xd28,
	[IPROC_PCIE_OMAP1]		= 0xd48,
	[IPROC_PCIE_OARR2]		= 0xd60,
	[IPROC_PCIE_OMAP2]		= 0xd68,
	[IPROC_PCIE_OARR3]		= 0xdf0,
	[IPROC_PCIE_OMAP3]		= 0xdf8,
	[IPROC_PCIE_IARR0]		= 0xd00,
	[IPROC_PCIE_IMAP0]		= 0xc00,
	[IPROC_PCIE_IARR1]		= 0xd08,
	[IPROC_PCIE_IMAP1]		= 0xd70,
	[IPROC_PCIE_IARR2]		= 0xd10,
	[IPROC_PCIE_IMAP2]		= 0xcc0,
	[IPROC_PCIE_IARR3]		= 0xe00,
	[IPROC_PCIE_IMAP3]		= 0xe08,
	[IPROC_PCIE_IARR4]		= 0xe68,
	[IPROC_PCIE_IMAP4]		= 0xe70,
	[IPROC_PCIE_CFG_RD_STATUS]	= 0xee0,
	[IPROC_PCIE_LINK_STATUS]	= 0xf0c,
	[IPROC_PCIE_APB_ERR_EN]		= 0xf40,
};

/* iProc PCIe PAXC v1 registers */
static const u16 iproc_pcie_reg_paxc[IPROC_PCIE_MAX_NUM_REG] = {
	[IPROC_PCIE_CLK_CTRL]		= 0x000,
	[IPROC_PCIE_CFG_IND_ADDR]	= 0x1f0,
	[IPROC_PCIE_CFG_IND_DATA]	= 0x1f4,
	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
};

/* iProc PCIe PAXC v2 registers */
static const u16 iproc_pcie_reg_paxc_v2[IPROC_PCIE_MAX_NUM_REG] = {
	[IPROC_PCIE_MSI_GIC_MODE]	= 0x050,
	[IPROC_PCIE_MSI_BASE_ADDR]	= 0x074,
	[IPROC_PCIE_MSI_WINDOW_SIZE]	= 0x078,
	[IPROC_PCIE_MSI_ADDR_LO]	= 0x07c,
	[IPROC_PCIE_MSI_ADDR_HI]	= 0x080,
	[IPROC_PCIE_MSI_EN_CFG]		= 0x09c,
	[IPROC_PCIE_CFG_IND_ADDR]	= 0x1f0,
	[IPROC_PCIE_CFG_IND_DATA]	= 0x1f4,
	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
};

/*
 * List of device IDs of controllers that have a corrupted capability list
 * that requires a SW fixup
 */
static const u16 iproc_pcie_corrupt_cap_did[] = {
	0x16cd,
	0x16f0,
	0xd802,
	0xd804
};

static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
{
	struct iproc_pcie *pcie = bus->sysdata;
	return pcie;
}

static inline bool iproc_pcie_reg_is_invalid(u16 reg_offset)
{
	return reg_offset == IPROC_PCIE_REG_INVALID;
}

static inline u16 iproc_pcie_reg_offset(struct iproc_pcie *pcie,
					enum iproc_pcie_reg reg)
{
	return pcie->reg_offsets[reg];
}

static inline u32 iproc_pcie_read_reg(struct iproc_pcie *pcie,
				      enum iproc_pcie_reg reg)
{
	u16 offset = iproc_pcie_reg_offset(pcie, reg);

	if (iproc_pcie_reg_is_invalid(offset))
		return 0;

	return readl(pcie->base + offset);
}

static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie,
					enum iproc_pcie_reg reg, u32 val)
{
	u16 offset = iproc_pcie_reg_offset(pcie, reg);

	if (iproc_pcie_reg_is_invalid(offset))
		return;

	writel(val, pcie->base + offset);
}

/*
 * APB error forwarding can be disabled during access of configuration
 * registers of the endpoint device, to prevent unsupported requests
 * (typically seen during enumeration with multi-function devices) from
 * triggering a system exception.
 */
static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus,
					      bool disable)
{
	struct iproc_pcie *pcie = iproc_data(bus);
	u32 val;

	if (bus->number && pcie->has_apb_err_disable) {
		val = iproc_pcie_read_reg(pcie, IPROC_PCIE_APB_ERR_EN);
		if (disable)
			val &= ~APB_ERR_EN;
		else
			val |= APB_ERR_EN;
		iproc_pcie_write_reg(pcie, IPROC_PCIE_APB_ERR_EN, val);
	}
}
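
/*
 * Illustration of the CFG_ADDR encoding used below (hypothetical values):
 * for busno = 1, devfn = 0 and where = 0x10, PCIE_ECAM_OFFSET() packs the
 * fields as (1 << 20) | (0 << 12) | 0x10 = 0x100010; OR-ing in
 * CFG_ADDR_CFG_TYPE_1 yields 0x100011, a dword-aligned Type 1 configuration
 * request that is then written to the CFG_ADDR register.
 */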

static void __iomem *iproc_pcie_map_ep_cfg_reg(struct iproc_pcie *pcie,
					       unsigned int busno,
					       unsigned int devfn,
					       int where)
{
	u16 offset;
	u32 val;

	/* EP device access */
	val = ALIGN_DOWN(PCIE_ECAM_OFFSET(busno, devfn, where), 4) |
		CFG_ADDR_CFG_TYPE_1;

	iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val);
	offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA);

	if (iproc_pcie_reg_is_invalid(offset))
		return NULL;

	return (pcie->base + offset);
}

static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie,
					 void __iomem *cfg_data_p)
{
	int timeout = CFG_RETRY_STATUS_TIMEOUT_US;
	unsigned int data;
	u32 status;

	/*
	 * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only
	 * affects config reads of the Vendor ID.  For config writes or any
	 * other config reads, the Root may automatically reissue the
	 * configuration request again as a new request.
	 *
	 * For config reads, this hardware returns CFG_RETRY_STATUS data
	 * when it receives a CRS completion, regardless of the address of
	 * the read or the CRS Software Visibility Enable bit.  As a
	 * partial workaround for this, we retry in software any read that
	 * returns CFG_RETRY_STATUS.
	 *
	 * Note that a non-Vendor ID config register may have a value of
	 * CFG_RETRY_STATUS.  If we read that, we can't distinguish it from
	 * a CRS completion, so we will incorrectly retry the read and
	 * eventually return the wrong data (0xffffffff).
	 */
	data = readl(cfg_data_p);
	while (data == CFG_RETRY_STATUS && timeout--) {
		/*
		 * CRS state is set in the CFG_RD status register.
		 * This will handle the case where CFG_RETRY_STATUS is
		 * valid config data.
		 */
		status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS);
		if (status != CFG_RD_CRS)
			return data;

		udelay(1);
		data = readl(cfg_data_p);
	}

	if (data == CFG_RETRY_STATUS)
		data = 0xffffffff;

	return data;
}

static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val)
{
	u32 i, dev_id;

	switch (where & ~0x3) {
	case PCI_VENDOR_ID:
		dev_id = *val >> 16;

		/*
		 * Activate fixup for those controllers that have corrupted
		 * capability list registers
		 */
		for (i = 0; i < ARRAY_SIZE(iproc_pcie_corrupt_cap_did); i++)
			if (dev_id == iproc_pcie_corrupt_cap_did[i])
				pcie->fix_paxc_cap = true;
		break;

	case IPROC_PCI_PM_CAP:
		if (pcie->fix_paxc_cap) {
			/* advertise PM, force next capability to PCIe */
			*val &= ~IPROC_PCI_PM_CAP_MASK;
			*val |= IPROC_PCI_EXP_CAP << 8 | PCI_CAP_ID_PM;
		}
		break;

	case IPROC_PCI_EXP_CAP:
		if (pcie->fix_paxc_cap) {
			/* advertise root port, version 2, terminate here */
			*val = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2) << 16 |
				PCI_CAP_ID_EXP;
		}
		break;

	case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL:
		/* Don't advertise CRS SV support */
		*val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
		break;

	default:
		break;
	}
}

static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *val)
{
	struct iproc_pcie *pcie = iproc_data(bus);
	unsigned int busno = bus->number;
	void __iomem *cfg_data_p;
	unsigned int data;
	int ret;

	/* root complex access */
	if (busno == 0) {
		ret = pci_generic_config_read32(bus, devfn, where, size, val);
		if (ret == PCIBIOS_SUCCESSFUL)
			iproc_pcie_fix_cap(pcie, where, val);

		return ret;
	}

	cfg_data_p = iproc_pcie_map_ep_cfg_reg(pcie, busno, devfn, where);

	if (!cfg_data_p)
		return PCIBIOS_DEVICE_NOT_FOUND;

	data = iproc_pcie_cfg_retry(pcie, cfg_data_p);

	*val = data;
	if (size <= 2)
		*val = (data >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

	/*
	 * For PAXC and PAXCv2, the total number of PFs that one can enumerate
	 * depends on the firmware configuration.  Unfortunately, due to an
	 * ASIC bug, unconfigured PFs cannot be properly hidden from the root
	 * complex.  As a result, write access to these PFs will cause a bus
	 * lockup on the embedded processor.
	 *
	 * Since all unconfigured PFs are left with an incorrect, stale device
	 * ID of 0x168e (PCI_DEVICE_ID_NX2_57810), we try to catch those
	 * accesses early here and reject them all.
	 */
#define DEVICE_ID_MASK     0xffff0000
#define DEVICE_ID_SHIFT    16
	if (pcie->rej_unconfig_pf &&
	    (where & CFG_ADDR_REG_NUM_MASK) == PCI_VENDOR_ID)
		if ((*val & DEVICE_ID_MASK) ==
		    (PCI_DEVICE_ID_NX2_57810 << DEVICE_ID_SHIFT))
			return PCIBIOS_FUNC_NOT_SUPPORTED;

	return PCIBIOS_SUCCESSFUL;
}

/*
 * Note: access to the configuration registers is protected at the higher
 * layer by 'pci_lock' in drivers/pci/access.c
 */
static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie,
					    int busno, unsigned int devfn,
					    int where)
{
	u16 offset;

	/* root complex access */
	if (busno == 0) {
		if (PCIE_ECAM_DEVFN(devfn) > 0)
			return NULL;

		iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
				     where & CFG_IND_ADDR_MASK);
		offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA);
		if (iproc_pcie_reg_is_invalid(offset))
			return NULL;
		else
			return (pcie->base + offset);
	}

	return iproc_pcie_map_ep_cfg_reg(pcie, busno, devfn, where);
}

static void __iomem *iproc_pcie_bus_map_cfg_bus(struct pci_bus *bus,
						unsigned int devfn,
						int where)
{
	return iproc_pcie_map_cfg_bus(iproc_data(bus), bus->number, devfn,
				      where);
}

static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie,
				       unsigned int devfn, int where,
				       int size, u32 *val)
{
	void __iomem *addr;

	addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	*val = readl(addr);

	if (size <= 2)
		*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

	return PCIBIOS_SUCCESSFUL;
}

static int iproc_pci_raw_config_write32(struct iproc_pcie *pcie,
					unsigned int devfn, int where,
					int size, u32 val)
{
	void __iomem *addr;
	u32 mask, tmp;

	addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 4) {
		writel(val, addr);
		return PCIBIOS_SUCCESSFUL;
	}

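	/*
	 * Sub-word write: read-modify-write the containing dword.  For
	 * example (hypothetical values): a 1-byte write at where = 0x05
	 * builds mask = ~(0xff << 8) = 0xffff00ff, clears bits [15:8] of
	 * the current dword, and merges in val << 8.
	 */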
	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
	tmp = readl(addr) & mask;
	tmp |= val << ((where & 0x3) * 8);
	writel(tmp, addr);

	return PCIBIOS_SUCCESSFUL;
}

static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
				    int where, int size, u32 *val)
{
	int ret;
	struct iproc_pcie *pcie = iproc_data(bus);

	iproc_pcie_apb_err_disable(bus, true);
	if (pcie->iproc_cfg_read)
		ret = iproc_pcie_config_read(bus, devfn, where, size, val);
	else
		ret = pci_generic_config_read32(bus, devfn, where, size, val);
	iproc_pcie_apb_err_disable(bus, false);

	return ret;
}

static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn,
				     int where, int size, u32 val)
{
	int ret;

	iproc_pcie_apb_err_disable(bus, true);
	ret = pci_generic_config_write32(bus, devfn, where, size, val);
	iproc_pcie_apb_err_disable(bus, false);

	return ret;
}

static struct pci_ops iproc_pcie_ops = {
	.map_bus = iproc_pcie_bus_map_cfg_bus,
	.read = iproc_pcie_config_read32,
	.write = iproc_pcie_config_write32,
};

static void iproc_pcie_perst_ctrl(struct iproc_pcie *pcie, bool assert)
{
	u32 val;

	/*
	 * PAXC and the internal emulated endpoint device downstream should not
	 * be reset.  If firmware has been loaded on the endpoint device at an
	 * earlier boot stage, reset here causes issues.
	 */
	if (pcie->ep_is_internal)
		return;

	if (assert) {
		val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
		val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST &
			~RC_PCIE_RST_OUTPUT;
		iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
		udelay(250);
	} else {
		val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
		val |= RC_PCIE_RST_OUTPUT;
		iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
		msleep(100);
	}
}

int iproc_pcie_shutdown(struct iproc_pcie *pcie)
{
	iproc_pcie_perst_ctrl(pcie, true);
	msleep(500);

	return 0;
}
EXPORT_SYMBOL_GPL(iproc_pcie_shutdown);

static int iproc_pcie_check_link(struct iproc_pcie *pcie)
{
	struct device *dev = pcie->dev;
	u32 hdr_type, link_ctrl, link_status, class, val;
	bool link_is_active = false;

	/*
	 * PAXC connects to emulated endpoint devices directly and does not
	 * have a Serdes.  Therefore skip the link detection logic here.
	 */
	if (pcie->ep_is_internal)
		return 0;

	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS);
	if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) {
		dev_err(dev, "PHY or data link is INACTIVE!\n");
		return -ENODEV;
	}

	/* make sure we are not in EP mode */
	iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type);
	if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
		dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
		return -EFAULT;
	}

	/* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */
#define PCI_BRIDGE_CTRL_REG_OFFSET	0x43c
#define PCI_CLASS_BRIDGE_MASK		0xffff00
#define PCI_CLASS_BRIDGE_SHIFT		8
	iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
				    4, &class);
	class &= ~PCI_CLASS_BRIDGE_MASK;
	class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT);
	iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
				     4, class);

	/* check link status to see if link is active */
	iproc_pci_raw_config_read32(pcie, 0, IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA,
				    2, &link_status);
	if (link_status & PCI_EXP_LNKSTA_NLW)
		link_is_active = true;

	if (!link_is_active) {
		/* try GEN 1 link speed */
#define PCI_TARGET_LINK_SPEED_MASK	0xf
#define PCI_TARGET_LINK_SPEED_GEN2	0x2
#define PCI_TARGET_LINK_SPEED_GEN1	0x1
		iproc_pci_raw_config_read32(pcie, 0,
					    IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2,
					    4, &link_ctrl);
		if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
		    PCI_TARGET_LINK_SPEED_GEN2) {
			link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
			link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
			iproc_pci_raw_config_write32(pcie, 0,
					IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2,
					4, link_ctrl);
			msleep(100);

			iproc_pci_raw_config_read32(pcie, 0,
					IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA,
					2, &link_status);
			if (link_status & PCI_EXP_LNKSTA_NLW)
				link_is_active = true;
		}
	}

	dev_info(dev, "link: %s\n", link_is_active ? "UP" : "DOWN");

	return link_is_active ? 0 : -ENODEV;
}

static void iproc_pcie_enable(struct iproc_pcie *pcie)
{
	iproc_pcie_write_reg(pcie, IPROC_PCIE_INTX_EN, SYS_RC_INTX_MASK);
}

static inline bool iproc_pcie_ob_is_valid(struct iproc_pcie *pcie,
					  int window_idx)
{
	u32 val;

	val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx));

	return !!(val & OARR_VALID);
}

static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx,
				      int size_idx, u64 axi_addr, u64 pci_addr)
{
	struct device *dev = pcie->dev;
	u16 oarr_offset, omap_offset;

	/*
	 * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based
	 * on window index.
	 */
	oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0,
							  window_idx));
	omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0,
							  window_idx));
	if (iproc_pcie_reg_is_invalid(oarr_offset) ||
	    iproc_pcie_reg_is_invalid(omap_offset))
		return -EINVAL;

	/*
	 * Program the OARR registers.  The upper 32-bit OARR register is
	 * always right after the lower 32-bit OARR register.
	 */
	writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) |
	       OARR_VALID, pcie->base + oarr_offset);
	writel(upper_32_bits(axi_addr), pcie->base + oarr_offset + 4);

	/* now program the OMAP registers */
	writel(lower_32_bits(pci_addr), pcie->base + omap_offset);
	writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4);

	dev_dbg(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n",
		window_idx, oarr_offset, &axi_addr, &pci_addr);
	dev_dbg(dev, "oarr lo 0x%x oarr hi 0x%x\n",
		readl(pcie->base + oarr_offset),
		readl(pcie->base + oarr_offset + 4));
	dev_dbg(dev, "omap lo 0x%x omap hi 0x%x\n",
		readl(pcie->base + omap_offset),
		readl(pcie->base + omap_offset + 4));

	return 0;
}

/*
 * Some iProc SoCs require the SW to configure the outbound address mapping.
 *
 * Outbound address translation:
 *
 *	iproc_pcie_address = axi_address - axi_offset
 *	OARR = iproc_pcie_address
 *	OMAP = pci_addr
 *
 *	axi_addr -> iproc_pcie_address -> OARR -> OMAP -> pci_address
 */
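/*
 * Worked example (hypothetical values): with axi_addr = 0x48000000,
 * axi_offset = 0x40000000 and pci_addr = 0x0, the OARR is programmed with
 * the internal address 0x48000000 - 0x40000000 = 0x8000000 and the OMAP
 * with 0x0, so CPU accesses to 0x48000000 appear on PCIe starting at
 * address 0x0.
 */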
static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
			       u64 pci_addr, resource_size_t size)
{
	struct iproc_pcie_ob *ob = &pcie->ob;
	struct device *dev = pcie->dev;
	int ret = -EINVAL, window_idx, size_idx;

	if (axi_addr < ob->axi_offset) {
		dev_err(dev, "axi address %pap less than offset %pap\n",
			&axi_addr, &ob->axi_offset);
		return -EINVAL;
	}

	/*
	 * Translate the AXI address to the internal address used by the iProc
	 * PCIe core before programming the OARR
	 */
	axi_addr -= ob->axi_offset;

	/* iterate through all OARR/OMAP mapping windows */
	for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) {
		const struct iproc_pcie_ob_map *ob_map =
			&pcie->ob_map[window_idx];

		/*
		 * If current outbound window is already in use, move on to the
		 * next one.
		 */
		if (iproc_pcie_ob_is_valid(pcie, window_idx))
			continue;

		/*
		 * Iterate through all supported window sizes within the
		 * OARR/OMAP pair to find a match.  Go through the window sizes
		 * in a descending order.
		 */
		for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0;
		     size_idx--) {
			resource_size_t window_size =
				ob_map->window_sizes[size_idx] * SZ_1M;

			/*
			 * If the remaining size is smaller than this window,
			 * keep iterating until we reach the last window with
			 * the minimal window size at index zero.  At that
			 * point, compromise by mapping the region with the
			 * minimum window size that can be supported.
			 */
			if (size < window_size) {
				if (size_idx > 0 || window_idx > 0)
					continue;

				/*
				 * Corner case: the minimal supported window
				 * size has been reached on the last window.
				 * Align the addresses down and round the
				 * size up to that window size.
				 */
				axi_addr = ALIGN_DOWN(axi_addr, window_size);
				pci_addr = ALIGN_DOWN(pci_addr, window_size);
				size = window_size;
			}

			if (!IS_ALIGNED(axi_addr, window_size) ||
			    !IS_ALIGNED(pci_addr, window_size)) {
				dev_err(dev,
					"axi %pap or pci %pap not aligned\n",
					&axi_addr, &pci_addr);
				return -EINVAL;
			}

			/*
			 * Match found!  Program both OARR and OMAP and mark
			 * them as a valid entry.
			 */
			ret = iproc_pcie_ob_write(pcie, window_idx, size_idx,
						  axi_addr, pci_addr);
			if (ret)
				goto err_ob;

			size -= window_size;
			if (size == 0)
				return 0;

			/*
			 * If we are here, we are done with the current window,
			 * but not yet finished all mappings.  Need to move on
			 * to the next window.
			 */
			axi_addr += window_size;
			pci_addr += window_size;
			break;
		}
	}

err_ob:
	dev_err(dev, "unable to configure outbound mapping\n");
	dev_err(dev,
		"axi %pap, axi offset %pap, pci %pap, res size %pap\n",
		&axi_addr, &ob->axi_offset, &pci_addr, &size);

	return ret;
}

static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
				 struct list_head *resources)
{
	struct device *dev = pcie->dev;
	struct resource_entry *window;
	int ret;

	resource_list_for_each_entry(window, resources) {
		struct resource *res = window->res;
		u64 res_type = resource_type(res);

		switch (res_type) {
		case IORESOURCE_IO:
		case IORESOURCE_BUS:
			break;
		case IORESOURCE_MEM:
			ret = iproc_pcie_setup_ob(pcie, res->start,
						  res->start - window->offset,
						  resource_size(res));
			if (ret)
				return ret;
			break;
		default:
			dev_err(dev, "invalid resource %pR\n", res);
			return -EINVAL;
		}
	}

	return 0;
}

static inline bool iproc_pcie_ib_is_in_use(struct iproc_pcie *pcie,
					   int region_idx)
{
	const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
	u32 val;

	val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx));

	return !!(val & (BIT(ib_map->nr_sizes) - 1));
}

static inline bool iproc_pcie_ib_check_type(const struct iproc_pcie_ib_map *ib_map,
					    enum iproc_pcie_ib_map_type type)
{
	return ib_map->type == type;
}

static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
			       int size_idx, int nr_windows, u64 axi_addr,
			       u64 pci_addr, resource_size_t size)
{
	struct device *dev = pcie->dev;
	const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
	u16 iarr_offset, imap_offset;
	u32 val;
	int window_idx;

	iarr_offset = iproc_pcie_reg_offset(pcie,
					    MAP_REG(IPROC_PCIE_IARR0,
						    region_idx));
	imap_offset = iproc_pcie_reg_offset(pcie,
					    MAP_REG(IPROC_PCIE_IMAP0,
						    region_idx));
	if (iproc_pcie_reg_is_invalid(iarr_offset) ||
	    iproc_pcie_reg_is_invalid(imap_offset))
		return -EINVAL;

	dev_dbg(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n",
		region_idx, iarr_offset, &axi_addr, &pci_addr);

	/*
	 * Program the IARR registers.  The upper 32-bit IARR register is
	 * always right after the lower 32-bit IARR register.
	 */
	writel(lower_32_bits(pci_addr) | BIT(size_idx),
	       pcie->base + iarr_offset);
	writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4);

	dev_dbg(dev, "iarr lo 0x%x iarr hi 0x%x\n",
		readl(pcie->base + iarr_offset),
		readl(pcie->base + iarr_offset + 4));

	/*
	 * Now program the IMAP registers.  Each IARR region may have one or
	 * more IMAP windows.
	 */
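	/*
	 * Each of the nr_windows IMAP windows covers an equal share of the
	 * region, so the per-window size below is size / nr_windows
	 * (nr_windows is a power of two, hence the ilog2() shift).  For
	 * example (hypothetical values): an 8 MB region with 8 windows
	 * yields 1 MB per window, with axi_addr advancing by 1 MB per
	 * iteration.
	 */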
	size >>= ilog2(nr_windows);
	for (window_idx = 0; window_idx < nr_windows; window_idx++) {
		val = readl(pcie->base + imap_offset);
		val |= lower_32_bits(axi_addr) | IMAP_VALID;
		writel(val, pcie->base + imap_offset);
		writel(upper_32_bits(axi_addr),
		       pcie->base + imap_offset + ib_map->imap_addr_offset);

		dev_dbg(dev, "imap window [%d] lo 0x%x hi 0x%x\n",
			window_idx, readl(pcie->base + imap_offset),
			readl(pcie->base + imap_offset +
			      ib_map->imap_addr_offset));

		imap_offset += ib_map->imap_window_offset;
		axi_addr += size;
	}

	return 0;
}

static int iproc_pcie_setup_ib(struct iproc_pcie *pcie,
			       struct resource_entry *entry,
			       enum iproc_pcie_ib_map_type type)
{
	struct device *dev = pcie->dev;
	struct iproc_pcie_ib *ib = &pcie->ib;
	int ret;
	unsigned int region_idx, size_idx;
	u64 axi_addr = entry->res->start;
	u64 pci_addr = entry->res->start - entry->offset;
	resource_size_t size = resource_size(entry->res);

	/* iterate through all IARR mapping regions */
	for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) {
		const struct iproc_pcie_ib_map *ib_map =
			&pcie->ib_map[region_idx];

		/*
		 * If current inbound region is already in use or not a
		 * compatible type, move on to the next.
		 */
		if (iproc_pcie_ib_is_in_use(pcie, region_idx) ||
		    !iproc_pcie_ib_check_type(ib_map, type))
			continue;

		/* iterate through all supported region sizes to find a match */
		for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) {
			resource_size_t region_size =
				ib_map->region_sizes[size_idx] *
				ib_map->size_unit;

			if (size != region_size)
				continue;

			if (!IS_ALIGNED(axi_addr, region_size) ||
			    !IS_ALIGNED(pci_addr, region_size)) {
				dev_err(dev,
					"axi %pap or pci %pap not aligned\n",
					&axi_addr, &pci_addr);
				return -EINVAL;
			}

			/* Match found!  Program IARR and all IMAP windows. */
			ret = iproc_pcie_ib_write(pcie, region_idx, size_idx,
						  ib_map->nr_windows, axi_addr,
						  pci_addr, size);
			if (ret)
				goto err_ib;
			else
				return 0;
		}
	}
	ret = -EINVAL;

err_ib:
	dev_err(dev, "unable to configure inbound mapping\n");
	dev_err(dev, "axi %pap, pci %pap, res size %pap\n",
		&axi_addr, &pci_addr, &size);

	return ret;
}

static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct resource_entry *entry;
	int ret = 0;

	resource_list_for_each_entry(entry, &host->dma_ranges) {
		/* Each range entry corresponds to an inbound mapping region */
		ret = iproc_pcie_setup_ib(pcie, entry, IPROC_PCIE_IB_MAP_MEM);
		if (ret)
			break;
	}

	return ret;
}

static void iproc_pcie_invalidate_mapping(struct iproc_pcie *pcie)
{
	struct iproc_pcie_ib *ib = &pcie->ib;
	struct iproc_pcie_ob *ob = &pcie->ob;
	int idx;

	if (pcie->ep_is_internal)
		return;

	if (pcie->need_ob_cfg) {
		/* iterate through all OARR mapping regions */
		for (idx = ob->nr_windows - 1; idx >= 0; idx--) {
			iproc_pcie_write_reg(pcie,
					     MAP_REG(IPROC_PCIE_OARR0, idx), 0);
		}
	}

	if (pcie->need_ib_cfg) {
		/* iterate through all IARR mapping regions */
		for (idx = 0; idx < ib->nr_regions; idx++) {
			iproc_pcie_write_reg(pcie,
					     MAP_REG(IPROC_PCIE_IARR0, idx), 0);
		}
	}
}

static int iproc_pcie_get_msi(struct iproc_pcie *pcie,
			      struct device_node *msi_node,
			      u64 *msi_addr)
{
	struct device *dev = pcie->dev;
	int ret;
	struct resource res;

	/*
	 * Check if 'msi-map' points to ARM GICv3 ITS, which is the only
	 * supported external MSI controller that requires steering.
	 */
	if (!of_device_is_compatible(msi_node, "arm,gic-v3-its")) {
		dev_err(dev, "unable to find compatible MSI controller\n");
		return -ENODEV;
	}

	/* derive GITS_TRANSLATER address from GICv3 */
	ret = of_address_to_resource(msi_node, 0, &res);
	if (ret < 0) {
		dev_err(dev, "unable to obtain MSI controller resources\n");
		return ret;
	}

	*msi_addr = res.start + GITS_TRANSLATER;
	return 0;
}

static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
{
	int ret;
	struct resource_entry entry;

	memset(&entry, 0, sizeof(entry));
	entry.res = &entry.__res;

	msi_addr &= ~(SZ_32K - 1);
	entry.res->start = msi_addr;
	entry.res->end = msi_addr + SZ_32K - 1;

	ret = iproc_pcie_setup_ib(pcie, &entry, IPROC_PCIE_IB_MAP_IO);
	return ret;
}

static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr,
					 bool enable)
{
	u32 val;

	if (!enable) {
		/*
		 * Disable PAXC MSI steering.  All write transfers will be
		 * treated as non-MSI transfers.
		 */
		val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
		val &= ~MSI_ENABLE_CFG;
		iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
		return;
	}

	/*
	 * Program bits [43:13] of the address of the GITS_TRANSLATER register
	 * into bits [30:0] of the MSI base address register.  In fact, in all
	 * iProc based SoCs, all I/O register bases are well below the 32-bit
	 * boundary, so we can safely assume bits [43:32] are always zeros.
	 */
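	/*
	 * Worked example (hypothetical address): for a GITS_TRANSLATER at
	 * 0x63c30000, 0x63c30000 >> 13 = 0x31e18 is written below, i.e. the
	 * base address is expressed at the same 8 KB (1 << 13) granularity
	 * as the default MSI window size programmed afterwards.
	 */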
	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_BASE_ADDR,
			     (u32)(msi_addr >> 13));

	/* use a default 8K window size */
	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_WINDOW_SIZE, 0);

	/* steer MSI to GICv3 ITS */
	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_GIC_MODE);
	val |= GIC_V3_CFG;
	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_GIC_MODE, val);

	/*
	 * Program bits [43:2] of the address of the GITS_TRANSLATER register
	 * into the iProc MSI address registers.
	 */
	msi_addr >>= 2;
	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_HI,
			     upper_32_bits(msi_addr));
	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_LO,
			     lower_32_bits(msi_addr));

	/* enable MSI */
	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
	val |= MSI_ENABLE_CFG;
	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
}

static int iproc_pcie_msi_steer(struct iproc_pcie *pcie,
				struct device_node *msi_node)
{
	struct device *dev = pcie->dev;
	int ret;
	u64 msi_addr;

	ret = iproc_pcie_get_msi(pcie, msi_node, &msi_addr);
	if (ret < 0) {
		dev_err(dev, "msi steering failed\n");
		return ret;
	}

	switch (pcie->type) {
	case IPROC_PCIE_PAXB_V2:
		ret = iproc_pcie_paxb_v2_msi_steer(pcie, msi_addr);
		if (ret)
			return ret;
		break;
	case IPROC_PCIE_PAXC_V2:
		iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr, true);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
{
	struct device_node *msi_node;
	int ret;

	/*
	 * Either the "msi-parent" or the "msi-map" phandle needs to exist
	 * for us to obtain the MSI node.
	 */
	msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0);
	if (!msi_node) {
		const __be32 *msi_map = NULL;
		int len;
		u32 phandle;

		msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len);
		if (!msi_map)
			return -ENODEV;

		phandle = be32_to_cpup(msi_map + 1);
		msi_node = of_find_node_by_phandle(phandle);
		if (!msi_node)
			return -ENODEV;
	}

	/*
	 * Certain revisions of the iProc PCIe controller require additional
	 * configuration to steer the MSI writes towards an external MSI
	 * controller.
	 */
	if (pcie->need_msi_steer) {
		ret = iproc_pcie_msi_steer(pcie, msi_node);
		if (ret)
			goto out_put_node;
	}

	/*
	 * If another MSI controller is being used, the call below should fail
	 * but that is okay.
	 */
	ret = iproc_msi_init(pcie, msi_node);

out_put_node:
	of_node_put(msi_node);
	return ret;
}

static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
{
	iproc_msi_exit(pcie);
}

static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
{
	struct device *dev = pcie->dev;
	unsigned int reg_idx;
	const u16 *regs;

	switch (pcie->type) {
	case IPROC_PCIE_PAXB_BCMA:
		regs = iproc_pcie_reg_paxb_bcma;
		break;
	case IPROC_PCIE_PAXB:
		regs = iproc_pcie_reg_paxb;
		pcie->has_apb_err_disable = true;
		if (pcie->need_ob_cfg) {
			pcie->ob_map = paxb_ob_map;
			pcie->ob.nr_windows = ARRAY_SIZE(paxb_ob_map);
		}
		break;
	case IPROC_PCIE_PAXB_V2:
		regs = iproc_pcie_reg_paxb_v2;
		pcie->iproc_cfg_read = true;
		pcie->has_apb_err_disable = true;
		if (pcie->need_ob_cfg) {
			pcie->ob_map = paxb_v2_ob_map;
			pcie->ob.nr_windows = ARRAY_SIZE(paxb_v2_ob_map);
		}
		pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map);
		pcie->ib_map = paxb_v2_ib_map;
		pcie->need_msi_steer = true;
		dev_warn(dev, "reads of config registers that contain %#x return incorrect data\n",
			 CFG_RETRY_STATUS);
		break;
	case IPROC_PCIE_PAXC:
		regs = iproc_pcie_reg_paxc;
		pcie->ep_is_internal = true;
		pcie->iproc_cfg_read = true;
		pcie->rej_unconfig_pf = true;
		break;
	case IPROC_PCIE_PAXC_V2:
		regs = iproc_pcie_reg_paxc_v2;
		pcie->ep_is_internal = true;
		pcie->iproc_cfg_read = true;
		pcie->rej_unconfig_pf = true;
		pcie->need_msi_steer = true;
		break;
	default:
		dev_err(dev, "incompatible iProc PCIe interface\n");
		return -EINVAL;
	}

	pcie->reg_offsets = devm_kcalloc(dev, IPROC_PCIE_MAX_NUM_REG,
					 sizeof(*pcie->reg_offsets),
					 GFP_KERNEL);
	if (!pcie->reg_offsets)
		return -ENOMEM;

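	/*
	 * In the register tables above, a value of 0 means "register not
	 * present".  Offset 0 is only a real offset for IPROC_PCIE_CLK_CTRL
	 * (index 0), so that entry is special-cased below; PAXC v2 is the
	 * variant without a CLK_CTRL register.
	 */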
	/* go through the register table and populate all valid registers */
	pcie->reg_offsets[0] = (pcie->type == IPROC_PCIE_PAXC_V2) ?
		IPROC_PCIE_REG_INVALID : regs[0];
	for (reg_idx = 1; reg_idx < IPROC_PCIE_MAX_NUM_REG; reg_idx++)
		pcie->reg_offsets[reg_idx] = regs[reg_idx] ?
			regs[reg_idx] : IPROC_PCIE_REG_INVALID;

	return 0;
}

int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
{
	struct device *dev;
	int ret;
	struct pci_dev *pdev;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);

	dev = pcie->dev;

	ret = iproc_pcie_rev_init(pcie);
	if (ret) {
		dev_err(dev, "unable to initialize controller parameters\n");
		return ret;
	}

	ret = phy_init(pcie->phy);
	if (ret) {
		dev_err(dev, "unable to initialize PCIe PHY\n");
		return ret;
	}

	ret = phy_power_on(pcie->phy);
	if (ret) {
		dev_err(dev, "unable to power on PCIe PHY\n");
		goto err_exit_phy;
	}

	iproc_pcie_perst_ctrl(pcie, true);
	iproc_pcie_perst_ctrl(pcie, false);

	iproc_pcie_invalidate_mapping(pcie);

	if (pcie->need_ob_cfg) {
		ret = iproc_pcie_map_ranges(pcie, res);
		if (ret) {
			dev_err(dev, "map failed\n");
			goto err_power_off_phy;
		}
	}

	if (pcie->need_ib_cfg) {
		ret = iproc_pcie_map_dma_ranges(pcie);
		if (ret && ret != -ENOENT)
			goto err_power_off_phy;
	}

	ret = iproc_pcie_check_link(pcie);
	if (ret) {
		dev_err(dev, "no PCIe EP device detected\n");
		goto err_power_off_phy;
	}

	iproc_pcie_enable(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		if (iproc_pcie_msi_enable(pcie))
			dev_info(dev, "not using iProc MSI\n");

	host->ops = &iproc_pcie_ops;
	host->sysdata = pcie;
	host->map_irq = pcie->map_irq;

	ret = pci_host_probe(host);
	if (ret < 0) {
		dev_err(dev, "failed to scan host: %d\n", ret);
		goto err_power_off_phy;
	}

	for_each_pci_bridge(pdev, host->bus) {
		if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
			pcie_print_link_status(pdev);
	}

	return 0;

err_power_off_phy:
	phy_power_off(pcie->phy);
err_exit_phy:
	phy_exit(pcie->phy);
	return ret;
}
EXPORT_SYMBOL(iproc_pcie_setup);

int iproc_pcie_remove(struct iproc_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);

	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);

	iproc_pcie_msi_disable(pcie);

	phy_power_off(pcie->phy);
	phy_exit(pcie->phy);

	return 0;
}
EXPORT_SYMBOL(iproc_pcie_remove);

/*
 * The MSI parsing logic in certain revisions of the Broadcom PAXC based root
 * complex does not work and needs to be disabled.
 */
static void quirk_paxc_disable_msi_parsing(struct pci_dev *pdev)
{
	struct iproc_pcie *pcie = iproc_data(pdev->bus);

	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
		iproc_pcie_paxc_v2_msi_steer(pcie, 0, false);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0,
			quirk_paxc_disable_msi_parsing);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802,
			quirk_paxc_disable_msi_parsing);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804,
			quirk_paxc_disable_msi_parsing);

static void quirk_paxc_bridge(struct pci_dev *pdev)
{
	/*
	 * The PCI config space is shared with the PAXC root port and the first
	 * Ethernet device.  So we need to work around this by telling the PCI
	 * code that the bridge is not an Ethernet device.
	 */
	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
		pdev->class = PCI_CLASS_BRIDGE_PCI << 8;

	/*
	 * MPSS is not being set properly (as it is currently 0).  This is
	 * because that area of the PCI config space is hardcoded to zero, and
	 * is not modifiable by firmware.  Set this to 2 (e.g., 512 byte MPS)
	 * so that the MPS can be set to the real max value.
	 */
	pdev->pcie_mpss = 2;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge);

MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver");
MODULE_LICENSE("GPL v2");