/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 * Layerscape PCIe driver
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/arch/fsl_serdes.h>
#include <pci.h>
#include <asm/io.h>
#include <errno.h>
#include <malloc.h>
#include <dm.h>
#include "pcie_layerscape.h"

DECLARE_GLOBAL_DATA_PTR;

LIST_HEAD(ls_pcie_list);

static unsigned int dbi_readl(struct ls_pcie *pcie, unsigned int offset)
{
	return in_le32(pcie->dbi + offset);
}

static void dbi_writel(struct ls_pcie *pcie, unsigned int value,
		       unsigned int offset)
{
	out_le32(pcie->dbi + offset, value);
}

static unsigned int ctrl_readl(struct ls_pcie *pcie, unsigned int offset)
{
	if (pcie->big_endian)
		return in_be32(pcie->ctrl + offset);
	else
		return in_le32(pcie->ctrl + offset);
}

static void ctrl_writel(struct ls_pcie *pcie, unsigned int value,
			unsigned int offset)
{
	if (pcie->big_endian)
		out_be32(pcie->ctrl + offset, value);
	else
		out_le32(pcie->ctrl + offset, value);
}

static int ls_pcie_ltssm(struct ls_pcie *pcie)
{
	u32 state;
	uint svr;

	svr = get_svr();
	if (((svr >> SVR_VAR_PER_SHIFT) & SVR_LS102XA_MASK) == SVR_LS102XA) {
		state = ctrl_readl(pcie, LS1021_PEXMSCPORTSR(pcie->idx));
		state = (state >> LS1021_LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK;
	} else {
		state = ctrl_readl(pcie, PCIE_PF_DBG) & LTSSM_STATE_MASK;
	}

	return state;
}

static int ls_pcie_link_up(struct ls_pcie *pcie)
{
	int ltssm;

	ltssm = ls_pcie_ltssm(pcie);
	if (ltssm < LTSSM_PCIE_L0)
		return 0;

	return 1;
}

static void ls_pcie_cfg0_set_busdev(struct ls_pcie *pcie, u32 busdev)
{
	dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
		   PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, busdev, PCIE_ATU_LOWER_TARGET);
}

static void ls_pcie_cfg1_set_busdev(struct ls_pcie *pcie, u32 busdev)
{
	dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
		   PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, busdev, PCIE_ATU_LOWER_TARGET);
}

static void ls_pcie_atu_outbound_set(struct ls_pcie *pcie, int idx, int type,
				     u64 phys, u64 bus_addr, pci_size_t size)
{
	dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | idx, PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, (u32)phys, PCIE_ATU_LOWER_BASE);
	dbi_writel(pcie, phys >> 32, PCIE_ATU_UPPER_BASE);
	dbi_writel(pcie, (u32)phys + size - 1, PCIE_ATU_LIMIT);
	dbi_writel(pcie, (u32)bus_addr, PCIE_ATU_LOWER_TARGET);
	dbi_writel(pcie, bus_addr >> 32, PCIE_ATU_UPPER_TARGET);
	dbi_writel(pcie, type, PCIE_ATU_CR1);
	dbi_writel(pcie, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
}

/* Use BAR match mode and MEM type as default */
static void ls_pcie_atu_inbound_set(struct ls_pcie *pcie, int idx,
				    int bar, u64 phys)
{
	dbi_writel(pcie, PCIE_ATU_REGION_INBOUND | idx, PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, (u32)phys, PCIE_ATU_LOWER_TARGET);
	dbi_writel(pcie, phys >> 32, PCIE_ATU_UPPER_TARGET);
	dbi_writel(pcie, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
	dbi_writel(pcie, PCIE_ATU_ENABLE | PCIE_ATU_BAR_MODE_ENABLE |
		   PCIE_ATU_BAR_NUM(bar), PCIE_ATU_CR2);
}

static void ls_pcie_dump_atu(struct ls_pcie *pcie)
{
	int i;

	for (i = 0; i < PCIE_ATU_REGION_NUM; i++) {
		dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | i,
			   PCIE_ATU_VIEWPORT);
		debug("iATU%d:\n", i);
		debug("\tLOWER PHYS 0x%08x\n",
		      dbi_readl(pcie, PCIE_ATU_LOWER_BASE));
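
/*
 * Note on the outbound iATU programming above (a descriptive sketch of the
 * translation model, not extra driver logic): each outbound window written
 * by ls_pcie_atu_outbound_set() catches CPU accesses that fall between
 * LOWER/UPPER_BASE and LIMIT, re-bases them at LOWER/UPPER_TARGET and emits
 * them on the link with the TLP type programmed into CR1.  With hypothetical
 * values such as
 *
 *	phys = 0x4000000000, size = 0x1000, bus_addr = 0x0, type = CFG0
 *
 * a 32-bit read of 0x4000000008 would leave the controller as a type-0
 * configuration read of register offset 0x08, addressed to whichever
 * bus/device/function was last written to PCIE_ATU_LOWER_TARGET by
 * ls_pcie_cfg0_set_busdev().
 */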
debug("\tUPPER PHYS 0x%08x\n", 126 dbi_readl(pcie, PCIE_ATU_UPPER_BASE)); 127 debug("\tLOWER BUS 0x%08x\n", 128 dbi_readl(pcie, PCIE_ATU_LOWER_TARGET)); 129 debug("\tUPPER BUS 0x%08x\n", 130 dbi_readl(pcie, PCIE_ATU_UPPER_TARGET)); 131 debug("\tLIMIT 0x%08x\n", 132 readl(pcie->dbi + PCIE_ATU_LIMIT)); 133 debug("\tCR1 0x%08x\n", 134 dbi_readl(pcie, PCIE_ATU_CR1)); 135 debug("\tCR2 0x%08x\n", 136 dbi_readl(pcie, PCIE_ATU_CR2)); 137 } 138 } 139 140 static void ls_pcie_setup_atu(struct ls_pcie *pcie) 141 { 142 struct pci_region *io, *mem, *pref; 143 unsigned long long offset = 0; 144 int idx = 0; 145 uint svr; 146 147 svr = get_svr(); 148 if (((svr >> SVR_VAR_PER_SHIFT) & SVR_LS102XA_MASK) == SVR_LS102XA) { 149 offset = LS1021_PCIE_SPACE_OFFSET + 150 LS1021_PCIE_SPACE_SIZE * pcie->idx; 151 } 152 153 /* ATU 0 : OUTBOUND : CFG0 */ 154 ls_pcie_atu_outbound_set(pcie, PCIE_ATU_REGION_INDEX0, 155 PCIE_ATU_TYPE_CFG0, 156 pcie->cfg_res.start + offset, 157 0, 158 fdt_resource_size(&pcie->cfg_res) / 2); 159 /* ATU 1 : OUTBOUND : CFG1 */ 160 ls_pcie_atu_outbound_set(pcie, PCIE_ATU_REGION_INDEX1, 161 PCIE_ATU_TYPE_CFG1, 162 pcie->cfg_res.start + offset + 163 fdt_resource_size(&pcie->cfg_res) / 2, 164 0, 165 fdt_resource_size(&pcie->cfg_res) / 2); 166 167 pci_get_regions(pcie->bus, &io, &mem, &pref); 168 idx = PCIE_ATU_REGION_INDEX1 + 1; 169 170 /* Fix the pcie memory map for LS2088A series SoCs */ 171 svr = (svr >> SVR_VAR_PER_SHIFT) & 0xFFFFFE; 172 if (svr == SVR_LS2088A || svr == SVR_LS2084A || 173 svr == SVR_LS2048A || svr == SVR_LS2044A) { 174 if (io) 175 io->phys_start = (io->phys_start & 176 (PCIE_PHYS_SIZE - 1)) + 177 LS2088A_PCIE1_PHYS_ADDR + 178 LS2088A_PCIE_PHYS_SIZE * pcie->idx; 179 if (mem) 180 mem->phys_start = (mem->phys_start & 181 (PCIE_PHYS_SIZE - 1)) + 182 LS2088A_PCIE1_PHYS_ADDR + 183 LS2088A_PCIE_PHYS_SIZE * pcie->idx; 184 if (pref) 185 pref->phys_start = (pref->phys_start & 186 (PCIE_PHYS_SIZE - 1)) + 187 LS2088A_PCIE1_PHYS_ADDR + 188 LS2088A_PCIE_PHYS_SIZE * pcie->idx; 189 } 190 191 if (io) 192 /* ATU : OUTBOUND : IO */ 193 ls_pcie_atu_outbound_set(pcie, idx++, 194 PCIE_ATU_TYPE_IO, 195 io->phys_start + offset, 196 io->bus_start, 197 io->size); 198 199 if (mem) 200 /* ATU : OUTBOUND : MEM */ 201 ls_pcie_atu_outbound_set(pcie, idx++, 202 PCIE_ATU_TYPE_MEM, 203 mem->phys_start + offset, 204 mem->bus_start, 205 mem->size); 206 207 if (pref) 208 /* ATU : OUTBOUND : pref */ 209 ls_pcie_atu_outbound_set(pcie, idx++, 210 PCIE_ATU_TYPE_MEM, 211 pref->phys_start + offset, 212 pref->bus_start, 213 pref->size); 214 215 ls_pcie_dump_atu(pcie); 216 } 217 218 /* Return 0 if the address is valid, -errno if not valid */ 219 static int ls_pcie_addr_valid(struct ls_pcie *pcie, pci_dev_t bdf) 220 { 221 struct udevice *bus = pcie->bus; 222 223 if (!pcie->enabled) 224 return -ENXIO; 225 226 if (PCI_BUS(bdf) < bus->seq) 227 return -EINVAL; 228 229 if ((PCI_BUS(bdf) > bus->seq) && (!ls_pcie_link_up(pcie))) 230 return -EINVAL; 231 232 if (PCI_BUS(bdf) <= (bus->seq + 1) && (PCI_DEV(bdf) > 0)) 233 return -EINVAL; 234 235 return 0; 236 } 237 238 void *ls_pcie_conf_address(struct ls_pcie *pcie, pci_dev_t bdf, 239 int offset) 240 { 241 struct udevice *bus = pcie->bus; 242 u32 busdev; 243 244 if (PCI_BUS(bdf) == bus->seq) 245 return pcie->dbi + offset; 246 247 busdev = PCIE_ATU_BUS(PCI_BUS(bdf)) | 248 PCIE_ATU_DEV(PCI_DEV(bdf)) | 249 PCIE_ATU_FUNC(PCI_FUNC(bdf)); 250 251 if (PCI_BUS(bdf) == bus->seq + 1) { 252 ls_pcie_cfg0_set_busdev(pcie, busdev); 253 return pcie->cfg0 + offset; 254 } else { 255 

static int ls_pcie_read_config(struct udevice *bus, pci_dev_t bdf,
			       uint offset, ulong *valuep,
			       enum pci_size_t size)
{
	struct ls_pcie *pcie = dev_get_priv(bus);
	void *address;

	if (ls_pcie_addr_valid(pcie, bdf)) {
		*valuep = pci_get_ff(size);
		return 0;
	}

	address = ls_pcie_conf_address(pcie, bdf, offset);

	switch (size) {
	case PCI_SIZE_8:
		*valuep = readb(address);
		return 0;
	case PCI_SIZE_16:
		*valuep = readw(address);
		return 0;
	case PCI_SIZE_32:
		*valuep = readl(address);
		return 0;
	default:
		return -EINVAL;
	}
}

static int ls_pcie_write_config(struct udevice *bus, pci_dev_t bdf,
				uint offset, ulong value,
				enum pci_size_t size)
{
	struct ls_pcie *pcie = dev_get_priv(bus);
	void *address;

	if (ls_pcie_addr_valid(pcie, bdf))
		return 0;

	address = ls_pcie_conf_address(pcie, bdf, offset);

	switch (size) {
	case PCI_SIZE_8:
		writeb(value, address);
		return 0;
	case PCI_SIZE_16:
		writew(value, address);
		return 0;
	case PCI_SIZE_32:
		writel(value, address);
		return 0;
	default:
		return -EINVAL;
	}
}

/* Clear multi-function bit */
static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
{
	writeb(PCI_HEADER_TYPE_BRIDGE, pcie->dbi + PCI_HEADER_TYPE);
}

/* Fix class value */
static void ls_pcie_fix_class(struct ls_pcie *pcie)
{
	writew(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
}

/* Drop MSG TLP except for Vendor MSG */
static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
{
	u32 val;

	val = dbi_readl(pcie, PCIE_STRFMR1);
	val &= 0xDFFFFFFF;
	dbi_writel(pcie, val, PCIE_STRFMR1);
}

/* Disable all BARs in RC mode */
static void ls_pcie_disable_bars(struct ls_pcie *pcie)
{
	u32 sriov;

	sriov = in_le32(pcie->dbi + PCIE_SRIOV);

	/*
	 * TODO: For a PCIe controller with SR-IOV, the method to disable
	 * BARs is different and more complex, so it will be added later.
	 */
	if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV)
		return;

	dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_0);
	dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_1);
	dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_ROM_ADDRESS1);
}
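
/*
 * Background for the RC bring-up sequence below (explanatory only; the
 * register behaviour described here is the usual DesignWare convention the
 * driver appears to rely on, not something this file defines):
 * PCIE_DBI_RO_WR_EN temporarily unlocks configuration-header fields that
 * are normally read-only through DBI, which is what lets ls_pcie_fix_class()
 * and ls_pcie_clear_multifunction() rewrite the class code and header type;
 * and the writes through PCIE_CS2_OFFSET in ls_pcie_disable_bars() land in
 * the shadow BAR-mask view of the header, so writing zero there disables
 * the controller's own BARs while it acts as a root complex.
 */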

static void ls_pcie_setup_ctrl(struct ls_pcie *pcie)
{
	ls_pcie_setup_atu(pcie);

	dbi_writel(pcie, 1, PCIE_DBI_RO_WR_EN);
	ls_pcie_fix_class(pcie);
	ls_pcie_clear_multifunction(pcie);
	ls_pcie_drop_msg_tlp(pcie);
	dbi_writel(pcie, 0, PCIE_DBI_RO_WR_EN);

	ls_pcie_disable_bars(pcie);
}

static void ls_pcie_ep_setup_atu(struct ls_pcie *pcie)
{
	u64 phys = CONFIG_SYS_PCI_EP_MEMORY_BASE;

	/* ATU 0 : INBOUND : map BAR0 */
	ls_pcie_atu_inbound_set(pcie, 0, 0, phys);
	/* ATU 1 : INBOUND : map BAR1 */
	phys += PCIE_BAR1_SIZE;
	ls_pcie_atu_inbound_set(pcie, 1, 1, phys);
	/* ATU 2 : INBOUND : map BAR2 */
	phys += PCIE_BAR2_SIZE;
	ls_pcie_atu_inbound_set(pcie, 2, 2, phys);
	/* ATU 3 : INBOUND : map BAR4 */
	phys = CONFIG_SYS_PCI_EP_MEMORY_BASE + PCIE_BAR4_SIZE;
	ls_pcie_atu_inbound_set(pcie, 3, 4, phys);

	/* ATU 0 : OUTBOUND : map MEM */
	ls_pcie_atu_outbound_set(pcie, 0,
				 PCIE_ATU_TYPE_MEM,
				 pcie->cfg_res.start,
				 0,
				 CONFIG_SYS_PCI_MEMORY_SIZE);
}

/* BAR0 and BAR1 are 32-bit, BAR2 and BAR4 are 64-bit */
static void ls_pcie_ep_setup_bar(void *bar_base, int bar, u32 size)
{
	/* The smallest inbound window is 4KiB */
	if (size < 4 * 1024)
		return;

	switch (bar) {
	case 0:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_0);
		break;
	case 1:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_1);
		break;
	case 2:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_2);
		writel(0, bar_base + PCI_BASE_ADDRESS_3);
		break;
	case 4:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_4);
		writel(0, bar_base + PCI_BASE_ADDRESS_5);
		break;
	default:
		break;
	}
}

static void ls_pcie_ep_setup_bars(void *bar_base)
{
	/* BAR0 - 32-bit - 4KiB configuration */
	ls_pcie_ep_setup_bar(bar_base, 0, PCIE_BAR0_SIZE);
	/* BAR1 - 32-bit - 8KiB MSI-X */
	ls_pcie_ep_setup_bar(bar_base, 1, PCIE_BAR1_SIZE);
	/* BAR2 - 64-bit - 4KiB MEM descriptor */
	ls_pcie_ep_setup_bar(bar_base, 2, PCIE_BAR2_SIZE);
	/* BAR4 - 64-bit - 1MiB MEM */
	ls_pcie_ep_setup_bar(bar_base, 4, PCIE_BAR4_SIZE);
}

static void ls_pcie_ep_enable_cfg(struct ls_pcie *pcie)
{
	ctrl_writel(pcie, PCIE_CONFIG_READY, PCIE_PF_CONFIG);
}

static void ls_pcie_setup_ep(struct ls_pcie *pcie)
{
	u32 sriov;

	sriov = readl(pcie->dbi + PCIE_SRIOV);
	if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV) {
		int pf, vf;

		for (pf = 0; pf < PCIE_PF_NUM; pf++) {
			for (vf = 0; vf <= PCIE_VF_NUM; vf++) {
				ctrl_writel(pcie, PCIE_LCTRL0_VAL(pf, vf),
					    PCIE_PF_VF_CTRL);

				ls_pcie_ep_setup_bars(pcie->dbi);
				ls_pcie_ep_setup_atu(pcie);
			}
		}
		/* Disable CFG2 */
		ctrl_writel(pcie, 0, PCIE_PF_VF_CTRL);
	} else {
		ls_pcie_ep_setup_bars(pcie->dbi + PCIE_NO_SRIOV_BAR_BASE);
		ls_pcie_ep_setup_atu(pcie);
	}

	ls_pcie_ep_enable_cfg(pcie);
}
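
/*
 * Endpoint-mode layout established above, summarised for reference.  The
 * sizes are the PCIE_BAR*_SIZE macros and the offsets are what the inbound
 * ATU windows in ls_pcie_ep_setup_atu() imply; nothing new is configured
 * here:
 *
 *	BAR0  32-bit  PCIE_BAR0_SIZE  backed at EP memory base + 0
 *	BAR1  32-bit  PCIE_BAR1_SIZE  backed at EP memory base + PCIE_BAR1_SIZE
 *	BAR2  64-bit  PCIE_BAR2_SIZE  backed at EP memory base + PCIE_BAR1_SIZE
 *	                              + PCIE_BAR2_SIZE
 *	BAR4  64-bit  PCIE_BAR4_SIZE  backed at EP memory base + PCIE_BAR4_SIZE
 *
 * so a remote root complex writing into one of these BARs lands in local
 * memory at CONFIG_SYS_PCI_EP_MEMORY_BASE plus the offset shown above.
 */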

static int ls_pcie_probe(struct udevice *dev)
{
	struct ls_pcie *pcie = dev_get_priv(dev);
	const void *fdt = gd->fdt_blob;
	int node = dev_of_offset(dev);
	u8 header_type;
	u16 link_sta;
	bool ep_mode;
	uint svr;
	int ret;

	pcie->bus = dev;

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "dbi", &pcie->dbi_res);
	if (ret) {
		printf("ls-pcie: resource \"dbi\" not found\n");
		return ret;
	}

	pcie->idx = (pcie->dbi_res.start - PCIE_SYS_BASE_ADDR) / PCIE_CCSR_SIZE;

	list_add(&pcie->list, &ls_pcie_list);

	pcie->enabled = is_serdes_configured(PCIE_SRDS_PRTCL(pcie->idx));
	if (!pcie->enabled) {
		printf("PCIe%d: %s disabled\n", pcie->idx, dev->name);
		return 0;
	}

	pcie->dbi = map_physmem(pcie->dbi_res.start,
				fdt_resource_size(&pcie->dbi_res),
				MAP_NOCACHE);

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "lut", &pcie->lut_res);
	if (!ret)
		pcie->lut = map_physmem(pcie->lut_res.start,
					fdt_resource_size(&pcie->lut_res),
					MAP_NOCACHE);

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "ctrl", &pcie->ctrl_res);
	if (!ret)
		pcie->ctrl = map_physmem(pcie->ctrl_res.start,
					 fdt_resource_size(&pcie->ctrl_res),
					 MAP_NOCACHE);
	if (!pcie->ctrl)
		pcie->ctrl = pcie->lut;

	if (!pcie->ctrl) {
		printf("%s: ctrl/lut resource not found\n", dev->name);
		return -1;
	}

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "config", &pcie->cfg_res);
	if (ret) {
		printf("%s: resource \"config\" not found\n", dev->name);
		return ret;
	}

	/*
	 * Fix the pcie memory map address and PF control registers address
	 * for LS2088A series SoCs
	 */
	svr = get_svr();
	svr = (svr >> SVR_VAR_PER_SHIFT) & 0xFFFFFE;
	if (svr == SVR_LS2088A || svr == SVR_LS2084A ||
	    svr == SVR_LS2048A || svr == SVR_LS2044A) {
		pcie->cfg_res.start = LS2088A_PCIE1_PHYS_ADDR +
				      LS2088A_PCIE_PHYS_SIZE * pcie->idx;
		pcie->ctrl = pcie->lut + 0x40000;
	}

	pcie->cfg0 = map_physmem(pcie->cfg_res.start,
				 fdt_resource_size(&pcie->cfg_res),
				 MAP_NOCACHE);
	pcie->cfg1 = pcie->cfg0 + fdt_resource_size(&pcie->cfg_res) / 2;

	pcie->big_endian = fdtdec_get_bool(fdt, node, "big-endian");

	debug("%s dbi:0x%lx lut:0x%lx ctrl:0x%lx cfg0:0x%lx, big-endian:%d\n",
	      dev->name, (unsigned long)pcie->dbi, (unsigned long)pcie->lut,
	      (unsigned long)pcie->ctrl, (unsigned long)pcie->cfg0,
	      pcie->big_endian);

	header_type = readb(pcie->dbi + PCI_HEADER_TYPE);
	ep_mode = (header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL;
	printf("PCIe%u: %s %s", pcie->idx, dev->name,
	       ep_mode ? "Endpoint" : "Root Complex");

	if (ep_mode)
		ls_pcie_setup_ep(pcie);
	else
		ls_pcie_setup_ctrl(pcie);

	if (!ls_pcie_link_up(pcie)) {
		/* Let the user know there's no PCIe link */
		printf(": no link\n");
		return 0;
	}

	/* Print the negotiated PCIe link width and speed */
	link_sta = readw(pcie->dbi + PCIE_LINK_STA);
	printf(": x%d gen%d\n", (link_sta & PCIE_LINK_WIDTH_MASK) >> 4,
	       link_sta & PCIE_LINK_SPEED_MASK);

	return 0;
}

static const struct dm_pci_ops ls_pcie_ops = {
	.read_config = ls_pcie_read_config,
	.write_config = ls_pcie_write_config,
};

static const struct udevice_id ls_pcie_ids[] = {
	{ .compatible = "fsl,ls-pcie" },
	{ }
};

U_BOOT_DRIVER(pci_layerscape) = {
	.name = "pci_layerscape",
	.id = UCLASS_PCI,
	.of_match = ls_pcie_ids,
	.ops = &ls_pcie_ops,
	.probe = ls_pcie_probe,
	.priv_auto_alloc_size = sizeof(struct ls_pcie),
};
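
/*
 * For reference, a device tree node that binds this driver would look
 * roughly like the sketch below.  The unit address, reg values and cell
 * layout are placeholders only; the real values are SoC-specific and live
 * in the platform dtsi.  What the probe function above actually looks up
 * are the "fsl,ls-pcie" compatible, the reg-names strings and the optional
 * "big-endian" property.
 *
 *	pcie@3400000 {
 *		compatible = "fsl,ls-pcie";
 *		reg = <0x00 0x03400000 0x0 0x00100000   // dbi
 *		       0x00 0x03410000 0x0 0x00010000   // lut
 *		       0x00 0x03420000 0x0 0x00010000   // ctrl
 *		       0x40 0x00000000 0x0 0x20000000>; // config
 *		reg-names = "dbi", "lut", "ctrl", "config";
 *		big-endian;
 *	};
 */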