// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 * http://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>

#include "pcie-designware.h"

/* The struct imx6_pcie instance is stored as drvdata at probe time. */
#define to_imx6_pcie(x)	dev_get_drvdata((x)->dev)

/* SoC variants supported by this driver; selected via OF match data. */
enum imx6_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX7D,
};

/* Per-controller driver state, allocated once in probe(). */
struct imx6_pcie {
	struct dw_pcie *pci;		/* DesignWare PCIe core handle */
	int reset_gpio;			/* optional PERST# GPIO (may be invalid) */
	bool gpio_active_high;		/* polarity of reset_gpio */
	struct clk *pcie_bus;
	struct clk *pcie_phy;
	struct clk *pcie_inbound_axi;	/* used on i.MX6SX only */
	struct clk *pcie;
	struct regmap *iomuxc_gpr;	/* IOMUXC GPR syscon regmap */
	struct reset_control *pciephy_reset;	/* i.MX7D only */
	struct reset_control *apps_reset;	/* i.MX7D only */
	struct reset_control *turnoff_reset;	/* optional PME_Turn_Off reset */
	enum imx6_pcie_variants variant;
	/* PHY TX tuning values from DT, written to IOMUXC GPR8 */
	u32 tx_deemph_gen1;
	u32 tx_deemph_gen2_3p5db;
	u32 tx_deemph_gen2_6db;
	u32 tx_swing_full;
	u32 tx_swing_low;
	int link_gen;			/* maximum link speed: 1 or 2 */
	struct regulator *vpcie;	/* optional PCIe supply regulator */
};

/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
#define PHY_PLL_LOCK_WAIT_MAX_RETRIES	2000
#define PHY_PLL_LOCK_WAIT_USLEEP_MIN	50
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200

/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_LCR				0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK	0xf

#define PCIE_RC_LCSR				0x80

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
#define PCIE_PL_PFLR_LINK_STATE_MASK		(0x3f << 16)
#define PCIE_PL_PFLR_FORCE_LINK			(1 << 15)
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING	(1 << 29)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP		(1 << 4)

/*
 * PHY control/status registers: a bit-banged "capture address / capture
 * data / read / write" handshake used by the pcie_phy_* helpers below to
 * access the PHY's own (non-memory-mapped) register space.
 */
#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC 0
#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
#define PCIE_PHY_CTRL_WR_LOC 18
#define PCIE_PHY_CTRL_RD_LOC 19

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK_LOC 16

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD			0x10
#define PCIE_PHY_ATEOVRD_EN			(0x1 << 2)
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT	0
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK	0x1

#define PCIE_PHY_MPLL_OVRD_IN_LO		0x11
#define PCIE_PHY_MPLL_MULTIPLIER_SHIFT		2
#define PCIE_PHY_MPLL_MULTIPLIER_MASK		0x7f
#define PCIE_PHY_MPLL_MULTIPLIER_OVRD		(0x1 << 9)

#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN		(1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN		(1 << 3)

/*
 * Poll the PHY control-bus ack bit until it equals @exp_val.
 * Polls up to 10 times with 1us delays; returns 0 on match or
 * -ETIMEDOUT if the PHY never acknowledges.
 */
static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
		val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

/*
 * Latch @addr into the PHY control interface: present the address,
 * pulse "capture address", wait for ack to assert, then drop the
 * capture strobe and wait for ack to deassert.
 */
static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val;
	int ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	return pcie_phy_poll_ack(imx6_pcie, 0);
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val, phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* data is valid while ack is asserted; registers are 16 bits wide */
	val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
	*data = val & 0xffff;

	/* deassert Read signal */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);

	return pcie_phy_poll_ack(imx6_pcie, 0);
}

/*
 * Write @data to the 16-bit PHY register @addr using the capture-data /
 * write-strobe handshake. Each strobe transition is paired with an ack
 * poll, so a stuck PHY surfaces as -ETIMEDOUT rather than a silent
 * partial write.
 */
static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert cap data */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	/* assert wr signal */
	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack */
	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);

	return 0;
}

/*
 * Reset the PHY receiver path by briefly forcing the RX data/PLL
 * override enables on, then releasing them. Called after a failed
 * link-up attempt to put the PHY back into a known state.
 */
static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
	u32 tmp;

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);

	usleep_range(2000, 3000);

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
}

/* Added for PCI abort handling */
/*
 * ARM external-abort hook: config/IO accesses to absent devices raise
 * an external abort on i.MX6. Decode the faulting instruction; if it
 * was a load, fake an all-ones result (the PCI convention for "no
 * device") and skip the instruction instead of oopsing.
 * Returns 0 when the abort was handled, 1 to fall through to the
 * default fault handling.
 */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);
	unsigned long instr = *(unsigned long *)pc;
	int reg = (instr >> 12) & 15;	/* destination register field */

	/*
	 * If the instruction being executed was a read,
	 * make it look like it read all-ones.
	 */
	if ((instr & 0x0c100000) == 0x04100000) {
		unsigned long val;

		/* byte load gets 0xff, word load gets all-ones */
		if (instr & 0x00400000)
			val = 255;
		else
			val = -1;

		regs->uregs[reg] = val;
		regs->ARM_pc += 4;	/* skip the faulting instruction */
		return 0;
	}

	/* load halfword (extra load/store encoding): also return all-ones */
	if ((instr & 0x0e100090) == 0x00100090) {
		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
		return 0;
	}

	return 1;
}

/*
 * Put the PCIe core (and PHY, where applicable) into reset, using the
 * variant-specific mechanism, and drop the optional vpcie supply so a
 * subsequent deassert performs a full power-on sequence.
 */
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct device *dev = imx6_pcie->pci->dev;

	switch (imx6_pcie->variant) {
	case IMX7D:
		reset_control_assert(imx6_pcie->pciephy_reset);
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
		/* Force PCIe PHY reset */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST,
				   IMX6Q_GPR1_PCIE_SW_RST);
		break;
	case IMX6Q:
		/* power down the test logic and gate the reference clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
		break;
	}

	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		int ret = regulator_disable(imx6_pcie->vpcie);

		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}

/*
 * Enable the variant-specific PCIe reference clock path.
 * Returns 0 on success or a clk_prepare_enable() error code.
 */
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret = 0;

	switch (imx6_pcie->variant) {
	case IMX6SX:
		ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
		if (ret) {
			dev_err(dev, "unable to enable pcie_axi clock\n");
			break;
		}

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
		break;
	case IMX6QP:		/* FALLTHROUGH */
	case IMX6Q:
		/* power up core phy and enable ref clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
		/*
		 * the async reset input need ref clock to sync internally,
		 * when the ref clock comes after reset, internal synced
		 * reset time is too short, cannot meet the requirement.
		 * add one ~10us delay here.
		 */
		udelay(10);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
		break;
	case IMX7D:
		/* ref clock handled by the reset controller on i.MX7D */
		break;
	}

	return ret;
}

/*
 * Busy-wait (with sleeps) for the i.MX7D PCIe PHY PLL lock bit in
 * IOMUXC GPR22. Logs an error on timeout but does not fail hard.
 */
static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
{
	u32 val;
	unsigned int retries;
	struct device *dev = imx6_pcie->pci->dev;

	for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) {
		regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val);

		if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED)
			return;

		usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN,
			     PHY_PLL_LOCK_WAIT_USLEEP_MAX);
	}

	dev_err(dev, "PCIe PLL lock timeout\n");
}

/*
 * Bring the core out of reset: enable the vpcie supply (if present)
 * and all clocks, toggle the optional PERST# GPIO, then release the
 * variant-specific reset. On clock failure, unwind in reverse order
 * via the goto chain and drop the regulator again.
 */
static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
		ret = regulator_enable(imx6_pcie->vpcie);
		if (ret) {
			dev_err(dev, "failed to enable vpcie regulator: %d\n",
				ret);
			return;
		}
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
	if (ret) {
		dev_err(dev, "unable to enable pcie_phy clock\n");
		goto err_pcie_phy;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
	if (ret) {
		dev_err(dev, "unable to enable pcie_bus clock\n");
		goto err_pcie_bus;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie clock\n");
		goto err_pcie;
	}

	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie ref clock\n");
		goto err_ref_clk;
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);

	/* Some boards don't have PCIe reset GPIO. */
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					imx6_pcie->gpio_active_high);
		msleep(100);
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					!imx6_pcie->gpio_active_high);
	}

	switch (imx6_pcie->variant) {
	case IMX7D:
		reset_control_deassert(imx6_pcie->pciephy_reset);
		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST, 0);

		usleep_range(200, 500);
		break;
	case IMX6Q:		/* Nothing to do */
		break;
	}

	return;

err_ref_clk:
	clk_disable_unprepare(imx6_pcie->pcie);
err_pcie:
	clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
	clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		ret = regulator_disable(imx6_pcie->vpcie);
		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}

/*
 * Program variant-specific PHY settings through the IOMUXC GPR
 * registers, then mark the port as a Root Complex. The IMX6SX case
 * deliberately falls through into the common i.MX6 configuration.
 */
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
	switch (imx6_pcie->variant) {
	case IMX7D:
		/* clear the PHY refclk selection bit (GPR12) */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
				   IMX6SX_GPR12_PCIE_RX_EQ_2);
		/* FALLTHROUGH */
	default:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

		/* configure constant input signal to the pcie ctrl and phy */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

		/* TX de-emphasis/swing tuning from DT (defaults in probe) */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
				   imx6_pcie->tx_deemph_gen1 << 0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
				   imx6_pcie->tx_deemph_gen2_6db << 12);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_FULL,
				   imx6_pcie->tx_swing_full << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_LOW,
				   imx6_pcie->tx_swing_low << 25);
		break;
	}

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
}

/*
 * Reconfigure the PHY MPLL multiplier and reference clock divider when
 * the PHY reference clock is not the 125MHz the PHY defaults expect.
 * Supported rates: 125MHz (no-op), 100MHz and 200MHz.
 * Returns 0 on success, -EINVAL for an unsupported rate.
 */
static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
{
	unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
	int mult, div;
	u32 val;

	switch (phy_rate) {
	case 125000000:
		/*
		 * The default settings of the MPLL are for a 125MHz input
		 * clock, so no need to reconfigure anything in that case.
		 */
		return 0;
	case 100000000:
		mult = 25;
		div = 0;
		break;
	case 200000000:
		mult = 25;
		div = 1;
		break;
	default:
		dev_err(imx6_pcie->pci->dev,
			"Unsupported PHY reference clock rate %lu\n", phy_rate);
		return -EINVAL;
	}

	/* override the MPLL multiplier */
	pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
	val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
		 PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
	val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
	val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
	pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);

	/* override the reference clock divider */
	pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
	val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
		 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
	val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
	val |= PCIE_PHY_ATEOVRD_EN;
	pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);

	return 0;
}

/*
 * Wait for the data link to come up; on timeout, dump the PHY debug
 * registers at debug level and return -ETIMEDOUT.
 */
static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;

	/* check if the link is up or not */
	if (!dw_pcie_wait_for_link(pci))
		return 0;

	dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
	return -ETIMEDOUT;
}

/*
 * Wait for the hardware to clear the DIRECT_SPEED_CHANGE bit after a
 * directed speed change has been requested. Returns 0 when complete,
 * -EINVAL on timeout (~200 polls).
 */
static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		/* Test if the speed change finished. */
		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
			return 0;
		usleep_range(100, 1000);
	}

	dev_err(dev, "Speed change timeout\n");
	return -EINVAL;
}

/* Start LTSSM (link training) via GPR12 on i.MX6, via reset on i.MX7D. */
static void imx6_pcie_ltssm_enable(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	switch (imx6_pcie->variant) {
	case IMX6Q:
	case IMX6SX:
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2,
				   IMX6Q_GPR12_PCIE_CTL_2);
		break;
	case IMX7D:
		reset_control_deassert(imx6_pcie->apps_reset);
		break;
	}
}

/*
 * Train the link: force Gen1 first, start LTSSM, then (if configured)
 * perform a directed speed change up to Gen2. On any failure the PHY
 * is reset and the error is returned.
 */
static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	int ret;

	/*
	 * Force Gen1 operation when starting the link. In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all. This happens with PCIe switches.
	 */
	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
	dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

	/* Start LTSSM. */
	imx6_pcie_ltssm_enable(dev);

	ret = imx6_pcie_wait_for_link(imx6_pcie);
	if (ret)
		goto err_reset_phy;

	if (imx6_pcie->link_gen == 2) {
		/* Allow Gen2 mode after the link is up. */
		tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
		dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

		/*
		 * Start Directed Speed Change so the best possible
		 * speed both link partners support can be negotiated.
		 */
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		tmp |= PORT_LOGIC_SPEED_CHANGE;
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);

		if (imx6_pcie->variant != IMX7D) {
			/*
			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
			 * from i.MX6 family when no link speed transition
			 * occurs and we go Gen1 -> yep, Gen1. The difference
			 * is that, in such case, it will not be cleared by HW
			 * which will cause the following code to report false
			 * failure.
			 */

			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
			if (ret) {
				dev_err(dev, "Failed to bring link up!\n");
				goto err_reset_phy;
			}
		}

		/* Make sure link training is finished as well! */
		ret = imx6_pcie_wait_for_link(imx6_pcie);
		if (ret) {
			dev_err(dev, "Failed to bring link up!\n");
			goto err_reset_phy;
		}
	} else {
		dev_info(dev, "Link: Gen2 disabled\n");
	}

	/* report the negotiated speed from the Link Control/Status reg */
	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
	dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
	return 0;

err_reset_phy:
	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
	imx6_pcie_reset_phy(imx6_pcie);
	return ret;
}

/*
 * dw_pcie host_init callback: full reset/PHY/clock bring-up followed by
 * RC setup and link training. Link failure is not fatal here (the
 * establish_link return value is intentionally not propagated).
 */
static int imx6_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	imx6_setup_phy_mpll(imx6_pcie);
	dw_pcie_setup_rc(pp);
	imx6_pcie_establish_link(imx6_pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	return 0;
}

/* dw_pcie link_up callback: read link state from the PHY debug register. */
static int imx6_pcie_link_up(struct dw_pcie *pci)
{
	return dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1) &
			PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
}

static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
	.host_init = imx6_pcie_host_init,
};

/*
 * Register the root port with the DesignWare host core, wiring up the
 * MSI interrupt when CONFIG_PCI_MSI is enabled.
 */
static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
			      struct platform_device *pdev)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq <= 0) {
			dev_err(dev, "failed to get MSI irq\n");
			return -ENODEV;
		}
	}

	pp->ops = &imx6_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = imx6_pcie_link_up,
};

#ifdef CONFIG_PM_SLEEP
/*
 * Stop the LTSSM before suspend. Note IMX6Q is not handled here (falls
 * into the default error path); suspend is only entered for IMX7D.
 */
static void imx6_pcie_ltssm_disable(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	switch (imx6_pcie->variant) {
	case IMX6SX:
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0);
		break;
	case IMX7D:
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	default:
		dev_err(dev, "ltssm_disable not supported\n");
	}
}

/* Broadcast PME_Turn_Off by pulsing the optional "turnoff" reset line. */
static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
{
	reset_control_assert(imx6_pcie->turnoff_reset);
	reset_control_deassert(imx6_pcie->turnoff_reset);

	/*
	 * Components with an upstream port must respond to
	 * PME_Turn_Off with PME_TO_Ack but we can't check.
	 *
	 * The standard recommends a 1-10ms timeout after which to
	 * proceed anyway as if acks were received.
	 */
	usleep_range(1000, 10000);
}

/* Disable all PCIe clocks; on i.MX7D also switch the PHY refclk select. */
static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
{
	clk_disable_unprepare(imx6_pcie->pcie);
	clk_disable_unprepare(imx6_pcie->pcie_phy);
	clk_disable_unprepare(imx6_pcie->pcie_bus);

	if (imx6_pcie->variant == IMX7D) {
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
	}
}

/* System suspend: only implemented for i.MX7D; other variants no-op. */
static int imx6_pcie_suspend_noirq(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	if (imx6_pcie->variant != IMX7D)
		return 0;

	imx6_pcie_pm_turnoff(imx6_pcie);
	imx6_pcie_clk_disable(imx6_pcie);
	imx6_pcie_ltssm_disable(dev);

	return 0;
}

/*
 * System resume (i.MX7D only): redo the full bring-up and retrain the
 * link. A link-down on resume is logged but not treated as an error.
 */
static int imx6_pcie_resume_noirq(struct device *dev)
{
	int ret;
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
	struct pcie_port *pp = &imx6_pcie->pci->pp;

	if (imx6_pcie->variant != IMX7D)
		return 0;

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	dw_pcie_setup_rc(pp);

	ret = imx6_pcie_establish_link(imx6_pcie);
	if (ret < 0)
		dev_info(dev, "pcie link is down after resume.\n");

	return 0;
}
#endif

static const struct dev_pm_ops imx6_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
				      imx6_pcie_resume_noirq)
};

/*
 * Probe: gather DT resources (dbi registers, reset GPIO, clocks, resets,
 * GPR syscon, PHY tuning values, optional vpcie supply) and register the
 * host port. Defaults are applied for any absent optional DT property.
 */
static int imx6_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct imx6_pcie *imx6_pcie;
	struct resource *dbi_base;
	struct device_node *node = dev->of_node;
	int ret;

	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
	if (!imx6_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	imx6_pcie->pci = pci;
	imx6_pcie->variant =
		(enum imx6_pcie_variants)of_device_get_match_data(dev);

	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/* Fetch GPIOs */
	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
	imx6_pcie->gpio_active_high = of_property_read_bool(node,
						"reset-gpio-active-high");
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
				imx6_pcie->gpio_active_high ?
					GPIOF_OUT_INIT_HIGH :
					GPIOF_OUT_INIT_LOW,
				"PCIe reset");
		if (ret) {
			dev_err(dev, "unable to get reset gpio\n");
			return ret;
		}
	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
		return imx6_pcie->reset_gpio;
	}

	/* Fetch clocks */
	imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pcie_phy)) {
		dev_err(dev, "pcie_phy clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_phy);
	}

	imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(imx6_pcie->pcie_bus)) {
		dev_err(dev, "pcie_bus clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_bus);
	}

	imx6_pcie->pcie = devm_clk_get(dev, "pcie");
	if (IS_ERR(imx6_pcie->pcie)) {
		dev_err(dev, "pcie clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie);
	}

	/* Variant-specific clocks and reset lines */
	switch (imx6_pcie->variant) {
	case IMX6SX:
		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
							   "pcie_inbound_axi");
		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
			dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
			return PTR_ERR(imx6_pcie->pcie_inbound_axi);
		}
		break;
	case IMX7D:
		imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
									    "pciephy");
		if (IS_ERR(imx6_pcie->pciephy_reset)) {
			dev_err(dev, "Failed to get PCIEPHY reset control\n");
			return PTR_ERR(imx6_pcie->pciephy_reset);
		}

		imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
									 "apps");
		if (IS_ERR(imx6_pcie->apps_reset)) {
			dev_err(dev, "Failed to get PCIE APPS reset control\n");
			return PTR_ERR(imx6_pcie->apps_reset);
		}
		break;
	default:
		break;
	}

	/* Grab turnoff reset */
	imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
	if (IS_ERR(imx6_pcie->turnoff_reset)) {
		dev_err(dev, "Failed to get TURNOFF reset control\n");
		return PTR_ERR(imx6_pcie->turnoff_reset);
	}

	/* Grab GPR config register range */
	imx6_pcie->iomuxc_gpr =
		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
		dev_err(dev, "unable to find iomuxc registers\n");
		return PTR_ERR(imx6_pcie->iomuxc_gpr);
	}

	/* Grab PCIe PHY Tx Settings */
	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
				 &imx6_pcie->tx_deemph_gen1))
		imx6_pcie->tx_deemph_gen1 = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
				 &imx6_pcie->tx_deemph_gen2_3p5db))
		imx6_pcie->tx_deemph_gen2_3p5db = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
				 &imx6_pcie->tx_deemph_gen2_6db))
		imx6_pcie->tx_deemph_gen2_6db = 20;

	if (of_property_read_u32(node, "fsl,tx-swing-full",
				 &imx6_pcie->tx_swing_full))
		imx6_pcie->tx_swing_full = 127;

	if (of_property_read_u32(node, "fsl,tx-swing-low",
				 &imx6_pcie->tx_swing_low))
		imx6_pcie->tx_swing_low = 127;

	/* Limit link speed */
	ret = of_property_read_u32(node, "fsl,max-link-speed",
				   &imx6_pcie->link_gen);
	if (ret)
		imx6_pcie->link_gen = 1;

	/* vpcie is optional: absence is fine, but defer if not yet ready */
	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
	if (IS_ERR(imx6_pcie->vpcie)) {
		if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		imx6_pcie->vpcie = NULL;
	}

	platform_set_drvdata(pdev, imx6_pcie);

	ret = imx6_add_pcie_port(imx6_pcie, pdev);
	if (ret < 0)
		return ret;

	return 0;
}

static void imx6_pcie_shutdown(struct platform_device *pdev)
{
	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);

	/* bring down link, so bootloader gets clean state in case of reboot */
	imx6_pcie_assert_core_reset(imx6_pcie);
}

static const struct of_device_id imx6_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie",  .data = (void *)IMX6Q,  },
	{ .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
	{ .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
	{ .compatible = "fsl,imx7d-pcie",  .data = (void *)IMX7D,  },
	{},
};

static struct platform_driver imx6_pcie_driver = {
	.driver = {
		.name	= "imx6q-pcie",
		.of_match_table = imx6_pcie_of_match,
		.suppress_bind_attrs = true,
		.pm = &imx6_pcie_pm_ops,
	},
	.probe    = imx6_pcie_probe,
	.shutdown = imx6_pcie_shutdown,
};

static int __init imx6_pcie_init(void)
{
	/*
	 * Since probe() can be deferred we need to make sure that
	 * hook_fault_code is not called after __init memory is freed
	 * by kernel and since imx6q_pcie_abort_handler() is a no-op,
	 * we can install the handler here without risking it
	 * accessing some uninitialized driver state.
	 */
	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
			"external abort on non-linefetch");

	return platform_driver_register(&imx6_pcie_driver);
}
device_initcall(imx6_pcie_init);