// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 *		http://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>

#include "pcie-designware.h"

#define to_imx6_pcie(x)	dev_get_drvdata((x)->dev)

enum imx6_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX7D,
};

struct imx6_pcie {
	struct dw_pcie		*pci;
	int			reset_gpio;
	bool			gpio_active_high;
	struct clk		*pcie_bus;
	struct clk		*pcie_phy;
	struct clk		*pcie_inbound_axi;
	struct clk		*pcie;
	struct regmap		*iomuxc_gpr;
	struct reset_control	*pciephy_reset;
	struct reset_control	*apps_reset;
	struct reset_control	*turnoff_reset;
	enum imx6_pcie_variants	variant;
	u32			tx_deemph_gen1;
	u32			tx_deemph_gen2_3p5db;
	u32			tx_deemph_gen2_6db;
	u32			tx_swing_full;
	u32			tx_swing_low;
	int			link_gen;
	struct regulator	*vpcie;
};

/* Parameters for waiting for the PCIe PHY PLL to lock on i.MX7 */
#define PHY_PLL_LOCK_WAIT_MAX_RETRIES	2000
#define PHY_PLL_LOCK_WAIT_USLEEP_MIN	50
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200

/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_LCR				0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK	0xf

#define PCIE_RC_LCSR				0x80

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET			0x700
#define PCIE_PL_PFLR			(PL_OFFSET + 0x08)
#define PCIE_PL_PFLR_LINK_STATE_MASK	(0x3f << 16)
#define PCIE_PL_PFLR_FORCE_LINK		(1 << 15)
#define PCIE_PHY_DEBUG_R0		(PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1		(PL_OFFSET + 0x2c)

#define PCIE_PHY_CTRL			(PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC		0
#define PCIE_PHY_CTRL_CAP_ADR_LOC	16
#define PCIE_PHY_CTRL_CAP_DAT_LOC	17
#define PCIE_PHY_CTRL_WR_LOC		18
#define PCIE_PHY_CTRL_RD_LOC		19

#define PCIE_PHY_STAT			(PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK_LOC		16

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD			0x10
#define PCIE_PHY_ATEOVRD_EN			(0x1 << 2)
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT	0
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK	0x1

#define PCIE_PHY_MPLL_OVRD_IN_LO		0x11
#define PCIE_PHY_MPLL_MULTIPLIER_SHIFT		2
#define PCIE_PHY_MPLL_MULTIPLIER_MASK		0x7f
#define PCIE_PHY_MPLL_MULTIPLIER_OVRD		(0x1 << 9)

#define PCIE_PHY_RX_ASIC_OUT		0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)

#define PHY_RX_OVRD_IN_LO		0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN	(1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN	(1 << 3)
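
/*
 * Note on the accessors below (summary added for readability, derived
 * from the code itself rather than from a datasheet): the PHY registers
 * above are not memory-mapped.  They are reached indirectly through the
 * port logic registers PCIE_PHY_CTRL and PCIE_PHY_STAT: the address or
 * data word is written to PCIE_PHY_CTRL, the matching capture bit is
 * pulsed, and every step is confirmed by polling the ack bit in
 * PCIE_PHY_STAT before the read or write strobe is asserted.
 */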

static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
		val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val;
	int ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	return pcie_phy_poll_ack(imx6_pcie, 0);
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val, phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
	*data = val & 0xffff;

	/* deassert Read signal */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);

	return pcie_phy_poll_ack(imx6_pcie, 0);
}

static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert cap data */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	/* assert wr signal */
	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack */
	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);

	return 0;
}

static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
	u32 tmp;

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);

	usleep_range(2000, 3000);

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
}
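
/*
 * Background for the fault hook below (commentary added here, not part
 * of the original sources): on these SoCs a PCIe config access that is
 * not claimed by any device, e.g. while the link is down, can be raised
 * to the CPU as an external data abort instead of completing with the
 * all-ones value PCI software expects.  The handler, installed via
 * hook_fault_code() in imx6_pcie_init(), decodes the faulting ARM
 * instruction just far enough to recognise a load (the single data
 * transfer and halfword/signed load encodings), writes the "read as
 * all-ones" result into the saved register and skips the instruction,
 * turning the abort into a harmless fixup.
 */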

/* Added for PCI abort handling */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);
	unsigned long instr = *(unsigned long *)pc;
	int reg = (instr >> 12) & 15;

	/*
	 * If the instruction being executed was a read,
	 * make it look like it read all-ones.
	 */
	if ((instr & 0x0c100000) == 0x04100000) {
		unsigned long val;

		if (instr & 0x00400000)
			val = 255;
		else
			val = -1;

		regs->uregs[reg] = val;
		regs->ARM_pc += 4;
		return 0;
	}

	if ((instr & 0x0e100090) == 0x00100090) {
		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
		return 0;
	}

	return 1;
}

static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct device *dev = imx6_pcie->pci->dev;

	switch (imx6_pcie->variant) {
	case IMX7D:
		reset_control_assert(imx6_pcie->pciephy_reset);
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
		/* Force PCIe PHY reset */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST,
				   IMX6Q_GPR1_PCIE_SW_RST);
		break;
	case IMX6Q:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
		break;
	}

	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		int ret = regulator_disable(imx6_pcie->vpcie);

		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}

static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret = 0;

	switch (imx6_pcie->variant) {
	case IMX6SX:
		ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
		if (ret) {
			dev_err(dev, "unable to enable pcie_axi clock\n");
			break;
		}

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
		break;
	case IMX6QP:		/* FALLTHROUGH */
	case IMX6Q:
		/* power up core phy and enable ref clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
		/*
		 * The async reset input needs the ref clock to sync
		 * internally.  If the ref clock only comes up after the
		 * reset, the internally synced reset time is too short to
		 * meet the requirement, so add a ~10us delay here.
		 */
		udelay(10);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
		break;
	case IMX7D:
		break;
	}

	return ret;
}

static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
{
	u32 val;
	unsigned int retries;
	struct device *dev = imx6_pcie->pci->dev;

	for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) {
		regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val);

		if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED)
			return;

		usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN,
			     PHY_PLL_LOCK_WAIT_USLEEP_MAX);
	}

	dev_err(dev, "PCIe PLL lock timeout\n");
}

static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
		ret = regulator_enable(imx6_pcie->vpcie);
		if (ret) {
			dev_err(dev, "failed to enable vpcie regulator: %d\n",
				ret);
			return;
		}
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
	if (ret) {
		dev_err(dev, "unable to enable pcie_phy clock\n");
		goto err_pcie_phy;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
	if (ret) {
		dev_err(dev, "unable to enable pcie_bus clock\n");
		goto err_pcie_bus;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie clock\n");
		goto err_pcie;
	}

	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie ref clock\n");
		goto err_ref_clk;
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);

	/* Some boards don't have a PCIe reset GPIO. */
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					imx6_pcie->gpio_active_high);
		msleep(100);
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					!imx6_pcie->gpio_active_high);
	}

	switch (imx6_pcie->variant) {
	case IMX7D:
		reset_control_deassert(imx6_pcie->pciephy_reset);
		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST, 0);

		usleep_range(200, 500);
		break;
	case IMX6Q:		/* Nothing to do */
		break;
	}

	return;

err_ref_clk:
	clk_disable_unprepare(imx6_pcie->pcie);
err_pcie:
	clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
	clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		ret = regulator_disable(imx6_pcie->vpcie);
		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}

static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
	switch (imx6_pcie->variant) {
	case IMX7D:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
				   IMX6SX_GPR12_PCIE_RX_EQ_2);
		/* FALLTHROUGH */
	default:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

		/* configure constant input signal to the pcie ctrl and phy */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
				   imx6_pcie->tx_deemph_gen1 << 0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
				   imx6_pcie->tx_deemph_gen2_6db << 12);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_FULL,
				   imx6_pcie->tx_swing_full << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_LOW,
				   imx6_pcie->tx_swing_low << 25);
		break;
	}

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
}

static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
{
	unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
	int mult, div;
	u32 val;

	switch (phy_rate) {
	case 125000000:
		/*
		 * The default settings of the MPLL are for a 125MHz input
		 * clock, so no need to reconfigure anything in that case.
		 */
		return 0;
	/*
	 * mult/div below are chosen so that (phy_rate / 2^div) * mult ends
	 * up at the same MPLL rate as the default 125MHz configuration:
	 * 100MHz * 25 == (200MHz / 2) * 25.
	 */
	case 100000000:
		mult = 25;
		div = 0;
		break;
	case 200000000:
		mult = 25;
		div = 1;
		break;
	default:
		dev_err(imx6_pcie->pci->dev,
			"Unsupported PHY reference clock rate %lu\n", phy_rate);
		return -EINVAL;
	}

	pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
	val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
		 PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
	val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
	val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
	pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);

	pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
	val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
		 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
	val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
	val |= PCIE_PHY_ATEOVRD_EN;
	pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);

	return 0;
}

static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;

	/* check if the link is up or not */
	if (!dw_pcie_wait_for_link(pci))
		return 0;

	dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
	return -ETIMEDOUT;
}

static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		/* Test if the speed change finished. */
		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
			return 0;
		usleep_range(100, 1000);
	}

	dev_err(dev, "Speed change timeout\n");
	return -EINVAL;
}

static void imx6_pcie_ltssm_enable(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	switch (imx6_pcie->variant) {
	case IMX6Q:
	case IMX6SX:
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2,
				   IMX6Q_GPR12_PCIE_CTL_2);
		break;
	case IMX7D:
		reset_control_deassert(imx6_pcie->apps_reset);
		break;
	}
}

static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	int ret;

	/*
	 * Force Gen1 operation when starting the link.  If the link is
	 * started in Gen2 mode, there is a possibility that the devices on
	 * the bus will not be detected at all.  This happens with PCIe
	 * switches.
	 */
	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
	dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

	/* Start LTSSM. */
	imx6_pcie_ltssm_enable(dev);

	ret = imx6_pcie_wait_for_link(imx6_pcie);
	if (ret)
		goto err_reset_phy;

	if (imx6_pcie->link_gen == 2) {
		/* Allow Gen2 mode after the link is up. */
		tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
		dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

		/*
		 * Start Directed Speed Change so the best possible
		 * speed both link partners support can be negotiated.
		 */
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		tmp |= PORT_LOGIC_SPEED_CHANGE;
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);

		if (imx6_pcie->variant != IMX7D) {
			/*
			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
			 * from the i.MX6 family when no link speed transition
			 * occurs and the link stays at Gen1: the bit is not
			 * cleared by the hardware, which would make the wait
			 * below report a false failure.  Skip it on i.MX7.
			 */
			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
			if (ret) {
				dev_err(dev, "Failed to bring link up!\n");
				goto err_reset_phy;
			}
		}

		/* Make sure link training is finished as well! */
		ret = imx6_pcie_wait_for_link(imx6_pcie);
		if (ret) {
			dev_err(dev, "Failed to bring link up!\n");
			goto err_reset_phy;
		}
	} else {
		dev_info(dev, "Link: Gen2 disabled\n");
	}

	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
	dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
	return 0;

err_reset_phy:
	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
	imx6_pcie_reset_phy(imx6_pcie);
	return ret;
}

static int imx6_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	imx6_setup_phy_mpll(imx6_pcie);
	dw_pcie_setup_rc(pp);
	imx6_pcie_establish_link(imx6_pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	return 0;
}

static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
	.host_init = imx6_pcie_host_init,
};

static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
			      struct platform_device *pdev)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq <= 0) {
			dev_err(dev, "failed to get MSI irq\n");
			return -ENODEV;
		}
	}

	pp->ops = &imx6_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	/* No special ops needed, but pcie-designware still expects this struct */
};

#ifdef CONFIG_PM_SLEEP
static void imx6_pcie_ltssm_disable(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	switch (imx6_pcie->variant) {
	case IMX6SX:
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0);
		break;
	case IMX7D:
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	default:
		dev_err(dev, "ltssm_disable not supported\n");
	}
}

static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
{
	reset_control_assert(imx6_pcie->turnoff_reset);
	reset_control_deassert(imx6_pcie->turnoff_reset);

	/*
	 * Components with an upstream port must respond to
	 * PME_Turn_Off with PME_TO_Ack, but we can't check that here.
	 *
	 * The standard recommends a 1-10ms timeout after which to
	 * proceed anyway as if acks were received.
	 */
	usleep_range(1000, 10000);
}

static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
{
	clk_disable_unprepare(imx6_pcie->pcie);
	clk_disable_unprepare(imx6_pcie->pcie_phy);
	clk_disable_unprepare(imx6_pcie->pcie_bus);

	if (imx6_pcie->variant == IMX7D) {
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
	}
}

static int imx6_pcie_suspend_noirq(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	if (imx6_pcie->variant != IMX7D)
		return 0;

	imx6_pcie_pm_turnoff(imx6_pcie);
	imx6_pcie_clk_disable(imx6_pcie);
	imx6_pcie_ltssm_disable(dev);

	return 0;
}

static int imx6_pcie_resume_noirq(struct device *dev)
{
	int ret;
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
	struct pcie_port *pp = &imx6_pcie->pci->pp;

	if (imx6_pcie->variant != IMX7D)
		return 0;

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	dw_pcie_setup_rc(pp);

	ret = imx6_pcie_establish_link(imx6_pcie);
	if (ret < 0)
		dev_info(dev, "pcie link is down after resume.\n");

	return 0;
}
#endif

static const struct dev_pm_ops imx6_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
				      imx6_pcie_resume_noirq)
};

static int imx6_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct imx6_pcie *imx6_pcie;
	struct resource *dbi_base;
	struct device_node *node = dev->of_node;
	int ret;

	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
	if (!imx6_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	imx6_pcie->pci = pci;
	imx6_pcie->variant =
		(enum imx6_pcie_variants)of_device_get_match_data(dev);

	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/* Fetch GPIOs */
	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
	imx6_pcie->gpio_active_high = of_property_read_bool(node,
						"reset-gpio-active-high");
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
				imx6_pcie->gpio_active_high ?
					GPIOF_OUT_INIT_HIGH :
					GPIOF_OUT_INIT_LOW,
				"PCIe reset");
		if (ret) {
			dev_err(dev, "unable to get reset gpio\n");
			return ret;
		}
	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
		return imx6_pcie->reset_gpio;
	}

	/* Fetch clocks */
	imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pcie_phy)) {
		dev_err(dev, "pcie_phy clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_phy);
	}

	imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(imx6_pcie->pcie_bus)) {
		dev_err(dev, "pcie_bus clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_bus);
	}

	imx6_pcie->pcie = devm_clk_get(dev, "pcie");
	if (IS_ERR(imx6_pcie->pcie)) {
		dev_err(dev, "pcie clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie);
	}

	switch (imx6_pcie->variant) {
	case IMX6SX:
		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
							   "pcie_inbound_axi");
		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
			dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
			return PTR_ERR(imx6_pcie->pcie_inbound_axi);
		}
		break;
	case IMX7D:
		imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
									    "pciephy");
		if (IS_ERR(imx6_pcie->pciephy_reset)) {
			dev_err(dev, "Failed to get PCIEPHY reset control\n");
			return PTR_ERR(imx6_pcie->pciephy_reset);
		}

		imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
									 "apps");
		if (IS_ERR(imx6_pcie->apps_reset)) {
			dev_err(dev, "Failed to get PCIE APPS reset control\n");
			return PTR_ERR(imx6_pcie->apps_reset);
		}
		break;
	default:
		break;
	}

	/* Grab turnoff reset */
	imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
	if (IS_ERR(imx6_pcie->turnoff_reset)) {
		dev_err(dev, "Failed to get TURNOFF reset control\n");
		return PTR_ERR(imx6_pcie->turnoff_reset);
	}

	/* Grab GPR config register range */
	imx6_pcie->iomuxc_gpr =
		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
		dev_err(dev, "unable to find iomuxc registers\n");
		return PTR_ERR(imx6_pcie->iomuxc_gpr);
	}

	/* Grab PCIe PHY Tx Settings */
	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
				 &imx6_pcie->tx_deemph_gen1))
		imx6_pcie->tx_deemph_gen1 = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
				 &imx6_pcie->tx_deemph_gen2_3p5db))
		imx6_pcie->tx_deemph_gen2_3p5db = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
				 &imx6_pcie->tx_deemph_gen2_6db))
		imx6_pcie->tx_deemph_gen2_6db = 20;

	if (of_property_read_u32(node, "fsl,tx-swing-full",
				 &imx6_pcie->tx_swing_full))
		imx6_pcie->tx_swing_full = 127;

	if (of_property_read_u32(node, "fsl,tx-swing-low",
				 &imx6_pcie->tx_swing_low))
		imx6_pcie->tx_swing_low = 127;

	/* Limit link speed */
	ret = of_property_read_u32(node, "fsl,max-link-speed",
				   &imx6_pcie->link_gen);
	if (ret)
		imx6_pcie->link_gen = 1;

	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
	if (IS_ERR(imx6_pcie->vpcie)) {
		if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		imx6_pcie->vpcie = NULL;
	}

	platform_set_drvdata(pdev, imx6_pcie);

	ret = imx6_add_pcie_port(imx6_pcie, pdev);
	if (ret < 0)
		return ret;

	return 0;
}

static void imx6_pcie_shutdown(struct platform_device *pdev)
{
	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);

	/* bring down link, so bootloader gets clean state in case of reboot */
	imx6_pcie_assert_core_reset(imx6_pcie);
}

static const struct of_device_id imx6_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie",  .data = (void *)IMX6Q,  },
	{ .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
	{ .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
	{ .compatible = "fsl,imx7d-pcie",  .data = (void *)IMX7D,  },
	{},
};

static struct platform_driver imx6_pcie_driver = {
	.driver = {
		.name = "imx6q-pcie",
		.of_match_table = imx6_pcie_of_match,
		.suppress_bind_attrs = true,
		.pm = &imx6_pcie_pm_ops,
	},
	.probe    = imx6_pcie_probe,
	.shutdown = imx6_pcie_shutdown,
};

static int __init imx6_pcie_init(void)
{
	/*
	 * Since probe() can be deferred, we need to make sure that
	 * hook_fault_code() is not called after __init memory is freed
	 * by the kernel.  Since imx6q_pcie_abort_handler() does not access
	 * any driver state, we can install the handler here without
	 * risking it touching uninitialized driver state.
	 */
	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
			"external abort on non-linefetch");

	return platform_driver_register(&imx6_pcie_driver);
}
device_initcall(imx6_pcie_init);
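
/*
 * For reference, an illustrative device tree fragment covering the
 * properties consumed by imx6_pcie_probe().  This is a sketch added for
 * readability, not an authoritative binding example: the compatible
 * string, clock-names and property names come from this driver, while
 * the unit address, phandles and GPIO specifier are board-dependent
 * placeholders.
 *
 *	pcie: pcie@... {
 *		compatible = "fsl,imx6q-pcie";
 *		reg = <...>;
 *		clocks = <...>, <...>, <...>;
 *		clock-names = "pcie", "pcie_bus", "pcie_phy";
 *		interrupt-names = "msi";
 *		reset-gpio = <&gpioX N ...>;
 *		fsl,max-link-speed = <2>;
 *		vpcie-supply = <&reg_pcie>;
 *	};
 *
 * i.MX6SX additionally needs a "pcie_inbound_axi" clock; i.MX7D uses
 * "pciephy" and "apps" entries in reset-names instead of the GPR-based
 * resets; any variant may provide the optional "turnoff" reset.
 */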