1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * PCIe host controller driver for Freescale i.MX6 SoCs
4 *
5 * Copyright (C) 2013 Kosagi
6 * https://www.kosagi.com
7 *
8 * Author: Sean Cross <xobs@kosagi.com>
9 */
10
11 #include <linux/bitfield.h>
12 #include <linux/clk.h>
13 #include <linux/delay.h>
14 #include <linux/gpio.h>
15 #include <linux/kernel.h>
16 #include <linux/mfd/syscon.h>
17 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
18 #include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
19 #include <linux/module.h>
20 #include <linux/of.h>
21 #include <linux/of_gpio.h>
22 #include <linux/of_address.h>
23 #include <linux/pci.h>
24 #include <linux/platform_device.h>
25 #include <linux/regmap.h>
26 #include <linux/regulator/consumer.h>
27 #include <linux/resource.h>
28 #include <linux/signal.h>
29 #include <linux/types.h>
30 #include <linux/interrupt.h>
31 #include <linux/reset.h>
32 #include <linux/phy/phy.h>
33 #include <linux/pm_domain.h>
34 #include <linux/pm_runtime.h>
35
36 #include "pcie-designware.h"
37
38 #define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9)
39 #define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN BIT(10)
40 #define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11)
41 #define IMX8MQ_GPR_PCIE_VREG_BYPASS BIT(12)
42 #define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8)
43 #define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000
44
45 #define to_imx6_pcie(x) dev_get_drvdata((x)->dev)
46
/*
 * Supported SoC variants.  The *_EP entries select endpoint mode on the
 * corresponding i.MX8M part; all others run the controller as a root port.
 */
enum imx6_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX7D,
	IMX8MQ,
	IMX8MM,
	IMX8MP,
	IMX8MQ_EP,
	IMX8MM_EP,
	IMX8MP_EP,
};
59
60 #define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
61 #define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1)
62 #define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
63
64 #define IMX6_PCIE_MAX_CLKS 6
65
/* Static, per-variant configuration selected via of_device_id match data. */
struct imx6_pcie_drvdata {
	enum imx6_pcie_variants variant;	/* which SoC this entry is for */
	enum dw_pcie_device_mode mode;		/* root-complex or endpoint */
	u32 flags;				/* IMX6_PCIE_FLAG_* bits */
	int dbi_length;				/* dbi region size, if limited */
	const char *gpr;			/* syscon compatible for IOMUXC GPR */
	const char * const *clk_names;		/* clock ids to acquire */
	const u32 clks_cnt;			/* number of entries in clk_names */
};
75
/* Per-controller driver state. */
struct imx6_pcie {
	struct dw_pcie *pci;		/* DesignWare core handle */
	int reset_gpio;			/* PERST# GPIO; may be invalid (optional) */
	bool gpio_active_high;		/* PERST# polarity */
	bool link_is_up;		/* set once link training succeeded */
	struct clk_bulk_data clks[IMX6_PCIE_MAX_CLKS];
	struct regmap *iomuxc_gpr;	/* IOMUXC GPR syscon regmap */
	u16 msi_ctrl;			/* saved MSI control word (suspend/resume) */
	u32 controller_id;		/* controller index; 1 selects GPR16/PCIE2 fields */
	struct reset_control *pciephy_reset;
	struct reset_control *apps_reset;	/* gates the LTSSM on i.MX7/8M */
	struct reset_control *turnoff_reset;	/* optional PME_Turn_Off reset */
	/* TX driver tuning values programmed into GPR8 on i.MX6 */
	u32 tx_deemph_gen1;
	u32 tx_deemph_gen2_3p5db;
	u32 tx_deemph_gen2_6db;
	u32 tx_swing_full;
	u32 tx_swing_low;
	struct regulator *vpcie;	/* optional slot supply */
	struct regulator *vph;		/* optional PCIE_VPH supply (i.MX8MQ) */
	void __iomem *phy_base;		/* i.MX7D PHY mmio, for ERR010728 workaround */

	/* power domain for pcie */
	struct device *pd_pcie;
	/* power domain for pcie phy */
	struct device *pd_pcie_phy;
	struct phy *phy;		/* generic PHY (i.MX8MM/8MP) */
	const struct imx6_pcie_drvdata *drvdata;
};
104
105 /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
106 #define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200
107 #define PHY_PLL_LOCK_WAIT_TIMEOUT (2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)
108
109 /* PCIe Port Logic registers (memory-mapped) */
110 #define PL_OFFSET 0x700
111
112 #define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
113 #define PCIE_PHY_CTRL_DATA(x) FIELD_PREP(GENMASK(15, 0), (x))
114 #define PCIE_PHY_CTRL_CAP_ADR BIT(16)
115 #define PCIE_PHY_CTRL_CAP_DAT BIT(17)
116 #define PCIE_PHY_CTRL_WR BIT(18)
117 #define PCIE_PHY_CTRL_RD BIT(19)
118
119 #define PCIE_PHY_STAT (PL_OFFSET + 0x110)
120 #define PCIE_PHY_STAT_ACK BIT(16)
121
122 /* PHY registers (not memory-mapped) */
123 #define PCIE_PHY_ATEOVRD 0x10
124 #define PCIE_PHY_ATEOVRD_EN BIT(2)
125 #define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT 0
126 #define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK 0x1
127
128 #define PCIE_PHY_MPLL_OVRD_IN_LO 0x11
129 #define PCIE_PHY_MPLL_MULTIPLIER_SHIFT 2
130 #define PCIE_PHY_MPLL_MULTIPLIER_MASK 0x7f
131 #define PCIE_PHY_MPLL_MULTIPLIER_OVRD BIT(9)
132
133 #define PCIE_PHY_RX_ASIC_OUT 0x100D
134 #define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0)
135
136 /* iMX7 PCIe PHY registers */
137 #define PCIE_PHY_CMN_REG4 0x14
138 /* These are probably the bits that *aren't* DCC_FB_EN */
139 #define PCIE_PHY_CMN_REG4_DCC_FB_EN 0x29
140
141 #define PCIE_PHY_CMN_REG15 0x54
142 #define PCIE_PHY_CMN_REG15_DLY_4 BIT(2)
143 #define PCIE_PHY_CMN_REG15_PLL_PD BIT(5)
144 #define PCIE_PHY_CMN_REG15_OVRD_PLL_PD BIT(7)
145
146 #define PCIE_PHY_CMN_REG24 0x90
147 #define PCIE_PHY_CMN_REG24_RX_EQ BIT(6)
148 #define PCIE_PHY_CMN_REG24_RX_EQ_SEL BIT(3)
149
150 #define PCIE_PHY_CMN_REG26 0x98
151 #define PCIE_PHY_CMN_REG26_ATT_MODE 0xBC
152
153 #define PHY_RX_OVRD_IN_LO 0x1005
154 #define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5)
155 #define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3)
156
imx6_pcie_grp_offset(const struct imx6_pcie * imx6_pcie)157 static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
158 {
159 WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
160 imx6_pcie->drvdata->variant != IMX8MQ_EP &&
161 imx6_pcie->drvdata->variant != IMX8MM &&
162 imx6_pcie->drvdata->variant != IMX8MM_EP &&
163 imx6_pcie->drvdata->variant != IMX8MP &&
164 imx6_pcie->drvdata->variant != IMX8MP_EP);
165 return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
166 }
167
imx6_pcie_configure_type(struct imx6_pcie * imx6_pcie)168 static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
169 {
170 unsigned int mask, val, mode;
171
172 if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE)
173 mode = PCI_EXP_TYPE_ENDPOINT;
174 else
175 mode = PCI_EXP_TYPE_ROOT_PORT;
176
177 switch (imx6_pcie->drvdata->variant) {
178 case IMX8MQ:
179 case IMX8MQ_EP:
180 if (imx6_pcie->controller_id == 1) {
181 mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
182 val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
183 mode);
184 } else {
185 mask = IMX6Q_GPR12_DEVICE_TYPE;
186 val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode);
187 }
188 break;
189 default:
190 mask = IMX6Q_GPR12_DEVICE_TYPE;
191 val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode);
192 break;
193 }
194
195 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
196 }
197
/*
 * Busy-wait for the PHY control-interface ACK bit to reach @exp_val.
 * Polls up to 10 times with a 1us delay after each mismatch.
 * Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	unsigned int attempt;

	for (attempt = 0; attempt < 10; attempt++) {
		bool ack = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
			   PCIE_PHY_STAT_ACK;

		if (ack == exp_val)
			return 0;

		udelay(1);
	}

	return -ETIMEDOUT;
}
218
/*
 * Latch a PHY register address into the indirect control interface:
 * drive the address on the data lines, pulse CAP_ADR, then complete the
 * ack handshake (wait for ack to assert, drop CAP_ADR, wait for ack to
 * de-assert).  Returns 0 on success or -ETIMEDOUT from the ack polls.
 */
static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val;
	int ret;

	/* Drive the target address onto the PHY control data lines. */
	val = PCIE_PHY_CTRL_DATA(addr);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	/* Assert "capture address" while the address is still driven. */
	val |= PCIE_PHY_CTRL_CAP_ADR;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	ret = pcie_phy_poll_ack(imx6_pcie, true);
	if (ret)
		return ret;

	/* De-assert CAP_ADR (address only) and wait for ack to drop. */
	val = PCIE_PHY_CTRL_DATA(addr);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	return pcie_phy_poll_ack(imx6_pcie, false);
}
240
241 /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 phy_ctl;
	int ret;

	/* Latch the register address first. */
	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = PCIE_PHY_CTRL_RD;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);

	ret = pcie_phy_poll_ack(imx6_pcie, true);
	if (ret)
		return ret;

	/* Low 16 bits of PCIE_PHY_STAT hold the read data (u16 truncation). */
	*data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);

	/* deassert Read signal */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);

	return pcie_phy_poll_ack(imx6_pcie, false);
}
267
/*
 * Write @data to 16-bit PHY register @addr via the indirect control
 * interface.  Each phase (capture data, write strobe) is a full ack
 * handshake; any timeout aborts the sequence with -ETIMEDOUT.
 */
static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= PCIE_PHY_CTRL_CAP_DAT;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	ret = pcie_phy_poll_ack(imx6_pcie, true);
	if (ret)
		return ret;

	/* deassert cap data */
	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, false);
	if (ret)
		return ret;

	/* assert wr signal */
	var = PCIE_PHY_CTRL_WR;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack */
	ret = pcie_phy_poll_ack(imx6_pcie, true);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, false);
	if (ret)
		return ret;

	/* Release all control lines. */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);

	return 0;
}
322
/*
 * Variant-specific PHY/reference-clock setup, performed while the core
 * is held in reset.  i.MX8MM/8MP rely on a separate PHY driver; i.MX8MQ,
 * i.MX7D and i.MX6SX only poke IOMUXC GPR bits; the plain i.MX6 family
 * additionally programs TX de-emphasis/swing tuning values from driver
 * state.  Finishes by latching the RC/EP device type into GPR12.
 */
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
	switch (imx6_pcie->drvdata->variant) {
	case IMX8MM:
	case IMX8MM_EP:
	case IMX8MP:
	case IMX8MP_EP:
		/*
		 * The PHY initialization had been done in the PHY
		 * driver, break here directly.
		 */
		break;
	case IMX8MQ:
	case IMX8MQ_EP:
		/*
		 * TODO: Currently this code assumes external
		 * oscillator is being used
		 */
		regmap_update_bits(imx6_pcie->iomuxc_gpr,
				   imx6_pcie_grp_offset(imx6_pcie),
				   IMX8MQ_GPR_PCIE_REF_USE_PAD,
				   IMX8MQ_GPR_PCIE_REF_USE_PAD);
		/*
		 * Regarding the datasheet, the PCIE_VPH is suggested
		 * to be 1.8V. If the PCIE_VPH is supplied by 3.3V, the
		 * VREG_BYPASS should be cleared to zero.
		 */
		if (imx6_pcie->vph &&
		    regulator_get_voltage(imx6_pcie->vph) > 3000000)
			regmap_update_bits(imx6_pcie->iomuxc_gpr,
					   imx6_pcie_grp_offset(imx6_pcie),
					   IMX8MQ_GPR_PCIE_VREG_BYPASS,
					   0);
		break;
	case IMX7D:
		/* Clear the PHY reference clock selector bit. */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
		break;
	case IMX6SX:
		/* i.MX6SX adds RX equalization setup, then shares the 6Q path. */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
				   IMX6SX_GPR12_PCIE_RX_EQ_2);
		fallthrough;
	default:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

		/* configure constant input signal to the pcie ctrl and phy */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

		/* TX driver tuning values held in imx6_pcie state. */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
				   imx6_pcie->tx_deemph_gen1 << 0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
				   imx6_pcie->tx_deemph_gen2_6db << 12);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_FULL,
				   imx6_pcie->tx_swing_full << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_LOW,
				   imx6_pcie->tx_swing_low << 25);
		break;
	}

	imx6_pcie_configure_type(imx6_pcie);
}
394
imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie * imx6_pcie)395 static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
396 {
397 u32 val;
398 struct device *dev = imx6_pcie->pci->dev;
399
400 if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
401 IOMUXC_GPR22, val,
402 val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
403 PHY_PLL_LOCK_WAIT_USLEEP_MAX,
404 PHY_PLL_LOCK_WAIT_TIMEOUT))
405 dev_err(dev, "PCIe PLL lock timeout\n");
406 }
407
imx6_setup_phy_mpll(struct imx6_pcie * imx6_pcie)408 static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
409 {
410 unsigned long phy_rate = 0;
411 int mult, div;
412 u16 val;
413 int i;
414
415 if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
416 return 0;
417
418 for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++)
419 if (strncmp(imx6_pcie->clks[i].id, "pcie_phy", 8) == 0)
420 phy_rate = clk_get_rate(imx6_pcie->clks[i].clk);
421
422 switch (phy_rate) {
423 case 125000000:
424 /*
425 * The default settings of the MPLL are for a 125MHz input
426 * clock, so no need to reconfigure anything in that case.
427 */
428 return 0;
429 case 100000000:
430 mult = 25;
431 div = 0;
432 break;
433 case 200000000:
434 mult = 25;
435 div = 1;
436 break;
437 default:
438 dev_err(imx6_pcie->pci->dev,
439 "Unsupported PHY reference clock rate %lu\n", phy_rate);
440 return -EINVAL;
441 }
442
443 pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
444 val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
445 PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
446 val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
447 val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
448 pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
449
450 pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
451 val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
452 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
453 val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
454 val |= PCIE_PHY_ATEOVRD_EN;
455 pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
456
457 return 0;
458 }
459
/*
 * Reset the internal i.MX6 PHY receiver by pulsing the RX data/PLL
 * enable overrides: force them on, hold ~2ms, then release.  No-op on
 * variants without the internal PHY.
 */
static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
	u16 tmp;

	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
		return;

	/* Assert the RX override bits. */
	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);

	usleep_range(2000, 3000);

	/* Release the overrides again. */
	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
}
479
480 #ifdef CONFIG_ARM
481 /* Added for PCI abort handling */
/* Added for PCI abort handling */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	/* Decode the faulting instruction to recover from config aborts. */
	unsigned long pc = instruction_pointer(regs);
	unsigned long instr = *(unsigned long *)pc;
	/* Destination register field (bits 15:12 in ARM data transfers). */
	int reg = (instr >> 12) & 15;

	/*
	 * If the instruction being executed was a read,
	 * make it look like it read all-ones.
	 */
	if ((instr & 0x0c100000) == 0x04100000) {
		/* Single data transfer load (LDR/LDRB by the mask above). */
		unsigned long val;

		/* Byte load (B bit set): all-ones for one byte is 255. */
		if (instr & 0x00400000)
			val = 255;
		else
			val = -1;

		regs->uregs[reg] = val;
		/* Skip the faulting instruction and resume. */
		regs->ARM_pc += 4;
		return 0;
	}

	/*
	 * NOTE(review): this mask presumably matches halfword/signed
	 * loads (LDRH and friends) — confirm against the ARM ARM.
	 */
	if ((instr & 0x0e100090) == 0x00100090) {
		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
		return 0;
	}

	/* Not a load we can fix up; let the kernel handle the abort. */
	return 1;
}
514 #endif
515
/*
 * Attach the "pcie" and "pcie_phy" power domains and link them to this
 * device so runtime PM keeps them powered.  A single-domain platform
 * (genpd already attached by the core) needs none of this.
 * Returns 0 on success or a negative errno.
 */
static int imx6_pcie_attach_pd(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
	struct device_link *link;

	/* Do nothing when in a single power domain */
	if (dev->pm_domain)
		return 0;

	imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
	if (IS_ERR(imx6_pcie->pd_pcie))
		return PTR_ERR(imx6_pcie->pd_pcie);
	/* Do nothing when power domain missing */
	if (!imx6_pcie->pd_pcie)
		return 0;
	link = device_link_add(dev, imx6_pcie->pd_pcie,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie pd.\n");
		return -EINVAL;
	}

	/*
	 * NOTE(review): unlike "pcie" above, a NULL (missing) "pcie_phy"
	 * domain is not treated as "do nothing" here — confirm whether
	 * that asymmetry is intentional.
	 */
	imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pd_pcie_phy))
		return PTR_ERR(imx6_pcie->pd_pcie_phy);

	link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
		return -EINVAL;
	}

	return 0;
}
555
/*
 * Turn on the PCIe reference clock via variant-specific IOMUXC GPR
 * bits.  i.MX7D needs nothing here.  Always returns 0 at present, but
 * keeps the int return so callers are ready for fallible variants.
 */
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
	unsigned int offset;
	int ret = 0;

	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
		/* Release the PHY from test power-down. */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
		break;
	case IMX6QP:
	case IMX6Q:
		/* power up core phy and enable ref clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
		/*
		 * the async reset input need ref clock to sync internally,
		 * when the ref clock comes after reset, internal synced
		 * reset time is too short, cannot meet the requirement.
		 * add one ~10us delay here.
		 */
		usleep_range(10, 100);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
		break;
	case IMX7D:
		/* No GPR ref-clock gating needed on i.MX7D. */
		break;
	case IMX8MM:
	case IMX8MM_EP:
	case IMX8MQ:
	case IMX8MQ_EP:
	case IMX8MP:
	case IMX8MP_EP:
		offset = imx6_pcie_grp_offset(imx6_pcie);
		/*
		 * Set the over ride low and enabled
		 * make sure that REF_CLK is turned on.
		 */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
				   0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
		break;
	}

	return ret;
}
605
/*
 * Undo imx6_pcie_enable_ref_clk(): gate the reference clock and power
 * down the PHY on i.MX6Q/QP; reselect the external refclk source on
 * i.MX7D.  Other variants need nothing here.
 */
static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
{
	switch (imx6_pcie->drvdata->variant) {
	case IMX6QP:
	case IMX6Q:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				IMX6Q_GPR1_PCIE_REF_CLK_EN, 0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				IMX6Q_GPR1_PCIE_TEST_PD,
				IMX6Q_GPR1_PCIE_TEST_PD);
		break;
	case IMX7D:
		/* Set the refclk selector bit (cleared in init_phy). */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
		break;
	default:
		break;
	}
}
626
imx6_pcie_clk_enable(struct imx6_pcie * imx6_pcie)627 static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
628 {
629 struct dw_pcie *pci = imx6_pcie->pci;
630 struct device *dev = pci->dev;
631 int ret;
632
633 ret = clk_bulk_prepare_enable(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
634 if (ret)
635 return ret;
636
637 ret = imx6_pcie_enable_ref_clk(imx6_pcie);
638 if (ret) {
639 dev_err(dev, "unable to enable pcie ref clock\n");
640 goto err_ref_clk;
641 }
642
643 /* allow the clocks to stabilize */
644 usleep_range(200, 500);
645 return 0;
646
647 err_ref_clk:
648 clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
649
650 return ret;
651 }
652
/* Disable the reference clock first, then the bulk clocks (reverse of enable). */
static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
{
	imx6_pcie_disable_ref_clk(imx6_pcie);
	clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
}
658
/*
 * Put the controller (and, where applicable, the PHY) into reset using
 * the variant's mechanism — reset controller lines on i.MX7/8M, IOMUXC
 * GPR bits on i.MX6 — and assert PERST# towards the slot if a reset
 * GPIO was provided.
 */
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
	switch (imx6_pcie->drvdata->variant) {
	case IMX7D:
	case IMX8MQ:
	case IMX8MQ_EP:
		reset_control_assert(imx6_pcie->pciephy_reset);
		fallthrough;
	case IMX8MM:
	case IMX8MM_EP:
	case IMX8MP:
	case IMX8MP_EP:
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
		/* Force PCIe PHY reset */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST,
				   IMX6Q_GPR1_PCIE_SW_RST);
		break;
	case IMX6Q:
		/* Power down the PHY and gate the reference clock. */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
		break;
	}

	/* Some boards don't have PCIe reset GPIO. */
	if (gpio_is_valid(imx6_pcie->reset_gpio))
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					imx6_pcie->gpio_active_high);
}
700
/*
 * Release the controller/PHY from reset (counterpart of
 * imx6_pcie_assert_core_reset) and de-assert PERST# after the delays
 * required by the PCIe spec.  Always returns 0 at present.
 */
static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;

	switch (imx6_pcie->drvdata->variant) {
	case IMX8MQ:
	case IMX8MQ_EP:
		reset_control_deassert(imx6_pcie->pciephy_reset);
		break;
	case IMX7D:
		reset_control_deassert(imx6_pcie->pciephy_reset);

		/* Workaround for ERR010728, failure of PCI-e PLL VCO to
		 * oscillate, especially when cold. This turns off "Duty-cycle
		 * Corrector" and other mysterious undocumented things.
		 */
		if (likely(imx6_pcie->phy_base)) {
			/* De-assert DCC_FB_EN */
			writel(PCIE_PHY_CMN_REG4_DCC_FB_EN,
			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG4);
			/* Assert RX_EQS and RX_EQS_SEL */
			writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL
				| PCIE_PHY_CMN_REG24_RX_EQ,
			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG24);
			/* Assert ATT_MODE */
			writel(PCIE_PHY_CMN_REG26_ATT_MODE,
			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG26);
		} else {
			dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
		}

		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
		break;
	case IMX6SX:
		/* Release the button reset asserted in assert_core_reset. */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST, 0);

		/* Allow the core to come out of reset. */
		usleep_range(200, 500);
		break;
	case IMX6Q:		/* Nothing to do */
	case IMX8MM:
	case IMX8MM_EP:
	case IMX8MP:
	case IMX8MP_EP:
		break;
	}

	/* Some boards don't have PCIe reset GPIO. */
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		/* Keep PERST# asserted long enough for the device to settle. */
		msleep(100);
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					!imx6_pcie->gpio_active_high);
		/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
		msleep(100);
	}

	return 0;
}
764
imx6_pcie_wait_for_speed_change(struct imx6_pcie * imx6_pcie)765 static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
766 {
767 struct dw_pcie *pci = imx6_pcie->pci;
768 struct device *dev = pci->dev;
769 u32 tmp;
770 unsigned int retries;
771
772 for (retries = 0; retries < 200; retries++) {
773 tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
774 /* Test if the speed change finished. */
775 if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
776 return 0;
777 usleep_range(100, 1000);
778 }
779
780 dev_err(dev, "Speed change timeout\n");
781 return -ETIMEDOUT;
782 }
783
/*
 * Start link training: set the LTSSM enable bit in GPR12 on the i.MX6
 * family, or release the apps reset on i.MX7/8M.
 */
static void imx6_pcie_ltssm_enable(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	switch (imx6_pcie->drvdata->variant) {
	case IMX6Q:
	case IMX6SX:
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2,
				   IMX6Q_GPR12_PCIE_CTL_2);
		break;
	case IMX7D:
	case IMX8MQ:
	case IMX8MQ_EP:
	case IMX8MM:
	case IMX8MM_EP:
	case IMX8MP:
	case IMX8MP_EP:
		reset_control_deassert(imx6_pcie->apps_reset);
		break;
	}
}
807
/* Halt the LTSSM: exact mirror of imx6_pcie_ltssm_enable(). */
static void imx6_pcie_ltssm_disable(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	switch (imx6_pcie->drvdata->variant) {
	case IMX6Q:
	case IMX6SX:
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0);
		break;
	case IMX7D:
	case IMX8MQ:
	case IMX8MQ_EP:
	case IMX8MM:
	case IMX8MM_EP:
	case IMX8MP:
	case IMX8MP_EP:
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	}
}
830
/*
 * Bring up the PCIe link.  Training is started at Gen1, then — if the
 * configured max link speed allows — a Directed Speed Change negotiates
 * the best common speed.  On failure the PHY is reset and 0 is still
 * returned (link-down is tolerated; link_is_up records the outcome).
 */
static int imx6_pcie_start_link(struct dw_pcie *pci)
{
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
	struct device *dev = pci->dev;
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 tmp;
	int ret;

	/*
	 * Force Gen1 operation when starting the link. In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all. This happens with PCIe switches.
	 */
	dw_pcie_dbi_ro_wr_en(pci);
	tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
	tmp &= ~PCI_EXP_LNKCAP_SLS;
	tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
	dw_pcie_dbi_ro_wr_dis(pci);

	/* Start LTSSM. */
	imx6_pcie_ltssm_enable(dev);

	ret = dw_pcie_wait_for_link(pci);
	if (ret)
		goto err_reset_phy;

	if (pci->link_gen > 1) {
		/* Allow faster modes after the link is up */
		dw_pcie_dbi_ro_wr_en(pci);
		tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
		tmp &= ~PCI_EXP_LNKCAP_SLS;
		/* Raise the advertised max speed to the configured gen. */
		tmp |= pci->link_gen;
		dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);

		/*
		 * Start Directed Speed Change so the best possible
		 * speed both link partners support can be negotiated.
		 */
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		tmp |= PORT_LOGIC_SPEED_CHANGE;
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
		dw_pcie_dbi_ro_wr_dis(pci);

		if (imx6_pcie->drvdata->flags &
		    IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
			/*
			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
			 * from i.MX6 family when no link speed transition
			 * occurs and we go Gen1 -> yep, Gen1. The difference
			 * is that, in such case, it will not be cleared by HW
			 * which will cause the following code to report false
			 * failure.
			 */

			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
			if (ret) {
				dev_err(dev, "Failed to bring link up!\n");
				goto err_reset_phy;
			}
		}

		/* Make sure link training is finished as well! */
		ret = dw_pcie_wait_for_link(pci);
		if (ret)
			goto err_reset_phy;
	} else {
		dev_info(dev, "Link: Only Gen1 is enabled\n");
	}

	imx6_pcie->link_is_up = true;
	tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
	dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
	return 0;

err_reset_phy:
	/* Tolerate link-down: reset the PHY and report success anyway. */
	imx6_pcie->link_is_up = false;
	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
	imx6_pcie_reset_phy(imx6_pcie);
	return 0;
}
914
imx6_pcie_stop_link(struct dw_pcie * pci)915 static void imx6_pcie_stop_link(struct dw_pcie *pci)
916 {
917 struct device *dev = pci->dev;
918
919 /* Turn off PCIe LTSSM */
920 imx6_pcie_ltssm_disable(dev);
921 }
922
imx6_pcie_host_init(struct dw_pcie_rp * pp)923 static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
924 {
925 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
926 struct device *dev = pci->dev;
927 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
928 int ret;
929
930 if (imx6_pcie->vpcie) {
931 ret = regulator_enable(imx6_pcie->vpcie);
932 if (ret) {
933 dev_err(dev, "failed to enable vpcie regulator: %d\n",
934 ret);
935 return ret;
936 }
937 }
938
939 imx6_pcie_assert_core_reset(imx6_pcie);
940 imx6_pcie_init_phy(imx6_pcie);
941
942 ret = imx6_pcie_clk_enable(imx6_pcie);
943 if (ret) {
944 dev_err(dev, "unable to enable pcie clocks: %d\n", ret);
945 goto err_reg_disable;
946 }
947
948 if (imx6_pcie->phy) {
949 ret = phy_init(imx6_pcie->phy);
950 if (ret) {
951 dev_err(dev, "pcie PHY power up failed\n");
952 goto err_clk_disable;
953 }
954 }
955
956 if (imx6_pcie->phy) {
957 ret = phy_power_on(imx6_pcie->phy);
958 if (ret) {
959 dev_err(dev, "waiting for PHY ready timeout!\n");
960 goto err_phy_exit;
961 }
962 }
963
964 ret = imx6_pcie_deassert_core_reset(imx6_pcie);
965 if (ret < 0) {
966 dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
967 goto err_phy_off;
968 }
969
970 imx6_setup_phy_mpll(imx6_pcie);
971
972 return 0;
973
974 err_phy_off:
975 phy_power_off(imx6_pcie->phy);
976 err_phy_exit:
977 phy_exit(imx6_pcie->phy);
978 err_clk_disable:
979 imx6_pcie_clk_disable(imx6_pcie);
980 err_reg_disable:
981 if (imx6_pcie->vpcie)
982 regulator_disable(imx6_pcie->vpcie);
983 return ret;
984 }
985
imx6_pcie_host_exit(struct dw_pcie_rp * pp)986 static void imx6_pcie_host_exit(struct dw_pcie_rp *pp)
987 {
988 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
989 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
990
991 if (imx6_pcie->phy) {
992 if (phy_power_off(imx6_pcie->phy))
993 dev_err(pci->dev, "unable to power off PHY\n");
994 phy_exit(imx6_pcie->phy);
995 }
996 imx6_pcie_clk_disable(imx6_pcie);
997
998 if (imx6_pcie->vpcie)
999 regulator_disable(imx6_pcie->vpcie);
1000 }
1001
/* Host-mode callbacks registered with the DWC core. */
static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
	.host_init = imx6_pcie_host_init,
	.host_deinit = imx6_pcie_host_exit,
};
1006
/* Core link-control callbacks shared by host and endpoint modes. */
static const struct dw_pcie_ops dw_pcie_ops = {
	.start_link = imx6_pcie_start_link,
	.stop_link = imx6_pcie_stop_link,
};
1011
/* Endpoint-framework init hook: clear every BAR to a known state. */
static void imx6_pcie_ep_init(struct dw_pcie_ep *ep)
{
	enum pci_barno bar;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	for (bar = BAR_0; bar <= BAR_5; bar++)
		dw_pcie_ep_reset_bar(pci, bar);
}
1020
/*
 * Raise an interrupt towards the RC on behalf of endpoint function
 * @func_no, dispatching to the DWC helper for the requested IRQ type.
 * Returns 0 on success or a negative errno (-EINVAL for unknown types).
 */
static int imx6_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				  enum pci_epc_irq_type type,
				  u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	/* Every arm returns, so no code is needed after the switch. */
	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		return dw_pcie_ep_raise_legacy_irq(ep, func_no);
	case PCI_EPC_IRQ_MSI:
		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
	case PCI_EPC_IRQ_MSIX:
		return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
		return -EINVAL;
	}
}
1041
/* EPC capabilities advertised for the i.MX8M endpoint variants. */
static const struct pci_epc_features imx8m_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = false,
	.reserved_bar = 1 << BAR_1 | 1 << BAR_3,	/* BAR1/BAR3 unusable */
	.align = SZ_64K,
};
1049
/* Report the (static) endpoint capabilities to the EPC framework. */
static const struct pci_epc_features*
imx6_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
	return &imx8m_pcie_epc_features;
}
1055
/* Endpoint-mode callbacks registered with the DWC endpoint core. */
static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.ep_init = imx6_pcie_ep_init,
	.raise_irq = imx6_pcie_ep_raise_irq,
	.get_features = imx6_pcie_ep_get_features,
};
1061
imx6_add_pcie_ep(struct imx6_pcie * imx6_pcie,struct platform_device * pdev)1062 static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
1063 struct platform_device *pdev)
1064 {
1065 int ret;
1066 unsigned int pcie_dbi2_offset;
1067 struct dw_pcie_ep *ep;
1068 struct resource *res;
1069 struct dw_pcie *pci = imx6_pcie->pci;
1070 struct dw_pcie_rp *pp = &pci->pp;
1071 struct device *dev = pci->dev;
1072
1073 imx6_pcie_host_init(pp);
1074 ep = &pci->ep;
1075 ep->ops = &pcie_ep_ops;
1076
1077 switch (imx6_pcie->drvdata->variant) {
1078 case IMX8MQ_EP:
1079 case IMX8MM_EP:
1080 case IMX8MP_EP:
1081 pcie_dbi2_offset = SZ_1M;
1082 break;
1083 default:
1084 pcie_dbi2_offset = SZ_4K;
1085 break;
1086 }
1087 pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset;
1088 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
1089 if (!res)
1090 return -EINVAL;
1091
1092 ep->phys_base = res->start;
1093 ep->addr_size = resource_size(res);
1094 ep->page_size = SZ_64K;
1095
1096 ret = dw_pcie_ep_init(ep);
1097 if (ret) {
1098 dev_err(dev, "failed to initialize endpoint\n");
1099 return ret;
1100 }
1101 /* Start LTSSM. */
1102 imx6_pcie_ltssm_enable(dev);
1103
1104 return 0;
1105 }
1106
imx6_pcie_pm_turnoff(struct imx6_pcie * imx6_pcie)1107 static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
1108 {
1109 struct device *dev = imx6_pcie->pci->dev;
1110
1111 /* Some variants have a turnoff reset in DT */
1112 if (imx6_pcie->turnoff_reset) {
1113 reset_control_assert(imx6_pcie->turnoff_reset);
1114 reset_control_deassert(imx6_pcie->turnoff_reset);
1115 goto pm_turnoff_sleep;
1116 }
1117
1118 /* Others poke directly at IOMUXC registers */
1119 switch (imx6_pcie->drvdata->variant) {
1120 case IMX6SX:
1121 case IMX6QP:
1122 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
1123 IMX6SX_GPR12_PCIE_PM_TURN_OFF,
1124 IMX6SX_GPR12_PCIE_PM_TURN_OFF);
1125 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
1126 IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
1127 break;
1128 default:
1129 dev_err(dev, "PME_Turn_Off not implemented\n");
1130 return;
1131 }
1132
1133 /*
1134 * Components with an upstream port must respond to
1135 * PME_Turn_Off with PME_TO_Ack but we can't check.
1136 *
1137 * The standard recommends a 1-10ms timeout after which to
1138 * proceed anyway as if acks were received.
1139 */
1140 pm_turnoff_sleep:
1141 usleep_range(1000, 10000);
1142 }
1143
imx6_pcie_msi_save_restore(struct imx6_pcie * imx6_pcie,bool save)1144 static void imx6_pcie_msi_save_restore(struct imx6_pcie *imx6_pcie, bool save)
1145 {
1146 u8 offset;
1147 u16 val;
1148 struct dw_pcie *pci = imx6_pcie->pci;
1149
1150 if (pci_msi_enabled()) {
1151 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
1152 if (save) {
1153 val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
1154 imx6_pcie->msi_ctrl = val;
1155 } else {
1156 dw_pcie_dbi_ro_wr_en(pci);
1157 val = imx6_pcie->msi_ctrl;
1158 dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
1159 dw_pcie_dbi_ro_wr_dis(pci);
1160 }
1161 }
1162 }
1163
imx6_pcie_suspend_noirq(struct device * dev)1164 static int imx6_pcie_suspend_noirq(struct device *dev)
1165 {
1166 struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
1167 struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
1168
1169 if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
1170 return 0;
1171
1172 imx6_pcie_msi_save_restore(imx6_pcie, true);
1173 imx6_pcie_pm_turnoff(imx6_pcie);
1174 imx6_pcie_stop_link(imx6_pcie->pci);
1175 imx6_pcie_host_exit(pp);
1176
1177 return 0;
1178 }
1179
imx6_pcie_resume_noirq(struct device * dev)1180 static int imx6_pcie_resume_noirq(struct device *dev)
1181 {
1182 int ret;
1183 struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
1184 struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
1185
1186 if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
1187 return 0;
1188
1189 ret = imx6_pcie_host_init(pp);
1190 if (ret)
1191 return ret;
1192 imx6_pcie_msi_save_restore(imx6_pcie, false);
1193 dw_pcie_setup_rc(pp);
1194
1195 if (imx6_pcie->link_is_up)
1196 imx6_pcie_start_link(imx6_pcie->pci);
1197
1198 return 0;
1199 }
1200
/* Only noirq-phase callbacks: the link must be handled after child devices */
static const struct dev_pm_ops imx6_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
				  imx6_pcie_resume_noirq)
};
1205
imx6_pcie_probe(struct platform_device * pdev)1206 static int imx6_pcie_probe(struct platform_device *pdev)
1207 {
1208 struct device *dev = &pdev->dev;
1209 struct dw_pcie *pci;
1210 struct imx6_pcie *imx6_pcie;
1211 struct device_node *np;
1212 struct resource *dbi_base;
1213 struct device_node *node = dev->of_node;
1214 int ret;
1215 u16 val;
1216 int i;
1217
1218 imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
1219 if (!imx6_pcie)
1220 return -ENOMEM;
1221
1222 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
1223 if (!pci)
1224 return -ENOMEM;
1225
1226 pci->dev = dev;
1227 pci->ops = &dw_pcie_ops;
1228 pci->pp.ops = &imx6_pcie_host_ops;
1229
1230 imx6_pcie->pci = pci;
1231 imx6_pcie->drvdata = of_device_get_match_data(dev);
1232
1233 /* Find the PHY if one is defined, only imx7d uses it */
1234 np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
1235 if (np) {
1236 struct resource res;
1237
1238 ret = of_address_to_resource(np, 0, &res);
1239 if (ret) {
1240 dev_err(dev, "Unable to map PCIe PHY\n");
1241 return ret;
1242 }
1243 imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
1244 if (IS_ERR(imx6_pcie->phy_base))
1245 return PTR_ERR(imx6_pcie->phy_base);
1246 }
1247
1248 pci->dbi_base = devm_platform_get_and_ioremap_resource(pdev, 0, &dbi_base);
1249 if (IS_ERR(pci->dbi_base))
1250 return PTR_ERR(pci->dbi_base);
1251
1252 /* Fetch GPIOs */
1253 imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
1254 imx6_pcie->gpio_active_high = of_property_read_bool(node,
1255 "reset-gpio-active-high");
1256 if (gpio_is_valid(imx6_pcie->reset_gpio)) {
1257 ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
1258 imx6_pcie->gpio_active_high ?
1259 GPIOF_OUT_INIT_HIGH :
1260 GPIOF_OUT_INIT_LOW,
1261 "PCIe reset");
1262 if (ret) {
1263 dev_err(dev, "unable to get reset gpio\n");
1264 return ret;
1265 }
1266 } else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
1267 return imx6_pcie->reset_gpio;
1268 }
1269
1270 if (imx6_pcie->drvdata->clks_cnt >= IMX6_PCIE_MAX_CLKS)
1271 return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n");
1272
1273 for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++)
1274 imx6_pcie->clks[i].id = imx6_pcie->drvdata->clk_names[i];
1275
1276 /* Fetch clocks */
1277 ret = devm_clk_bulk_get(dev, imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
1278 if (ret)
1279 return ret;
1280
1281 switch (imx6_pcie->drvdata->variant) {
1282 case IMX8MQ:
1283 case IMX8MQ_EP:
1284 if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
1285 imx6_pcie->controller_id = 1;
1286
1287 imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
1288 "pciephy");
1289 if (IS_ERR(imx6_pcie->pciephy_reset)) {
1290 dev_err(dev, "Failed to get PCIEPHY reset control\n");
1291 return PTR_ERR(imx6_pcie->pciephy_reset);
1292 }
1293
1294 imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
1295 "apps");
1296 if (IS_ERR(imx6_pcie->apps_reset)) {
1297 dev_err(dev, "Failed to get PCIE APPS reset control\n");
1298 return PTR_ERR(imx6_pcie->apps_reset);
1299 }
1300 break;
1301 case IMX8MM:
1302 case IMX8MM_EP:
1303 case IMX8MP:
1304 case IMX8MP_EP:
1305 imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
1306 "apps");
1307 if (IS_ERR(imx6_pcie->apps_reset))
1308 return dev_err_probe(dev, PTR_ERR(imx6_pcie->apps_reset),
1309 "failed to get pcie apps reset control\n");
1310
1311 imx6_pcie->phy = devm_phy_get(dev, "pcie-phy");
1312 if (IS_ERR(imx6_pcie->phy))
1313 return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy),
1314 "failed to get pcie phy\n");
1315
1316 break;
1317 default:
1318 break;
1319 }
1320
1321 /* Grab turnoff reset */
1322 imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
1323 if (IS_ERR(imx6_pcie->turnoff_reset)) {
1324 dev_err(dev, "Failed to get TURNOFF reset control\n");
1325 return PTR_ERR(imx6_pcie->turnoff_reset);
1326 }
1327
1328 /* Grab GPR config register range */
1329 imx6_pcie->iomuxc_gpr =
1330 syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr);
1331 if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
1332 dev_err(dev, "unable to find iomuxc registers\n");
1333 return PTR_ERR(imx6_pcie->iomuxc_gpr);
1334 }
1335
1336 /* Grab PCIe PHY Tx Settings */
1337 if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
1338 &imx6_pcie->tx_deemph_gen1))
1339 imx6_pcie->tx_deemph_gen1 = 0;
1340
1341 if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
1342 &imx6_pcie->tx_deemph_gen2_3p5db))
1343 imx6_pcie->tx_deemph_gen2_3p5db = 0;
1344
1345 if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
1346 &imx6_pcie->tx_deemph_gen2_6db))
1347 imx6_pcie->tx_deemph_gen2_6db = 20;
1348
1349 if (of_property_read_u32(node, "fsl,tx-swing-full",
1350 &imx6_pcie->tx_swing_full))
1351 imx6_pcie->tx_swing_full = 127;
1352
1353 if (of_property_read_u32(node, "fsl,tx-swing-low",
1354 &imx6_pcie->tx_swing_low))
1355 imx6_pcie->tx_swing_low = 127;
1356
1357 /* Limit link speed */
1358 pci->link_gen = 1;
1359 of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen);
1360
1361 imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
1362 if (IS_ERR(imx6_pcie->vpcie)) {
1363 if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
1364 return PTR_ERR(imx6_pcie->vpcie);
1365 imx6_pcie->vpcie = NULL;
1366 }
1367
1368 imx6_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
1369 if (IS_ERR(imx6_pcie->vph)) {
1370 if (PTR_ERR(imx6_pcie->vph) != -ENODEV)
1371 return PTR_ERR(imx6_pcie->vph);
1372 imx6_pcie->vph = NULL;
1373 }
1374
1375 platform_set_drvdata(pdev, imx6_pcie);
1376
1377 ret = imx6_pcie_attach_pd(dev);
1378 if (ret)
1379 return ret;
1380
1381 if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
1382 ret = imx6_add_pcie_ep(imx6_pcie, pdev);
1383 if (ret < 0)
1384 return ret;
1385 } else {
1386 ret = dw_pcie_host_init(&pci->pp);
1387 if (ret < 0)
1388 return ret;
1389
1390 if (pci_msi_enabled()) {
1391 u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
1392
1393 val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
1394 val |= PCI_MSI_FLAGS_ENABLE;
1395 dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
1396 }
1397 }
1398
1399 return 0;
1400 }
1401
imx6_pcie_shutdown(struct platform_device * pdev)1402 static void imx6_pcie_shutdown(struct platform_device *pdev)
1403 {
1404 struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
1405
1406 /* bring down link, so bootloader gets clean state in case of reboot */
1407 imx6_pcie_assert_core_reset(imx6_pcie);
1408 }
1409
/* Per-variant clock name lists, referenced by the drvdata table below */
static const char * const imx6q_clks[] = {"pcie_bus", "pcie", "pcie_phy"};
static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"};
static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"};
static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"};

/*
 * Variant descriptors: flags, GPR syscon compatible, clocks and (for
 * i.MX6Q/QP) the DBI length used by the config-space quirk.
 * .mode defaults to 0 (host) unless set to DW_PCIE_EP_TYPE.
 */
static const struct imx6_pcie_drvdata drvdata[] = {
	[IMX6Q] = {
		.variant = IMX6Q,
		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
		/* Reads past 0x200 of DBI space abort on i.MX6Q */
		.dbi_length = 0x200,
		.gpr = "fsl,imx6q-iomuxc-gpr",
		.clk_names = imx6q_clks,
		.clks_cnt = ARRAY_SIZE(imx6q_clks),
	},
	[IMX6SX] = {
		.variant = IMX6SX,
		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
			 IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
		.gpr = "fsl,imx6q-iomuxc-gpr",
		.clk_names = imx6sx_clks,
		.clks_cnt = ARRAY_SIZE(imx6sx_clks),
	},
	[IMX6QP] = {
		.variant = IMX6QP,
		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
			 IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
		.dbi_length = 0x200,
		.gpr = "fsl,imx6q-iomuxc-gpr",
		.clk_names = imx6q_clks,
		.clks_cnt = ARRAY_SIZE(imx6q_clks),
	},
	[IMX7D] = {
		.variant = IMX7D,
		.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
		.gpr = "fsl,imx7d-iomuxc-gpr",
		.clk_names = imx6q_clks,
		.clks_cnt = ARRAY_SIZE(imx6q_clks),
	},
	[IMX8MQ] = {
		.variant = IMX8MQ,
		.gpr = "fsl,imx8mq-iomuxc-gpr",
		.clk_names = imx8mq_clks,
		.clks_cnt = ARRAY_SIZE(imx8mq_clks),
	},
	[IMX8MM] = {
		.variant = IMX8MM,
		.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
		.gpr = "fsl,imx8mm-iomuxc-gpr",
		.clk_names = imx8mm_clks,
		.clks_cnt = ARRAY_SIZE(imx8mm_clks),
	},
	[IMX8MP] = {
		.variant = IMX8MP,
		.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
		.gpr = "fsl,imx8mp-iomuxc-gpr",
		.clk_names = imx8mm_clks,
		.clks_cnt = ARRAY_SIZE(imx8mm_clks),
	},
	[IMX8MQ_EP] = {
		.variant = IMX8MQ_EP,
		.mode = DW_PCIE_EP_TYPE,
		.gpr = "fsl,imx8mq-iomuxc-gpr",
		.clk_names = imx8mq_clks,
		.clks_cnt = ARRAY_SIZE(imx8mq_clks),
	},
	[IMX8MM_EP] = {
		.variant = IMX8MM_EP,
		.mode = DW_PCIE_EP_TYPE,
		.gpr = "fsl,imx8mm-iomuxc-gpr",
		.clk_names = imx8mm_clks,
		.clks_cnt = ARRAY_SIZE(imx8mm_clks),
	},
	[IMX8MP_EP] = {
		.variant = IMX8MP_EP,
		.mode = DW_PCIE_EP_TYPE,
		.gpr = "fsl,imx8mp-iomuxc-gpr",
		.clk_names = imx8mm_clks,
		.clks_cnt = ARRAY_SIZE(imx8mm_clks),
	},
};
1493
/* DT match table; .data points into drvdata[] above */
static const struct of_device_id imx6_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie",  .data = &drvdata[IMX6Q],  },
	{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
	{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
	{ .compatible = "fsl,imx7d-pcie",  .data = &drvdata[IMX7D],  },
	{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], } ,
	{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
	{ .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
	{ .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
	{ .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
	{ .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
	{},
};
1507
static struct platform_driver imx6_pcie_driver = {
	.driver = {
		.name = "imx6q-pcie",
		.of_match_table = imx6_pcie_of_match,
		/* No remove callback, so disallow unbind via sysfs */
		.suppress_bind_attrs = true,
		.pm = &imx6_pcie_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = imx6_pcie_probe,
	.shutdown = imx6_pcie_shutdown,
};
1519
imx6_pcie_quirk(struct pci_dev * dev)1520 static void imx6_pcie_quirk(struct pci_dev *dev)
1521 {
1522 struct pci_bus *bus = dev->bus;
1523 struct dw_pcie_rp *pp = bus->sysdata;
1524
1525 /* Bus parent is the PCI bridge, its parent is this platform driver */
1526 if (!bus->dev.parent || !bus->dev.parent->parent)
1527 return;
1528
1529 /* Make sure we only quirk devices associated with this driver */
1530 if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver)
1531 return;
1532
1533 if (pci_is_root_bus(bus)) {
1534 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
1535 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
1536
1537 /*
1538 * Limit config length to avoid the kernel reading beyond
1539 * the register set and causing an abort on i.MX 6Quad
1540 */
1541 if (imx6_pcie->drvdata->dbi_length) {
1542 dev->cfg_size = imx6_pcie->drvdata->dbi_length;
1543 dev_info(&dev->dev, "Limiting cfg_size to %d\n",
1544 dev->cfg_size);
1545 }
1546 }
1547 }
1548 DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
1549 PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk);
1550
imx6_pcie_init(void)1551 static int __init imx6_pcie_init(void)
1552 {
1553 #ifdef CONFIG_ARM
1554 struct device_node *np;
1555
1556 np = of_find_matching_node(NULL, imx6_pcie_of_match);
1557 if (!np)
1558 return -ENODEV;
1559 of_node_put(np);
1560
1561 /*
1562 * Since probe() can be deferred we need to make sure that
1563 * hook_fault_code is not called after __init memory is freed
1564 * by kernel and since imx6q_pcie_abort_handler() is a no-op,
1565 * we can install the handler here without risking it
1566 * accessing some uninitialized driver state.
1567 */
1568 hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
1569 "external abort on non-linefetch");
1570 #endif
1571
1572 return platform_driver_register(&imx6_pcie_driver);
1573 }
1574 device_initcall(imx6_pcie_init);
1575