// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 *		https://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
#include <linux/phy/phy.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include "pcie-designware.h"

#define IMX8MQ_GPR_PCIE_REF_USE_PAD		BIT(9)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN	BIT(10)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE	BIT(11)
#define IMX8MQ_GPR_PCIE_VREG_BYPASS		BIT(12)
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE	GENMASK(11, 8)
#define IMX8MQ_PCIE2_BASE_ADDR			0x33c00000

#define to_imx6_pcie(x)	dev_get_drvdata((x)->dev)

enum imx6_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX7D,
	IMX8MQ,
	IMX8MM,
};

#define IMX6_PCIE_FLAG_IMX6_PHY			BIT(0)
#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE	BIT(1)
#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND		BIT(2)

struct imx6_pcie_drvdata {
	enum imx6_pcie_variants variant;
	u32 flags;
	int dbi_length;
};

struct imx6_pcie {
	struct dw_pcie		*pci;
	int			reset_gpio;
	bool			gpio_active_high;
	struct clk		*pcie_bus;
	struct clk		*pcie_phy;
	struct clk		*pcie_inbound_axi;
	struct clk		*pcie;
	struct clk		*pcie_aux;
	struct regmap		*iomuxc_gpr;
	u32			controller_id;
	struct reset_control	*pciephy_reset;
	struct reset_control	*apps_reset;
	struct reset_control	*turnoff_reset;
	u32			tx_deemph_gen1;
	u32			tx_deemph_gen2_3p5db;
	u32			tx_deemph_gen2_6db;
	u32			tx_swing_full;
	u32			tx_swing_low;
	struct regulator	*vpcie;
	struct regulator	*vph;
	void __iomem		*phy_base;

	/* power domain for pcie */
	struct device		*pd_pcie;
	/* power domain for pcie phy */
	struct device		*pd_pcie_phy;
	struct phy		*phy;
	const struct imx6_pcie_drvdata *drvdata;
};

/* Parameters for waiting for the PCIe PHY PLL to lock on i.MX7 */
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200
#define PHY_PLL_LOCK_WAIT_TIMEOUT	(2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700

#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA(x)		FIELD_PREP(GENMASK(15, 0), (x))
#define PCIE_PHY_CTRL_CAP_ADR		BIT(16)
#define PCIE_PHY_CTRL_CAP_DAT		BIT(17)
#define PCIE_PHY_CTRL_WR		BIT(18)
#define PCIE_PHY_CTRL_RD		BIT(19)

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK		BIT(16)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD			0x10
#define  PCIE_PHY_ATEOVRD_EN			BIT(2)
#define  PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT	0
#define  PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK	0x1

#define PCIE_PHY_MPLL_OVRD_IN_LO		0x11
#define  PCIE_PHY_MPLL_MULTIPLIER_SHIFT		2
#define  PCIE_PHY_MPLL_MULTIPLIER_MASK		0x7f
#define  PCIE_PHY_MPLL_MULTIPLIER_OVRD		BIT(9)

#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)

/* iMX7 PCIe PHY registers */
#define PCIE_PHY_CMN_REG4		0x14
/* These are probably the bits that *aren't* DCC_FB_EN */
#define PCIE_PHY_CMN_REG4_DCC_FB_EN	0x29

#define PCIE_PHY_CMN_REG15		0x54
#define PCIE_PHY_CMN_REG15_DLY_4	BIT(2)
#define PCIE_PHY_CMN_REG15_PLL_PD	BIT(5)
#define PCIE_PHY_CMN_REG15_OVRD_PLL_PD	BIT(7)

#define PCIE_PHY_CMN_REG24		0x90
#define PCIE_PHY_CMN_REG24_RX_EQ	BIT(6)
#define PCIE_PHY_CMN_REG24_RX_EQ_SEL	BIT(3)

#define PCIE_PHY_CMN_REG26		0x98
#define PCIE_PHY_CMN_REG26_ATT_MODE	0xBC

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN		BIT(5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN		BIT(3)

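/*
 * The PHY's internal (non memory-mapped) registers are reached through a
 * simple handshake in the port logic space: PCIE_PHY_CTRL carries the
 * address/data value and the capture/read/write strobes, while
 * PCIE_PHY_STAT reflects the PHY's acknowledge.  Every strobe is held
 * until the ack is seen, then released until the ack clears again.
 */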
static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	bool val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
			PCIE_PHY_STAT_ACK;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val;
	int ret;

	val = PCIE_PHY_CTRL_DATA(addr);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	val |= PCIE_PHY_CTRL_CAP_ADR;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	ret = pcie_phy_poll_ack(imx6_pcie, true);
	if (ret)
		return ret;

	val = PCIE_PHY_CTRL_DATA(addr);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	return pcie_phy_poll_ack(imx6_pcie, false);
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = PCIE_PHY_CTRL_RD;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);

	ret = pcie_phy_poll_ack(imx6_pcie, true);
	if (ret)
		return ret;

	*data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);

	/* deassert Read signal */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);

	return pcie_phy_poll_ack(imx6_pcie, false);
}

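/*
 * Writes use the same handshake: capture the address, capture the data,
 * then pulse the write strobe, waiting for an ack after each step.
 */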
static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= PCIE_PHY_CTRL_CAP_DAT;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	ret = pcie_phy_poll_ack(imx6_pcie, true);
	if (ret)
		return ret;

	/* deassert cap data */
	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, false);
	if (ret)
		return ret;

	/* assert wr signal */
	var = PCIE_PHY_CTRL_WR;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack */
	ret = pcie_phy_poll_ack(imx6_pcie, true);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, false);
	if (ret)
		return ret;

	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);

	return 0;
}

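/*
 * Kick the i.MX6-style PHY receiver back into a known state (typically
 * after a failed link-up) by overriding RX_DATA_EN/RX_PLL_EN and then
 * releasing the override again.
 */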
static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
	u16 tmp;

	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
		return;

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);

	usleep_range(2000, 3000);

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		  PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
}

#ifdef CONFIG_ARM
/* Added for PCI abort handling */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);
	unsigned long instr = *(unsigned long *)pc;
	int reg = (instr >> 12) & 15;

	/*
	 * If the instruction being executed was a read,
	 * make it look like it read all-ones.
	 */
	if ((instr & 0x0c100000) == 0x04100000) {
		unsigned long val;

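		/* Bit 22 is the byte/word flag: byte loads (LDRB) read back
		 * 0xff, word loads read back all-ones. */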
		if (instr & 0x00400000)
			val = 255;
		else
			val = -1;

		regs->uregs[reg] = val;
		regs->ARM_pc += 4;
		return 0;
	}

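	/* The "extra" load encodings (halfword/signed-byte loads) are faked
	 * as all-ones in the same way. */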
	if ((instr & 0x0e100090) == 0x00100090) {
		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
		return 0;
	}

	return 1;
}
#endif

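/*
 * On SoCs where the PCIe core and its PHY sit in separate power domains,
 * attach to both domains by name and add device links so they stay
 * powered (and runtime-resumed) for as long as the driver is bound.
 */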
static int imx6_pcie_attach_pd(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
	struct device_link *link;

	/* Do nothing when in a single power domain */
	if (dev->pm_domain)
		return 0;

	imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
	if (IS_ERR(imx6_pcie->pd_pcie))
		return PTR_ERR(imx6_pcie->pd_pcie);
	/* Do nothing when power domain missing */
	if (!imx6_pcie->pd_pcie)
		return 0;
	link = device_link_add(dev, imx6_pcie->pd_pcie,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie pd.\n");
		return -EINVAL;
	}

	imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pd_pcie_phy))
		return PTR_ERR(imx6_pcie->pd_pcie_phy);

	link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
		return -EINVAL;
	}

	return 0;
}

static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct device *dev = imx6_pcie->pci->dev;

	switch (imx6_pcie->drvdata->variant) {
	case IMX7D:
	case IMX8MQ:
		reset_control_assert(imx6_pcie->pciephy_reset);
		fallthrough;
	case IMX8MM:
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
		/* Force PCIe PHY reset */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST,
				   IMX6Q_GPR1_PCIE_SW_RST);
		break;
	case IMX6Q:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
		break;
	}

	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		int ret = regulator_disable(imx6_pcie->vpcie);

		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}

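/*
 * On i.MX8M each PCIe controller has its own block of GPR control bits:
 * GPR14 for the first controller and GPR16 for the second one.
 */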
static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
{
	WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
		imx6_pcie->drvdata->variant != IMX8MM);
	return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}

static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	unsigned int offset;
	int ret = 0;

	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
		ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
		if (ret) {
			dev_err(dev, "unable to enable pcie_axi clock\n");
			break;
		}

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
		break;
	case IMX6QP:
	case IMX6Q:
		/* power up core phy and enable ref clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
		/*
		 * The async reset input needs the ref clock to sync
		 * internally.  If the ref clock only comes up after reset is
		 * released, the internally synced reset pulse is too short to
		 * meet the requirement, so add a ~10us delay here.
		 */
		usleep_range(10, 100);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
		break;
	case IMX7D:
		break;
	case IMX8MM:
	case IMX8MQ:
		ret = clk_prepare_enable(imx6_pcie->pcie_aux);
		if (ret) {
			dev_err(dev, "unable to enable pcie_aux clock\n");
			break;
		}

		offset = imx6_pcie_grp_offset(imx6_pcie);
		/*
		 * Drive the CLK_REQ override value low and enable the
		 * override to make sure that REF_CLK stays turned on.
		 */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
				   0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
		break;
	}

	return ret;
}

static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
{
	u32 val;
	struct device *dev = imx6_pcie->pci->dev;

	if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
				     IOMUXC_GPR22, val,
				     val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
				     PHY_PLL_LOCK_WAIT_USLEEP_MAX,
				     PHY_PLL_LOCK_WAIT_TIMEOUT))
		dev_err(dev, "PCIe PLL lock timeout\n");
}

static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
		ret = regulator_enable(imx6_pcie->vpcie);
		if (ret) {
			dev_err(dev, "failed to enable vpcie regulator: %d\n",
				ret);
			return;
		}
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
	if (ret) {
		dev_err(dev, "unable to enable pcie_phy clock\n");
		goto err_pcie_phy;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
	if (ret) {
		dev_err(dev, "unable to enable pcie_bus clock\n");
		goto err_pcie_bus;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie clock\n");
		goto err_pcie;
	}

	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie ref clock\n");
		goto err_ref_clk;
	}

	switch (imx6_pcie->drvdata->variant) {
	case IMX8MM:
		if (phy_power_on(imx6_pcie->phy))
			dev_err(dev, "unable to power on PHY\n");
		break;
	default:
		break;
	}
	/* allow the clocks to stabilize */
	usleep_range(200, 500);

	/* Some boards don't have PCIe reset GPIO. */
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					imx6_pcie->gpio_active_high);
		msleep(100);
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					!imx6_pcie->gpio_active_high);
	}

	switch (imx6_pcie->drvdata->variant) {
	case IMX8MQ:
		reset_control_deassert(imx6_pcie->pciephy_reset);
		break;
	case IMX8MM:
		if (phy_init(imx6_pcie->phy))
			dev_err(dev, "waiting for PHY ready timed out!\n");
		break;
	case IMX7D:
		reset_control_deassert(imx6_pcie->pciephy_reset);

		/* Workaround for ERR010728: failure of the PCIe PLL VCO to
		 * oscillate, especially when cold.  This turns off "Duty-cycle
		 * Corrector" and other mysterious undocumented things.
		 */
		if (likely(imx6_pcie->phy_base)) {
			/* De-assert DCC_FB_EN */
			writel(PCIE_PHY_CMN_REG4_DCC_FB_EN,
			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG4);
			/* Assert RX_EQS and RX_EQS_SEL */
			writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL
				| PCIE_PHY_CMN_REG24_RX_EQ,
			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG24);
			/* Assert ATT_MODE */
			writel(PCIE_PHY_CMN_REG26_ATT_MODE,
			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG26);
		} else {
			dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle?\n");
		}

		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST, 0);

		usleep_range(200, 500);
		break;
	case IMX6Q:		/* Nothing to do */
		break;
	}

	return;

err_ref_clk:
	clk_disable_unprepare(imx6_pcie->pcie);
err_pcie:
	clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
	clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		ret = regulator_disable(imx6_pcie->vpcie);
		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}

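/*
 * Configure the controller as a Root Complex.  The device-type field lives
 * in GPR12; the second controller on i.MX8MQ has its own copy of the field.
 */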
static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
{
	unsigned int mask, val;

	if (imx6_pcie->drvdata->variant == IMX8MQ &&
	    imx6_pcie->controller_id == 1) {
		mask   = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
		val    = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
				    PCI_EXP_TYPE_ROOT_PORT);
	} else {
		mask = IMX6Q_GPR12_DEVICE_TYPE;
		val  = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
				  PCI_EXP_TYPE_ROOT_PORT);
	}

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
}

static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
	switch (imx6_pcie->drvdata->variant) {
	case IMX8MM:
		/*
		 * PHY initialization is handled by the standalone PHY
		 * driver, so there is nothing to do here.
		 */
		break;
	case IMX8MQ:
		/*
		 * TODO: Currently this code assumes external
		 * oscillator is being used
		 */
		regmap_update_bits(imx6_pcie->iomuxc_gpr,
				   imx6_pcie_grp_offset(imx6_pcie),
				   IMX8MQ_GPR_PCIE_REF_USE_PAD,
				   IMX8MQ_GPR_PCIE_REF_USE_PAD);
		/*
		 * Per the datasheet, PCIE_VPH should be supplied at 1.8V.
		 * If it is supplied at 3.3V instead, VREG_BYPASS must be
		 * cleared to zero.
		 */
		if (imx6_pcie->vph &&
		    regulator_get_voltage(imx6_pcie->vph) > 3000000)
			regmap_update_bits(imx6_pcie->iomuxc_gpr,
					   imx6_pcie_grp_offset(imx6_pcie),
					   IMX8MQ_GPR_PCIE_VREG_BYPASS,
					   0);
		break;
	case IMX7D:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
				   IMX6SX_GPR12_PCIE_RX_EQ_2);
		fallthrough;
	default:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

		/* configure constant input signal to the pcie ctrl and phy */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
				   imx6_pcie->tx_deemph_gen1 << 0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
				   imx6_pcie->tx_deemph_gen2_6db << 12);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_FULL,
				   imx6_pcie->tx_swing_full << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_LOW,
				   imx6_pcie->tx_swing_low << 25);
		break;
	}

	imx6_pcie_configure_type(imx6_pcie);
}

static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
{
	unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
	int mult, div;
	u16 val;

	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
		return 0;

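	/*
	 * Pick an MPLL multiplier / refclk divider pair for the reference
	 * clock actually provided.  The values below are believed to keep
	 * the MPLL output at the rate the 125 MHz default produces (e.g.
	 * 100 MHz * 25 = 2.5 GHz, 200 MHz / 2 * 25 = 2.5 GHz).
	 */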
	switch (phy_rate) {
	case 125000000:
		/*
		 * The default settings of the MPLL are for a 125MHz input
		 * clock, so no need to reconfigure anything in that case.
		 */
		return 0;
	case 100000000:
		mult = 25;
		div = 0;
		break;
	case 200000000:
		mult = 25;
		div = 1;
		break;
	default:
		dev_err(imx6_pcie->pci->dev,
			"Unsupported PHY reference clock rate %lu\n", phy_rate);
		return -EINVAL;
	}

	pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
	val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
		 PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
	val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
	val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
	pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);

	pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
	val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
		 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
	val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
	val |= PCIE_PHY_ATEOVRD_EN;
	pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);

	return 0;
}

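/*
 * The DWC core clears PORT_LOGIC_SPEED_CHANGE once a directed speed change
 * completes, so poll the bit (200 iterations of 100-1000 us) before
 * declaring a timeout.
 */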
static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		/* Test if the speed change finished. */
		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
			return 0;
		usleep_range(100, 1000);
	}

	dev_err(dev, "Speed change timeout\n");
	return -ETIMEDOUT;
}

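/*
 * Let the link training state machine run: the i.MX6 variants gate the
 * LTSSM with an enable bit (PCIE_CTL_2) in GPR12, while i.MX7D and i.MX8M
 * release the "apps" reset instead.
 */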
static void imx6_pcie_ltssm_enable(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	switch (imx6_pcie->drvdata->variant) {
	case IMX6Q:
	case IMX6SX:
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2,
				   IMX6Q_GPR12_PCIE_CTL_2);
		break;
	case IMX7D:
	case IMX8MQ:
	case IMX8MM:
		reset_control_deassert(imx6_pcie->apps_reset);
		break;
	}
}

static int imx6_pcie_start_link(struct dw_pcie *pci)
{
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
	struct device *dev = pci->dev;
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 tmp;
	int ret;

	/*
	 * Force Gen1 operation when starting the link.  In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all.  This happens with PCIe switches.
	 */
	tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
	tmp &= ~PCI_EXP_LNKCAP_SLS;
	tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);

	/* Start LTSSM. */
	imx6_pcie_ltssm_enable(dev);

	dw_pcie_wait_for_link(pci);

	if (pci->link_gen == 2) {
		/* Allow Gen2 mode after the link is up. */
		tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
		tmp &= ~PCI_EXP_LNKCAP_SLS;
		tmp |= PCI_EXP_LNKCAP_SLS_5_0GB;
		dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);

		/*
		 * Start Directed Speed Change so the best possible
		 * speed both link partners support can be negotiated.
		 */
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		tmp |= PORT_LOGIC_SPEED_CHANGE;
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);

		if (imx6_pcie->drvdata->flags &
		    IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
			/*
			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
			 * from the i.MX6 family when no link speed transition
			 * occurs (the link simply stays at Gen1): the bit is
			 * not cleared by hardware, which would make the check
			 * below report a false failure.  Hence the wait is
			 * only done on i.MX6.
			 */

			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
			if (ret) {
				dev_err(dev, "Failed to bring link up!\n");
				goto err_reset_phy;
			}
		}

		/* Make sure link training is finished as well! */
		dw_pcie_wait_for_link(pci);
	} else {
		dev_info(dev, "Link: Gen2 disabled\n");
	}

	tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
	dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
	return 0;

err_reset_phy:
	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
	imx6_pcie_reset_phy(imx6_pcie);
	return ret;
}

static int imx6_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	imx6_setup_phy_mpll(imx6_pcie);

	return 0;
}

static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
	.host_init = imx6_pcie_host_init,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.start_link = imx6_pcie_start_link,
};

#ifdef CONFIG_PM_SLEEP
static void imx6_pcie_ltssm_disable(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0);
		break;
	case IMX7D:
	case IMX8MM:
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	default:
		dev_err(dev, "ltssm_disable not supported\n");
	}
}

static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
{
	struct device *dev = imx6_pcie->pci->dev;

	/* Some variants have a turnoff reset in DT */
	if (imx6_pcie->turnoff_reset) {
		reset_control_assert(imx6_pcie->turnoff_reset);
		reset_control_deassert(imx6_pcie->turnoff_reset);
		goto pm_turnoff_sleep;
	}

	/* Others poke directly at IOMUXC registers */
	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				IMX6SX_GPR12_PCIE_PM_TURN_OFF,
				IMX6SX_GPR12_PCIE_PM_TURN_OFF);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
		break;
	default:
		dev_err(dev, "PME_Turn_Off not implemented\n");
		return;
	}

	/*
	 * Components with an upstream port must respond to
	 * PME_Turn_Off with PME_TO_Ack but we can't check.
	 *
	 * The standard recommends a 1-10ms timeout after which to
	 * proceed anyway as if acks were received.
	 */
pm_turnoff_sleep:
	usleep_range(1000, 10000);
}

static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
{
	clk_disable_unprepare(imx6_pcie->pcie);
	clk_disable_unprepare(imx6_pcie->pcie_phy);
	clk_disable_unprepare(imx6_pcie->pcie_bus);

	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
		clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
		break;
	case IMX7D:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
		break;
	case IMX8MQ:
	case IMX8MM:
		clk_disable_unprepare(imx6_pcie->pcie_aux);
		break;
	default:
		break;
	}
}

static int imx6_pcie_suspend_noirq(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
		return 0;

	imx6_pcie_pm_turnoff(imx6_pcie);
	imx6_pcie_ltssm_disable(dev);
	imx6_pcie_clk_disable(imx6_pcie);
	switch (imx6_pcie->drvdata->variant) {
	case IMX8MM:
		if (phy_power_off(imx6_pcie->phy))
			dev_err(dev, "unable to power off PHY\n");
		phy_exit(imx6_pcie->phy);
		break;
	default:
		break;
	}

	return 0;
}

static int imx6_pcie_resume_noirq(struct device *dev)
{
	int ret;
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
	struct pcie_port *pp = &imx6_pcie->pci->pp;

	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
		return 0;

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	dw_pcie_setup_rc(pp);

	ret = imx6_pcie_start_link(imx6_pcie->pci);
	if (ret < 0)
		dev_info(dev, "pcie link is down after resume.\n");

	return 0;
}
#endif

static const struct dev_pm_ops imx6_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
				      imx6_pcie_resume_noirq)
};

static int imx6_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct imx6_pcie *imx6_pcie;
	struct device_node *np;
	struct resource *dbi_base;
	struct device_node *node = dev->of_node;
	int ret;
	u16 val;

	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
	if (!imx6_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pci->pp.ops = &imx6_pcie_host_ops;

	imx6_pcie->pci = pci;
	imx6_pcie->drvdata = of_device_get_match_data(dev);

	/* Find the PHY if one is defined; only i.MX7D uses it */
	np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
	if (np) {
		struct resource res;

		ret = of_address_to_resource(np, 0, &res);
		if (ret) {
			dev_err(dev, "Unable to map PCIe PHY\n");
			return ret;
		}
		imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
		if (IS_ERR(imx6_pcie->phy_base))
			return PTR_ERR(imx6_pcie->phy_base);
	}

	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/* Fetch GPIOs */
	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
	imx6_pcie->gpio_active_high = of_property_read_bool(node,
						"reset-gpio-active-high");
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
				imx6_pcie->gpio_active_high ?
					GPIOF_OUT_INIT_HIGH :
					GPIOF_OUT_INIT_LOW,
				"PCIe reset");
		if (ret) {
			dev_err(dev, "unable to get reset gpio\n");
			return ret;
		}
	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
		return imx6_pcie->reset_gpio;
	}

	/* Fetch clocks */
	imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(imx6_pcie->pcie_bus))
		return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_bus),
				     "pcie_bus clock source missing or invalid\n");

	imx6_pcie->pcie = devm_clk_get(dev, "pcie");
	if (IS_ERR(imx6_pcie->pcie))
		return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie),
				     "pcie clock source missing or invalid\n");

	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
							   "pcie_inbound_axi");
		if (IS_ERR(imx6_pcie->pcie_inbound_axi))
			return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_inbound_axi),
					     "pcie_inbound_axi clock missing or invalid\n");
		break;
	case IMX8MQ:
		imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
		if (IS_ERR(imx6_pcie->pcie_aux))
			return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
					     "pcie_aux clock source missing or invalid\n");
		fallthrough;
	case IMX7D:
		if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
			imx6_pcie->controller_id = 1;

		imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
									    "pciephy");
		if (IS_ERR(imx6_pcie->pciephy_reset)) {
			dev_err(dev, "Failed to get PCIEPHY reset control\n");
			return PTR_ERR(imx6_pcie->pciephy_reset);
		}

		imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
									 "apps");
		if (IS_ERR(imx6_pcie->apps_reset)) {
			dev_err(dev, "Failed to get PCIE APPS reset control\n");
			return PTR_ERR(imx6_pcie->apps_reset);
		}
		break;
	case IMX8MM:
		imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
		if (IS_ERR(imx6_pcie->pcie_aux))
			return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
					     "pcie_aux clock source missing or invalid\n");
		imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
									 "apps");
		if (IS_ERR(imx6_pcie->apps_reset))
			return dev_err_probe(dev, PTR_ERR(imx6_pcie->apps_reset),
					     "failed to get pcie apps reset control\n");

		imx6_pcie->phy = devm_phy_get(dev, "pcie-phy");
		if (IS_ERR(imx6_pcie->phy))
			return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy),
					     "failed to get pcie phy\n");

		break;
	default:
		break;
	}
	/* Don't fetch the pcie_phy clock if a separate PHY driver handles the PHY */
	if (imx6_pcie->phy == NULL) {
		imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
		if (IS_ERR(imx6_pcie->pcie_phy))
			return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy),
					     "pcie_phy clock source missing or invalid\n");
	}

	/* Grab turnoff reset */
	imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
	if (IS_ERR(imx6_pcie->turnoff_reset)) {
		dev_err(dev, "Failed to get TURNOFF reset control\n");
		return PTR_ERR(imx6_pcie->turnoff_reset);
	}

	/* Grab GPR config register range */
	imx6_pcie->iomuxc_gpr =
		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
		dev_err(dev, "unable to find iomuxc registers\n");
		return PTR_ERR(imx6_pcie->iomuxc_gpr);
	}

	/* Grab PCIe PHY Tx Settings */
	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
				 &imx6_pcie->tx_deemph_gen1))
		imx6_pcie->tx_deemph_gen1 = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
				 &imx6_pcie->tx_deemph_gen2_3p5db))
		imx6_pcie->tx_deemph_gen2_3p5db = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
				 &imx6_pcie->tx_deemph_gen2_6db))
		imx6_pcie->tx_deemph_gen2_6db = 20;

	if (of_property_read_u32(node, "fsl,tx-swing-full",
				 &imx6_pcie->tx_swing_full))
		imx6_pcie->tx_swing_full = 127;

	if (of_property_read_u32(node, "fsl,tx-swing-low",
				 &imx6_pcie->tx_swing_low))
		imx6_pcie->tx_swing_low = 127;

	/* Limit link speed: default to Gen1 unless DT raises it via fsl,max-link-speed */
	pci->link_gen = 1;
	of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen);

	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
	if (IS_ERR(imx6_pcie->vpcie)) {
		if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
			return PTR_ERR(imx6_pcie->vpcie);
		imx6_pcie->vpcie = NULL;
	}

	imx6_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
	if (IS_ERR(imx6_pcie->vph)) {
		if (PTR_ERR(imx6_pcie->vph) != -ENODEV)
			return PTR_ERR(imx6_pcie->vph);
		imx6_pcie->vph = NULL;
	}

	platform_set_drvdata(pdev, imx6_pcie);

	ret = imx6_pcie_attach_pd(dev);
	if (ret)
		return ret;

	ret = dw_pcie_host_init(&pci->pp);
	if (ret < 0)
		return ret;

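	/*
	 * The MSI Enable bit in the Root Port's own MSI capability is
	 * understood to also gate MSIs arriving from downstream devices on
	 * this core, so set it here even though the Root Port itself does
	 * not signal MSI.
	 */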
	if (pci_msi_enabled()) {
		u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
		val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
		val |= PCI_MSI_FLAGS_ENABLE;
		dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
	}

	return 0;
}

static void imx6_pcie_shutdown(struct platform_device *pdev)
{
	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);

	/* bring down link, so bootloader gets clean state in case of reboot */
	imx6_pcie_assert_core_reset(imx6_pcie);
}

static const struct imx6_pcie_drvdata drvdata[] = {
	[IMX6Q] = {
		.variant = IMX6Q,
		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
		.dbi_length = 0x200,
	},
	[IMX6SX] = {
		.variant = IMX6SX,
		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
			 IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
	},
	[IMX6QP] = {
		.variant = IMX6QP,
		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
			 IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
		.dbi_length = 0x200,
	},
	[IMX7D] = {
		.variant = IMX7D,
		.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
	},
	[IMX8MQ] = {
		.variant = IMX8MQ,
	},
	[IMX8MM] = {
		.variant = IMX8MM,
		.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
	},
};

static const struct of_device_id imx6_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie",  .data = &drvdata[IMX6Q],  },
	{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
	{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
	{ .compatible = "fsl,imx7d-pcie",  .data = &drvdata[IMX7D],  },
	{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
	{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
	{},
};

static struct platform_driver imx6_pcie_driver = {
	.driver = {
		.name	= "imx6q-pcie",
		.of_match_table = imx6_pcie_of_match,
		.suppress_bind_attrs = true,
		.pm = &imx6_pcie_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe    = imx6_pcie_probe,
	.shutdown = imx6_pcie_shutdown,
};

static void imx6_pcie_quirk(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;
	struct pcie_port *pp = bus->sysdata;

	/* Bus parent is the PCI bridge, its parent is this platform driver */
	if (!bus->dev.parent || !bus->dev.parent->parent)
		return;

	/* Make sure we only quirk devices associated with this driver */
	if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver)
		return;

	if (pci_is_root_bus(bus)) {
		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
		struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);

		/*
		 * Limit config length to avoid the kernel reading beyond
		 * the register set and causing an abort on i.MX 6Quad
		 */
		if (imx6_pcie->drvdata->dbi_length) {
			dev->cfg_size = imx6_pcie->drvdata->dbi_length;
			dev_info(&dev->dev, "Limiting cfg_size to %d\n",
					dev->cfg_size);
		}
	}
}
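/*
 * 0xabcd is believed to be the default device ID the Synopsys DesignWare
 * root port advertises when the platform has not programmed its own, which
 * is the case here.
 */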
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
			PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk);

static int __init imx6_pcie_init(void)
{
#ifdef CONFIG_ARM
	/*
	 * Since probe() can be deferred we need to make sure that
	 * hook_fault_code is not called after __init memory is freed
	 * by the kernel, and since imx6q_pcie_abort_handler() touches no
	 * driver state, we can install the handler here without risking
	 * it accessing some uninitialized driver state.
	 */
	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
			"external abort on non-linefetch");
#endif

	return platform_driver_register(&imx6_pcie_driver);
}
device_initcall(imx6_pcie_init);