1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Qualcomm PCIe root complex driver
4  *
5  * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
6  * Copyright 2015 Linaro Limited.
7  *
8  * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
9  */
10 
11 #include <linux/clk.h>
12 #include <linux/delay.h>
13 #include <linux/gpio/consumer.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/iopoll.h>
17 #include <linux/kernel.h>
18 #include <linux/init.h>
19 #include <linux/of_device.h>
20 #include <linux/of_gpio.h>
21 #include <linux/pci.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/platform_device.h>
24 #include <linux/phy/phy.h>
25 #include <linux/regulator/consumer.h>
26 #include <linux/reset.h>
27 #include <linux/slab.h>
28 #include <linux/types.h>
29 
30 #include "../../pci.h"
31 #include "pcie-designware.h"
32 
33 #define PCIE20_PARF_SYS_CTRL			0x00
34 #define MST_WAKEUP_EN				BIT(13)
35 #define SLV_WAKEUP_EN				BIT(12)
36 #define MSTR_ACLK_CGC_DIS			BIT(10)
37 #define SLV_ACLK_CGC_DIS			BIT(9)
38 #define CORE_CLK_CGC_DIS			BIT(6)
39 #define AUX_PWR_DET				BIT(4)
40 #define L23_CLK_RMV_DIS				BIT(2)
41 #define L1_CLK_RMV_DIS				BIT(1)
42 
43 #define PCIE20_PARF_PHY_CTRL			0x40
44 #define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
45 #define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		((x) << 16)
46 
47 #define PCIE20_PARF_PHY_REFCLK			0x4C
48 #define PHY_REFCLK_SSP_EN			BIT(16)
49 #define PHY_REFCLK_USE_PAD			BIT(12)
50 
51 #define PCIE20_PARF_DBI_BASE_ADDR		0x168
52 #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
53 #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
54 #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
55 #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
56 #define PCIE20_PARF_LTSSM			0x1B0
57 #define PCIE20_PARF_SID_OFFSET			0x234
58 #define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C
59 #define PCIE20_PARF_DEVICE_TYPE			0x1000
60 
61 #define PCIE20_ELBI_SYS_CTRL			0x04
62 #define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)
63 
64 #define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818
65 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
66 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
67 #define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c
68 #define CFG_BRIDGE_SB_INIT			BIT(0)
69 
70 #define PCIE_CAP_LINK1_VAL			0x2FD7F
71 
72 #define PCIE20_PARF_Q2A_FLUSH			0x1AC
73 
74 #define PCIE20_MISC_CONTROL_1_REG		0x8BC
75 #define DBI_RO_WR_EN				1
76 
77 #define PERST_DELAY_US				1000
78 /* PARF registers */
79 #define PCIE20_PARF_PCS_DEEMPH			0x34
80 #define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		((x) << 16)
81 #define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	((x) << 8)
82 #define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	((x) << 0)
83 
84 #define PCIE20_PARF_PCS_SWING			0x38
85 #define PCS_SWING_TX_SWING_FULL(x)		((x) << 8)
86 #define PCS_SWING_TX_SWING_LOW(x)		((x) << 0)
87 
88 #define PCIE20_PARF_CONFIG_BITS		0x50
89 #define PHY_RX0_EQ(x)				((x) << 24)
90 
91 #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358
92 #define SLV_ADDR_SPACE_SZ			0x10000000
93 
94 #define PCIE20_LNK_CONTROL2_LINK_STATUS2	0xa0
95 
96 #define DEVICE_TYPE_RC				0x4
97 
98 #define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
99 #define QCOM_PCIE_2_1_0_MAX_CLOCKS	5
/* Resources for the v2.1.0 core (see qcom_pcie_*_2_1_0() below) */
struct qcom_pcie_resources_2_1_0 {
	/* iface/core/phy are required, aux/ref optional — see get_resources */
	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct reset_control *ext_reset;	/* optional external reset */
	/* vdda, vdda_phy, vdda_refclk */
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};
110 
/* Resources for the v1.0.0 core (see qcom_pcie_*_1_0_0() below) */
struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};
119 
#define QCOM_PCIE_2_3_2_MAX_SUPPLY	2
/* Resources for the v2.3.2 core (see qcom_pcie_*_2_3_2() below) */
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;	/* enabled in post_init, not init */
	/* vdda, vddpe-3v3 */
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};
129 
#define QCOM_PCIE_2_4_0_MAX_CLOCKS	4
/* Resources for the v2.4.0 core (see qcom_pcie_*_2_4_0() below) */
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	int num_clks;	/* 3 on IPQ4019 (no "iface"), otherwise 4 */
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	/* pipe/vmid/xpu/parf/phy resets are only acquired on IPQ4019 */
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;	/* IPQ4019 only */
};
147 
/* Resources for the v2.3.3 core (see qcom_pcie_*_2_3_3() below) */
struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	/* axi_m, axi_s, pipe, axi_m_sticky, sticky, ahb, sleep */
	struct reset_control *rst[7];
};
156 
/*
 * Resources for the v2.7.0 core. The matching qcom_pcie_*_2_7_0()
 * handlers are outside this chunk.
 */
struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data clks[6];
	struct regulator_bulk_data supplies[2];
	struct reset_control *pci_reset;
	struct clk *pipe_clk;
};
163 
/*
 * Exactly one variant's resource bundle is in use at a time (selected by
 * the ops table), so a union keeps struct qcom_pcie compact.
 */
union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
};
172 
173 struct qcom_pcie;
174 
/* Per-IP-version hooks; post_init/post_deinit/ltssm_enable may be NULL */
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);	/* acquire clks/resets/supplies */
	int (*init)(struct qcom_pcie *pcie);		/* power up and configure the core */
	int (*post_init)(struct qcom_pcie *pcie);	/* optional extra init step */
	void (*deinit)(struct qcom_pcie *pcie);		/* undo init */
	void (*post_deinit)(struct qcom_pcie *pcie);	/* undo post_init */
	void (*ltssm_enable)(struct qcom_pcie *pcie);	/* start link training */
};
183 
/* Driver state for one root complex instance */
struct qcom_pcie {
	struct dw_pcie *pci;			/* DesignWare core handle */
	void __iomem *parf;			/* PARF registers (DT "parf") */
	void __iomem *elbi;			/* ELBI registers (DT "elbi") */
	union qcom_pcie_resources res;		/* variant-specific resources */
	struct phy *phy;
	struct gpio_desc *reset;		/* endpoint PERST# GPIO */
	const struct qcom_pcie_ops *ops;	/* selected per compatible */
};
193 
194 #define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
195 
/* Assert PERST# to hold the endpoint in reset */
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	/* let the assertion settle before the caller proceeds */
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
201 
/* Release PERST# so the endpoint comes out of reset */
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(100);
	gpiod_set_value_cansleep(pcie->reset, 0);
	/* give the endpoint time to exit reset */
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
209 
210 static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
211 {
212 	struct dw_pcie *pci = pcie->pci;
213 
214 	if (dw_pcie_link_up(pci))
215 		return 0;
216 
217 	/* Enable Link Training state machine */
218 	if (pcie->ops->ltssm_enable)
219 		pcie->ops->ltssm_enable(pcie);
220 
221 	return dw_pcie_wait_for_link(pci);
222 }
223 
224 static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
225 {
226 	u32 val;
227 
228 	/* enable link training */
229 	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
230 	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
231 	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
232 }
233 
/*
 * Acquire supplies, clocks and resets for the v2.1.0 core.
 *
 * Returns 0 on success or a negative errno. Everything is devm-managed,
 * so nothing needs explicit release on failure.
 */
static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	/* index order matters: entries 0-2 are required, 3-4 optional */
	res->clks[0].id = "iface";
	res->clks[1].id = "core";
	res->clks[2].id = "phy";
	res->clks[3].id = "aux";
	res->clks[4].id = "ref";

	/* iface, core, phy are required */
	ret = devm_clk_bulk_get(dev, 3, res->clks);
	if (ret < 0)
		return ret;

	/* aux, ref are optional */
	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
	if (ret < 0)
		return ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	/* "ext" may be absent; the optional getter returns NULL then */
	res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
	if (IS_ERR(res->ext_reset))
		return PTR_ERR(res->ext_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}
288 
/*
 * Power down the v2.1.0 core: stop clocks, assert every reset, put the
 * PHY back into reset and cut the supplies.
 */
static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	/* put the PHY into reset (PHY_CTRL bit 0, cleared again in init) */
	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
305 
/*
 * Power up and configure the v2.1.0 core.
 *
 * Sequence: force everything into reset, enable supplies, release the
 * resets one by one, enable clocks, then program PHY/PARF tuning and the
 * AXI bridge. Returns 0 on success or a negative errno; on failure the
 * goto ladder re-asserts resets in reverse order and drops the supplies.
 */
static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/* Reset the PCIe interface, as the bootloader may have left it in an undefined state */
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	/* hold the PHY in reset (PHY_CTRL bit 0) until clocks are up */
	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->ext_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ext reset\n");
		goto err_deassert_ext;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_deassert_phy;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_deassert_pci;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		goto err_deassert_por;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		goto err_deassert_axi;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret)
		goto err_clks;

	/* release the PHY from reset (clear PHY_CTRL bit 0, set above) */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* IPQ8064-specific PHY tuning: de-emphasis, swing and RX equalization */
	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
			       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
			       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PCIE20_PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
			       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PCIE20_PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);

	return 0;

err_clks:
	reset_control_assert(res->axi_reset);
err_deassert_axi:
	reset_control_assert(res->por_reset);
err_deassert_por:
	reset_control_assert(res->pci_reset);
err_deassert_pci:
	reset_control_assert(res->phy_reset);
err_deassert_phy:
	reset_control_assert(res->ext_reset);
err_deassert_ext:
	reset_control_assert(res->ahb_reset);
err_deassert_ahb:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}
430 
/*
 * Acquire the vdda supply, four clocks and the core reset for the
 * v1.0.0 core. All devm-managed; returns 0 or a negative errno.
 */
static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}
460 
/* Power down the v1.0.0 core: assert reset, stop clocks, drop vdda */
static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}
472 
/*
 * Power up the v1.0.0 core: release the core reset, enable clocks and
 * vdda, then point DBI accesses at the controller. On failure the goto
 * ladder unwinds in reverse acquisition order.
 */
static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		/* set bit 31 of AXI_MSTR_WR_ADDR_HALT when MSI is in use */
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}
540 
541 static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
542 {
543 	u32 val;
544 
545 	/* enable link training */
546 	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
547 	val |= BIT(8);
548 	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
549 }
550 
/*
 * Acquire supplies and clocks for the v2.3.2 core. The pipe clock is
 * stored but only enabled later in post_init. All devm-managed.
 */
static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}
584 
/*
 * Power down the v2.3.2 core. The pipe clock is handled separately in
 * post_deinit, mirroring the init/post_init split.
 */
static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
596 
/* Undo qcom_pcie_post_init_2_3_2(): stop the pipe clock */
static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->pipe_clk);
}
603 
/*
 * Power up and configure the v2.3.2 core: enable supplies and the
 * aux/cfg/master/slave clocks, release the PHY, then program the PARF
 * block. The pipe clock is enabled afterwards in post_init. On failure
 * the goto ladder unwinds in reverse order.
 */
static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* release the PHY from reset (clear PHY_CTRL bit 0) */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE  */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}
677 
678 static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
679 {
680 	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
681 	struct dw_pcie *pci = pcie->pci;
682 	struct device *dev = pci->dev;
683 	int ret;
684 
685 	ret = clk_prepare_enable(res->pipe_clk);
686 	if (ret) {
687 		dev_err(dev, "cannot prepare/enable pipe clock\n");
688 		return ret;
689 	}
690 
691 	return 0;
692 }
693 
/*
 * Acquire clocks and resets for the v2.4.0 core. IPQ4019 lacks the
 * "iface" clock but additionally owns several PHY/secure resets that
 * other platforms control elsewhere. All devm-managed.
 */
static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	if (is_ipq) {
		/*
		 * These resources relates to the PHY or are secure clocks, but
		 * are controlled here for IPQ4019
		 */
		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
		if (IS_ERR(res->pipe_reset))
			return PTR_ERR(res->pipe_reset);

		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
									 "axi_m_vmid");
		if (IS_ERR(res->axi_m_vmid_reset))
			return PTR_ERR(res->axi_m_vmid_reset);

		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
									"axi_s_xpu");
		if (IS_ERR(res->axi_s_xpu_reset))
			return PTR_ERR(res->axi_s_xpu_reset);

		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
		if (IS_ERR(res->parf_reset))
			return PTR_ERR(res->parf_reset);

		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
		if (IS_ERR(res->phy_reset))
			return PTR_ERR(res->phy_reset);
	}

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	if (is_ipq) {
		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
		if (IS_ERR(res->phy_ahb_reset))
			return PTR_ERR(res->phy_ahb_reset);
	}

	return 0;
}
776 
/*
 * Power down the v2.4.0 core: assert all resets, then stop the clocks.
 * Reset handles not acquired on this platform are NULL, which the reset
 * API treats as a no-op.
 */
static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
792 
/*
 * Power up and configure the v2.4.0 core.
 *
 * Sequence: assert every reset (with settle delays), deassert them in a
 * specific order, enable the clocks, then program the PARF block. The
 * error ladder re-asserts resets in reverse deassert order.
 */
static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	/* --- force everything into reset first --- */
	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	/* --- then release them: PHY side first, AXI/power/AHB last --- */
	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		goto err_clks;

	/* release the PHY from reset (clear PHY_CTRL bit 0) */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE  */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_clks:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);
	return ret;
}
966 
967 static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
968 {
969 	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
970 	struct dw_pcie *pci = pcie->pci;
971 	struct device *dev = pci->dev;
972 	int i;
973 	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
974 				    "axi_m_sticky", "sticky",
975 				    "ahb", "sleep", };
976 
977 	res->iface = devm_clk_get(dev, "iface");
978 	if (IS_ERR(res->iface))
979 		return PTR_ERR(res->iface);
980 
981 	res->axi_m_clk = devm_clk_get(dev, "axi_m");
982 	if (IS_ERR(res->axi_m_clk))
983 		return PTR_ERR(res->axi_m_clk);
984 
985 	res->axi_s_clk = devm_clk_get(dev, "axi_s");
986 	if (IS_ERR(res->axi_s_clk))
987 		return PTR_ERR(res->axi_s_clk);
988 
989 	res->ahb_clk = devm_clk_get(dev, "ahb");
990 	if (IS_ERR(res->ahb_clk))
991 		return PTR_ERR(res->ahb_clk);
992 
993 	res->aux_clk = devm_clk_get(dev, "aux");
994 	if (IS_ERR(res->aux_clk))
995 		return PTR_ERR(res->aux_clk);
996 
997 	for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
998 		res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
999 		if (IS_ERR(res->rst[i]))
1000 			return PTR_ERR(res->rst[i]);
1001 	}
1002 
1003 	return 0;
1004 }
1005 
/* Power down the v2.3.3 core: stop all five clocks */
static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}
1016 
1017 static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
1018 {
1019 	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
1020 	struct dw_pcie *pci = pcie->pci;
1021 	struct device *dev = pci->dev;
1022 	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
1023 	int i, ret;
1024 	u32 val;
1025 
1026 	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
1027 		ret = reset_control_assert(res->rst[i]);
1028 		if (ret) {
1029 			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
1030 			return ret;
1031 		}
1032 	}
1033 
1034 	usleep_range(2000, 2500);
1035 
1036 	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
1037 		ret = reset_control_deassert(res->rst[i]);
1038 		if (ret) {
1039 			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
1040 				ret);
1041 			return ret;
1042 		}
1043 	}
1044 
1045 	/*
1046 	 * Don't have a way to see if the reset has completed.
1047 	 * Wait for some time.
1048 	 */
1049 	usleep_range(2000, 2500);
1050 
1051 	ret = clk_prepare_enable(res->iface);
1052 	if (ret) {
1053 		dev_err(dev, "cannot prepare/enable core clock\n");
1054 		goto err_clk_iface;
1055 	}
1056 
1057 	ret = clk_prepare_enable(res->axi_m_clk);
1058 	if (ret) {
1059 		dev_err(dev, "cannot prepare/enable core clock\n");
1060 		goto err_clk_axi_m;
1061 	}
1062 
1063 	ret = clk_prepare_enable(res->axi_s_clk);
1064 	if (ret) {
1065 		dev_err(dev, "cannot prepare/enable axi slave clock\n");
1066 		goto err_clk_axi_s;
1067 	}
1068 
1069 	ret = clk_prepare_enable(res->ahb_clk);
1070 	if (ret) {
1071 		dev_err(dev, "cannot prepare/enable ahb clock\n");
1072 		goto err_clk_ahb;
1073 	}
1074 
1075 	ret = clk_prepare_enable(res->aux_clk);
1076 	if (ret) {
1077 		dev_err(dev, "cannot prepare/enable aux clock\n");
1078 		goto err_clk_aux;
1079 	}
1080 
1081 	writel(SLV_ADDR_SPACE_SZ,
1082 		pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
1083 
1084 	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
1085 	val &= ~BIT(0);
1086 	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
1087 
1088 	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
1089 
1090 	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
1091 		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
1092 		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
1093 		pcie->parf + PCIE20_PARF_SYS_CTRL);
1094 	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
1095 
1096 	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
1097 	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
1098 	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
1099 
1100 	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
1101 	val &= ~PCI_EXP_LNKCAP_ASPMS;
1102 	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
1103 
1104 	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
1105 		PCI_EXP_DEVCTL2);
1106 
1107 	return 0;
1108 
1109 err_clk_aux:
1110 	clk_disable_unprepare(res->ahb_clk);
1111 err_clk_ahb:
1112 	clk_disable_unprepare(res->axi_s_clk);
1113 err_clk_axi_s:
1114 	clk_disable_unprepare(res->axi_m_clk);
1115 err_clk_axi_m:
1116 	clk_disable_unprepare(res->iface);
1117 err_clk_iface:
1118 	/*
1119 	 * Not checking for failure, will anyway return
1120 	 * the original failure in 'ret'.
1121 	 */
1122 	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
1123 		reset_control_assert(res->rst[i]);
1124 
1125 	return ret;
1126 }
1127 
1128 static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
1129 {
1130 	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
1131 	struct dw_pcie *pci = pcie->pci;
1132 	struct device *dev = pci->dev;
1133 	int ret;
1134 
1135 	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
1136 	if (IS_ERR(res->pci_reset))
1137 		return PTR_ERR(res->pci_reset);
1138 
1139 	res->supplies[0].supply = "vdda";
1140 	res->supplies[1].supply = "vddpe-3v3";
1141 	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
1142 				      res->supplies);
1143 	if (ret)
1144 		return ret;
1145 
1146 	res->clks[0].id = "aux";
1147 	res->clks[1].id = "cfg";
1148 	res->clks[2].id = "bus_master";
1149 	res->clks[3].id = "bus_slave";
1150 	res->clks[4].id = "slave_q2a";
1151 	res->clks[5].id = "tbu";
1152 
1153 	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
1154 	if (ret < 0)
1155 		return ret;
1156 
1157 	res->pipe_clk = devm_clk_get(dev, "pipe");
1158 	return PTR_ERR_OR_ZERO(res->pipe_clk);
1159 }
1160 
1161 static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
1162 {
1163 	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
1164 	struct dw_pcie *pci = pcie->pci;
1165 	struct device *dev = pci->dev;
1166 	u32 val;
1167 	int ret;
1168 
1169 	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
1170 	if (ret < 0) {
1171 		dev_err(dev, "cannot enable regulators\n");
1172 		return ret;
1173 	}
1174 
1175 	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
1176 	if (ret < 0)
1177 		goto err_disable_regulators;
1178 
1179 	ret = reset_control_assert(res->pci_reset);
1180 	if (ret < 0) {
1181 		dev_err(dev, "cannot deassert pci reset\n");
1182 		goto err_disable_clocks;
1183 	}
1184 
1185 	usleep_range(1000, 1500);
1186 
1187 	ret = reset_control_deassert(res->pci_reset);
1188 	if (ret < 0) {
1189 		dev_err(dev, "cannot deassert pci reset\n");
1190 		goto err_disable_clocks;
1191 	}
1192 
1193 	ret = clk_prepare_enable(res->pipe_clk);
1194 	if (ret) {
1195 		dev_err(dev, "cannot prepare/enable pipe clock\n");
1196 		goto err_disable_clocks;
1197 	}
1198 
1199 	/* configure PCIe to RC mode */
1200 	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
1201 
1202 	/* enable PCIe clocks and resets */
1203 	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
1204 	val &= ~BIT(0);
1205 	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
1206 
1207 	/* change DBI base address */
1208 	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
1209 
1210 	/* MAC PHY_POWERDOWN MUX DISABLE  */
1211 	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
1212 	val &= ~BIT(29);
1213 	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
1214 
1215 	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
1216 	val |= BIT(4);
1217 	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
1218 
1219 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
1220 		val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
1221 		val |= BIT(31);
1222 		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
1223 	}
1224 
1225 	return 0;
1226 err_disable_clocks:
1227 	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
1228 err_disable_regulators:
1229 	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
1230 
1231 	return ret;
1232 }
1233 
/*
 * Tear down what qcom_pcie_init_2_7_0() brought up, in reverse order:
 * bus clocks first, then the supply rails. The pipe clock is handled
 * separately in qcom_pcie_post_deinit_2_7_0().
 */
static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
1241 
/*
 * Enable the pipe clock. Runs from qcom_pcie_host_init() after
 * phy_power_on(), since this clock is sourced from the PHY.
 */
static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	return clk_prepare_enable(res->pipe_clk);
}
1248 
/* Counterpart of qcom_pcie_post_init_2_7_0(): gate the pipe clock. */
static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_disable_unprepare(res->pipe_clk);
}
1255 
1256 static int qcom_pcie_link_up(struct dw_pcie *pci)
1257 {
1258 	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
1259 	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
1260 
1261 	return !!(val & PCI_EXP_LNKSTA_DLLLA);
1262 }
1263 
/*
 * DesignWare host_init callback. Bring-up order matters here:
 * endpoint held in reset -> controller init -> PHY power on ->
 * optional post_init -> RC setup -> endpoint released -> link training.
 * On failure, everything is unwound in reverse order.
 */
static int qcom_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	/* Keep the endpoint in reset while the root complex is configured. */
	qcom_ep_reset_assert(pcie);

	/* IP-revision-specific resets, clocks and PARF programming. */
	ret = pcie->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	/* post_init (where present) enables clocks that depend on the PHY. */
	if (pcie->ops->post_init) {
		ret = pcie->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	dw_pcie_setup_rc(pp);
	dw_pcie_msi_init(pp);

	/* Release the endpoint before starting link training. */
	qcom_ep_reset_deassert(pcie);

	ret = qcom_pcie_establish_link(pcie);
	if (ret)
		goto err;

	return 0;
err:
	qcom_ep_reset_assert(pcie);
	if (pcie->ops->post_deinit)
		pcie->ops->post_deinit(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->ops->deinit(pcie);

	return ret;
}
1307 
/* DesignWare host ops: only host_init is needed by this driver. */
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
};
1311 
/*
 * Per-IP-revision operation tables. Which table applies to a given SoC
 * is selected through the match data in qcom_pcie_match below.
 */

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.post_deinit = qcom_pcie_post_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.post_init = qcom_pcie_post_init_2_7_0,
	.post_deinit = qcom_pcie_post_deinit_2_7_0,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
};
1367 
1368 static int qcom_pcie_probe(struct platform_device *pdev)
1369 {
1370 	struct device *dev = &pdev->dev;
1371 	struct resource *res;
1372 	struct pcie_port *pp;
1373 	struct dw_pcie *pci;
1374 	struct qcom_pcie *pcie;
1375 	int ret;
1376 
1377 	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
1378 	if (!pcie)
1379 		return -ENOMEM;
1380 
1381 	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
1382 	if (!pci)
1383 		return -ENOMEM;
1384 
1385 	pm_runtime_enable(dev);
1386 	ret = pm_runtime_get_sync(dev);
1387 	if (ret < 0)
1388 		goto err_pm_runtime_put;
1389 
1390 	pci->dev = dev;
1391 	pci->ops = &dw_pcie_ops;
1392 	pp = &pci->pp;
1393 
1394 	pcie->pci = pci;
1395 
1396 	pcie->ops = of_device_get_match_data(dev);
1397 
1398 	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
1399 	if (IS_ERR(pcie->reset)) {
1400 		ret = PTR_ERR(pcie->reset);
1401 		goto err_pm_runtime_put;
1402 	}
1403 
1404 	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
1405 	if (IS_ERR(pcie->parf)) {
1406 		ret = PTR_ERR(pcie->parf);
1407 		goto err_pm_runtime_put;
1408 	}
1409 
1410 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
1411 	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
1412 	if (IS_ERR(pci->dbi_base)) {
1413 		ret = PTR_ERR(pci->dbi_base);
1414 		goto err_pm_runtime_put;
1415 	}
1416 
1417 	pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
1418 	if (IS_ERR(pcie->elbi)) {
1419 		ret = PTR_ERR(pcie->elbi);
1420 		goto err_pm_runtime_put;
1421 	}
1422 
1423 	pcie->phy = devm_phy_optional_get(dev, "pciephy");
1424 	if (IS_ERR(pcie->phy)) {
1425 		ret = PTR_ERR(pcie->phy);
1426 		goto err_pm_runtime_put;
1427 	}
1428 
1429 	ret = pcie->ops->get_resources(pcie);
1430 	if (ret)
1431 		goto err_pm_runtime_put;
1432 
1433 	pp->ops = &qcom_pcie_dw_ops;
1434 
1435 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
1436 		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
1437 		if (pp->msi_irq < 0) {
1438 			ret = pp->msi_irq;
1439 			goto err_pm_runtime_put;
1440 		}
1441 	}
1442 
1443 	ret = phy_init(pcie->phy);
1444 	if (ret) {
1445 		pm_runtime_disable(&pdev->dev);
1446 		goto err_pm_runtime_put;
1447 	}
1448 
1449 	platform_set_drvdata(pdev, pcie);
1450 
1451 	ret = dw_pcie_host_init(pp);
1452 	if (ret) {
1453 		dev_err(dev, "cannot initialize host\n");
1454 		pm_runtime_disable(&pdev->dev);
1455 		goto err_pm_runtime_put;
1456 	}
1457 
1458 	return 0;
1459 
1460 err_pm_runtime_put:
1461 	pm_runtime_put(dev);
1462 	pm_runtime_disable(dev);
1463 
1464 	return ret;
1465 }
1466 
/* DT compatibles, each mapped to the ops table for its controller revision. */
static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-sdm845", .data = &ops_2_7_0 },
	{ }
};
1479 
/*
 * Force the PCI-to-PCI bridge class on the listed Qcom root-port device
 * IDs (presumably the hardware advertises a different class code —
 * why this fixup exists; confirm against the SoC documentation).
 */
static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
1491 
/*
 * Built-in platform driver; no remove callback, and bind/unbind via
 * sysfs is suppressed since the host bridge cannot be torn down.
 */
static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);
1501