// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

#define PCIE20_PARF_SYS_CTRL			0x00
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

#define PCIE20_PARF_PHY_CTRL			0x40
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		((x) << 16)

#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PHY_REFCLK_SSP_EN			BIT(16)
#define PHY_REFCLK_USE_PAD			BIT(12)

#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
#define PCIE20_PARF_LTSSM			0x1B0
#define PCIE20_PARF_SID_OFFSET			0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C
#define PCIE20_PARF_DEVICE_TYPE			0x1000
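/*
 * Notes on the PARF registers above (bit names follow later kernel
 * versions and the downstream Qualcomm driver, so treat them as informed
 * assumptions): DBI_BASE_ADDR controls where the DWC DBI space appears on
 * the slave bus, MHI_CLOCK_RESET_CTRL bit 4 is a bypass control (BYPASS),
 * the AXI_MSTR_WR_ADDR_HALT registers gate inbound writes via bit 31 (EN),
 * LTSSM bit 8 enables the link training state machine (LTSSM_EN), and
 * DEVICE_TYPE selects root complex vs endpoint operation.
 */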

#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c
#define CFG_BRIDGE_SB_INIT			BIT(0)

#define PCIE20_CAP				0x70
#define PCIE20_DEVICE_CONTROL2_STATUS2		(PCIE20_CAP + PCI_EXP_DEVCTL2)
#define PCIE20_CAP_LINK_CAPABILITIES		(PCIE20_CAP + PCI_EXP_LNKCAP)
#define PCIE20_CAP_LINK_1			(PCIE20_CAP + 0x14)
#define PCIE_CAP_LINK1_VAL			0x2FD7F

#define PCIE20_PARF_Q2A_FLUSH			0x1AC

#define PCIE20_MISC_CONTROL_1_REG		0x8BC
#define DBI_RO_WR_EN				1

#define PERST_DELAY_US				1000
/* PARF registers */
#define PCIE20_PARF_PCS_DEEMPH			0x34
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		((x) << 16)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	((x) << 8)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	((x) << 0)

#define PCIE20_PARF_PCS_SWING			0x38
#define PCS_SWING_TX_SWING_FULL(x)		((x) << 8)
#define PCS_SWING_TX_SWING_LOW(x)		((x) << 0)

#define PCIE20_PARF_CONFIG_BITS		0x50
#define PHY_RX0_EQ(x)				((x) << 24)

#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358
#define SLV_ADDR_SPACE_SZ			0x10000000
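/* SLV_ADDR_SPACE_SZ gives a 256 MiB (0x10000000) slave address space */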

#define PCIE20_LNK_CONTROL2_LINK_STATUS2	0xa0

#define DEVICE_TYPE_RC				0x4

#define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
#define QCOM_PCIE_2_1_0_MAX_CLOCKS	5
struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct reset_control *ext_reset;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY	2
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_4_0_MAX_CLOCKS	4
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	int num_clks;
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};

struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	struct reset_control *rst[7];
};

struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data clks[6];
	struct regulator_bulk_data supplies[2];
	struct reset_control *pci_reset;
	struct clk *pipe_clk;
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
};

struct qcom_pcie;

struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*post_deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
};

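/*
 * struct qcom_pcie - per-controller state
 * @pci:   underlying DesignWare PCIe controller state
 * @parf:  Qualcomm PARF wrapper registers (DT "parf" region)
 * @elbi:  external local bus interface registers (DT "elbi" region)
 * @res:   IP-revision-specific clocks, resets and regulators
 * @phy:   optional "pciephy" PHY
 * @reset: optional PERST# GPIO
 * @ops:   IP-revision-specific hooks
 * @gen:   maximum link generation, from "max-link-speed" in DT
 */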
struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	const struct qcom_pcie_ops *ops;
	int gen;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)

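/*
 * PERST# is active low; the GPIO descriptor carries the polarity from DT,
 * so writing a logical 1 here asserts reset.
 */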
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(100);
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (dw_pcie_link_up(pci))
		return 0;

	/* Enable Link Training state machine */
	if (pcie->ops->ltssm_enable)
		pcie->ops->ltssm_enable(pcie);

	return dw_pcie_wait_for_link(pci);
}

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
}

static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "core";
	res->clks[2].id = "phy";
	res->clks[3].id = "aux";
	res->clks[4].id = "ref";

	/* iface, core, phy are required */
	ret = devm_clk_bulk_get(dev, 3, res->clks);
	if (ret < 0)
		return ret;

	/* aux, ref are optional */
	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
	if (ret < 0)
		return ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
	if (IS_ERR(res->ext_reset))
		return PTR_ERR(res->ext_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->ext_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ext reset\n");
		goto err_deassert_ext;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_deassert_phy;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_deassert_pci;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		goto err_deassert_por;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		goto err_deassert_axi;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret)
		goto err_clks;

	/* take the PHY out of test power-down (PHY_CTRL bit 0) */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

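	/*
	 * The TX de-emphasis, swing and RX equalization values below are
	 * PHY tuning settings for the IPQ8064-class parts, presumably
	 * inherited from the downstream Qualcomm code; they are not
	 * otherwise documented.
	 */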
	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
			       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
			       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PCIE20_PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
			       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PCIE20_PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	if (pcie->gen == 1) {
		val = readl(pci->dbi_base + PCIE20_LNK_CONTROL2_LINK_STATUS2);
		val |= PCI_EXP_LNKSTA_CLS_2_5GB;
		writel(val, pci->dbi_base + PCIE20_LNK_CONTROL2_LINK_STATUS2);
	}

	/* Set the max TLP size to 2K, instead of using the default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);

	return 0;

err_clks:
	reset_control_assert(res->axi_reset);
err_deassert_axi:
	reset_control_assert(res->por_reset);
err_deassert_por:
	reset_control_assert(res->pci_reset);
err_deassert_pci:
	reset_control_assert(res->phy_reset);
err_deassert_phy:
	reset_control_assert(res->ext_reset);
err_deassert_ext:
	reset_control_assert(res->ahb_reset);
err_deassert_ahb:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

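	/*
	 * Bit 31 of AXI_MSTR_WR_ADDR_HALT is named EN in later kernel
	 * versions. Its exact semantics are undocumented, but the driver
	 * only needs it when MSI is in use.
	 */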
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}

static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training by setting the LTSSM enable bit (bit 8) */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->pipe_clk);
}

static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* take the PHY out of test power-down (PHY_CTRL bit 0) */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_prepare_enable(res->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clock\n");
		return ret;
	}

	return 0;
}

static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	if (is_ipq) {
		/*
		 * These resources relate to the PHY or are secure clocks,
		 * but they are controlled here for IPQ4019.
		 */
		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
		if (IS_ERR(res->pipe_reset))
			return PTR_ERR(res->pipe_reset);

		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
									 "axi_m_vmid");
		if (IS_ERR(res->axi_m_vmid_reset))
			return PTR_ERR(res->axi_m_vmid_reset);

		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
									"axi_s_xpu");
		if (IS_ERR(res->axi_s_xpu_reset))
			return PTR_ERR(res->axi_s_xpu_reset);

		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
		if (IS_ERR(res->parf_reset))
			return PTR_ERR(res->parf_reset);

		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
		if (IS_ERR(res->phy_reset))
			return PTR_ERR(res->phy_reset);
	}

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	if (is_ipq) {
		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
		if (IS_ERR(res->phy_ahb_reset))
			return PTR_ERR(res->phy_ahb_reset);
	}

	return 0;
}

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		goto err_clks;

	/* take the PHY out of test power-down (PHY_CTRL bit 0) */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_clks:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);
	return ret;
}

static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i;
	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
				    "axi_m_sticky", "sticky",
				    "ahb", "sleep", };

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->axi_m_clk = devm_clk_get(dev, "axi_m");
	if (IS_ERR(res->axi_m_clk))
		return PTR_ERR(res->axi_m_clk);

	res->axi_s_clk = devm_clk_get(dev, "axi_s");
	if (IS_ERR(res->axi_s_clk))
		return PTR_ERR(res->axi_s_clk);

	res->ahb_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(res->ahb_clk))
		return PTR_ERR(res->ahb_clk);

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
		res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
		if (IS_ERR(res->rst[i]))
			return PTR_ERR(res->rst[i]);
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}

static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i, ret;
	u32 val;

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_assert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
			return ret;
		}
	}

	usleep_range(2000, 2500);

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_deassert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
				ret);
			return ret;
		}
	}

	/*
	 * There is no way to tell whether the resets have completed, so
	 * just wait for a while.
	 */
	usleep_range(2000, 2500);

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_clk_iface;
	}

	ret = clk_prepare_enable(res->axi_m_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi master clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->axi_s_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi slave clock\n");
		goto err_clk_axi_s;
	}

	ret = clk_prepare_enable(res->ahb_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable ahb clock\n");
		goto err_clk_ahb;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	writel(SLV_ADDR_SPACE_SZ,
		pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PCIE20_PARF_SYS_CTRL);
	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

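	/*
	 * PCIE_CAP_LINK1_VAL below is a magic value inherited from the
	 * downstream Qualcomm driver; its field layout is undocumented.
	 */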
	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);

	val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base +
		PCIE20_DEVICE_CONTROL2_STATUS2);

	return 0;

err_clk_aux:
	clk_disable_unprepare(res->ahb_clk);
err_clk_ahb:
	clk_disable_unprepare(res->axi_s_clk);
err_clk_axi_s:
	clk_disable_unprepare(res->axi_m_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->iface);
err_clk_iface:
	/*
	 * Not checking for failure here; the original error is returned
	 * in 'ret' anyway.
	 */
	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
		reset_control_assert(res->rst[i]);

	return ret;
}

static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "cfg";
	res->clks[2].id = "bus_master";
	res->clks[3].id = "bus_slave";
	res->clks[4].id = "slave_q2a";
	res->clks[5].id = "tbu";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		return ret;

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}

static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		goto err_disable_regulators;

	ret = reset_control_assert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot assert pci reset\n");
		goto err_disable_clocks;
	}

	usleep_range(1000, 1500);

	ret = reset_control_deassert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_disable_clocks;
	}

	ret = clk_prepare_enable(res->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clock\n");
		goto err_disable_clocks;
	}

	/* configure PCIe to RC mode */
	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);

	/* take the PHY out of test power-down (PHY_CTRL bit 0) */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_disable_clocks:
	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

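/*
 * The pipe clock is sourced from the PHY, so it is only enabled in
 * post_init(), which qcom_pcie_host_init() calls after phy_power_on().
 */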
static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	return clk_prepare_enable(res->pipe_clk);
}

static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_disable_unprepare(res->pipe_clk);
}

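/* The link is up once the Data Link Layer Link Active bit is set */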
static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

static int qcom_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->ops->post_init) {
		ret = pcie->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	dw_pcie_setup_rc(pp);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	qcom_ep_reset_deassert(pcie);

	ret = qcom_pcie_establish_link(pcie);
	if (ret)
		goto err;

	return 0;
err:
	qcom_ep_reset_assert(pcie);
	if (pcie->ops->post_deinit)
		pcie->ops->post_deinit(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->ops->deinit(pcie);

	return ret;
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.post_deinit = qcom_pcie_post_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.post_init = qcom_pcie_post_init_2_7_0,
	.post_deinit = qcom_pcie_post_deinit_2_7_0,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
};

static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	struct qcom_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->ops = of_device_get_match_data(dev);

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset)) {
		ret = PTR_ERR(pcie->reset);
		goto err_pm_runtime_put;
	}

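	/*
	 * of_pci_get_max_link_speed() parses the optional "max-link-speed"
	 * DT property; default to Gen2 when it is absent or invalid.
	 */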
	pcie->gen = of_pci_get_max_link_speed(pdev->dev.of_node);
	if (pcie->gen < 0)
		pcie->gen = 2;

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base)) {
		ret = PTR_ERR(pci->dbi_base);
		goto err_pm_runtime_put;
	}

	pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);
		goto err_pm_runtime_put;
	}

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		goto err_pm_runtime_put;
	}

	ret = pcie->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0) {
			ret = pp->msi_irq;
			goto err_pm_runtime_put;
		}
	}

	ret = phy_init(pcie->phy);
	if (ret)
		goto err_pm_runtime_put;

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_pm_runtime_put;
	}

	return 0;

err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-sdm845", .data = &ops_2_7_0 },
	{ }
};

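/*
 * The root ports of these SoCs advertise an incorrect device class, so
 * force the PCI-to-PCI bridge class to let them be enumerated normally.
 */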
static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);