// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "pcie-designware.h"

#define PCIE20_PARF_SYS_CTRL			0x00
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

#define PCIE20_COMMAND_STATUS			0x04
#define CMD_BME_VAL				0x4
#define PCIE20_DEVICE_CONTROL2_STATUS2		0x98
#define PCIE_CAP_CPL_TIMEOUT_DISABLE		0x10

#define PCIE20_PARF_PHY_CTRL			0x40
#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
#define PCIE20_PARF_LTSSM			0x1B0
#define PCIE20_PARF_SID_OFFSET			0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C

#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c
#define CFG_BRIDGE_SB_INIT			BIT(0)

#define PCIE20_CAP				0x70
#define PCIE20_CAP_LINK_CAPABILITIES		(PCIE20_CAP + 0xC)
#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT	(BIT(10) | BIT(11))
#define PCIE20_CAP_LINK_1			(PCIE20_CAP + 0x14)
#define PCIE_CAP_LINK1_VAL			0x2FD7F

#define PCIE20_PARF_Q2A_FLUSH			0x1AC

#define PCIE20_MISC_CONTROL_1_REG		0x8BC
#define DBI_RO_WR_EN				1

#define PERST_DELAY_US				1000

#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358
#define SLV_ADDR_SPACE_SZ			0x10000000

#define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
struct qcom_pcie_resources_2_1_0 {
	struct clk *iface_clk;
	struct clk *core_clk;
	struct clk *phy_clk;
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY	2
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

struct qcom_pcie_resources_2_4_0 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};

struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	struct reset_control *rst[7];
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
};

struct qcom_pcie;

struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*post_deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	const struct qcom_pcie_ops *ops;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)

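/*
 * PERST# is routed through the optional "perst" GPIO.  A logical 1 drives
 * the descriptor to its active state (the DT GPIO flags set the electrical
 * polarity); the delay gives the endpoint time to settle after each edge.
 */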
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

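/*
 * Bring the link up: start the LTSSM via the variant-specific hook, then
 * let the DesignWare core poll for the data link layer to come up.
 */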
static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (dw_pcie_link_up(pci))
		return 0;

	/* Enable Link Training state machine */
	if (pcie->ops->ltssm_enable)
		pcie->ops->ltssm_enable(pcie);

	return dw_pcie_wait_for_link(pci);
}

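/* Older variants (1.0.0/2.1.0) start link training via the ELBI registers */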
static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
}

static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface_clk))
		return PTR_ERR(res->iface_clk);

	res->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(res->core_clk))
		return PTR_ERR(res->core_clk);

	res->phy_clk = devm_clk_get(dev, "phy");
	if (IS_ERR(res->phy_clk))
		return PTR_ERR(res->phy_clk);

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->phy_reset);
	clk_disable_unprepare(res->iface_clk);
	clk_disable_unprepare(res->core_clk);
	clk_disable_unprepare(res->phy_clk);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

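/*
 * Power-up sequence for the 2.1.0 core: regulators first, then the bus
 * clocks with the AHB reset held, and finally the PHY/PCI/POR/AXI resets
 * before the bridge read-request size is programmed via the DBI.
 */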
static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		goto err_assert_ahb;
	}

	ret = clk_prepare_enable(res->iface_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_assert_ahb;
	}

	ret = clk_prepare_enable(res->phy_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable phy clock\n");
		goto err_clk_phy;
	}

	ret = clk_prepare_enable(res->core_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable core clock\n");
		goto err_clk_core;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	val |= BIT(16);
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		goto err_deassert_ahb;
	}

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);

	return 0;

err_deassert_ahb:
	clk_disable_unprepare(res->core_clk);
err_clk_core:
	clk_disable_unprepare(res->phy_clk);
err_clk_phy:
	clk_disable_unprepare(res->iface_clk);
err_assert_ahb:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}

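/* 2.3.2 and later variants start link training via the PARF LTSSM register */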
static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->pipe_clk);
}

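/*
 * The pipe clock is deliberately left to post_init/post_deinit: it is
 * sourced from the PHY, so it can only be enabled once phy_power_on()
 * has completed.
 */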
static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE  */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_prepare_enable(res->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clock\n");
		return ret;
	}

	return 0;
}

static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->master_clk = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
	if (IS_ERR(res->pipe_reset))
		return PTR_ERR(res->pipe_reset);

	res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
								 "axi_m_vmid");
	if (IS_ERR(res->axi_m_vmid_reset))
		return PTR_ERR(res->axi_m_vmid_reset);

	res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
								"axi_s_xpu");
	if (IS_ERR(res->axi_s_xpu_reset))
		return PTR_ERR(res->axi_s_xpu_reset);

	res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
	if (IS_ERR(res->parf_reset))
		return PTR_ERR(res->parf_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	if (IS_ERR(res->phy_reset))
		return PTR_ERR(res->phy_reset);

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
	if (IS_ERR(res->phy_ahb_reset))
		return PTR_ERR(res->phy_ahb_reset);

	return 0;
}

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_disable_unprepare(res->aux_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->slave_clk);
}

static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_clk_axi_s;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE  */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_clk_axi_s:
	clk_disable_unprepare(res->master_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->aux_clk);
err_clk_aux:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);
	return ret;
}

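/*
 * The 2.3.3 core uses an array of named resets; the names below must match
 * the reset-names property in the device tree.
 */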
static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i;
	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
				    "axi_m_sticky", "sticky",
				    "ahb", "sleep", };

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->axi_m_clk = devm_clk_get(dev, "axi_m");
	if (IS_ERR(res->axi_m_clk))
		return PTR_ERR(res->axi_m_clk);

	res->axi_s_clk = devm_clk_get(dev, "axi_s");
	if (IS_ERR(res->axi_s_clk))
		return PTR_ERR(res->axi_s_clk);

	res->ahb_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(res->ahb_clk))
		return PTR_ERR(res->ahb_clk);

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
		res->rst[i] = devm_reset_control_get_exclusive(dev,
							       rst_names[i]);
		if (IS_ERR(res->rst[i]))
			return PTR_ERR(res->rst[i]);
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}

static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i, ret;
	u32 val;

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_assert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
			return ret;
		}
	}

	usleep_range(2000, 2500);

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_deassert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
				ret);
			return ret;
		}
	}

	/*
	 * Don't have a way to see if the reset has completed.
	 * Wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_clk_iface;
	}

	ret = clk_prepare_enable(res->axi_m_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi master clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->axi_s_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi slave clock\n");
		goto err_clk_axi_s;
	}

	ret = clk_prepare_enable(res->ahb_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable ahb clock\n");
		goto err_clk_ahb;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	writel(SLV_ADDR_SPACE_SZ,
	       pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
	       SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
	       AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
	       pcie->parf + PCIE20_PARF_SYS_CTRL);
	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

	writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS);
	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);

	val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
	val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT;
	writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);

	writel(PCIE_CAP_CPL_TIMEOUT_DISABLE,
	       pci->dbi_base + PCIE20_DEVICE_CONTROL2_STATUS2);

	return 0;

err_clk_aux:
	clk_disable_unprepare(res->ahb_clk);
err_clk_ahb:
	clk_disable_unprepare(res->axi_s_clk);
err_clk_axi_s:
	clk_disable_unprepare(res->axi_m_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->iface);
err_clk_iface:
	/*
	 * Not checking for failure, will anyway return
	 * the original failure in 'ret'.
	 */
	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
		reset_control_assert(res->rst[i]);

	return ret;
}

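/* The link is up once Data Link Layer Link Active is set in Link Status */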
static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

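/*
 * Host bring-up: hold the endpoint in reset while resources and the PHY
 * are powered, program the root complex, then release PERST# and train
 * the link.  Failures unwind in reverse order.
 */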
static int qcom_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->ops->post_init) {
		ret = pcie->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	dw_pcie_setup_rc(pp);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	qcom_ep_reset_deassert(pcie);

	ret = qcom_pcie_establish_link(pcie);
	if (ret)
		goto err;

	return 0;
err:
	qcom_ep_reset_assert(pcie);
	if (pcie->ops->post_deinit)
		pcie->ops->post_deinit(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->ops->deinit(pcie);

	return ret;
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.post_deinit = qcom_pcie_post_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
};

static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	struct qcom_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* the usage count is bumped even on failure, so drop it */
		pm_runtime_put_noidle(dev);
		pm_runtime_disable(dev);
		return ret;
	}

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->ops = of_device_get_match_data(dev);

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset)) {
		ret = PTR_ERR(pcie->reset);
		goto err_pm_runtime_put;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
	pcie->parf = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base)) {
		ret = PTR_ERR(pci->dbi_base);
		goto err_pm_runtime_put;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
	pcie->elbi = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);
		goto err_pm_runtime_put;
	}

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		goto err_pm_runtime_put;
	}

	ret = pcie->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0) {
			ret = pp->msi_irq;
			goto err_pm_runtime_put;
		}
	}

	ret = phy_init(pcie->phy);
	if (ret)
		goto err_pm_runtime_put;

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_pm_runtime_put;
	}

	return 0;

err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
	{ }
};

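/*
 * The root port advertises a device class other than PCI-to-PCI bridge,
 * which would keep the PCI core from scanning behind it, so force the
 * bridge class early.
 */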
static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, PCI_ANY_ID, qcom_fixup_class);

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);