// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek PCIe host controller driver.
 *
 * Copyright (c) 2020 MediaTek Inc.
 * Author: Jianjun Wang <jianjun.wang@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include "../pci.h"

#define PCIE_SETTING_REG		0x80
#define PCIE_PCI_IDS_1			0x9c
#define PCI_CLASS(class)		((class) << 8)
#define PCIE_RC_MODE			BIT(0)

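/*
 * Configuration transactions: the target bus/devfn and byte enables are
 * programmed into PCIE_CFGNUM_REG, then the access is performed through
 * the configuration window that starts at PCIE_CFG_OFFSET_ADDR.
 */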
#define PCIE_CFGNUM_REG			0x140
#define PCIE_CFG_DEVFN(devfn)		((devfn) & GENMASK(7, 0))
#define PCIE_CFG_BUS(bus)		(((bus) << 8) & GENMASK(15, 8))
#define PCIE_CFG_BYTE_EN(bytes)		(((bytes) << 16) & GENMASK(19, 16))
#define PCIE_CFG_FORCE_BYTE_EN		BIT(20)
#define PCIE_CFG_OFFSET_ADDR		0x1000
#define PCIE_CFG_HEADER(bus, devfn) \
	(PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))

#define PCIE_RST_CTRL_REG		0x148
#define PCIE_MAC_RSTB			BIT(0)
#define PCIE_PHY_RSTB			BIT(1)
#define PCIE_BRG_RSTB			BIT(2)
#define PCIE_PE_RSTB			BIT(3)

#define PCIE_LTSSM_STATUS_REG		0x150
#define PCIE_LTSSM_STATE_MASK		GENMASK(28, 24)
#define PCIE_LTSSM_STATE(val)		(((val) & PCIE_LTSSM_STATE_MASK) >> 24)
#define PCIE_LTSSM_STATE_L2_IDLE	0x14

#define PCIE_LINK_STATUS_REG		0x154
#define PCIE_PORT_LINKUP		BIT(8)

#define PCIE_MSI_SET_NUM		8
#define PCIE_MSI_IRQS_PER_SET		32
#define PCIE_MSI_IRQS_NUM \
	(PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM)

#define PCIE_INT_ENABLE_REG		0x180
#define PCIE_MSI_ENABLE			GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
#define PCIE_MSI_SHIFT			8
#define PCIE_INTX_SHIFT			24
#define PCIE_INTX_ENABLE \
	GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)

#define PCIE_INT_STATUS_REG		0x184
#define PCIE_MSI_SET_ENABLE_REG		0x190
#define PCIE_MSI_SET_ENABLE		GENMASK(PCIE_MSI_SET_NUM - 1, 0)

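/*
 * Each of the 8 MSI sets provides 32 vectors and owns a small register
 * block: the capture (message) address at the set base, a status register
 * at +0x04 and an enable register at +0x08, with the upper 32 bits of the
 * capture address kept in a separate bank at PCIE_MSI_SET_ADDR_HI_BASE.
 */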
#define PCIE_MSI_SET_BASE_REG		0xc00
#define PCIE_MSI_SET_OFFSET		0x10
#define PCIE_MSI_SET_STATUS_OFFSET	0x04
#define PCIE_MSI_SET_ENABLE_OFFSET	0x08

#define PCIE_MSI_SET_ADDR_HI_BASE	0xc80
#define PCIE_MSI_SET_ADDR_HI_OFFSET	0x04

#define PCIE_ICMD_PM_REG		0x198
#define PCIE_TURN_OFF_LINK		BIT(4)

#define PCIE_MISC_CTRL_REG		0x348
#define PCIE_DISABLE_DVFSRC_VLT_REQ	BIT(1)

#define PCIE_TRANS_TABLE_BASE_REG	0x800
#define PCIE_ATR_SRC_ADDR_MSB_OFFSET	0x4
#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET	0x8
#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET	0xc
#define PCIE_ATR_TRSL_PARAM_OFFSET	0x10
#define PCIE_ATR_TLB_SET_OFFSET		0x20

#define PCIE_MAX_TRANS_TABLES		8
#define PCIE_ATR_EN			BIT(0)
#define PCIE_ATR_SIZE(size) \
	(((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
#define PCIE_ATR_ID(id)			((id) & GENMASK(3, 0))
#define PCIE_ATR_TYPE_MEM		PCIE_ATR_ID(0)
#define PCIE_ATR_TYPE_IO		PCIE_ATR_ID(1)
#define PCIE_ATR_TLP_TYPE(type)		(((type) << 16) & GENMASK(18, 16))
#define PCIE_ATR_TLP_TYPE_MEM		PCIE_ATR_TLP_TYPE(0)
#define PCIE_ATR_TLP_TYPE_IO		PCIE_ATR_TLP_TYPE(2)

/**
 * struct mtk_msi_set - MSI information for each set
 * @base: IO mapped register base
 * @msg_addr: MSI message address
 * @saved_irq_state: IRQ enable state saved at suspend time
 */
struct mtk_msi_set {
	void __iomem *base;
	phys_addr_t msg_addr;
	u32 saved_irq_state;
};

/**
 * struct mtk_gen3_pcie - PCIe port information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @reg_base: physical register base
 * @mac_reset: MAC reset control
 * @phy_reset: PHY reset control
 * @phy: PHY controller block
 * @clks: PCIe clocks
 * @num_clks: number of PCIe clocks used by this port
 * @irq: PCIe controller interrupt number
 * @saved_irq_state: IRQ enable state saved at suspend time
 * @irq_lock: lock protecting IRQ register access
 * @intx_domain: legacy INTx IRQ domain
 * @msi_domain: MSI IRQ domain
 * @msi_bottom_domain: MSI IRQ bottom domain
 * @msi_sets: MSI sets information
 * @lock: lock protecting IRQ bit map
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 */
struct mtk_gen3_pcie {
	struct device *dev;
	void __iomem *base;
	phys_addr_t reg_base;
	struct reset_control *mac_reset;
	struct reset_control *phy_reset;
	struct phy *phy;
	struct clk_bulk_data *clks;
	int num_clks;

	int irq;
	u32 saved_irq_state;
	raw_spinlock_t irq_lock;
	struct irq_domain *intx_domain;
	struct irq_domain *msi_domain;
	struct irq_domain *msi_bottom_domain;
	struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
	struct mutex lock;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
};

/**
 * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
 * @bus: PCI bus to query
 * @devfn: device/function number
 * @where: offset in config space
 * @size: data size in TLP header
 *
 * Set byte enable field and device information in configuration TLP header.
 */
static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
					int where, int size)
{
	struct mtk_gen3_pcie *pcie = bus->sysdata;
	int bytes;
	u32 val;

	bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);

	val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
	      PCIE_CFG_HEADER(bus->number, devfn);

	writel_relaxed(val, pcie->base + PCIE_CFGNUM_REG);
}

static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
				      int where)
{
	struct mtk_gen3_pcie *pcie = bus->sysdata;

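	/*
	 * All devices share one configuration window; the target bus/devfn
	 * is selected through PCIE_CFGNUM_REG by mtk_pcie_config_tlp_header()
	 * before each access.
	 */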
	return pcie->base + PCIE_CFG_OFFSET_ADDR + where;
}

static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *val)
{
	mtk_pcie_config_tlp_header(bus, devfn, where, size);

	return pci_generic_config_read32(bus, devfn, where, size, val);
}

static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	mtk_pcie_config_tlp_header(bus, devfn, where, size);

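	/*
	 * The hardware always performs a 32-bit write; the byte enables set
	 * in the TLP header decide which bytes take effect, so sub-word data
	 * has to be shifted into its byte lane first.
	 */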
	if (size <= 2)
		val <<= (where & 0x3) * 8;

	return pci_generic_config_write32(bus, devfn, where, 4, val);
}

static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read  = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};

static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
				    resource_size_t cpu_addr,
				    resource_size_t pci_addr,
				    resource_size_t size,
				    unsigned long type, int num)
{
	void __iomem *table;
	u32 val;

	if (num >= PCIE_MAX_TRANS_TABLES) {
		dev_err(pcie->dev, "not enough translation tables for addr: %#llx, limited to [%d]\n",
			(unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
		return -ENODEV;
	}

	table = pcie->base + PCIE_TRANS_TABLE_BASE_REG +
		num * PCIE_ATR_TLB_SET_OFFSET;

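	/*
	 * The window size is programmed as a power-of-two exponent
	 * (fls(size) - 1), so a window whose size is not a power of two
	 * only covers the largest power of two that fits in it.
	 */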
	writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
		       table);
	writel_relaxed(upper_32_bits(cpu_addr),
		       table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
	writel_relaxed(lower_32_bits(pci_addr),
		       table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
	writel_relaxed(upper_32_bits(pci_addr),
		       table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);

	if (type == IORESOURCE_IO)
		val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
	else
		val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;

	writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);

	return 0;
}

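/*
 * Each MSI set captures inbound memory writes to the physical address of
 * its own register block; that address is handed to endpoints as the MSI
 * message address in mtk_compose_msi_msg().
 */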
static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie)
{
	int i;
	u32 val;

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG +
				i * PCIE_MSI_SET_OFFSET;
		msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG +
				    i * PCIE_MSI_SET_OFFSET;

		/* Configure the MSI capture address */
		writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
		writel_relaxed(upper_32_bits(msi_set->msg_addr),
			       pcie->base + PCIE_MSI_SET_ADDR_HI_BASE +
			       i * PCIE_MSI_SET_ADDR_HI_OFFSET);
	}

	val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG);
	val |= PCIE_MSI_SET_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG);

	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val |= PCIE_MSI_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
}

static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
{
	struct resource_entry *entry;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	unsigned int table_index = 0;
	int err;
	u32 val;

	/* Set as RC mode */
	val = readl_relaxed(pcie->base + PCIE_SETTING_REG);
	val |= PCIE_RC_MODE;
	writel_relaxed(val, pcie->base + PCIE_SETTING_REG);

	/* Set class code */
	val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
	val &= ~GENMASK(31, 8);
	val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL);
	writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1);

	/* Mask all INTx interrupts */
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val &= ~PCIE_INTX_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);

	/* Disable DVFSRC voltage request */
	val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG);
	val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
	writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);

	/* Assert all reset signals */
	val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
	val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

	/*
	 * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
	 * and 2.2.1 (Initial Power-Up (G3 to S0)).
	 * The deassertion of PERST# should be delayed 100ms (TPVPERL)
	 * for the power and clock to become stable.
	 */
	msleep(100);

	/* De-assert reset signals */
	val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

	/* Wait for the link to come up */
	err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
	if (err) {
		val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG);
		dev_err(pcie->dev, "PCIe link down, ltssm reg val: %#x\n", val);
		return err;
	}

	mtk_pcie_enable_msi(pcie);

	/* Set PCIe translation windows */
	resource_list_for_each_entry(entry, &host->windows) {
		struct resource *res = entry->res;
		unsigned long type = resource_type(res);
		resource_size_t cpu_addr;
		resource_size_t pci_addr;
		resource_size_t size;
		const char *range_type;

		if (type == IORESOURCE_IO) {
			cpu_addr = pci_pio_to_address(res->start);
			range_type = "IO";
		} else if (type == IORESOURCE_MEM) {
			cpu_addr = res->start;
			range_type = "MEM";
		} else {
			continue;
		}

		pci_addr = res->start - entry->offset;
		size = resource_size(res);
		err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
					       type, table_index);
		if (err)
			return err;

		dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
			range_type, table_index, (unsigned long long)cpu_addr,
			(unsigned long long)pci_addr, (unsigned long long)size);

		table_index++;
	}

	return 0;
}

static int mtk_pcie_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static void mtk_pcie_msi_irq_mask(struct irq_data *data)
{
	pci_msi_mask_irq(data);
	irq_chip_mask_parent(data);
}

static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
{
	pci_msi_unmask_irq(data);
	irq_chip_unmask_parent(data);
}

static struct irq_chip mtk_msi_irq_chip = {
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = mtk_pcie_msi_irq_mask,
	.irq_unmask = mtk_pcie_msi_irq_unmask,
	.name = "MSI",
};

static struct msi_domain_info mtk_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &mtk_msi_irq_chip,
};

static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

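	/* The MSI payload is the vector index within the target MSI set */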
	msg->address_hi = upper_32_bits(msi_set->msg_addr);
	msg->address_lo = lower_32_bits(msi_set->msg_addr);
	msg->data = hwirq;
	dev_dbg(pcie->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
		hwirq, msg->address_hi, msg->address_lo, msg->data);
}

static void mtk_msi_bottom_irq_ack(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	unsigned long hwirq;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET);
}

static void mtk_msi_bottom_irq_mask(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq, flags;
	u32 val;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

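	/* The per-set enable register is shared, so serialize the read-modify-write */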
	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	val &= ~BIT(hwirq);
	writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq, flags;
	u32 val;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	val |= BIT(hwirq);
	writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static struct irq_chip mtk_msi_bottom_irq_chip = {
	.irq_ack		= mtk_msi_bottom_irq_ack,
	.irq_mask		= mtk_msi_bottom_irq_mask,
	.irq_unmask		= mtk_msi_bottom_irq_unmask,
	.irq_compose_msi_msg	= mtk_compose_msi_msg,
	.irq_set_affinity	= mtk_pcie_set_affinity,
	.name			= "MSI",
};

static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *arg)
{
	struct mtk_gen3_pcie *pcie = domain->host_data;
	struct mtk_msi_set *msi_set;
	int i, hwirq, set_idx;

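	/*
	 * bitmap_find_free_region() returns a naturally aligned power-of-two
	 * block, so all vectors of a multi-MSI allocation land in the same
	 * MSI set.
	 */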
	mutex_lock(&pcie->lock);

	hwirq = bitmap_find_free_region(pcie->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
					order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);

	if (hwirq < 0)
		return -ENOSPC;

	set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
	msi_set = &pcie->msi_sets[set_idx];

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &mtk_msi_bottom_irq_chip, msi_set,
				    handle_edge_irq, NULL, NULL);

	return 0;
}

static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs)
{
	struct mtk_gen3_pcie *pcie = domain->host_data;
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);

	mutex_lock(&pcie->lock);

	bitmap_release_region(pcie->msi_irq_in_use, data->hwirq,
			      order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);

	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
	.alloc = mtk_msi_bottom_domain_alloc,
	.free = mtk_msi_bottom_domain_free,
};

static void mtk_intx_mask(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static void mtk_intx_unmask(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

/**
 * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
 * @data: pointer to chip specific data
 *
 * As an emulated level IRQ, the interrupt status remains asserted until the
 * corresponding de-assert message is received; hence the status can only be
 * cleared once the interrupt has been serviced.
 */
static void mtk_intx_eoi(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long hwirq;

	hwirq = data->hwirq + PCIE_INTX_SHIFT;
	writel_relaxed(BIT(hwirq), pcie->base + PCIE_INT_STATUS_REG);
}

static struct irq_chip mtk_intx_irq_chip = {
	.irq_mask		= mtk_intx_mask,
	.irq_unmask		= mtk_intx_unmask,
	.irq_eoi		= mtk_intx_eoi,
	.irq_set_affinity	= mtk_pcie_set_affinity,
	.name			= "INTx",
};

static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, domain->host_data);
	irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
				      handle_fasteoi_irq, "INTx");
	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};

static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *intc_node, *node = dev->of_node;
	int ret;

	raw_spin_lock_init(&pcie->irq_lock);

	/* Setup INTx */
	intc_node = of_get_child_by_name(node, "interrupt-controller");
	if (!intc_node) {
		dev_err(dev, "missing interrupt-controller node\n");
		return -ENODEV;
	}

	pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
						  &intx_domain_ops, pcie);
	if (!pcie->intx_domain) {
		dev_err(dev, "failed to create INTx IRQ domain\n");
		return -ENODEV;
	}

	/* Setup MSI */
	mutex_init(&pcie->lock);

	pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
				  &mtk_msi_bottom_domain_ops, pcie);
	if (!pcie->msi_bottom_domain) {
		dev_err(dev, "failed to create MSI bottom domain\n");
		ret = -ENODEV;
		goto err_msi_bottom_domain;
	}

	pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
						     &mtk_msi_domain_info,
						     pcie->msi_bottom_domain);
	if (!pcie->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		ret = -ENODEV;
		goto err_msi_domain;
	}

	return 0;

err_msi_domain:
	irq_domain_remove(pcie->msi_bottom_domain);
err_msi_bottom_domain:
	irq_domain_remove(pcie->intx_domain);

	return ret;
}

static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie)
{
	irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);

	if (pcie->intx_domain)
		irq_domain_remove(pcie->intx_domain);

	if (pcie->msi_domain)
		irq_domain_remove(pcie->msi_domain);

	if (pcie->msi_bottom_domain)
		irq_domain_remove(pcie->msi_bottom_domain);

	irq_dispose_mapping(pcie->irq);
}

static void mtk_pcie_msi_handler(struct mtk_gen3_pcie *pcie, int set_idx)
{
	struct mtk_msi_set *msi_set = &pcie->msi_sets[set_idx];
	unsigned long msi_enable, msi_status;
	irq_hw_number_t bit, hwirq;

	msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);

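	/*
	 * Re-read the status register until no enabled vector is pending so
	 * that MSIs raised while the loop is running are not lost.
	 */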
	do {
		msi_status = readl_relaxed(msi_set->base +
					   PCIE_MSI_SET_STATUS_OFFSET);
		msi_status &= msi_enable;
		if (!msi_status)
			break;

		for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
			hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
			generic_handle_domain_irq(pcie->msi_bottom_domain, hwirq);
		}
	} while (true);
}

static void mtk_pcie_irq_handler(struct irq_desc *desc)
{
	struct mtk_gen3_pcie *pcie = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long status;
	irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;

	chained_irq_enter(irqchip, desc);

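	/* Bits [27:24] of the status report INTx, bits [15:8] the MSI sets */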
	status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG);
	for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
			      PCIE_INTX_SHIFT)
		generic_handle_domain_irq(pcie->intx_domain,
					  irq_bit - PCIE_INTX_SHIFT);

	irq_bit = PCIE_MSI_SHIFT;
	for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
			      PCIE_MSI_SHIFT) {
		mtk_pcie_msi_handler(pcie, irq_bit - PCIE_MSI_SHIFT);

		writel_relaxed(BIT(irq_bit), pcie->base + PCIE_INT_STATUS_REG);
	}

	chained_irq_exit(irqchip, desc);
}

static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int err;

	err = mtk_pcie_init_irq_domains(pcie);
	if (err)
		return err;

	pcie->irq = platform_get_irq(pdev, 0);
	if (pcie->irq < 0)
		return pcie->irq;

	irq_set_chained_handler_and_data(pcie->irq, mtk_pcie_irq_handler, pcie);

	return 0;
}

static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *regs;
	int ret;

	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
	if (!regs)
		return -EINVAL;
	pcie->base = devm_ioremap_resource(dev, regs);
	if (IS_ERR(pcie->base)) {
		dev_err(dev, "failed to map register base\n");
		return PTR_ERR(pcie->base);
	}

	pcie->reg_base = regs->start;

	pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
	if (IS_ERR(pcie->phy_reset)) {
		ret = PTR_ERR(pcie->phy_reset);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get PHY reset\n");

		return ret;
	}

	pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
	if (IS_ERR(pcie->mac_reset)) {
		ret = PTR_ERR(pcie->mac_reset);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get MAC reset\n");

		return ret;
	}

	pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get PHY\n");

		return ret;
	}

	pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
	if (pcie->num_clks < 0) {
		dev_err(dev, "failed to get clocks\n");
		return pcie->num_clks;
	}

	return 0;
}

static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	/* PHY power on and enable pipe clock */
	reset_control_deassert(pcie->phy_reset);

	err = phy_init(pcie->phy);
	if (err) {
		dev_err(dev, "failed to initialize PHY\n");
		goto err_phy_init;
	}

	err = phy_power_on(pcie->phy);
	if (err) {
		dev_err(dev, "failed to power on PHY\n");
		goto err_phy_on;
	}

	/* MAC power on and enable transaction layer clocks */
	reset_control_deassert(pcie->mac_reset);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
	if (err) {
		dev_err(dev, "failed to enable clocks\n");
		goto err_clk_init;
	}

	return 0;

err_clk_init:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	reset_control_assert(pcie->mac_reset);
	phy_power_off(pcie->phy);
err_phy_on:
	phy_exit(pcie->phy);
err_phy_init:
	reset_control_assert(pcie->phy_reset);

	return err;
}

static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
{
	clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);

	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);
	reset_control_assert(pcie->mac_reset);

	phy_power_off(pcie->phy);
	phy_exit(pcie->phy);
	reset_control_assert(pcie->phy_reset);
}

static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
{
	int err;

	err = mtk_pcie_parse_port(pcie);
	if (err)
		return err;

	/*
	 * The controller may have been left out of reset by the bootloader
	 * so make sure that we get a clean start by asserting resets here.
	 */
	reset_control_assert(pcie->phy_reset);
	reset_control_assert(pcie->mac_reset);
	usleep_range(10, 20);

	/* Don't touch the hardware registers before power up */
	err = mtk_pcie_power_up(pcie);
	if (err)
		return err;

	/* Try link up */
	err = mtk_pcie_startup_port(pcie);
	if (err)
		goto err_setup;

	err = mtk_pcie_setup_irq(pcie);
	if (err)
		goto err_setup;

	return 0;

err_setup:
	mtk_pcie_power_down(pcie);

	return err;
}

static int mtk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_gen3_pcie *pcie;
	struct pci_host_bridge *host;
	int err;

	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!host)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(host);

	pcie->dev = dev;
	platform_set_drvdata(pdev, pcie);

	err = mtk_pcie_setup(pcie);
	if (err)
		return err;

	host->ops = &mtk_pcie_ops;
	host->sysdata = pcie;

	err = pci_host_probe(host);
	if (err) {
		mtk_pcie_irq_teardown(pcie);
		mtk_pcie_power_down(pcie);
		return err;
	}

	return 0;
}

static int mtk_pcie_remove(struct platform_device *pdev)
{
	struct mtk_gen3_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);

	pci_lock_rescan_remove();
	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);
	pci_unlock_rescan_remove();

	mtk_pcie_irq_teardown(pcie);
	mtk_pcie_power_down(pcie);

	return 0;
}

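/*
 * The INTx/MSI summary enables and the per-set MSI enables are saved across
 * suspend because mtk_pcie_power_down() is called in the suspend path and
 * the MAC register state is re-initialized by mtk_pcie_startup_port() on
 * resume.
 */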
static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
{
	int i;

	raw_spin_lock(&pcie->irq_lock);

	pcie->saved_irq_state = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		msi_set->saved_irq_state = readl_relaxed(msi_set->base +
					   PCIE_MSI_SET_ENABLE_OFFSET);
	}

	raw_spin_unlock(&pcie->irq_lock);
}

static void mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
{
	int i;

	raw_spin_lock(&pcie->irq_lock);

	writel_relaxed(pcie->saved_irq_state, pcie->base + PCIE_INT_ENABLE_REG);

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		writel_relaxed(msi_set->saved_irq_state,
			       msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	}

	raw_spin_unlock(&pcie->irq_lock);
}

static int mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
{
	u32 val;

	val = readl_relaxed(pcie->base + PCIE_ICMD_PM_REG);
	val |= PCIE_TURN_OFF_LINK;
	writel_relaxed(val, pcie->base + PCIE_ICMD_PM_REG);

	/* Wait for the link to enter the L2 idle state */
	return readl_poll_timeout(pcie->base + PCIE_LTSSM_STATUS_REG, val,
				  (PCIE_LTSSM_STATE(val) ==
				   PCIE_LTSSM_STATE_L2_IDLE), 20,
				   50 * USEC_PER_MSEC);
}

static int mtk_pcie_suspend_noirq(struct device *dev)
{
	struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
	int err;
	u32 val;

	/* Trigger link to L2 state */
	err = mtk_pcie_turn_off_link(pcie);
	if (err) {
		dev_err(pcie->dev, "cannot enter L2 state\n");
		return err;
	}

	/* Pull down the PERST# pin */
	val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
	val |= PCIE_PE_RSTB;
	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

	dev_dbg(pcie->dev, "entered L2 state successfully\n");

	mtk_pcie_irq_save(pcie);
	mtk_pcie_power_down(pcie);

	return 0;
}

static int mtk_pcie_resume_noirq(struct device *dev)
{
	struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
	int err;

	err = mtk_pcie_power_up(pcie);
	if (err)
		return err;

	err = mtk_pcie_startup_port(pcie);
	if (err) {
		mtk_pcie_power_down(pcie);
		return err;
	}

	mtk_pcie_irq_restore(pcie);

	return 0;
}

static const struct dev_pm_ops mtk_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
				  mtk_pcie_resume_noirq)
};

static const struct of_device_id mtk_pcie_of_match[] = {
	{ .compatible = "mediatek,mt8192-pcie" },
	{},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);

static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.remove = mtk_pcie_remove,
	.driver = {
		.name = "mtk-pcie",
		.of_match_table = mtk_pcie_of_match,
		.pm = &mtk_pcie_pm_ops,
	},
};

module_platform_driver(mtk_pcie_driver);
MODULE_LICENSE("GPL v2");