1 // SPDX-License-Identifier: GPL-2.0+
2 /* Copyright (C) 2009 - 2019 Broadcom */
3 
4 #include <linux/bitfield.h>
5 #include <linux/bitops.h>
6 #include <linux/clk.h>
7 #include <linux/compiler.h>
8 #include <linux/delay.h>
9 #include <linux/init.h>
10 #include <linux/interrupt.h>
11 #include <linux/io.h>
12 #include <linux/ioport.h>
13 #include <linux/irqchip/chained_irq.h>
14 #include <linux/irqdomain.h>
15 #include <linux/kernel.h>
16 #include <linux/list.h>
17 #include <linux/log2.h>
18 #include <linux/module.h>
19 #include <linux/msi.h>
20 #include <linux/of_address.h>
21 #include <linux/of_irq.h>
22 #include <linux/of_pci.h>
23 #include <linux/of_platform.h>
24 #include <linux/pci.h>
25 #include <linux/pci-ecam.h>
26 #include <linux/printk.h>
27 #include <linux/reset.h>
28 #include <linux/sizes.h>
29 #include <linux/slab.h>
30 #include <linux/string.h>
31 #include <linux/types.h>
32 
33 #include "../pci.h"
34 
35 /* BRCM_PCIE_CAP_REGS - Offset for the mandatory capability config regs */
36 #define BRCM_PCIE_CAP_REGS				0x00ac
37 
38 /* Broadcom STB PCIe Register Offsets */
39 #define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1				0x0188
40 #define  PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK	0xc
41 #define  PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN			0x0
42 
43 #define PCIE_RC_CFG_PRIV1_ID_VAL3			0x043c
44 #define  PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK	0xffffff
45 
46 #define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY			0x04dc
47 #define  PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK	0xc00
48 
49 #define PCIE_RC_DL_MDIO_ADDR				0x1100
50 #define PCIE_RC_DL_MDIO_WR_DATA				0x1104
51 #define PCIE_RC_DL_MDIO_RD_DATA				0x1108
52 
53 #define PCIE_MISC_MISC_CTRL				0x4008
54 #define  PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK		0x1000
55 #define  PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK	0x2000
56 #define  PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK	0x300000
57 
58 #define  PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK		0xf8000000
59 #define  PCIE_MISC_MISC_CTRL_SCB1_SIZE_MASK		0x07c00000
60 #define  PCIE_MISC_MISC_CTRL_SCB2_SIZE_MASK		0x0000001f
61 #define  SCB_SIZE_MASK(x) PCIE_MISC_MISC_CTRL_SCB ## x ## _SIZE_MASK
62 
63 #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO		0x400c
64 #define PCIE_MEM_WIN0_LO(win)	\
65 		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO + ((win) * 8)
66 
67 #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI		0x4010
68 #define PCIE_MEM_WIN0_HI(win)	\
69 		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 8)
70 
71 #define PCIE_MISC_RC_BAR1_CONFIG_LO			0x402c
72 #define  PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK		0x1f
73 
74 #define PCIE_MISC_RC_BAR2_CONFIG_LO			0x4034
75 #define  PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK		0x1f
76 #define PCIE_MISC_RC_BAR2_CONFIG_HI			0x4038
77 
78 #define PCIE_MISC_RC_BAR3_CONFIG_LO			0x403c
79 #define  PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK		0x1f
80 
81 #define PCIE_MISC_MSI_BAR_CONFIG_LO			0x4044
82 #define PCIE_MISC_MSI_BAR_CONFIG_HI			0x4048
83 
84 #define PCIE_MISC_MSI_DATA_CONFIG			0x404c
85 #define  PCIE_MISC_MSI_DATA_CONFIG_VAL_32		0xffe06540
86 #define  PCIE_MISC_MSI_DATA_CONFIG_VAL_8		0xfff86540
87 
88 #define PCIE_MISC_PCIE_CTRL				0x4064
89 #define  PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK	0x1
90 #define PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK		0x4
91 
92 #define PCIE_MISC_PCIE_STATUS				0x4068
93 #define  PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK		0x80
94 #define  PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK	0x20
95 #define  PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK	0x10
96 #define  PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK	0x40
97 
98 #define PCIE_MISC_REVISION				0x406c
99 #define  BRCM_PCIE_HW_REV_33				0x0303
100 #define  BRCM_PCIE_HW_REV_3_20				0x0320
101 
102 #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT		0x4070
103 #define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK	0xfff00000
104 #define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK	0xfff0
105 #define PCIE_MEM_WIN0_BASE_LIMIT(win)	\
106 		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT + ((win) * 4)
107 
108 #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI			0x4080
109 #define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK	0xff
110 #define PCIE_MEM_WIN0_BASE_HI(win)	\
111 		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI + ((win) * 8)
112 
113 #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI			0x4084
114 #define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK	0xff
115 #define PCIE_MEM_WIN0_LIMIT_HI(win)	\
116 		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8)
117 
118 #define PCIE_MISC_HARD_PCIE_HARD_DEBUG					0x4204
119 #define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK	0x2
120 #define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK		0x08000000
121 #define  PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK		0x00800000
122 
123 
124 #define PCIE_INTR2_CPU_BASE		0x4300
125 #define PCIE_MSI_INTR2_BASE		0x4500
126 /* Offsets from PCIE_INTR2_CPU_BASE and PCIE_MSI_INTR2_BASE */
127 #define  MSI_INT_STATUS			0x0
128 #define  MSI_INT_CLR			0x8
129 #define  MSI_INT_MASK_SET		0x10
130 #define  MSI_INT_MASK_CLR		0x14
131 
132 #define PCIE_EXT_CFG_DATA				0x8000
133 #define PCIE_EXT_CFG_INDEX				0x9000
134 
135 #define  PCIE_RGR1_SW_INIT_1_PERST_MASK			0x1
136 #define  PCIE_RGR1_SW_INIT_1_PERST_SHIFT		0x0
137 
138 #define RGR1_SW_INIT_1_INIT_GENERIC_MASK		0x2
139 #define RGR1_SW_INIT_1_INIT_GENERIC_SHIFT		0x1
140 #define RGR1_SW_INIT_1_INIT_7278_MASK			0x1
141 #define RGR1_SW_INIT_1_INIT_7278_SHIFT			0x0
142 
143 /* PCIe parameters */
144 #define BRCM_NUM_PCIE_OUT_WINS		0x4
145 #define BRCM_INT_PCI_MSI_NR		32
146 #define BRCM_INT_PCI_MSI_LEGACY_NR	8
147 #define BRCM_INT_PCI_MSI_SHIFT		0
148 #define BRCM_INT_PCI_MSI_MASK		GENMASK(BRCM_INT_PCI_MSI_NR - 1, 0)
149 #define BRCM_INT_PCI_MSI_LEGACY_MASK	GENMASK(31, \
150 						32 - BRCM_INT_PCI_MSI_LEGACY_NR)
151 
152 /* MSI target addresses */
153 #define BRCM_MSI_TARGET_ADDR_LT_4GB	0x0fffffffcULL
154 #define BRCM_MSI_TARGET_ADDR_GT_4GB	0xffffffffcULL
155 
156 /* MDIO registers */
157 #define MDIO_PORT0			0x0
158 #define MDIO_DATA_MASK			0x7fffffff
159 #define MDIO_PORT_MASK			0xf0000
160 #define MDIO_REGAD_MASK			0xffff
161 #define MDIO_CMD_MASK			0xfff00000
162 #define MDIO_CMD_READ			0x1
163 #define MDIO_CMD_WRITE			0x0
164 #define MDIO_DATA_DONE_MASK		0x80000000
165 #define MDIO_RD_DONE(x)			(((x) & MDIO_DATA_DONE_MASK) ? 1 : 0)
166 #define MDIO_WT_DONE(x)			(((x) & MDIO_DATA_DONE_MASK) ? 0 : 1)
167 #define SSC_REGS_ADDR			0x1100
168 #define SET_ADDR_OFFSET			0x1f
169 #define SSC_CNTL_OFFSET			0x2
170 #define SSC_CNTL_OVRD_EN_MASK		0x8000
171 #define SSC_CNTL_OVRD_VAL_MASK		0x4000
172 #define SSC_STATUS_OFFSET		0x1
173 #define SSC_STATUS_SSC_MASK		0x400
174 #define SSC_STATUS_PLL_LOCK_MASK	0x800
175 #define PCIE_BRCM_MAX_MEMC		3
176 
177 #define IDX_ADDR(pcie)			(pcie->reg_offsets[EXT_CFG_INDEX])
178 #define DATA_ADDR(pcie)			(pcie->reg_offsets[EXT_CFG_DATA])
179 #define PCIE_RGR1_SW_INIT_1(pcie)	(pcie->reg_offsets[RGR1_SW_INIT_1])
180 
181 /* Rescal registers */
182 #define PCIE_DVT_PMU_PCIE_PHY_CTRL				0xc700
183 #define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS			0x3
184 #define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK		0x4
185 #define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT	0x2
186 #define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK		0x2
187 #define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT		0x1
188 #define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK		0x1
189 #define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT		0x0
190 
191 /* Forward declarations */
192 struct brcm_pcie;
193 static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val);
194 static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val);
195 static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val);
196 static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val);
197 static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val);
198 
/* Indices into a chip's reg_offsets[] table (see pcie_offsets* below) */
enum {
	RGR1_SW_INIT_1,
	EXT_CFG_INDEX,
	EXT_CFG_DATA,
};

/* Indices for a per-chip bridge-reset mask/shift pair */
enum {
	RGR1_SW_INIT_1_INIT_MASK,
	RGR1_SW_INIT_1_INIT_SHIFT,
};

/* Supported Broadcom SoC families served by this driver */
enum pcie_type {
	GENERIC,
	BCM7425,
	BCM7435,
	BCM4908,
	BCM7278,
	BCM2711,
};
218 
/*
 * Per-chip match data: register offset table plus chip-specific helpers
 * for driving PERST# and the bridge software-init (reset) bit.
 */
struct pcie_cfg_data {
	const int *offsets;	/* indexed by RGR1_SW_INIT_1/EXT_CFG_* above */
	const enum pcie_type type;
	void (*perst_set)(struct brcm_pcie *pcie, u32 val);
	void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
};
225 
/* Register offsets for most chips (GENERIC, 7435, 4908, 2711) */
static const int pcie_offsets[] = {
	[RGR1_SW_INIT_1] = 0x9210,
	[EXT_CFG_INDEX]  = 0x9000,
	[EXT_CFG_DATA]   = 0x9004,
};

/* BMIPS-based BCM7425 places the same registers at different offsets */
static const int pcie_offsets_bmips_7425[] = {
	[RGR1_SW_INIT_1] = 0x8010,
	[EXT_CFG_INDEX]  = 0x8300,
	[EXT_CFG_DATA]   = 0x8304,
};
237 
static const struct pcie_cfg_data generic_cfg = {
	.offsets	= pcie_offsets,
	.type		= GENERIC,
	.perst_set	= brcm_pcie_perst_set_generic,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};

static const struct pcie_cfg_data bcm7425_cfg = {
	.offsets	= pcie_offsets_bmips_7425,
	.type		= BCM7425,
	.perst_set	= brcm_pcie_perst_set_generic,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};

static const struct pcie_cfg_data bcm7435_cfg = {
	.offsets	= pcie_offsets,
	.type		= BCM7435,
	.perst_set	= brcm_pcie_perst_set_generic,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};

/* BCM4908 drives PERST# through a dedicated reset controller */
static const struct pcie_cfg_data bcm4908_cfg = {
	.offsets	= pcie_offsets,
	.type		= BCM4908,
	.perst_set	= brcm_pcie_perst_set_4908,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};

/* BCM7278 relocates only RGR1_SW_INIT_1 */
static const int pcie_offset_bcm7278[] = {
	[RGR1_SW_INIT_1] = 0xc010,
	[EXT_CFG_INDEX] = 0x9000,
	[EXT_CFG_DATA] = 0x9004,
};

/* BCM7278 uses different PERST#/bridge-reset bit positions */
static const struct pcie_cfg_data bcm7278_cfg = {
	.offsets	= pcie_offset_bcm7278,
	.type		= BCM7278,
	.perst_set	= brcm_pcie_perst_set_7278,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
};

static const struct pcie_cfg_data bcm2711_cfg = {
	.offsets	= pcie_offsets,
	.type		= BCM2711,
	.perst_set	= brcm_pcie_perst_set_generic,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};
285 
/* Per-controller MSI state */
struct brcm_msi {
	struct device		*dev;
	void __iomem		*base;		/* controller register base */
	struct device_node	*np;
	struct irq_domain	*msi_domain;	/* outer, PCI MSI domain */
	struct irq_domain	*inner_domain;	/* hwirq allocation domain */
	struct mutex		lock; /* guards the alloc/free operations */
	u64			target_addr;	/* address devices write MSIs to */
	int			irq;		/* parent (chained) interrupt */
	/* One bit per allocatable MSI vector */
	DECLARE_BITMAP(used, BRCM_INT_PCI_MSI_NR);
	bool			legacy;		/* pre-rev-3.3 MSI handling */
	/* Some chips have MSIs in bits [31..24] of a shared register. */
	int			legacy_shift;
	int			nr; /* No. of MSI available, depends on chip */
	/* This is the base pointer for interrupt status/set/clr regs */
	void __iomem		*intr_base;
};
303 
304 /* Internal PCIe Host Controller Information.*/
/* Internal PCIe Host Controller Information.*/
struct brcm_pcie {
	struct device		*dev;
	void __iomem		*base;		/* controller register base */
	struct clk		*clk;
	struct device_node	*np;
	bool			ssc;		/* spread-spectrum clocking requested */
	int			gen;		/* link-speed limit; 0 = no limit */
	u64			msi_target_addr;
	struct brcm_msi		*msi;
	const int		*reg_offsets;	/* per-chip table, see pcie_offsets* */
	enum pcie_type		type;
	struct reset_control	*rescal;
	struct reset_control	*perst_reset;	/* used on BCM4908 only */
	int			num_memc;	/* number of memory controllers */
	u64			memc_size[PCIE_BRCM_MAX_MEMC];
	u32			hw_rev;
	void			(*perst_set)(struct brcm_pcie *pcie, u32 val);
	void			(*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
};
324 
325 static inline bool is_bmips(const struct brcm_pcie *pcie)
326 {
327 	return pcie->type == BCM7435 || pcie->type == BCM7425;
328 }
329 
330 /*
331  * This is to convert the size of the inbound "BAR" region to the
332  * non-linear values of PCIE_X_MISC_RC_BAR[123]_CONFIG_LO.SIZE
333  */
334 static int brcm_pcie_encode_ibar_size(u64 size)
335 {
336 	int log2_in = ilog2(size);
337 
338 	if (log2_in >= 12 && log2_in <= 15)
339 		/* Covers 4KB to 32KB (inclusive) */
340 		return (log2_in - 12) + 0x1c;
341 	else if (log2_in >= 16 && log2_in <= 35)
342 		/* Covers 64KB to 32GB, (inclusive) */
343 		return log2_in - 15;
344 	/* Something is awry so disable */
345 	return 0;
346 }
347 
348 static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)
349 {
350 	u32 pkt = 0;
351 
352 	pkt |= FIELD_PREP(MDIO_PORT_MASK, port);
353 	pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad);
354 	pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd);
355 
356 	return pkt;
357 }
358 
359 /* negative return value indicates error */
360 static int brcm_pcie_mdio_read(void __iomem *base, u8 port, u8 regad, u32 *val)
361 {
362 	int tries;
363 	u32 data;
364 
365 	writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_READ),
366 		   base + PCIE_RC_DL_MDIO_ADDR);
367 	readl(base + PCIE_RC_DL_MDIO_ADDR);
368 
369 	data = readl(base + PCIE_RC_DL_MDIO_RD_DATA);
370 	for (tries = 0; !MDIO_RD_DONE(data) && tries < 10; tries++) {
371 		udelay(10);
372 		data = readl(base + PCIE_RC_DL_MDIO_RD_DATA);
373 	}
374 
375 	*val = FIELD_GET(MDIO_DATA_MASK, data);
376 	return MDIO_RD_DONE(data) ? 0 : -EIO;
377 }
378 
379 /* negative return value indicates error */
380 static int brcm_pcie_mdio_write(void __iomem *base, u8 port,
381 				u8 regad, u16 wrdata)
382 {
383 	int tries;
384 	u32 data;
385 
386 	writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_WRITE),
387 		   base + PCIE_RC_DL_MDIO_ADDR);
388 	readl(base + PCIE_RC_DL_MDIO_ADDR);
389 	writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);
390 
391 	data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
392 	for (tries = 0; !MDIO_WT_DONE(data) && tries < 10; tries++) {
393 		udelay(10);
394 		data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
395 	}
396 
397 	return MDIO_WT_DONE(data) ? 0 : -EIO;
398 }
399 
400 /*
401  * Configures device for Spread Spectrum Clocking (SSC) mode; a negative
402  * return value indicates error.
403  */
404 static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
405 {
406 	int pll, ssc;
407 	int ret;
408 	u32 tmp;
409 
410 	ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET,
411 				   SSC_REGS_ADDR);
412 	if (ret < 0)
413 		return ret;
414 
415 	ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
416 				  SSC_CNTL_OFFSET, &tmp);
417 	if (ret < 0)
418 		return ret;
419 
420 	u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_EN_MASK);
421 	u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_VAL_MASK);
422 	ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0,
423 				   SSC_CNTL_OFFSET, tmp);
424 	if (ret < 0)
425 		return ret;
426 
427 	usleep_range(1000, 2000);
428 	ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
429 				  SSC_STATUS_OFFSET, &tmp);
430 	if (ret < 0)
431 		return ret;
432 
433 	ssc = FIELD_GET(SSC_STATUS_SSC_MASK, tmp);
434 	pll = FIELD_GET(SSC_STATUS_PLL_LOCK_MASK, tmp);
435 
436 	return ssc && pll ? 0 : -EIO;
437 }
438 
439 /* Limits operation to a specific generation (1, 2, or 3) */
440 static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
441 {
442 	u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
443 	u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
444 
445 	lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen;
446 	writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
447 
448 	lnkctl2 = (lnkctl2 & ~0xf) | gen;
449 	writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
450 }
451 
/*
 * Program outbound window @win to translate the CPU range
 * [@cpu_addr, @cpu_addr + @size) to PCIe address @pcie_addr.
 *
 * The base/limit registers hold addresses in units of 1MB; the upper
 * address bits live in separate HI registers that BMIPS chips lack.
 */
static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
				       unsigned int win, u64 cpu_addr,
				       u64 pcie_addr, u64 size)
{
	u32 cpu_addr_mb_high, limit_addr_mb_high;
	phys_addr_t cpu_addr_mb, limit_addr_mb;
	int high_addr_shift;
	u32 tmp;

	/* Set the base of the pcie_addr window */
	writel(lower_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_LO(win));
	writel(upper_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_HI(win));

	/* Write the addr base & limit lower bits (in MBs) */
	cpu_addr_mb = cpu_addr / SZ_1M;
	limit_addr_mb = (cpu_addr + size - 1) / SZ_1M;

	tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));
	u32p_replace_bits(&tmp, cpu_addr_mb,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);
	u32p_replace_bits(&tmp, limit_addr_mb,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));

	/* BMIPS chips have no HI base/limit registers */
	if (is_bmips(pcie))
		return;

	/* Write the cpu & limit addr upper bits */
	high_addr_shift =
		HWEIGHT32(PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);

	cpu_addr_mb_high = cpu_addr_mb >> high_addr_shift;
	tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_HI(win));
	u32p_replace_bits(&tmp, cpu_addr_mb_high,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_HI(win));

	limit_addr_mb_high = limit_addr_mb >> high_addr_shift;
	tmp = readl(pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
	u32p_replace_bits(&tmp, limit_addr_mb_high,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
}
495 
/* Top-level (PCI-facing) MSI irq_chip; masking is done at the PCI level */
static struct irq_chip brcm_msi_irq_chip = {
	.name            = "BRCM STB PCIe MSI",
	.irq_ack         = irq_chip_ack_parent,
	.irq_mask        = pci_msi_mask_irq,
	.irq_unmask      = pci_msi_unmask_irq,
};

static struct msi_domain_info brcm_msi_domain_info = {
	/* Multi MSI is supported by the controller, but not by this driver */
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
	.chip	= &brcm_msi_irq_chip,
};
508 
509 static void brcm_pcie_msi_isr(struct irq_desc *desc)
510 {
511 	struct irq_chip *chip = irq_desc_get_chip(desc);
512 	unsigned long status;
513 	struct brcm_msi *msi;
514 	struct device *dev;
515 	u32 bit;
516 
517 	chained_irq_enter(chip, desc);
518 	msi = irq_desc_get_handler_data(desc);
519 	dev = msi->dev;
520 
521 	status = readl(msi->intr_base + MSI_INT_STATUS);
522 	status >>= msi->legacy_shift;
523 
524 	for_each_set_bit(bit, &status, msi->nr) {
525 		int ret;
526 		ret = generic_handle_domain_irq(msi->inner_domain, bit);
527 		if (ret)
528 			dev_dbg(dev, "unexpected MSI\n");
529 	}
530 
531 	chained_irq_exit(chip, desc);
532 }
533 
534 static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
535 {
536 	struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
537 
538 	msg->address_lo = lower_32_bits(msi->target_addr);
539 	msg->address_hi = upper_32_bits(msi->target_addr);
540 	msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq;
541 }
542 
/* All MSIs funnel through one parent interrupt; per-MSI affinity is not
 * supported, so always refuse.
 */
static int brcm_msi_set_affinity(struct irq_data *irq_data,
				 const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
548 
549 static void brcm_msi_ack_irq(struct irq_data *data)
550 {
551 	struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
552 	const int shift_amt = data->hwirq + msi->legacy_shift;
553 
554 	writel(1 << shift_amt, msi->intr_base + MSI_INT_CLR);
555 }
556 
557 
/* Inner (hardware-facing) irq_chip for the controller's MSI vectors */
static struct irq_chip brcm_msi_bottom_irq_chip = {
	.name			= "BRCM STB MSI",
	.irq_compose_msi_msg	= brcm_msi_compose_msi_msg,
	.irq_set_affinity	= brcm_msi_set_affinity,
	.irq_ack                = brcm_msi_ack_irq,
};
564 
565 static int brcm_msi_alloc(struct brcm_msi *msi)
566 {
567 	int hwirq;
568 
569 	mutex_lock(&msi->lock);
570 	hwirq = bitmap_find_free_region(msi->used, msi->nr, 0);
571 	mutex_unlock(&msi->lock);
572 
573 	return hwirq;
574 }
575 
576 static void brcm_msi_free(struct brcm_msi *msi, unsigned long hwirq)
577 {
578 	mutex_lock(&msi->lock);
579 	bitmap_release_region(msi->used, hwirq, 0);
580 	mutex_unlock(&msi->lock);
581 }
582 
583 static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
584 				 unsigned int nr_irqs, void *args)
585 {
586 	struct brcm_msi *msi = domain->host_data;
587 	int hwirq;
588 
589 	hwirq = brcm_msi_alloc(msi);
590 
591 	if (hwirq < 0)
592 		return hwirq;
593 
594 	irq_domain_set_info(domain, virq, (irq_hw_number_t)hwirq,
595 			    &brcm_msi_bottom_irq_chip, domain->host_data,
596 			    handle_edge_irq, NULL, NULL);
597 	return 0;
598 }
599 
600 static void brcm_irq_domain_free(struct irq_domain *domain,
601 				 unsigned int virq, unsigned int nr_irqs)
602 {
603 	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
604 	struct brcm_msi *msi = irq_data_get_irq_chip_data(d);
605 
606 	brcm_msi_free(msi, d->hwirq);
607 }
608 
/* Ops for the inner (vector-allocation) MSI irq domain */
static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= brcm_irq_domain_alloc,
	.free	= brcm_irq_domain_free,
};
613 
614 static int brcm_allocate_domains(struct brcm_msi *msi)
615 {
616 	struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np);
617 	struct device *dev = msi->dev;
618 
619 	msi->inner_domain = irq_domain_add_linear(NULL, msi->nr, &msi_domain_ops, msi);
620 	if (!msi->inner_domain) {
621 		dev_err(dev, "failed to create IRQ domain\n");
622 		return -ENOMEM;
623 	}
624 
625 	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
626 						    &brcm_msi_domain_info,
627 						    msi->inner_domain);
628 	if (!msi->msi_domain) {
629 		dev_err(dev, "failed to create MSI domain\n");
630 		irq_domain_remove(msi->inner_domain);
631 		return -ENOMEM;
632 	}
633 
634 	return 0;
635 }
636 
637 static void brcm_free_domains(struct brcm_msi *msi)
638 {
639 	irq_domain_remove(msi->msi_domain);
640 	irq_domain_remove(msi->inner_domain);
641 }
642 
643 static void brcm_msi_remove(struct brcm_pcie *pcie)
644 {
645 	struct brcm_msi *msi = pcie->msi;
646 
647 	if (!msi)
648 		return;
649 	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
650 	brcm_free_domains(msi);
651 }
652 
/* Program the controller's MSI target address, data value and masks. */
static void brcm_msi_set_regs(struct brcm_msi *msi)
{
	u32 val = msi->legacy ? BRCM_INT_PCI_MSI_LEGACY_MASK :
				BRCM_INT_PCI_MSI_MASK;

	/* Unmask and clear all MSI vectors this chip supports */
	writel(val, msi->intr_base + MSI_INT_MASK_CLR);
	writel(val, msi->intr_base + MSI_INT_CLR);

	/*
	 * The 0 bit of PCIE_MISC_MSI_BAR_CONFIG_LO is repurposed to MSI
	 * enable, which we set to 1.
	 */
	writel(lower_32_bits(msi->target_addr) | 0x1,
	       msi->base + PCIE_MISC_MSI_BAR_CONFIG_LO);
	writel(upper_32_bits(msi->target_addr),
	       msi->base + PCIE_MISC_MSI_BAR_CONFIG_HI);

	/* The data value tells the HW which writes to treat as MSIs */
	val = msi->legacy ? PCIE_MISC_MSI_DATA_CONFIG_VAL_8 : PCIE_MISC_MSI_DATA_CONFIG_VAL_32;
	writel(val, msi->base + PCIE_MISC_MSI_DATA_CONFIG);
}
673 
674 static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
675 {
676 	struct brcm_msi *msi;
677 	int irq, ret;
678 	struct device *dev = pcie->dev;
679 
680 	irq = irq_of_parse_and_map(dev->of_node, 1);
681 	if (irq <= 0) {
682 		dev_err(dev, "cannot map MSI interrupt\n");
683 		return -ENODEV;
684 	}
685 
686 	msi = devm_kzalloc(dev, sizeof(struct brcm_msi), GFP_KERNEL);
687 	if (!msi)
688 		return -ENOMEM;
689 
690 	mutex_init(&msi->lock);
691 	msi->dev = dev;
692 	msi->base = pcie->base;
693 	msi->np = pcie->np;
694 	msi->target_addr = pcie->msi_target_addr;
695 	msi->irq = irq;
696 	msi->legacy = pcie->hw_rev < BRCM_PCIE_HW_REV_33;
697 
698 	/*
699 	 * Sanity check to make sure that the 'used' bitmap in struct brcm_msi
700 	 * is large enough.
701 	 */
702 	BUILD_BUG_ON(BRCM_INT_PCI_MSI_LEGACY_NR > BRCM_INT_PCI_MSI_NR);
703 
704 	if (msi->legacy) {
705 		msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE;
706 		msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR;
707 		msi->legacy_shift = 24;
708 	} else {
709 		msi->intr_base = msi->base + PCIE_MSI_INTR2_BASE;
710 		msi->nr = BRCM_INT_PCI_MSI_NR;
711 		msi->legacy_shift = 0;
712 	}
713 
714 	ret = brcm_allocate_domains(msi);
715 	if (ret)
716 		return ret;
717 
718 	irq_set_chained_handler_and_data(msi->irq, brcm_pcie_msi_isr, msi);
719 
720 	brcm_msi_set_regs(msi);
721 	pcie->msi = msi;
722 
723 	return 0;
724 }
725 
726 /* The controller is capable of serving in both RC and EP roles */
727 static bool brcm_pcie_rc_mode(struct brcm_pcie *pcie)
728 {
729 	void __iomem *base = pcie->base;
730 	u32 val = readl(base + PCIE_MISC_PCIE_STATUS);
731 
732 	return !!FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK, val);
733 }
734 
735 static bool brcm_pcie_link_up(struct brcm_pcie *pcie)
736 {
737 	u32 val = readl(pcie->base + PCIE_MISC_PCIE_STATUS);
738 	u32 dla = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK, val);
739 	u32 plu = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK, val);
740 
741 	return dla && plu;
742 }
743 
744 static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn,
745 					int where)
746 {
747 	struct brcm_pcie *pcie = bus->sysdata;
748 	void __iomem *base = pcie->base;
749 	int idx;
750 
751 	/* Accesses to the RC go right to the RC registers if slot==0 */
752 	if (pci_is_root_bus(bus))
753 		return PCI_SLOT(devfn) ? NULL : base + where;
754 
755 	/* For devices, write to the config space index register */
756 	idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
757 	writel(idx, pcie->base + PCIE_EXT_CFG_INDEX);
758 	return base + PCIE_EXT_CFG_DATA + where;
759 }
760 
761 static void __iomem *brcm_pcie_map_conf32(struct pci_bus *bus, unsigned int devfn,
762 					 int where)
763 {
764 	struct brcm_pcie *pcie = bus->sysdata;
765 	void __iomem *base = pcie->base;
766 	int idx;
767 
768 	/* Accesses to the RC go right to the RC registers if slot==0 */
769 	if (pci_is_root_bus(bus))
770 		return PCI_SLOT(devfn) ? NULL : base + (where & ~0x3);
771 
772 	/* For devices, write to the config space index register */
773 	idx = PCIE_ECAM_OFFSET(bus->number, devfn, (where & ~3));
774 	writel(idx, base + IDX_ADDR(pcie));
775 	return base + DATA_ADDR(pcie);
776 }
777 
/* Config accessors for chips supporting arbitrary-width accesses */
static struct pci_ops brcm_pcie_ops = {
	.map_bus = brcm_pcie_map_conf,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

/* Config accessors for (BMIPS) chips that only do 32-bit accesses */
static struct pci_ops brcm_pcie_ops32 = {
	.map_bus = brcm_pcie_map_conf32,
	.read = pci_generic_config_read32,
	.write = pci_generic_config_write32,
};
789 
790 static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
791 {
792 	u32 tmp, mask =  RGR1_SW_INIT_1_INIT_GENERIC_MASK;
793 	u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT;
794 
795 	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
796 	tmp = (tmp & ~mask) | ((val << shift) & mask);
797 	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
798 }
799 
800 static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
801 {
802 	u32 tmp, mask =  RGR1_SW_INIT_1_INIT_7278_MASK;
803 	u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT;
804 
805 	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
806 	tmp = (tmp & ~mask) | ((val << shift) & mask);
807 	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
808 }
809 
810 static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
811 {
812 	if (WARN_ONCE(!pcie->perst_reset, "missing PERST# reset controller\n"))
813 		return;
814 
815 	if (val)
816 		reset_control_assert(pcie->perst_reset);
817 	else
818 		reset_control_deassert(pcie->perst_reset);
819 }
820 
821 static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
822 {
823 	u32 tmp;
824 
825 	/* Perst bit has moved and assert value is 0 */
826 	tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL);
827 	u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK);
828 	writel(tmp, pcie->base +  PCIE_MISC_PCIE_CTRL);
829 }
830 
831 static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
832 {
833 	u32 tmp;
834 
835 	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
836 	u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK);
837 	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
838 }
839 
/*
 * Derive the inbound (RC BAR2) window size and PCIe-space offset from the
 * bridge's dma-ranges and the optional "brcm,scb-sizes" DT property.
 * Returns 0 on success, -EINVAL if dma-ranges are absent or the resulting
 * window violates the controller's size/alignment constraints.
 */
static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
							u64 *rc_bar2_size,
							u64 *rc_bar2_offset)
{
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	struct resource_entry *entry;
	struct device *dev = pcie->dev;
	u64 lowest_pcie_addr = ~(u64)0;
	int ret, i = 0;
	u64 size = 0;

	/* Total the dma-ranges and find the lowest PCIe-space address */
	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
		u64 pcie_beg = entry->res->start - entry->offset;

		size += entry->res->end - entry->res->start + 1;
		if (pcie_beg < lowest_pcie_addr)
			lowest_pcie_addr = pcie_beg;
	}

	if (lowest_pcie_addr == ~(u64)0) {
		dev_err(dev, "DT node has no dma-ranges\n");
		return -EINVAL;
	}

	ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
						  PCIE_BRCM_MAX_MEMC);

	if (ret <= 0) {
		/* Make an educated guess */
		pcie->num_memc = 1;
		pcie->memc_size[0] = 1ULL << fls64(size - 1);
	} else {
		pcie->num_memc = ret;
	}

	/* Each memc is viewed through a "port" that is a power of 2 */
	for (i = 0, size = 0; i < pcie->num_memc; i++)
		size += pcie->memc_size[i];

	/* System memory starts at this address in PCIe-space */
	*rc_bar2_offset = lowest_pcie_addr;
	/* The sum of all memc views must also be a power of 2 */
	*rc_bar2_size = 1ULL << fls64(size - 1);

	/*
	 * We validate the inbound memory view even though we should trust
	 * whatever the device-tree provides. This is because of an HW issue on
	 * early Raspberry Pi 4's revisions (bcm2711). It turns out its
	 * firmware has to dynamically edit dma-ranges due to a bug on the
	 * PCIe controller integration, which prohibits any access above the
	 * lower 3GB of memory. Given this, we decided to keep the dma-ranges
	 * in check, avoiding hard to debug device-tree related issues in the
	 * future:
	 *
	 * The PCIe host controller by design must set the inbound viewport to
	 * be a contiguous arrangement of all of the system's memory.  In
	 * addition, its size must be a power of two.  To further complicate
	 * matters, the viewport must start on a pcie-address that is aligned
	 * on a multiple of its size.  If a portion of the viewport does not
	 * represent system memory -- e.g. 3GB of memory requires a 4GB
	 * viewport -- we can map the outbound memory in or after 3GB and even
	 * though the viewport will overlap the outbound memory the controller
	 * will know to send outbound memory downstream and everything else
	 * upstream.
	 *
	 * For example:
	 *
	 * - The best-case scenario, memory up to 3GB, is to place the inbound
	 *   region in the first 4GB of pcie-space, as some legacy devices can
	 *   only address 32bits. We would also like to put the MSI under 4GB
	 *   as well, since some devices require a 32bit MSI target address.
	 *
	 * - If the system memory is 4GB or larger we cannot start the inbound
	 *   region at location 0 (since we have to allow some space for
	 *   outbound memory @ 3GB). So instead it will start at the 1x
	 *   multiple of its size
	 */
	if (!*rc_bar2_size || (*rc_bar2_offset & (*rc_bar2_size - 1)) ||
	    (*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) {
		dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n",
			*rc_bar2_size, *rc_bar2_offset);
		return -EINVAL;
	}

	return 0;
}
926 
/*
 * Core bring-up of the root complex: reset the bridge, power up the SerDes,
 * program the inbound (RC_BAR2) and outbound memory windows, wait for
 * link-up, and apply RC config fixups (ASPM advertisement, bridge class
 * code, BAR endian mode).
 *
 * Returns 0 on success or a negative errno (bad DT window layout, link
 * down, or controller strapped into EP mode).
 */
static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	u64 rc_bar2_offset, rc_bar2_size;
	void __iomem *base = pcie->base;
	struct device *dev = pcie->dev;
	struct resource_entry *entry;
	bool ssc_good = false;
	struct resource *res;
	int num_out_wins = 0;
	u16 nlw, cls, lnksta;
	int i, ret, memc;
	u32 tmp, burst, aspm_support;

	/* Reset the bridge */
	pcie->bridge_sw_init_set(pcie, 1);
	usleep_range(100, 200);

	/* Take the bridge out of reset */
	pcie->bridge_sw_init_set(pcie, 0);

	/* Power up the SerDes by clearing IDDQ (bit position differs on bmips) */
	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
	if (is_bmips(pcie))
		tmp &= ~PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
	else
		tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
	/* Wait for SerDes to be stable */
	usleep_range(100, 200);

	/*
	 * SCB_MAX_BURST_SIZE is a two bit field.  For GENERIC chips it
	 * is encoded as 0=128, 1=256, 2=512, 3=Rsvd, for BCM7278 it
	 * is encoded as 0=Rsvd, 1=128, 2=256, 3=512.
	 */
	if (is_bmips(pcie))
		burst = 0x1; /* 256 bytes */
	else if (pcie->type == BCM2711)
		burst = 0x0; /* 128 bytes */
	else if (pcie->type == BCM7278)
		burst = 0x3; /* 512 bytes */
	else
		burst = 0x2; /* 512 bytes */

	/* Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN */
	tmp = readl(base + PCIE_MISC_MISC_CTRL);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK);
	u32p_replace_bits(&tmp, burst, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK);
	writel(tmp, base + PCIE_MISC_MISC_CTRL);

	ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size,
						    &rc_bar2_offset);
	if (ret)
		return ret;

	/* Program the inbound window (RC_BAR2): pcie-side offset + encoded size */
	tmp = lower_32_bits(rc_bar2_offset);
	u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(rc_bar2_size),
			  PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK);
	writel(tmp, base + PCIE_MISC_RC_BAR2_CONFIG_LO);
	writel(upper_32_bits(rc_bar2_offset),
	       base + PCIE_MISC_RC_BAR2_CONFIG_HI);

	/* Per-memory-controller aperture sizes; field encodes log2(size) - 15 */
	tmp = readl(base + PCIE_MISC_MISC_CTRL);
	for (memc = 0; memc < pcie->num_memc; memc++) {
		u32 scb_size_val = ilog2(pcie->memc_size[memc]) - 15;

		if (memc == 0)
			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(0));
		else if (memc == 1)
			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(1));
		else if (memc == 2)
			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(2));
	}
	writel(tmp, base + PCIE_MISC_MISC_CTRL);

	/*
	 * We ideally want the MSI target address to be located in the 32bit
	 * addressable memory area. Some devices might depend on it. This is
	 * possible either when the inbound window is located above the lower
	 * 4GB or when the inbound area is smaller than 4GB (taking into
	 * account the rounding-up we're forced to perform).
	 */
	if (rc_bar2_offset >= SZ_4G || (rc_bar2_size + rc_bar2_offset) < SZ_4G)
		pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB;
	else
		pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;

	/* disable the PCIe->GISB memory window (RC_BAR1) */
	tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);
	tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK;
	writel(tmp, base + PCIE_MISC_RC_BAR1_CONFIG_LO);

	/* disable the PCIe->SCB memory window (RC_BAR3) */
	tmp = readl(base + PCIE_MISC_RC_BAR3_CONFIG_LO);
	tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
	writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);

	/* Cap link speed if "max-link-speed" was given in the DT */
	if (pcie->gen)
		brcm_pcie_set_gen(pcie, pcie->gen);

	/* Unassert the fundamental reset */
	pcie->perst_set(pcie, 0);

	/*
	 * Give the RC/EP time to wake up, before trying to configure RC.
	 * Intermittently check status for link-up, up to a total of 100ms.
	 */
	for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
		msleep(5);

	if (!brcm_pcie_link_up(pcie)) {
		dev_err(dev, "link down\n");
		return -ENODEV;
	}

	if (!brcm_pcie_rc_mode(pcie)) {
		dev_err(dev, "PCIe misconfigured; is in EP mode\n");
		return -EINVAL;
	}

	/* Map each bridge MEM window to an outbound CPU->PCIe window */
	resource_list_for_each_entry(entry, &bridge->windows) {
		res = entry->res;

		if (resource_type(res) != IORESOURCE_MEM)
			continue;

		if (num_out_wins >= BRCM_NUM_PCIE_OUT_WINS) {
			dev_err(pcie->dev, "too many outbound wins\n");
			return -EINVAL;
		}

		if (is_bmips(pcie)) {
			u64 start = res->start;
			unsigned int j, nwins = resource_size(res) / SZ_128M;

			/* bmips PCIe outbound windows have a 128MB max size */
			if (nwins > BRCM_NUM_PCIE_OUT_WINS)
				nwins = BRCM_NUM_PCIE_OUT_WINS;
			for (j = 0; j < nwins; j++, start += SZ_128M)
				brcm_pcie_set_outbound_win(pcie, j, start,
							   start - entry->offset,
							   SZ_128M);
			break;
		}
		brcm_pcie_set_outbound_win(pcie, num_out_wins, res->start,
					   res->start - entry->offset,
					   resource_size(res));
		num_out_wins++;
	}

	/* Don't advertise L0s capability if 'aspm-no-l0s' */
	aspm_support = PCIE_LINK_STATE_L1;
	if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
		aspm_support |= PCIE_LINK_STATE_L0S;
	tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
	u32p_replace_bits(&tmp, aspm_support,
		PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
	writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);

	/*
	 * For config space accesses on the RC, show the right class for
	 * a PCIe-PCIe bridge (the default setting is to be EP mode).
	 */
	tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
	u32p_replace_bits(&tmp, 0x060400,
			  PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
	writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);

	/* Spread-spectrum clocking is best-effort; failure is only logged */
	if (pcie->ssc) {
		ret = brcm_pcie_set_ssc(pcie);
		if (ret == 0)
			ssc_good = true;
		else
			dev_err(dev, "failed attempt to enter ssc mode\n");
	}

	/* Report negotiated link speed/width from the RC's link status reg */
	lnksta = readw(base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKSTA);
	cls = FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta);
	nlw = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
	dev_info(dev, "link up, %s x%u %s\n",
		 pci_speed_string(pcie_link_speed[cls]), nlw,
		 ssc_good ? "(SSC)" : "(!SSC)");

	/* PCIe->SCB endian mode for BAR */
	tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
	u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
		PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
	writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);

	/*
	 * Refclk from RC should be gated with CLKREQ# input when ASPM L0s,L1
	 * is enabled => setting the CLKREQ_DEBUG_ENABLE field to 1.
	 */
	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
	tmp |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK;
	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);

	return 0;
}
1127 
1128 /* L23 is a low-power PCIe link state */
1129 static void brcm_pcie_enter_l23(struct brcm_pcie *pcie)
1130 {
1131 	void __iomem *base = pcie->base;
1132 	int l23, i;
1133 	u32 tmp;
1134 
1135 	/* Assert request for L23 */
1136 	tmp = readl(base + PCIE_MISC_PCIE_CTRL);
1137 	u32p_replace_bits(&tmp, 1, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
1138 	writel(tmp, base + PCIE_MISC_PCIE_CTRL);
1139 
1140 	/* Wait up to 36 msec for L23 */
1141 	tmp = readl(base + PCIE_MISC_PCIE_STATUS);
1142 	l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK, tmp);
1143 	for (i = 0; i < 15 && !l23; i++) {
1144 		usleep_range(2000, 2400);
1145 		tmp = readl(base + PCIE_MISC_PCIE_STATUS);
1146 		l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK,
1147 				tmp);
1148 	}
1149 
1150 	if (!l23)
1151 		dev_err(pcie->dev, "failed to enter low-power link state\n");
1152 }
1153 
/*
 * Sequence the PHY's PWRDN / RESET / DIG_RESET control fields in the
 * PCIE_DVT_PMU_PCIE_PHY_CTRL register, one field per register write.
 *
 * @start: nonzero to power the PHY up (fields set, walked in forward
 *         order), zero to power it down (fields cleared, walked in
 *         reverse order).
 *
 * After sequencing, reads the register back and verifies all touched
 * fields landed in the requested state.  Returns 0 on success, -EIO if
 * the readback does not match.
 */
static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)
{
	static const u32 shifts[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = {
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT,};
	static const u32 masks[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = {
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK,};
	/* Walk fields 0..N-1 when starting, N-1..0 when stopping */
	const int beg = start ? 0 : PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS - 1;
	const int end = start ? PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS : -1;
	u32 tmp, combined_mask = 0;
	u32 val;
	void __iomem *base = pcie->base;
	int i, ret;

	for (i = beg; i != end; start ? i++ : i--) {
		/* Starting sets the field bit; stopping clears it */
		val = start ? BIT_MASK(shifts[i]) : 0;
		tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
		tmp = (tmp & ~masks[i]) | (val & masks[i]);
		writel(tmp, base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
		/* settle time between individual field updates */
		usleep_range(50, 200);
		combined_mask |= masks[i];
	}

	/* Verify every field we touched reads back in the requested state */
	tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
	val = start ? combined_mask : 0;

	ret = (tmp & combined_mask) == val ? 0 : -EIO;
	if (ret)
		dev_err(pcie->dev, "failed to %s phy\n", (start ? "start" : "stop"));

	return ret;
}
1189 
1190 static inline int brcm_phy_start(struct brcm_pcie *pcie)
1191 {
1192 	return pcie->rescal ? brcm_phy_cntl(pcie, 1) : 0;
1193 }
1194 
1195 static inline int brcm_phy_stop(struct brcm_pcie *pcie)
1196 {
1197 	return pcie->rescal ? brcm_phy_cntl(pcie, 0) : 0;
1198 }
1199 
1200 static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
1201 {
1202 	void __iomem *base = pcie->base;
1203 	int tmp;
1204 
1205 	if (brcm_pcie_link_up(pcie))
1206 		brcm_pcie_enter_l23(pcie);
1207 	/* Assert fundamental reset */
1208 	pcie->perst_set(pcie, 1);
1209 
1210 	/* Deassert request for L23 in case it was asserted */
1211 	tmp = readl(base + PCIE_MISC_PCIE_CTRL);
1212 	u32p_replace_bits(&tmp, 0, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
1213 	writel(tmp, base + PCIE_MISC_PCIE_CTRL);
1214 
1215 	/* Turn off SerDes */
1216 	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
1217 	u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
1218 	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
1219 
1220 	/* Shutdown PCIe bridge */
1221 	pcie->bridge_sw_init_set(pcie, 1);
1222 }
1223 
/*
 * System-sleep suspend: quiesce the link and controller, stop the PHY,
 * rearm the optional "rescal" reset (so resume can trigger it again),
 * and gate the clock.  Returns 0 or the rearm error.
 */
static int brcm_pcie_suspend(struct device *dev)
{
	struct brcm_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	brcm_pcie_turn_off(pcie);
	/*
	 * If brcm_phy_stop() returns an error, just dev_err(). If we
	 * return the error it will cause the suspend to fail and this is a
	 * forgivable offense that will probably be erased on resume.
	 */
	if (brcm_phy_stop(pcie))
		dev_err(dev, "Could not stop phy for suspend\n");

	/* rescal is optional; the reset core treats a NULL control as a no-op */
	ret = reset_control_rearm(pcie->rescal);
	if (ret) {
		dev_err(dev, "Could not rearm rescal reset\n");
		return ret;
	}

	clk_disable_unprepare(pcie->clk);

	return 0;
}
1248 
/*
 * System-sleep resume: undo brcm_pcie_suspend() in reverse order —
 * clock on, "rescal" reset triggered, PHY started, SerDes powered up,
 * then a full brcm_pcie_setup() and MSI register restore.  Unwinds via
 * the goto labels on failure.
 */
static int brcm_pcie_resume(struct device *dev)
{
	struct brcm_pcie *pcie = dev_get_drvdata(dev);
	void __iomem *base;
	u32 tmp;
	int ret;

	base = pcie->base;
	ret = clk_prepare_enable(pcie->clk);
	if (ret)
		return ret;

	/* Re-trigger the reset that suspend rearmed (no-op without "rescal") */
	ret = reset_control_reset(pcie->rescal);
	if (ret)
		goto err_disable_clk;

	ret = brcm_phy_start(pcie);
	if (ret)
		goto err_reset;

	/* Take bridge out of reset so we can access the SERDES reg */
	pcie->bridge_sw_init_set(pcie, 0);

	/* SERDES_IDDQ = 0 */
	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
	u32p_replace_bits(&tmp, 0, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);

	/* wait for serdes to be stable */
	udelay(100);

	/* Full controller re-init; suspend put the bridge back in reset */
	ret = brcm_pcie_setup(pcie);
	if (ret)
		goto err_reset;

	/* Restore MSI controller registers lost across the power cycle */
	if (pcie->msi)
		brcm_msi_set_regs(pcie->msi);

	return 0;

err_reset:
	reset_control_rearm(pcie->rescal);
err_disable_clk:
	clk_disable_unprepare(pcie->clk);
	return ret;
}
1295 
/*
 * Common teardown shared by remove and the probe failure path: tear down
 * MSI, power off the controller, stop the PHY, rearm the optional
 * "rescal" reset, and gate the clock.  Errors are logged, not returned.
 */
static void __brcm_pcie_remove(struct brcm_pcie *pcie)
{
	brcm_msi_remove(pcie);
	brcm_pcie_turn_off(pcie);
	if (brcm_phy_stop(pcie))
		dev_err(pcie->dev, "Could not stop phy\n");
	if (reset_control_rearm(pcie->rescal))
		dev_err(pcie->dev, "Could not rearm rescal reset\n");
	clk_disable_unprepare(pcie->clk);
}
1306 
1307 static int brcm_pcie_remove(struct platform_device *pdev)
1308 {
1309 	struct brcm_pcie *pcie = platform_get_drvdata(pdev);
1310 	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
1311 
1312 	pci_stop_root_bus(bridge->bus);
1313 	pci_remove_root_bus(bridge->bus);
1314 	__brcm_pcie_remove(pcie);
1315 
1316 	return 0;
1317 }
1318 
/* DT compatible strings mapped to per-SoC register offsets and callbacks */
static const struct of_device_id brcm_pcie_match[] = {
	{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
	{ .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg },
	{ .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },
	{ .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg },
	{ .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg },
	{ .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
	{ .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
	{ .compatible = "brcm,bcm7425-pcie", .data = &bcm7425_cfg },
	{},
};
1330 
1331 static int brcm_pcie_probe(struct platform_device *pdev)
1332 {
1333 	struct device_node *np = pdev->dev.of_node, *msi_np;
1334 	struct pci_host_bridge *bridge;
1335 	const struct pcie_cfg_data *data;
1336 	struct brcm_pcie *pcie;
1337 	int ret;
1338 
1339 	bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie));
1340 	if (!bridge)
1341 		return -ENOMEM;
1342 
1343 	data = of_device_get_match_data(&pdev->dev);
1344 	if (!data) {
1345 		pr_err("failed to look up compatible string\n");
1346 		return -EINVAL;
1347 	}
1348 
1349 	pcie = pci_host_bridge_priv(bridge);
1350 	pcie->dev = &pdev->dev;
1351 	pcie->np = np;
1352 	pcie->reg_offsets = data->offsets;
1353 	pcie->type = data->type;
1354 	pcie->perst_set = data->perst_set;
1355 	pcie->bridge_sw_init_set = data->bridge_sw_init_set;
1356 
1357 	pcie->base = devm_platform_ioremap_resource(pdev, 0);
1358 	if (IS_ERR(pcie->base))
1359 		return PTR_ERR(pcie->base);
1360 
1361 	pcie->clk = devm_clk_get_optional(&pdev->dev, "sw_pcie");
1362 	if (IS_ERR(pcie->clk))
1363 		return PTR_ERR(pcie->clk);
1364 
1365 	ret = of_pci_get_max_link_speed(np);
1366 	pcie->gen = (ret < 0) ? 0 : ret;
1367 
1368 	pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc");
1369 
1370 	ret = clk_prepare_enable(pcie->clk);
1371 	if (ret) {
1372 		dev_err(&pdev->dev, "could not enable clock\n");
1373 		return ret;
1374 	}
1375 	pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal");
1376 	if (IS_ERR(pcie->rescal)) {
1377 		clk_disable_unprepare(pcie->clk);
1378 		return PTR_ERR(pcie->rescal);
1379 	}
1380 	pcie->perst_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "perst");
1381 	if (IS_ERR(pcie->perst_reset)) {
1382 		clk_disable_unprepare(pcie->clk);
1383 		return PTR_ERR(pcie->perst_reset);
1384 	}
1385 
1386 	ret = reset_control_reset(pcie->rescal);
1387 	if (ret)
1388 		dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
1389 
1390 	ret = brcm_phy_start(pcie);
1391 	if (ret) {
1392 		reset_control_rearm(pcie->rescal);
1393 		clk_disable_unprepare(pcie->clk);
1394 		return ret;
1395 	}
1396 
1397 	ret = brcm_pcie_setup(pcie);
1398 	if (ret)
1399 		goto fail;
1400 
1401 	pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
1402 	if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
1403 		dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
1404 		ret = -ENODEV;
1405 		goto fail;
1406 	}
1407 
1408 	msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
1409 	if (pci_msi_enabled() && msi_np == pcie->np) {
1410 		ret = brcm_pcie_enable_msi(pcie);
1411 		if (ret) {
1412 			dev_err(pcie->dev, "probe of internal MSI failed");
1413 			goto fail;
1414 		}
1415 	}
1416 
1417 	bridge->ops = pcie->type == BCM7425 ? &brcm_pcie_ops32 : &brcm_pcie_ops;
1418 	bridge->sysdata = pcie;
1419 
1420 	platform_set_drvdata(pdev, pcie);
1421 
1422 	return pci_host_probe(bridge);
1423 fail:
1424 	__brcm_pcie_remove(pcie);
1425 	return ret;
1426 }
1427 
MODULE_DEVICE_TABLE(of, brcm_pcie_match);

/* System-sleep hooks; full controller power-off/re-init across suspend */
static const struct dev_pm_ops brcm_pcie_pm_ops = {
	.suspend = brcm_pcie_suspend,
	.resume = brcm_pcie_resume,
};
1434 
/* Platform driver glue: matches the DT table above, supports PM sleep */
static struct platform_driver brcm_pcie_driver = {
	.probe = brcm_pcie_probe,
	.remove = brcm_pcie_remove,
	.driver = {
		.name = "brcm-pcie",
		.of_match_table = brcm_pcie_match,
		.pm = &brcm_pcie_pm_ops,
	},
};
module_platform_driver(brcm_pcie_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom STB PCIe RC driver");
MODULE_AUTHOR("Broadcom");
1449