1 // SPDX-License-Identifier: GPL-2.0+
2 /* Copyright (C) 2009 - 2019 Broadcom */
3 
4 #include <linux/bitfield.h>
5 #include <linux/bitops.h>
6 #include <linux/clk.h>
7 #include <linux/compiler.h>
8 #include <linux/delay.h>
9 #include <linux/init.h>
10 #include <linux/interrupt.h>
11 #include <linux/io.h>
12 #include <linux/ioport.h>
13 #include <linux/irqchip/chained_irq.h>
14 #include <linux/irqdomain.h>
15 #include <linux/kernel.h>
16 #include <linux/list.h>
17 #include <linux/log2.h>
18 #include <linux/module.h>
19 #include <linux/msi.h>
20 #include <linux/of_address.h>
21 #include <linux/of_irq.h>
22 #include <linux/of_pci.h>
23 #include <linux/of_platform.h>
24 #include <linux/pci.h>
25 #include <linux/printk.h>
26 #include <linux/sizes.h>
27 #include <linux/slab.h>
28 #include <linux/string.h>
29 #include <linux/types.h>
30 
31 #include "../pci.h"
32 
33 /* BRCM_PCIE_CAP_REGS - Offset for the mandatory capability config regs */
34 #define BRCM_PCIE_CAP_REGS				0x00ac
35 
36 /* Broadcom STB PCIe Register Offsets */
37 #define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1				0x0188
38 #define  PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK	0xc
39 #define  PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN			0x0
40 
41 #define PCIE_RC_CFG_PRIV1_ID_VAL3			0x043c
42 #define  PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK	0xffffff
43 
44 #define PCIE_RC_DL_MDIO_ADDR				0x1100
45 #define PCIE_RC_DL_MDIO_WR_DATA				0x1104
46 #define PCIE_RC_DL_MDIO_RD_DATA				0x1108
47 
48 #define PCIE_MISC_MISC_CTRL				0x4008
49 #define  PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK		0x1000
50 #define  PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK	0x2000
51 #define  PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK	0x300000
52 #define  PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_128		0x0
53 #define  PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK		0xf8000000
54 
55 #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO		0x400c
56 #define PCIE_MEM_WIN0_LO(win)	\
57 		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO + ((win) * 4)
58 
59 #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI		0x4010
60 #define PCIE_MEM_WIN0_HI(win)	\
61 		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 4)
62 
63 #define PCIE_MISC_RC_BAR1_CONFIG_LO			0x402c
64 #define  PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK		0x1f
65 
66 #define PCIE_MISC_RC_BAR2_CONFIG_LO			0x4034
67 #define  PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK		0x1f
68 #define PCIE_MISC_RC_BAR2_CONFIG_HI			0x4038
69 
70 #define PCIE_MISC_RC_BAR3_CONFIG_LO			0x403c
71 #define  PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK		0x1f
72 
73 #define PCIE_MISC_MSI_BAR_CONFIG_LO			0x4044
74 #define PCIE_MISC_MSI_BAR_CONFIG_HI			0x4048
75 
76 #define PCIE_MISC_MSI_DATA_CONFIG			0x404c
77 #define  PCIE_MISC_MSI_DATA_CONFIG_VAL			0xffe06540
78 
79 #define PCIE_MISC_PCIE_CTRL				0x4064
80 #define  PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK	0x1
81 
82 #define PCIE_MISC_PCIE_STATUS				0x4068
83 #define  PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK		0x80
84 #define  PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK	0x20
85 #define  PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK	0x10
86 #define  PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK	0x40
87 
88 #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT		0x4070
89 #define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK	0xfff00000
90 #define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK	0xfff0
91 #define PCIE_MEM_WIN0_BASE_LIMIT(win)	\
92 		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT + ((win) * 4)
93 
94 #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI			0x4080
95 #define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK	0xff
96 #define PCIE_MEM_WIN0_BASE_HI(win)	\
97 		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI + ((win) * 8)
98 
99 #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI			0x4084
100 #define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK	0xff
101 #define PCIE_MEM_WIN0_LIMIT_HI(win)	\
102 		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8)
103 
104 #define PCIE_MISC_HARD_PCIE_HARD_DEBUG					0x4204
105 #define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK	0x2
106 #define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK		0x08000000
107 
108 #define PCIE_MSI_INTR2_STATUS				0x4500
109 #define PCIE_MSI_INTR2_CLR				0x4508
110 #define PCIE_MSI_INTR2_MASK_SET				0x4510
111 #define PCIE_MSI_INTR2_MASK_CLR				0x4514
112 
113 #define PCIE_EXT_CFG_DATA				0x8000
114 
115 #define PCIE_EXT_CFG_INDEX				0x9000
116 #define  PCIE_EXT_BUSNUM_SHIFT				20
117 #define  PCIE_EXT_SLOT_SHIFT				15
118 #define  PCIE_EXT_FUNC_SHIFT				12
119 
120 #define PCIE_RGR1_SW_INIT_1				0x9210
121 #define  PCIE_RGR1_SW_INIT_1_PERST_MASK			0x1
122 #define  PCIE_RGR1_SW_INIT_1_INIT_MASK			0x2
123 
124 /* PCIe parameters */
125 #define BRCM_NUM_PCIE_OUT_WINS		0x4
126 #define BRCM_INT_PCI_MSI_NR		32
127 
/* MSI target addresses */
129 #define BRCM_MSI_TARGET_ADDR_LT_4GB	0x0fffffffcULL
130 #define BRCM_MSI_TARGET_ADDR_GT_4GB	0xffffffffcULL
131 
132 /* MDIO registers */
133 #define MDIO_PORT0			0x0
134 #define MDIO_DATA_MASK			0x7fffffff
135 #define MDIO_PORT_MASK			0xf0000
136 #define MDIO_REGAD_MASK			0xffff
137 #define MDIO_CMD_MASK			0xfff00000
138 #define MDIO_CMD_READ			0x1
139 #define MDIO_CMD_WRITE			0x0
140 #define MDIO_DATA_DONE_MASK		0x80000000
141 #define MDIO_RD_DONE(x)			(((x) & MDIO_DATA_DONE_MASK) ? 1 : 0)
142 #define MDIO_WT_DONE(x)			(((x) & MDIO_DATA_DONE_MASK) ? 0 : 1)
143 #define SSC_REGS_ADDR			0x1100
144 #define SET_ADDR_OFFSET			0x1f
145 #define SSC_CNTL_OFFSET			0x2
146 #define SSC_CNTL_OVRD_EN_MASK		0x8000
147 #define SSC_CNTL_OVRD_VAL_MASK		0x4000
148 #define SSC_STATUS_OFFSET		0x1
149 #define SSC_STATUS_SSC_MASK		0x400
150 #define SSC_STATUS_PLL_LOCK_MASK	0x800
151 
/* State for the controller's built-in MSI interrupt controller. */
struct brcm_msi {
	struct device		*dev;
	void __iomem		*base;		/* same MMIO base as the RC */
	struct device_node	*np;
	struct irq_domain	*msi_domain;	/* PCI MSI domain (top) */
	struct irq_domain	*inner_domain;	/* hwirq domain (bottom) */
	struct mutex		lock; /* guards the alloc/free operations */
	u64			target_addr;	/* PCIe addr devices write MSIs to */
	int			irq;		/* parent IRQ feeding the chained handler */
	/* used indicates which MSI interrupts have been alloc'd */
	unsigned long		used;
};
164 
165 /* Internal PCIe Host Controller Information.*/
/* Internal PCIe Host Controller Information.*/
struct brcm_pcie {
	struct device		*dev;
	void __iomem		*base;		/* controller register window */
	struct clk		*clk;		/* optional "sw_pcie" clock */
	struct pci_bus		*root_bus;
	struct device_node	*np;
	bool			ssc;		/* spread-spectrum clocking requested */
	int			gen;		/* link-speed cap from DT, 0 = no cap */
	u64			msi_target_addr; /* chosen MSI doorbell address */
	struct brcm_msi		*msi;
};
177 
178 /*
179  * This is to convert the size of the inbound "BAR" region to the
180  * non-linear values of PCIE_X_MISC_RC_BAR[123]_CONFIG_LO.SIZE
181  */
182 static int brcm_pcie_encode_ibar_size(u64 size)
183 {
184 	int log2_in = ilog2(size);
185 
186 	if (log2_in >= 12 && log2_in <= 15)
187 		/* Covers 4KB to 32KB (inclusive) */
188 		return (log2_in - 12) + 0x1c;
189 	else if (log2_in >= 16 && log2_in <= 35)
190 		/* Covers 64KB to 32GB, (inclusive) */
191 		return log2_in - 15;
192 	/* Something is awry so disable */
193 	return 0;
194 }
195 
196 static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)
197 {
198 	u32 pkt = 0;
199 
200 	pkt |= FIELD_PREP(MDIO_PORT_MASK, port);
201 	pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad);
202 	pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd);
203 
204 	return pkt;
205 }
206 
207 /* negative return value indicates error */
208 static int brcm_pcie_mdio_read(void __iomem *base, u8 port, u8 regad, u32 *val)
209 {
210 	int tries;
211 	u32 data;
212 
213 	writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_READ),
214 		   base + PCIE_RC_DL_MDIO_ADDR);
215 	readl(base + PCIE_RC_DL_MDIO_ADDR);
216 
217 	data = readl(base + PCIE_RC_DL_MDIO_RD_DATA);
218 	for (tries = 0; !MDIO_RD_DONE(data) && tries < 10; tries++) {
219 		udelay(10);
220 		data = readl(base + PCIE_RC_DL_MDIO_RD_DATA);
221 	}
222 
223 	*val = FIELD_GET(MDIO_DATA_MASK, data);
224 	return MDIO_RD_DONE(data) ? 0 : -EIO;
225 }
226 
227 /* negative return value indicates error */
228 static int brcm_pcie_mdio_write(void __iomem *base, u8 port,
229 				u8 regad, u16 wrdata)
230 {
231 	int tries;
232 	u32 data;
233 
234 	writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_WRITE),
235 		   base + PCIE_RC_DL_MDIO_ADDR);
236 	readl(base + PCIE_RC_DL_MDIO_ADDR);
237 	writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);
238 
239 	data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
240 	for (tries = 0; !MDIO_WT_DONE(data) && tries < 10; tries++) {
241 		udelay(10);
242 		data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
243 	}
244 
245 	return MDIO_WT_DONE(data) ? 0 : -EIO;
246 }
247 
248 /*
249  * Configures device for Spread Spectrum Clocking (SSC) mode; a negative
250  * return value indicates error.
251  */
/*
 * Configures device for Spread Spectrum Clocking (SSC) mode; a negative
 * return value indicates error.
 */
static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
{
	int pll, ssc;
	int ret;
	u32 tmp;

	/* Point the MDIO indirect-address register at the SSC block */
	ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET,
				   SSC_REGS_ADDR);
	if (ret < 0)
		return ret;

	/* Read-modify-write the SSC control register */
	ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
				  SSC_CNTL_OFFSET, &tmp);
	if (ret < 0)
		return ret;

	/* Enable the override and force the SSC value on */
	u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_EN_MASK);
	u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_VAL_MASK);
	ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0,
				   SSC_CNTL_OFFSET, tmp);
	if (ret < 0)
		return ret;

	/* Give the PLL time to settle before checking status */
	usleep_range(1000, 2000);
	ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
				  SSC_STATUS_OFFSET, &tmp);
	if (ret < 0)
		return ret;

	ssc = FIELD_GET(SSC_STATUS_SSC_MASK, tmp);
	pll = FIELD_GET(SSC_STATUS_PLL_LOCK_MASK, tmp);

	/* Success only if SSC is active AND the PLL has locked */
	return ssc && pll ? 0 : -EIO;
}
286 
287 /* Limits operation to a specific generation (1, 2, or 3) */
288 static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
289 {
290 	u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
291 	u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
292 
293 	lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen;
294 	writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
295 
296 	lnkctl2 = (lnkctl2 & ~0xf) | gen;
297 	writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
298 }
299 
/*
 * Program outbound window @win to translate the CPU range
 * [@cpu_addr, @cpu_addr + @size) to PCIe address @pcie_addr.
 * Base/limit lower bits are in units of megabytes; the overflow bits
 * go into the separate BASE_HI/LIMIT_HI registers.
 */
static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
				       unsigned int win, u64 cpu_addr,
				       u64 pcie_addr, u64 size)
{
	u32 cpu_addr_mb_high, limit_addr_mb_high;
	phys_addr_t cpu_addr_mb, limit_addr_mb;
	int high_addr_shift;
	u32 tmp;

	/* Set the base of the pcie_addr window */
	writel(lower_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_LO(win));
	writel(upper_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_HI(win));

	/* Write the addr base & limit lower bits (in MBs) */
	cpu_addr_mb = cpu_addr / SZ_1M;
	limit_addr_mb = (cpu_addr + size - 1) / SZ_1M;

	tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));
	u32p_replace_bits(&tmp, cpu_addr_mb,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);
	u32p_replace_bits(&tmp, limit_addr_mb,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));

	/*
	 * Write the cpu & limit addr upper bits; the number of MB bits
	 * held by the BASE_LIMIT register determines the shift.
	 */
	high_addr_shift =
		HWEIGHT32(PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);

	cpu_addr_mb_high = cpu_addr_mb >> high_addr_shift;
	tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_HI(win));
	u32p_replace_bits(&tmp, cpu_addr_mb_high,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_HI(win));

	limit_addr_mb_high = limit_addr_mb >> high_addr_shift;
	tmp = readl(pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
	u32p_replace_bits(&tmp, limit_addr_mb_high,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
}
340 
/* Top-level MSI irq_chip seen by endpoint drivers. */
static struct irq_chip brcm_msi_irq_chip = {
	.name            = "BRCM STB PCIe MSI",
	.irq_ack         = irq_chip_ack_parent,
	.irq_mask        = pci_msi_mask_irq,
	.irq_unmask      = pci_msi_unmask_irq,
};
347 
static struct msi_domain_info brcm_msi_domain_info = {
	/* Multi MSI is supported by the controller, but not by this driver */
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
	.chip	= &brcm_msi_irq_chip,
};
353 
/*
 * Chained handler for the controller's MSI parent interrupt: read the
 * per-vector status bits and demultiplex each one into its virtual IRQ
 * in the inner domain.
 */
static void brcm_pcie_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long status, virq;
	struct brcm_msi *msi;
	struct device *dev;
	u32 bit;

	chained_irq_enter(chip, desc);
	msi = irq_desc_get_handler_data(desc);
	dev = msi->dev;

	status = readl(msi->base + PCIE_MSI_INTR2_STATUS);
	for_each_set_bit(bit, &status, BRCM_INT_PCI_MSI_NR) {
		virq = irq_find_mapping(msi->inner_domain, bit);
		if (virq)
			generic_handle_irq(virq);
		else
			/* Status bit with no mapped vector: spurious MSI */
			dev_dbg(dev, "unexpected MSI\n");
	}

	chained_irq_exit(chip, desc);
}
377 
/* Compose the MSI message (doorbell address + data) for one vector. */
static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct brcm_msi *msi = irq_data_get_irq_chip_data(data);

	msg->address_lo = lower_32_bits(msi->target_addr);
	msg->address_hi = upper_32_bits(msi->target_addr);
	/* Low 16 bits of the fixed data pattern, OR'd with the vector number */
	msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL) | data->hwirq;
}
386 
/* MSI vectors always target the same CPU; affinity changes are rejected. */
static int brcm_msi_set_affinity(struct irq_data *irq_data,
				 const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
392 
393 static void brcm_msi_ack_irq(struct irq_data *data)
394 {
395 	struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
396 
397 	writel(1 << data->hwirq, msi->base + PCIE_MSI_INTR2_CLR);
398 }
399 
400 
/* Bottom-level irq_chip bound to vectors of the inner (hwirq) domain. */
static struct irq_chip brcm_msi_bottom_irq_chip = {
	.name			= "BRCM STB MSI",
	.irq_compose_msi_msg	= brcm_msi_compose_msi_msg,
	.irq_set_affinity	= brcm_msi_set_affinity,
	.irq_ack                = brcm_msi_ack_irq,
};
407 
408 static int brcm_msi_alloc(struct brcm_msi *msi)
409 {
410 	int hwirq;
411 
412 	mutex_lock(&msi->lock);
413 	hwirq = bitmap_find_free_region(&msi->used, BRCM_INT_PCI_MSI_NR, 0);
414 	mutex_unlock(&msi->lock);
415 
416 	return hwirq;
417 }
418 
/* Return a previously allocated MSI vector to the pool. */
static void brcm_msi_free(struct brcm_msi *msi, unsigned long hwirq)
{
	mutex_lock(&msi->lock);
	/* Order 0: release exactly one bit */
	bitmap_release_region(&msi->used, hwirq, 0);
	mutex_unlock(&msi->lock);
}
425 
/*
 * Allocate one vector in the inner domain and bind it to the bottom
 * irq_chip with edge handling.  nr_irqs > 1 (multi-MSI) is not
 * supported by this driver, so only a single hwirq is allocated.
 */
static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *args)
{
	struct brcm_msi *msi = domain->host_data;
	int hwirq;

	hwirq = brcm_msi_alloc(msi);

	if (hwirq < 0)
		return hwirq;

	irq_domain_set_info(domain, virq, (irq_hw_number_t)hwirq,
			    &brcm_msi_bottom_irq_chip, domain->host_data,
			    handle_edge_irq, NULL, NULL);
	return 0;
}
442 
/* Release the hwirq backing @virq back to the allocation bitmap. */
static void brcm_irq_domain_free(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct brcm_msi *msi = irq_data_get_irq_chip_data(d);

	brcm_msi_free(msi, d->hwirq);
}
451 
/* Ops for the inner (hwirq) domain. */
static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= brcm_irq_domain_alloc,
	.free	= brcm_irq_domain_free,
};
456 
/*
 * Create the two-level IRQ domain hierarchy: a linear inner domain for
 * the 32 hardware vectors, with a PCI MSI domain stacked on top.
 * On failure the partially created domain is removed.
 */
static int brcm_allocate_domains(struct brcm_msi *msi)
{
	struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np);
	struct device *dev = msi->dev;

	msi->inner_domain = irq_domain_add_linear(NULL, BRCM_INT_PCI_MSI_NR,
						  &msi_domain_ops, msi);
	if (!msi->inner_domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &brcm_msi_domain_info,
						    msi->inner_domain);
	if (!msi->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		irq_domain_remove(msi->inner_domain);
		return -ENOMEM;
	}

	return 0;
}
480 
/* Tear down the MSI domains in reverse order of creation. */
static void brcm_free_domains(struct brcm_msi *msi)
{
	irq_domain_remove(msi->msi_domain);
	irq_domain_remove(msi->inner_domain);
}
486 
487 static void brcm_msi_remove(struct brcm_pcie *pcie)
488 {
489 	struct brcm_msi *msi = pcie->msi;
490 
491 	if (!msi)
492 		return;
493 	irq_set_chained_handler(msi->irq, NULL);
494 	irq_set_handler_data(msi->irq, NULL);
495 	brcm_free_domains(msi);
496 }
497 
/* Program the MSI doorbell address/data and unmask all 32 vectors. */
static void brcm_msi_set_regs(struct brcm_msi *msi)
{
	/* Unmask all vectors */
	writel(0xffffffff, msi->base + PCIE_MSI_INTR2_MASK_CLR);

	/*
	 * The 0 bit of PCIE_MISC_MSI_BAR_CONFIG_LO is repurposed to MSI
	 * enable, which we set to 1.
	 */
	writel(lower_32_bits(msi->target_addr) | 0x1,
	       msi->base + PCIE_MISC_MSI_BAR_CONFIG_LO);
	writel(upper_32_bits(msi->target_addr),
	       msi->base + PCIE_MISC_MSI_BAR_CONFIG_HI);

	/* Fixed data pattern; the vector number occupies the low bits */
	writel(PCIE_MISC_MSI_DATA_CONFIG_VAL,
	       msi->base + PCIE_MISC_MSI_DATA_CONFIG);
}
514 
515 static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
516 {
517 	struct brcm_msi *msi;
518 	int irq, ret;
519 	struct device *dev = pcie->dev;
520 
521 	irq = irq_of_parse_and_map(dev->of_node, 1);
522 	if (irq <= 0) {
523 		dev_err(dev, "cannot map MSI interrupt\n");
524 		return -ENODEV;
525 	}
526 
527 	msi = devm_kzalloc(dev, sizeof(struct brcm_msi), GFP_KERNEL);
528 	if (!msi)
529 		return -ENOMEM;
530 
531 	mutex_init(&msi->lock);
532 	msi->dev = dev;
533 	msi->base = pcie->base;
534 	msi->np = pcie->np;
535 	msi->target_addr = pcie->msi_target_addr;
536 	msi->irq = irq;
537 
538 	ret = brcm_allocate_domains(msi);
539 	if (ret)
540 		return ret;
541 
542 	irq_set_chained_handler_and_data(msi->irq, brcm_pcie_msi_isr, msi);
543 
544 	brcm_msi_set_regs(msi);
545 	pcie->msi = msi;
546 
547 	return 0;
548 }
549 
550 /* The controller is capable of serving in both RC and EP roles */
551 static bool brcm_pcie_rc_mode(struct brcm_pcie *pcie)
552 {
553 	void __iomem *base = pcie->base;
554 	u32 val = readl(base + PCIE_MISC_PCIE_STATUS);
555 
556 	return !!FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK, val);
557 }
558 
559 static bool brcm_pcie_link_up(struct brcm_pcie *pcie)
560 {
561 	u32 val = readl(pcie->base + PCIE_MISC_PCIE_STATUS);
562 	u32 dla = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK, val);
563 	u32 plu = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK, val);
564 
565 	return dla && plu;
566 }
567 
568 /* Configuration space read/write support */
569 static inline int brcm_pcie_cfg_index(int busnr, int devfn, int reg)
570 {
571 	return ((PCI_SLOT(devfn) & 0x1f) << PCIE_EXT_SLOT_SHIFT)
572 		| ((PCI_FUNC(devfn) & 0x07) << PCIE_EXT_FUNC_SHIFT)
573 		| (busnr << PCIE_EXT_BUSNUM_SHIFT)
574 		| (reg & ~3);
575 }
576 
/*
 * Map a config-space access: RC accesses go straight to the RC's own
 * registers; downstream accesses go through the indexed EXT_CFG window.
 */
static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn,
					int where)
{
	struct brcm_pcie *pcie = bus->sysdata;
	void __iomem *base = pcie->base;
	int idx;

	/* Accesses to the RC go right to the RC registers if slot==0 */
	if (pci_is_root_bus(bus))
		return PCI_SLOT(devfn) ? NULL : base + where;

	/* For devices, write to the config space index register */
	idx = brcm_pcie_cfg_index(bus->number, devfn, 0);
	writel(idx, pcie->base + PCIE_EXT_CFG_INDEX);
	return base + PCIE_EXT_CFG_DATA + where;
}
593 
/* Config accessors: map_bus plus the generic read/write helpers. */
static struct pci_ops brcm_pcie_ops = {
	.map_bus = brcm_pcie_map_conf,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};
599 
/* Assert (val=1) or deassert (val=0) the bridge's soft-reset bit. */
static inline void brcm_pcie_bridge_sw_init_set(struct brcm_pcie *pcie, u32 val)
{
	u32 tmp;

	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1);
	u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_INIT_MASK);
	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1);
}
608 
/* Assert (val=1) or deassert (val=0) the PERST# fundamental reset. */
static inline void brcm_pcie_perst_set(struct brcm_pcie *pcie, u32 val)
{
	u32 tmp;

	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1);
	u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK);
	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1);
}
617 
/*
 * Derive the inbound (RC_BAR2) window size and PCIe-side offset from the
 * bridge's dma-ranges.  Returns 0 on success, -ENODEV when no MEM
 * dma-range exists, or -EINVAL when the resulting window is invalid.
 */
static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
							u64 *rc_bar2_size,
							u64 *rc_bar2_offset)
{
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	struct device *dev = pcie->dev;
	struct resource_entry *entry;

	entry = resource_list_first_type(&bridge->dma_ranges, IORESOURCE_MEM);
	if (!entry)
		return -ENODEV;


	/*
	 * The controller expects the inbound window offset to be calculated as
	 * the difference between PCIe's address space and CPU's. The offset
	 * provided by the firmware is calculated the opposite way, so we
	 * negate it.
	 */
	*rc_bar2_offset = -entry->offset;
	/* Round the window size up to the next power of two */
	*rc_bar2_size = 1ULL << fls64(entry->res->end - entry->res->start);

	/*
	 * We validate the inbound memory view even though we should trust
	 * whatever the device-tree provides. This is because of an HW issue on
	 * early Raspberry Pi 4's revisions (bcm2711). It turns out its
	 * firmware has to dynamically edit dma-ranges due to a bug on the
	 * PCIe controller integration, which prohibits any access above the
	 * lower 3GB of memory. Given this, we decided to keep the dma-ranges
	 * in check, avoiding hard to debug device-tree related issues in the
	 * future:
	 *
	 * The PCIe host controller by design must set the inbound viewport to
	 * be a contiguous arrangement of all of the system's memory.  In
	 * addition, its size must be a power of two.  To further complicate
	 * matters, the viewport must start on a pcie-address that is aligned
	 * on a multiple of its size.  If a portion of the viewport does not
	 * represent system memory -- e.g. 3GB of memory requires a 4GB
	 * viewport -- we can map the outbound memory in or after 3GB and even
	 * though the viewport will overlap the outbound memory the controller
	 * will know to send outbound memory downstream and everything else
	 * upstream.
	 *
	 * For example:
	 *
	 * - The best-case scenario, memory up to 3GB, is to place the inbound
	 *   region in the first 4GB of pcie-space, as some legacy devices can
	 *   only address 32bits. We would also like to put the MSI under 4GB
	 *   as well, since some devices require a 32bit MSI target address.
	 *
	 * - If the system memory is 4GB or larger we cannot start the inbound
	 *   region at location 0 (since we have to allow some space for
	 *   outbound memory @ 3GB). So instead it will  start at the 1x
	 *   multiple of its size
	 */
	if (!*rc_bar2_size || (*rc_bar2_offset & (*rc_bar2_size - 1)) ||
	    (*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) {
		dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n",
			*rc_bar2_size, *rc_bar2_offset);
		return -EINVAL;
	}

	return 0;
}
682 
/*
 * Bring up the PCIe bridge: reset it, power up the SerDes, program the
 * inbound and outbound windows, wait for link-up and apply RC-specific
 * configuration.  Returns 0 on success or a negative errno.
 */
static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	u64 rc_bar2_offset, rc_bar2_size;
	void __iomem *base = pcie->base;
	struct device *dev = pcie->dev;
	struct resource_entry *entry;
	unsigned int scb_size_val;
	bool ssc_good = false;
	struct resource *res;
	int num_out_wins = 0;
	u16 nlw, cls, lnksta;
	int i, ret;
	u32 tmp;

	/* Reset the bridge */
	brcm_pcie_bridge_sw_init_set(pcie, 1);

	usleep_range(100, 200);

	/* Take the bridge out of reset */
	brcm_pcie_bridge_sw_init_set(pcie, 0);

	/* Power up the SerDes by clearing its IDDQ bit */
	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
	tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
	/* Wait for SerDes to be stable */
	usleep_range(100, 200);

	/*
	 * Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN
	 *
	 * NOTE(review): "tmp" still holds the HARD_DEBUG value here; the
	 * MISC_CTRL register is not read back before being modified.
	 * Presumably the remaining bits are intended to carry over this
	 * way -- confirm against the register map before changing.
	 */
	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK);
	u32p_replace_bits(&tmp, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_128,
			  PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK);
	writel(tmp, base + PCIE_MISC_MISC_CTRL);

	ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size,
						    &rc_bar2_offset);
	if (ret)
		return ret;

	/* Program the inbound window (RC_BAR2): offset plus encoded size */
	tmp = lower_32_bits(rc_bar2_offset);
	u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(rc_bar2_size),
			  PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK);
	writel(tmp, base + PCIE_MISC_RC_BAR2_CONFIG_LO);
	writel(upper_32_bits(rc_bar2_offset),
	       base + PCIE_MISC_RC_BAR2_CONFIG_HI);

	/* SCB0 size is log2(bytes) - 15; 0xf encodes 1GB as a fallback */
	scb_size_val = rc_bar2_size ?
		       ilog2(rc_bar2_size) - 15 : 0xf; /* 0xf is 1GB */
	tmp = readl(base + PCIE_MISC_MISC_CTRL);
	u32p_replace_bits(&tmp, scb_size_val,
			  PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK);
	writel(tmp, base + PCIE_MISC_MISC_CTRL);

	/*
	 * We ideally want the MSI target address to be located in the 32bit
	 * addressable memory area. Some devices might depend on it. This is
	 * possible either when the inbound window is located above the lower
	 * 4GB or when the inbound area is smaller than 4GB (taking into
	 * account the rounding-up we're forced to perform).
	 */
	if (rc_bar2_offset >= SZ_4G || (rc_bar2_size + rc_bar2_offset) < SZ_4G)
		pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB;
	else
		pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;

	/* disable the PCIe->GISB memory window (RC_BAR1) */
	tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);
	tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK;
	writel(tmp, base + PCIE_MISC_RC_BAR1_CONFIG_LO);

	/* disable the PCIe->SCB memory window (RC_BAR3) */
	tmp = readl(base + PCIE_MISC_RC_BAR3_CONFIG_LO);
	tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
	writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);

	/* Mask all interrupts since we are not handling any yet */
	writel(0xffffffff, pcie->base + PCIE_MSI_INTR2_MASK_SET);

	/* clear any interrupts we find on boot */
	writel(0xffffffff, pcie->base + PCIE_MSI_INTR2_CLR);

	/* Apply an optional DT-requested link-speed cap */
	if (pcie->gen)
		brcm_pcie_set_gen(pcie, pcie->gen);

	/* Unassert the fundamental reset */
	brcm_pcie_perst_set(pcie, 0);

	/*
	 * Give the RC/EP time to wake up, before trying to configure RC.
	 * Intermittently check status for link-up, up to a total of 100ms.
	 */
	for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
		msleep(5);

	if (!brcm_pcie_link_up(pcie)) {
		dev_err(dev, "link down\n");
		return -ENODEV;
	}

	if (!brcm_pcie_rc_mode(pcie)) {
		dev_err(dev, "PCIe misconfigured; is in EP mode\n");
		return -EINVAL;
	}

	/* Program one outbound window per bridge MEM window */
	resource_list_for_each_entry(entry, &bridge->windows) {
		res = entry->res;

		if (resource_type(res) != IORESOURCE_MEM)
			continue;

		if (num_out_wins >= BRCM_NUM_PCIE_OUT_WINS) {
			dev_err(pcie->dev, "too many outbound wins\n");
			return -EINVAL;
		}

		brcm_pcie_set_outbound_win(pcie, num_out_wins, res->start,
					   res->start - entry->offset,
					   resource_size(res));
		num_out_wins++;
	}

	/*
	 * For config space accesses on the RC, show the right class for
	 * a PCIe-PCIe bridge (the default setting is to be EP mode).
	 */
	tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
	u32p_replace_bits(&tmp, 0x060400,
			  PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
	writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);

	if (pcie->ssc) {
		ret = brcm_pcie_set_ssc(pcie);
		if (ret == 0)
			ssc_good = true;
		else
			/* Best effort: keep going without SSC */
			dev_err(dev, "failed attempt to enter ssc mode\n");
	}

	/* Report negotiated speed/width from the link status register */
	lnksta = readw(base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKSTA);
	cls = FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta);
	nlw = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
	dev_info(dev, "link up, %s x%u %s\n",
		 pci_speed_string(pcie_link_speed[cls]), nlw,
		 ssc_good ? "(SSC)" : "(!SSC)");

	/* PCIe->SCB endian mode for BAR */
	tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
	u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
		PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
	writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);

	/*
	 * Refclk from RC should be gated with CLKREQ# input when ASPM L0s,L1
	 * is enabled => setting the CLKREQ_DEBUG_ENABLE field to 1.
	 */
	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
	tmp |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK;
	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);

	return 0;
}
846 
847 /* L23 is a low-power PCIe link state */
/* L23 is a low-power PCIe link state */
static void brcm_pcie_enter_l23(struct brcm_pcie *pcie)
{
	void __iomem *base = pcie->base;
	int l23, i;
	u32 tmp;

	/* Assert request for L23 */
	tmp = readl(base + PCIE_MISC_PCIE_CTRL);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
	writel(tmp, base + PCIE_MISC_PCIE_CTRL);

	/* Wait up to 36 msec for L23 (15 polls of ~2.4ms each) */
	tmp = readl(base + PCIE_MISC_PCIE_STATUS);
	l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK, tmp);
	for (i = 0; i < 15 && !l23; i++) {
		usleep_range(2000, 2400);
		tmp = readl(base + PCIE_MISC_PCIE_STATUS);
		l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK,
				tmp);
	}

	/* Best effort: log but proceed with the shutdown regardless */
	if (!l23)
		dev_err(pcie->dev, "failed to enter low-power link state\n");
}
872 
873 static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
874 {
875 	void __iomem *base = pcie->base;
876 	int tmp;
877 
878 	if (brcm_pcie_link_up(pcie))
879 		brcm_pcie_enter_l23(pcie);
880 	/* Assert fundamental reset */
881 	brcm_pcie_perst_set(pcie, 1);
882 
883 	/* Deassert request for L23 in case it was asserted */
884 	tmp = readl(base + PCIE_MISC_PCIE_CTRL);
885 	u32p_replace_bits(&tmp, 0, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
886 	writel(tmp, base + PCIE_MISC_PCIE_CTRL);
887 
888 	/* Turn off SerDes */
889 	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
890 	u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
891 	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
892 
893 	/* Shutdown PCIe bridge */
894 	brcm_pcie_bridge_sw_init_set(pcie, 1);
895 }
896 
897 static void __brcm_pcie_remove(struct brcm_pcie *pcie)
898 {
899 	brcm_msi_remove(pcie);
900 	brcm_pcie_turn_off(pcie);
901 	clk_disable_unprepare(pcie->clk);
902 	clk_put(pcie->clk);
903 }
904 
/* Driver remove: detach the root bus, then tear down the controller. */
static int brcm_pcie_remove(struct platform_device *pdev)
{
	struct brcm_pcie *pcie = platform_get_drvdata(pdev);

	pci_stop_root_bus(pcie->root_bus);
	pci_remove_root_bus(pcie->root_bus);
	__brcm_pcie_remove(pcie);

	return 0;
}
915 
916 static int brcm_pcie_probe(struct platform_device *pdev)
917 {
918 	struct device_node *np = pdev->dev.of_node, *msi_np;
919 	struct pci_host_bridge *bridge;
920 	struct brcm_pcie *pcie;
921 	struct pci_bus *child;
922 	struct resource *res;
923 	int ret;
924 
925 	bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie));
926 	if (!bridge)
927 		return -ENOMEM;
928 
929 	pcie = pci_host_bridge_priv(bridge);
930 	pcie->dev = &pdev->dev;
931 	pcie->np = np;
932 
933 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
934 	pcie->base = devm_ioremap_resource(&pdev->dev, res);
935 	if (IS_ERR(pcie->base))
936 		return PTR_ERR(pcie->base);
937 
938 	pcie->clk = devm_clk_get_optional(&pdev->dev, "sw_pcie");
939 	if (IS_ERR(pcie->clk))
940 		return PTR_ERR(pcie->clk);
941 
942 	ret = of_pci_get_max_link_speed(np);
943 	pcie->gen = (ret < 0) ? 0 : ret;
944 
945 	pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc");
946 
947 	ret = pci_parse_request_of_pci_ranges(pcie->dev, &bridge->windows,
948 					      &bridge->dma_ranges, NULL);
949 	if (ret)
950 		return ret;
951 
952 	ret = clk_prepare_enable(pcie->clk);
953 	if (ret) {
954 		dev_err(&pdev->dev, "could not enable clock\n");
955 		return ret;
956 	}
957 
958 	ret = brcm_pcie_setup(pcie);
959 	if (ret)
960 		goto fail;
961 
962 	msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
963 	if (pci_msi_enabled() && msi_np == pcie->np) {
964 		ret = brcm_pcie_enable_msi(pcie);
965 		if (ret) {
966 			dev_err(pcie->dev, "probe of internal MSI failed");
967 			goto fail;
968 		}
969 	}
970 
971 	bridge->dev.parent = &pdev->dev;
972 	bridge->busnr = 0;
973 	bridge->ops = &brcm_pcie_ops;
974 	bridge->sysdata = pcie;
975 	bridge->map_irq = of_irq_parse_and_map_pci;
976 	bridge->swizzle_irq = pci_common_swizzle;
977 
978 	ret = pci_scan_root_bus_bridge(bridge);
979 	if (ret < 0) {
980 		dev_err(pcie->dev, "Scanning root bridge failed\n");
981 		goto fail;
982 	}
983 
984 	pci_assign_unassigned_bus_resources(bridge->bus);
985 	list_for_each_entry(child, &bridge->bus->children, node)
986 		pcie_bus_configure_settings(child);
987 	pci_bus_add_devices(bridge->bus);
988 	platform_set_drvdata(pdev, pcie);
989 	pcie->root_bus = bridge->bus;
990 
991 	return 0;
992 fail:
993 	__brcm_pcie_remove(pcie);
994 	return ret;
995 }
996 
/* DT match table: BCM2711 (Raspberry Pi 4) */
static const struct of_device_id brcm_pcie_match[] = {
	{ .compatible = "brcm,bcm2711-pcie" },
	{},
};
MODULE_DEVICE_TABLE(of, brcm_pcie_match);
1002 
/* Platform driver glue. */
static struct platform_driver brcm_pcie_driver = {
	.probe = brcm_pcie_probe,
	.remove = brcm_pcie_remove,
	.driver = {
		.name = "brcm-pcie",
		.of_match_table = brcm_pcie_match,
	},
};
module_platform_driver(brcm_pcie_driver);
1012 
1013 MODULE_LICENSE("GPL");
1014 MODULE_DESCRIPTION("Broadcom STB PCIe RC driver");
1015 MODULE_AUTHOR("Broadcom");
1016