// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe host controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/list_sort.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>

#include "pcie-cadence.h"

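/*
 * Largest inbound window each root port BAR can map; RP_NO_BAR is the
 * catch-all "no BAR match" translation. bar_aperture_mask holds the
 * all-ones value of each BAR's aperture field, used when clearing
 * CDNS_PCIE_LM_RC_BAR_CFG before it is reprogrammed.
 */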
static u64 bar_max_size[] = {
	[RP_BAR0] = _ULL(128 * SZ_1G),
	[RP_BAR1] = SZ_2G,
	[RP_NO_BAR] = _BITULL(63),
};

static u8 bar_aperture_mask[] = {
	[RP_BAR0] = 0x1F,
	[RP_BAR1] = 0xF,
};

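/*
 * Map a configuration space access. Accesses to the root port itself are
 * served from the local management registers (devfn must be 0 on the root
 * bus). For downstream devices, outbound AXI region 0 is retargeted at the
 * requested bus/devfn and a pointer into the "cfg" window is returned;
 * NULL is returned when the access cannot be made (non-zero devfn on the
 * root bus, or link down).
 */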
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
			       int where)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
	struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
	struct cdns_pcie *pcie = &rc->pcie;
	unsigned int busn = bus->number;
	u32 addr0, desc0;

	if (pci_is_root_bus(bus)) {
		/*
		 * Only the root port (devfn == 0) is connected to this bus.
		 * All other PCI devices are behind some bridge, hence on
		 * another bus.
		 */
		if (devfn)
			return NULL;

		return pcie->reg_base + (where & 0xfff);
	}
	/* Check that the link is up */
	if (!(cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE) & 0x1))
		return NULL;
	/* Clear AXI link-down status */
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_LINKDOWN, 0x0);

	/* Update Output registers for AXI region 0. */
	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0);

	/* Configuration Type 0 or Type 1 access. */
	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
		CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
	/*
	 * The bus number was already set once and for all in desc1 by
	 * cdns_pcie_host_init_address_translation().
	 */
	if (busn == bridge->busnr + 1)
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
	else
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0);

	return rc->cfg_base + (where & 0xfff);
}

static struct pci_ops cdns_pcie_host_ops = {
	.map_bus	= cdns_pci_map_bus,
	.read		= pci_generic_config_read,
	.write		= pci_generic_config_write,
};

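/*
 * Poll the link state, sleeping between attempts. Returns 0 once the link
 * is up, or -ETIMEDOUT after LINK_WAIT_MAX_RETRIES attempts.
 */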
static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int retries;

	/* Check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (cdns_pcie_link_up(pcie)) {
			dev_info(dev, "Link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	return -ETIMEDOUT;
}

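/*
 * Work around a Gen2 training defect: if the root port supports more than
 * 2.5 GT/s but the link only trained to 2.5 GT/s, set the Retrain Link bit
 * and wait for the link to come back up.
 */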
static int cdns_pcie_retrain(struct cdns_pcie *pcie)
{
	u32 lnk_cap_sls, pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
	u16 lnk_stat, lnk_ctl;
	int ret = 0;

	/*
	 * Set the retrain bit if the current speed is 2.5 GT/s, but the
	 * PCIe root port supports more than 2.5 GT/s.
	 */

	lnk_cap_sls = cdns_pcie_readl(pcie, (CDNS_PCIE_RP_BASE + pcie_cap_off +
					     PCI_EXP_LNKCAP));
	if ((lnk_cap_sls & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
		return ret;

	lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
	if ((lnk_stat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
		lnk_ctl = cdns_pcie_rp_readw(pcie,
					     pcie_cap_off + PCI_EXP_LNKCTL);
		lnk_ctl |= PCI_EXP_LNKCTL_RL;
		cdns_pcie_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL,
				    lnk_ctl);

		ret = cdns_pcie_host_wait_for_link(pcie);
	}
	return ret;
}

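/* Wait for the link and, if the retrain quirk is set, retrain it once. */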
static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	int ret;

	ret = cdns_pcie_host_wait_for_link(pcie);

	/*
	 * Retrain link for Gen2 training defect
	 * if quirk flag is set.
	 */
	if (!ret && rc->quirk_retrain_flag)
		ret = cdns_pcie_retrain(pcie);

	return ret;
}

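/*
 * Program the root port: disable BAR0/BAR1, enable the prefetchable memory
 * and I/O base/limit registers, apply any vendor/device ID overrides from
 * the device tree, and set the PCI-to-PCI bridge class code.
 */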
static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	u32 value, ctrl;
	u32 id;

	/*
	 * Set the root complex BAR configuration register:
	 * - disable both BAR0 and BAR1.
	 * - enable Prefetchable Memory Base and Limit registers in type 1
	 *   config space (64 bits).
	 * - enable IO Base and Limit registers in type 1 config
	 *   space (32 bits).
	 */
	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
	value = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
		CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
		CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
		CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS;
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);

	/* Set root port configuration space */
	if (rc->vendor_id != 0xffff) {
		id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) |
			CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id);
		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	if (rc->device_id != 0xffff)
		cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);

	cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
	cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0);
	cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	return 0;
}

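/*
 * Program one inbound translation: claim the requested BAR, write its
 * target CPU address and aperture, and (except for RP_NO_BAR) update the
 * matching control and aperture fields in CDNS_PCIE_LM_RC_BAR_CFG.
 * Returns -EBUSY if the BAR has already been used.
 */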
static int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc,
					enum cdns_pcie_rp_bar bar,
					u64 cpu_addr, u64 size,
					unsigned long flags)
{
	struct cdns_pcie *pcie = &rc->pcie;
	u32 addr0, addr1, aperture, value;

	if (!rc->avail_ib_bar[bar])
		return -EBUSY;

	rc->avail_ib_bar[bar] = false;

	aperture = ilog2(size);
	addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(aperture) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), addr1);

	if (bar == RP_NO_BAR)
		return 0;

	value = cdns_pcie_readl(pcie, CDNS_PCIE_LM_RC_BAR_CFG);
	value &= ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
		   LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
	if (size + cpu_addr >= SZ_4G) {
		if (!(flags & IORESOURCE_PREFETCH))
			value |= LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar);
		value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar);
	} else {
		if (!(flags & IORESOURCE_PREFETCH))
			value |= LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar);
		value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar);
	}

	value |= LM_RC_BAR_CFG_APERTURE(bar, aperture);
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);

	return 0;
}

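/*
 * Find the smallest available BAR that can still hold size, or
 * RP_BAR_UNDEFINED if none is large enough.
 */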
static enum cdns_pcie_rp_bar
cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size)
{
	enum cdns_pcie_rp_bar bar, sel_bar;

	sel_bar = RP_BAR_UNDEFINED;
	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
		if (!rc->avail_ib_bar[bar])
			continue;

		if (size <= bar_max_size[bar]) {
			if (sel_bar == RP_BAR_UNDEFINED) {
				sel_bar = bar;
				continue;
			}

			if (bar_max_size[bar] < bar_max_size[sel_bar])
				sel_bar = bar;
		}
	}

	return sel_bar;
}

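/*
 * Find the largest available BAR that does not exceed size, or
 * RP_BAR_UNDEFINED if even the smallest free BAR is too big.
 */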
static enum cdns_pcie_rp_bar
cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size)
{
	enum cdns_pcie_rp_bar bar, sel_bar;

	sel_bar = RP_BAR_UNDEFINED;
	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
		if (!rc->avail_ib_bar[bar])
			continue;

		if (size >= bar_max_size[bar]) {
			if (sel_bar == RP_BAR_UNDEFINED) {
				sel_bar = bar;
				continue;
			}

			if (bar_max_size[bar] > bar_max_size[sel_bar])
				sel_bar = bar;
		}
	}

	return sel_bar;
}

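/*
 * Map one dma-ranges entry through the inbound BARs. The entry must be
 * identity mapped (no CPU/PCI offset). It is mapped with a single BAR when
 * one is large enough; otherwise it is split across several BARs, largest
 * windows first, until the whole range is covered or no suitable BAR
 * remains.
 */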
static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
				     struct resource_entry *entry)
{
	u64 cpu_addr, pci_addr, size, winsize;
	struct cdns_pcie *pcie = &rc->pcie;
	struct device *dev = pcie->dev;
	enum cdns_pcie_rp_bar bar;
	unsigned long flags;
	int ret;

	cpu_addr = entry->res->start;
	pci_addr = entry->res->start - entry->offset;
	flags = entry->res->flags;
	size = resource_size(entry->res);

	if (entry->offset) {
		dev_err(dev, "PCI addr: %llx must be equal to CPU addr: %llx\n",
			pci_addr, cpu_addr);
		return -EINVAL;
	}

	while (size > 0) {
		/*
		 * Try to find a minimum BAR whose size is greater than
		 * or equal to the remaining resource_entry size. This will
		 * fail if the size of each of the available BARs is less than
		 * the remaining resource_entry size.
		 * If a minimum BAR is found, the IB ATU is configured for it
		 * and we return.
		 */
		bar = cdns_pcie_host_find_min_bar(rc, size);
		if (bar != RP_BAR_UNDEFINED) {
			ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr,
							   size, flags);
			if (ret)
				dev_err(dev, "IB BAR: %d config failed\n", bar);
			return ret;
		}

		/*
		 * If the control reaches here, it would mean the remaining
		 * resource_entry size cannot be fitted in a single BAR. So we
		 * find a maximum BAR whose size is less than or equal to the
		 * remaining resource_entry size and split the resource entry
		 * so that part of the resource entry is fitted inside the
		 * maximum BAR. The remaining size would be fitted during the
		 * next iteration of the loop.
		 * If a maximum BAR is not found, there is no way we can fit
		 * this resource_entry, so we error out.
		 */
		bar = cdns_pcie_host_find_max_bar(rc, size);
		if (bar == RP_BAR_UNDEFINED) {
			dev_err(dev, "No free BAR to map cpu_addr %llx\n",
				cpu_addr);
			return -EINVAL;
		}

		winsize = bar_max_size[bar];
		ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize,
						   flags);
		if (ret) {
			dev_err(dev, "IB BAR: %d config failed\n", bar);
			return ret;
		}

		size -= winsize;
		cpu_addr += winsize;
	}

	return 0;
}

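/* list_sort() helper: order dma-ranges entries by decreasing size. */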
static int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a,
					 const struct list_head *b)
{
	struct resource_entry *entry1, *entry2;

	entry1 = container_of(a, struct resource_entry, node);
	entry2 = container_of(b, struct resource_entry, node);

	return resource_size(entry2->res) - resource_size(entry1->res);
}

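/*
 * Program the inbound translations from the bridge dma-ranges. Without any
 * dma-ranges, a single "no BAR match" window of 2^32 bytes (overridable
 * through the "cdns,no-bar-match-nbits" property) is set up; otherwise the
 * entries are sorted by decreasing size and mapped onto the BARs.
 */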
static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;
	struct pci_host_bridge *bridge;
	struct resource_entry *entry;
	u32 no_bar_nbits = 32;
	int err;

	bridge = pci_host_bridge_from_priv(rc);
	if (!bridge)
		return -ENOMEM;

	if (list_empty(&bridge->dma_ranges)) {
		of_property_read_u32(np, "cdns,no-bar-match-nbits",
				     &no_bar_nbits);
		err = cdns_pcie_host_bar_ib_config(rc, RP_NO_BAR, 0x0,
						   (u64)1 << no_bar_nbits, 0);
		if (err)
			dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR);
		return err;
	}

	list_sort(NULL, &bridge->dma_ranges, cdns_pcie_host_dma_ranges_cmp);

	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
		err = cdns_pcie_host_bar_config(rc, entry);
		if (err) {
			dev_err(dev, "Failed to configure IB using dma-ranges\n");
			return err;
		}
	}

	return 0;
}

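/*
 * Set up the outbound translations: region 0 is reserved for configuration
 * accesses (cdns_pci_map_bus() rewrites its PCI address and descriptor on
 * the fly), the remaining regions map the bridge MEM and I/O windows, and
 * the inbound dma-ranges are programmed last.
 */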
static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
	struct resource *cfg_res = rc->cfg_res;
	struct resource_entry *entry;
	u64 cpu_addr = cfg_res->start;
	u32 addr0, addr1, desc1;
	int r, busnr = 0;

	entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
	if (entry)
		busnr = entry->res->start;

	/*
	 * Reserve region 0 for PCI configuration space accesses:
	 * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
	 * cdns_pci_map_bus(), other region registers are set here once and
	 * for all.
	 */
	addr1 = 0; /* Should be programmed to zero. */
	desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);

	if (pcie->ops->cpu_addr_fixup)
		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);

	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1);

	r = 1;
	resource_list_for_each_entry(entry, &bridge->windows) {
		struct resource *res = entry->res;
		u64 pci_addr = res->start - entry->offset;

		if (resource_type(res) == IORESOURCE_IO)
			cdns_pcie_set_outbound_region(pcie, busnr, 0, r,
						      true,
						      pci_pio_to_address(res->start),
						      pci_addr,
						      resource_size(res));
		else
			cdns_pcie_set_outbound_region(pcie, busnr, 0, r,
						      false,
						      res->start,
						      pci_addr,
						      resource_size(res));

		r++;
	}

	return cdns_pcie_host_map_dma_ranges(rc);
}

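/* One-time root port and address translation setup. */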
static int cdns_pcie_host_init(struct device *dev,
			       struct cdns_pcie_rc *rc)
{
	int err;

	err = cdns_pcie_host_init_root_port(rc);
	if (err)
		return err;

	return cdns_pcie_host_init_address_translation(rc);
}

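/**
 * cdns_pcie_host_setup - set up a Cadence PCIe controller in host mode
 * @rc: root complex data, allocated as the private part of a pci_host_bridge
 *
 * Map the "reg" and "cfg" resources, start the link, program the root port
 * and address translations, and register the host bridge.
 *
 * Return: 0 on success, a negative error code otherwise.
 */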
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
	struct device *dev = rc->pcie.dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	struct pci_host_bridge *bridge;
	enum cdns_pcie_rp_bar bar;
	struct cdns_pcie *pcie;
	struct resource *res;
	int ret;

	bridge = pci_host_bridge_from_priv(rc);
	if (!bridge)
		return -ENOMEM;

	pcie = &rc->pcie;
	pcie->is_rc = true;

	rc->vendor_id = 0xffff;
	of_property_read_u32(np, "vendor-id", &rc->vendor_id);

	rc->device_id = 0xffff;
	of_property_read_u32(np, "device-id", &rc->device_id);

	pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	rc->cfg_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(rc->cfg_base))
		return PTR_ERR(rc->cfg_base);
	rc->cfg_res = res;

	ret = cdns_pcie_start_link(pcie);
	if (ret) {
		dev_err(dev, "Failed to start link\n");
		return ret;
	}

	ret = cdns_pcie_host_start_link(rc);
	if (ret)
		dev_dbg(dev, "PCIe link never came up\n");

	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
		rc->avail_ib_bar[bar] = true;

	ret = cdns_pcie_host_init(dev, rc);
	if (ret)
		return ret;

	if (!bridge->ops)
		bridge->ops = &cdns_pcie_host_ops;

	ret = pci_host_probe(bridge);
	if (ret < 0)
		goto err_init;

	return 0;

 err_init:
	pm_runtime_put_sync(dev);

	return ret;
}