// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;

static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
	int i, pos;
	unsigned long val;
	u32 status, num_ctrls;
	irqreturn_t ret = IRQ_NONE;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
					   (i * MSI_REG_CTRL_BLOCK_SIZE));
		if (!status)
			continue;

		ret = IRQ_HANDLED;
		val = status;
		pos = 0;
		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			generic_handle_domain_irq(pp->irq_domain,
						  (i * MAX_MSI_IRQS_PER_CTRL) +
						  pos);
			pos++;
		}
	}

	return ret;
}

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct dw_pcie_rp *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

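/*
 * Compose the MSI message for a vector: the address is the DMA address of
 * the dummy MSI target buffer allocated in dw_pcie_msi_host_init() and the
 * payload is simply the hwirq number.
 */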
static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

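/*
 * dw_pci_bottom_mask()/dw_pci_bottom_unmask() update the cached per-controller
 * mask and mirror it to the PCIE_MSI_INTR0_MASK register under pp->lock.
 */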
static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

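/* Ack a vector by writing its bit to the per-controller status register */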
static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct dw_pcie_rp *pp  = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};

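/*
 * Allocate a power-of-two sized block of vectors from the msi_irq_in_use
 * bitmap and bind each one to the MSI irq_chip selected at host init time.
 */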
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};

int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
					       &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}

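/* Tear down the chained MSI handlers and remove the MSI/IRQ domains */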
static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
	u32 ctrl;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
							 NULL, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);
}

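/* Program the MSI target address into the controller's iMSI-RX address registers */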
static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target = (u64)pp->msi_data;

	if (!pci_msi_enabled() || !pp->has_msi_ctrl)
		return;

	/* Program the msi_data */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}

static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u32 ctrl, max_vectors;
	int irq;

	/* Parse any "msiX" IRQs described in the devicetree */
	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		char msi_name[] = "msiX";

		msi_name[3] = '0' + ctrl;
		irq = platform_get_irq_byname_optional(pdev, msi_name);
		if (irq == -ENXIO)
			break;
		if (irq < 0)
			return dev_err_probe(dev, irq,
					     "Failed to parse MSI IRQ '%s'\n",
					     msi_name);

		pp->msi_irq[ctrl] = irq;
	}

	/* If no "msiX" IRQs, caller should fallback to "msi" IRQ */
	if (ctrl == 0)
		return -ENXIO;

	max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
	if (pp->num_vectors > max_vectors) {
		dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
			 max_vectors);
		pp->num_vectors = max_vectors;
	}
	if (!pp->num_vectors)
		pp->num_vectors = max_vectors;

	return 0;
}

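/*
 * Set up the internal MSI controller: parse the "msi"/"msiX" IRQs, create
 * the IRQ domains, install the chained handlers and allocate a 32-bit
 * DMA-able target address for inbound MSI writes.
 */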
static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u64 *msi_vaddr;
	int ret;
	u32 ctrl, num_ctrls;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
		pp->irq_mask[ctrl] = ~0;

	if (!pp->msi_irq[0]) {
		ret = dw_pcie_parse_split_msi_irq(pp);
		if (ret < 0 && ret != -ENXIO)
			return ret;
	}

	if (!pp->num_vectors)
		pp->num_vectors = MSI_DEF_NUM_VECTORS;
	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	if (!pp->msi_irq[0]) {
		pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
		if (pp->msi_irq[0] < 0) {
			pp->msi_irq[0] = platform_get_irq(pdev, 0);
			if (pp->msi_irq[0] < 0)
				return pp->msi_irq[0];
		}
	}

	dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);

	pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

	ret = dw_pcie_allocate_domains(pp);
	if (ret)
		return ret;

	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
						    dw_chained_msi_isr, pp);
	}

	/*
	 * Even though the iMSI-RX Module supports 64-bit addresses some
	 * peripheral PCIe devices may lack 64-bit message support. In
	 * order not to miss MSI TLPs from those devices the MSI target
	 * address has to be within the lowest 4GB.
	 *
	 * Note until there is a better alternative found the reservation is
	 * done by allocating from the artificially limited DMA-coherent
	 * memory.
	 */
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (ret)
		dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");

	msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
					GFP_KERNEL);
	if (!msi_vaddr) {
		dev_err(dev, "Failed to alloc and map MSI data\n");
		dw_pcie_free_msi(pp);
		return -ENOMEM;
	}

	return 0;
}

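/*
 * Bring up a DesignWare PCIe Root Port: map the config space, allocate the
 * host bridge, set up MSI handling, program the RC and the iATU, start the
 * link and enumerate the bus.
 */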
int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win;
	struct pci_host_bridge *bridge;
	struct resource *res;
	int ret;

	raw_spin_lock_init(&pp->lock);

	ret = dw_pcie_get_resources(pci);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (res) {
		pp->cfg0_size = resource_size(res);
		pp->cfg0_base = res->start;

		pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
		if (IS_ERR(pp->va_cfg0_base))
			return PTR_ERR(pp->va_cfg0_base);
	} else {
		dev_err(dev, "Missing *config* reg space\n");
		return -ENODEV;
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	pp->bridge = bridge;

	/* Get the I/O range from DT */
	win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
	if (win) {
		pp->io_size = resource_size(win->res);
		pp->io_bus_addr = win->res->start - win->offset;
		pp->io_base = pci_pio_to_address(win->res->start);
	}

	/* Set default bus ops */
	bridge->ops = &dw_pcie_ops;
	bridge->child_ops = &dw_child_pcie_ops;

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			return ret;
	}

	if (pci_msi_enabled()) {
		pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
				     of_property_read_bool(np, "msi-parent") ||
				     of_property_read_bool(np, "msi-map"));

		/*
		 * For the has_msi_ctrl case the default assignment is handled
		 * in the dw_pcie_msi_host_init().
		 */
		if (!pp->has_msi_ctrl && !pp->num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else if (pp->num_vectors > MAX_MSI_IRQS) {
			dev_err(dev, "Invalid number of vectors\n");
			ret = -EINVAL;
			goto err_deinit_host;
		}

		if (pp->ops->msi_host_init) {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		} else if (pp->has_msi_ctrl) {
			ret = dw_pcie_msi_host_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		}
	}

	dw_pcie_version_detect(pci);

	dw_pcie_iatu_detect(pci);

	ret = dw_pcie_edma_detect(pci);
	if (ret)
		goto err_free_msi;

	ret = dw_pcie_setup_rc(pp);
	if (ret)
		goto err_remove_edma;

	if (!dw_pcie_link_up(pci)) {
		ret = dw_pcie_start_link(pci);
		if (ret)
			goto err_remove_edma;
	}

	/* Ignore errors, the link may come up later */
	dw_pcie_wait_for_link(pci);

	bridge->sysdata = pp;

	ret = pci_host_probe(bridge);
	if (ret)
		goto err_stop_link;

	return 0;

err_stop_link:
	dw_pcie_stop_link(pci);

err_remove_edma:
	dw_pcie_edma_remove(pci);

err_free_msi:
	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

err_deinit_host:
	if (pp->ops->host_deinit)
		pp->ops->host_deinit(pp);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);

void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	pci_stop_root_bus(pp->bridge->bus);
	pci_remove_root_bus(pp->bridge->bus);

	dw_pcie_stop_link(pci);

	dw_pcie_edma_remove(pci);

	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

	if (pp->ops->host_deinit)
		pp->ops->host_deinit(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);

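/*
 * Map a config access on a child bus by (re-)programming outbound iATU
 * window 0 as a CFG0/CFG1 region targeting the requested BDF.
 */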
static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int type, ret;
	u32 busdev;

	/*
	 * Checking whether the link is up here is a last line of defense
	 * against platforms that forward errors on the system bus as
	 * SError upon PCI configuration transactions issued when the link
	 * is down. This check is racy by definition and does not stop
	 * the system from triggering an SError if the link goes down
	 * after this check is performed.
	 */
	if (!dw_pcie_link_up(pci))
		return NULL;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (pci_is_root_bus(bus->parent))
		type = PCIE_ATU_TYPE_CFG0;
	else
		type = PCIE_ATU_TYPE_CFG1;

	ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
					pp->cfg0_size);
	if (ret)
		return NULL;

	return pp->va_cfg0_base + where;
}

static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret;

	ret = pci_generic_config_read(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (pp->cfg0_io_shared) {
		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
						pp->io_base, pp->io_bus_addr,
						pp->io_size);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret;

	ret = pci_generic_config_write(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (pp->cfg0_io_shared) {
		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
						pp->io_base, pp->io_bus_addr,
						pp->io_size);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops dw_child_pcie_ops = {
	.map_bus = dw_pcie_other_conf_map_bus,
	.read = dw_pcie_rd_other_conf,
	.write = dw_pcie_wr_other_conf,
};

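/* Config accesses on the root bus go straight to the DBI space */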
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (PCI_SLOT(devfn) > 0)
		return NULL;

	return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);

static struct pci_ops dw_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

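/*
 * Program the outbound iATU windows for the bridge MEM/IO ranges and the
 * inbound windows for any dma-ranges. Outbound window 0 stays reserved for
 * config accesses; if no window is left for I/O, window 0 is shared with it.
 */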
static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct resource_entry *entry;
	int i, ret;

	/* Note the very first outbound ATU is used for CFG IOs */
	if (!pci->num_ob_windows) {
		dev_err(pci->dev, "No outbound iATU found\n");
		return -EINVAL;
	}

	/*
	 * Ensure all out/inbound windows are disabled before proceeding with
	 * the MEM/IO (dma-)ranges setups.
	 */
	for (i = 0; i < pci->num_ob_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);

	for (i = 0; i < pci->num_ib_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->windows) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ob_windows <= ++i)
			break;

		ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
						entry->res->start,
						entry->res->start - entry->offset,
						resource_size(entry->res));
		if (ret) {
			dev_err(pci->dev, "Failed to set MEM range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pp->io_size) {
		if (pci->num_ob_windows > ++i) {
			ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
							pp->io_base,
							pp->io_bus_addr,
							pp->io_size);
			if (ret) {
				dev_err(pci->dev, "Failed to set IO range %pr\n",
					entry->res);
				return ret;
			}
		} else {
			pp->cfg0_io_shared = true;
		}
	}

	if (pci->num_ob_windows <= i)
		dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
			 pci->num_ob_windows);

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ib_windows <= i)
			break;

		ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
					       entry->res->start,
					       entry->res->start - entry->offset,
					       resource_size(entry->res));
		if (ret) {
			dev_err(pci->dev, "Failed to set DMA range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pci->num_ib_windows <= i)
		dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n",
			 pci->num_ib_windows);

	return 0;
}

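/*
 * Program the Root Complex registers: MSI controller mask/enable, RC BARs,
 * interrupt pin, bus numbers, command register and, unless the platform
 * provides its own child config accessors, the iATU windows.
 */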
int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u32 val, ctrl, num_ctrls;
	int ret;

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	if (pp->has_msi_ctrl) {
		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

		/* Initialize IRQ Status array */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    pp->irq_mask[ctrl]);
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    ~0);
		}
	}

	dw_pcie_msi_init(pp);

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides its own child bus config accesses, it means
	 * the platform uses its own address translation component rather than
	 * ATU, so we should not program the ATU here.
	 */
	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
		ret = dw_pcie_iatu_setup(pp);
		if (ret)
			return ret;
	}

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Program correct class for RC */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);

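/* Put the link into L2 (when possible) before the system enters suspend */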
int dw_pcie_suspend_noirq(struct dw_pcie *pci)
{
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int ret;

	/*
	 * If L1SS is supported, then do not put the link into L2 as some
	 * devices such as NVMe expect low resume latency.
	 */
	if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
		return 0;

	if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)
		return 0;

	if (!pci->pp.ops->pme_turn_off)
		return 0;

	pci->pp.ops->pme_turn_off(&pci->pp);

	ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,
				PCIE_PME_TO_L2_TIMEOUT_US/10,
				PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
	if (ret) {
		dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
		return ret;
	}

	if (pci->pp.ops->host_deinit)
		pci->pp.ops->host_deinit(&pci->pp);

	pci->suspended = true;

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_suspend_noirq);

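/* Re-initialize the host, reprogram the RC and re-establish the link on resume */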
int dw_pcie_resume_noirq(struct dw_pcie *pci)
{
	int ret;

	if (!pci->suspended)
		return 0;

	pci->suspended = false;

	if (pci->pp.ops->host_init) {
		ret = pci->pp.ops->host_init(&pci->pp);
		if (ret) {
			dev_err(pci->dev, "Host init failed: %d\n", ret);
			return ret;
		}
	}

	dw_pcie_setup_rc(&pci->pp);

	ret = dw_pcie_start_link(pci);
	if (ret)
		return ret;

	ret = dw_pcie_wait_for_link(pci);
	if (ret)
		return ret;

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_resume_noirq);