1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * Synopsys DesignWare PCIe host controller driver
4   *
5   * Copyright (C) 2013 Samsung Electronics Co., Ltd.
6   *		https://www.samsung.com
7   *
8   * Author: Jingoo Han <jg1.han@samsung.com>
9   */
10  
11  #include <linux/iopoll.h>
12  #include <linux/irqchip/chained_irq.h>
13  #include <linux/irqdomain.h>
14  #include <linux/msi.h>
15  #include <linux/of_address.h>
16  #include <linux/of_pci.h>
17  #include <linux/pci_regs.h>
18  #include <linux/platform_device.h>
19  
20  #include "../../pci.h"
21  #include "pcie-designware.h"
22  
23  static struct pci_ops dw_pcie_ops;
24  static struct pci_ops dw_child_pcie_ops;
25  
26  static void dw_msi_ack_irq(struct irq_data *d)
27  {
28  	irq_chip_ack_parent(d);
29  }
30  
31  static void dw_msi_mask_irq(struct irq_data *d)
32  {
33  	pci_msi_mask_irq(d);
34  	irq_chip_mask_parent(d);
35  }
36  
37  static void dw_msi_unmask_irq(struct irq_data *d)
38  {
39  	pci_msi_unmask_irq(d);
40  	irq_chip_unmask_parent(d);
41  }
42  
43  static struct irq_chip dw_pcie_msi_irq_chip = {
44  	.name = "PCI-MSI",
45  	.irq_ack = dw_msi_ack_irq,
46  	.irq_mask = dw_msi_mask_irq,
47  	.irq_unmask = dw_msi_unmask_irq,
48  };
49  
50  static struct msi_domain_info dw_pcie_msi_domain_info = {
51  	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
52  		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
53  	.chip	= &dw_pcie_msi_irq_chip,
54  };
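/*
 * Two IRQ domains are stacked here: the bottom domain (served by
 * dw_pci_msi_bottom_irq_chip further below) programs the controller's
 * per-vector enable/mask/status registers, while the generic PCI/MSI domain
 * built from dw_pcie_msi_domain_info layers the standard per-device MSI
 * handling on top and chains into the parent through the dw_msi_*_irq()
 * callbacks above.
 */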
55  
56  /* MSI int handler */
57  irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
58  {
59  	int i, pos;
60  	unsigned long val;
61  	u32 status, num_ctrls;
62  	irqreturn_t ret = IRQ_NONE;
63  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
64  
65  	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
66  
67  	for (i = 0; i < num_ctrls; i++) {
68  		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
69  					   (i * MSI_REG_CTRL_BLOCK_SIZE));
70  		if (!status)
71  			continue;
72  
73  		ret = IRQ_HANDLED;
74  		val = status;
75  		pos = 0;
76  		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
77  					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
78  			generic_handle_domain_irq(pp->irq_domain,
79  						  (i * MAX_MSI_IRQS_PER_CTRL) +
80  						  pos);
81  			pos++;
82  		}
83  	}
84  
85  	return ret;
86  }
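/*
 * Each MSI controller services MAX_MSI_IRQS_PER_CTRL vectors through its own
 * STATUS/MASK/ENABLE registers, each block placed MSI_REG_CTRL_BLOCK_SIZE
 * bytes after the previous one. The hwirq for controller i, bit pos is simply
 * i * MAX_MSI_IRQS_PER_CTRL + pos, which is what the loop above hands to
 * generic_handle_domain_irq().
 */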
87  
88  /* Chained MSI interrupt service routine */
89  static void dw_chained_msi_isr(struct irq_desc *desc)
90  {
91  	struct irq_chip *chip = irq_desc_get_chip(desc);
92  	struct dw_pcie_rp *pp;
93  
94  	chained_irq_enter(chip, desc);
95  
96  	pp = irq_desc_get_handler_data(desc);
97  	dw_handle_msi_irq(pp);
98  
99  	chained_irq_exit(chip, desc);
100  }
101  
102  static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
103  {
104  	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
105  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
106  	u64 msi_target;
107  
108  	msi_target = (u64)pp->msi_data;
109  
110  	msg->address_lo = lower_32_bits(msi_target);
111  	msg->address_hi = upper_32_bits(msi_target);
112  
113  	msg->data = d->hwirq;
114  
115  	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
116  		(int)d->hwirq, msg->address_hi, msg->address_lo);
117  }
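/*
 * The MSI "doorbell" is the bus address in pp->msi_data: an endpoint signals
 * vector N by posting a memory write of N (d->hwirq) to that address, and the
 * controller's iMSI-RX logic latches it into the per-controller status
 * register decoded in dw_handle_msi_irq(). The driver never reads the backing
 * buffer; only its bus address matters.
 */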
118  
119  static int dw_pci_msi_set_affinity(struct irq_data *d,
120  				   const struct cpumask *mask, bool force)
121  {
122  	return -EINVAL;
123  }
124  
125  static void dw_pci_bottom_mask(struct irq_data *d)
126  {
127  	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
128  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
129  	unsigned int res, bit, ctrl;
130  	unsigned long flags;
131  
132  	raw_spin_lock_irqsave(&pp->lock, flags);
133  
134  	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
135  	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
136  	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
137  
138  	pp->irq_mask[ctrl] |= BIT(bit);
139  	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
140  
141  	raw_spin_unlock_irqrestore(&pp->lock, flags);
142  }
143  
144  static void dw_pci_bottom_unmask(struct irq_data *d)
145  {
146  	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
147  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
148  	unsigned int res, bit, ctrl;
149  	unsigned long flags;
150  
151  	raw_spin_lock_irqsave(&pp->lock, flags);
152  
153  	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
154  	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
155  	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
156  
157  	pp->irq_mask[ctrl] &= ~BIT(bit);
158  	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
159  
160  	raw_spin_unlock_irqrestore(&pp->lock, flags);
161  }
162  
163  static void dw_pci_bottom_ack(struct irq_data *d)
164  {
165  	struct dw_pcie_rp *pp  = irq_data_get_irq_chip_data(d);
166  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
167  	unsigned int res, bit, ctrl;
168  
169  	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
170  	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
171  	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
172  
173  	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
174  }
175  
176  static struct irq_chip dw_pci_msi_bottom_irq_chip = {
177  	.name = "DWPCI-MSI",
178  	.irq_ack = dw_pci_bottom_ack,
179  	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
180  	.irq_set_affinity = dw_pci_msi_set_affinity,
181  	.irq_mask = dw_pci_bottom_mask,
182  	.irq_unmask = dw_pci_bottom_unmask,
183  };
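/*
 * The bottom-chip callbacks above all use the same mapping: hwirq divided by
 * MAX_MSI_IRQS_PER_CTRL selects the controller (and hence the register block
 * offset res = ctrl * MSI_REG_CTRL_BLOCK_SIZE), and hwirq modulo
 * MAX_MSI_IRQS_PER_CTRL selects the bit. Mask/unmask rewrite the cached
 * pp->irq_mask[] under pp->lock, while ack clears a single vector by writing
 * its bit to the write-one-to-clear PCIE_MSI_INTR0_STATUS register.
 */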
184  
185  static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
186  				    unsigned int virq, unsigned int nr_irqs,
187  				    void *args)
188  {
189  	struct dw_pcie_rp *pp = domain->host_data;
190  	unsigned long flags;
191  	u32 i;
192  	int bit;
193  
194  	raw_spin_lock_irqsave(&pp->lock, flags);
195  
196  	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
197  				      order_base_2(nr_irqs));
198  
199  	raw_spin_unlock_irqrestore(&pp->lock, flags);
200  
201  	if (bit < 0)
202  		return -ENOSPC;
203  
204  	for (i = 0; i < nr_irqs; i++)
205  		irq_domain_set_info(domain, virq + i, bit + i,
206  				    pp->msi_irq_chip,
207  				    pp, handle_edge_irq,
208  				    NULL, NULL);
209  
210  	return 0;
211  }
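/*
 * Multi-MSI requires a device's hwirqs to be contiguous and naturally
 * aligned, so the allocator above grabs a power-of-two sized region
 * (order_base_2(nr_irqs)) from the pp->msi_irq_in_use bitmap and maps the
 * nr_irqs virqs onto consecutive hwirqs starting at that bit.
 */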
212  
213  static void dw_pcie_irq_domain_free(struct irq_domain *domain,
214  				    unsigned int virq, unsigned int nr_irqs)
215  {
216  	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
217  	struct dw_pcie_rp *pp = domain->host_data;
218  	unsigned long flags;
219  
220  	raw_spin_lock_irqsave(&pp->lock, flags);
221  
222  	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
223  			      order_base_2(nr_irqs));
224  
225  	raw_spin_unlock_irqrestore(&pp->lock, flags);
226  }
227  
228  static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
229  	.alloc	= dw_pcie_irq_domain_alloc,
230  	.free	= dw_pcie_irq_domain_free,
231  };
232  
233  int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
234  {
235  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
236  	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
237  
238  	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
239  					       &dw_pcie_msi_domain_ops, pp);
240  	if (!pp->irq_domain) {
241  		dev_err(pci->dev, "Failed to create IRQ domain\n");
242  		return -ENOMEM;
243  	}
244  
245  	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);
246  
247  	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
248  						   &dw_pcie_msi_domain_info,
249  						   pp->irq_domain);
250  	if (!pp->msi_domain) {
251  		dev_err(pci->dev, "Failed to create MSI domain\n");
252  		irq_domain_remove(pp->irq_domain);
253  		return -ENOMEM;
254  	}
255  
256  	return 0;
257  }
258  
259  static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
260  {
261  	u32 ctrl;
262  
263  	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
264  		if (pp->msi_irq[ctrl] > 0)
265  			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
266  							 NULL, NULL);
267  	}
268  
269  	irq_domain_remove(pp->msi_domain);
270  	irq_domain_remove(pp->irq_domain);
271  }
272  
273  static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
274  {
275  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
276  	u64 msi_target = (u64)pp->msi_data;
277  
278  	if (!pci_msi_enabled() || !pp->has_msi_ctrl)
279  		return;
280  
281  	/* Program the msi_data */
282  	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
283  	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
284  }
285  
286  static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
287  {
288  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
289  	struct device *dev = pci->dev;
290  	struct platform_device *pdev = to_platform_device(dev);
291  	u32 ctrl, max_vectors;
292  	int irq;
293  
294  	/* Parse any "msiX" IRQs described in the devicetree */
295  	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
296  		char msi_name[] = "msiX";
297  
298  		msi_name[3] = '0' + ctrl;
299  		irq = platform_get_irq_byname_optional(pdev, msi_name);
300  		if (irq == -ENXIO)
301  			break;
302  		if (irq < 0)
303  			return dev_err_probe(dev, irq,
304  					     "Failed to parse MSI IRQ '%s'\n",
305  					     msi_name);
306  
307  		pp->msi_irq[ctrl] = irq;
308  	}
309  
310  	/* If no "msiX" IRQs, the caller should fall back to the "msi" IRQ */
311  	if (ctrl == 0)
312  		return -ENXIO;
313  
314  	max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
315  	if (pp->num_vectors > max_vectors) {
316  		dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
317  			 max_vectors);
318  		pp->num_vectors = max_vectors;
319  	}
320  	if (!pp->num_vectors)
321  		pp->num_vectors = max_vectors;
322  
323  	return 0;
324  }
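/*
 * An illustrative devicetree fragment (interrupt specifiers invented here,
 * not taken from any particular binding) using split MSI IRQs would name one
 * interrupt per controller as matched by the loop above:
 *
 *	interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
 *		     <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
 *	interrupt-names = "msi0", "msi1";
 *
 * Two controllers then cap num_vectors at 2 * MAX_MSI_IRQS_PER_CTRL.
 */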
325  
326  static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
327  {
328  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
329  	struct device *dev = pci->dev;
330  	struct platform_device *pdev = to_platform_device(dev);
331  	u64 *msi_vaddr;
332  	int ret;
333  	u32 ctrl, num_ctrls;
334  
335  	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
336  		pp->irq_mask[ctrl] = ~0;
337  
338  	if (!pp->msi_irq[0]) {
339  		ret = dw_pcie_parse_split_msi_irq(pp);
340  		if (ret < 0 && ret != -ENXIO)
341  			return ret;
342  	}
343  
344  	if (!pp->num_vectors)
345  		pp->num_vectors = MSI_DEF_NUM_VECTORS;
346  	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
347  
348  	if (!pp->msi_irq[0]) {
349  		pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
350  		if (pp->msi_irq[0] < 0) {
351  			pp->msi_irq[0] = platform_get_irq(pdev, 0);
352  			if (pp->msi_irq[0] < 0)
353  				return pp->msi_irq[0];
354  		}
355  	}
356  
357  	dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);
358  
359  	pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
360  
361  	ret = dw_pcie_allocate_domains(pp);
362  	if (ret)
363  		return ret;
364  
365  	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
366  		if (pp->msi_irq[ctrl] > 0)
367  			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
368  						    dw_chained_msi_isr, pp);
369  	}
370  
371  	/*
372  	 * Even though the iMSI-RX Module supports 64-bit addresses, some
373  	 * peripheral PCIe devices may lack 64-bit message support. In
374  	 * order not to miss MSI TLPs from those devices, the MSI target
375  	 * address has to be within the lowest 4GB.
376  	 *
377  	 * Note that until a better alternative is found, the reservation is
378  	 * done by allocating from the artificially limited DMA-coherent
379  	 * memory.
380  	 */
381  	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
382  	if (ret)
383  		dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
384  
385  	msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
386  					GFP_KERNEL);
387  	if (!msi_vaddr) {
388  		dev_err(dev, "Failed to alloc and map MSI data\n");
389  		dw_pcie_free_msi(pp);
390  		return -ENOMEM;
391  	}
392  
393  	return 0;
394  }
395  
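/*
 * dw_pcie_host_init() below is the common Root Complex bring-up path shared
 * by the platform glue drivers: map the "config" space, run the platform's
 * optional host_init() hook, set up MSI (either the built-in controller or a
 * platform/external one), detect the IP version, iATU and eDMA, program the
 * RC through dw_pcie_setup_rc(), start the link and finally probe the host
 * bridge.
 */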
396  int dw_pcie_host_init(struct dw_pcie_rp *pp)
397  {
398  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
399  	struct device *dev = pci->dev;
400  	struct device_node *np = dev->of_node;
401  	struct platform_device *pdev = to_platform_device(dev);
402  	struct resource_entry *win;
403  	struct pci_host_bridge *bridge;
404  	struct resource *res;
405  	int ret;
406  
407  	raw_spin_lock_init(&pp->lock);
408  
409  	ret = dw_pcie_get_resources(pci);
410  	if (ret)
411  		return ret;
412  
413  	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
414  	if (res) {
415  		pp->cfg0_size = resource_size(res);
416  		pp->cfg0_base = res->start;
417  
418  		pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
419  		if (IS_ERR(pp->va_cfg0_base))
420  			return PTR_ERR(pp->va_cfg0_base);
421  	} else {
422  		dev_err(dev, "Missing *config* reg space\n");
423  		return -ENODEV;
424  	}
425  
426  	bridge = devm_pci_alloc_host_bridge(dev, 0);
427  	if (!bridge)
428  		return -ENOMEM;
429  
430  	pp->bridge = bridge;
431  
432  	/* Get the I/O range from DT */
433  	win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
434  	if (win) {
435  		pp->io_size = resource_size(win->res);
436  		pp->io_bus_addr = win->res->start - win->offset;
437  		pp->io_base = pci_pio_to_address(win->res->start);
438  	}
439  
440  	/* Set default bus ops */
441  	bridge->ops = &dw_pcie_ops;
442  	bridge->child_ops = &dw_child_pcie_ops;
443  
444  	if (pp->ops->host_init) {
445  		ret = pp->ops->host_init(pp);
446  		if (ret)
447  			return ret;
448  	}
449  
450  	if (pci_msi_enabled()) {
451  		pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
452  				     of_property_read_bool(np, "msi-parent") ||
453  				     of_property_read_bool(np, "msi-map"));
454  
455  		/*
456  		 * For the has_msi_ctrl case, the default assignment is handled
457  		 * in dw_pcie_msi_host_init().
458  		 */
459  		if (!pp->has_msi_ctrl && !pp->num_vectors) {
460  			pp->num_vectors = MSI_DEF_NUM_VECTORS;
461  		} else if (pp->num_vectors > MAX_MSI_IRQS) {
462  			dev_err(dev, "Invalid number of vectors\n");
463  			ret = -EINVAL;
464  			goto err_deinit_host;
465  		}
466  
467  		if (pp->ops->msi_host_init) {
468  			ret = pp->ops->msi_host_init(pp);
469  			if (ret < 0)
470  				goto err_deinit_host;
471  		} else if (pp->has_msi_ctrl) {
472  			ret = dw_pcie_msi_host_init(pp);
473  			if (ret < 0)
474  				goto err_deinit_host;
475  		}
476  	}
477  
478  	dw_pcie_version_detect(pci);
479  
480  	dw_pcie_iatu_detect(pci);
481  
482  	ret = dw_pcie_edma_detect(pci);
483  	if (ret)
484  		goto err_free_msi;
485  
486  	ret = dw_pcie_setup_rc(pp);
487  	if (ret)
488  		goto err_remove_edma;
489  
490  	if (!dw_pcie_link_up(pci)) {
491  		ret = dw_pcie_start_link(pci);
492  		if (ret)
493  			goto err_remove_edma;
494  	}
495  
496  	/* Ignore errors, the link may come up later */
497  	dw_pcie_wait_for_link(pci);
498  
499  	bridge->sysdata = pp;
500  
501  	ret = pci_host_probe(bridge);
502  	if (ret)
503  		goto err_stop_link;
504  
505  	return 0;
506  
507  err_stop_link:
508  	dw_pcie_stop_link(pci);
509  
510  err_remove_edma:
511  	dw_pcie_edma_remove(pci);
512  
513  err_free_msi:
514  	if (pp->has_msi_ctrl)
515  		dw_pcie_free_msi(pp);
516  
517  err_deinit_host:
518  	if (pp->ops->host_deinit)
519  		pp->ops->host_deinit(pp);
520  
521  	return ret;
522  }
523  EXPORT_SYMBOL_GPL(dw_pcie_host_init);
524  
525  void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
526  {
527  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
528  
529  	pci_stop_root_bus(pp->bridge->bus);
530  	pci_remove_root_bus(pp->bridge->bus);
531  
532  	dw_pcie_stop_link(pci);
533  
534  	dw_pcie_edma_remove(pci);
535  
536  	if (pp->has_msi_ctrl)
537  		dw_pcie_free_msi(pp);
538  
539  	if (pp->ops->host_deinit)
540  		pp->ops->host_deinit(pp);
541  }
542  EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
543  
544  static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
545  						unsigned int devfn, int where)
546  {
547  	struct dw_pcie_rp *pp = bus->sysdata;
548  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
549  	int type, ret;
550  	u32 busdev;
551  
552  	/*
553  	 * Checking whether the link is up here is a last line of defense
554  	 * against platforms that forward errors on the system bus as
555  	 * SError upon PCI configuration transactions issued when the link
556  	 * is down. This check is racy by definition and does not stop
557  	 * the system from triggering an SError if the link goes down
558  	 * after this check is performed.
559  	 */
560  	if (!dw_pcie_link_up(pci))
561  		return NULL;
562  
563  	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
564  		 PCIE_ATU_FUNC(PCI_FUNC(devfn));
565  
566  	if (pci_is_root_bus(bus->parent))
567  		type = PCIE_ATU_TYPE_CFG0;
568  	else
569  		type = PCIE_ATU_TYPE_CFG1;
570  
571  	ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
572  					pp->cfg0_size);
573  	if (ret)
574  		return NULL;
575  
576  	return pp->va_cfg0_base + where;
577  }
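/*
 * Type 0 (CFG0) config cycles are generated for devices directly behind the
 * Root Port (bus->parent is the root bus), Type 1 (CFG1) for everything
 * further downstream. Either way the shared outbound window 0 is retargeted
 * at the bus/devfn being accessed before the MMIO access through
 * pp->va_cfg0_base happens.
 */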
578  
579  static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
580  				 int where, int size, u32 *val)
581  {
582  	struct dw_pcie_rp *pp = bus->sysdata;
583  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
584  	int ret;
585  
586  	ret = pci_generic_config_read(bus, devfn, where, size, val);
587  	if (ret != PCIBIOS_SUCCESSFUL)
588  		return ret;
589  
590  	if (pp->cfg0_io_shared) {
591  		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
592  						pp->io_base, pp->io_bus_addr,
593  						pp->io_size);
594  		if (ret)
595  			return PCIBIOS_SET_FAILED;
596  	}
597  
598  	return PCIBIOS_SUCCESSFUL;
599  }
600  
601  static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
602  				 int where, int size, u32 val)
603  {
604  	struct dw_pcie_rp *pp = bus->sysdata;
605  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
606  	int ret;
607  
608  	ret = pci_generic_config_write(bus, devfn, where, size, val);
609  	if (ret != PCIBIOS_SUCCESSFUL)
610  		return ret;
611  
612  	if (pp->cfg0_io_shared) {
613  		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
614  						pp->io_base, pp->io_bus_addr,
615  						pp->io_size);
616  		if (ret)
617  			return PCIBIOS_SET_FAILED;
618  	}
619  
620  	return PCIBIOS_SUCCESSFUL;
621  }
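/*
 * When the controller has too few outbound iATU windows to give I/O space its
 * own window (pp->cfg0_io_shared, set in dw_pcie_iatu_setup()), window 0 is
 * shared between config and I/O: after every downstream config access it is
 * switched back to PCIE_ATU_TYPE_IO so that port I/O keeps working.
 */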
622  
623  static struct pci_ops dw_child_pcie_ops = {
624  	.map_bus = dw_pcie_other_conf_map_bus,
625  	.read = dw_pcie_rd_other_conf,
626  	.write = dw_pcie_wr_other_conf,
627  };
628  
629  void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
630  {
631  	struct dw_pcie_rp *pp = bus->sysdata;
632  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
633  
634  	if (PCI_SLOT(devfn) > 0)
635  		return NULL;
636  
637  	return pci->dbi_base + where;
638  }
639  EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);
640  
641  static struct pci_ops dw_pcie_ops = {
642  	.map_bus = dw_pcie_own_conf_map_bus,
643  	.read = pci_generic_config_read,
644  	.write = pci_generic_config_write,
645  };
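/*
 * Accesses to the Root Port's own config space go straight to the DBI
 * registers via dw_pcie_own_conf_map_bus(); only device 0 exists on the root
 * bus, hence the PCI_SLOT(devfn) > 0 rejection above.
 */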
646  
647  static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
648  {
649  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
650  	struct resource_entry *entry;
651  	int i, ret;
652  
653  	/* Note the very first outbound ATU is used for CFG IOs */
654  	if (!pci->num_ob_windows) {
655  		dev_err(pci->dev, "No outbound iATU found\n");
656  		return -EINVAL;
657  	}
658  
659  	/*
660  	 * Ensure all out/inbound windows are disabled before proceeding with
661  	 * the MEM/IO (dma-)ranges setups.
662  	 */
663  	for (i = 0; i < pci->num_ob_windows; i++)
664  		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);
665  
666  	for (i = 0; i < pci->num_ib_windows; i++)
667  		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);
668  
669  	i = 0;
670  	resource_list_for_each_entry(entry, &pp->bridge->windows) {
671  		if (resource_type(entry->res) != IORESOURCE_MEM)
672  			continue;
673  
674  		if (pci->num_ob_windows <= ++i)
675  			break;
676  
677  		ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
678  						entry->res->start,
679  						entry->res->start - entry->offset,
680  						resource_size(entry->res));
681  		if (ret) {
682  			dev_err(pci->dev, "Failed to set MEM range %pr\n",
683  				entry->res);
684  			return ret;
685  		}
686  	}
687  
688  	if (pp->io_size) {
689  		if (pci->num_ob_windows > ++i) {
690  			ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
691  							pp->io_base,
692  							pp->io_bus_addr,
693  							pp->io_size);
694  			if (ret) {
695  				dev_err(pci->dev, "Failed to set IO range %pr\n",
696  					entry->res);
697  				return ret;
698  			}
699  		} else {
700  			pp->cfg0_io_shared = true;
701  		}
702  	}
703  
704  	if (pci->num_ob_windows <= i)
705  		dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
706  			 pci->num_ob_windows);
707  
708  	i = 0;
709  	resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
710  		if (resource_type(entry->res) != IORESOURCE_MEM)
711  			continue;
712  
713  		if (pci->num_ib_windows <= i)
714  			break;
715  
716  		ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
717  					       entry->res->start,
718  					       entry->res->start - entry->offset,
719  					       resource_size(entry->res));
720  		if (ret) {
721  			dev_err(pci->dev, "Failed to set DMA range %pr\n",
722  				entry->res);
723  			return ret;
724  		}
725  	}
726  
727  	if (pci->num_ib_windows <= i)
728  		dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n",
729  			 pci->num_ib_windows);
730  
731  	return 0;
732  }
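/*
 * Outbound window budgeting above: window 0 stays reserved for config cycles,
 * windows 1..N take the bridge MEM ranges, and the next free window (if any)
 * takes the I/O range; otherwise the config window is marked as shared via
 * pp->cfg0_io_shared. Inbound windows are handed out to the dma-ranges in
 * order, starting from window 0.
 */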
733  
734  int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
735  {
736  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
737  	u32 val, ctrl, num_ctrls;
738  	int ret;
739  
740  	/*
741  	 * Enable DBI read-only registers for writing/updating configuration.
742  	 * Write permission gets disabled towards the end of this function.
743  	 */
744  	dw_pcie_dbi_ro_wr_en(pci);
745  
746  	dw_pcie_setup(pci);
747  
748  	if (pp->has_msi_ctrl) {
749  		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
750  
751  		/* Initialize IRQ Status array */
752  		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
753  			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
754  					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
755  					    pp->irq_mask[ctrl]);
756  			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
757  					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
758  					    ~0);
759  		}
760  	}
761  
762  	dw_pcie_msi_init(pp);
763  
764  	/* Setup RC BARs */
765  	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
766  	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
767  
768  	/* Setup interrupt pins */
769  	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
770  	val &= 0xffff00ff;
771  	val |= 0x00000100;
772  	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
773  
774  	/* Setup bus numbers */
775  	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
776  	val &= 0xff000000;
777  	val |= 0x00ff0100;
778  	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);
779  
780  	/* Setup command register */
781  	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
782  	val &= 0xffff0000;
783  	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
784  		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
785  	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
786  
787  	/*
788  	 * If the platform provides its own child bus config accesses, it means
789  	 * the platform uses its own address translation component rather than
790  	 * ATU, so we should not program the ATU here.
791  	 */
792  	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
793  		ret = dw_pcie_iatu_setup(pp);
794  		if (ret)
795  			return ret;
796  	}
797  
798  	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
799  
800  	/* Program correct class for RC */
801  	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
802  
803  	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
804  	val |= PORT_LOGIC_SPEED_CHANGE;
805  	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
806  
807  	dw_pcie_dbi_ro_wr_dis(pci);
808  
809  	return 0;
810  }
811  EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
812  
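/*
 * System suspend: unless ASPM L1 is enabled on the Root Port (in which case
 * the link is left alone), dw_pcie_suspend_noirq() below asks the platform to
 * broadcast PME_Turn_Off via the pme_turn_off() hook, polls the LTSSM until
 * the link settles in L2, and then lets the platform tear the host down via
 * host_deinit().
 */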
813  int dw_pcie_suspend_noirq(struct dw_pcie *pci)
814  {
815  	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
816  	u32 val;
817  	int ret;
818  
819  	/*
820  	 * If L1SS is supported, then do not put the link into L2 as some
821  	 * devices such as NVMe expect low resume latency.
822  	 */
823  	if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
824  		return 0;
825  
826  	if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)
827  		return 0;
828  
829  	if (!pci->pp.ops->pme_turn_off)
830  		return 0;
831  
832  	pci->pp.ops->pme_turn_off(&pci->pp);
833  
834  	ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,
835  				PCIE_PME_TO_L2_TIMEOUT_US/10,
836  				PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
837  	if (ret) {
838  		dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
839  		return ret;
840  	}
841  
842  	if (pci->pp.ops->host_deinit)
843  		pci->pp.ops->host_deinit(&pci->pp);
844  
845  	pci->suspended = true;
846  
847  	return ret;
848  }
849  EXPORT_SYMBOL_GPL(dw_pcie_suspend_noirq);
850  
851  int dw_pcie_resume_noirq(struct dw_pcie *pci)
852  {
853  	int ret;
854  
855  	if (!pci->suspended)
856  		return 0;
857  
858  	pci->suspended = false;
859  
860  	if (pci->pp.ops->host_init) {
861  		ret = pci->pp.ops->host_init(&pci->pp);
862  		if (ret) {
863  			dev_err(pci->dev, "Host init failed: %d\n", ret);
864  			return ret;
865  		}
866  	}
867  
868  	dw_pcie_setup_rc(&pci->pp);
869  
870  	ret = dw_pcie_start_link(pci);
871  	if (ret)
872  		return ret;
873  
874  	ret = dw_pcie_wait_for_link(pci);
875  	if (ret)
876  		return ret;
877  
878  	return ret;
879  }
880  EXPORT_SYMBOL_GPL(dw_pcie_resume_noirq);
881