// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Texas Instruments Keystone SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments, Ltd.
 *		https://www.ti.com
 *
 * Author: Murali Karicheri <m-karicheri2@ti.com>
 * Implementation based on pci-exynos.c and pcie-designware.c
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/signal.h>

#include "../../pci.h"
#include "pcie-designware.h"

#define PCIE_VENDORID_MASK	0xffff
#define PCIE_DEVICEID_SHIFT	16

/* Application registers */
#define PID				0x000
#define RTL				GENMASK(15, 11)
#define RTL_SHIFT			11
#define AM6_PCI_PG1_RTL_VER		0x15

#define CMD_STATUS			0x004
#define LTSSM_EN_VAL			BIT(0)
#define OB_XLAT_EN_VAL			BIT(1)
#define DBI_CS2				BIT(5)

#define CFG_SETUP			0x008
#define CFG_BUS(x)			(((x) & 0xff) << 16)
#define CFG_DEVICE(x)			(((x) & 0x1f) << 8)
#define CFG_FUNC(x)			((x) & 0x7)
#define CFG_TYPE1			BIT(24)

#define OB_SIZE				0x030
#define OB_OFFSET_INDEX(n)		(0x200 + (8 * (n)))
#define OB_OFFSET_HI(n)			(0x204 + (8 * (n)))
#define OB_ENABLEN			BIT(0)
#define OB_WIN_SIZE			8	/* 8MB */

#define PCIE_LEGACY_IRQ_ENABLE_SET(n)	(0x188 + (0x10 * ((n) - 1)))
#define PCIE_LEGACY_IRQ_ENABLE_CLR(n)	(0x18c + (0x10 * ((n) - 1)))
#define PCIE_EP_IRQ_SET			0x64
#define PCIE_EP_IRQ_CLR			0x68
#define INT_ENABLE			BIT(0)

/* IRQ register defines */
#define IRQ_EOI				0x050

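/*
 * MSI vectors are banked across eight register sets: vector v maps to bit
 * (v / 8) of MSI_IRQ_STATUS/ENABLE register (v % 8). See the matching
 * decode in ks_pcie_msi_irq_handler().
 */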
#define MSI_IRQ				0x054
#define MSI_IRQ_STATUS(n)		(0x104 + ((n) << 4))
#define MSI_IRQ_ENABLE_SET(n)		(0x108 + ((n) << 4))
#define MSI_IRQ_ENABLE_CLR(n)		(0x10c + ((n) << 4))
#define MSI_IRQ_OFFSET			4

#define IRQ_STATUS(n)			(0x184 + ((n) << 4))
#define IRQ_ENABLE_SET(n)		(0x188 + ((n) << 4))
#define INTx_EN				BIT(0)

#define ERR_IRQ_STATUS			0x1c4
#define ERR_IRQ_ENABLE_SET		0x1c8
#define ERR_AER				BIT(5)	/* ECRC error */
#define AM6_ERR_AER			BIT(4)	/* AM6 ECRC error */
#define ERR_AXI				BIT(4)	/* AXI tag lookup fatal error */
#define ERR_CORR			BIT(3)	/* Correctable error */
#define ERR_NONFATAL			BIT(2)	/* Non-fatal error */
#define ERR_FATAL			BIT(1)	/* Fatal error */
#define ERR_SYS				BIT(0)	/* System error */
#define ERR_IRQ_ALL			(ERR_AER | ERR_AXI | ERR_CORR | \
					 ERR_NONFATAL | ERR_FATAL | ERR_SYS)

/* PCIE controller device IDs */
#define PCIE_RC_K2HK			0xb008
#define PCIE_RC_K2E			0xb009
#define PCIE_RC_K2L			0xb00a
#define PCIE_RC_K2G			0xb00b

#define KS_PCIE_DEV_TYPE_MASK		(0x3 << 1)
#define KS_PCIE_DEV_TYPE(mode)		((mode) << 1)

#define EP				0x0
#define LEG_EP				0x1
#define RC				0x2

#define KS_PCIE_SYSCLOCKOUTEN		BIT(0)

#define AM654_PCIE_DEV_TYPE_MASK	0x3
#define AM654_WIN_SIZE			SZ_64K

#define APP_ADDR_SPACE_0		(16 * SZ_1K)

#define to_keystone_pcie(x)		dev_get_drvdata((x)->dev)

#define PCI_DEVICE_ID_TI_AM654X		0xb00c

struct ks_pcie_of_data {
	enum dw_pcie_device_mode mode;
	const struct dw_pcie_host_ops *host_ops;
	const struct dw_pcie_ep_ops *ep_ops;
	u32 version;
};

struct keystone_pcie {
	struct dw_pcie		*pci;
	/* PCI Device ID */
	u32			device_id;
	int			legacy_host_irqs[PCI_NUM_INTX];
	struct device_node	*legacy_intc_np;

	int			msi_host_irq;
	int			num_lanes;
	u32			num_viewport;
	struct phy		**phy;
	struct device_link	**link;
	struct device_node	*msi_intc_np;
	struct irq_domain	*legacy_irq_domain;
	struct device_node	*np;

	/* Application register space */
	void __iomem		*va_app_base;	/* DT 1st resource */
	struct resource		app;
	bool			is_am6;
};

static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
{
	return readl(ks_pcie->va_app_base + offset);
}

static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
			       u32 val)
{
	writel(val, ks_pcie->va_app_base + offset);
}

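/*
 * Ack an MSI vector: clear its status bit in the banked MSI_IRQ_STATUS
 * register, then write EOI for the corresponding MSI host interrupt line
 * (reg_offset + MSI_IRQ_OFFSET).
 */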
static void ks_pcie_msi_irq_ack(struct irq_data *data)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
	struct keystone_pcie *ks_pcie;
	u32 irq = data->hwirq;
	struct dw_pcie *pci;
	u32 reg_offset;
	u32 bit_pos;

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);

	reg_offset = irq % 8;
	bit_pos = irq >> 3;

	ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset),
			   BIT(bit_pos));
	ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
}

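/*
 * The MSI doorbell is the MSI_IRQ register in application register space;
 * an endpoint raises vector n by writing n to that address.
 */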
static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
	struct keystone_pcie *ks_pcie;
	struct dw_pcie *pci;
	u64 msi_target;

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);

	msi_target = ks_pcie->app.start + MSI_IRQ;
	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);
	msg->data = data->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}

static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
				    const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static void ks_pcie_msi_mask(struct irq_data *data)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
	struct keystone_pcie *ks_pcie;
	u32 irq = data->hwirq;
	struct dw_pcie *pci;
	unsigned long flags;
	u32 reg_offset;
	u32 bit_pos;

	raw_spin_lock_irqsave(&pp->lock, flags);

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);

	reg_offset = irq % 8;
	bit_pos = irq >> 3;

	ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset),
			   BIT(bit_pos));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void ks_pcie_msi_unmask(struct irq_data *data)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
	struct keystone_pcie *ks_pcie;
	u32 irq = data->hwirq;
	struct dw_pcie *pci;
	unsigned long flags;
	u32 reg_offset;
	u32 bit_pos;

	raw_spin_lock_irqsave(&pp->lock, flags);

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);

	reg_offset = irq % 8;
	bit_pos = irq >> 3;

	ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset),
			   BIT(bit_pos));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static struct irq_chip ks_pcie_msi_irq_chip = {
	.name = "KEYSTONE-PCI-MSI",
	.irq_ack = ks_pcie_msi_irq_ack,
	.irq_compose_msi_msg = ks_pcie_compose_msi_msg,
	.irq_set_affinity = ks_pcie_msi_set_affinity,
	.irq_mask = ks_pcie_msi_mask,
	.irq_unmask = ks_pcie_msi_unmask,
};

/**
 * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
 * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
 *	     PCIe host controller driver information.
 *
 * Since modification of dbi_cs2 involves a different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val |= DBI_CS2;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);

	do {
		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	} while (!(val & DBI_CS2));
}

/**
 * ks_pcie_clear_dbi_mode() - Disable DBI mode
 * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
 *	     PCIe host controller driver information.
 *
 * Since modification of dbi_cs2 involves a different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val &= ~DBI_CS2;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);

	do {
		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	} while (val & DBI_CS2);
}

static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	/* Configure and set up BAR0 */
	ks_pcie_set_dbi_mode(ks_pcie);

	/* Enable BAR0 */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);

	ks_pcie_clear_dbi_mode(ks_pcie);

	/*
	 * For BAR0, just setting bus address for inbound writes (MSI) should
	 * be sufficient.  Use physical address to avoid any conflicts.
	 */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);

	pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
	return dw_pcie_allocate_domains(pp);
}

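/*
 * Dispatch one pending INTx: if the status bit for this line is set,
 * forward it to the legacy IRQ domain, then EOI the interrupt at the
 * application register level.
 */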
static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
				      int offset)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	u32 pending;

	pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));

	if (BIT(0) & pending) {
		dev_dbg(dev, ": irq: irq_offset %d", offset);
		generic_handle_domain_irq(ks_pcie->legacy_irq_domain, offset);
	}

	/* EOI the INTx interrupt */
	ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
}

static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
	ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
}

static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
{
	u32 reg;
	struct device *dev = ks_pcie->pci->dev;

	reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS);
	if (!reg)
		return IRQ_NONE;

	if (reg & ERR_SYS)
		dev_err(dev, "System Error\n");

	if (reg & ERR_FATAL)
		dev_err(dev, "Fatal Error\n");

	if (reg & ERR_NONFATAL)
		dev_dbg(dev, "Non Fatal Error\n");

	if (reg & ERR_CORR)
		dev_dbg(dev, "Correctable Error\n");

	if (!ks_pcie->is_am6 && (reg & ERR_AXI))
		dev_err(dev, "AXI tag lookup fatal Error\n");

	if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER)))
		dev_err(dev, "ECRC Error\n");

	ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);

	return IRQ_HANDLED;
}

static void ks_pcie_ack_legacy_irq(struct irq_data *d)
{
}

static void ks_pcie_mask_legacy_irq(struct irq_data *d)
{
}

static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
{
}

static struct irq_chip ks_pcie_legacy_irq_chip = {
	.name = "Keystone-PCI-Legacy-IRQ",
	.irq_ack = ks_pcie_ack_legacy_irq,
	.irq_mask = ks_pcie_mask_legacy_irq,
	.irq_unmask = ks_pcie_unmask_legacy_irq,
};

static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
				       unsigned int irq,
				       irq_hw_number_t hw_irq)
{
	irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, d->host_data);

	return 0;
}

static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
	.map = ks_pcie_init_legacy_irq_map,
	.xlate = irq_domain_xlate_onetwocell,
};

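/*
 * Set up the RC application registers: disable BAR0/BAR1 for inbound
 * access and, except on AM654 (which uses the DesignWare ATU instead),
 * program up to num_viewport 8 MB outbound windows with a direct 1:1
 * address mapping before enabling outbound translation.
 */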
static int ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
	u32 val;
	u32 num_viewport = ks_pcie->num_viewport;
	struct dw_pcie *pci = ks_pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	struct resource_entry *entry;
	struct resource *mem;
	u64 start, end;
	int i;

	entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
	if (!entry)
		return -ENODEV;

	mem = entry->res;
	start = mem->start;
	end = mem->end;

	/* Disable BARs for inbound access */
	ks_pcie_set_dbi_mode(ks_pcie);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
	ks_pcie_clear_dbi_mode(ks_pcie);

	if (ks_pcie->is_am6)
		return 0;

	val = ilog2(OB_WIN_SIZE);
	ks_pcie_app_writel(ks_pcie, OB_SIZE, val);

	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
	for (i = 0; i < num_viewport && (start < end); i++) {
		ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i),
				   lower_32_bits(start) | OB_ENABLEN);
		ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
				   upper_32_bits(start));
		start += OB_WIN_SIZE * SZ_1M;
	}

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val |= OB_XLAT_EN_VAL;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);

	return 0;
}

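/*
 * Map a config access to a device below the root bus: program CFG_SETUP
 * with the target BDF (Type 1 beyond the immediate child bus) so that
 * accesses through va_cfg0_base reach the selected function.
 */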
static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
					   unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u32 reg;

	/*
	 * Checking whether the link is up here is a last line of defense
	 * against platforms that forward errors on the system bus as
	 * SError upon PCI configuration transactions issued when the link
	 * is down. This check is racy by definition and does not stop
	 * the system from triggering an SError if the link goes down
	 * after this check is performed.
	 */
	if (!dw_pcie_link_up(pci))
		return NULL;

	reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
		CFG_FUNC(PCI_FUNC(devfn));
	if (!pci_is_root_bus(bus->parent))
		reg |= CFG_TYPE1;
	ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);

	return pp->va_cfg0_base + where;
}

static struct pci_ops ks_child_pcie_ops = {
	.map_bus = ks_pcie_other_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

static struct pci_ops ks_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

/**
 * ks_pcie_link_up() - Check if the link is up
 * @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host
 *	 controller driver information.
 */
static int ks_pcie_link_up(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
	val &= PORT_LOGIC_LTSSM_STATE_MASK;
	return (val == PORT_LOGIC_LTSSM_STATE_L0);
}

static void ks_pcie_stop_link(struct dw_pcie *pci)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u32 val;

	/* Disable Link training */
	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val &= ~LTSSM_EN_VAL;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
}

static int ks_pcie_start_link(struct dw_pcie *pci)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u32 val;

	/* Initiate Link Training */
	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);

	return 0;
}

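/*
 * Device-enable fixup: clamp MRRS to 256 bytes behind Keystone RCs and to
 * 128 bytes behind AM654 PG1.0 RCs (identified via the PID register).
 */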
static void ks_pcie_quirk(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;
	struct keystone_pcie *ks_pcie;
	struct device *bridge_dev;
	struct pci_dev *bridge;
	u32 val;

	static const struct pci_device_id rc_pci_devids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
		 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
		 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
		 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
		 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
		{ 0, },
	};
	static const struct pci_device_id am6_pci_devids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ 0, },
	};

	if (pci_is_root_bus(bus))
		bridge = dev;

	/* look for the host bridge */
	while (!pci_is_root_bus(bus)) {
		bridge = bus->self;
		bus = bus->parent;
	}

	if (!bridge)
		return;

	/*
	 * The Keystone PCI controller has a h/w limitation of a 256-byte
	 * maximum read request size. It can't handle anything higher than
	 * this, so force this limit on all downstream devices.
	 */
	if (pci_match_id(rc_pci_devids, bridge)) {
		if (pcie_get_readrq(dev) > 256) {
			dev_info(&dev->dev, "limiting MRRS to 256 bytes\n");
			pcie_set_readrq(dev, 256);
		}
	}

	/*
	 * Memory transactions fail with the PCI controller in AM654 PG1.0
	 * when MRRS is set to more than 128 bytes. Force the MRRS to
	 * 128 bytes in all downstream devices.
	 */
	if (pci_match_id(am6_pci_devids, bridge)) {
		bridge_dev = pci_get_host_bridge_device(dev);
		if (!bridge_dev || !bridge_dev->parent)
			return;

		ks_pcie = dev_get_drvdata(bridge_dev->parent);
		if (!ks_pcie)
			return;

		val = ks_pcie_app_readl(ks_pcie, PID);
		val &= RTL;
		val >>= RTL_SHIFT;
		if (val != AM6_PCI_PG1_RTL_VER)
			return;

		if (pcie_get_readrq(dev) > 128) {
			dev_info(&dev->dev, "limiting MRRS to 128 bytes\n");
			pcie_set_readrq(dev, 128);
		}
	}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);

static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
{
	unsigned int irq = desc->irq_data.hwirq;
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	u32 offset = irq - ks_pcie->msi_host_irq;
	struct dw_pcie *pci = ks_pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	struct device *dev = pci->dev;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 vector, reg, pos;

	dev_dbg(dev, "%s, irq %d\n", __func__, irq);

	/*
	 * The chained IRQ handler installation replaces the normal interrupt
	 * driver handler, so we need to take care of the mask/unmask and
	 * ack operations.
	 */
	chained_irq_enter(chip, desc);

	reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset));
	/*
	 * MSI0 status bits 0-3 show vectors 0, 8, 16, 24; MSI1 status bits
	 * 0-3 show vectors 1, 9, 17, 25; and so forth.
	 */
	for (pos = 0; pos < 4; pos++) {
		if (!(reg & BIT(pos)))
			continue;

		vector = offset + (pos << 3);
		dev_dbg(dev, "irq: bit %d, vector %d\n", pos, vector);
		generic_handle_domain_irq(pp->irq_domain, vector);
	}

	chained_irq_exit(chip, desc);
}

/**
 * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
 * @desc: Pointer to irq descriptor
 *
 * Traverse through pending legacy interrupts and invoke handler for each. Also
 * takes care of interrupt controller level mask/ack operation.
 */
static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
	struct irq_chip *chip = irq_desc_get_chip(desc);

	dev_dbg(dev, ": Handling legacy irq %d\n", irq);

	/*
	 * The chained IRQ handler installation replaces the normal interrupt
	 * driver handler, so we need to take care of the mask/unmask and
	 * ack operations.
	 */
	chained_irq_enter(chip, desc);
	ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
	chained_irq_exit(chip, desc);
}

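/*
 * A minimal sketch of the DT layout the two parsers below expect; the
 * interrupt specifiers are placeholders, not taken from a real board file.
 * A sibling "legacy-interrupt-controller" node follows the same shape:
 *
 *	msi-interrupt-controller {
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		interrupts = <GIC_SPI 30 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 31 IRQ_TYPE_EDGE_RISING>;
 *	};
 */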
static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie)
{
	struct device *dev = ks_pcie->pci->dev;
	struct device_node *np = ks_pcie->np;
	struct device_node *intc_np;
	struct irq_data *irq_data;
	int irq_count, irq, ret, i;

	if (!IS_ENABLED(CONFIG_PCI_MSI))
		return 0;

	intc_np = of_get_child_by_name(np, "msi-interrupt-controller");
	if (!intc_np) {
		if (ks_pcie->is_am6)
			return 0;
		dev_warn(dev, "msi-interrupt-controller node is absent\n");
		return -EINVAL;
	}

	irq_count = of_irq_count(intc_np);
	if (!irq_count) {
		dev_err(dev, "No IRQ entries in msi-interrupt-controller\n");
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < irq_count; i++) {
		irq = irq_of_parse_and_map(intc_np, i);
		if (!irq) {
			ret = -EINVAL;
			goto err;
		}

		if (!ks_pcie->msi_host_irq) {
			irq_data = irq_get_irq_data(irq);
			if (!irq_data) {
				ret = -EINVAL;
				goto err;
			}
			ks_pcie->msi_host_irq = irq_data->hwirq;
		}

		irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler,
						 ks_pcie);
	}

	of_node_put(intc_np);
	return 0;

err:
	of_node_put(intc_np);
	return ret;
}

static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
{
	struct device *dev = ks_pcie->pci->dev;
	struct irq_domain *legacy_irq_domain;
	struct device_node *np = ks_pcie->np;
	struct device_node *intc_np;
	int irq_count, irq, ret = 0, i;

	intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
	if (!intc_np) {
		/*
		 * Since legacy interrupts are modeled as edge interrupts in
		 * AM6, keep it disabled for now.
		 */
		if (ks_pcie->is_am6)
			return 0;
		dev_warn(dev, "legacy-interrupt-controller node is absent\n");
		return -EINVAL;
	}

	irq_count = of_irq_count(intc_np);
	if (!irq_count) {
		dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n");
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < irq_count; i++) {
		irq = irq_of_parse_and_map(intc_np, i);
		if (!irq) {
			ret = -EINVAL;
			goto err;
		}
		ks_pcie->legacy_host_irqs[i] = irq;

		irq_set_chained_handler_and_data(irq,
						 ks_pcie_legacy_irq_handler,
						 ks_pcie);
	}

	legacy_irq_domain =
		irq_domain_add_linear(intc_np, PCI_NUM_INTX,
				      &ks_pcie_legacy_irq_domain_ops, NULL);
	if (!legacy_irq_domain) {
		dev_err(dev, "Failed to add irq domain for legacy irqs\n");
		ret = -EINVAL;
		goto err;
	}
	ks_pcie->legacy_irq_domain = legacy_irq_domain;

	for (i = 0; i < PCI_NUM_INTX; i++)
		ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);

err:
	of_node_put(intc_np);
	return ret;
}

#ifdef CONFIG_ARM
/*
 * When a PCI device does not exist during config cycles, keystone host
 * gets a bus error instead of returning 0xffffffff (PCI_ERROR_RESPONSE).
 * This handler always returns 0 for this kind of fault.
 */
static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
			 struct pt_regs *regs)
{
	unsigned long instr = *(unsigned long *) instruction_pointer(regs);

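	/*
	 * This mask is a loose match for the ARM halfword/signed-byte load
	 * encodings (L bit plus bits [7] and [4] set): fake an all-ones
	 * read in the destination register (bits [15:12]) and step past
	 * the faulting instruction.
	 */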
	if ((instr & 0x0e100090) == 0x00100090) {
		int reg = (instr >> 12) & 15;

		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
	}

	return 0;
}
#endif

static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
{
	int ret;
	unsigned int id;
	struct regmap *devctrl_regs;
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int offset = 0;

	devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
	if (IS_ERR(devctrl_regs))
		return PTR_ERR(devctrl_regs);

	/* Do not error out to maintain old DT compatibility */
	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-id", 1, 0, &args);
	if (!ret)
		offset = args.args[0];

	ret = regmap_read(devctrl_regs, offset, &id);
	if (ret)
		return ret;

	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

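/*
 * Host bring-up: install the Keystone config-space ops, wire up the INTx
 * and MSI chained interrupts, stop link training (the DWC core restarts
 * it later), program the outbound windows and the vendor/device ID.
 */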
static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	int ret;

	pp->bridge->ops = &ks_pcie_ops;
	if (!ks_pcie->is_am6)
		pp->bridge->child_ops = &ks_child_pcie_ops;

	ret = ks_pcie_config_legacy_irq(ks_pcie);
	if (ret)
		return ret;

	ret = ks_pcie_config_msi_irq(ks_pcie);
	if (ret)
		return ret;

	ks_pcie_stop_link(pci);
	ret = ks_pcie_setup_rc_app_regs(ks_pcie);
	if (ret)
		return ret;

	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
			pci->dbi_base + PCI_IO_BASE);

	ret = ks_pcie_init_id(ks_pcie);
	if (ret < 0)
		return ret;

#ifdef CONFIG_ARM
	/*
	 * PCIe access errors that result in OCP errors are caught by ARM as
	 * "External aborts"
	 */
	hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
			"Asynchronous external abort");
#endif

	return 0;
}

static const struct dw_pcie_host_ops ks_pcie_host_ops = {
	.host_init = ks_pcie_host_init,
	.msi_host_init = ks_pcie_msi_host_init,
};

static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
	.host_init = ks_pcie_host_init,
};

static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
{
	struct keystone_pcie *ks_pcie = priv;

	return ks_pcie_handle_error_irq(ks_pcie);
}

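/*
 * On Keystone the BAR mask registers are overlaid on the regular DBI
 * space, selected by DBI_CS2, so DBI2 writes are bracketed with set/clear
 * of DBI mode.
 */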
static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
				     u32 reg, size_t size, u32 val)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	ks_pcie_set_dbi_mode(ks_pcie);
	dw_pcie_write(base + reg, size, val);
	ks_pcie_clear_dbi_mode(ks_pcie);
}

static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
	.start_link = ks_pcie_start_link,
	.stop_link = ks_pcie_stop_link,
	.link_up = ks_pcie_link_up,
	.write_dbi2 = ks_pcie_am654_write_dbi2,
};

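/*
 * Fix BAR0 as a 16 KB (APP_ADDR_SPACE_0) 32-bit memory BAR: the size mask
 * is written through DBI2 and the flags through DBI.
 */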
static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	int flags;

	ep->page_size = AM654_WIN_SIZE;
	flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
	dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
}

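/*
 * Raise a legacy interrupt from EP mode: enable the INTx line named by
 * PCI_INTERRUPT_PIN, assert the EP interrupt for about 1 ms, then clear
 * and disable it again.
 */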
static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
{
	struct dw_pcie *pci = ks_pcie->pci;
	u8 int_pin;

	int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN);
	if (int_pin == 0 || int_pin > 4)
		return;

	ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin),
			   INT_ENABLE);
	ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE);
	mdelay(1);
	ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE);
	ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin),
			   INT_ENABLE);
}

static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				   enum pci_epc_irq_type type,
				   u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		ks_pcie_am654_raise_legacy_irq(ks_pcie);
		break;
	case PCI_EPC_IRQ_MSI:
		dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
		break;
	case PCI_EPC_IRQ_MSIX:
		dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
		break;
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
		return -EINVAL;
	}

	return 0;
}

static const struct pci_epc_features ks_pcie_am654_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
	.reserved_bar = 1 << BAR_0 | 1 << BAR_1,
	.bar_fixed_64bit = 1 << BAR_0,
	.bar_fixed_size[2] = SZ_1M,
	.bar_fixed_size[3] = SZ_64K,
	.bar_fixed_size[4] = 256,
	.bar_fixed_size[5] = SZ_1M,
	.align = SZ_1M,
};

static const struct pci_epc_features*
ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
{
	return &ks_pcie_am654_epc_features;
}

static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
	.ep_init = ks_pcie_am654_ep_init,
	.raise_irq = ks_pcie_am654_raise_irq,
	.get_features = &ks_pcie_am654_get_features,
};

static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
{
	int num_lanes = ks_pcie->num_lanes;

	while (num_lanes--) {
		phy_power_off(ks_pcie->phy[num_lanes]);
		phy_exit(ks_pcie->phy[num_lanes]);
	}
}

static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
{
	int i;
	int ret;
	int num_lanes = ks_pcie->num_lanes;

	for (i = 0; i < num_lanes; i++) {
		ret = phy_reset(ks_pcie->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_init(ks_pcie->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(ks_pcie->phy[i]);
		if (ret < 0) {
			phy_exit(ks_pcie->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(ks_pcie->phy[i]);
		phy_exit(ks_pcie->phy[i]);
	}

	return ret;
}

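/*
 * Program RC mode and enable SYSCLOCKOUT through the ti,syscon-pcie-mode
 * syscon; an optional phandle argument supplies the register offset.
 */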
static int ks_pcie_set_mode(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int offset = 0;
	struct regmap *syscon;
	u32 val;
	u32 mask;
	int ret = 0;

	syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
	if (IS_ERR(syscon))
		return 0;

	/* Do not error out to maintain old DT compatibility */
	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args);
	if (!ret)
		offset = args.args[0];

	mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
	val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;

	ret = regmap_update_bits(syscon, offset, mask, val);
	if (ret) {
		dev_err(dev, "failed to set pcie mode\n");
		return ret;
	}

	return 0;
}

static int ks_pcie_am654_set_mode(struct device *dev,
				  enum dw_pcie_device_mode mode)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int offset = 0;
	struct regmap *syscon;
	u32 val;
	u32 mask;
	int ret = 0;

	syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
	if (IS_ERR(syscon))
		return 0;

	/* Do not error out to maintain old DT compatibility */
	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args);
	if (!ret)
		offset = args.args[0];

	mask = AM654_PCIE_DEV_TYPE_MASK;

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		val = RC;
		break;
	case DW_PCIE_EP_TYPE:
		val = EP;
		break;
	default:
		dev_err(dev, "INVALID device type %d\n", mode);
		return -EINVAL;
	}

	ret = regmap_update_bits(syscon, offset, mask, val);
	if (ret) {
		dev_err(dev, "failed to set pcie mode\n");
		return ret;
	}

	return 0;
}

static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
	.host_ops = &ks_pcie_host_ops,
	.mode = DW_PCIE_RC_TYPE,
	.version = DW_PCIE_VER_365A,
};

static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
	.host_ops = &ks_pcie_am654_host_ops,
	.mode = DW_PCIE_RC_TYPE,
	.version = DW_PCIE_VER_490A,
};

static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
	.ep_ops = &ks_pcie_am654_ep_ops,
	.mode = DW_PCIE_EP_TYPE,
	.version = DW_PCIE_VER_490A,
};

static const struct of_device_id ks_pcie_of_match[] = {
	{
		.type = "pci",
		.data = &ks_pcie_rc_of_data,
		.compatible = "ti,keystone-pcie",
	},
	{
		.data = &ks_pcie_am654_rc_of_data,
		.compatible = "ti,am654-pcie-rc",
	},
	{
		.data = &ks_pcie_am654_ep_of_data,
		.compatible = "ti,am654-pcie-ep",
	},
	{ },
};

static int ks_pcie_probe(struct platform_device *pdev)
{
	const struct dw_pcie_host_ops *host_ops;
	const struct dw_pcie_ep_ops *ep_ops;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct ks_pcie_of_data *data;
	enum dw_pcie_device_mode mode;
	struct dw_pcie *pci;
	struct keystone_pcie *ks_pcie;
	struct device_link **link;
	struct gpio_desc *gpiod;
	struct resource *res;
	void __iomem *base;
	u32 num_viewport;
	struct phy **phy;
	u32 num_lanes;
	char name[10];
	u32 version;
	int ret;
	int irq;
	int i;

	data = of_device_get_match_data(dev);
	if (!data)
		return -EINVAL;

	version = data->version;
	host_ops = data->host_ops;
	ep_ops = data->ep_ops;
	mode = data->mode;

	ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
	if (!ks_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
	ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ks_pcie->va_app_base))
		return PTR_ERR(ks_pcie->va_app_base);

	ks_pcie->app = *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics");
	base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (of_device_is_compatible(np, "ti,am654-pcie-rc"))
		ks_pcie->is_am6 = true;

	pci->dbi_base = base;
	pci->dbi_base2 = base;
	pci->dev = dev;
	pci->ops = &ks_pcie_dw_pcie_ops;
	pci->version = version;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
			  "ks-pcie-error-irq", ks_pcie);
	if (ret < 0) {
		dev_err(dev, "failed to request error IRQ %d\n",
			irq);
		return ret;
	}

	ret = of_property_read_u32(np, "num-lanes", &num_lanes);
	if (ret)
		num_lanes = 1;

	phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	for (i = 0; i < num_lanes; i++) {
		snprintf(name, sizeof(name), "pcie-phy%d", i);
		phy[i] = devm_phy_optional_get(dev, name);
		if (IS_ERR(phy[i])) {
			ret = PTR_ERR(phy[i]);
			goto err_link;
		}

		if (!phy[i])
			continue;

		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			ret = -EINVAL;
			goto err_link;
		}
	}

	ks_pcie->np = np;
	ks_pcie->pci = pci;
	ks_pcie->link = link;
	ks_pcie->num_lanes = num_lanes;
	ks_pcie->phy = phy;

	gpiod = devm_gpiod_get_optional(dev, "reset",
					GPIOD_OUT_LOW);
	if (IS_ERR(gpiod)) {
		ret = PTR_ERR(gpiod);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get reset GPIO\n");
		goto err_link;
	}

	/* Obtain references to the PHYs */
	for (i = 0; i < num_lanes; i++)
		phy_pm_runtime_get_sync(ks_pcie->phy[i]);

	ret = ks_pcie_enable_phy(ks_pcie);

	/* Release references to the PHYs */
	for (i = 0; i < num_lanes; i++)
		phy_pm_runtime_put_sync(ks_pcie->phy[i]);

	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		goto err_link;
	}

	platform_set_drvdata(pdev, ks_pcie);
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_get_sync;
	}

	if (dw_pcie_ver_is_ge(pci, 480A))
		ret = ks_pcie_am654_set_mode(dev, mode);
	else
		ret = ks_pcie_set_mode(dev);
	if (ret < 0)
		goto err_get_sync;

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
			ret = -ENODEV;
			goto err_get_sync;
		}

		ret = of_property_read_u32(np, "num-viewport", &num_viewport);
		if (ret < 0) {
			dev_err(dev, "unable to read *num-viewport* property\n");
			goto err_get_sync;
		}

		/*
		 * "Power Sequencing and Reset Signal Timings" table in
		 * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0
		 * indicates PERST# should be deasserted after a minimum of
		 * 100 us once REFCLK is stable. The REFCLK to the connector
		 * in RC mode is selected while enabling the PHY. So deassert
		 * PERST# after 100 us.
		 */
		if (gpiod) {
			usleep_range(100, 200);
			gpiod_set_value_cansleep(gpiod, 1);
		}

		ks_pcie->num_viewport = num_viewport;
		pci->pp.ops = host_ops;
		ret = dw_pcie_host_init(&pci->pp);
		if (ret < 0)
			goto err_get_sync;
		break;
	case DW_PCIE_EP_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) {
			ret = -ENODEV;
			goto err_get_sync;
		}

		pci->ep.ops = ep_ops;
		ret = dw_pcie_ep_init(&pci->ep);
		if (ret < 0)
			goto err_get_sync;
		break;
	default:
		dev_err(dev, "INVALID device type %d\n", mode);
	}

	ks_pcie_enable_error_irq(ks_pcie);

	return 0;

err_get_sync:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	ks_pcie_disable_phy(ks_pcie);

err_link:
	while (--i >= 0 && link[i])
		device_link_del(link[i]);

	return ret;
}

static int ks_pcie_remove(struct platform_device *pdev)
{
	struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
	struct device_link **link = ks_pcie->link;
	int num_lanes = ks_pcie->num_lanes;
	struct device *dev = &pdev->dev;

	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	ks_pcie_disable_phy(ks_pcie);
	while (num_lanes--)
		device_link_del(link[num_lanes]);

	return 0;
}

static struct platform_driver ks_pcie_driver = {
	.probe  = ks_pcie_probe,
	.remove = ks_pcie_remove,
	.driver = {
		.name	= "keystone-pcie",
		.of_match_table = ks_pcie_of_match,
	},
};
builtin_platform_driver(ks_pcie_driver);