xref: /openbmc/linux/arch/sh/drivers/pci/pcie-sh7786.c (revision 7dd65feb)
/*
 * Low-Level PCI Express Support for the SH7786
 *
 *  Copyright (C) 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "pcie-sh7786.h"
#include <asm/sizes.h>

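/*
 * Note: pci_read_reg()/pci_write_reg() and the SH4A_PCIE* register
 * definitions come from pcie-sh7786.h. Judging by their use below they
 * are plain 32-bit accessors on the channel's reg_base window, with
 * pci_write_reg() taking the value before the register offset.
 */
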
struct sh7786_pcie_port {
	struct pci_channel	*hose;
	unsigned int		index;
	int			endpoint;
	int			link;
};

static struct sh7786_pcie_port *sh7786_pcie_ports;
static unsigned int nr_ports;

static struct sh7786_pcie_hwops {
	int (*core_init)(void);
	int (*port_init_hw)(struct sh7786_pcie_port *port);
} *sh7786_pcie_hwops;

static struct resource sh7786_pci_32bit_mem_resources[] = {
	{
		.name	= "pci0_mem",
		.start	= SH4A_PCIMEM_BASEA,
		.end	= SH4A_PCIMEM_BASEA + SZ_64M - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "pci1_mem",
		.start	= SH4A_PCIMEM_BASEA1,
		.end	= SH4A_PCIMEM_BASEA1 + SZ_64M - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "pci2_mem",
		.start	= SH4A_PCIMEM_BASEA2,
		.end	= SH4A_PCIMEM_BASEA2 + SZ_64M - 1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct resource sh7786_pci_29bit_mem_resource = {
	.start	= SH4A_PCIMEM_BASE,
	.end	= SH4A_PCIMEM_BASE + SZ_64M - 1,
	.flags	= IORESOURCE_MEM,
};
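
/*
 * In 29-bit mode every port shares the single 64MB window above, while
 * 32-bit mode hands each port its own entry from
 * sh7786_pci_32bit_mem_resources[]; the choice is made from MODE_PIN10
 * at probe time in sh7786_pcie_init().
 */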

static struct resource sh7786_pci_io_resources[] = {
	{
		.name	= "pci0_io",
		.start	= SH4A_PCIIO_BASE,
		.end	= SH4A_PCIIO_BASE + SZ_8M - 1,
		.flags	= IORESOURCE_IO,
	}, {
		.name	= "pci1_io",
		.start	= SH4A_PCIIO_BASE1,
		.end	= SH4A_PCIIO_BASE1 + SZ_8M - 1,
		.flags	= IORESOURCE_IO,
	}, {
		.name	= "pci2_io",
		.start	= SH4A_PCIIO_BASE2,
		.end	= SH4A_PCIIO_BASE2 + SZ_4M - 1,
		.flags	= IORESOURCE_IO,
	},
};

extern struct pci_ops sh7786_pci_ops;

#define DEFINE_CONTROLLER(start, idx)				\
{								\
	.pci_ops	= &sh7786_pci_ops,			\
	.reg_base	= start,				\
	/* mem_resource filled in at probe time */		\
	.mem_offset	= 0,					\
	.io_resource	= &sh7786_pci_io_resources[idx],	\
	.io_offset	= 0,					\
}

static struct pci_channel sh7786_pci_channels[] = {
	DEFINE_CONTROLLER(0xfe000000, 0),
	DEFINE_CONTROLLER(0xfe200000, 1),
	DEFINE_CONTROLLER(0xfcc00000, 2),
};
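
/*
 * DEFINE_CONTROLLER(0xfe000000, 0) above simply expands to a channel
 * with .reg_base = 0xfe000000 and .io_resource =
 * &sh7786_pci_io_resources[0]; mem_resource is filled in at probe time.
 * The third channel is only brought up when MODE_PIN12 selects three
 * ports (see sh7786_pcie_core_init()).
 */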

static int phy_wait_for_ack(struct pci_channel *chan)
{
	unsigned int timeout = 100;

	while (timeout--) {
		if (pci_read_reg(chan, SH4A_PCIEPHYADRR) & (1 << BITS_ACK))
			return 0;

		udelay(100);
	}

	return -ETIMEDOUT;
}

static int pci_wait_for_irq(struct pci_channel *chan, unsigned int mask)
{
	unsigned int timeout = 100;

	while (timeout--) {
		if ((pci_read_reg(chan, SH4A_PCIEINTR) & mask) == mask)
			return 0;

		udelay(100);
	}

	return -ETIMEDOUT;
}

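/*
 * The PHY is programmed indirectly through SH4A_PCIEPHYADRR and
 * SH4A_PCIEPHYDOUTR: enable the PHY clock, latch the write data and the
 * command/lane/address word, wait for the ack, clear the command and
 * wait for the ack again, then gate the clock back off.
 */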
static void phy_write_reg(struct pci_channel *chan, unsigned int addr,
			  unsigned int lane, unsigned int data)
{
	unsigned long phyaddr, ctrl;

	phyaddr = (1 << BITS_CMD) + ((lane & 0xf) << BITS_LANE) +
			((addr & 0xff) << BITS_ADR);

	/* Enable clock */
	ctrl = pci_read_reg(chan, SH4A_PCIEPHYCTLR);
	ctrl |= (1 << BITS_CKE);
	pci_write_reg(chan, ctrl, SH4A_PCIEPHYCTLR);

	/* Set write data */
	pci_write_reg(chan, data, SH4A_PCIEPHYDOUTR);
	pci_write_reg(chan, phyaddr, SH4A_PCIEPHYADRR);

	phy_wait_for_ack(chan);

	/* Clear command */
	pci_write_reg(chan, 0, SH4A_PCIEPHYADRR);

	phy_wait_for_ack(chan);

	/* Disable clock */
	ctrl = pci_read_reg(chan, SH4A_PCIEPHYCTLR);
	ctrl &= ~(1 << BITS_CKE);
	pci_write_reg(chan, ctrl, SH4A_PCIEPHYCTLR);
}

static int phy_init(struct pci_channel *chan)
{
	unsigned int timeout = 100;

	/* Initialize the phy */
	phy_write_reg(chan, 0x60, 0xf, 0x004b008b);
	phy_write_reg(chan, 0x61, 0xf, 0x00007b41);
	phy_write_reg(chan, 0x64, 0xf, 0x00ff4f00);
	phy_write_reg(chan, 0x65, 0xf, 0x09070907);
	phy_write_reg(chan, 0x66, 0xf, 0x00000010);
	phy_write_reg(chan, 0x74, 0xf, 0x0007001c);
	phy_write_reg(chan, 0x79, 0xf, 0x01fc000d);

	/* Deassert Standby */
	phy_write_reg(chan, 0x67, 0xf, 0x00000400);

	/* Wait for the PHY to report ready. */
	while (timeout--) {
		if (pci_read_reg(chan, SH4A_PCIEPHYSR))
			return 0;

		udelay(100);
	}

	return -ETIMEDOUT;
}

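/*
 * Per-port bring-up: configure the type1/endpoint header and the PCIe
 * capability, set link width, timeouts and training parameters, start
 * the core, wait for the MASK_INT_TX_CTRL event, then program the
 * command register and the address translation windows.
 */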
static int pcie_init(struct sh7786_pcie_port *port)
{
	struct pci_channel *chan = port->hose;
	unsigned int data;
	int ret;

	/* Begin initialization */
	pci_write_reg(chan, 0, SH4A_PCIETCTLR);

	/* Initialize as type1. */
	data = pci_read_reg(chan, SH4A_PCIEPCICONF3);
	data &= ~(0x7f << 16);
	data |= PCI_HEADER_TYPE_BRIDGE << 16;
	pci_write_reg(chan, data, SH4A_PCIEPCICONF3);

	/* Initialize default capabilities. */
	data = pci_read_reg(chan, SH4A_PCIEEXPCAP0);
	data &= ~(PCI_EXP_FLAGS_TYPE << 16);

	if (port->endpoint)
		data |= PCI_EXP_TYPE_ENDPOINT << 20;
	else
		data |= PCI_EXP_TYPE_ROOT_PORT << 20;

	data |= PCI_CAP_ID_EXP;
	pci_write_reg(chan, data, SH4A_PCIEEXPCAP0);

	/* Enable x4 link width and extended sync. */
	data = pci_read_reg(chan, SH4A_PCIEEXPCAP4);
	data &= ~(PCI_EXP_LNKSTA_NLW << 16);
	data |= (1 << 22) | PCI_EXP_LNKCTL_ES;
	pci_write_reg(chan, data, SH4A_PCIEEXPCAP4);

	/* Set the completion timer timeout to the maximum 50ms. */
	data = pci_read_reg(chan, SH4A_PCIETLCTLR);
	data &= ~0xffff;
	data |= 0x32 << 8;
	pci_write_reg(chan, data, SH4A_PCIETLCTLR);

	/*
	 * Set fast training sequences to the maximum 255,
	 * and enable MAC data scrambling.
	 */
	data = pci_read_reg(chan, SH4A_PCIEMACCTLR);
	data &= ~PCIEMACCTLR_SCR_DIS;
	data |= (0xff << 16);
	pci_write_reg(chan, data, SH4A_PCIEMACCTLR);

	/* Finish initialization */
	data = pci_read_reg(chan, SH4A_PCIETCTLR);
	data |= 0x1;
	pci_write_reg(chan, data, SH4A_PCIETCTLR);

	/* Enable DL_Active Interrupt generation */
	data = pci_read_reg(chan, SH4A_PCIEDLINTENR);
	data |= PCIEDLINTENR_DLL_ACT_ENABLE;
	pci_write_reg(chan, data, SH4A_PCIEDLINTENR);

	/* Disable MAC data scrambling. */
	data = pci_read_reg(chan, SH4A_PCIEMACCTLR);
	data |= PCIEMACCTLR_SCR_DIS | (0xff << 16);
	pci_write_reg(chan, data, SH4A_PCIEMACCTLR);

	ret = pci_wait_for_irq(chan, MASK_INT_TX_CTRL);
	if (unlikely(ret != 0))
		return -ENODEV;

	/* Enable I/O, memory and bus mastering in the command register. */
	pci_write_reg(chan, 0x00100007, SH4A_PCIEPCICONF1);
	pci_write_reg(chan, 0x80888000, SH4A_PCIETXVC0DCTLR);
	pci_write_reg(chan, 0x00222000, SH4A_PCIERXVC0DCTLR);
	pci_write_reg(chan, 0x000050A0, SH4A_PCIEEXPCAP2);

	wmb();

	data = pci_read_reg(chan, SH4A_PCIEMACSR);
	printk(KERN_NOTICE "PCI: PCIe#%d link width %d\n",
	       port->index, (data >> 20) & 0x3f);

	/* Program address translation windows 0 and 2 (mask, base, enable). */
	pci_write_reg(chan, 0x007c0000, SH4A_PCIEPAMR0);
	pci_write_reg(chan, 0x00000000, SH4A_PCIEPARH0);
	pci_write_reg(chan, 0x00000000, SH4A_PCIEPARL0);
	pci_write_reg(chan, 0x80000100, SH4A_PCIEPTCTLR0);

	pci_write_reg(chan, 0x03fc0000, SH4A_PCIEPAMR2);
	pci_write_reg(chan, 0x00000000, SH4A_PCIEPARH2);
	pci_write_reg(chan, 0x00000000, SH4A_PCIEPARL2);
	pci_write_reg(chan, 0x80000000, SH4A_PCIEPTCTLR2);

	return 0;
}

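/*
 * Every slot/pin on every port is routed to the same fixed vector;
 * IRQ 71 is presumably the PCIe interrupt as wired up by the SH7786
 * platform code.
 */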
int __init pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin)
{
	return 71;
}

static int sh7786_pcie_core_init(void)
{
	/* Return the number of ports */
	return test_mode_pin(MODE_PIN12) ? 3 : 2;
}

static int __devinit sh7786_pcie_init_hw(struct sh7786_pcie_port *port)
{
	int ret;

	ret = phy_init(port->hose);
	if (unlikely(ret < 0))
		return ret;

	/*
	 * Check if we are configured in endpoint or root complex mode;
	 * this is a fixed pin setting that applies to all PCIe ports.
	 */
	port->endpoint = test_mode_pin(MODE_PIN11);

	ret = pcie_init(port);
	if (unlikely(ret < 0))
		return ret;

	register_pci_controller(port->hose);

	return 0;
}

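/*
 * Only the 65nm part is handled here; the hwops indirection presumably
 * leaves room for other silicon variants to plug in their own core and
 * port init routines.
 */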
static struct sh7786_pcie_hwops sh7786_65nm_pcie_hwops __initdata = {
	.core_init	= sh7786_pcie_core_init,
	.port_init_hw	= sh7786_pcie_init_hw,
};

static int __init sh7786_pcie_init(void)
{
	int ret = 0, i;

	printk(KERN_NOTICE "PCI: Starting initialization.\n");

	sh7786_pcie_hwops = &sh7786_65nm_pcie_hwops;

	nr_ports = sh7786_pcie_hwops->core_init();
	BUG_ON(nr_ports > ARRAY_SIZE(sh7786_pci_channels));

	if (unlikely(nr_ports == 0))
		return -ENODEV;

	sh7786_pcie_ports = kzalloc(nr_ports * sizeof(struct sh7786_pcie_port),
				    GFP_KERNEL);
	if (unlikely(!sh7786_pcie_ports))
		return -ENOMEM;

	printk(KERN_NOTICE "PCI: probing %u ports.\n", nr_ports);

	for (i = 0; i < nr_ports; i++) {
		struct sh7786_pcie_port *port = sh7786_pcie_ports + i;

		port->index		= i;
		port->hose		= sh7786_pci_channels + i;
		port->hose->io_map_base	= port->hose->io_resource->start;

		/*
		 * Check if we are booting in 29 or 32-bit mode.
		 *
		 * 32-bit mode provides each controller with its own
		 * memory window, while 29-bit mode uses a shared one.
		 */
		port->hose->mem_resource = test_mode_pin(MODE_PIN10) ?
			&sh7786_pci_32bit_mem_resources[i] :
			&sh7786_pci_29bit_mem_resource;

		ret |= sh7786_pcie_hwops->port_init_hw(port);
	}

	if (unlikely(ret))
		return ret;

	return 0;
}
arch_initcall(sh7786_pcie_init);