// SPDX-License-Identifier: GPL-2.0
/*
 * Alchemy PCI host mode support.
 *
 * Copyright 2001-2003, 2007-2008 MontaVista Software Inc.
 * Author: MontaVista Software, Inc. <source@mvista.com>
 *
 * Support for all devices (greater than 16) added by David Gathright.
 */

#include <linux/clk.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/syscore_ops.h>
#include <linux/vmalloc.h>

#include <asm/dma-coherence.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/tlbmisc.h>

#ifdef CONFIG_PCI_DEBUG
#define DBG(x...) printk(KERN_DEBUG x)
#else
#define DBG(x...) do {} while (0)
#endif

#define PCI_ACCESS_READ		0
#define PCI_ACCESS_WRITE	1

struct alchemy_pci_context {
	struct pci_controller alchemy_pci_ctrl; /* leave as first member! */
	void __iomem *regs;			/* ctrl base */
	/* tools for wired entry for config space access */
	unsigned long last_elo0;
	unsigned long last_elo1;
	int wired_entry;
	struct vm_struct *pci_cfg_vm;

	unsigned long pm[12];

	int (*board_map_irq)(const struct pci_dev *d, u8 slot, u8 pin);
	int (*board_pci_idsel)(unsigned int devsel, int assert);
};

/* for syscore_ops. There's only one PCI controller on Alchemy chips, so this
 * should suffice for now.
 */
static struct alchemy_pci_context *__alchemy_pci_ctx;


/* IO/MEM resources for PCI. Keep the memres in sync with __fixup_bigphys_addr
 * in arch/mips/alchemy/common/setup.c
 */
static struct resource alchemy_pci_def_memres = {
	.start	= ALCHEMY_PCI_MEMWIN_START,
	.end	= ALCHEMY_PCI_MEMWIN_END,
	.name	= "PCI memory space",
	.flags	= IORESOURCE_MEM
};

static struct resource alchemy_pci_def_iores = {
	.start	= ALCHEMY_PCI_IOWIN_START,
	.end	= ALCHEMY_PCI_IOWIN_END,
	.name	= "PCI IO space",
	.flags	= IORESOURCE_IO
};

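/*
 * Rewrite one indexed TLB entry with the given EntryLo0/EntryLo1/EntryHi/
 * PageMask values, preserving the current ASID and PageMask around the
 * write.  The only caller, config_access(), runs this with interrupts
 * disabled via local_irq_save().
 */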
static void mod_wired_entry(int entry, unsigned long entrylo0,
		unsigned long entrylo1, unsigned long entryhi,
		unsigned long pagemask)
{
	unsigned long old_pagemask;
	unsigned long old_ctx;

	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi() & MIPS_ENTRYHI_ASID;
	old_pagemask = read_c0_pagemask();
	write_c0_index(entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	tlb_write_indexed();
	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
}

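/*
 * Reserve the next free wired TLB slot for the config space mapping and
 * invalidate the cached EntryLo values so the first config_access() call
 * actually programs the entry.
 */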
static void alchemy_pci_wired_entry(struct alchemy_pci_context *ctx)
{
	ctx->wired_entry = read_c0_wired();
	add_wired_entry(0, 0, (unsigned long)ctx->pci_cfg_vm->addr, PM_4K);
	ctx->last_elo0 = ctx->last_elo1 = ~0;
}

static int config_access(unsigned char access_type, struct pci_bus *bus,
			 unsigned int dev_fn, unsigned char where, u32 *data)
{
	struct alchemy_pci_context *ctx = bus->sysdata;
	unsigned int device = PCI_SLOT(dev_fn);
	unsigned int function = PCI_FUNC(dev_fn);
	unsigned long offset, status, cfg_base, flags, entryLo0, entryLo1, r;
	int error = PCIBIOS_SUCCESSFUL;

	if (device > 19) {
		*data = 0xffffffff;
		return -1;
	}

	local_irq_save(flags);
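	/* Clear a stale master-abort indication before the access: the upper
	 * half of PCI_REG_STATCMD holds the PCI status bits, which are
	 * write-one-to-clear, and 0x2000 appears to be the received-master-
	 * abort flag (PCI_STATUS_REC_MASTER_ABORT).
	 */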
	r = __raw_readl(ctx->regs + PCI_REG_STATCMD) & 0x0000ffff;
	r |= PCI_STATCMD_STATUS(0x2000);
	__raw_writel(r, ctx->regs + PCI_REG_STATCMD);
	wmb();

	/* Allow board vendors to implement their own off-chip IDSEL.
	 * If it doesn't succeed, may as well bail out at this point.
	 */
	if (ctx->board_pci_idsel(device, 1) == 0) {
		*data = 0xffffffff;
		local_irq_restore(flags);
		return -1;
	}

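	/* Bus 0 accesses use a type 0 configuration cycle: one address bit
	 * per device acts as IDSEL, hence the one-hot (1 << device) below.
	 * Other buses presumably go through the controller's type 1 window,
	 * selected by bit 31, with the bus and device numbers encoded in the
	 * address.
	 */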
	/* Setup the config window */
	if (bus->number == 0)
		cfg_base = (1 << device) << 11;
	else
		cfg_base = 0x80000000 | (bus->number << 16) | (device << 11);

	/* Setup the lower bits of the 36-bit address */
	offset = (function << 8) | (where & ~0x3);
	/* Pick up any address that falls below the page mask */
	offset |= cfg_base & ~PAGE_MASK;

	/* Page boundary */
	cfg_base = cfg_base & PAGE_MASK;

	/* To improve performance, if the current device is the same as
	 * the last device accessed, we don't touch the TLB.
	 */
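	/* EntryLo layout: PFN from bit 6 upward, cache attribute in bits 5-3,
	 * D/V/G in bits 2-0.  (6 << 26) should therefore correspond to the
	 * Alchemy PCI config space base at physical 0x6 0000 0000, (2 << 3)
	 * marks the pages uncached, and 7 sets dirty/valid/global.  EntryLo1
	 * maps the adjacent 4KB page (0x1000 >> 6).
	 */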
	entryLo0 = (6 << 26) | (cfg_base >> 6) | (2 << 3) | 7;
	entryLo1 = (6 << 26) | (cfg_base >> 6) | (0x1000 >> 6) | (2 << 3) | 7;
	if ((entryLo0 != ctx->last_elo0) || (entryLo1 != ctx->last_elo1)) {
		mod_wired_entry(ctx->wired_entry, entryLo0, entryLo1,
				(unsigned long)ctx->pci_cfg_vm->addr, PM_4K);
		ctx->last_elo0 = entryLo0;
		ctx->last_elo1 = entryLo1;
	}

	if (access_type == PCI_ACCESS_WRITE)
		__raw_writel(*data, ctx->pci_cfg_vm->addr + offset);
	else
		*data = __raw_readl(ctx->pci_cfg_vm->addr + offset);
	wmb();

	DBG("alchemy-pci: cfg access %d bus %u dev %u at %x dat %x conf %lx\n",
	    access_type, bus->number, device, where, *data, offset);

	/* check for errors, master abort */
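	/* The error flags live in the upper half of PCI_REG_STATCMD and seem
	 * to mirror the standard PCI status register: bit 29 is received
	 * master abort, and bits 31-28 together cover the parity/SERR and
	 * abort conditions handled below.
	 */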
	status = __raw_readl(ctx->regs + PCI_REG_STATCMD);
	if (status & (1 << 29)) {
		*data = 0xffffffff;
		error = -1;
		DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n",
		    access_type, bus->number, device);
	} else if ((status >> 28) & 0xf) {
		DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n",
		    device, (status >> 28) & 0xf);

		/* clear errors */
		__raw_writel(status & 0xf000ffff, ctx->regs + PCI_REG_STATCMD);

		*data = 0xffffffff;
		error = -1;
	}

	/* Take away the IDSEL. */
	(void)ctx->board_pci_idsel(device, 0);

	local_irq_restore(flags);
	return error;
}

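/*
 * config_access() always transfers a full 32-bit word, so the byte and word
 * readers below fetch the containing dword and shift/mask out the part the
 * caller asked for.
 */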
static int read_config_byte(struct pci_bus *bus, unsigned int devfn,
			    int where, u8 *val)
{
	u32 data;
	int ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data);

	if (where & 1)
		data >>= 8;
	if (where & 2)
		data >>= 16;
	*val = data & 0xff;
	return ret;
}

static int read_config_word(struct pci_bus *bus, unsigned int devfn,
			    int where, u16 *val)
{
	u32 data;
	int ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data);

	if (where & 2)
		data >>= 16;
	*val = data & 0xffff;
	return ret;
}

static int read_config_dword(struct pci_bus *bus, unsigned int devfn,
			     int where, u32 *val)
{
	return config_access(PCI_ACCESS_READ, bus, devfn, where, val);
}

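/*
 * Sub-word writes are done as read-modify-write on the containing dword:
 * read it back, splice in the new byte/word at the right offset, then write
 * the whole dword out again.
 */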
static int write_config_byte(struct pci_bus *bus, unsigned int devfn,
			     int where, u8 val)
{
	u32 data = 0;

	if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
		return -1;

	data = (data & ~(0xff << ((where & 3) << 3))) |
	       (val << ((where & 3) << 3));

	if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
		return -1;

	return PCIBIOS_SUCCESSFUL;
}

static int write_config_word(struct pci_bus *bus, unsigned int devfn,
			     int where, u16 val)
{
	u32 data = 0;

	if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
		return -1;

	data = (data & ~(0xffff << ((where & 3) << 3))) |
	       (val << ((where & 3) << 3));

	if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
		return -1;

	return PCIBIOS_SUCCESSFUL;
}

static int write_config_dword(struct pci_bus *bus, unsigned int devfn,
			      int where, u32 val)
{
	return config_access(PCI_ACCESS_WRITE, bus, devfn, where, &val);
}

static int alchemy_pci_read(struct pci_bus *bus, unsigned int devfn,
		       int where, int size, u32 *val)
{
	switch (size) {
	case 1: {
			u8 _val;
			int rc = read_config_byte(bus, devfn, where, &_val);

			*val = _val;
			return rc;
		}
	case 2: {
			u16 _val;
			int rc = read_config_word(bus, devfn, where, &_val);

			*val = _val;
			return rc;
		}
	default:
		return read_config_dword(bus, devfn, where, val);
	}
}

static int alchemy_pci_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val)
{
	switch (size) {
	case 1:
		return write_config_byte(bus, devfn, where, (u8) val);
	case 2:
		return write_config_word(bus, devfn, where, (u16) val);
	default:
		return write_config_dword(bus, devfn, where, val);
	}
}

static struct pci_ops alchemy_pci_ops = {
	.read	= alchemy_pci_read,
	.write	= alchemy_pci_write,
};

static int alchemy_pci_def_idsel(unsigned int devsel, int assert)
{
	return 1;	/* success */
}

/* save PCI controller register contents. */
static int alchemy_pci_suspend(void)
{
	struct alchemy_pci_context *ctx = __alchemy_pci_ctx;
	if (!ctx)
		return 0;

	ctx->pm[0]  = __raw_readl(ctx->regs + PCI_REG_CMEM);
	ctx->pm[1]  = __raw_readl(ctx->regs + PCI_REG_CONFIG) & 0x0009ffff;
	ctx->pm[2]  = __raw_readl(ctx->regs + PCI_REG_B2BMASK_CCH);
	ctx->pm[3]  = __raw_readl(ctx->regs + PCI_REG_B2BBASE0_VID);
	ctx->pm[4]  = __raw_readl(ctx->regs + PCI_REG_B2BBASE1_SID);
	ctx->pm[5]  = __raw_readl(ctx->regs + PCI_REG_MWMASK_DEV);
	ctx->pm[6]  = __raw_readl(ctx->regs + PCI_REG_MWBASE_REV_CCL);
	ctx->pm[7]  = __raw_readl(ctx->regs + PCI_REG_ID);
	ctx->pm[8]  = __raw_readl(ctx->regs + PCI_REG_CLASSREV);
	ctx->pm[9]  = __raw_readl(ctx->regs + PCI_REG_PARAM);
	ctx->pm[10] = __raw_readl(ctx->regs + PCI_REG_MBAR);
	ctx->pm[11] = __raw_readl(ctx->regs + PCI_REG_TIMEOUT);

	return 0;
}

static void alchemy_pci_resume(void)
{
	struct alchemy_pci_context *ctx = __alchemy_pci_ctx;
	if (!ctx)
		return;

	__raw_writel(ctx->pm[0],  ctx->regs + PCI_REG_CMEM);
	__raw_writel(ctx->pm[2],  ctx->regs + PCI_REG_B2BMASK_CCH);
	__raw_writel(ctx->pm[3],  ctx->regs + PCI_REG_B2BBASE0_VID);
	__raw_writel(ctx->pm[4],  ctx->regs + PCI_REG_B2BBASE1_SID);
	__raw_writel(ctx->pm[5],  ctx->regs + PCI_REG_MWMASK_DEV);
	__raw_writel(ctx->pm[6],  ctx->regs + PCI_REG_MWBASE_REV_CCL);
	__raw_writel(ctx->pm[7],  ctx->regs + PCI_REG_ID);
	__raw_writel(ctx->pm[8],  ctx->regs + PCI_REG_CLASSREV);
	__raw_writel(ctx->pm[9],  ctx->regs + PCI_REG_PARAM);
	__raw_writel(ctx->pm[10], ctx->regs + PCI_REG_MBAR);
	__raw_writel(ctx->pm[11], ctx->regs + PCI_REG_TIMEOUT);
	wmb();
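	/* PCI_REG_CONFIG is written last, after the barrier above, presumably
	 * so the controller is re-enabled only once the other registers have
	 * been restored.
	 */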
	__raw_writel(ctx->pm[1],  ctx->regs + PCI_REG_CONFIG);
	wmb();

	/* YAMON on all db1xxx boards wipes the TLB and writes zero to C0_wired
	 * on resume, making it necessary to recreate it as soon as possible.
	 */
	ctx->wired_entry = 8191;	/* impossibly high value */
	alchemy_pci_wired_entry(ctx);	/* install it */
}

static struct syscore_ops alchemy_pci_pmops = {
	.suspend	= alchemy_pci_suspend,
	.resume		= alchemy_pci_resume,
};

static int alchemy_pci_probe(struct platform_device *pdev)
{
	struct alchemy_pci_platdata *pd = pdev->dev.platform_data;
	struct alchemy_pci_context *ctx;
	void __iomem *virt_io;
	unsigned long val;
	struct resource *r;
	struct clk *c;
	int ret;

	/* need at least PCI IRQ mapping table */
	if (!pd) {
		dev_err(&pdev->dev, "need platform data for PCI setup\n");
		ret = -ENODEV;
		goto out;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		dev_err(&pdev->dev, "no memory for pcictl context\n");
		ret = -ENOMEM;
		goto out;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "no pcictl ctrl regs resource\n");
		ret = -ENODEV;
		goto out1;
	}

	if (!request_mem_region(r->start, resource_size(r), pdev->name)) {
		dev_err(&pdev->dev, "cannot claim pci regs\n");
		ret = -ENODEV;
		goto out1;
	}

	c = clk_get(&pdev->dev, "pci_clko");
	if (IS_ERR(c)) {
		dev_err(&pdev->dev, "unable to find PCI clock\n");
		ret = PTR_ERR(c);
		goto out2;
	}

	ret = clk_prepare_enable(c);
	if (ret) {
		dev_err(&pdev->dev, "cannot enable PCI clock\n");
		goto out6;
	}

	ctx->regs = ioremap_nocache(r->start, resource_size(r));
	if (!ctx->regs) {
		dev_err(&pdev->dev, "cannot map pci regs\n");
		ret = -ENODEV;
		goto out5;
	}

	/* map parts of the PCI IO area */
	/* REVISIT: if this changes with a newer variant (doubt it) make this
	 * a platform resource.
	 */
	virt_io = ioremap(AU1500_PCI_IO_PHYS_ADDR, 0x00100000);
	if (!virt_io) {
		dev_err(&pdev->dev, "cannot remap pci io space\n");
		ret = -ENODEV;
		goto out3;
	}
	ctx->alchemy_pci_ctrl.io_map_base = (unsigned long)virt_io;

	/* Au1500 revisions older than AD have borked coherent PCI */
	if ((alchemy_get_cputype() == ALCHEMY_CPU_AU1500) &&
	    (read_c0_prid() < 0x01030202) &&
	    (coherentio == IO_COHERENCE_DISABLED)) {
		val = __raw_readl(ctx->regs + PCI_REG_CONFIG);
		val |= PCI_CONFIG_NC;
		__raw_writel(val, ctx->regs + PCI_REG_CONFIG);
		wmb();
		dev_info(&pdev->dev, "non-coherent PCI on Au1500 AA/AB/AC\n");
	}

	if (pd->board_map_irq)
		ctx->board_map_irq = pd->board_map_irq;

	if (pd->board_pci_idsel)
		ctx->board_pci_idsel = pd->board_pci_idsel;
	else
		ctx->board_pci_idsel = alchemy_pci_def_idsel;

	/* fill in relevant pci_controller members */
	ctx->alchemy_pci_ctrl.pci_ops = &alchemy_pci_ops;
	ctx->alchemy_pci_ctrl.mem_resource = &alchemy_pci_def_memres;
	ctx->alchemy_pci_ctrl.io_resource = &alchemy_pci_def_iores;

	/* we can't ioremap the entire pci config space because it's too large,
	 * nor can we dynamically ioremap it because some drivers use the
	 * PCI config routines from within atomic context and that becomes a
	 * problem in get_vm_area().  Instead we use one wired TLB entry to
	 * handle all config accesses for all busses.
	 */
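	/* 0x2000 covers two 4KB pages, matching the EntryLo0/EntryLo1 pair
	 * mapped by the single wired entry used in config_access().
	 */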
	ctx->pci_cfg_vm = get_vm_area(0x2000, VM_IOREMAP);
	if (!ctx->pci_cfg_vm) {
		dev_err(&pdev->dev, "unable to get vm area\n");
		ret = -ENOMEM;
		goto out4;
	}
	ctx->wired_entry = 8191;	/* impossibly high value */
	alchemy_pci_wired_entry(ctx);	/* install it */

	set_io_port_base((unsigned long)ctx->alchemy_pci_ctrl.io_map_base);

	/* board may want to modify bits in the config register, do it now */
	val = __raw_readl(ctx->regs + PCI_REG_CONFIG);
	val &= ~pd->pci_cfg_clr;
	val |= pd->pci_cfg_set;
	val &= ~PCI_CONFIG_PD;		/* clear disable bit */
	__raw_writel(val, ctx->regs + PCI_REG_CONFIG);
	wmb();

	__alchemy_pci_ctx = ctx;
	platform_set_drvdata(pdev, ctx);
	register_syscore_ops(&alchemy_pci_pmops);
	register_pci_controller(&ctx->alchemy_pci_ctrl);

	dev_info(&pdev->dev, "PCI controller at %ld MHz\n",
		 clk_get_rate(c) / 1000000);

	return 0;

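	/* Despite the non-sequential label names, the unwind path below
	 * releases resources in the reverse order they were acquired.
	 */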
out4:
	iounmap(virt_io);
out3:
	iounmap(ctx->regs);
out5:
	clk_disable_unprepare(c);
out6:
	clk_put(c);
out2:
	release_mem_region(r->start, resource_size(r));
out1:
	kfree(ctx);
out:
	return ret;
}

static struct platform_driver alchemy_pcictl_driver = {
	.probe		= alchemy_pci_probe,
	.driver = {
		.name	= "alchemy-pci",
	},
};

static int __init alchemy_pci_init(void)
{
	/* Au1500/Au1550 have PCI */
	switch (alchemy_get_cputype()) {
	case ALCHEMY_CPU_AU1500:
	case ALCHEMY_CPU_AU1550:
		return platform_driver_register(&alchemy_pcictl_driver);
	}
	return 0;
}
arch_initcall(alchemy_pci_init);


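/*
 * IRQ routing hook called by the MIPS PCI core; defer to the board-specific
 * mapping callback supplied via platform data, if any.
 */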
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct alchemy_pci_context *ctx = dev->sysdata;
	if (ctx && ctx->board_map_irq)
		return ctx->board_map_irq(dev, slot, pin);
	return -1;
}

int pcibios_plat_dev_init(struct pci_dev *dev)
{
	return 0;
}