/* sun_esp.c: ESP front-end for Sparc SBUS systems.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gfp.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi_host.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"sun_esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"1.100"
#define DRV_MODULE_RELDATE	"August 27, 2008"

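/* Accessors for the DVMA gate array registers.  Both macros expect a
 * local "struct esp *esp" to be in scope at the expansion site and
 * perform 32-bit SBus accesses relative to esp->dma_regs.
 */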
#define dma_read32(REG) \
	sbus_readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
	sbus_writel((VAL), esp->dma_regs + (REG))

/* DVMA chip revisions */
enum dvma_rev {
	dvmarev0,
	dvmaesc1,
	dvmarev1,
	dvmarev2,
	dvmarev3,
	dvmarevplus,
	dvmahme
};

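/* Map the DVMA controller registers and record which DVMA revision we
 * are driving, based on the device ID field of the DMA CSR.
 */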
static int esp_sbus_setup_dma(struct esp *esp, struct platform_device *dma_of)
{
	esp->dma = dma_of;

	esp->dma_regs = of_ioremap(&dma_of->resource[0], 0,
				   resource_size(&dma_of->resource[0]),
				   "espdma");
	if (!esp->dma_regs)
		return -ENOMEM;

	switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) {
	case DMA_VERS0:
		esp->dmarev = dvmarev0;
		break;
	case DMA_ESCV1:
		esp->dmarev = dvmaesc1;
		break;
	case DMA_VERS1:
		esp->dmarev = dvmarev1;
		break;
	case DMA_VERS2:
		esp->dmarev = dvmarev2;
		break;
	case DMA_VERHME:
		esp->dmarev = dvmahme;
		break;
	case DMA_VERSPLUS:
		esp->dmarev = dvmarevplus;
		break;
	}

	return 0;
}

static int esp_sbus_map_regs(struct esp *esp, int hme)
{
	struct platform_device *op = to_platform_device(esp->dev);
	struct resource *res;

	/* On HME, two register sets exist: the first is the DVMA,
	 * the second is the ESP registers.
	 */
	if (hme)
		res = &op->resource[1];
	else
		res = &op->resource[0];

	esp->regs = of_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
	if (!esp->regs)
		return -ENOMEM;

	return 0;
}

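/* Allocate the small DMA-coherent buffer that the core esp_scsi code
 * uses as its command block.
 */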
static int esp_sbus_map_command_block(struct esp *esp)
{
	esp->command_block = dma_alloc_coherent(esp->dev, 16,
						&esp->command_block_dma,
						GFP_KERNEL);
	if (!esp->command_block)
		return -ENOMEM;
	return 0;
}

static int esp_sbus_register_irq(struct esp *esp)
{
	struct Scsi_Host *host = esp->host;
	struct platform_device *op = to_platform_device(esp->dev);

	host->irq = op->archdata.irqs[0];
	return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
}

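/* Determine our initiator ID: prefer the ESP node's "initiator-id"
 * property, then its "scsi-initiator-id", and finally fall back to the
 * DVMA node's "scsi-initiator-id" (default 7).
 */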
static void esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
{
	struct platform_device *op = to_platform_device(esp->dev);
	struct device_node *dp;

	dp = op->dev.of_node;
	esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	esp->scsi_id = of_getintprop_default(espdma->dev.of_node,
					     "scsi-initiator-id", 7);

done:
	esp->host->this_id = esp->scsi_id;
	esp->scsi_id_mask = (1 << esp->scsi_id);
}

static void esp_get_differential(struct esp *esp)
{
	struct platform_device *op = to_platform_device(esp->dev);
	struct device_node *dp;

	dp = op->dev.of_node;
	if (of_find_property(dp, "differential", NULL))
		esp->flags |= ESP_FLAG_DIFFERENTIAL;
	else
		esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
}

static void esp_get_clock_params(struct esp *esp)
{
	struct platform_device *op = to_platform_device(esp->dev);
	struct device_node *bus_dp, *dp;
	int fmhz;

	dp = op->dev.of_node;
	bus_dp = dp->parent;

	fmhz = of_getintprop_default(dp, "clock-frequency", 0);
	if (fmhz == 0)
		fmhz = of_getintprop_default(bus_dp, "clock-frequency", 0);

	esp->cfreq = fmhz;
}

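/* Compute the burst-size mask as the intersection of the "burst-sizes"
 * properties on the ESP node, the DVMA node, and the DVMA node's parent
 * bus, falling back to a default mask when no usable value is found.
 */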
static void esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
{
	struct device_node *dma_dp = dma_of->dev.of_node;
	struct platform_device *op = to_platform_device(esp->dev);
	struct device_node *dp;
	u8 bursts, val;

	dp = op->dev.of_node;
	bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
	val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
	if (val != 0xff)
		bursts &= val;

	val = of_getintprop_default(dma_dp->parent, "burst-sizes", 0xff);
	if (val != 0xff)
		bursts &= val;

	if (bursts == 0xff ||
	    (bursts & DMA_BURST16) == 0 ||
	    (bursts & DMA_BURST32) == 0)
		bursts = (DMA_BURST32 - 1);

	esp->bursts = bursts;
}

static void esp_sbus_get_props(struct esp *esp, struct platform_device *espdma)
{
	esp_get_scsi_id(esp, espdma);
	esp_get_differential(esp);
	esp_get_clock_params(esp);
	esp_get_bursts(esp, espdma);
}

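/* The byte-wide ESP registers are spaced four bytes apart on SBus,
 * hence the reg * 4 scaling in the accessors below.
 */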
static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	sbus_writeb(val, esp->regs + (reg * 4UL));
}

static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
{
	return sbus_readb(esp->regs + (reg * 4UL));
}

static int sbus_esp_irq_pending(struct esp *esp)
{
	if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
		return 1;
	return 0;
}

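/* Reset the DVMA engine and program revision-specific CSR settings
 * (burst size, clock delays, 64-bit SBus mode on HME), then re-enable
 * DVMA interrupts.
 */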
static void sbus_esp_reset_dma(struct esp *esp)
{
	int can_do_burst16, can_do_burst32, can_do_burst64;
	int can_do_sbus64, lim;
	struct platform_device *op = to_platform_device(esp->dev);
	u32 val;

	can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
	can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
	can_do_burst64 = 0;
	can_do_sbus64 = 0;
	if (sbus_can_dma_64bit())
		can_do_sbus64 = 1;
	if (sbus_can_burst64())
		can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;

	/* Put the DVMA into a known state. */
	if (esp->dmarev != dvmahme) {
		val = dma_read32(DMA_CSR);
		dma_write32(val | DMA_RST_SCSI, DMA_CSR);
		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}
	switch (esp->dmarev) {
	case dvmahme:
		dma_write32(DMA_RESET_FAS366, DMA_CSR);
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
					DMA_SCSI_DISAB | DMA_INT_ENAB);

		esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
					  DMA_BRST_SZ);

		if (can_do_burst64)
			esp->prev_hme_dmacsr |= DMA_BRST64;
		else if (can_do_burst32)
			esp->prev_hme_dmacsr |= DMA_BRST32;

		if (can_do_sbus64) {
			esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
			sbus_set_sbus64(&op->dev, esp->bursts);
		}

		lim = 1000;
		while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		dma_write32(0, DMA_ADDR);
		break;

	case dvmarev2:
		if (esp->rev != ESP100) {
			val = dma_read32(DMA_CSR);
			dma_write32(val | DMA_3CLKS, DMA_CSR);
		}
		break;

	case dvmarev3:
		val = dma_read32(DMA_CSR);
		val &= ~DMA_3CLKS;
		val |= DMA_2CLKS;
		if (can_do_burst32) {
			val &= ~DMA_BRST_SZ;
			val |= DMA_BRST32;
		}
		dma_write32(val, DMA_CSR);
		break;

	case dvmaesc1:
		val = dma_read32(DMA_CSR);
		val |= DMA_ADD_ENABLE;
		val &= ~DMA_BCNT_ENAB;
		if (!can_do_burst32 && can_do_burst16) {
			val |= DMA_ESC_BURST;
		} else {
			val &= ~(DMA_ESC_BURST);
		}
		dma_write32(val, DMA_CSR);
		break;

	default:
		break;
	}

	/* Enable interrupts.  */
	val = dma_read32(DMA_CSR);
	dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}

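/* Wait for the DVMA FIFO to drain to memory after a transfer.  HME
 * does not need this; on other revisions we may have to start the
 * drain by hand and then poll for completion.
 */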
static void sbus_esp_dma_drain(struct esp *esp)
{
	u32 csr;
	int lim;

	if (esp->dmarev == dvmahme)
		return;

	csr = dma_read32(DMA_CSR);
	if (!(csr & DMA_FIFO_ISDRAIN))
		return;

	if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1)
		dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);

	lim = 1000;
	while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
		if (--lim == 0) {
			printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
			       esp->host->unique_id);
			break;
		}
		udelay(1);
	}
}

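/* Throw away any state left in the DVMA pipeline.  On HME this is a
 * full reset followed by reprogramming of the cached CSR value; on
 * older revisions we wait for pending reads and pulse DMA_FIFO_INV.
 */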
static void sbus_esp_dma_invalidate(struct esp *esp)
{
	if (esp->dmarev == dvmahme) {
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
					 (DMA_PARITY_OFF | DMA_2CLKS |
					  DMA_SCSI_DISAB | DMA_INT_ENAB)) &
					~(DMA_ST_WRITE | DMA_ENABLE));

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		/* This is necessary to avoid having the SCSI channel
		 * engine lock up on us.
		 */
		dma_write32(0, DMA_ADDR);
	} else {
		u32 val;
		int lim;

		lim = 1000;
		while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA will not "
				       "invalidate!\n", esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
		val |= DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
		val &= ~DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
	}
}

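/* Program a DMA transfer and issue the accompanying ESP command.  On
 * FASHME the extended transfer count registers and the cached DMA CSR
 * are used; on older chips the DVMA CSR is read, modified and written
 * back, and on ESC1 the DMA byte count is rounded up so the transfer
 * ends on a page-aligned address.
 */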
static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				  u32 dma_count, int write, u8 cmd)
{
	u32 csr;

	BUG_ON(!(cmd & ESP_CMD_DMA));

	sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	if (esp->rev == FASHME) {
		sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
		sbus_esp_write8(esp, 0, FAS_RHI);

		scsi_esp_cmd(esp, cmd);

		csr = esp->prev_hme_dmacsr;
		csr |= DMA_SCSI_DISAB | DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		esp->prev_hme_dmacsr = csr;

		dma_write32(dma_count, DMA_COUNT);
		dma_write32(addr, DMA_ADDR);
		dma_write32(csr, DMA_CSR);
	} else {
		csr = dma_read32(DMA_CSR);
		csr |= DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		dma_write32(csr, DMA_CSR);
		if (esp->dmarev == dvmaesc1) {
			u32 end = PAGE_ALIGN(addr + dma_count + 16U);
			dma_write32(end - addr, DMA_COUNT);
		}
		dma_write32(addr, DMA_ADDR);

		scsi_esp_cmd(esp, cmd);
	}
}

static int sbus_esp_dma_error(struct esp *esp)
{
	u32 csr = dma_read32(DMA_CSR);

	if (csr & DMA_HNDL_ERROR)
		return 1;

	return 0;
}

static const struct esp_driver_ops sbus_esp_ops = {
	.esp_write8	=	sbus_esp_write8,
	.esp_read8	=	sbus_esp_read8,
	.irq_pending	=	sbus_esp_irq_pending,
	.reset_dma	=	sbus_esp_reset_dma,
	.dma_drain	=	sbus_esp_dma_drain,
	.dma_invalidate	=	sbus_esp_dma_invalidate,
	.send_dma_cmd	=	sbus_esp_send_dma_cmd,
	.dma_error	=	sbus_esp_dma_error,
};

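/* Allocate and wire up one ESP host instance: map the DVMA and ESP
 * registers, set up the command block and IRQ, pull the OpenBoot
 * properties, and register with the esp_scsi core.
 */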
static int esp_sbus_probe_one(struct platform_device *op,
			      struct platform_device *espdma, int hme)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	int err;

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	err = -ENOMEM;
	if (!host)
		goto fail;

	host->max_id = (hme ? 16 : 8);
	esp = shost_priv(host);

	esp->host = host;
	esp->dev = &op->dev;
	esp->ops = &sbus_esp_ops;

	if (hme)
		esp->flags |= ESP_FLAG_WIDE_CAPABLE;

	err = esp_sbus_setup_dma(esp, espdma);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_regs(esp, hme);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_command_block(esp);
	if (err < 0)
		goto fail_unmap_regs;

	err = esp_sbus_register_irq(esp);
	if (err < 0)
		goto fail_unmap_command_block;

	esp_sbus_get_props(esp, espdma);

	/* Before we try to touch the ESP chip, ESC1 dma can
	 * come up with the reset bit set, so make sure that
	 * is clear first.
	 */
	if (esp->dmarev == dvmaesc1) {
		u32 val = dma_read32(DMA_CSR);

		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}

	dev_set_drvdata(&op->dev, esp);

	err = scsi_esp_register(esp);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);
fail_unmap_command_block:
	dma_free_coherent(&op->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
fail_unmap_regs:
	of_iounmap(&op->resource[(hme ? 1 : 0)], esp->regs, SBUS_ESP_REG_SIZE);
fail_unlink:
	scsi_host_put(host);
fail:
	return err;
}

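/* Match either a plain ESP behind an "espdma"/"dma" parent node or an
 * HME "SUNW,fas" node that carries its own DVMA registers, then hand
 * off to esp_sbus_probe_one().
 */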
static int esp_sbus_probe(struct platform_device *op)
{
	struct device_node *dma_node = NULL;
	struct device_node *dp = op->dev.of_node;
	struct platform_device *dma_of = NULL;
	int hme = 0;
	int ret;

	if (of_node_name_eq(dp->parent, "espdma") ||
	    of_node_name_eq(dp->parent, "dma"))
		dma_node = dp->parent;
	else if (of_node_name_eq(dp, "SUNW,fas")) {
		dma_node = op->dev.of_node;
		hme = 1;
	}
	if (dma_node)
		dma_of = of_find_device_by_node(dma_node);
	if (!dma_of)
		return -ENODEV;

	ret = esp_sbus_probe_one(op, dma_of, hme);
	if (ret)
		put_device(&dma_of->dev);

	return ret;
}

static int esp_sbus_remove(struct platform_device *op)
{
	struct esp *esp = dev_get_drvdata(&op->dev);
	struct platform_device *dma_of = esp->dma;
	unsigned int irq = esp->host->irq;
	bool is_hme;
	u32 val;

	scsi_esp_unregister(esp);

	/* Disable interrupts.  */
	val = dma_read32(DMA_CSR);
	dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);

	free_irq(irq, esp);

	is_hme = (esp->dmarev == dvmahme);

	dma_free_coherent(&op->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
	of_iounmap(&op->resource[(is_hme ? 1 : 0)], esp->regs,
		   SBUS_ESP_REG_SIZE);
	of_iounmap(&dma_of->resource[0], esp->dma_regs,
		   resource_size(&dma_of->resource[0]));

	scsi_host_put(esp->host);

	dev_set_drvdata(&op->dev, NULL);

	put_device(&dma_of->dev);

	return 0;
}

static const struct of_device_id esp_match[] = {
	{
		.name = "SUNW,esp",
	},
	{
		.name = "SUNW,fas",
	},
	{
		.name = "esp",
	},
	{},
};
MODULE_DEVICE_TABLE(of, esp_match);

static struct platform_driver esp_sbus_driver = {
	.driver = {
		.name = "esp",
		.of_match_table = esp_match,
	},
	.probe		= esp_sbus_probe,
	.remove		= esp_sbus_remove,
};

static int __init sunesp_init(void)
{
	return platform_driver_register(&esp_sbus_driver);
}

static void __exit sunesp_exit(void)
{
	platform_driver_unregister(&esp_sbus_driver);
}

MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_init(sunesp_init);
module_exit(sunesp_exit);