// SPDX-License-Identifier: GPL-2.0-only
/* sun_esp.c: ESP front-end for Sparc SBUS systems.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi_host.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"sun_esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"1.100"
#define DRV_MODULE_RELDATE	"August 27, 2008"

#define dma_read32(REG) \
	sbus_readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
	sbus_writel((VAL), esp->dma_regs + (REG))

/* DVMA chip revisions */
enum dvma_rev {
	dvmarev0,
	dvmaesc1,
	dvmarev1,
	dvmarev2,
	dvmarev3,
	dvmarevplus,
	dvmahme
};

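/* Map the companion DVMA controller's registers and classify its
 * revision from the device ID field of DMA_CSR.
 */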
static int esp_sbus_setup_dma(struct esp *esp, struct platform_device *dma_of)
{
	esp->dma = dma_of;

	esp->dma_regs = of_ioremap(&dma_of->resource[0], 0,
				   resource_size(&dma_of->resource[0]),
				   "espdma");
	if (!esp->dma_regs)
		return -ENOMEM;

	switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) {
	case DMA_VERS0:
		esp->dmarev = dvmarev0;
		break;
	case DMA_ESCV1:
		esp->dmarev = dvmaesc1;
		break;
	case DMA_VERS1:
		esp->dmarev = dvmarev1;
		break;
	case DMA_VERS2:
		esp->dmarev = dvmarev2;
		break;
	case DMA_VERHME:
		esp->dmarev = dvmahme;
		break;
	case DMA_VERSPLUS:
		esp->dmarev = dvmarevplus;
		break;
	}

	return 0;
}

static int esp_sbus_map_regs(struct esp *esp, int hme)
{
	struct platform_device *op = to_platform_device(esp->dev);
	struct resource *res;

	/* On HME, two reg sets exist, first is DVMA,
	 * second is ESP registers.
	 */
	if (hme)
		res = &op->resource[1];
	else
		res = &op->resource[0];

	esp->regs = of_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
	if (!esp->regs)
		return -ENOMEM;

	return 0;
}

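/* Allocate the small DMA-coherent command block the esp_scsi core
 * uses when feeding command bytes to the chip.
 */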
static int esp_sbus_map_command_block(struct esp *esp)
{
	esp->command_block = dma_alloc_coherent(esp->dev, 16,
						&esp->command_block_dma,
						GFP_KERNEL);
	if (!esp->command_block)
		return -ENOMEM;
	return 0;
}

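/* Hook up the SBUS interrupt; scsi_esp_intr() in the core does the
 * actual interrupt handling.
 */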
static int esp_sbus_register_irq(struct esp *esp)
{
	struct Scsi_Host *host = esp->host;
	struct platform_device *op = to_platform_device(esp->dev);

	host->irq = op->archdata.irqs[0];
	return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
}

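/* Our initiator ID lives in "initiator-id" or "scsi-initiator-id" on
 * the ESP node; failing that, check the DVMA node, and finally fall
 * back to the traditional default of 7.
 */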
static void esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
{
	struct platform_device *op = to_platform_device(esp->dev);
	struct device_node *dp;

	dp = op->dev.of_node;
	esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	esp->scsi_id = of_getintprop_default(espdma->dev.of_node,
					     "scsi-initiator-id", 7);

done:
	esp->host->this_id = esp->scsi_id;
	esp->scsi_id_mask = (1 << esp->scsi_id);
}

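/* A "differential" property marks a differential SCSI bus. */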
static void esp_get_differential(struct esp *esp)
{
	struct platform_device *op = to_platform_device(esp->dev);
	struct device_node *dp;

	dp = op->dev.of_node;
	if (of_property_read_bool(dp, "differential"))
		esp->flags |= ESP_FLAG_DIFFERENTIAL;
	else
		esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
}

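/* The chip's clock frequency comes from the ESP node, or from the
 * parent bus node if the ESP node does not carry the property.
 */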
static void esp_get_clock_params(struct esp *esp)
{
	struct platform_device *op = to_platform_device(esp->dev);
	struct device_node *bus_dp, *dp;
	int fmhz;

	dp = op->dev.of_node;
	bus_dp = dp->parent;

	fmhz = of_getintprop_default(dp, "clock-frequency", 0);
	if (fmhz == 0)
		fmhz = of_getintprop_default(bus_dp, "clock-frequency", 0);

	esp->cfreq = fmhz;
}

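/* Usable burst sizes are the intersection of the "burst-sizes" masks
 * on the ESP node, the DVMA node, and the DVMA's parent bus.  Fall
 * back to a conservative default if the result looks unusable.
 */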
static void esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
{
	struct device_node *dma_dp = dma_of->dev.of_node;
	struct platform_device *op = to_platform_device(esp->dev);
	struct device_node *dp;
	u8 bursts, val;

	dp = op->dev.of_node;
	bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
	val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
	if (val != 0xff)
		bursts &= val;

	val = of_getintprop_default(dma_dp->parent, "burst-sizes", 0xff);
	if (val != 0xff)
		bursts &= val;

	if (bursts == 0xff ||
	    (bursts & DMA_BURST16) == 0 ||
	    (bursts & DMA_BURST32) == 0)
		bursts = (DMA_BURST32 - 1);

	esp->bursts = bursts;
}

static void esp_sbus_get_props(struct esp *esp, struct platform_device *espdma)
{
	esp_get_scsi_id(esp, espdma);
	esp_get_differential(esp);
	esp_get_clock_params(esp);
	esp_get_bursts(esp, espdma);
}

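/* ESP registers are byte wide and spaced four bytes apart on SBUS. */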
static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	sbus_writeb(val, esp->regs + (reg * 4UL));
}

static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
{
	return sbus_readb(esp->regs + (reg * 4UL));
}

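/* The core polls this to see whether the DVMA has latched an
 * interrupt or an error.
 */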
static int sbus_esp_irq_pending(struct esp *esp)
{
	if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
		return 1;
	return 0;
}

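/* Bring the DVMA into a known state and program it for this chip
 * revision: a full reset plus burst size and optional 64-bit SBUS
 * setup on HME, clock and burst tweaks on the older gate arrays.
 * Interrupts are re-enabled at the end.
 */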
static void sbus_esp_reset_dma(struct esp *esp)
{
	int can_do_burst16, can_do_burst32, can_do_burst64;
	int can_do_sbus64, lim;
	struct platform_device *op = to_platform_device(esp->dev);
	u32 val;

	can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
	can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
	can_do_burst64 = 0;
	can_do_sbus64 = 0;
	if (sbus_can_dma_64bit())
		can_do_sbus64 = 1;
	if (sbus_can_burst64())
		can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;

	/* Put the DVMA into a known state. */
	if (esp->dmarev != dvmahme) {
		val = dma_read32(DMA_CSR);
		dma_write32(val | DMA_RST_SCSI, DMA_CSR);
		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}
	switch (esp->dmarev) {
	case dvmahme:
		dma_write32(DMA_RESET_FAS366, DMA_CSR);
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
					DMA_SCSI_DISAB | DMA_INT_ENAB);

		esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
					  DMA_BRST_SZ);

		if (can_do_burst64)
			esp->prev_hme_dmacsr |= DMA_BRST64;
		else if (can_do_burst32)
			esp->prev_hme_dmacsr |= DMA_BRST32;

		if (can_do_sbus64) {
			esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
			sbus_set_sbus64(&op->dev, esp->bursts);
		}

		lim = 1000;
		while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		dma_write32(0, DMA_ADDR);
		break;

	case dvmarev2:
		if (esp->rev != ESP100) {
			val = dma_read32(DMA_CSR);
			dma_write32(val | DMA_3CLKS, DMA_CSR);
		}
		break;

	case dvmarev3:
		val = dma_read32(DMA_CSR);
		val &= ~DMA_3CLKS;
		val |= DMA_2CLKS;
		if (can_do_burst32) {
			val &= ~DMA_BRST_SZ;
			val |= DMA_BRST32;
		}
		dma_write32(val, DMA_CSR);
		break;

	case dvmaesc1:
		val = dma_read32(DMA_CSR);
		val |= DMA_ADD_ENABLE;
		val &= ~DMA_BCNT_ENAB;
		if (!can_do_burst32 && can_do_burst16) {
			val |= DMA_ESC_BURST;
		} else {
			val &= ~(DMA_ESC_BURST);
		}
		dma_write32(val, DMA_CSR);
		break;

	default:
		break;
	}

	/* Enable interrupts.  */
	val = dma_read32(DMA_CSR);
	dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}

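/* Wait for data still sitting in the DVMA FIFO to drain out to
 * memory.  HME is skipped entirely; rev3 and ESC1 only need polling,
 * the rest must have the drain started explicitly.
 */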
static void sbus_esp_dma_drain(struct esp *esp)
{
	u32 csr;
	int lim;

	if (esp->dmarev == dvmahme)
		return;

	csr = dma_read32(DMA_CSR);
	if (!(csr & DMA_FIFO_ISDRAIN))
		return;

	if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1)
		dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);

	lim = 1000;
	while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
		if (--lim == 0) {
			printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
			       esp->host->unique_id);
			break;
		}
		udelay(1);
	}
}

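/* Discard whatever state the DVMA still holds from the previous
 * transfer.  HME gets a SCSI reset and a rewritten CSR; the older
 * revisions wait for pending reads and pulse the FIFO invalidate bit.
 */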
static void sbus_esp_dma_invalidate(struct esp *esp)
{
	if (esp->dmarev == dvmahme) {
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
					 (DMA_PARITY_OFF | DMA_2CLKS |
					  DMA_SCSI_DISAB | DMA_INT_ENAB)) &
					~(DMA_ST_WRITE | DMA_ENABLE));

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		/* This is necessary to avoid having the SCSI channel
		 * engine lock up on us.
		 */
		dma_write32(0, DMA_ADDR);
	} else {
		u32 val;
		int lim;

		lim = 1000;
		while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA will not "
				       "invalidate!\n", esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
		val |= DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
		val &= ~DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
	}
}

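/* Start a DMA transfer: load the transfer count into the ESP, program
 * the DVMA address and direction, and issue the command.  The FASHME
 * path starts the chip before touching the DVMA; older revisions
 * program the DVMA first.  ESC1 additionally wants a rounded-up byte
 * count written to DMA_COUNT.
 */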
static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				  u32 dma_count, int write, u8 cmd)
{
	u32 csr;

	BUG_ON(!(cmd & ESP_CMD_DMA));

	sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	if (esp->rev == FASHME) {
		sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
		sbus_esp_write8(esp, 0, FAS_RHI);

		scsi_esp_cmd(esp, cmd);

		csr = esp->prev_hme_dmacsr;
		csr |= DMA_SCSI_DISAB | DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		esp->prev_hme_dmacsr = csr;

		dma_write32(dma_count, DMA_COUNT);
		dma_write32(addr, DMA_ADDR);
		dma_write32(csr, DMA_CSR);
	} else {
		csr = dma_read32(DMA_CSR);
		csr |= DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		dma_write32(csr, DMA_CSR);
		if (esp->dmarev == dvmaesc1) {
			u32 end = PAGE_ALIGN(addr + dma_count + 16U);
			dma_write32(end - addr, DMA_COUNT);
		}
		dma_write32(addr, DMA_ADDR);

		scsi_esp_cmd(esp, cmd);
	}
}

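/* Report a latched DVMA error condition back to the core. */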
static int sbus_esp_dma_error(struct esp *esp)
{
	u32 csr = dma_read32(DMA_CSR);

	if (csr & DMA_HNDL_ERROR)
		return 1;

	return 0;
}

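/* Hooks handed to the shared esp_scsi core. */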
static const struct esp_driver_ops sbus_esp_ops = {
	.esp_write8	=	sbus_esp_write8,
	.esp_read8	=	sbus_esp_read8,
	.irq_pending	=	sbus_esp_irq_pending,
	.reset_dma	=	sbus_esp_reset_dma,
	.dma_drain	=	sbus_esp_dma_drain,
	.dma_invalidate	=	sbus_esp_dma_invalidate,
	.send_dma_cmd	=	sbus_esp_send_dma_cmd,
	.dma_error	=	sbus_esp_dma_error,
};

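/* Common probe path for plain ESP and HME: allocate the Scsi_Host,
 * map registers and the command block, grab the IRQ, read the
 * firmware properties, and register with the esp_scsi core.
 */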
static int esp_sbus_probe_one(struct platform_device *op,
			      struct platform_device *espdma, int hme)
{
	const struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	int err;

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	err = -ENOMEM;
	if (!host)
		goto fail;

	host->max_id = (hme ? 16 : 8);
	esp = shost_priv(host);

	esp->host = host;
	esp->dev = &op->dev;
	esp->ops = &sbus_esp_ops;

	if (hme)
		esp->flags |= ESP_FLAG_WIDE_CAPABLE;

	err = esp_sbus_setup_dma(esp, espdma);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_regs(esp, hme);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_command_block(esp);
	if (err < 0)
		goto fail_unmap_regs;

	err = esp_sbus_register_irq(esp);
	if (err < 0)
		goto fail_unmap_command_block;

	esp_sbus_get_props(esp, espdma);

	/* Before we try to touch the ESP chip, ESC1 dma can
	 * come up with the reset bit set, so make sure that
	 * is clear first.
	 */
	if (esp->dmarev == dvmaesc1) {
		u32 val = dma_read32(DMA_CSR);

		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}

	dev_set_drvdata(&op->dev, esp);

	err = scsi_esp_register(esp);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);
fail_unmap_command_block:
	dma_free_coherent(&op->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
fail_unmap_regs:
	of_iounmap(&op->resource[(hme ? 1 : 0)], esp->regs, SBUS_ESP_REG_SIZE);
fail_unlink:
	scsi_host_put(host);
fail:
	return err;
}

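/* A plain ESP sits under an "espdma" or "dma" node which provides the
 * DVMA registers; the HME "SUNW,fas" carries both register sets in
 * its own node.  Work out which case we have, then probe it.
 */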
static int esp_sbus_probe(struct platform_device *op)
{
	struct device_node *dma_node = NULL;
	struct device_node *dp = op->dev.of_node;
	struct platform_device *dma_of = NULL;
	int hme = 0;
	int ret;

	if (of_node_name_eq(dp->parent, "espdma") ||
	    of_node_name_eq(dp->parent, "dma"))
		dma_node = dp->parent;
	else if (of_node_name_eq(dp, "SUNW,fas")) {
		dma_node = op->dev.of_node;
		hme = 1;
	}
	if (dma_node)
		dma_of = of_find_device_by_node(dma_node);
	if (!dma_of)
		return -ENODEV;

	ret = esp_sbus_probe_one(op, dma_of, hme);
	if (ret)
		put_device(&dma_of->dev);

	return ret;
}

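/* Tear down in roughly the reverse order of probe: unregister from
 * the core, quiesce DVMA interrupts, then release the IRQ, buffers,
 * mappings and device references.
 */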
static int esp_sbus_remove(struct platform_device *op)
{
	struct esp *esp = dev_get_drvdata(&op->dev);
	struct platform_device *dma_of = esp->dma;
	unsigned int irq = esp->host->irq;
	bool is_hme;
	u32 val;

	scsi_esp_unregister(esp);

	/* Disable interrupts.  */
	val = dma_read32(DMA_CSR);
	dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);

	free_irq(irq, esp);

	is_hme = (esp->dmarev == dvmahme);

	dma_free_coherent(&op->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
	of_iounmap(&op->resource[(is_hme ? 1 : 0)], esp->regs,
		   SBUS_ESP_REG_SIZE);
	of_iounmap(&dma_of->resource[0], esp->dma_regs,
		   resource_size(&dma_of->resource[0]));

	scsi_host_put(esp->host);

	dev_set_drvdata(&op->dev, NULL);

	put_device(&dma_of->dev);

	return 0;
}

static const struct of_device_id esp_match[] = {
	{
		.name = "SUNW,esp",
	},
	{
		.name = "SUNW,fas",
	},
	{
		.name = "esp",
	},
	{},
};
MODULE_DEVICE_TABLE(of, esp_match);

static struct platform_driver esp_sbus_driver = {
	.driver = {
		.name = "esp",
		.of_match_table = esp_match,
	},
	.probe		= esp_sbus_probe,
	.remove		= esp_sbus_remove,
};
module_platform_driver(esp_sbus_driver);

MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);