/* sun_esp.c: ESP front-end for Sparc SBUS systems.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi_host.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"sun_esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"1.100"
#define DRV_MODULE_RELDATE	"August 27, 2008"

#define dma_read32(REG) \
	sbus_readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
	sbus_writel((VAL), esp->dma_regs + (REG))

/* DVMA chip revisions */
enum dvma_rev {
	dvmarev0,
	dvmaesc1,
	dvmarev1,
	dvmarev2,
	dvmarev3,
	dvmarevplus,
	dvmahme
};

static int __devinit esp_sbus_setup_dma(struct esp *esp,
					struct of_device *dma_of)
{
	esp->dma = dma_of;

	esp->dma_regs = of_ioremap(&dma_of->resource[0], 0,
				   resource_size(&dma_of->resource[0]),
				   "espdma");
	if (!esp->dma_regs)
		return -ENOMEM;

	switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) {
	case DMA_VERS0:
		esp->dmarev = dvmarev0;
		break;
	case DMA_ESCV1:
		esp->dmarev = dvmaesc1;
		break;
	case DMA_VERS1:
		esp->dmarev = dvmarev1;
		break;
	case DMA_VERS2:
		esp->dmarev = dvmarev2;
		break;
	case DMA_VERHME:
		esp->dmarev = dvmahme;
		break;
	case DMA_VERSPLUS:
		esp->dmarev = dvmarevplus;
		break;
	}

	return 0;
}
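/* The DVMA revision latched above from the device-ID field of DMA_CSR
 * selects the revision-specific handling in sbus_esp_reset_dma(),
 * sbus_esp_dma_drain() and sbus_esp_dma_invalidate() below.  If none
 * of the ID values match, esp->dmarev simply keeps its initial value
 * of dvmarev0, since the shost_priv() area from scsi_host_alloc()
 * starts out zeroed.
 */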
static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
{
	struct of_device *op = esp->dev;
	struct resource *res;

	/* On HME two register sets exist: the first is the DVMA,
	 * the second is the ESP registers.
	 */
	if (hme)
		res = &op->resource[1];
	else
		res = &op->resource[0];

	esp->regs = of_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
	if (!esp->regs)
		return -ENOMEM;

	return 0;
}

static int __devinit esp_sbus_map_command_block(struct esp *esp)
{
	struct of_device *op = esp->dev;

	esp->command_block = dma_alloc_coherent(&op->dev, 16,
						&esp->command_block_dma,
						GFP_ATOMIC);
	if (!esp->command_block)
		return -ENOMEM;
	return 0;
}

static int __devinit esp_sbus_register_irq(struct esp *esp)
{
	struct Scsi_Host *host = esp->host;
	struct of_device *op = esp->dev;

	host->irq = op->irqs[0];
	return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
}

static void __devinit esp_get_scsi_id(struct esp *esp, struct of_device *espdma)
{
	struct of_device *op = esp->dev;
	struct device_node *dp;

	dp = op->node;
	esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	esp->scsi_id = of_getintprop_default(espdma->node,
					     "scsi-initiator-id", 7);

done:
	esp->host->this_id = esp->scsi_id;
	esp->scsi_id_mask = (1 << esp->scsi_id);
}

static void __devinit esp_get_differential(struct esp *esp)
{
	struct of_device *op = esp->dev;
	struct device_node *dp;

	dp = op->node;
	if (of_find_property(dp, "differential", NULL))
		esp->flags |= ESP_FLAG_DIFFERENTIAL;
	else
		esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
}

static void __devinit esp_get_clock_params(struct esp *esp)
{
	struct of_device *op = esp->dev;
	struct device_node *bus_dp, *dp;
	int fmhz;

	dp = op->node;
	bus_dp = dp->parent;

	fmhz = of_getintprop_default(dp, "clock-frequency", 0);
	if (fmhz == 0)
		fmhz = of_getintprop_default(bus_dp, "clock-frequency", 0);

	esp->cfreq = fmhz;
}

static void __devinit esp_get_bursts(struct esp *esp, struct of_device *dma_of)
{
	struct device_node *dma_dp = dma_of->node;
	struct of_device *op = esp->dev;
	struct device_node *dp;
	u8 bursts, val;

	dp = op->node;
	bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
	val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
	if (val != 0xff)
		bursts &= val;

	val = of_getintprop_default(dma_dp->parent, "burst-sizes", 0xff);
	if (val != 0xff)
		bursts &= val;

	if (bursts == 0xff ||
	    (bursts & DMA_BURST16) == 0 ||
	    (bursts & DMA_BURST32) == 0)
		bursts = (DMA_BURST32 - 1);

	esp->bursts = bursts;
}

static void __devinit esp_sbus_get_props(struct esp *esp, struct of_device *espdma)
{
	esp_get_scsi_id(esp, espdma);
	esp_get_differential(esp);
	esp_get_clock_params(esp);
	esp_get_bursts(esp, espdma);
}
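/* The ESP registers are byte-wide, but on these SBUS boards each
 * register is decoded on a longword boundary, hence the "reg * 4UL"
 * scaling in the accessors below: register index 1 sits at byte
 * offset 4 of the SBUS_ESP_REG_SIZE window mapped in
 * esp_sbus_map_regs().
 */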
static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	sbus_writeb(val, esp->regs + (reg * 4UL));
}

static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
{
	return sbus_readb(esp->regs + (reg * 4UL));
}

static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
				      size_t sz, int dir)
{
	struct of_device *op = esp->dev;

	return dma_map_single(&op->dev, buf, sz, dir);
}

static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
			   int num_sg, int dir)
{
	struct of_device *op = esp->dev;

	return dma_map_sg(&op->dev, sg, num_sg, dir);
}

static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
				  size_t sz, int dir)
{
	struct of_device *op = esp->dev;

	dma_unmap_single(&op->dev, addr, sz, dir);
}

static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
			      int num_sg, int dir)
{
	struct of_device *op = esp->dev;

	dma_unmap_sg(&op->dev, sg, num_sg, dir);
}

static int sbus_esp_irq_pending(struct esp *esp)
{
	if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
		return 1;
	return 0;
}

static void sbus_esp_reset_dma(struct esp *esp)
{
	int can_do_burst16, can_do_burst32, can_do_burst64;
	int can_do_sbus64, lim;
	struct of_device *op;
	u32 val;

	can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
	can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
	can_do_burst64 = 0;
	can_do_sbus64 = 0;
	op = esp->dev;
	if (sbus_can_dma_64bit())
		can_do_sbus64 = 1;
	if (sbus_can_burst64())
		can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;

	/* Put the DVMA into a known state. */
	if (esp->dmarev != dvmahme) {
		val = dma_read32(DMA_CSR);
		dma_write32(val | DMA_RST_SCSI, DMA_CSR);
		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}
	switch (esp->dmarev) {
	case dvmahme:
		dma_write32(DMA_RESET_FAS366, DMA_CSR);
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
					DMA_SCSI_DISAB | DMA_INT_ENAB);

		esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
					  DMA_BRST_SZ);

		if (can_do_burst64)
			esp->prev_hme_dmacsr |= DMA_BRST64;
		else if (can_do_burst32)
			esp->prev_hme_dmacsr |= DMA_BRST32;

		if (can_do_sbus64) {
			esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
			sbus_set_sbus64(&op->dev, esp->bursts);
		}

		lim = 1000;
		while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		dma_write32(0, DMA_ADDR);
		break;

	case dvmarev2:
		if (esp->rev != ESP100) {
			val = dma_read32(DMA_CSR);
			dma_write32(val | DMA_3CLKS, DMA_CSR);
		}
		break;

	case dvmarev3:
		val = dma_read32(DMA_CSR);
		val &= ~DMA_3CLKS;
		val |= DMA_2CLKS;
		if (can_do_burst32) {
			val &= ~DMA_BRST_SZ;
			val |= DMA_BRST32;
		}
		dma_write32(val, DMA_CSR);
		break;

	case dvmaesc1:
		val = dma_read32(DMA_CSR);
		val |= DMA_ADD_ENABLE;
		val &= ~DMA_BCNT_ENAB;
		if (!can_do_burst32 && can_do_burst16)
			val |= DMA_ESC_BURST;
		else
			val &= ~(DMA_ESC_BURST);
		dma_write32(val, DMA_CSR);
		break;

	default:
		break;
	}

	/* Enable interrupts. */
	val = dma_read32(DMA_CSR);
	dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}
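/* Note that in the HME (FAS366) paths above and below the driver
 * never recovers DVMA state by reading DMA_CSR back; the intended CSR
 * contents are tracked in esp->prev_hme_dmacsr and rewritten in full
 * each time (see sbus_esp_dma_invalidate() and
 * sbus_esp_send_dma_cmd()).
 */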
static void sbus_esp_dma_drain(struct esp *esp)
{
	u32 csr;
	int lim;

	if (esp->dmarev == dvmahme)
		return;

	csr = dma_read32(DMA_CSR);
	if (!(csr & DMA_FIFO_ISDRAIN))
		return;

	if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1)
		dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);

	lim = 1000;
	while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
		if (--lim == 0) {
			printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
			       esp->host->unique_id);
			break;
		}
		udelay(1);
	}
}

static void sbus_esp_dma_invalidate(struct esp *esp)
{
	if (esp->dmarev == dvmahme) {
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
					 (DMA_PARITY_OFF | DMA_2CLKS |
					  DMA_SCSI_DISAB | DMA_INT_ENAB)) &
					~(DMA_ST_WRITE | DMA_ENABLE));

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		/* This is necessary to avoid having the SCSI channel
		 * engine lock up on us.
		 */
		dma_write32(0, DMA_ADDR);
	} else {
		u32 val;
		int lim;

		lim = 1000;
		while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA will not "
				       "invalidate!\n", esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
		val |= DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
		val &= ~DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
	}
}
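/* In the non-HME path below, the dvmaesc1 case pads DMA_COUNT out to
 * the next page boundary past the end of the transfer, plus 16 bytes
 * of slop; the ESC1 gate array apparently cannot be trusted with an
 * exact terminal byte count.  The ESP chip itself is still programmed
 * with the exact esp_count, so the SCSI transfer terminates at the
 * requested length regardless.
 */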
static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				  u32 dma_count, int write, u8 cmd)
{
	u32 csr;

	BUG_ON(!(cmd & ESP_CMD_DMA));

	sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	if (esp->rev == FASHME) {
		sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
		sbus_esp_write8(esp, 0, FAS_RHI);

		scsi_esp_cmd(esp, cmd);

		csr = esp->prev_hme_dmacsr;
		csr |= DMA_SCSI_DISAB | DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		esp->prev_hme_dmacsr = csr;

		dma_write32(dma_count, DMA_COUNT);
		dma_write32(addr, DMA_ADDR);
		dma_write32(csr, DMA_CSR);
	} else {
		csr = dma_read32(DMA_CSR);
		csr |= DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		dma_write32(csr, DMA_CSR);
		if (esp->dmarev == dvmaesc1) {
			u32 end = PAGE_ALIGN(addr + dma_count + 16U);
			dma_write32(end - addr, DMA_COUNT);
		}
		dma_write32(addr, DMA_ADDR);

		scsi_esp_cmd(esp, cmd);
	}
}

static int sbus_esp_dma_error(struct esp *esp)
{
	u32 csr = dma_read32(DMA_CSR);

	if (csr & DMA_HNDL_ERROR)
		return 1;

	return 0;
}

static const struct esp_driver_ops sbus_esp_ops = {
	.esp_write8	= sbus_esp_write8,
	.esp_read8	= sbus_esp_read8,
	.map_single	= sbus_esp_map_single,
	.map_sg		= sbus_esp_map_sg,
	.unmap_single	= sbus_esp_unmap_single,
	.unmap_sg	= sbus_esp_unmap_sg,
	.irq_pending	= sbus_esp_irq_pending,
	.reset_dma	= sbus_esp_reset_dma,
	.dma_drain	= sbus_esp_dma_drain,
	.dma_invalidate	= sbus_esp_dma_invalidate,
	.send_dma_cmd	= sbus_esp_send_dma_cmd,
	.dma_error	= sbus_esp_dma_error,
};

static int __devinit esp_sbus_probe_one(struct of_device *op,
					struct of_device *espdma,
					int hme)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	int err;

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	err = -ENOMEM;
	if (!host)
		goto fail;

	host->max_id = (hme ? 16 : 8);
	esp = shost_priv(host);

	esp->host = host;
	esp->dev = op;
	esp->ops = &sbus_esp_ops;

	if (hme)
		esp->flags |= ESP_FLAG_WIDE_CAPABLE;

	err = esp_sbus_setup_dma(esp, espdma);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_regs(esp, hme);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_command_block(esp);
	if (err < 0)
		goto fail_unmap_regs;

	err = esp_sbus_register_irq(esp);
	if (err < 0)
		goto fail_unmap_command_block;

	esp_sbus_get_props(esp, espdma);

	/* The ESC1 DVMA can come up with the reset bit set, so make
	 * sure it is clear before we touch the ESP chip.
	 */
	if (esp->dmarev == dvmaesc1) {
		u32 val = dma_read32(DMA_CSR);

		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}

	dev_set_drvdata(&op->dev, esp);

	err = scsi_esp_register(esp, &op->dev);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);
fail_unmap_command_block:
	dma_free_coherent(&op->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
fail_unmap_regs:
	of_iounmap(&op->resource[(hme ? 1 : 0)], esp->regs, SBUS_ESP_REG_SIZE);
fail_unlink:
	scsi_host_put(host);
fail:
	return err;
}

static int __devinit esp_sbus_probe(struct of_device *op, const struct of_device_id *match)
{
	struct device_node *dma_node = NULL;
	struct device_node *dp = op->node;
	struct of_device *dma_of = NULL;
	int hme = 0;

	if (dp->parent &&
	    (!strcmp(dp->parent->name, "espdma") ||
	     !strcmp(dp->parent->name, "dma")))
		dma_node = dp->parent;
	else if (!strcmp(dp->name, "SUNW,fas")) {
		dma_node = op->node;
		hme = 1;
	}
	if (dma_node)
		dma_of = of_find_device_by_node(dma_node);
	if (!dma_of)
		return -ENODEV;

	return esp_sbus_probe_one(op, dma_of, hme);
}
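/* Teardown below mirrors probe in reverse: detach from the SCSI
 * midlayer, mask DVMA interrupts, release the IRQ, then free the
 * command block and register mappings before dropping the final
 * host reference.
 */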
static int __devexit esp_sbus_remove(struct of_device *op)
{
	struct esp *esp = dev_get_drvdata(&op->dev);
	struct of_device *dma_of = esp->dma;
	unsigned int irq = esp->host->irq;
	bool is_hme;
	u32 val;

	scsi_esp_unregister(esp);

	/* Disable interrupts. */
	val = dma_read32(DMA_CSR);
	dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);

	free_irq(irq, esp);

	is_hme = (esp->dmarev == dvmahme);

	dma_free_coherent(&op->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
	of_iounmap(&op->resource[(is_hme ? 1 : 0)], esp->regs,
		   SBUS_ESP_REG_SIZE);
	of_iounmap(&dma_of->resource[0], esp->dma_regs,
		   resource_size(&dma_of->resource[0]));

	scsi_host_put(esp->host);

	dev_set_drvdata(&op->dev, NULL);

	return 0;
}

static const struct of_device_id esp_match[] = {
	{
		.name = "SUNW,esp",
	},
	{
		.name = "SUNW,fas",
	},
	{
		.name = "esp",
	},
	{},
};
MODULE_DEVICE_TABLE(of, esp_match);

static struct of_platform_driver esp_sbus_driver = {
	.name		= "esp",
	.match_table	= esp_match,
	.probe		= esp_sbus_probe,
	.remove		= __devexit_p(esp_sbus_remove),
};

static int __init sunesp_init(void)
{
	return of_register_driver(&esp_sbus_driver, &of_bus_type);
}

static void __exit sunesp_exit(void)
{
	of_unregister_driver(&esp_sbus_driver);
}

MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_init(sunesp_init);
module_exit(sunesp_exit);