// SPDX-License-Identifier: GPL-2.0
/*
 * ESP front-end for Amiga ZORRO SCSI systems.
 *
 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
 *
 * Copyright (C) 2011,2018 Michael Schmitz (schmitz@debian.org) for
 *               migration to ESP SCSI core
 *
 * Copyright (C) 2013 Tuomas Vainikka (tuomas.vainikka@aalto.fi) for
 *               Blizzard 1230 DMA and probe function fixes
 */
/*
 * ZORRO bus code from:
 */
/*
 * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
 *		Amiga MacroSystemUS WarpEngine SCSI controller.
 *		Amiga Technologies/DKB A4091 SCSI controller.
 *
 * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
 * plus modifications of the 53c7xx.c driver to support the Amiga.
 *
 * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/zorro.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_spi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>

#include "esp_scsi.h"

MODULE_AUTHOR("Michael Schmitz <schmitz@debian.org>");
MODULE_DESCRIPTION("Amiga Zorro NCR53C9x (ESP) driver");
MODULE_LICENSE("GPL");

/* per-board register layout definitions */

/* Blizzard 1230 DMA interface */

struct blz1230_dma_registers {
	unsigned char dma_addr;		/* DMA address      [0x0000] */
	unsigned char dmapad2[0x7fff];
	unsigned char dma_latch;	/* DMA latch        [0x8000] */
};

/* Blizzard 1230II DMA interface */

struct blz1230II_dma_registers {
	unsigned char dma_addr;		/* DMA address      [0x0000] */
	unsigned char dmapad2[0xf];
	unsigned char dma_latch;	/* DMA latch        [0x0010] */
};

/* Blizzard 2060 DMA interface */

struct blz2060_dma_registers {
	unsigned char dma_led_ctrl;	/* DMA led control   [0x000] */
	unsigned char dmapad1[0x0f];
	unsigned char dma_addr0;	/* DMA address (MSB) [0x010] */
	unsigned char dmapad2[0x03];
	unsigned char dma_addr1;	/* DMA address       [0x014] */
	unsigned char dmapad3[0x03];
	unsigned char dma_addr2;	/* DMA address       [0x018] */
	unsigned char dmapad4[0x03];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x01c] */
};

/* DMA control bits */
#define DMA_WRITE 0x80000000

/* Cyberstorm DMA interface */

struct cyber_dma_registers {
	unsigned char dma_addr0;	/* DMA address (MSB) [0x000] */
	unsigned char dmapad1[1];
	unsigned char dma_addr1;	/* DMA address       [0x002] */
	unsigned char dmapad2[1];
	unsigned char dma_addr2;	/* DMA address       [0x004] */
	unsigned char dmapad3[1];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x006] */
	unsigned char dmapad4[0x3fb];
	unsigned char cond_reg;		/* DMA cond    (ro)  [0x402] */
#define ctrl_reg  cond_reg		/* DMA control (wo)  [0x402] */
};

/* DMA control bits */
#define CYBER_DMA_WRITE  0x40	/* DMA direction. 1 = write */
#define CYBER_DMA_Z3     0x20	/* 16 (Z2) or 32 (CHIP/Z3) bit DMA transfer */

/* DMA status bits */
#define CYBER_DMA_HNDL_INTR 0x80	/* DMA IRQ pending? */

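/*
 * Note on the ctrl_reg/cond_reg aliasing above (and in the CyberStorm II
 * and Fastlane layouts below): the same offset reads back as the DMA
 * condition/status register but is write-only as the control register,
 * so the Cyberstorm I and Fastlane code keeps the last value written in
 * the ctrl_data shadow of struct zorro_esp_priv instead of reading it
 * back.
 */
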
/* The CyberStorm II DMA interface */
struct cyberII_dma_registers {
	unsigned char cond_reg;		/* DMA cond    (ro)  [0x000] */
#define ctrl_reg  cond_reg		/* DMA control (wo)  [0x000] */
	unsigned char dmapad4[0x3f];
	unsigned char dma_addr0;	/* DMA address (MSB) [0x040] */
	unsigned char dmapad1[3];
	unsigned char dma_addr1;	/* DMA address       [0x044] */
	unsigned char dmapad2[3];
	unsigned char dma_addr2;	/* DMA address       [0x048] */
	unsigned char dmapad3[3];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x04c] */
};

/* Fastlane DMA interface */

struct fastlane_dma_registers {
	unsigned char cond_reg;		/* DMA status  (ro) [0x0000] */
#define ctrl_reg  cond_reg		/* DMA control (wo) [0x0000] */
	char dmapad1[0x3f];
	unsigned char clear_strobe;	/* DMA clear   (wo) [0x0040] */
};

/*
 * The controller registers can be found in the Z2 config area at these
 * offsets:
 */
#define FASTLANE_ESP_ADDR 0x1000001

/* DMA status bits */
#define FASTLANE_DMA_MINT  0x80
#define FASTLANE_DMA_IACT  0x40
#define FASTLANE_DMA_CREQ  0x20

/* DMA control bits */
#define FASTLANE_DMA_FCODE  0xa0
#define FASTLANE_DMA_MASK   0xf3
#define FASTLANE_DMA_WRITE  0x08	/* 1 = write */
#define FASTLANE_DMA_ENABLE 0x04	/* Enable DMA */
#define FASTLANE_DMA_EDI    0x02	/* Enable DMA IRQ ? */
#define FASTLANE_DMA_ESI    0x01	/* Enable SCSI IRQ */

/*
 * private data used for driver
 */
struct zorro_esp_priv {
	struct esp *esp;		/* our ESP instance - for Scsi_host* */
	void __iomem *board_base;	/* virtual address (Zorro III board) */
	int zorro3;			/* board is Zorro III */
	unsigned char ctrl_data;	/* shadow copy of ctrl_reg */
};

/*
 * On all implementations except for the Oktagon, padding between ESP
 * registers is three bytes.
 * On Oktagon, it is one byte - use a different accessor there.
 *
 * Oktagon needs PDMA - currently unsupported!
 */

static void zorro_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	writeb(val, esp->regs + (reg * 4UL));
}

static u8 zorro_esp_read8(struct esp *esp, unsigned long reg)
{
	return readb(esp->regs + (reg * 4UL));
}

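/*
 * With three pad bytes between registers, consecutive ESP registers sit
 * at four-byte intervals, hence the reg * 4 scaling in the accessors
 * above (and the ESP_FDATA * 4 fifo_reg setup in zorro_esp_probe()).
 */
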
static int zorro_esp_irq_pending(struct esp *esp)
{
	/* check ESP status register; DMA has no status reg. */
	if (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
		return 1;

	return 0;
}

static int cyber_esp_irq_pending(struct esp *esp)
{
	struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
	unsigned char dma_status = readb(&dregs->cond_reg);

	/* It's important to check the DMA IRQ bit in the correct way! */
	return ((zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR) &&
		(dma_status & CYBER_DMA_HNDL_INTR));
}

static int fastlane_esp_irq_pending(struct esp *esp)
{
	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
	unsigned char dma_status;

	dma_status = readb(&dregs->cond_reg);

	if (dma_status & FASTLANE_DMA_IACT)
		return 0;	/* not our IRQ */

	/* Return non-zero if ESP requested IRQ */
	return (
	   (dma_status & FASTLANE_DMA_CREQ) &&
	   (!(dma_status & FASTLANE_DMA_MINT)) &&
	   (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR));
}

static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
					u32 dma_len)
{
	return dma_len > (1U << 16) ? (1U << 16) : dma_len;
}

static u32 fastlane_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
					u32 dma_len)
{
	/* The old driver used 0xfffc as limit, so do that here too */
	return dma_len > 0xfffc ? 0xfffc : dma_len;
}

static void zorro_esp_reset_dma(struct esp *esp)
{
	/* nothing to do here */
}

static void zorro_esp_dma_drain(struct esp *esp)
{
	/* nothing to do here */
}

static void zorro_esp_dma_invalidate(struct esp *esp)
{
	/* nothing to do here */
}

static void fastlane_esp_dma_invalidate(struct esp *esp)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
	unsigned char *ctrl_data = &zep->ctrl_data;

	*ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK);
	writeb(0, &dregs->clear_strobe);
	z_writel(0, zep->board_base);
}

/* Blizzard 1230/60 SCSI-IV DMA */

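/*
 * The three Blizzard send_dma_cmd variants below share the same address
 * encoding: the bus address is shifted right by one bit and the top bit
 * (DMA_WRITE) encodes the direction - cleared for DMA receive (SCSI to
 * memory), set for DMA send.  They differ only in how the address bytes
 * are presented to the board's latch/address registers.
 */
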
static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct blz1230_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/*
	 * Use PIO if transferring message bytes to esp->command_block_dma.
	 * PIO requires a virtual address, so substitute esp->command_block
	 * for addr.
	 */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	/* Clear the results of a possible prior esp->ops->send_dma_cmd() */
	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);

	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

	writeb((addr >> 24) & 0xff, &dregs->dma_latch);
	writeb((addr >> 24) & 0xff, &dregs->dma_addr);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr);
	writeb(addr & 0xff, &dregs->dma_addr);

	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	scsi_esp_cmd(esp, cmd);
}

/* Blizzard 1230-II DMA */

static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct blz1230II_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);

	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

	writeb((addr >> 24) & 0xff, &dregs->dma_latch);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr);
	writeb(addr & 0xff, &dregs->dma_addr);

	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	scsi_esp_cmd(esp, cmd);
}

/* Blizzard 2060 DMA */

static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct blz2060_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);

	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

	writeb(addr & 0xff, &dregs->dma_addr3);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);

	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	scsi_esp_cmd(esp, cmd);
}

/* Cyberstorm I DMA */

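/*
 * The Cyberstorm boards encode the transfer direction in bit 0 of the
 * DMA address (cleared for DMA receive, set for DMA send).  The
 * Cyberstorm I additionally mirrors the direction in CYBER_DMA_WRITE of
 * the write-only control register (via the ctrl_data shadow) and keeps
 * CYBER_DMA_Z3 clear, i.e. 16-bit Zorro II style transfers.
 */
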
static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;
	unsigned char *ctrl_data = &zep->ctrl_data;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	if (write) {
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);
		addr |= 1;
	}

	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
	writeb(addr & 0xff, &dregs->dma_addr3);

	if (write)
		*ctrl_data &= ~(CYBER_DMA_WRITE);
	else
		*ctrl_data |= CYBER_DMA_WRITE;

	*ctrl_data &= ~(CYBER_DMA_Z3);	/* Z2, do 16 bit DMA */

	writeb(*ctrl_data, &dregs->ctrl_reg);

	scsi_esp_cmd(esp, cmd);
}

/* Cyberstorm II DMA */

static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct cyberII_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	if (write) {
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);
		addr |= 1;
	}

	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
	writeb(addr & 0xff, &dregs->dma_addr3);

	scsi_esp_cmd(esp, cmd);
}

/* Fastlane DMA */

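/*
 * The Fastlane is set up differently: after clearing the strobe
 * register, the DMA address is written into the board's Zorro III
 * address window at offset (addr & 0x00ffffff) from board_base (mapped
 * in zorro_esp_probe()), and the transfer is then enabled through the
 * shadowed control register, with FASTLANE_DMA_WRITE set for DMA send.
 */
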
static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;
	unsigned char *ctrl_data = &zep->ctrl_data;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	if (write) {
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);
		addr |= 1;
	}

	writeb(0, &dregs->clear_strobe);
	z_writel(addr, ((addr & 0x00ffffff) + zep->board_base));

	if (write) {
		*ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK) |
				FASTLANE_DMA_ENABLE;
	} else {
		*ctrl_data = ((*ctrl_data & FASTLANE_DMA_MASK) |
				FASTLANE_DMA_ENABLE |
				FASTLANE_DMA_WRITE);
	}

	writeb(*ctrl_data, &dregs->ctrl_reg);

	scsi_esp_cmd(esp, cmd);
}

static int zorro_esp_dma_error(struct esp *esp)
{
	return esp->send_cmd_error;
}

/* per-board ESP driver ops */

static const struct esp_driver_ops blz1230_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= zorro_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_blz1230_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

static const struct esp_driver_ops blz1230II_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= zorro_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_blz1230II_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

static const struct esp_driver_ops blz2060_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= zorro_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_blz2060_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

static const struct esp_driver_ops cyber_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= cyber_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_cyber_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

static const struct esp_driver_ops cyberII_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= zorro_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_cyberII_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

static const struct esp_driver_ops fastlane_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= fastlane_esp_irq_pending,
	.dma_length_limit	= fastlane_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= fastlane_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_fastlane_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

/* Zorro driver config data */

struct zorro_driver_data {
	const char *name;
	unsigned long offset;
	unsigned long dma_offset;
	int absolute;	/* offset is absolute address */
	int scsi_option;
	const struct esp_driver_ops *esp_ops;
};

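/*
 * offset/dma_offset are added to the board's Zorro base address unless
 * absolute is set.  scsi_option marks boards (Blizzard 12x0, CyberStorm
 * II) on which the SCSI part is optional; zorro_esp_probe() verifies
 * that an ESP chip actually responds before registering the host.
 */
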
/* board types */

enum {
	ZORRO_BLZ1230,
	ZORRO_BLZ1230II,
	ZORRO_BLZ2060,
	ZORRO_CYBER,
	ZORRO_CYBERII,
	ZORRO_FASTLANE,
};

/* per-board config data */

static const struct zorro_driver_data zorro_esp_boards[] = {
	[ZORRO_BLZ1230] = {
		.name		= "Blizzard 1230",
		.offset		= 0x8000,
		.dma_offset	= 0x10000,
		.scsi_option	= 1,
		.esp_ops	= &blz1230_esp_ops,
	},
	[ZORRO_BLZ1230II] = {
		.name		= "Blizzard 1230II",
		.offset		= 0x10000,
		.dma_offset	= 0x10021,
		.scsi_option	= 1,
		.esp_ops	= &blz1230II_esp_ops,
	},
	[ZORRO_BLZ2060] = {
		.name		= "Blizzard 2060",
		.offset		= 0x1ff00,
		.dma_offset	= 0x1ffe0,
		.esp_ops	= &blz2060_esp_ops,
	},
	[ZORRO_CYBER] = {
		.name		= "CyberStormI",
		.offset		= 0xf400,
		.dma_offset	= 0xf800,
		.esp_ops	= &cyber_esp_ops,
	},
	[ZORRO_CYBERII] = {
		.name		= "CyberStormII",
		.offset		= 0x1ff03,
		.dma_offset	= 0x1ff43,
		.scsi_option	= 1,
		.esp_ops	= &cyberII_esp_ops,
	},
	[ZORRO_FASTLANE] = {
		.name		= "Fastlane",
		.offset		= 0x1000001,
		.dma_offset	= 0x1000041,
		.esp_ops	= &fastlane_esp_ops,
	},
};

static const struct zorro_device_id zorro_esp_zorro_tbl[] = {
	{	/* Blizzard 1230 IV */
		.id = ZORRO_ID(PHASE5, 0x11, 0),
		.driver_data = ZORRO_BLZ1230,
	},
	{	/* Blizzard 1230 II (Zorro II) or Fastlane (Zorro III) */
		.id = ZORRO_ID(PHASE5, 0x0B, 0),
		.driver_data = ZORRO_BLZ1230II,
	},
	{	/* Blizzard 2060 */
		.id = ZORRO_ID(PHASE5, 0x18, 0),
		.driver_data = ZORRO_BLZ2060,
	},
	{	/* Cyberstorm */
		.id = ZORRO_ID(PHASE5, 0x0C, 0),
		.driver_data = ZORRO_CYBER,
	},
	{	/* Cyberstorm II */
		.id = ZORRO_ID(PHASE5, 0x19, 0),
		.driver_data = ZORRO_CYBERII,
	},
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, zorro_esp_zorro_tbl);

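/*
 * Phase5 product ID 0x0B is shared by the Zorro II Blizzard 1230 II and
 * the Zorro III Fastlane; zorro_esp_probe() below tells them apart by
 * bus type and fixes up the driver data accordingly.
 */
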
static int zorro_esp_probe(struct zorro_dev *z,
			   const struct zorro_device_id *ent)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	const struct zorro_driver_data *zdd;
	struct zorro_esp_priv *zep;
	unsigned long board, ioaddr, dmaaddr;
	int err;

	board = zorro_resource_start(z);
	zdd = &zorro_esp_boards[ent->driver_data];

	pr_info("%s found at address 0x%lx.\n", zdd->name, board);

	zep = kzalloc(sizeof(*zep), GFP_KERNEL);
	if (!zep) {
		pr_err("Can't allocate device private data!\n");
		return -ENOMEM;
	}

	/* let's figure out whether we have a Zorro II or Zorro III board */
	if ((z->rom.er_Type & ERT_TYPEMASK) == ERT_ZORROIII) {
		if (board > 0xffffff)
			zep->zorro3 = 1;
	} else {
		/*
		 * Even though most of these boards identify as Zorro II,
		 * they are in fact CPU expansion slot boards and have full
		 * access to all of memory. Fix up DMA bitmask here.
		 */
		z->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	}

	/*
	 * If Zorro III and ID matches Fastlane, our device table entry
	 * contains data for the Blizzard 1230 II board which does share the
	 * same ID. Fix up device table entry here.
	 * TODO: Some Cyberstorm060 boards also share this ID but would need
	 * to use the Cyberstorm I driver data ... we catch this by checking
	 * for presence of ESP chip later, but don't try to fix up yet.
	 */
	if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
		pr_info("%s at address 0x%lx is Fastlane Z3, fixing data!\n",
			zdd->name, board);
		zdd = &zorro_esp_boards[ZORRO_FASTLANE];
	}

	if (zdd->absolute) {
		ioaddr = zdd->offset;
		dmaaddr = zdd->dma_offset;
	} else {
		ioaddr = board + zdd->offset;
		dmaaddr = board + zdd->dma_offset;
	}

	if (!zorro_request_device(z, zdd->name)) {
		pr_err("cannot reserve region 0x%lx, abort\n",
			board);
		err = -EBUSY;
		goto fail_free_zep;
	}

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	if (!host) {
		pr_err("No host detected; board configuration problem?\n");
		err = -ENOMEM;
		goto fail_release_device;
	}

	host->base		= ioaddr;
	host->this_id		= 7;

	esp			= shost_priv(host);
	esp->host		= host;
	esp->dev		= &z->dev;

	esp->scsi_id		= host->this_id;
	esp->scsi_id_mask	= (1 << esp->scsi_id);

	esp->cfreq = 40000000;

	zep->esp = esp;

	dev_set_drvdata(esp->dev, zep);

	/* additional setup required for Fastlane */
	if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
		/* map full address space up to ESP base for DMA */
		zep->board_base = ioremap_nocache(board,
						  FASTLANE_ESP_ADDR - 1);
		if (!zep->board_base) {
			pr_err("Cannot allocate board address space\n");
			err = -ENOMEM;
			goto fail_free_host;
		}
		/* initialize DMA control shadow register */
		zep->ctrl_data = (FASTLANE_DMA_FCODE |
				  FASTLANE_DMA_EDI | FASTLANE_DMA_ESI);
	}

	esp->ops = zdd->esp_ops;

	if (ioaddr > 0xffffff)
		esp->regs = ioremap_nocache(ioaddr, 0x20);
	else
		/* ZorroII address space remapped nocache by early startup */
		esp->regs = ZTWO_VADDR(ioaddr);

	if (!esp->regs) {
		err = -ENOMEM;
		goto fail_unmap_fastlane;
	}

	esp->fifo_reg = esp->regs + ESP_FDATA * 4;

	/* Check whether a Blizzard 12x0 or CyberstormII really has SCSI */
	if (zdd->scsi_option) {
		zorro_esp_write8(esp, (ESP_CONFIG1_PENABLE | 7), ESP_CFG1);
		if (zorro_esp_read8(esp, ESP_CFG1) != (ESP_CONFIG1_PENABLE|7)) {
			err = -ENODEV;
			goto fail_unmap_regs;
		}
	}

	if (zep->zorro3) {
		/*
		 * Only Fastlane Z3 for now - add switch for correct struct
		 * dma_registers size if adding any more
		 */
		esp->dma_regs = ioremap_nocache(dmaaddr,
				sizeof(struct fastlane_dma_registers));
	} else
		/* ZorroII address space remapped nocache by early startup */
		esp->dma_regs = ZTWO_VADDR(dmaaddr);

	if (!esp->dma_regs) {
		err = -ENOMEM;
		goto fail_unmap_regs;
	}

	esp->command_block = dma_alloc_coherent(esp->dev, 16,
						&esp->command_block_dma,
						GFP_KERNEL);

	if (!esp->command_block) {
		err = -ENOMEM;
		goto fail_unmap_dma_regs;
	}

	host->irq = IRQ_AMIGA_PORTS;
	err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
			  "Amiga Zorro ESP", esp);
	if (err < 0) {
		err = -ENODEV;
		goto fail_free_command_block;
	}

	/* register the chip */
	err = scsi_esp_register(esp);

	if (err) {
		err = -ENOMEM;
		goto fail_free_irq;
	}

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);

fail_free_command_block:
	dma_free_coherent(esp->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);

fail_unmap_dma_regs:
	if (zep->zorro3)
		iounmap(esp->dma_regs);

fail_unmap_regs:
	if (ioaddr > 0xffffff)
		iounmap(esp->regs);

fail_unmap_fastlane:
	if (zep->zorro3)
		iounmap(zep->board_base);

fail_free_host:
	scsi_host_put(host);

fail_release_device:
	zorro_release_device(z);

fail_free_zep:
	kfree(zep);

	return err;
}

static void zorro_esp_remove(struct zorro_dev *z)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(&z->dev);
	struct esp *esp	= zep->esp;
	struct Scsi_Host *host = esp->host;

	scsi_esp_unregister(esp);

	free_irq(host->irq, esp);
	dma_free_coherent(esp->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);

	if (zep->zorro3) {
		iounmap(zep->board_base);
		iounmap(esp->dma_regs);
	}

	if (host->base > 0xffffff)
		iounmap(esp->regs);

	scsi_host_put(host);

	zorro_release_device(z);

	kfree(zep);
}

static struct zorro_driver zorro_esp_driver = {
	.name	  = KBUILD_MODNAME,
	.id_table = zorro_esp_zorro_tbl,
	.probe	  = zorro_esp_probe,
	.remove	  = zorro_esp_remove,
};

static int __init zorro_esp_scsi_init(void)
{
	return zorro_register_driver(&zorro_esp_driver);
}

static void __exit zorro_esp_scsi_exit(void)
{
	zorro_unregister_driver(&zorro_esp_driver);
}

module_init(zorro_esp_scsi_init);
module_exit(zorro_esp_scsi_exit);