/*
 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
 * of PCI-SCSI IO processors.
 *
 * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
 * Copyright (c) 2003-2005  Matthew Wilcox <matthew@wil.cx>
 *
 * This driver is derived from the Linux sym53c8xx driver.
 * Copyright (C) 1998-2000  Gerard Roudier
 *
 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
 * a port of the FreeBSD ncr driver to Linux-1.2.13.
 *
 * The original ncr driver has been written for 386bsd and FreeBSD by
 *         Wolfgang Stanglmeier        <wolf@cologne.de>
 *         Stefan Esser                <se@mi.Uni-Koeln.de>
 * Copyright (C) 1994  Wolfgang Stanglmeier
 *
 * Other major contributions:
 *
 * NVRAM detection and reading.
 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
 *
 *-----------------------------------------------------------------------------
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>

#include "sym_glue.h"
#include "sym_nvram.h"

#define NAME53C		"sym53c"
#define NAME53C8XX	"sym53c8xx"

#define IRQ_FMT "%d"
#define IRQ_PRM(x) (x)

struct sym_driver_setup sym_driver_setup = SYM_LINUX_DRIVER_SETUP;
unsigned int sym_debug_flags = 0;

static char *excl_string;
static char *safe_string;
module_param_named(cmd_per_lun, sym_driver_setup.max_tag, ushort, 0);
module_param_string(tag_ctrl, sym_driver_setup.tag_ctrl, 100, 0);
module_param_named(burst, sym_driver_setup.burst_order, byte, 0);
module_param_named(led, sym_driver_setup.scsi_led, byte, 0);
module_param_named(diff, sym_driver_setup.scsi_diff, byte, 0);
module_param_named(irqm, sym_driver_setup.irq_mode, byte, 0);
module_param_named(buschk, sym_driver_setup.scsi_bus_check, byte, 0);
module_param_named(hostid, sym_driver_setup.host_id, byte, 0);
module_param_named(verb, sym_driver_setup.verbose, byte, 0);
module_param_named(debug, sym_debug_flags, uint, 0);
module_param_named(settle, sym_driver_setup.settle_delay, byte, 0);
module_param_named(nvram, sym_driver_setup.use_nvram, byte, 0);
module_param_named(excl, excl_string, charp, 0);
module_param_named(safe, safe_string, charp, 0);

MODULE_PARM_DESC(cmd_per_lun, "The maximum number of tags to use by default");
MODULE_PARM_DESC(tag_ctrl, "More detailed control over tags per LUN");
MODULE_PARM_DESC(burst, "Maximum burst.  0 to disable, 255 to read from registers");
MODULE_PARM_DESC(led, "Set to 1 to enable LED support");
MODULE_PARM_DESC(diff, "0 for no differential mode, 1 for BIOS, 2 for always, 3 for not GPIO3");
MODULE_PARM_DESC(irqm, "0 for open drain, 1 to leave alone, 2 for totem pole");
MODULE_PARM_DESC(buschk, "0 to not check, 1 for detach on error, 2 for warn on error");
MODULE_PARM_DESC(hostid, "The SCSI ID to use for the host adapters");
MODULE_PARM_DESC(verb, "0 for minimal verbosity, 1 for normal, 2 for excessive");
MODULE_PARM_DESC(debug, "Set bits to enable debugging");
MODULE_PARM_DESC(settle, "Settle delay in seconds.  Default 3");
MODULE_PARM_DESC(nvram, "Option currently not used");
MODULE_PARM_DESC(excl, "List ioport addresses here to prevent controllers from being attached");
MODULE_PARM_DESC(safe, "Set other settings to a \"safe mode\"");

MODULE_LICENSE("GPL");
MODULE_VERSION(SYM_VERSION);
MODULE_AUTHOR("Matthew Wilcox <matthew@wil.cx>");
MODULE_DESCRIPTION("NCR, Symbios and LSI 8xx and 1010 PCI SCSI adapters");

/*
 * Parse the "excl" and "safe" module parameters.
 */
static void sym2_setup_params(void)
{
	char *p = excl_string;
	int xi = 0;

	while (p && (xi < 8)) {
		char *next_p;
		int val = (int) simple_strtoul(p, &next_p, 0);

		if (next_p == p)	/* no number found: stop parsing */
			break;
		sym_driver_setup.excludes[xi++] = val;
		if (*next_p == ',')	/* assume a comma-separated list */
			next_p++;
		p = next_p;
	}

	if (safe_string) {
		if (*safe_string == 'y') {
			sym_driver_setup.max_tag = 0;
			sym_driver_setup.burst_order = 0;
			sym_driver_setup.scsi_led = 0;
			sym_driver_setup.scsi_diff = 1;
			sym_driver_setup.irq_mode = 0;
			sym_driver_setup.scsi_bus_check = 2;
			sym_driver_setup.host_id = 7;
			sym_driver_setup.verbose = 2;
			sym_driver_setup.settle_delay = 10;
			sym_driver_setup.use_nvram = 1;
		} else if (*safe_string != 'n') {
			printk(KERN_WARNING NAME53C8XX
				": Ignoring parameter %s passed to safe option\n",
				safe_string);
		}
	}
}

static struct scsi_transport_template *sym2_transport_template = NULL;

/*
 *  Driver private area in the SCSI command structure.
 */
struct sym_ucmd {		/* Override the SCSI pointer structure */
	dma_addr_t	data_mapping;
	unsigned char	data_mapped;
	unsigned char	to_do;			/* For error handling */
	void (*old_done)(struct scsi_cmnd *);	/* For error handling */
	struct completion *eh_done;		/* For error handling */
};

#define SYM_UCMD_PTR(cmd)  ((struct sym_ucmd *)(&(cmd)->SCp))
#define SYM_SOFTC_PTR(cmd) sym_get_hcb(cmd->device->host)

static void __unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
{
	if (SYM_UCMD_PTR(cmd)->data_mapped)
		scsi_dma_unmap(cmd);

	SYM_UCMD_PTR(cmd)->data_mapped = 0;
}

static int __map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
{
	int use_sg;

	use_sg = scsi_dma_map(cmd);
	if (use_sg > 0) {
		SYM_UCMD_PTR(cmd)->data_mapped  = 2;
		SYM_UCMD_PTR(cmd)->data_mapping = use_sg;
	}

	return use_sg;
}

#define unmap_scsi_data(np, cmd)	\
		__unmap_scsi_data(np->s.device, cmd)
#define map_scsi_sg_data(np, cmd)	\
		__map_scsi_sg_data(np->s.device, cmd)
/*
 *  Complete a pending CAM CCB.
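 *  Unmaps any DMA mapping set up for the command and hands it back
 *  to the SCSI midlayer through its ->scsi_done() callback.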
174 */ 175 void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *cmd) 176 { 177 unmap_scsi_data(np, cmd); 178 cmd->scsi_done(cmd); 179 } 180 181 static void sym_xpt_done2(struct sym_hcb *np, struct scsi_cmnd *cmd, int cam_status) 182 { 183 sym_set_cam_status(cmd, cam_status); 184 sym_xpt_done(np, cmd); 185 } 186 187 188 /* 189 * Tell the SCSI layer about a BUS RESET. 190 */ 191 void sym_xpt_async_bus_reset(struct sym_hcb *np) 192 { 193 printf_notice("%s: SCSI BUS has been reset.\n", sym_name(np)); 194 np->s.settle_time = jiffies + sym_driver_setup.settle_delay * HZ; 195 np->s.settle_time_valid = 1; 196 if (sym_verbose >= 2) 197 printf_info("%s: command processing suspended for %d seconds\n", 198 sym_name(np), sym_driver_setup.settle_delay); 199 } 200 201 /* 202 * Tell the SCSI layer about a BUS DEVICE RESET message sent. 203 */ 204 void sym_xpt_async_sent_bdr(struct sym_hcb *np, int target) 205 { 206 printf_notice("%s: TARGET %d has been reset.\n", sym_name(np), target); 207 } 208 209 /* 210 * Choose the more appropriate CAM status if 211 * the IO encountered an extended error. 212 */ 213 static int sym_xerr_cam_status(int cam_status, int x_status) 214 { 215 if (x_status) { 216 if (x_status & XE_PARITY_ERR) 217 cam_status = DID_PARITY; 218 else if (x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) 219 cam_status = DID_ERROR; 220 else if (x_status & XE_BAD_PHASE) 221 cam_status = DID_ERROR; 222 else 223 cam_status = DID_ERROR; 224 } 225 return cam_status; 226 } 227 228 /* 229 * Build CAM result for a failed or auto-sensed IO. 230 */ 231 void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid) 232 { 233 struct scsi_cmnd *cmd = cp->cmd; 234 u_int cam_status, scsi_status, drv_status; 235 236 drv_status = 0; 237 cam_status = DID_OK; 238 scsi_status = cp->ssss_status; 239 240 if (cp->host_flags & HF_SENSE) { 241 scsi_status = cp->sv_scsi_status; 242 resid = cp->sv_resid; 243 if (sym_verbose && cp->sv_xerr_status) 244 sym_print_xerr(cmd, cp->sv_xerr_status); 245 if (cp->host_status == HS_COMPLETE && 246 cp->ssss_status == S_GOOD && 247 cp->xerr_status == 0) { 248 cam_status = sym_xerr_cam_status(DID_OK, 249 cp->sv_xerr_status); 250 drv_status = DRIVER_SENSE; 251 /* 252 * Bounce back the sense data to user. 253 */ 254 memset(&cmd->sense_buffer, 0, sizeof(cmd->sense_buffer)); 255 memcpy(cmd->sense_buffer, cp->sns_bbuf, 256 min(sizeof(cmd->sense_buffer), 257 (size_t)SYM_SNS_BBUF_LEN)); 258 #if 0 259 /* 260 * If the device reports a UNIT ATTENTION condition 261 * due to a RESET condition, we should consider all 262 * disconnect CCBs for this unit as aborted. 263 */ 264 if (1) { 265 u_char *p; 266 p = (u_char *) cmd->sense_data; 267 if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29) 268 sym_clear_tasks(np, DID_ABORT, 269 cp->target,cp->lun, -1); 270 } 271 #endif 272 } else { 273 /* 274 * Error return from our internal request sense. This 275 * is bad: we must clear the contingent allegiance 276 * condition otherwise the device will always return 277 * BUSY. Use a big stick. 
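 *  (The "big stick" is the target reset issued below; it clears the
 *  contingent allegiance state at the cost of aborting everything
 *  still queued to that target.)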
278 */ 279 sym_reset_scsi_target(np, cmd->device->id); 280 cam_status = DID_ERROR; 281 } 282 } else if (cp->host_status == HS_COMPLETE) /* Bad SCSI status */ 283 cam_status = DID_OK; 284 else if (cp->host_status == HS_SEL_TIMEOUT) /* Selection timeout */ 285 cam_status = DID_NO_CONNECT; 286 else if (cp->host_status == HS_UNEXPECTED) /* Unexpected BUS FREE*/ 287 cam_status = DID_ERROR; 288 else { /* Extended error */ 289 if (sym_verbose) { 290 sym_print_addr(cmd, "COMMAND FAILED (%x %x %x).\n", 291 cp->host_status, cp->ssss_status, 292 cp->xerr_status); 293 } 294 /* 295 * Set the most appropriate value for CAM status. 296 */ 297 cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status); 298 } 299 scsi_set_resid(cmd, resid); 300 cmd->result = (drv_status << 24) + (cam_status << 16) + scsi_status; 301 } 302 303 static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd) 304 { 305 int segment; 306 int use_sg; 307 308 cp->data_len = 0; 309 310 use_sg = map_scsi_sg_data(np, cmd); 311 if (use_sg > 0) { 312 struct scatterlist *sg; 313 struct sym_tcb *tp = &np->target[cp->target]; 314 struct sym_tblmove *data; 315 316 if (use_sg > SYM_CONF_MAX_SG) { 317 unmap_scsi_data(np, cmd); 318 return -1; 319 } 320 321 data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg]; 322 323 scsi_for_each_sg(cmd, sg, use_sg, segment) { 324 dma_addr_t baddr = sg_dma_address(sg); 325 unsigned int len = sg_dma_len(sg); 326 327 if ((len & 1) && (tp->head.wval & EWS)) { 328 len++; 329 cp->odd_byte_adjustment++; 330 } 331 332 sym_build_sge(np, &data[segment], baddr, len); 333 cp->data_len += len; 334 } 335 } else { 336 segment = -2; 337 } 338 339 return segment; 340 } 341 342 /* 343 * Queue a SCSI command. 344 */ 345 static int sym_queue_command(struct sym_hcb *np, struct scsi_cmnd *cmd) 346 { 347 struct scsi_device *sdev = cmd->device; 348 struct sym_tcb *tp; 349 struct sym_lcb *lp; 350 struct sym_ccb *cp; 351 int order; 352 353 /* 354 * Minimal checkings, so that we will not 355 * go outside our tables. 356 */ 357 if (sdev->id == np->myaddr) { 358 sym_xpt_done2(np, cmd, DID_NO_CONNECT); 359 return 0; 360 } 361 362 /* 363 * Retrieve the target descriptor. 364 */ 365 tp = &np->target[sdev->id]; 366 367 /* 368 * Select tagged/untagged. 369 */ 370 lp = sym_lp(tp, sdev->lun); 371 order = (lp && lp->s.reqtags) ? M_SIMPLE_TAG : 0; 372 373 /* 374 * Queue the SCSI IO. 375 */ 376 cp = sym_get_ccb(np, cmd, order); 377 if (!cp) 378 return 1; /* Means resource shortage */ 379 sym_queue_scsiio(np, cmd, cp); 380 return 0; 381 } 382 383 /* 384 * Setup buffers and pointers that address the CDB. 385 */ 386 static inline int sym_setup_cdb(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) 387 { 388 memcpy(cp->cdb_buf, cmd->cmnd, cmd->cmd_len); 389 390 cp->phys.cmd.addr = CCB_BA(cp, cdb_buf[0]); 391 cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len); 392 393 return 0; 394 } 395 396 /* 397 * Setup pointers that address the data and start the I/O. 398 */ 399 int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) 400 { 401 u32 lastp, goalp; 402 int dir; 403 404 /* 405 * Build the CDB. 406 */ 407 if (sym_setup_cdb(np, cmd, cp)) 408 goto out_abort; 409 410 /* 411 * No direction means no data. 412 */ 413 dir = cmd->sc_data_direction; 414 if (dir != DMA_NONE) { 415 cp->segments = sym_scatter(np, cp, cmd); 416 if (cp->segments < 0) { 417 sym_set_cam_status(cmd, DID_ERROR); 418 goto out_abort; 419 } 420 421 /* 422 * No segments means no data. 
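 *  (sym_scatter() mapped zero elements, so treat the request as if
 *  no data transfer had been asked for.)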
423 */ 424 if (!cp->segments) 425 dir = DMA_NONE; 426 } else { 427 cp->data_len = 0; 428 cp->segments = 0; 429 } 430 431 /* 432 * Set the data pointer. 433 */ 434 switch (dir) { 435 case DMA_BIDIRECTIONAL: 436 printk("%s: got DMA_BIDIRECTIONAL command", sym_name(np)); 437 sym_set_cam_status(cmd, DID_ERROR); 438 goto out_abort; 439 case DMA_TO_DEVICE: 440 goalp = SCRIPTA_BA(np, data_out2) + 8; 441 lastp = goalp - 8 - (cp->segments * (2*4)); 442 break; 443 case DMA_FROM_DEVICE: 444 cp->host_flags |= HF_DATA_IN; 445 goalp = SCRIPTA_BA(np, data_in2) + 8; 446 lastp = goalp - 8 - (cp->segments * (2*4)); 447 break; 448 case DMA_NONE: 449 default: 450 lastp = goalp = SCRIPTB_BA(np, no_data); 451 break; 452 } 453 454 /* 455 * Set all pointers values needed by SCRIPTS. 456 */ 457 cp->phys.head.lastp = cpu_to_scr(lastp); 458 cp->phys.head.savep = cpu_to_scr(lastp); 459 cp->startp = cp->phys.head.savep; 460 cp->goalp = cpu_to_scr(goalp); 461 462 /* 463 * When `#ifed 1', the code below makes the driver 464 * panic on the first attempt to write to a SCSI device. 465 * It is the first test we want to do after a driver 466 * change that does not seem obviously safe. :) 467 */ 468 #if 0 469 switch (cp->cdb_buf[0]) { 470 case 0x0A: case 0x2A: case 0xAA: 471 panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n"); 472 break; 473 default: 474 break; 475 } 476 #endif 477 478 /* 479 * activate this job. 480 */ 481 sym_put_start_queue(np, cp); 482 return 0; 483 484 out_abort: 485 sym_free_ccb(np, cp); 486 sym_xpt_done(np, cmd); 487 return 0; 488 } 489 490 491 /* 492 * timer daemon. 493 * 494 * Misused to keep the driver running when 495 * interrupts are not configured correctly. 496 */ 497 static void sym_timer(struct sym_hcb *np) 498 { 499 unsigned long thistime = jiffies; 500 501 /* 502 * Restart the timer. 503 */ 504 np->s.timer.expires = thistime + SYM_CONF_TIMER_INTERVAL; 505 add_timer(&np->s.timer); 506 507 /* 508 * If we are resetting the ncr, wait for settle_time before 509 * clearing it. Then command processing will be resumed. 510 */ 511 if (np->s.settle_time_valid) { 512 if (time_before_eq(np->s.settle_time, thistime)) { 513 if (sym_verbose >= 2 ) 514 printk("%s: command processing resumed\n", 515 sym_name(np)); 516 np->s.settle_time_valid = 0; 517 } 518 return; 519 } 520 521 /* 522 * Nothing to do for now, but that may come. 523 */ 524 if (np->s.lasttime + 4*HZ < thistime) { 525 np->s.lasttime = thistime; 526 } 527 528 #ifdef SYM_CONF_PCIQ_MAY_MISS_COMPLETIONS 529 /* 530 * Some way-broken PCI bridges may lead to 531 * completions being lost when the clearing 532 * of the INTFLY flag by the CPU occurs 533 * concurrently with the chip raising this flag. 534 * If this ever happen, lost completions will 535 * be reaped here. 536 */ 537 sym_wakeup_done(np); 538 #endif 539 } 540 541 542 /* 543 * PCI BUS error handler. 544 */ 545 void sym_log_bus_error(struct sym_hcb *np) 546 { 547 u_short pci_sts; 548 pci_read_config_word(np->s.device, PCI_STATUS, &pci_sts); 549 if (pci_sts & 0xf900) { 550 pci_write_config_word(np->s.device, PCI_STATUS, pci_sts); 551 printf("%s: PCI STATUS = 0x%04x\n", 552 sym_name(np), pci_sts & 0xf900); 553 } 554 } 555 556 /* 557 * queuecommand method. Entered with the host adapter lock held and 558 * interrupts disabled. 
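 *  Returns SCSI_MLQUEUE_HOST_BUSY while the bus is settling after a
 *  reset or when no CCB can be allocated, so that the midlayer will
 *  retry the command later.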
559 */ 560 static int sym53c8xx_queue_command(struct scsi_cmnd *cmd, 561 void (*done)(struct scsi_cmnd *)) 562 { 563 struct sym_hcb *np = SYM_SOFTC_PTR(cmd); 564 struct sym_ucmd *ucp = SYM_UCMD_PTR(cmd); 565 int sts = 0; 566 567 cmd->scsi_done = done; 568 memset(ucp, 0, sizeof(*ucp)); 569 570 /* 571 * Shorten our settle_time if needed for 572 * this command not to time out. 573 */ 574 if (np->s.settle_time_valid && cmd->timeout_per_command) { 575 unsigned long tlimit = jiffies + cmd->timeout_per_command; 576 tlimit -= SYM_CONF_TIMER_INTERVAL*2; 577 if (time_after(np->s.settle_time, tlimit)) { 578 np->s.settle_time = tlimit; 579 } 580 } 581 582 if (np->s.settle_time_valid) 583 return SCSI_MLQUEUE_HOST_BUSY; 584 585 sts = sym_queue_command(np, cmd); 586 if (sts) 587 return SCSI_MLQUEUE_HOST_BUSY; 588 return 0; 589 } 590 591 /* 592 * Linux entry point of the interrupt handler. 593 */ 594 static irqreturn_t sym53c8xx_intr(int irq, void *dev_id) 595 { 596 unsigned long flags; 597 struct sym_hcb *np = (struct sym_hcb *)dev_id; 598 599 if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("["); 600 601 spin_lock_irqsave(np->s.host->host_lock, flags); 602 sym_interrupt(np); 603 spin_unlock_irqrestore(np->s.host->host_lock, flags); 604 605 if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("]\n"); 606 607 return IRQ_HANDLED; 608 } 609 610 /* 611 * Linux entry point of the timer handler 612 */ 613 static void sym53c8xx_timer(unsigned long npref) 614 { 615 struct sym_hcb *np = (struct sym_hcb *)npref; 616 unsigned long flags; 617 618 spin_lock_irqsave(np->s.host->host_lock, flags); 619 sym_timer(np); 620 spin_unlock_irqrestore(np->s.host->host_lock, flags); 621 } 622 623 624 /* 625 * What the eh thread wants us to perform. 626 */ 627 #define SYM_EH_ABORT 0 628 #define SYM_EH_DEVICE_RESET 1 629 #define SYM_EH_BUS_RESET 2 630 #define SYM_EH_HOST_RESET 3 631 632 /* 633 * What we will do regarding the involved SCSI command. 634 */ 635 #define SYM_EH_DO_IGNORE 0 636 #define SYM_EH_DO_WAIT 2 637 638 /* 639 * scsi_done() alias when error recovery is in progress. 640 */ 641 static void sym_eh_done(struct scsi_cmnd *cmd) 642 { 643 struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd); 644 BUILD_BUG_ON(sizeof(struct scsi_pointer) < sizeof(struct sym_ucmd)); 645 646 cmd->scsi_done = ucmd->old_done; 647 648 if (ucmd->to_do == SYM_EH_DO_WAIT) 649 complete(ucmd->eh_done); 650 } 651 652 /* 653 * Generic method for our eh processing. 654 * The 'op' argument tells what we have to do. 
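 *  If the command is still queued inside the driver, its ->scsi_done
 *  is temporarily redirected to sym_eh_done() so that we can wait
 *  (up to five seconds) for the recovery operation to complete.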
655 */ 656 static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd) 657 { 658 struct sym_hcb *np = SYM_SOFTC_PTR(cmd); 659 struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd); 660 struct Scsi_Host *host = cmd->device->host; 661 SYM_QUEHEAD *qp; 662 int to_do = SYM_EH_DO_IGNORE; 663 int sts = -1; 664 struct completion eh_done; 665 666 dev_warn(&cmd->device->sdev_gendev, "%s operation started.\n", opname); 667 668 spin_lock_irq(host->host_lock); 669 /* This one is queued in some place -> to wait for completion */ 670 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { 671 struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); 672 if (cp->cmd == cmd) { 673 to_do = SYM_EH_DO_WAIT; 674 break; 675 } 676 } 677 678 if (to_do == SYM_EH_DO_WAIT) { 679 init_completion(&eh_done); 680 ucmd->old_done = cmd->scsi_done; 681 ucmd->eh_done = &eh_done; 682 wmb(); 683 cmd->scsi_done = sym_eh_done; 684 } 685 686 /* Try to proceed the operation we have been asked for */ 687 sts = -1; 688 switch(op) { 689 case SYM_EH_ABORT: 690 sts = sym_abort_scsiio(np, cmd, 1); 691 break; 692 case SYM_EH_DEVICE_RESET: 693 sts = sym_reset_scsi_target(np, cmd->device->id); 694 break; 695 case SYM_EH_BUS_RESET: 696 sym_reset_scsi_bus(np, 1); 697 sts = 0; 698 break; 699 case SYM_EH_HOST_RESET: 700 sym_reset_scsi_bus(np, 0); 701 sym_start_up (np, 1); 702 sts = 0; 703 break; 704 default: 705 break; 706 } 707 708 /* On error, restore everything and cross fingers :) */ 709 if (sts) { 710 cmd->scsi_done = ucmd->old_done; 711 to_do = SYM_EH_DO_IGNORE; 712 } 713 714 ucmd->to_do = to_do; 715 spin_unlock_irq(host->host_lock); 716 717 if (to_do == SYM_EH_DO_WAIT) { 718 if (!wait_for_completion_timeout(&eh_done, 5*HZ)) { 719 ucmd->to_do = SYM_EH_DO_IGNORE; 720 wmb(); 721 sts = -2; 722 } 723 } 724 dev_warn(&cmd->device->sdev_gendev, "%s operation %s.\n", opname, 725 sts==0 ? "complete" :sts==-2 ? "timed-out" : "failed"); 726 return sts ? SCSI_FAILED : SCSI_SUCCESS; 727 } 728 729 730 /* 731 * Error handlers called from the eh thread (one thread per HBA). 732 */ 733 static int sym53c8xx_eh_abort_handler(struct scsi_cmnd *cmd) 734 { 735 return sym_eh_handler(SYM_EH_ABORT, "ABORT", cmd); 736 } 737 738 static int sym53c8xx_eh_device_reset_handler(struct scsi_cmnd *cmd) 739 { 740 return sym_eh_handler(SYM_EH_DEVICE_RESET, "DEVICE RESET", cmd); 741 } 742 743 static int sym53c8xx_eh_bus_reset_handler(struct scsi_cmnd *cmd) 744 { 745 return sym_eh_handler(SYM_EH_BUS_RESET, "BUS RESET", cmd); 746 } 747 748 static int sym53c8xx_eh_host_reset_handler(struct scsi_cmnd *cmd) 749 { 750 return sym_eh_handler(SYM_EH_HOST_RESET, "HOST RESET", cmd); 751 } 752 753 /* 754 * Tune device queuing depth, according to various limits. 755 */ 756 static void sym_tune_dev_queuing(struct sym_tcb *tp, int lun, u_short reqtags) 757 { 758 struct sym_lcb *lp = sym_lp(tp, lun); 759 u_short oldtags; 760 761 if (!lp) 762 return; 763 764 oldtags = lp->s.reqtags; 765 766 if (reqtags > lp->s.scdev_depth) 767 reqtags = lp->s.scdev_depth; 768 769 lp->s.reqtags = reqtags; 770 771 if (reqtags != oldtags) { 772 dev_info(&tp->starget->dev, 773 "tagged command queuing %s, command queue depth %d.\n", 774 lp->s.reqtags ? 
"enabled" : "disabled", reqtags); 775 } 776 } 777 778 /* 779 * Linux select queue depths function 780 */ 781 #define DEF_DEPTH (sym_driver_setup.max_tag) 782 #define ALL_TARGETS -2 783 #define NO_TARGET -1 784 #define ALL_LUNS -2 785 #define NO_LUN -1 786 787 static int device_queue_depth(struct sym_hcb *np, int target, int lun) 788 { 789 int c, h, t, u, v; 790 char *p = sym_driver_setup.tag_ctrl; 791 char *ep; 792 793 h = -1; 794 t = NO_TARGET; 795 u = NO_LUN; 796 while ((c = *p++) != 0) { 797 v = simple_strtoul(p, &ep, 0); 798 switch(c) { 799 case '/': 800 ++h; 801 t = ALL_TARGETS; 802 u = ALL_LUNS; 803 break; 804 case 't': 805 if (t != target) 806 t = (target == v) ? v : NO_TARGET; 807 u = ALL_LUNS; 808 break; 809 case 'u': 810 if (u != lun) 811 u = (lun == v) ? v : NO_LUN; 812 break; 813 case 'q': 814 if (h == np->s.unit && 815 (t == ALL_TARGETS || t == target) && 816 (u == ALL_LUNS || u == lun)) 817 return v; 818 break; 819 case '-': 820 t = ALL_TARGETS; 821 u = ALL_LUNS; 822 break; 823 default: 824 break; 825 } 826 p = ep; 827 } 828 return DEF_DEPTH; 829 } 830 831 static int sym53c8xx_slave_alloc(struct scsi_device *sdev) 832 { 833 struct sym_hcb *np = sym_get_hcb(sdev->host); 834 struct sym_tcb *tp = &np->target[sdev->id]; 835 struct sym_lcb *lp; 836 837 if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN) 838 return -ENXIO; 839 840 tp->starget = sdev->sdev_target; 841 /* 842 * Fail the device init if the device is flagged NOSCAN at BOOT in 843 * the NVRAM. This may speed up boot and maintain coherency with 844 * BIOS device numbering. Clearing the flag allows the user to 845 * rescan skipped devices later. We also return an error for 846 * devices not flagged for SCAN LUNS in the NVRAM since some single 847 * lun devices behave badly when asked for a non zero LUN. 848 */ 849 850 if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) { 851 tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED; 852 starget_printk(KERN_INFO, tp->starget, 853 "Scan at boot disabled in NVRAM\n"); 854 return -ENXIO; 855 } 856 857 if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) { 858 if (sdev->lun != 0) 859 return -ENXIO; 860 starget_printk(KERN_INFO, tp->starget, 861 "Multiple LUNs disabled in NVRAM\n"); 862 } 863 864 lp = sym_alloc_lcb(np, sdev->id, sdev->lun); 865 if (!lp) 866 return -ENOMEM; 867 868 spi_min_period(tp->starget) = tp->usr_period; 869 spi_max_width(tp->starget) = tp->usr_width; 870 871 return 0; 872 } 873 874 /* 875 * Linux entry point for device queue sizing. 876 */ 877 static int sym53c8xx_slave_configure(struct scsi_device *sdev) 878 { 879 struct sym_hcb *np = sym_get_hcb(sdev->host); 880 struct sym_tcb *tp = &np->target[sdev->id]; 881 struct sym_lcb *lp = sym_lp(tp, sdev->lun); 882 int reqtags, depth_to_use; 883 884 /* 885 * Get user flags. 886 */ 887 lp->curr_flags = lp->user_flags; 888 889 /* 890 * Select queue depth from driver setup. 891 * Donnot use more than configured by user. 892 * Use at least 2. 893 * Donnot use more than our maximum. 894 */ 895 reqtags = device_queue_depth(np, sdev->id, sdev->lun); 896 if (reqtags > tp->usrtags) 897 reqtags = tp->usrtags; 898 if (!sdev->tagged_supported) 899 reqtags = 0; 900 #if 1 /* Avoid to locally queue commands for no good reasons */ 901 if (reqtags > SYM_CONF_MAX_TAG) 902 reqtags = SYM_CONF_MAX_TAG; 903 depth_to_use = (reqtags ? reqtags : 2); 904 #else 905 depth_to_use = (reqtags ? SYM_CONF_MAX_TAG : 2); 906 #endif 907 scsi_adjust_queue_depth(sdev, 908 (sdev->tagged_supported ? 
909 MSG_SIMPLE_TAG : 0), 910 depth_to_use); 911 lp->s.scdev_depth = depth_to_use; 912 sym_tune_dev_queuing(tp, sdev->lun, reqtags); 913 914 if (!spi_initial_dv(sdev->sdev_target)) 915 spi_dv_device(sdev); 916 917 return 0; 918 } 919 920 static void sym53c8xx_slave_destroy(struct scsi_device *sdev) 921 { 922 struct sym_hcb *np = sym_get_hcb(sdev->host); 923 struct sym_lcb *lp = sym_lp(&np->target[sdev->id], sdev->lun); 924 925 if (lp->itlq_tbl) 926 sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK * 4, "ITLQ_TBL"); 927 kfree(lp->cb_tags); 928 sym_mfree_dma(lp, sizeof(*lp), "LCB"); 929 } 930 931 /* 932 * Linux entry point for info() function 933 */ 934 static const char *sym53c8xx_info (struct Scsi_Host *host) 935 { 936 return SYM_DRIVER_NAME; 937 } 938 939 940 #ifdef SYM_LINUX_PROC_INFO_SUPPORT 941 /* 942 * Proc file system stuff 943 * 944 * A read operation returns adapter information. 945 * A write operation is a control command. 946 * The string is parsed in the driver code and the command is passed 947 * to the sym_usercmd() function. 948 */ 949 950 #ifdef SYM_LINUX_USER_COMMAND_SUPPORT 951 952 struct sym_usrcmd { 953 u_long target; 954 u_long lun; 955 u_long data; 956 u_long cmd; 957 }; 958 959 #define UC_SETSYNC 10 960 #define UC_SETTAGS 11 961 #define UC_SETDEBUG 12 962 #define UC_SETWIDE 14 963 #define UC_SETFLAG 15 964 #define UC_SETVERBOSE 17 965 #define UC_RESETDEV 18 966 #define UC_CLEARDEV 19 967 968 static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc) 969 { 970 struct sym_tcb *tp; 971 int t, l; 972 973 switch (uc->cmd) { 974 case 0: return; 975 976 #ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT 977 case UC_SETDEBUG: 978 sym_debug_flags = uc->data; 979 break; 980 #endif 981 case UC_SETVERBOSE: 982 np->verbose = uc->data; 983 break; 984 default: 985 /* 986 * We assume that other commands apply to targets. 987 * This should always be the case and avoid the below 988 * 4 lines to be repeated 6 times. 989 */ 990 for (t = 0; t < SYM_CONF_MAX_TARGET; t++) { 991 if (!((uc->target >> t) & 1)) 992 continue; 993 tp = &np->target[t]; 994 995 switch (uc->cmd) { 996 997 case UC_SETSYNC: 998 if (!uc->data || uc->data >= 255) { 999 tp->tgoal.iu = tp->tgoal.dt = 1000 tp->tgoal.qas = 0; 1001 tp->tgoal.offset = 0; 1002 } else if (uc->data <= 9 && np->minsync_dt) { 1003 if (uc->data < np->minsync_dt) 1004 uc->data = np->minsync_dt; 1005 tp->tgoal.iu = tp->tgoal.dt = 1006 tp->tgoal.qas = 1; 1007 tp->tgoal.width = 1; 1008 tp->tgoal.period = uc->data; 1009 tp->tgoal.offset = np->maxoffs_dt; 1010 } else { 1011 if (uc->data < np->minsync) 1012 uc->data = np->minsync; 1013 tp->tgoal.iu = tp->tgoal.dt = 1014 tp->tgoal.qas = 0; 1015 tp->tgoal.period = uc->data; 1016 tp->tgoal.offset = np->maxoffs; 1017 } 1018 tp->tgoal.check_nego = 1; 1019 break; 1020 case UC_SETWIDE: 1021 tp->tgoal.width = uc->data ? 
1 : 0; 1022 tp->tgoal.check_nego = 1; 1023 break; 1024 case UC_SETTAGS: 1025 for (l = 0; l < SYM_CONF_MAX_LUN; l++) 1026 sym_tune_dev_queuing(tp, l, uc->data); 1027 break; 1028 case UC_RESETDEV: 1029 tp->to_reset = 1; 1030 np->istat_sem = SEM; 1031 OUTB(np, nc_istat, SIGP|SEM); 1032 break; 1033 case UC_CLEARDEV: 1034 for (l = 0; l < SYM_CONF_MAX_LUN; l++) { 1035 struct sym_lcb *lp = sym_lp(tp, l); 1036 if (lp) lp->to_clear = 1; 1037 } 1038 np->istat_sem = SEM; 1039 OUTB(np, nc_istat, SIGP|SEM); 1040 break; 1041 case UC_SETFLAG: 1042 tp->usrflags = uc->data; 1043 break; 1044 } 1045 } 1046 break; 1047 } 1048 } 1049 1050 static int skip_spaces(char *ptr, int len) 1051 { 1052 int cnt, c; 1053 1054 for (cnt = len; cnt > 0 && (c = *ptr++) && isspace(c); cnt--); 1055 1056 return (len - cnt); 1057 } 1058 1059 static int get_int_arg(char *ptr, int len, u_long *pv) 1060 { 1061 char *end; 1062 1063 *pv = simple_strtoul(ptr, &end, 10); 1064 return (end - ptr); 1065 } 1066 1067 static int is_keyword(char *ptr, int len, char *verb) 1068 { 1069 int verb_len = strlen(verb); 1070 1071 if (len >= verb_len && !memcmp(verb, ptr, verb_len)) 1072 return verb_len; 1073 else 1074 return 0; 1075 } 1076 1077 #define SKIP_SPACES(ptr, len) \ 1078 if ((arg_len = skip_spaces(ptr, len)) < 1) \ 1079 return -EINVAL; \ 1080 ptr += arg_len; len -= arg_len; 1081 1082 #define GET_INT_ARG(ptr, len, v) \ 1083 if (!(arg_len = get_int_arg(ptr, len, &(v)))) \ 1084 return -EINVAL; \ 1085 ptr += arg_len; len -= arg_len; 1086 1087 1088 /* 1089 * Parse a control command 1090 */ 1091 1092 static int sym_user_command(struct sym_hcb *np, char *buffer, int length) 1093 { 1094 char *ptr = buffer; 1095 int len = length; 1096 struct sym_usrcmd cmd, *uc = &cmd; 1097 int arg_len; 1098 u_long target; 1099 1100 memset(uc, 0, sizeof(*uc)); 1101 1102 if (len > 0 && ptr[len-1] == '\n') 1103 --len; 1104 1105 if ((arg_len = is_keyword(ptr, len, "setsync")) != 0) 1106 uc->cmd = UC_SETSYNC; 1107 else if ((arg_len = is_keyword(ptr, len, "settags")) != 0) 1108 uc->cmd = UC_SETTAGS; 1109 else if ((arg_len = is_keyword(ptr, len, "setverbose")) != 0) 1110 uc->cmd = UC_SETVERBOSE; 1111 else if ((arg_len = is_keyword(ptr, len, "setwide")) != 0) 1112 uc->cmd = UC_SETWIDE; 1113 #ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT 1114 else if ((arg_len = is_keyword(ptr, len, "setdebug")) != 0) 1115 uc->cmd = UC_SETDEBUG; 1116 #endif 1117 else if ((arg_len = is_keyword(ptr, len, "setflag")) != 0) 1118 uc->cmd = UC_SETFLAG; 1119 else if ((arg_len = is_keyword(ptr, len, "resetdev")) != 0) 1120 uc->cmd = UC_RESETDEV; 1121 else if ((arg_len = is_keyword(ptr, len, "cleardev")) != 0) 1122 uc->cmd = UC_CLEARDEV; 1123 else 1124 arg_len = 0; 1125 1126 #ifdef DEBUG_PROC_INFO 1127 printk("sym_user_command: arg_len=%d, cmd=%ld\n", arg_len, uc->cmd); 1128 #endif 1129 1130 if (!arg_len) 1131 return -EINVAL; 1132 ptr += arg_len; len -= arg_len; 1133 1134 switch(uc->cmd) { 1135 case UC_SETSYNC: 1136 case UC_SETTAGS: 1137 case UC_SETWIDE: 1138 case UC_SETFLAG: 1139 case UC_RESETDEV: 1140 case UC_CLEARDEV: 1141 SKIP_SPACES(ptr, len); 1142 if ((arg_len = is_keyword(ptr, len, "all")) != 0) { 1143 ptr += arg_len; len -= arg_len; 1144 uc->target = ~0; 1145 } else { 1146 GET_INT_ARG(ptr, len, target); 1147 uc->target = (1<<target); 1148 #ifdef DEBUG_PROC_INFO 1149 printk("sym_user_command: target=%ld\n", target); 1150 #endif 1151 } 1152 break; 1153 } 1154 1155 switch(uc->cmd) { 1156 case UC_SETVERBOSE: 1157 case UC_SETSYNC: 1158 case UC_SETTAGS: 1159 case UC_SETWIDE: 1160 SKIP_SPACES(ptr, len); 
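		/* These commands take a single numeric argument. */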
1161 GET_INT_ARG(ptr, len, uc->data); 1162 #ifdef DEBUG_PROC_INFO 1163 printk("sym_user_command: data=%ld\n", uc->data); 1164 #endif 1165 break; 1166 #ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT 1167 case UC_SETDEBUG: 1168 while (len > 0) { 1169 SKIP_SPACES(ptr, len); 1170 if ((arg_len = is_keyword(ptr, len, "alloc"))) 1171 uc->data |= DEBUG_ALLOC; 1172 else if ((arg_len = is_keyword(ptr, len, "phase"))) 1173 uc->data |= DEBUG_PHASE; 1174 else if ((arg_len = is_keyword(ptr, len, "queue"))) 1175 uc->data |= DEBUG_QUEUE; 1176 else if ((arg_len = is_keyword(ptr, len, "result"))) 1177 uc->data |= DEBUG_RESULT; 1178 else if ((arg_len = is_keyword(ptr, len, "scatter"))) 1179 uc->data |= DEBUG_SCATTER; 1180 else if ((arg_len = is_keyword(ptr, len, "script"))) 1181 uc->data |= DEBUG_SCRIPT; 1182 else if ((arg_len = is_keyword(ptr, len, "tiny"))) 1183 uc->data |= DEBUG_TINY; 1184 else if ((arg_len = is_keyword(ptr, len, "timing"))) 1185 uc->data |= DEBUG_TIMING; 1186 else if ((arg_len = is_keyword(ptr, len, "nego"))) 1187 uc->data |= DEBUG_NEGO; 1188 else if ((arg_len = is_keyword(ptr, len, "tags"))) 1189 uc->data |= DEBUG_TAGS; 1190 else if ((arg_len = is_keyword(ptr, len, "pointer"))) 1191 uc->data |= DEBUG_POINTER; 1192 else 1193 return -EINVAL; 1194 ptr += arg_len; len -= arg_len; 1195 } 1196 #ifdef DEBUG_PROC_INFO 1197 printk("sym_user_command: data=%ld\n", uc->data); 1198 #endif 1199 break; 1200 #endif /* SYM_LINUX_DEBUG_CONTROL_SUPPORT */ 1201 case UC_SETFLAG: 1202 while (len > 0) { 1203 SKIP_SPACES(ptr, len); 1204 if ((arg_len = is_keyword(ptr, len, "no_disc"))) 1205 uc->data &= ~SYM_DISC_ENABLED; 1206 else 1207 return -EINVAL; 1208 ptr += arg_len; len -= arg_len; 1209 } 1210 break; 1211 default: 1212 break; 1213 } 1214 1215 if (len) 1216 return -EINVAL; 1217 else { 1218 unsigned long flags; 1219 1220 spin_lock_irqsave(np->s.host->host_lock, flags); 1221 sym_exec_user_command (np, uc); 1222 spin_unlock_irqrestore(np->s.host->host_lock, flags); 1223 } 1224 return length; 1225 } 1226 1227 #endif /* SYM_LINUX_USER_COMMAND_SUPPORT */ 1228 1229 1230 #ifdef SYM_LINUX_USER_INFO_SUPPORT 1231 /* 1232 * Informations through the proc file system. 1233 */ 1234 struct info_str { 1235 char *buffer; 1236 int length; 1237 int offset; 1238 int pos; 1239 }; 1240 1241 static void copy_mem_info(struct info_str *info, char *data, int len) 1242 { 1243 if (info->pos + len > info->length) 1244 len = info->length - info->pos; 1245 1246 if (info->pos + len < info->offset) { 1247 info->pos += len; 1248 return; 1249 } 1250 if (info->pos < info->offset) { 1251 data += (info->offset - info->pos); 1252 len -= (info->offset - info->pos); 1253 } 1254 1255 if (len > 0) { 1256 memcpy(info->buffer + info->pos, data, len); 1257 info->pos += len; 1258 } 1259 } 1260 1261 static int copy_info(struct info_str *info, char *fmt, ...) 1262 { 1263 va_list args; 1264 char buf[81]; 1265 int len; 1266 1267 va_start(args, fmt); 1268 len = vsprintf(buf, fmt, args); 1269 va_end(args); 1270 1271 copy_mem_info(info, buf, len); 1272 return len; 1273 } 1274 1275 /* 1276 * Copy formatted information into the input buffer. 
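 *  (copy_info() clips its output against the offset/length window
 *  requested by the proc read.)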
1277 */ 1278 static int sym_host_info(struct sym_hcb *np, char *ptr, off_t offset, int len) 1279 { 1280 struct info_str info; 1281 1282 info.buffer = ptr; 1283 info.length = len; 1284 info.offset = offset; 1285 info.pos = 0; 1286 1287 copy_info(&info, "Chip " NAME53C "%s, device id 0x%x, " 1288 "revision id 0x%x\n", 1289 np->s.chip_name, np->device_id, np->revision_id); 1290 copy_info(&info, "At PCI address %s, IRQ " IRQ_FMT "\n", 1291 pci_name(np->s.device), IRQ_PRM(np->s.irq)); 1292 copy_info(&info, "Min. period factor %d, %s SCSI BUS%s\n", 1293 (int) (np->minsync_dt ? np->minsync_dt : np->minsync), 1294 np->maxwide ? "Wide" : "Narrow", 1295 np->minsync_dt ? ", DT capable" : ""); 1296 1297 copy_info(&info, "Max. started commands %d, " 1298 "max. commands per LUN %d\n", 1299 SYM_CONF_MAX_START, SYM_CONF_MAX_TAG); 1300 1301 return info.pos > info.offset? info.pos - info.offset : 0; 1302 } 1303 #endif /* SYM_LINUX_USER_INFO_SUPPORT */ 1304 1305 /* 1306 * Entry point of the scsi proc fs of the driver. 1307 * - func = 0 means read (returns adapter infos) 1308 * - func = 1 means write (not yet merget from sym53c8xx) 1309 */ 1310 static int sym53c8xx_proc_info(struct Scsi_Host *host, char *buffer, 1311 char **start, off_t offset, int length, int func) 1312 { 1313 struct sym_hcb *np = sym_get_hcb(host); 1314 int retv; 1315 1316 if (func) { 1317 #ifdef SYM_LINUX_USER_COMMAND_SUPPORT 1318 retv = sym_user_command(np, buffer, length); 1319 #else 1320 retv = -EINVAL; 1321 #endif 1322 } else { 1323 if (start) 1324 *start = buffer; 1325 #ifdef SYM_LINUX_USER_INFO_SUPPORT 1326 retv = sym_host_info(np, buffer, offset, length); 1327 #else 1328 retv = -EINVAL; 1329 #endif 1330 } 1331 1332 return retv; 1333 } 1334 #endif /* SYM_LINUX_PROC_INFO_SUPPORT */ 1335 1336 /* 1337 * Free controller resources. 1338 */ 1339 static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev) 1340 { 1341 /* 1342 * Free O/S specific resources. 1343 */ 1344 if (np->s.irq) 1345 free_irq(np->s.irq, np); 1346 if (np->s.ioaddr) 1347 pci_iounmap(pdev, np->s.ioaddr); 1348 if (np->s.ramaddr) 1349 pci_iounmap(pdev, np->s.ramaddr); 1350 /* 1351 * Free O/S independent resources. 1352 */ 1353 sym_hcb_free(np); 1354 1355 sym_mfree_dma(np, sizeof(*np), "HCB"); 1356 } 1357 1358 /* 1359 * Ask/tell the system about DMA addressing. 1360 */ 1361 static int sym_setup_bus_dma_mask(struct sym_hcb *np) 1362 { 1363 #if SYM_CONF_DMA_ADDRESSING_MODE > 0 1364 #if SYM_CONF_DMA_ADDRESSING_MODE == 1 1365 #define DMA_DAC_MASK DMA_40BIT_MASK 1366 #elif SYM_CONF_DMA_ADDRESSING_MODE == 2 1367 #define DMA_DAC_MASK DMA_64BIT_MASK 1368 #endif 1369 if ((np->features & FE_DAC) && 1370 !pci_set_dma_mask(np->s.device, DMA_DAC_MASK)) { 1371 np->use_dac = 1; 1372 return 0; 1373 } 1374 #endif 1375 1376 if (!pci_set_dma_mask(np->s.device, DMA_32BIT_MASK)) 1377 return 0; 1378 1379 printf_warning("%s: No suitable DMA available\n", sym_name(np)); 1380 return -1; 1381 } 1382 1383 /* 1384 * Host attach and initialisations. 1385 * 1386 * Allocate host data and ncb structure. 1387 * Remap MMIO region. 1388 * Do chip initialization. 1389 * If all is OK, install interrupt handling and 1390 * start the timer daemon. 
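 *  On any failure the partially initialised resources are released
 *  and NULL is returned to the caller.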
1391 */ 1392 static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt, 1393 int unit, struct sym_device *dev) 1394 { 1395 struct host_data *host_data; 1396 struct sym_hcb *np = NULL; 1397 struct Scsi_Host *instance = NULL; 1398 struct pci_dev *pdev = dev->pdev; 1399 unsigned long flags; 1400 struct sym_fw *fw; 1401 1402 printk(KERN_INFO 1403 "sym%d: <%s> rev 0x%x at pci %s irq " IRQ_FMT "\n", 1404 unit, dev->chip.name, dev->chip.revision_id, 1405 pci_name(pdev), IRQ_PRM(pdev->irq)); 1406 1407 /* 1408 * Get the firmware for this chip. 1409 */ 1410 fw = sym_find_firmware(&dev->chip); 1411 if (!fw) 1412 goto attach_failed; 1413 1414 /* 1415 * Allocate host_data structure 1416 */ 1417 instance = scsi_host_alloc(tpnt, sizeof(*host_data)); 1418 if (!instance) 1419 goto attach_failed; 1420 host_data = (struct host_data *) instance->hostdata; 1421 1422 /* 1423 * Allocate immediately the host control block, 1424 * since we are only expecting to succeed. :) 1425 * We keep track in the HCB of all the resources that 1426 * are to be released on error. 1427 */ 1428 np = __sym_calloc_dma(&pdev->dev, sizeof(*np), "HCB"); 1429 if (!np) 1430 goto attach_failed; 1431 np->s.device = pdev; 1432 np->bus_dmat = &pdev->dev; /* Result in 1 DMA pool per HBA */ 1433 host_data->ncb = np; 1434 np->s.host = instance; 1435 1436 pci_set_drvdata(pdev, np); 1437 1438 /* 1439 * Copy some useful infos to the HCB. 1440 */ 1441 np->hcb_ba = vtobus(np); 1442 np->verbose = sym_driver_setup.verbose; 1443 np->s.device = pdev; 1444 np->s.unit = unit; 1445 np->device_id = dev->chip.device_id; 1446 np->revision_id = dev->chip.revision_id; 1447 np->features = dev->chip.features; 1448 np->clock_divn = dev->chip.nr_divisor; 1449 np->maxoffs = dev->chip.offset_max; 1450 np->maxburst = dev->chip.burst_max; 1451 np->myaddr = dev->host_id; 1452 1453 /* 1454 * Edit its name. 1455 */ 1456 strlcpy(np->s.chip_name, dev->chip.name, sizeof(np->s.chip_name)); 1457 sprintf(np->s.inst_name, "sym%d", np->s.unit); 1458 1459 if (sym_setup_bus_dma_mask(np)) 1460 goto attach_failed; 1461 1462 /* 1463 * Try to map the controller chip to 1464 * virtual and physical memory. 1465 */ 1466 np->mmio_ba = (u32)dev->mmio_base; 1467 np->s.ioaddr = dev->s.ioaddr; 1468 np->s.ramaddr = dev->s.ramaddr; 1469 np->s.io_ws = (np->features & FE_IO256) ? 256 : 128; 1470 1471 /* 1472 * Map on-chip RAM if present and supported. 1473 */ 1474 if (!(np->features & FE_RAM)) 1475 dev->ram_base = 0; 1476 if (dev->ram_base) { 1477 np->ram_ba = (u32)dev->ram_base; 1478 np->ram_ws = (np->features & FE_RAM8K) ? 8192 : 4096; 1479 } 1480 1481 if (sym_hcb_attach(instance, fw, dev->nvram)) 1482 goto attach_failed; 1483 1484 /* 1485 * Install the interrupt handler. 1486 * If we synchonize the C code with SCRIPTS on interrupt, 1487 * we do not want to share the INTR line at all. 1488 */ 1489 if (request_irq(pdev->irq, sym53c8xx_intr, IRQF_SHARED, NAME53C8XX, np)) { 1490 printf_err("%s: request irq %d failure\n", 1491 sym_name(np), pdev->irq); 1492 goto attach_failed; 1493 } 1494 np->s.irq = pdev->irq; 1495 1496 /* 1497 * After SCSI devices have been opened, we cannot 1498 * reset the bus safely, so we do it here. 1499 */ 1500 spin_lock_irqsave(instance->host_lock, flags); 1501 if (sym_reset_scsi_bus(np, 0)) 1502 goto reset_failed; 1503 1504 /* 1505 * Start the SCRIPTS. 
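 *  (sym_start_up() performs the chip setup and kicks off the on-chip
 *  SCRIPTS processor.)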
1506 */ 1507 sym_start_up (np, 1); 1508 1509 /* 1510 * Start the timer daemon 1511 */ 1512 init_timer(&np->s.timer); 1513 np->s.timer.data = (unsigned long) np; 1514 np->s.timer.function = sym53c8xx_timer; 1515 np->s.lasttime=0; 1516 sym_timer (np); 1517 1518 /* 1519 * Fill Linux host instance structure 1520 * and return success. 1521 */ 1522 instance->max_channel = 0; 1523 instance->this_id = np->myaddr; 1524 instance->max_id = np->maxwide ? 16 : 8; 1525 instance->max_lun = SYM_CONF_MAX_LUN; 1526 instance->unique_id = pci_resource_start(pdev, 0); 1527 instance->cmd_per_lun = SYM_CONF_MAX_TAG; 1528 instance->can_queue = (SYM_CONF_MAX_START-2); 1529 instance->sg_tablesize = SYM_CONF_MAX_SG; 1530 instance->max_cmd_len = 16; 1531 BUG_ON(sym2_transport_template == NULL); 1532 instance->transportt = sym2_transport_template; 1533 1534 spin_unlock_irqrestore(instance->host_lock, flags); 1535 1536 return instance; 1537 1538 reset_failed: 1539 printf_err("%s: FATAL ERROR: CHECK SCSI BUS - CABLES, " 1540 "TERMINATION, DEVICE POWER etc.!\n", sym_name(np)); 1541 spin_unlock_irqrestore(instance->host_lock, flags); 1542 attach_failed: 1543 if (!instance) 1544 return NULL; 1545 printf_info("%s: giving up ...\n", sym_name(np)); 1546 if (np) 1547 sym_free_resources(np, pdev); 1548 scsi_host_put(instance); 1549 1550 return NULL; 1551 } 1552 1553 1554 /* 1555 * Detect and try to read SYMBIOS and TEKRAM NVRAM. 1556 */ 1557 #if SYM_CONF_NVRAM_SUPPORT 1558 static void __devinit sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp) 1559 { 1560 devp->nvram = nvp; 1561 devp->device_id = devp->chip.device_id; 1562 nvp->type = 0; 1563 1564 sym_read_nvram(devp, nvp); 1565 } 1566 #else 1567 static inline void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp) 1568 { 1569 } 1570 #endif /* SYM_CONF_NVRAM_SUPPORT */ 1571 1572 static int __devinit sym_check_supported(struct sym_device *device) 1573 { 1574 struct sym_chip *chip; 1575 struct pci_dev *pdev = device->pdev; 1576 u_char revision; 1577 unsigned long io_port = pci_resource_start(pdev, 0); 1578 int i; 1579 1580 /* 1581 * If user excluded this chip, do not initialize it. 1582 * I hate this code so much. Must kill it. 1583 */ 1584 if (io_port) { 1585 for (i = 0 ; i < 8 ; i++) { 1586 if (sym_driver_setup.excludes[i] == io_port) 1587 return -ENODEV; 1588 } 1589 } 1590 1591 /* 1592 * Check if the chip is supported. Then copy the chip description 1593 * to our device structure so we can make it match the actual device 1594 * and options. 1595 */ 1596 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision); 1597 chip = sym_lookup_chip_table(pdev->device, revision); 1598 if (!chip) { 1599 dev_info(&pdev->dev, "device not supported\n"); 1600 return -ENODEV; 1601 } 1602 memcpy(&device->chip, chip, sizeof(device->chip)); 1603 device->chip.revision_id = revision; 1604 1605 return 0; 1606 } 1607 1608 /* 1609 * Ignore Symbios chips controlled by various RAID controllers. 1610 * These controllers set value 0x52414944 at RAM end - 16. 
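 *  (0x52414944 is "RAID" in ASCII.)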
1611 */ 1612 static int __devinit sym_check_raid(struct sym_device *device) 1613 { 1614 unsigned int ram_size, ram_val; 1615 1616 if (!device->s.ramaddr) 1617 return 0; 1618 1619 if (device->chip.features & FE_RAM8K) 1620 ram_size = 8192; 1621 else 1622 ram_size = 4096; 1623 1624 ram_val = readl(device->s.ramaddr + ram_size - 16); 1625 if (ram_val != 0x52414944) 1626 return 0; 1627 1628 dev_info(&device->pdev->dev, 1629 "not initializing, driven by RAID controller.\n"); 1630 return -ENODEV; 1631 } 1632 1633 static int __devinit sym_set_workarounds(struct sym_device *device) 1634 { 1635 struct sym_chip *chip = &device->chip; 1636 struct pci_dev *pdev = device->pdev; 1637 u_short status_reg; 1638 1639 /* 1640 * (ITEM 12 of a DEL about the 896 I haven't yet). 1641 * We must ensure the chip will use WRITE AND INVALIDATE. 1642 * The revision number limit is for now arbitrary. 1643 */ 1644 if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && chip->revision_id < 0x4) { 1645 chip->features |= (FE_WRIE | FE_CLSE); 1646 } 1647 1648 /* If the chip can do Memory Write Invalidate, enable it */ 1649 if (chip->features & FE_WRIE) { 1650 if (pci_set_mwi(pdev)) 1651 return -ENODEV; 1652 } 1653 1654 /* 1655 * Work around for errant bit in 895A. The 66Mhz 1656 * capable bit is set erroneously. Clear this bit. 1657 * (Item 1 DEL 533) 1658 * 1659 * Make sure Config space and Features agree. 1660 * 1661 * Recall: writes are not normal to status register - 1662 * write a 1 to clear and a 0 to leave unchanged. 1663 * Can only reset bits. 1664 */ 1665 pci_read_config_word(pdev, PCI_STATUS, &status_reg); 1666 if (chip->features & FE_66MHZ) { 1667 if (!(status_reg & PCI_STATUS_66MHZ)) 1668 chip->features &= ~FE_66MHZ; 1669 } else { 1670 if (status_reg & PCI_STATUS_66MHZ) { 1671 status_reg = PCI_STATUS_66MHZ; 1672 pci_write_config_word(pdev, PCI_STATUS, status_reg); 1673 pci_read_config_word(pdev, PCI_STATUS, &status_reg); 1674 } 1675 } 1676 1677 return 0; 1678 } 1679 1680 /* 1681 * Read and check the PCI configuration for any detected NCR 1682 * boards and save data for attaching after all boards have 1683 * been detected. 1684 */ 1685 static void __devinit 1686 sym_init_device(struct pci_dev *pdev, struct sym_device *device) 1687 { 1688 int i = 2; 1689 struct pci_bus_region bus_addr; 1690 1691 device->host_id = SYM_SETUP_HOST_ID; 1692 device->pdev = pdev; 1693 1694 pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[1]); 1695 device->mmio_base = bus_addr.start; 1696 1697 /* 1698 * If the BAR is 64-bit, resource 2 will be occupied by the 1699 * upper 32 bits 1700 */ 1701 if (!pdev->resource[i].flags) 1702 i++; 1703 pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[i]); 1704 device->ram_base = bus_addr.start; 1705 1706 #ifdef CONFIG_SCSI_SYM53C8XX_MMIO 1707 if (device->mmio_base) 1708 device->s.ioaddr = pci_iomap(pdev, 1, 1709 pci_resource_len(pdev, 1)); 1710 #endif 1711 if (!device->s.ioaddr) 1712 device->s.ioaddr = pci_iomap(pdev, 0, 1713 pci_resource_len(pdev, 0)); 1714 if (device->ram_base) 1715 device->s.ramaddr = pci_iomap(pdev, i, 1716 pci_resource_len(pdev, i)); 1717 } 1718 1719 /* 1720 * The NCR PQS and PDS cards are constructed as a DEC bridge 1721 * behind which sits a proprietary NCR memory controller and 1722 * either four or two 53c875s as separate devices. We can tell 1723 * if an 875 is part of a PQS/PDS or not since if it is, it will 1724 * be on the same bus as the memory controller. In its usual 1725 * mode of operation, the 875s are slaved to the memory 1726 * controller for all transfers. 
To operate with the Linux 1727 * driver, the memory controller is disabled and the 875s 1728 * freed to function independently. The only wrinkle is that 1729 * the preset SCSI ID (which may be zero) must be read in from 1730 * a special configuration space register of the 875. 1731 */ 1732 static void sym_config_pqs(struct pci_dev *pdev, struct sym_device *sym_dev) 1733 { 1734 int slot; 1735 u8 tmp; 1736 1737 for (slot = 0; slot < 256; slot++) { 1738 struct pci_dev *memc = pci_get_slot(pdev->bus, slot); 1739 1740 if (!memc || memc->vendor != 0x101a || memc->device == 0x0009) { 1741 pci_dev_put(memc); 1742 continue; 1743 } 1744 1745 /* bit 1: allow individual 875 configuration */ 1746 pci_read_config_byte(memc, 0x44, &tmp); 1747 if ((tmp & 0x2) == 0) { 1748 tmp |= 0x2; 1749 pci_write_config_byte(memc, 0x44, tmp); 1750 } 1751 1752 /* bit 2: drive individual 875 interrupts to the bus */ 1753 pci_read_config_byte(memc, 0x45, &tmp); 1754 if ((tmp & 0x4) == 0) { 1755 tmp |= 0x4; 1756 pci_write_config_byte(memc, 0x45, tmp); 1757 } 1758 1759 pci_dev_put(memc); 1760 break; 1761 } 1762 1763 pci_read_config_byte(pdev, 0x84, &tmp); 1764 sym_dev->host_id = tmp; 1765 } 1766 1767 /* 1768 * Called before unloading the module. 1769 * Detach the host. 1770 * We have to free resources and halt the NCR chip. 1771 */ 1772 static int sym_detach(struct sym_hcb *np, struct pci_dev *pdev) 1773 { 1774 printk("%s: detaching ...\n", sym_name(np)); 1775 1776 del_timer_sync(&np->s.timer); 1777 1778 /* 1779 * Reset NCR chip. 1780 * We should use sym_soft_reset(), but we don't want to do 1781 * so, since we may not be safe if interrupts occur. 1782 */ 1783 printk("%s: resetting chip\n", sym_name(np)); 1784 OUTB(np, nc_istat, SRST); 1785 INB(np, nc_mbox1); 1786 udelay(10); 1787 OUTB(np, nc_istat, 0); 1788 1789 sym_free_resources(np, pdev); 1790 1791 return 1; 1792 } 1793 1794 /* 1795 * Driver host template. 
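 *  The error-handling entry points below all funnel into
 *  sym_eh_handler() above.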
1796 */ 1797 static struct scsi_host_template sym2_template = { 1798 .module = THIS_MODULE, 1799 .name = "sym53c8xx", 1800 .info = sym53c8xx_info, 1801 .queuecommand = sym53c8xx_queue_command, 1802 .slave_alloc = sym53c8xx_slave_alloc, 1803 .slave_configure = sym53c8xx_slave_configure, 1804 .slave_destroy = sym53c8xx_slave_destroy, 1805 .eh_abort_handler = sym53c8xx_eh_abort_handler, 1806 .eh_device_reset_handler = sym53c8xx_eh_device_reset_handler, 1807 .eh_bus_reset_handler = sym53c8xx_eh_bus_reset_handler, 1808 .eh_host_reset_handler = sym53c8xx_eh_host_reset_handler, 1809 .this_id = 7, 1810 .use_clustering = ENABLE_CLUSTERING, 1811 .use_sg_chaining = ENABLE_SG_CHAINING, 1812 .max_sectors = 0xFFFF, 1813 #ifdef SYM_LINUX_PROC_INFO_SUPPORT 1814 .proc_info = sym53c8xx_proc_info, 1815 .proc_name = NAME53C8XX, 1816 #endif 1817 }; 1818 1819 static int attach_count; 1820 1821 static int __devinit sym2_probe(struct pci_dev *pdev, 1822 const struct pci_device_id *ent) 1823 { 1824 struct sym_device sym_dev; 1825 struct sym_nvram nvram; 1826 struct Scsi_Host *instance; 1827 1828 memset(&sym_dev, 0, sizeof(sym_dev)); 1829 memset(&nvram, 0, sizeof(nvram)); 1830 1831 if (pci_enable_device(pdev)) 1832 goto leave; 1833 1834 pci_set_master(pdev); 1835 1836 if (pci_request_regions(pdev, NAME53C8XX)) 1837 goto disable; 1838 1839 sym_init_device(pdev, &sym_dev); 1840 if (sym_check_supported(&sym_dev)) 1841 goto free; 1842 1843 if (sym_check_raid(&sym_dev)) 1844 goto leave; /* Don't disable the device */ 1845 1846 if (sym_set_workarounds(&sym_dev)) 1847 goto free; 1848 1849 sym_config_pqs(pdev, &sym_dev); 1850 1851 sym_get_nvram(&sym_dev, &nvram); 1852 1853 instance = sym_attach(&sym2_template, attach_count, &sym_dev); 1854 if (!instance) 1855 goto free; 1856 1857 if (scsi_add_host(instance, &pdev->dev)) 1858 goto detach; 1859 scsi_scan_host(instance); 1860 1861 attach_count++; 1862 1863 return 0; 1864 1865 detach: 1866 sym_detach(pci_get_drvdata(pdev), pdev); 1867 free: 1868 pci_release_regions(pdev); 1869 disable: 1870 pci_disable_device(pdev); 1871 leave: 1872 return -ENODEV; 1873 } 1874 1875 static void __devexit sym2_remove(struct pci_dev *pdev) 1876 { 1877 struct sym_hcb *np = pci_get_drvdata(pdev); 1878 struct Scsi_Host *host = np->s.host; 1879 1880 scsi_remove_host(host); 1881 scsi_host_put(host); 1882 1883 sym_detach(np, pdev); 1884 1885 pci_release_regions(pdev); 1886 pci_disable_device(pdev); 1887 1888 attach_count--; 1889 } 1890 1891 static void sym2_get_signalling(struct Scsi_Host *shost) 1892 { 1893 struct sym_hcb *np = sym_get_hcb(shost); 1894 enum spi_signal_type type; 1895 1896 switch (np->scsi_mode) { 1897 case SMODE_SE: 1898 type = SPI_SIGNAL_SE; 1899 break; 1900 case SMODE_LVD: 1901 type = SPI_SIGNAL_LVD; 1902 break; 1903 case SMODE_HVD: 1904 type = SPI_SIGNAL_HVD; 1905 break; 1906 default: 1907 type = SPI_SIGNAL_UNKNOWN; 1908 break; 1909 } 1910 spi_signalling(shost) = type; 1911 } 1912 1913 static void sym2_set_offset(struct scsi_target *starget, int offset) 1914 { 1915 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1916 struct sym_hcb *np = sym_get_hcb(shost); 1917 struct sym_tcb *tp = &np->target[starget->id]; 1918 1919 tp->tgoal.offset = offset; 1920 tp->tgoal.check_nego = 1; 1921 } 1922 1923 static void sym2_set_period(struct scsi_target *starget, int period) 1924 { 1925 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1926 struct sym_hcb *np = sym_get_hcb(shost); 1927 struct sym_tcb *tp = &np->target[starget->id]; 1928 1929 /* have to have DT for these 
transfers, but DT will also 1930 * set width, so check that this is allowed */ 1931 if (period <= np->minsync && spi_width(starget)) 1932 tp->tgoal.dt = 1; 1933 1934 tp->tgoal.period = period; 1935 tp->tgoal.check_nego = 1; 1936 } 1937 1938 static void sym2_set_width(struct scsi_target *starget, int width) 1939 { 1940 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1941 struct sym_hcb *np = sym_get_hcb(shost); 1942 struct sym_tcb *tp = &np->target[starget->id]; 1943 1944 /* It is illegal to have DT set on narrow transfers. If DT is 1945 * clear, we must also clear IU and QAS. */ 1946 if (width == 0) 1947 tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0; 1948 1949 tp->tgoal.width = width; 1950 tp->tgoal.check_nego = 1; 1951 } 1952 1953 static void sym2_set_dt(struct scsi_target *starget, int dt) 1954 { 1955 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1956 struct sym_hcb *np = sym_get_hcb(shost); 1957 struct sym_tcb *tp = &np->target[starget->id]; 1958 1959 /* We must clear QAS and IU if DT is clear */ 1960 if (dt) 1961 tp->tgoal.dt = 1; 1962 else 1963 tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0; 1964 tp->tgoal.check_nego = 1; 1965 } 1966 1967 #if 0 1968 static void sym2_set_iu(struct scsi_target *starget, int iu) 1969 { 1970 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1971 struct sym_hcb *np = sym_get_hcb(shost); 1972 struct sym_tcb *tp = &np->target[starget->id]; 1973 1974 if (iu) 1975 tp->tgoal.iu = tp->tgoal.dt = 1; 1976 else 1977 tp->tgoal.iu = 0; 1978 tp->tgoal.check_nego = 1; 1979 } 1980 1981 static void sym2_set_qas(struct scsi_target *starget, int qas) 1982 { 1983 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1984 struct sym_hcb *np = sym_get_hcb(shost); 1985 struct sym_tcb *tp = &np->target[starget->id]; 1986 1987 if (qas) 1988 tp->tgoal.dt = tp->tgoal.qas = 1; 1989 else 1990 tp->tgoal.qas = 0; 1991 tp->tgoal.check_nego = 1; 1992 } 1993 #endif 1994 1995 static struct spi_function_template sym2_transport_functions = { 1996 .set_offset = sym2_set_offset, 1997 .show_offset = 1, 1998 .set_period = sym2_set_period, 1999 .show_period = 1, 2000 .set_width = sym2_set_width, 2001 .show_width = 1, 2002 .set_dt = sym2_set_dt, 2003 .show_dt = 1, 2004 #if 0 2005 .set_iu = sym2_set_iu, 2006 .show_iu = 1, 2007 .set_qas = sym2_set_qas, 2008 .show_qas = 1, 2009 #endif 2010 .get_signalling = sym2_get_signalling, 2011 }; 2012 2013 static struct pci_device_id sym2_id_table[] __devinitdata = { 2014 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C810, 2015 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2016 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C820, 2017 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */ 2018 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C825, 2019 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2020 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C815, 2021 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2022 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C810AP, 2023 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */ 2024 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860, 2025 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2026 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510, 2027 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL }, 2028 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896, 2029 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2030 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895, 2031 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2032 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C885, 2033 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2034 { 
PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C1510,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL }, /* new */
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C895A,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C875A,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_33,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_66,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875J,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, sym2_id_table);

static struct pci_driver sym2_driver = {
	.name		= NAME53C8XX,
	.id_table	= sym2_id_table,
	.probe		= sym2_probe,
	.remove		= __devexit_p(sym2_remove),
};

static int __init sym2_init(void)
{
	int error;

	sym2_setup_params();
	sym2_transport_template = spi_attach_transport(&sym2_transport_functions);
	if (!sym2_transport_template)
		return -ENODEV;

	error = pci_register_driver(&sym2_driver);
	if (error)
		spi_release_transport(sym2_transport_template);
	return error;
}

static void __exit sym2_exit(void)
{
	pci_unregister_driver(&sym2_driver);
	spi_release_transport(sym2_transport_template);
}

module_init(sym2_init);
module_exit(sym2_exit);
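
/*
 * Example (hypothetical values) of loading this driver with some of the
 * module parameters declared near the top of this file; "excl" is assumed
 * to take a comma-separated list of ioport addresses to skip:
 *
 *	modprobe sym53c8xx cmd_per_lun=16 burst=7 settle=5 excl=0xb400,0xb800
 */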