1 /* 2 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 3 * of PCI-SCSI IO processors. 4 * 5 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr> 6 * Copyright (c) 2003-2005 Matthew Wilcox <matthew@wil.cx> 7 * 8 * This driver is derived from the Linux sym53c8xx driver. 9 * Copyright (C) 1998-2000 Gerard Roudier 10 * 11 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been 12 * a port of the FreeBSD ncr driver to Linux-1.2.13. 13 * 14 * The original ncr driver has been written for 386bsd and FreeBSD by 15 * Wolfgang Stanglmeier <wolf@cologne.de> 16 * Stefan Esser <se@mi.Uni-Koeln.de> 17 * Copyright (C) 1994 Wolfgang Stanglmeier 18 * 19 * Other major contributions: 20 * 21 * NVRAM detection and reading. 22 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk> 23 * 24 *----------------------------------------------------------------------------- 25 * 26 * This program is free software; you can redistribute it and/or modify 27 * it under the terms of the GNU General Public License as published by 28 * the Free Software Foundation; either version 2 of the License, or 29 * (at your option) any later version. 30 * 31 * This program is distributed in the hope that it will be useful, 32 * but WITHOUT ANY WARRANTY; without even the implied warranty of 33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 34 * GNU General Public License for more details. 35 * 36 * You should have received a copy of the GNU General Public License 37 * along with this program; if not, write to the Free Software 38 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 39 */ 40 #include <linux/ctype.h> 41 #include <linux/init.h> 42 #include <linux/interrupt.h> 43 #include <linux/module.h> 44 #include <linux/moduleparam.h> 45 #include <linux/spinlock.h> 46 #include <scsi/scsi.h> 47 #include <scsi/scsi_tcq.h> 48 #include <scsi/scsi_device.h> 49 #include <scsi/scsi_transport.h> 50 51 #include "sym_glue.h" 52 #include "sym_nvram.h" 53 54 #define NAME53C "sym53c" 55 #define NAME53C8XX "sym53c8xx" 56 57 /* SPARC just has to be different ... 
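 * On sparc the IRQ cookie is formatted as a string by __irq_itoa(), everywhere
 * else it is printed as a plain integer, hence the two variants below.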
*/ 58 #ifdef __sparc__ 59 #define IRQ_FMT "%s" 60 #define IRQ_PRM(x) __irq_itoa(x) 61 #else 62 #define IRQ_FMT "%d" 63 #define IRQ_PRM(x) (x) 64 #endif 65 66 struct sym_driver_setup sym_driver_setup = SYM_LINUX_DRIVER_SETUP; 67 unsigned int sym_debug_flags = 0; 68 69 static char *excl_string; 70 static char *safe_string; 71 module_param_named(cmd_per_lun, sym_driver_setup.max_tag, ushort, 0); 72 module_param_string(tag_ctrl, sym_driver_setup.tag_ctrl, 100, 0); 73 module_param_named(burst, sym_driver_setup.burst_order, byte, 0); 74 module_param_named(led, sym_driver_setup.scsi_led, byte, 0); 75 module_param_named(diff, sym_driver_setup.scsi_diff, byte, 0); 76 module_param_named(irqm, sym_driver_setup.irq_mode, byte, 0); 77 module_param_named(buschk, sym_driver_setup.scsi_bus_check, byte, 0); 78 module_param_named(hostid, sym_driver_setup.host_id, byte, 0); 79 module_param_named(verb, sym_driver_setup.verbose, byte, 0); 80 module_param_named(debug, sym_debug_flags, uint, 0); 81 module_param_named(settle, sym_driver_setup.settle_delay, byte, 0); 82 module_param_named(nvram, sym_driver_setup.use_nvram, byte, 0); 83 module_param_named(excl, excl_string, charp, 0); 84 module_param_named(safe, safe_string, charp, 0); 85 86 MODULE_PARM_DESC(cmd_per_lun, "The maximum number of tags to use by default"); 87 MODULE_PARM_DESC(tag_ctrl, "More detailed control over tags per LUN"); 88 MODULE_PARM_DESC(burst, "Maximum burst. 0 to disable, 255 to read from registers"); 89 MODULE_PARM_DESC(led, "Set to 1 to enable LED support"); 90 MODULE_PARM_DESC(diff, "0 for no differential mode, 1 for BIOS, 2 for always, 3 for not GPIO3"); 91 MODULE_PARM_DESC(irqm, "0 for open drain, 1 to leave alone, 2 for totem pole"); 92 MODULE_PARM_DESC(buschk, "0 to not check, 1 for detach on error, 2 for warn on error"); 93 MODULE_PARM_DESC(hostid, "The SCSI ID to use for the host adapters"); 94 MODULE_PARM_DESC(verb, "0 for minimal verbosity, 1 for normal, 2 for excessive"); 95 MODULE_PARM_DESC(debug, "Set bits to enable debugging"); 96 MODULE_PARM_DESC(settle, "Settle delay in seconds. Default 3"); 97 MODULE_PARM_DESC(nvram, "Option currently not used"); 98 MODULE_PARM_DESC(excl, "List ioport addresses here to prevent controllers from being attached"); 99 MODULE_PARM_DESC(safe, "Set other settings to a \"safe mode\""); 100 101 MODULE_LICENSE("GPL"); 102 MODULE_VERSION(SYM_VERSION); 103 MODULE_AUTHOR("Matthew Wilcox <matthew@wil.cx>"); 104 MODULE_DESCRIPTION("NCR, Symbios and LSI 8xx and 1010 PCI SCSI adapters"); 105 106 static void sym2_setup_params(void) 107 { 108 char *p = excl_string; 109 int xi = 0; 110 111 while (p && (xi < 8)) { 112 char *next_p; 113 int val = (int) simple_strtoul(p, &next_p, 0); 114 sym_driver_setup.excludes[xi++] = val; 115 p = next_p; 116 } 117 118 if (safe_string) { 119 if (*safe_string == 'y') { 120 sym_driver_setup.max_tag = 0; 121 sym_driver_setup.burst_order = 0; 122 sym_driver_setup.scsi_led = 0; 123 sym_driver_setup.scsi_diff = 1; 124 sym_driver_setup.irq_mode = 0; 125 sym_driver_setup.scsi_bus_check = 2; 126 sym_driver_setup.host_id = 7; 127 sym_driver_setup.verbose = 2; 128 sym_driver_setup.settle_delay = 10; 129 sym_driver_setup.use_nvram = 1; 130 } else if (*safe_string != 'n') { 131 printk(KERN_WARNING NAME53C8XX "Ignoring parameter %s" 132 " passed to safe option", safe_string); 133 } 134 } 135 } 136 137 /* 138 * We used to try to deal with 64-bit BARs here, but don't any more. 
139 * There are many parts of this driver which would need to be modified 140 * to handle a 64-bit base address, including scripts. I'm uncomfortable 141 * with making those changes when I have no way of testing it, so I'm 142 * just going to disable it. 143 * 144 * Note that some machines (eg HP rx8620 and Superdome) have bus addresses 145 * below 4GB and physical addresses above 4GB. These will continue to work. 146 */ 147 static int __devinit 148 pci_get_base_address(struct pci_dev *pdev, int index, unsigned long *basep) 149 { 150 u32 tmp; 151 unsigned long base; 152 #define PCI_BAR_OFFSET(index) (PCI_BASE_ADDRESS_0 + (index<<2)) 153 154 pci_read_config_dword(pdev, PCI_BAR_OFFSET(index++), &tmp); 155 base = tmp; 156 if ((tmp & 0x7) == PCI_BASE_ADDRESS_MEM_TYPE_64) { 157 pci_read_config_dword(pdev, PCI_BAR_OFFSET(index++), &tmp); 158 if (tmp > 0) 159 dev_err(&pdev->dev, 160 "BAR %d is 64-bit, disabling\n", index - 1); 161 base = 0; 162 } 163 164 if ((base & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { 165 base &= PCI_BASE_ADDRESS_IO_MASK; 166 } else { 167 base &= PCI_BASE_ADDRESS_MEM_MASK; 168 } 169 170 *basep = base; 171 return index; 172 #undef PCI_BAR_OFFSET 173 } 174 175 static struct scsi_transport_template *sym2_transport_template = NULL; 176 177 /* 178 * Used by the eh thread to wait for command completion. 179 * It is allocated on the eh thread stack. 180 */ 181 struct sym_eh_wait { 182 struct completion done; 183 struct timer_list timer; 184 void (*old_done)(struct scsi_cmnd *); 185 int to_do; 186 int timed_out; 187 }; 188 189 /* 190 * Driver private area in the SCSI command structure. 191 */ 192 struct sym_ucmd { /* Override the SCSI pointer structure */ 193 dma_addr_t data_mapping; 194 u_char data_mapped; 195 struct sym_eh_wait *eh_wait; 196 }; 197 198 #define SYM_UCMD_PTR(cmd) ((struct sym_ucmd *)(&(cmd)->SCp)) 199 #define SYM_SOFTC_PTR(cmd) sym_get_hcb(cmd->device->host) 200 201 static void __unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) 202 { 203 int dma_dir = cmd->sc_data_direction; 204 205 switch(SYM_UCMD_PTR(cmd)->data_mapped) { 206 case 2: 207 pci_unmap_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir); 208 break; 209 case 1: 210 pci_unmap_single(pdev, SYM_UCMD_PTR(cmd)->data_mapping, 211 cmd->request_bufflen, dma_dir); 212 break; 213 } 214 SYM_UCMD_PTR(cmd)->data_mapped = 0; 215 } 216 217 static dma_addr_t __map_scsi_single_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) 218 { 219 dma_addr_t mapping; 220 int dma_dir = cmd->sc_data_direction; 221 222 mapping = pci_map_single(pdev, cmd->request_buffer, 223 cmd->request_bufflen, dma_dir); 224 if (mapping) { 225 SYM_UCMD_PTR(cmd)->data_mapped = 1; 226 SYM_UCMD_PTR(cmd)->data_mapping = mapping; 227 } 228 229 return mapping; 230 } 231 232 static int __map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) 233 { 234 int use_sg; 235 int dma_dir = cmd->sc_data_direction; 236 237 use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir); 238 if (use_sg > 0) { 239 SYM_UCMD_PTR(cmd)->data_mapped = 2; 240 SYM_UCMD_PTR(cmd)->data_mapping = use_sg; 241 } 242 243 return use_sg; 244 } 245 246 #define unmap_scsi_data(np, cmd) \ 247 __unmap_scsi_data(np->s.device, cmd) 248 #define map_scsi_single_data(np, cmd) \ 249 __map_scsi_single_data(np->s.device, cmd) 250 #define map_scsi_sg_data(np, cmd) \ 251 __map_scsi_sg_data(np->s.device, cmd) 252 /* 253 * Complete a pending CAM CCB. 
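 *  Unmap any DMA still mapped for the command and hand it back to the
 *  SCSI midlayer through scsi_done().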
254 */ 255 void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *cmd) 256 { 257 unmap_scsi_data(np, cmd); 258 cmd->scsi_done(cmd); 259 } 260 261 static void sym_xpt_done2(struct sym_hcb *np, struct scsi_cmnd *cmd, int cam_status) 262 { 263 sym_set_cam_status(cmd, cam_status); 264 sym_xpt_done(np, cmd); 265 } 266 267 268 /* 269 * Tell the SCSI layer about a BUS RESET. 270 */ 271 void sym_xpt_async_bus_reset(struct sym_hcb *np) 272 { 273 printf_notice("%s: SCSI BUS has been reset.\n", sym_name(np)); 274 np->s.settle_time = jiffies + sym_driver_setup.settle_delay * HZ; 275 np->s.settle_time_valid = 1; 276 if (sym_verbose >= 2) 277 printf_info("%s: command processing suspended for %d seconds\n", 278 sym_name(np), sym_driver_setup.settle_delay); 279 } 280 281 /* 282 * Tell the SCSI layer about a BUS DEVICE RESET message sent. 283 */ 284 void sym_xpt_async_sent_bdr(struct sym_hcb *np, int target) 285 { 286 printf_notice("%s: TARGET %d has been reset.\n", sym_name(np), target); 287 } 288 289 /* 290 * Choose the more appropriate CAM status if 291 * the IO encountered an extended error. 292 */ 293 static int sym_xerr_cam_status(int cam_status, int x_status) 294 { 295 if (x_status) { 296 if (x_status & XE_PARITY_ERR) 297 cam_status = DID_PARITY; 298 else if (x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) 299 cam_status = DID_ERROR; 300 else if (x_status & XE_BAD_PHASE) 301 cam_status = DID_ERROR; 302 else 303 cam_status = DID_ERROR; 304 } 305 return cam_status; 306 } 307 308 /* 309 * Build CAM result for a failed or auto-sensed IO. 310 */ 311 void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid) 312 { 313 struct scsi_cmnd *cmd = cp->cmd; 314 u_int cam_status, scsi_status, drv_status; 315 316 drv_status = 0; 317 cam_status = DID_OK; 318 scsi_status = cp->ssss_status; 319 320 if (cp->host_flags & HF_SENSE) { 321 scsi_status = cp->sv_scsi_status; 322 resid = cp->sv_resid; 323 if (sym_verbose && cp->sv_xerr_status) 324 sym_print_xerr(cmd, cp->sv_xerr_status); 325 if (cp->host_status == HS_COMPLETE && 326 cp->ssss_status == S_GOOD && 327 cp->xerr_status == 0) { 328 cam_status = sym_xerr_cam_status(DID_OK, 329 cp->sv_xerr_status); 330 drv_status = DRIVER_SENSE; 331 /* 332 * Bounce back the sense data to user. 333 */ 334 memset(&cmd->sense_buffer, 0, sizeof(cmd->sense_buffer)); 335 memcpy(cmd->sense_buffer, cp->sns_bbuf, 336 min(sizeof(cmd->sense_buffer), 337 (size_t)SYM_SNS_BBUF_LEN)); 338 #if 0 339 /* 340 * If the device reports a UNIT ATTENTION condition 341 * due to a RESET condition, we should consider all 342 * disconnect CCBs for this unit as aborted. 343 */ 344 if (1) { 345 u_char *p; 346 p = (u_char *) cmd->sense_data; 347 if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29) 348 sym_clear_tasks(np, DID_ABORT, 349 cp->target,cp->lun, -1); 350 } 351 #endif 352 } else { 353 /* 354 * Error return from our internal request sense. This 355 * is bad: we must clear the contingent allegiance 356 * condition otherwise the device will always return 357 * BUSY. Use a big stick. 
358 */ 359 sym_reset_scsi_target(np, cmd->device->id); 360 cam_status = DID_ERROR; 361 } 362 } else if (cp->host_status == HS_COMPLETE) /* Bad SCSI status */ 363 cam_status = DID_OK; 364 else if (cp->host_status == HS_SEL_TIMEOUT) /* Selection timeout */ 365 cam_status = DID_NO_CONNECT; 366 else if (cp->host_status == HS_UNEXPECTED) /* Unexpected BUS FREE*/ 367 cam_status = DID_ERROR; 368 else { /* Extended error */ 369 if (sym_verbose) { 370 sym_print_addr(cmd, "COMMAND FAILED (%x %x %x).\n", 371 cp->host_status, cp->ssss_status, 372 cp->xerr_status); 373 } 374 /* 375 * Set the most appropriate value for CAM status. 376 */ 377 cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status); 378 } 379 cmd->resid = resid; 380 cmd->result = (drv_status << 24) + (cam_status << 16) + scsi_status; 381 } 382 383 384 /* 385 * Build the scatter/gather array for an I/O. 386 */ 387 388 static int sym_scatter_no_sglist(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd) 389 { 390 struct sym_tblmove *data = &cp->phys.data[SYM_CONF_MAX_SG-1]; 391 int segment; 392 393 cp->data_len = cmd->request_bufflen; 394 395 if (cmd->request_bufflen) { 396 dma_addr_t baddr = map_scsi_single_data(np, cmd); 397 if (baddr) { 398 sym_build_sge(np, data, baddr, cmd->request_bufflen); 399 segment = 1; 400 } else { 401 segment = -2; 402 } 403 } else { 404 segment = 0; 405 } 406 407 return segment; 408 } 409 410 static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd) 411 { 412 int segment; 413 int use_sg = (int) cmd->use_sg; 414 415 cp->data_len = 0; 416 417 if (!use_sg) 418 segment = sym_scatter_no_sglist(np, cp, cmd); 419 else if ((use_sg = map_scsi_sg_data(np, cmd)) > 0) { 420 struct scatterlist *scatter = (struct scatterlist *)cmd->buffer; 421 struct sym_tblmove *data; 422 423 if (use_sg > SYM_CONF_MAX_SG) { 424 unmap_scsi_data(np, cmd); 425 return -1; 426 } 427 428 data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg]; 429 430 for (segment = 0; segment < use_sg; segment++) { 431 dma_addr_t baddr = sg_dma_address(&scatter[segment]); 432 unsigned int len = sg_dma_len(&scatter[segment]); 433 434 sym_build_sge(np, &data[segment], baddr, len); 435 cp->data_len += len; 436 } 437 } else { 438 segment = -2; 439 } 440 441 return segment; 442 } 443 444 /* 445 * Queue a SCSI command. 446 */ 447 static int sym_queue_command(struct sym_hcb *np, struct scsi_cmnd *cmd) 448 { 449 struct scsi_device *sdev = cmd->device; 450 struct sym_tcb *tp; 451 struct sym_lcb *lp; 452 struct sym_ccb *cp; 453 int order; 454 455 /* 456 * Minimal checkings, so that we will not 457 * go outside our tables. 458 */ 459 if (sdev->id == np->myaddr || 460 sdev->id >= SYM_CONF_MAX_TARGET || 461 sdev->lun >= SYM_CONF_MAX_LUN) { 462 sym_xpt_done2(np, cmd, CAM_DEV_NOT_THERE); 463 return 0; 464 } 465 466 /* 467 * Retrieve the target descriptor. 468 */ 469 tp = &np->target[sdev->id]; 470 471 /* 472 * Complete the 1st INQUIRY command with error 473 * condition if the device is flagged NOSCAN 474 * at BOOT in the NVRAM. This may speed up 475 * the boot and maintain coherency with BIOS 476 * device numbering. Clearing the flag allows 477 * user to rescan skipped devices later. 478 * We also return error for devices not flagged 479 * for SCAN LUNS in the NVRAM since some mono-lun 480 * devices behave badly when asked for some non 481 * zero LUN. 
Btw, this is an absolute hack.:-) 482 */ 483 if (cmd->cmnd[0] == 0x12 || cmd->cmnd[0] == 0x0) { 484 if ((tp->usrflags & SYM_SCAN_BOOT_DISABLED) || 485 ((tp->usrflags & SYM_SCAN_LUNS_DISABLED) && 486 sdev->lun != 0)) { 487 tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED; 488 sym_xpt_done2(np, cmd, CAM_DEV_NOT_THERE); 489 return 0; 490 } 491 } 492 493 /* 494 * Select tagged/untagged. 495 */ 496 lp = sym_lp(tp, sdev->lun); 497 order = (lp && lp->s.reqtags) ? M_SIMPLE_TAG : 0; 498 499 /* 500 * Queue the SCSI IO. 501 */ 502 cp = sym_get_ccb(np, cmd, order); 503 if (!cp) 504 return 1; /* Means resource shortage */ 505 sym_queue_scsiio(np, cmd, cp); 506 return 0; 507 } 508 509 /* 510 * Setup buffers and pointers that address the CDB. 511 */ 512 static inline int sym_setup_cdb(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) 513 { 514 u32 cmd_ba; 515 int cmd_len; 516 517 /* 518 * CDB is 16 bytes max. 519 */ 520 if (cmd->cmd_len > sizeof(cp->cdb_buf)) { 521 sym_set_cam_status(cp->cmd, CAM_REQ_INVALID); 522 return -1; 523 } 524 525 memcpy(cp->cdb_buf, cmd->cmnd, cmd->cmd_len); 526 cmd_ba = CCB_BA (cp, cdb_buf[0]); 527 cmd_len = cmd->cmd_len; 528 529 cp->phys.cmd.addr = cpu_to_scr(cmd_ba); 530 cp->phys.cmd.size = cpu_to_scr(cmd_len); 531 532 return 0; 533 } 534 535 /* 536 * Setup pointers that address the data and start the I/O. 537 */ 538 int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) 539 { 540 int dir; 541 struct sym_tcb *tp = &np->target[cp->target]; 542 struct sym_lcb *lp = sym_lp(tp, cp->lun); 543 544 /* 545 * Build the CDB. 546 */ 547 if (sym_setup_cdb(np, cmd, cp)) 548 goto out_abort; 549 550 /* 551 * No direction means no data. 552 */ 553 dir = cmd->sc_data_direction; 554 if (dir != DMA_NONE) { 555 cp->segments = sym_scatter(np, cp, cmd); 556 if (cp->segments < 0) { 557 if (cp->segments == -2) 558 sym_set_cam_status(cmd, CAM_RESRC_UNAVAIL); 559 else 560 sym_set_cam_status(cmd, CAM_REQ_TOO_BIG); 561 goto out_abort; 562 } 563 } else { 564 cp->data_len = 0; 565 cp->segments = 0; 566 } 567 568 /* 569 * Set data pointers. 570 */ 571 sym_setup_data_pointers(np, cp, dir); 572 573 /* 574 * When `#ifed 1', the code below makes the driver 575 * panic on the first attempt to write to a SCSI device. 576 * It is the first test we want to do after a driver 577 * change that does not seem obviously safe. :) 578 */ 579 #if 0 580 switch (cp->cdb_buf[0]) { 581 case 0x0A: case 0x2A: case 0xAA: 582 panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n"); 583 break; 584 default: 585 break; 586 } 587 #endif 588 589 /* 590 * activate this job. 591 */ 592 if (lp) 593 sym_start_next_ccbs(np, lp, 2); 594 else 595 sym_put_start_queue(np, cp); 596 return 0; 597 598 out_abort: 599 sym_free_ccb(np, cp); 600 sym_xpt_done(np, cmd); 601 return 0; 602 } 603 604 605 /* 606 * timer daemon. 607 * 608 * Misused to keep the driver running when 609 * interrupts are not configured correctly. 610 */ 611 static void sym_timer(struct sym_hcb *np) 612 { 613 unsigned long thistime = jiffies; 614 615 /* 616 * Restart the timer. 617 */ 618 np->s.timer.expires = thistime + SYM_CONF_TIMER_INTERVAL; 619 add_timer(&np->s.timer); 620 621 /* 622 * If we are resetting the ncr, wait for settle_time before 623 * clearing it. Then command processing will be resumed. 
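 *  While settle_time_valid is set, queuecommand() returns
 *  SCSI_MLQUEUE_HOST_BUSY, so no new commands reach the chip until the
 *  delay has expired.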
624 */ 625 if (np->s.settle_time_valid) { 626 if (time_before_eq(np->s.settle_time, thistime)) { 627 if (sym_verbose >= 2 ) 628 printk("%s: command processing resumed\n", 629 sym_name(np)); 630 np->s.settle_time_valid = 0; 631 } 632 return; 633 } 634 635 /* 636 * Nothing to do for now, but that may come. 637 */ 638 if (np->s.lasttime + 4*HZ < thistime) { 639 np->s.lasttime = thistime; 640 } 641 642 #ifdef SYM_CONF_PCIQ_MAY_MISS_COMPLETIONS 643 /* 644 * Some way-broken PCI bridges may lead to 645 * completions being lost when the clearing 646 * of the INTFLY flag by the CPU occurs 647 * concurrently with the chip raising this flag. 648 * If this ever happen, lost completions will 649 * be reaped here. 650 */ 651 sym_wakeup_done(np); 652 #endif 653 } 654 655 656 /* 657 * PCI BUS error handler. 658 */ 659 void sym_log_bus_error(struct sym_hcb *np) 660 { 661 u_short pci_sts; 662 pci_read_config_word(np->s.device, PCI_STATUS, &pci_sts); 663 if (pci_sts & 0xf900) { 664 pci_write_config_word(np->s.device, PCI_STATUS, pci_sts); 665 printf("%s: PCI STATUS = 0x%04x\n", 666 sym_name(np), pci_sts & 0xf900); 667 } 668 } 669 670 /* 671 * queuecommand method. Entered with the host adapter lock held and 672 * interrupts disabled. 673 */ 674 static int sym53c8xx_queue_command(struct scsi_cmnd *cmd, 675 void (*done)(struct scsi_cmnd *)) 676 { 677 struct sym_hcb *np = SYM_SOFTC_PTR(cmd); 678 struct sym_ucmd *ucp = SYM_UCMD_PTR(cmd); 679 int sts = 0; 680 681 cmd->scsi_done = done; 682 memset(ucp, 0, sizeof(*ucp)); 683 684 /* 685 * Shorten our settle_time if needed for 686 * this command not to time out. 687 */ 688 if (np->s.settle_time_valid && cmd->timeout_per_command) { 689 unsigned long tlimit = jiffies + cmd->timeout_per_command; 690 tlimit -= SYM_CONF_TIMER_INTERVAL*2; 691 if (time_after(np->s.settle_time, tlimit)) { 692 np->s.settle_time = tlimit; 693 } 694 } 695 696 if (np->s.settle_time_valid) 697 return SCSI_MLQUEUE_HOST_BUSY; 698 699 sts = sym_queue_command(np, cmd); 700 if (sts) 701 return SCSI_MLQUEUE_HOST_BUSY; 702 return 0; 703 } 704 705 /* 706 * Linux entry point of the interrupt handler. 707 */ 708 static irqreturn_t sym53c8xx_intr(int irq, void *dev_id, struct pt_regs * regs) 709 { 710 unsigned long flags; 711 struct sym_hcb *np = (struct sym_hcb *)dev_id; 712 713 if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("["); 714 715 spin_lock_irqsave(np->s.host->host_lock, flags); 716 sym_interrupt(np); 717 spin_unlock_irqrestore(np->s.host->host_lock, flags); 718 719 if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("]\n"); 720 721 return IRQ_HANDLED; 722 } 723 724 /* 725 * Linux entry point of the timer handler 726 */ 727 static void sym53c8xx_timer(unsigned long npref) 728 { 729 struct sym_hcb *np = (struct sym_hcb *)npref; 730 unsigned long flags; 731 732 spin_lock_irqsave(np->s.host->host_lock, flags); 733 sym_timer(np); 734 spin_unlock_irqrestore(np->s.host->host_lock, flags); 735 } 736 737 738 /* 739 * What the eh thread wants us to perform. 740 */ 741 #define SYM_EH_ABORT 0 742 #define SYM_EH_DEVICE_RESET 1 743 #define SYM_EH_BUS_RESET 2 744 #define SYM_EH_HOST_RESET 3 745 746 /* 747 * What we will do regarding the involved SCSI command. 748 */ 749 #define SYM_EH_DO_IGNORE 0 750 #define SYM_EH_DO_COMPLETE 1 751 #define SYM_EH_DO_WAIT 2 752 753 /* 754 * Our general completion handler. 
755 */ 756 static void __sym_eh_done(struct scsi_cmnd *cmd, int timed_out) 757 { 758 struct sym_eh_wait *ep = SYM_UCMD_PTR(cmd)->eh_wait; 759 if (!ep) 760 return; 761 762 /* Try to avoid a race here (not 100% safe) */ 763 if (!timed_out) { 764 ep->timed_out = 0; 765 if (ep->to_do == SYM_EH_DO_WAIT && !del_timer(&ep->timer)) 766 return; 767 } 768 769 /* Revert everything */ 770 SYM_UCMD_PTR(cmd)->eh_wait = NULL; 771 cmd->scsi_done = ep->old_done; 772 773 /* Wake up the eh thread if it wants to sleep */ 774 if (ep->to_do == SYM_EH_DO_WAIT) 775 complete(&ep->done); 776 } 777 778 /* 779 * scsi_done() alias when error recovery is in progress. 780 */ 781 static void sym_eh_done(struct scsi_cmnd *cmd) { __sym_eh_done(cmd, 0); } 782 783 /* 784 * Some timeout handler to avoid waiting too long. 785 */ 786 static void sym_eh_timeout(u_long p) { __sym_eh_done((struct scsi_cmnd *)p, 1); } 787 788 /* 789 * Generic method for our eh processing. 790 * The 'op' argument tells what we have to do. 791 */ 792 static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd) 793 { 794 struct sym_hcb *np = SYM_SOFTC_PTR(cmd); 795 SYM_QUEHEAD *qp; 796 int to_do = SYM_EH_DO_IGNORE; 797 int sts = -1; 798 struct sym_eh_wait eh, *ep = &eh; 799 800 dev_warn(&cmd->device->sdev_gendev, "%s operation started.\n", opname); 801 802 #if 0 803 /* This one should be the result of some race, thus to ignore */ 804 if (cmd->serial_number != cmd->serial_number_at_timeout) 805 goto prepare; 806 #endif 807 808 /* This one is queued in some place -> to wait for completion */ 809 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { 810 struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); 811 if (cp->cmd == cmd) { 812 to_do = SYM_EH_DO_WAIT; 813 goto prepare; 814 } 815 } 816 817 prepare: 818 /* Prepare stuff to either ignore, complete or wait for completion */ 819 switch(to_do) { 820 default: 821 case SYM_EH_DO_IGNORE: 822 break; 823 case SYM_EH_DO_WAIT: 824 init_completion(&ep->done); 825 /* fall through */ 826 case SYM_EH_DO_COMPLETE: 827 ep->old_done = cmd->scsi_done; 828 cmd->scsi_done = sym_eh_done; 829 SYM_UCMD_PTR(cmd)->eh_wait = ep; 830 } 831 832 /* Try to proceed the operation we have been asked for */ 833 sts = -1; 834 switch(op) { 835 case SYM_EH_ABORT: 836 sts = sym_abort_scsiio(np, cmd, 1); 837 break; 838 case SYM_EH_DEVICE_RESET: 839 sts = sym_reset_scsi_target(np, cmd->device->id); 840 break; 841 case SYM_EH_BUS_RESET: 842 sym_reset_scsi_bus(np, 1); 843 sts = 0; 844 break; 845 case SYM_EH_HOST_RESET: 846 sym_reset_scsi_bus(np, 0); 847 sym_start_up (np, 1); 848 sts = 0; 849 break; 850 default: 851 break; 852 } 853 854 /* On error, restore everything and cross fingers :) */ 855 if (sts) { 856 SYM_UCMD_PTR(cmd)->eh_wait = NULL; 857 cmd->scsi_done = ep->old_done; 858 to_do = SYM_EH_DO_IGNORE; 859 } 860 861 ep->to_do = to_do; 862 /* Complete the command with locks held as required by the driver */ 863 if (to_do == SYM_EH_DO_COMPLETE) 864 sym_xpt_done2(np, cmd, CAM_REQ_ABORTED); 865 866 /* Wait for completion with locks released, as required by kernel */ 867 if (to_do == SYM_EH_DO_WAIT) { 868 init_timer(&ep->timer); 869 ep->timer.expires = jiffies + (5*HZ); 870 ep->timer.function = sym_eh_timeout; 871 ep->timer.data = (u_long)cmd; 872 ep->timed_out = 1; /* Be pessimistic for once :) */ 873 add_timer(&ep->timer); 874 spin_unlock_irq(np->s.host->host_lock); 875 wait_for_completion(&ep->done); 876 spin_lock_irq(np->s.host->host_lock); 877 if (ep->timed_out) 878 sts = -2; 879 } 880 dev_warn(&cmd->device->sdev_gendev, 
"%s operation %s.\n", opname, 881 sts==0 ? "complete" :sts==-2 ? "timed-out" : "failed"); 882 return sts ? SCSI_FAILED : SCSI_SUCCESS; 883 } 884 885 886 /* 887 * Error handlers called from the eh thread (one thread per HBA). 888 */ 889 static int sym53c8xx_eh_abort_handler(struct scsi_cmnd *cmd) 890 { 891 return sym_eh_handler(SYM_EH_ABORT, "ABORT", cmd); 892 } 893 894 static int sym53c8xx_eh_device_reset_handler(struct scsi_cmnd *cmd) 895 { 896 return sym_eh_handler(SYM_EH_DEVICE_RESET, "DEVICE RESET", cmd); 897 } 898 899 static int sym53c8xx_eh_bus_reset_handler(struct scsi_cmnd *cmd) 900 { 901 return sym_eh_handler(SYM_EH_BUS_RESET, "BUS RESET", cmd); 902 } 903 904 static int sym53c8xx_eh_host_reset_handler(struct scsi_cmnd *cmd) 905 { 906 return sym_eh_handler(SYM_EH_HOST_RESET, "HOST RESET", cmd); 907 } 908 909 /* 910 * Tune device queuing depth, according to various limits. 911 */ 912 static void sym_tune_dev_queuing(struct sym_tcb *tp, int lun, u_short reqtags) 913 { 914 struct sym_lcb *lp = sym_lp(tp, lun); 915 u_short oldtags; 916 917 if (!lp) 918 return; 919 920 oldtags = lp->s.reqtags; 921 922 if (reqtags > lp->s.scdev_depth) 923 reqtags = lp->s.scdev_depth; 924 925 lp->started_limit = reqtags ? reqtags : 2; 926 lp->started_max = 1; 927 lp->s.reqtags = reqtags; 928 929 if (reqtags != oldtags) { 930 dev_info(&tp->sdev->sdev_target->dev, 931 "tagged command queuing %s, command queue depth %d.\n", 932 lp->s.reqtags ? "enabled" : "disabled", 933 lp->started_limit); 934 } 935 } 936 937 /* 938 * Linux select queue depths function 939 */ 940 #define DEF_DEPTH (sym_driver_setup.max_tag) 941 #define ALL_TARGETS -2 942 #define NO_TARGET -1 943 #define ALL_LUNS -2 944 #define NO_LUN -1 945 946 static int device_queue_depth(struct sym_hcb *np, int target, int lun) 947 { 948 int c, h, t, u, v; 949 char *p = sym_driver_setup.tag_ctrl; 950 char *ep; 951 952 h = -1; 953 t = NO_TARGET; 954 u = NO_LUN; 955 while ((c = *p++) != 0) { 956 v = simple_strtoul(p, &ep, 0); 957 switch(c) { 958 case '/': 959 ++h; 960 t = ALL_TARGETS; 961 u = ALL_LUNS; 962 break; 963 case 't': 964 if (t != target) 965 t = (target == v) ? v : NO_TARGET; 966 u = ALL_LUNS; 967 break; 968 case 'u': 969 if (u != lun) 970 u = (lun == v) ? v : NO_LUN; 971 break; 972 case 'q': 973 if (h == np->s.unit && 974 (t == ALL_TARGETS || t == target) && 975 (u == ALL_LUNS || u == lun)) 976 return v; 977 break; 978 case '-': 979 t = ALL_TARGETS; 980 u = ALL_LUNS; 981 break; 982 default: 983 break; 984 } 985 p = ep; 986 } 987 return DEF_DEPTH; 988 } 989 990 static int sym53c8xx_slave_alloc(struct scsi_device *device) 991 { 992 struct sym_hcb *np = sym_get_hcb(device->host); 993 struct sym_tcb *tp = &np->target[device->id]; 994 if (!tp->sdev) 995 tp->sdev = device; 996 997 return 0; 998 } 999 1000 static void sym53c8xx_slave_destroy(struct scsi_device *device) 1001 { 1002 struct sym_hcb *np = sym_get_hcb(device->host); 1003 struct sym_tcb *tp = &np->target[device->id]; 1004 if (tp->sdev == device) 1005 tp->sdev = NULL; 1006 } 1007 1008 /* 1009 * Linux entry point for device queue sizing. 1010 */ 1011 static int sym53c8xx_slave_configure(struct scsi_device *device) 1012 { 1013 struct sym_hcb *np = sym_get_hcb(device->host); 1014 struct sym_tcb *tp = &np->target[device->id]; 1015 struct sym_lcb *lp; 1016 int reqtags, depth_to_use; 1017 1018 /* 1019 * Allocate the LCB if not yet. 1020 * If it fail, we may well be in the sh*t. 
:) 1021 */ 1022 lp = sym_alloc_lcb(np, device->id, device->lun); 1023 if (!lp) 1024 return -ENOMEM; 1025 1026 /* 1027 * Get user flags. 1028 */ 1029 lp->curr_flags = lp->user_flags; 1030 1031 /* 1032 * Select queue depth from driver setup. 1033 * Donnot use more than configured by user. 1034 * Use at least 2. 1035 * Donnot use more than our maximum. 1036 */ 1037 reqtags = device_queue_depth(np, device->id, device->lun); 1038 if (reqtags > tp->usrtags) 1039 reqtags = tp->usrtags; 1040 if (!device->tagged_supported) 1041 reqtags = 0; 1042 #if 1 /* Avoid to locally queue commands for no good reasons */ 1043 if (reqtags > SYM_CONF_MAX_TAG) 1044 reqtags = SYM_CONF_MAX_TAG; 1045 depth_to_use = (reqtags ? reqtags : 2); 1046 #else 1047 depth_to_use = (reqtags ? SYM_CONF_MAX_TAG : 2); 1048 #endif 1049 scsi_adjust_queue_depth(device, 1050 (device->tagged_supported ? 1051 MSG_SIMPLE_TAG : 0), 1052 depth_to_use); 1053 lp->s.scdev_depth = depth_to_use; 1054 sym_tune_dev_queuing(tp, device->lun, reqtags); 1055 1056 if (!spi_initial_dv(device->sdev_target)) 1057 spi_dv_device(device); 1058 1059 return 0; 1060 } 1061 1062 /* 1063 * Linux entry point for info() function 1064 */ 1065 static const char *sym53c8xx_info (struct Scsi_Host *host) 1066 { 1067 return SYM_DRIVER_NAME; 1068 } 1069 1070 1071 #ifdef SYM_LINUX_PROC_INFO_SUPPORT 1072 /* 1073 * Proc file system stuff 1074 * 1075 * A read operation returns adapter information. 1076 * A write operation is a control command. 1077 * The string is parsed in the driver code and the command is passed 1078 * to the sym_usercmd() function. 1079 */ 1080 1081 #ifdef SYM_LINUX_USER_COMMAND_SUPPORT 1082 1083 struct sym_usrcmd { 1084 u_long target; 1085 u_long lun; 1086 u_long data; 1087 u_long cmd; 1088 }; 1089 1090 #define UC_SETSYNC 10 1091 #define UC_SETTAGS 11 1092 #define UC_SETDEBUG 12 1093 #define UC_SETWIDE 14 1094 #define UC_SETFLAG 15 1095 #define UC_SETVERBOSE 17 1096 #define UC_RESETDEV 18 1097 #define UC_CLEARDEV 19 1098 1099 static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc) 1100 { 1101 struct sym_tcb *tp; 1102 int t, l; 1103 1104 switch (uc->cmd) { 1105 case 0: return; 1106 1107 #ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT 1108 case UC_SETDEBUG: 1109 sym_debug_flags = uc->data; 1110 break; 1111 #endif 1112 case UC_SETVERBOSE: 1113 np->verbose = uc->data; 1114 break; 1115 default: 1116 /* 1117 * We assume that other commands apply to targets. 1118 * This should always be the case and avoid the below 1119 * 4 lines to be repeated 6 times. 1120 */ 1121 for (t = 0; t < SYM_CONF_MAX_TARGET; t++) { 1122 if (!((uc->target >> t) & 1)) 1123 continue; 1124 tp = &np->target[t]; 1125 1126 switch (uc->cmd) { 1127 1128 case UC_SETSYNC: 1129 if (!uc->data || uc->data >= 255) { 1130 tp->tgoal.iu = tp->tgoal.dt = 1131 tp->tgoal.qas = 0; 1132 tp->tgoal.offset = 0; 1133 } else if (uc->data <= 9 && np->minsync_dt) { 1134 if (uc->data < np->minsync_dt) 1135 uc->data = np->minsync_dt; 1136 tp->tgoal.iu = tp->tgoal.dt = 1137 tp->tgoal.qas = 1; 1138 tp->tgoal.width = 1; 1139 tp->tgoal.period = uc->data; 1140 tp->tgoal.offset = np->maxoffs_dt; 1141 } else { 1142 if (uc->data < np->minsync) 1143 uc->data = np->minsync; 1144 tp->tgoal.iu = tp->tgoal.dt = 1145 tp->tgoal.qas = 0; 1146 tp->tgoal.period = uc->data; 1147 tp->tgoal.offset = np->maxoffs; 1148 } 1149 tp->tgoal.check_nego = 1; 1150 break; 1151 case UC_SETWIDE: 1152 tp->tgoal.width = uc->data ? 
1 : 0; 1153 tp->tgoal.check_nego = 1; 1154 break; 1155 case UC_SETTAGS: 1156 for (l = 0; l < SYM_CONF_MAX_LUN; l++) 1157 sym_tune_dev_queuing(tp, l, uc->data); 1158 break; 1159 case UC_RESETDEV: 1160 tp->to_reset = 1; 1161 np->istat_sem = SEM; 1162 OUTB(np, nc_istat, SIGP|SEM); 1163 break; 1164 case UC_CLEARDEV: 1165 for (l = 0; l < SYM_CONF_MAX_LUN; l++) { 1166 struct sym_lcb *lp = sym_lp(tp, l); 1167 if (lp) lp->to_clear = 1; 1168 } 1169 np->istat_sem = SEM; 1170 OUTB(np, nc_istat, SIGP|SEM); 1171 break; 1172 case UC_SETFLAG: 1173 tp->usrflags = uc->data; 1174 break; 1175 } 1176 } 1177 break; 1178 } 1179 } 1180 1181 static int skip_spaces(char *ptr, int len) 1182 { 1183 int cnt, c; 1184 1185 for (cnt = len; cnt > 0 && (c = *ptr++) && isspace(c); cnt--); 1186 1187 return (len - cnt); 1188 } 1189 1190 static int get_int_arg(char *ptr, int len, u_long *pv) 1191 { 1192 char *end; 1193 1194 *pv = simple_strtoul(ptr, &end, 10); 1195 return (end - ptr); 1196 } 1197 1198 static int is_keyword(char *ptr, int len, char *verb) 1199 { 1200 int verb_len = strlen(verb); 1201 1202 if (len >= verb_len && !memcmp(verb, ptr, verb_len)) 1203 return verb_len; 1204 else 1205 return 0; 1206 } 1207 1208 #define SKIP_SPACES(ptr, len) \ 1209 if ((arg_len = skip_spaces(ptr, len)) < 1) \ 1210 return -EINVAL; \ 1211 ptr += arg_len; len -= arg_len; 1212 1213 #define GET_INT_ARG(ptr, len, v) \ 1214 if (!(arg_len = get_int_arg(ptr, len, &(v)))) \ 1215 return -EINVAL; \ 1216 ptr += arg_len; len -= arg_len; 1217 1218 1219 /* 1220 * Parse a control command 1221 */ 1222 1223 static int sym_user_command(struct sym_hcb *np, char *buffer, int length) 1224 { 1225 char *ptr = buffer; 1226 int len = length; 1227 struct sym_usrcmd cmd, *uc = &cmd; 1228 int arg_len; 1229 u_long target; 1230 1231 memset(uc, 0, sizeof(*uc)); 1232 1233 if (len > 0 && ptr[len-1] == '\n') 1234 --len; 1235 1236 if ((arg_len = is_keyword(ptr, len, "setsync")) != 0) 1237 uc->cmd = UC_SETSYNC; 1238 else if ((arg_len = is_keyword(ptr, len, "settags")) != 0) 1239 uc->cmd = UC_SETTAGS; 1240 else if ((arg_len = is_keyword(ptr, len, "setverbose")) != 0) 1241 uc->cmd = UC_SETVERBOSE; 1242 else if ((arg_len = is_keyword(ptr, len, "setwide")) != 0) 1243 uc->cmd = UC_SETWIDE; 1244 #ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT 1245 else if ((arg_len = is_keyword(ptr, len, "setdebug")) != 0) 1246 uc->cmd = UC_SETDEBUG; 1247 #endif 1248 else if ((arg_len = is_keyword(ptr, len, "setflag")) != 0) 1249 uc->cmd = UC_SETFLAG; 1250 else if ((arg_len = is_keyword(ptr, len, "resetdev")) != 0) 1251 uc->cmd = UC_RESETDEV; 1252 else if ((arg_len = is_keyword(ptr, len, "cleardev")) != 0) 1253 uc->cmd = UC_CLEARDEV; 1254 else 1255 arg_len = 0; 1256 1257 #ifdef DEBUG_PROC_INFO 1258 printk("sym_user_command: arg_len=%d, cmd=%ld\n", arg_len, uc->cmd); 1259 #endif 1260 1261 if (!arg_len) 1262 return -EINVAL; 1263 ptr += arg_len; len -= arg_len; 1264 1265 switch(uc->cmd) { 1266 case UC_SETSYNC: 1267 case UC_SETTAGS: 1268 case UC_SETWIDE: 1269 case UC_SETFLAG: 1270 case UC_RESETDEV: 1271 case UC_CLEARDEV: 1272 SKIP_SPACES(ptr, len); 1273 if ((arg_len = is_keyword(ptr, len, "all")) != 0) { 1274 ptr += arg_len; len -= arg_len; 1275 uc->target = ~0; 1276 } else { 1277 GET_INT_ARG(ptr, len, target); 1278 uc->target = (1<<target); 1279 #ifdef DEBUG_PROC_INFO 1280 printk("sym_user_command: target=%ld\n", target); 1281 #endif 1282 } 1283 break; 1284 } 1285 1286 switch(uc->cmd) { 1287 case UC_SETVERBOSE: 1288 case UC_SETSYNC: 1289 case UC_SETTAGS: 1290 case UC_SETWIDE: 1291 SKIP_SPACES(ptr, len); 
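		/* the numeric value shared by these commands, e.g. "setverbose 2"
		 * written to /proc/scsi/sym53c8xx/<host> */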
1292 GET_INT_ARG(ptr, len, uc->data); 1293 #ifdef DEBUG_PROC_INFO 1294 printk("sym_user_command: data=%ld\n", uc->data); 1295 #endif 1296 break; 1297 #ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT 1298 case UC_SETDEBUG: 1299 while (len > 0) { 1300 SKIP_SPACES(ptr, len); 1301 if ((arg_len = is_keyword(ptr, len, "alloc"))) 1302 uc->data |= DEBUG_ALLOC; 1303 else if ((arg_len = is_keyword(ptr, len, "phase"))) 1304 uc->data |= DEBUG_PHASE; 1305 else if ((arg_len = is_keyword(ptr, len, "queue"))) 1306 uc->data |= DEBUG_QUEUE; 1307 else if ((arg_len = is_keyword(ptr, len, "result"))) 1308 uc->data |= DEBUG_RESULT; 1309 else if ((arg_len = is_keyword(ptr, len, "scatter"))) 1310 uc->data |= DEBUG_SCATTER; 1311 else if ((arg_len = is_keyword(ptr, len, "script"))) 1312 uc->data |= DEBUG_SCRIPT; 1313 else if ((arg_len = is_keyword(ptr, len, "tiny"))) 1314 uc->data |= DEBUG_TINY; 1315 else if ((arg_len = is_keyword(ptr, len, "timing"))) 1316 uc->data |= DEBUG_TIMING; 1317 else if ((arg_len = is_keyword(ptr, len, "nego"))) 1318 uc->data |= DEBUG_NEGO; 1319 else if ((arg_len = is_keyword(ptr, len, "tags"))) 1320 uc->data |= DEBUG_TAGS; 1321 else if ((arg_len = is_keyword(ptr, len, "pointer"))) 1322 uc->data |= DEBUG_POINTER; 1323 else 1324 return -EINVAL; 1325 ptr += arg_len; len -= arg_len; 1326 } 1327 #ifdef DEBUG_PROC_INFO 1328 printk("sym_user_command: data=%ld\n", uc->data); 1329 #endif 1330 break; 1331 #endif /* SYM_LINUX_DEBUG_CONTROL_SUPPORT */ 1332 case UC_SETFLAG: 1333 while (len > 0) { 1334 SKIP_SPACES(ptr, len); 1335 if ((arg_len = is_keyword(ptr, len, "no_disc"))) 1336 uc->data &= ~SYM_DISC_ENABLED; 1337 else 1338 return -EINVAL; 1339 ptr += arg_len; len -= arg_len; 1340 } 1341 break; 1342 default: 1343 break; 1344 } 1345 1346 if (len) 1347 return -EINVAL; 1348 else { 1349 unsigned long flags; 1350 1351 spin_lock_irqsave(np->s.host->host_lock, flags); 1352 sym_exec_user_command (np, uc); 1353 spin_unlock_irqrestore(np->s.host->host_lock, flags); 1354 } 1355 return length; 1356 } 1357 1358 #endif /* SYM_LINUX_USER_COMMAND_SUPPORT */ 1359 1360 1361 #ifdef SYM_LINUX_USER_INFO_SUPPORT 1362 /* 1363 * Informations through the proc file system. 1364 */ 1365 struct info_str { 1366 char *buffer; 1367 int length; 1368 int offset; 1369 int pos; 1370 }; 1371 1372 static void copy_mem_info(struct info_str *info, char *data, int len) 1373 { 1374 if (info->pos + len > info->length) 1375 len = info->length - info->pos; 1376 1377 if (info->pos + len < info->offset) { 1378 info->pos += len; 1379 return; 1380 } 1381 if (info->pos < info->offset) { 1382 data += (info->offset - info->pos); 1383 len -= (info->offset - info->pos); 1384 } 1385 1386 if (len > 0) { 1387 memcpy(info->buffer + info->pos, data, len); 1388 info->pos += len; 1389 } 1390 } 1391 1392 static int copy_info(struct info_str *info, char *fmt, ...) 1393 { 1394 va_list args; 1395 char buf[81]; 1396 int len; 1397 1398 va_start(args, fmt); 1399 len = vsprintf(buf, fmt, args); 1400 va_end(args); 1401 1402 copy_mem_info(info, buf, len); 1403 return len; 1404 } 1405 1406 /* 1407 * Copy formatted information into the input buffer. 
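 *  The info_str/copy_info helpers above implement the offset/length
 *  windowing that the /proc read interface expects.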
1408 */ 1409 static int sym_host_info(struct sym_hcb *np, char *ptr, off_t offset, int len) 1410 { 1411 struct info_str info; 1412 1413 info.buffer = ptr; 1414 info.length = len; 1415 info.offset = offset; 1416 info.pos = 0; 1417 1418 copy_info(&info, "Chip " NAME53C "%s, device id 0x%x, " 1419 "revision id 0x%x\n", 1420 np->s.chip_name, np->device_id, np->revision_id); 1421 copy_info(&info, "At PCI address %s, IRQ " IRQ_FMT "\n", 1422 pci_name(np->s.device), IRQ_PRM(np->s.irq)); 1423 copy_info(&info, "Min. period factor %d, %s SCSI BUS%s\n", 1424 (int) (np->minsync_dt ? np->minsync_dt : np->minsync), 1425 np->maxwide ? "Wide" : "Narrow", 1426 np->minsync_dt ? ", DT capable" : ""); 1427 1428 copy_info(&info, "Max. started commands %d, " 1429 "max. commands per LUN %d\n", 1430 SYM_CONF_MAX_START, SYM_CONF_MAX_TAG); 1431 1432 return info.pos > info.offset? info.pos - info.offset : 0; 1433 } 1434 #endif /* SYM_LINUX_USER_INFO_SUPPORT */ 1435 1436 /* 1437 * Entry point of the scsi proc fs of the driver. 1438 * - func = 0 means read (returns adapter infos) 1439 * - func = 1 means write (not yet merget from sym53c8xx) 1440 */ 1441 static int sym53c8xx_proc_info(struct Scsi_Host *host, char *buffer, 1442 char **start, off_t offset, int length, int func) 1443 { 1444 struct sym_hcb *np = sym_get_hcb(host); 1445 int retv; 1446 1447 if (func) { 1448 #ifdef SYM_LINUX_USER_COMMAND_SUPPORT 1449 retv = sym_user_command(np, buffer, length); 1450 #else 1451 retv = -EINVAL; 1452 #endif 1453 } else { 1454 if (start) 1455 *start = buffer; 1456 #ifdef SYM_LINUX_USER_INFO_SUPPORT 1457 retv = sym_host_info(np, buffer, offset, length); 1458 #else 1459 retv = -EINVAL; 1460 #endif 1461 } 1462 1463 return retv; 1464 } 1465 #endif /* SYM_LINUX_PROC_INFO_SUPPORT */ 1466 1467 /* 1468 * Free controller resources. 1469 */ 1470 static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev) 1471 { 1472 /* 1473 * Free O/S specific resources. 1474 */ 1475 if (np->s.irq) 1476 free_irq(np->s.irq, np); 1477 if (np->s.ioaddr) 1478 pci_iounmap(pdev, np->s.ioaddr); 1479 if (np->s.ramaddr) 1480 pci_iounmap(pdev, np->s.ramaddr); 1481 /* 1482 * Free O/S independent resources. 1483 */ 1484 sym_hcb_free(np); 1485 1486 sym_mfree_dma(np, sizeof(*np), "HCB"); 1487 } 1488 1489 /* 1490 * Ask/tell the system about DMA addressing. 1491 */ 1492 static int sym_setup_bus_dma_mask(struct sym_hcb *np) 1493 { 1494 #if SYM_CONF_DMA_ADDRESSING_MODE > 0 1495 #if SYM_CONF_DMA_ADDRESSING_MODE == 1 1496 #define DMA_DAC_MASK 0x000000ffffffffffULL /* 40-bit */ 1497 #elif SYM_CONF_DMA_ADDRESSING_MODE == 2 1498 #define DMA_DAC_MASK DMA_64BIT_MASK 1499 #endif 1500 if ((np->features & FE_DAC) && 1501 !pci_set_dma_mask(np->s.device, DMA_DAC_MASK)) { 1502 np->use_dac = 1; 1503 return 0; 1504 } 1505 #endif 1506 1507 if (!pci_set_dma_mask(np->s.device, DMA_32BIT_MASK)) 1508 return 0; 1509 1510 printf_warning("%s: No suitable DMA available\n", sym_name(np)); 1511 return -1; 1512 } 1513 1514 /* 1515 * Host attach and initialisations. 1516 * 1517 * Allocate host data and ncb structure. 1518 * Remap MMIO region. 1519 * Do chip initialization. 1520 * If all is OK, install interrupt handling and 1521 * start the timer daemon. 
1522 */ 1523 static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt, 1524 int unit, struct sym_device *dev) 1525 { 1526 struct host_data *host_data; 1527 struct sym_hcb *np = NULL; 1528 struct Scsi_Host *instance = NULL; 1529 struct pci_dev *pdev = dev->pdev; 1530 unsigned long flags; 1531 struct sym_fw *fw; 1532 1533 printk(KERN_INFO 1534 "sym%d: <%s> rev 0x%x at pci %s irq " IRQ_FMT "\n", 1535 unit, dev->chip.name, dev->chip.revision_id, 1536 pci_name(pdev), IRQ_PRM(pdev->irq)); 1537 1538 /* 1539 * Get the firmware for this chip. 1540 */ 1541 fw = sym_find_firmware(&dev->chip); 1542 if (!fw) 1543 goto attach_failed; 1544 1545 /* 1546 * Allocate host_data structure 1547 */ 1548 instance = scsi_host_alloc(tpnt, sizeof(*host_data)); 1549 if (!instance) 1550 goto attach_failed; 1551 host_data = (struct host_data *) instance->hostdata; 1552 1553 /* 1554 * Allocate immediately the host control block, 1555 * since we are only expecting to succeed. :) 1556 * We keep track in the HCB of all the resources that 1557 * are to be released on error. 1558 */ 1559 np = __sym_calloc_dma(&pdev->dev, sizeof(*np), "HCB"); 1560 if (!np) 1561 goto attach_failed; 1562 np->s.device = pdev; 1563 np->bus_dmat = &pdev->dev; /* Result in 1 DMA pool per HBA */ 1564 host_data->ncb = np; 1565 np->s.host = instance; 1566 1567 pci_set_drvdata(pdev, np); 1568 1569 /* 1570 * Copy some useful infos to the HCB. 1571 */ 1572 np->hcb_ba = vtobus(np); 1573 np->verbose = sym_driver_setup.verbose; 1574 np->s.device = pdev; 1575 np->s.unit = unit; 1576 np->device_id = dev->chip.device_id; 1577 np->revision_id = dev->chip.revision_id; 1578 np->features = dev->chip.features; 1579 np->clock_divn = dev->chip.nr_divisor; 1580 np->maxoffs = dev->chip.offset_max; 1581 np->maxburst = dev->chip.burst_max; 1582 np->myaddr = dev->host_id; 1583 1584 /* 1585 * Edit its name. 1586 */ 1587 strlcpy(np->s.chip_name, dev->chip.name, sizeof(np->s.chip_name)); 1588 sprintf(np->s.inst_name, "sym%d", np->s.unit); 1589 1590 if (sym_setup_bus_dma_mask(np)) 1591 goto attach_failed; 1592 1593 /* 1594 * Try to map the controller chip to 1595 * virtual and physical memory. 1596 */ 1597 np->mmio_ba = (u32)dev->mmio_base; 1598 np->s.ioaddr = dev->s.ioaddr; 1599 np->s.ramaddr = dev->s.ramaddr; 1600 np->s.io_ws = (np->features & FE_IO256) ? 256 : 128; 1601 1602 /* 1603 * Map on-chip RAM if present and supported. 1604 */ 1605 if (!(np->features & FE_RAM)) 1606 dev->ram_base = 0; 1607 if (dev->ram_base) { 1608 np->ram_ba = (u32)dev->ram_base; 1609 np->ram_ws = (np->features & FE_RAM8K) ? 8192 : 4096; 1610 } 1611 1612 if (sym_hcb_attach(instance, fw, dev->nvram)) 1613 goto attach_failed; 1614 1615 /* 1616 * Install the interrupt handler. 1617 * If we synchonize the C code with SCRIPTS on interrupt, 1618 * we do not want to share the INTR line at all. 1619 */ 1620 if (request_irq(pdev->irq, sym53c8xx_intr, SA_SHIRQ, NAME53C8XX, np)) { 1621 printf_err("%s: request irq %d failure\n", 1622 sym_name(np), pdev->irq); 1623 goto attach_failed; 1624 } 1625 np->s.irq = pdev->irq; 1626 1627 /* 1628 * After SCSI devices have been opened, we cannot 1629 * reset the bus safely, so we do it here. 1630 */ 1631 spin_lock_irqsave(instance->host_lock, flags); 1632 if (sym_reset_scsi_bus(np, 0)) 1633 goto reset_failed; 1634 1635 /* 1636 * Start the SCRIPTS. 
1637 */ 1638 sym_start_up (np, 1); 1639 1640 /* 1641 * Start the timer daemon 1642 */ 1643 init_timer(&np->s.timer); 1644 np->s.timer.data = (unsigned long) np; 1645 np->s.timer.function = sym53c8xx_timer; 1646 np->s.lasttime=0; 1647 sym_timer (np); 1648 1649 /* 1650 * Fill Linux host instance structure 1651 * and return success. 1652 */ 1653 instance->max_channel = 0; 1654 instance->this_id = np->myaddr; 1655 instance->max_id = np->maxwide ? 16 : 8; 1656 instance->max_lun = SYM_CONF_MAX_LUN; 1657 instance->unique_id = pci_resource_start(pdev, 0); 1658 instance->cmd_per_lun = SYM_CONF_MAX_TAG; 1659 instance->can_queue = (SYM_CONF_MAX_START-2); 1660 instance->sg_tablesize = SYM_CONF_MAX_SG; 1661 instance->max_cmd_len = 16; 1662 BUG_ON(sym2_transport_template == NULL); 1663 instance->transportt = sym2_transport_template; 1664 1665 spin_unlock_irqrestore(instance->host_lock, flags); 1666 1667 return instance; 1668 1669 reset_failed: 1670 printf_err("%s: FATAL ERROR: CHECK SCSI BUS - CABLES, " 1671 "TERMINATION, DEVICE POWER etc.!\n", sym_name(np)); 1672 spin_unlock_irqrestore(instance->host_lock, flags); 1673 attach_failed: 1674 if (!instance) 1675 return NULL; 1676 printf_info("%s: giving up ...\n", sym_name(np)); 1677 if (np) 1678 sym_free_resources(np, pdev); 1679 scsi_host_put(instance); 1680 1681 return NULL; 1682 } 1683 1684 1685 /* 1686 * Detect and try to read SYMBIOS and TEKRAM NVRAM. 1687 */ 1688 #if SYM_CONF_NVRAM_SUPPORT 1689 static void __devinit sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp) 1690 { 1691 devp->nvram = nvp; 1692 devp->device_id = devp->chip.device_id; 1693 nvp->type = 0; 1694 1695 sym_read_nvram(devp, nvp); 1696 } 1697 #else 1698 static inline void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp) 1699 { 1700 } 1701 #endif /* SYM_CONF_NVRAM_SUPPORT */ 1702 1703 static int __devinit sym_check_supported(struct sym_device *device) 1704 { 1705 struct sym_chip *chip; 1706 struct pci_dev *pdev = device->pdev; 1707 u_char revision; 1708 unsigned long io_port = pci_resource_start(pdev, 0); 1709 int i; 1710 1711 /* 1712 * If user excluded this chip, do not initialize it. 1713 * I hate this code so much. Must kill it. 1714 */ 1715 if (io_port) { 1716 for (i = 0 ; i < 8 ; i++) { 1717 if (sym_driver_setup.excludes[i] == io_port) 1718 return -ENODEV; 1719 } 1720 } 1721 1722 /* 1723 * Check if the chip is supported. Then copy the chip description 1724 * to our device structure so we can make it match the actual device 1725 * and options. 1726 */ 1727 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision); 1728 chip = sym_lookup_chip_table(pdev->device, revision); 1729 if (!chip) { 1730 dev_info(&pdev->dev, "device not supported\n"); 1731 return -ENODEV; 1732 } 1733 memcpy(&device->chip, chip, sizeof(device->chip)); 1734 device->chip.revision_id = revision; 1735 1736 return 0; 1737 } 1738 1739 /* 1740 * Ignore Symbios chips controlled by various RAID controllers. 1741 * These controllers set value 0x52414944 at RAM end - 16. 
1742 */ 1743 static int __devinit sym_check_raid(struct sym_device *device) 1744 { 1745 unsigned int ram_size, ram_val; 1746 1747 if (!device->s.ramaddr) 1748 return 0; 1749 1750 if (device->chip.features & FE_RAM8K) 1751 ram_size = 8192; 1752 else 1753 ram_size = 4096; 1754 1755 ram_val = readl(device->s.ramaddr + ram_size - 16); 1756 if (ram_val != 0x52414944) 1757 return 0; 1758 1759 dev_info(&device->pdev->dev, 1760 "not initializing, driven by RAID controller.\n"); 1761 return -ENODEV; 1762 } 1763 1764 static int __devinit sym_set_workarounds(struct sym_device *device) 1765 { 1766 struct sym_chip *chip = &device->chip; 1767 struct pci_dev *pdev = device->pdev; 1768 u_short status_reg; 1769 1770 /* 1771 * (ITEM 12 of a DEL about the 896 I haven't yet). 1772 * We must ensure the chip will use WRITE AND INVALIDATE. 1773 * The revision number limit is for now arbitrary. 1774 */ 1775 if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && chip->revision_id < 0x4) { 1776 chip->features |= (FE_WRIE | FE_CLSE); 1777 } 1778 1779 /* If the chip can do Memory Write Invalidate, enable it */ 1780 if (chip->features & FE_WRIE) { 1781 if (pci_set_mwi(pdev)) 1782 return -ENODEV; 1783 } 1784 1785 /* 1786 * Work around for errant bit in 895A. The 66Mhz 1787 * capable bit is set erroneously. Clear this bit. 1788 * (Item 1 DEL 533) 1789 * 1790 * Make sure Config space and Features agree. 1791 * 1792 * Recall: writes are not normal to status register - 1793 * write a 1 to clear and a 0 to leave unchanged. 1794 * Can only reset bits. 1795 */ 1796 pci_read_config_word(pdev, PCI_STATUS, &status_reg); 1797 if (chip->features & FE_66MHZ) { 1798 if (!(status_reg & PCI_STATUS_66MHZ)) 1799 chip->features &= ~FE_66MHZ; 1800 } else { 1801 if (status_reg & PCI_STATUS_66MHZ) { 1802 status_reg = PCI_STATUS_66MHZ; 1803 pci_write_config_word(pdev, PCI_STATUS, status_reg); 1804 pci_read_config_word(pdev, PCI_STATUS, &status_reg); 1805 } 1806 } 1807 1808 return 0; 1809 } 1810 1811 /* 1812 * Read and check the PCI configuration for any detected NCR 1813 * boards and save data for attaching after all boards have 1814 * been detected. 1815 */ 1816 static void __devinit 1817 sym_init_device(struct pci_dev *pdev, struct sym_device *device) 1818 { 1819 int i; 1820 1821 device->host_id = SYM_SETUP_HOST_ID; 1822 device->pdev = pdev; 1823 1824 i = pci_get_base_address(pdev, 1, &device->mmio_base); 1825 pci_get_base_address(pdev, i, &device->ram_base); 1826 1827 #ifndef CONFIG_SCSI_SYM53C8XX_IOMAPPED 1828 if (device->mmio_base) 1829 device->s.ioaddr = pci_iomap(pdev, 1, 1830 pci_resource_len(pdev, 1)); 1831 #endif 1832 if (!device->s.ioaddr) 1833 device->s.ioaddr = pci_iomap(pdev, 0, 1834 pci_resource_len(pdev, 0)); 1835 if (device->ram_base) 1836 device->s.ramaddr = pci_iomap(pdev, i, 1837 pci_resource_len(pdev, i)); 1838 } 1839 1840 /* 1841 * The NCR PQS and PDS cards are constructed as a DEC bridge 1842 * behind which sits a proprietary NCR memory controller and 1843 * either four or two 53c875s as separate devices. We can tell 1844 * if an 875 is part of a PQS/PDS or not since if it is, it will 1845 * be on the same bus as the memory controller. In its usual 1846 * mode of operation, the 875s are slaved to the memory 1847 * controller for all transfers. To operate with the Linux 1848 * driver, the memory controller is disabled and the 875s 1849 * freed to function independently. The only wrinkle is that 1850 * the preset SCSI ID (which may be zero) must be read in from 1851 * a special configuration space register of the 875. 
1852 */ 1853 static void sym_config_pqs(struct pci_dev *pdev, struct sym_device *sym_dev) 1854 { 1855 int slot; 1856 u8 tmp; 1857 1858 for (slot = 0; slot < 256; slot++) { 1859 struct pci_dev *memc = pci_get_slot(pdev->bus, slot); 1860 1861 if (!memc || memc->vendor != 0x101a || memc->device == 0x0009) { 1862 pci_dev_put(memc); 1863 continue; 1864 } 1865 1866 /* bit 1: allow individual 875 configuration */ 1867 pci_read_config_byte(memc, 0x44, &tmp); 1868 if ((tmp & 0x2) == 0) { 1869 tmp |= 0x2; 1870 pci_write_config_byte(memc, 0x44, tmp); 1871 } 1872 1873 /* bit 2: drive individual 875 interrupts to the bus */ 1874 pci_read_config_byte(memc, 0x45, &tmp); 1875 if ((tmp & 0x4) == 0) { 1876 tmp |= 0x4; 1877 pci_write_config_byte(memc, 0x45, tmp); 1878 } 1879 1880 pci_dev_put(memc); 1881 break; 1882 } 1883 1884 pci_read_config_byte(pdev, 0x84, &tmp); 1885 sym_dev->host_id = tmp; 1886 } 1887 1888 /* 1889 * Called before unloading the module. 1890 * Detach the host. 1891 * We have to free resources and halt the NCR chip. 1892 */ 1893 static int sym_detach(struct sym_hcb *np, struct pci_dev *pdev) 1894 { 1895 printk("%s: detaching ...\n", sym_name(np)); 1896 1897 del_timer_sync(&np->s.timer); 1898 1899 /* 1900 * Reset NCR chip. 1901 * We should use sym_soft_reset(), but we don't want to do 1902 * so, since we may not be safe if interrupts occur. 1903 */ 1904 printk("%s: resetting chip\n", sym_name(np)); 1905 OUTB(np, nc_istat, SRST); 1906 udelay(10); 1907 OUTB(np, nc_istat, 0); 1908 1909 sym_free_resources(np, pdev); 1910 1911 return 1; 1912 } 1913 1914 /* 1915 * Driver host template. 1916 */ 1917 static struct scsi_host_template sym2_template = { 1918 .module = THIS_MODULE, 1919 .name = "sym53c8xx", 1920 .info = sym53c8xx_info, 1921 .queuecommand = sym53c8xx_queue_command, 1922 .slave_alloc = sym53c8xx_slave_alloc, 1923 .slave_configure = sym53c8xx_slave_configure, 1924 .slave_destroy = sym53c8xx_slave_destroy, 1925 .eh_abort_handler = sym53c8xx_eh_abort_handler, 1926 .eh_device_reset_handler = sym53c8xx_eh_device_reset_handler, 1927 .eh_bus_reset_handler = sym53c8xx_eh_bus_reset_handler, 1928 .eh_host_reset_handler = sym53c8xx_eh_host_reset_handler, 1929 .this_id = 7, 1930 .use_clustering = DISABLE_CLUSTERING, 1931 #ifdef SYM_LINUX_PROC_INFO_SUPPORT 1932 .proc_info = sym53c8xx_proc_info, 1933 .proc_name = NAME53C8XX, 1934 #endif 1935 }; 1936 1937 static int attach_count; 1938 1939 static int __devinit sym2_probe(struct pci_dev *pdev, 1940 const struct pci_device_id *ent) 1941 { 1942 struct sym_device sym_dev; 1943 struct sym_nvram nvram; 1944 struct Scsi_Host *instance; 1945 1946 memset(&sym_dev, 0, sizeof(sym_dev)); 1947 memset(&nvram, 0, sizeof(nvram)); 1948 1949 if (pci_enable_device(pdev)) 1950 goto leave; 1951 1952 pci_set_master(pdev); 1953 1954 if (pci_request_regions(pdev, NAME53C8XX)) 1955 goto disable; 1956 1957 sym_init_device(pdev, &sym_dev); 1958 if (sym_check_supported(&sym_dev)) 1959 goto free; 1960 1961 if (sym_check_raid(&sym_dev)) 1962 goto leave; /* Don't disable the device */ 1963 1964 if (sym_set_workarounds(&sym_dev)) 1965 goto free; 1966 1967 sym_config_pqs(pdev, &sym_dev); 1968 1969 sym_get_nvram(&sym_dev, &nvram); 1970 1971 instance = sym_attach(&sym2_template, attach_count, &sym_dev); 1972 if (!instance) 1973 goto free; 1974 1975 if (scsi_add_host(instance, &pdev->dev)) 1976 goto detach; 1977 scsi_scan_host(instance); 1978 1979 attach_count++; 1980 1981 return 0; 1982 1983 detach: 1984 sym_detach(pci_get_drvdata(pdev), pdev); 1985 free: 1986 pci_release_regions(pdev); 
1987 disable: 1988 pci_disable_device(pdev); 1989 leave: 1990 return -ENODEV; 1991 } 1992 1993 static void __devexit sym2_remove(struct pci_dev *pdev) 1994 { 1995 struct sym_hcb *np = pci_get_drvdata(pdev); 1996 struct Scsi_Host *host = np->s.host; 1997 1998 scsi_remove_host(host); 1999 scsi_host_put(host); 2000 2001 sym_detach(np, pdev); 2002 2003 pci_release_regions(pdev); 2004 pci_disable_device(pdev); 2005 2006 attach_count--; 2007 } 2008 2009 static void sym2_get_signalling(struct Scsi_Host *shost) 2010 { 2011 struct sym_hcb *np = sym_get_hcb(shost); 2012 enum spi_signal_type type; 2013 2014 switch (np->scsi_mode) { 2015 case SMODE_SE: 2016 type = SPI_SIGNAL_SE; 2017 break; 2018 case SMODE_LVD: 2019 type = SPI_SIGNAL_LVD; 2020 break; 2021 case SMODE_HVD: 2022 type = SPI_SIGNAL_HVD; 2023 break; 2024 default: 2025 type = SPI_SIGNAL_UNKNOWN; 2026 break; 2027 } 2028 spi_signalling(shost) = type; 2029 } 2030 2031 static void sym2_set_offset(struct scsi_target *starget, int offset) 2032 { 2033 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 2034 struct sym_hcb *np = sym_get_hcb(shost); 2035 struct sym_tcb *tp = &np->target[starget->id]; 2036 2037 tp->tgoal.offset = offset; 2038 tp->tgoal.check_nego = 1; 2039 } 2040 2041 static void sym2_set_period(struct scsi_target *starget, int period) 2042 { 2043 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 2044 struct sym_hcb *np = sym_get_hcb(shost); 2045 struct sym_tcb *tp = &np->target[starget->id]; 2046 2047 /* have to have DT for these transfers */ 2048 if (period <= np->minsync) 2049 tp->tgoal.dt = 1; 2050 2051 tp->tgoal.period = period; 2052 tp->tgoal.check_nego = 1; 2053 } 2054 2055 static void sym2_set_width(struct scsi_target *starget, int width) 2056 { 2057 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 2058 struct sym_hcb *np = sym_get_hcb(shost); 2059 struct sym_tcb *tp = &np->target[starget->id]; 2060 2061 /* It is illegal to have DT set on narrow transfers. If DT is 2062 * clear, we must also clear IU and QAS. 
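 * (sym2_set_dt() and sym2_set_iu() below apply the same consistency rules
 * when those flags are changed individually.)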
*/ 2063 if (width == 0) 2064 tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0; 2065 2066 tp->tgoal.width = width; 2067 tp->tgoal.check_nego = 1; 2068 } 2069 2070 static void sym2_set_dt(struct scsi_target *starget, int dt) 2071 { 2072 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 2073 struct sym_hcb *np = sym_get_hcb(shost); 2074 struct sym_tcb *tp = &np->target[starget->id]; 2075 2076 /* We must clear QAS and IU if DT is clear */ 2077 if (dt) 2078 tp->tgoal.dt = 1; 2079 else 2080 tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0; 2081 tp->tgoal.check_nego = 1; 2082 } 2083 2084 static void sym2_set_iu(struct scsi_target *starget, int iu) 2085 { 2086 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 2087 struct sym_hcb *np = sym_get_hcb(shost); 2088 struct sym_tcb *tp = &np->target[starget->id]; 2089 2090 if (iu) 2091 tp->tgoal.iu = tp->tgoal.dt = 1; 2092 else 2093 tp->tgoal.iu = 0; 2094 tp->tgoal.check_nego = 1; 2095 } 2096 2097 static void sym2_set_qas(struct scsi_target *starget, int qas) 2098 { 2099 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 2100 struct sym_hcb *np = sym_get_hcb(shost); 2101 struct sym_tcb *tp = &np->target[starget->id]; 2102 2103 if (qas) 2104 tp->tgoal.dt = tp->tgoal.qas = 1; 2105 else 2106 tp->tgoal.qas = 0; 2107 tp->tgoal.check_nego = 1; 2108 } 2109 2110 2111 static struct spi_function_template sym2_transport_functions = { 2112 .set_offset = sym2_set_offset, 2113 .show_offset = 1, 2114 .set_period = sym2_set_period, 2115 .show_period = 1, 2116 .set_width = sym2_set_width, 2117 .show_width = 1, 2118 .set_dt = sym2_set_dt, 2119 .show_dt = 1, 2120 .set_iu = sym2_set_iu, 2121 .show_iu = 1, 2122 .set_qas = sym2_set_qas, 2123 .show_qas = 1, 2124 .get_signalling = sym2_get_signalling, 2125 }; 2126 2127 static struct pci_device_id sym2_id_table[] __devinitdata = { 2128 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C810, 2129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2130 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C820, 2131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */ 2132 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C825, 2133 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2134 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C815, 2135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2136 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C810AP, 2137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */ 2138 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860, 2139 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2140 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510, 2141 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2142 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896, 2143 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2144 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895, 2145 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2146 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C885, 2147 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2148 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875, 2149 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2150 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C1510, 2151 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */ 2152 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C895A, 2153 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2154 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C875A, 2155 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2156 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_33, 2157 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2158 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_66, 2159 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2160 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875J, 
2161 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2162 { 0, } 2163 }; 2164 2165 MODULE_DEVICE_TABLE(pci, sym2_id_table); 2166 2167 static struct pci_driver sym2_driver = { 2168 .name = NAME53C8XX, 2169 .id_table = sym2_id_table, 2170 .probe = sym2_probe, 2171 .remove = __devexit_p(sym2_remove), 2172 }; 2173 2174 static int __init sym2_init(void) 2175 { 2176 int error; 2177 2178 sym2_setup_params(); 2179 sym2_transport_template = spi_attach_transport(&sym2_transport_functions); 2180 if (!sym2_transport_template) 2181 return -ENODEV; 2182 2183 error = pci_register_driver(&sym2_driver); 2184 if (error) 2185 spi_release_transport(sym2_transport_template); 2186 return error; 2187 } 2188 2189 static void __exit sym2_exit(void) 2190 { 2191 pci_unregister_driver(&sym2_driver); 2192 spi_release_transport(sym2_transport_template); 2193 } 2194 2195 module_init(sym2_init); 2196 module_exit(sym2_exit); 2197