/*
 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
 * of PCI-SCSI IO processors.
 *
 * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
 * Copyright (c) 2003-2005  Matthew Wilcox <matthew@wil.cx>
 *
 * This driver is derived from the Linux sym53c8xx driver.
 * Copyright (C) 1998-2000  Gerard Roudier
 *
 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
 * a port of the FreeBSD ncr driver to Linux-1.2.13.
 *
 * The original ncr driver has been written for 386bsd and FreeBSD by
 *         Wolfgang Stanglmeier        <wolf@cologne.de>
 *         Stefan Esser                <se@mi.Uni-Koeln.de>
 * Copyright (C) 1994  Wolfgang Stanglmeier
 *
 * Other major contributions:
 *
 * NVRAM detection and reading.
 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
 *
 *-----------------------------------------------------------------------------
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>

#include "sym_glue.h"
#include "sym_nvram.h"

#define NAME53C		"sym53c"
#define NAME53C8XX	"sym53c8xx"

#define IRQ_FMT "%d"
#define IRQ_PRM(x) (x)

struct sym_driver_setup sym_driver_setup = SYM_LINUX_DRIVER_SETUP;
unsigned int sym_debug_flags = 0;

static char *excl_string;
static char *safe_string;
module_param_named(cmd_per_lun, sym_driver_setup.max_tag, ushort, 0);
module_param_string(tag_ctrl, sym_driver_setup.tag_ctrl, 100, 0);
module_param_named(burst, sym_driver_setup.burst_order, byte, 0);
module_param_named(led, sym_driver_setup.scsi_led, byte, 0);
module_param_named(diff, sym_driver_setup.scsi_diff, byte, 0);
module_param_named(irqm, sym_driver_setup.irq_mode, byte, 0);
module_param_named(buschk, sym_driver_setup.scsi_bus_check, byte, 0);
module_param_named(hostid, sym_driver_setup.host_id, byte, 0);
module_param_named(verb, sym_driver_setup.verbose, byte, 0);
module_param_named(debug, sym_debug_flags, uint, 0);
module_param_named(settle, sym_driver_setup.settle_delay, byte, 0);
module_param_named(nvram, sym_driver_setup.use_nvram, byte, 0);
module_param_named(excl, excl_string, charp, 0);
module_param_named(safe, safe_string, charp, 0);

MODULE_PARM_DESC(cmd_per_lun, "The maximum number of tags to use by default");
MODULE_PARM_DESC(tag_ctrl, "More detailed control over tags per LUN");
MODULE_PARM_DESC(burst, "Maximum burst.  0 to disable, 255 to read from registers");
MODULE_PARM_DESC(led, "Set to 1 to enable LED support");
MODULE_PARM_DESC(diff, "0 for no differential mode, 1 for BIOS, 2 for always, 3 for not GPIO3");
MODULE_PARM_DESC(irqm, "0 for open drain, 1 to leave alone, 2 for totem pole");
MODULE_PARM_DESC(buschk, "0 to not check, 1 for detach on error, 2 for warn on error");
MODULE_PARM_DESC(hostid, "The SCSI ID to use for the host adapters");
MODULE_PARM_DESC(verb, "0 for minimal verbosity, 1 for normal, 2 for excessive");
MODULE_PARM_DESC(debug, "Set bits to enable debugging");
MODULE_PARM_DESC(settle, "Settle delay in seconds.  Default 3");
MODULE_PARM_DESC(nvram, "Option currently not used");
MODULE_PARM_DESC(excl, "List ioport addresses here to prevent controllers from being attached");
MODULE_PARM_DESC(safe, "Set other settings to a \"safe mode\"");

MODULE_LICENSE("GPL");
MODULE_VERSION(SYM_VERSION);
MODULE_AUTHOR("Matthew Wilcox <matthew@wil.cx>");
MODULE_DESCRIPTION("NCR, Symbios and LSI 8xx and 1010 PCI SCSI adapters");

static void sym2_setup_params(void)
{
	char *p = excl_string;
	int xi = 0;

	while (p && (xi < 8)) {
		char *next_p;
		int val = (int) simple_strtoul(p, &next_p, 0);
		sym_driver_setup.excludes[xi++] = val;
		p = next_p;
	}

	if (safe_string) {
		if (*safe_string == 'y') {
			sym_driver_setup.max_tag = 0;
			sym_driver_setup.burst_order = 0;
			sym_driver_setup.scsi_led = 0;
			sym_driver_setup.scsi_diff = 1;
			sym_driver_setup.irq_mode = 0;
			sym_driver_setup.scsi_bus_check = 2;
			sym_driver_setup.host_id = 7;
			sym_driver_setup.verbose = 2;
			sym_driver_setup.settle_delay = 10;
			sym_driver_setup.use_nvram = 1;
		} else if (*safe_string != 'n') {
			printk(KERN_WARNING NAME53C8XX ": ignoring parameter %s"
				" passed to safe option\n", safe_string);
		}
	}
}

static struct scsi_transport_template *sym2_transport_template = NULL;
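
/*
 * Illustrative use of the module options declared above (the values are
 * hypothetical, not a recommendation):
 *
 *	modprobe sym53c8xx cmd_per_lun=16 burst=6 verb=1 safe=n
 *
 * sets the default tag depth, burst order and verbosity, while loading
 * with "safe=y" instead overrides most settings with the conservative
 * values applied in sym2_setup_params() above.
 */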

/*
 *  Driver private area in the SCSI command structure.
 */
struct sym_ucmd {		/* Override the SCSI pointer structure */
	dma_addr_t	data_mapping;
	unsigned char	data_mapped;
	unsigned char	to_do;			/* For error handling */
	void		(*old_done)(struct scsi_cmnd *);	/* For error handling */
	struct completion *eh_done;		/* For error handling */
};

#define SYM_UCMD_PTR(cmd)  ((struct sym_ucmd *)(&(cmd)->SCp))
#define SYM_SOFTC_PTR(cmd) sym_get_hcb(cmd->device->host)

static void __unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
{
	int dma_dir = cmd->sc_data_direction;

	switch(SYM_UCMD_PTR(cmd)->data_mapped) {
	case 2:
		pci_unmap_sg(pdev, cmd->request_buffer, cmd->use_sg, dma_dir);
		break;
	case 1:
		pci_unmap_single(pdev, SYM_UCMD_PTR(cmd)->data_mapping,
				 cmd->request_bufflen, dma_dir);
		break;
	}
	SYM_UCMD_PTR(cmd)->data_mapped = 0;
}

static dma_addr_t __map_scsi_single_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
{
	dma_addr_t mapping;
	int dma_dir = cmd->sc_data_direction;

	mapping = pci_map_single(pdev, cmd->request_buffer,
				 cmd->request_bufflen, dma_dir);
	if (mapping) {
		SYM_UCMD_PTR(cmd)->data_mapped  = 1;
		SYM_UCMD_PTR(cmd)->data_mapping = mapping;
	}

	return mapping;
}

static int __map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
{
	int use_sg;
	int dma_dir = cmd->sc_data_direction;

	use_sg = pci_map_sg(pdev, cmd->request_buffer, cmd->use_sg, dma_dir);
	if (use_sg > 0) {
		SYM_UCMD_PTR(cmd)->data_mapped  = 2;
		SYM_UCMD_PTR(cmd)->data_mapping = use_sg;
	}

	return use_sg;
}

#define unmap_scsi_data(np, cmd)	\
		__unmap_scsi_data(np->s.device, cmd)
#define map_scsi_single_data(np, cmd)	\
		__map_scsi_single_data(np->s.device, cmd)
#define map_scsi_sg_data(np, cmd)	\
		__map_scsi_sg_data(np->s.device, cmd)

/*
 *  Complete a pending CAM CCB.
 */
void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *cmd)
{
	unmap_scsi_data(np, cmd);
	cmd->scsi_done(cmd);
}

static void sym_xpt_done2(struct sym_hcb *np, struct scsi_cmnd *cmd, int cam_status)
{
	sym_set_cam_status(cmd, cam_status);
	sym_xpt_done(np, cmd);
}


/*
 *  Tell the SCSI layer about a BUS RESET.
 */
void sym_xpt_async_bus_reset(struct sym_hcb *np)
{
	printf_notice("%s: SCSI BUS has been reset.\n", sym_name(np));
	np->s.settle_time = jiffies + sym_driver_setup.settle_delay * HZ;
	np->s.settle_time_valid = 1;
	if (sym_verbose >= 2)
		printf_info("%s: command processing suspended for %d seconds\n",
			    sym_name(np), sym_driver_setup.settle_delay);
}

/*
 *  Tell the SCSI layer about a BUS DEVICE RESET message sent.
 */
void sym_xpt_async_sent_bdr(struct sym_hcb *np, int target)
{
	printf_notice("%s: TARGET %d has been reset.\n", sym_name(np), target);
}

/*
 *  Choose the more appropriate CAM status if
 *  the IO encountered an extended error.
 */
static int sym_xerr_cam_status(int cam_status, int x_status)
{
	if (x_status) {
		if (x_status & XE_PARITY_ERR)
			cam_status = DID_PARITY;
		else if (x_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN))
			cam_status = DID_ERROR;
		else if (x_status & XE_BAD_PHASE)
			cam_status = DID_ERROR;
		else
			cam_status = DID_ERROR;
	}
	return cam_status;
}

/*
 *  Build CAM result for a failed or auto-sensed IO.
 */
void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
{
	struct scsi_cmnd *cmd = cp->cmd;
	u_int cam_status, scsi_status, drv_status;

	drv_status = 0;
	cam_status = DID_OK;
	scsi_status = cp->ssss_status;

	if (cp->host_flags & HF_SENSE) {
		scsi_status = cp->sv_scsi_status;
		resid = cp->sv_resid;
		if (sym_verbose && cp->sv_xerr_status)
			sym_print_xerr(cmd, cp->sv_xerr_status);
		if (cp->host_status == HS_COMPLETE &&
		    cp->ssss_status == S_GOOD &&
		    cp->xerr_status == 0) {
			cam_status = sym_xerr_cam_status(DID_OK,
							 cp->sv_xerr_status);
			drv_status = DRIVER_SENSE;
			/*
			 *  Bounce back the sense data to user.
			 */
			memset(&cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
			memcpy(cmd->sense_buffer, cp->sns_bbuf,
			       min(sizeof(cmd->sense_buffer),
				   (size_t)SYM_SNS_BBUF_LEN));
#if 0
			/*
			 *  If the device reports a UNIT ATTENTION condition
			 *  due to a RESET condition, we should consider all
			 *  disconnect CCBs for this unit as aborted.
			 */
			if (1) {
				u_char *p;
				p = (u_char *) cmd->sense_data;
				if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29)
					sym_clear_tasks(np, DID_ABORT,
							cp->target, cp->lun, -1);
			}
#endif
		} else {
			/*
			 * Error return from our internal request sense.  This
			 * is bad: we must clear the contingent allegiance
			 * condition otherwise the device will always return
			 * BUSY.  Use a big stick.
			 */
			sym_reset_scsi_target(np, cmd->device->id);
			cam_status = DID_ERROR;
		}
	} else if (cp->host_status == HS_COMPLETE)	/* Bad SCSI status */
		cam_status = DID_OK;
	else if (cp->host_status == HS_SEL_TIMEOUT)	/* Selection timeout */
		cam_status = DID_NO_CONNECT;
	else if (cp->host_status == HS_UNEXPECTED)	/* Unexpected BUS FREE*/
		cam_status = DID_ERROR;
	else {						/* Extended error */
		if (sym_verbose) {
			sym_print_addr(cmd, "COMMAND FAILED (%x %x %x).\n",
				cp->host_status, cp->ssss_status,
				cp->xerr_status);
		}
		/*
		 *  Set the most appropriate value for CAM status.
		 */
		cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status);
	}
	cmd->resid = resid;
	cmd->result = (drv_status << 24) + (cam_status << 16) + scsi_status;
}
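
/*
 * Note on the encoding above (a descriptive aid, not new behaviour):
 * cmd->result packs the driver byte, the host (CAM) byte and the SCSI
 * status byte as driver<<24 | host<<16 | status, which is how the Linux
 * SCSI midlayer of this era expects completed commands to report status.
 */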

/*
 *  Build the scatter/gather array for an I/O.
 */

static int sym_scatter_no_sglist(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
{
	struct sym_tblmove *data = &cp->phys.data[SYM_CONF_MAX_SG-1];
	int segment;
	unsigned int len = cmd->request_bufflen;

	if (len) {
		dma_addr_t baddr = map_scsi_single_data(np, cmd);
		if (baddr) {
			if (len & 1) {
				struct sym_tcb *tp = &np->target[cp->target];
				if (tp->head.wval & EWS) {
					len++;
					cp->odd_byte_adjustment++;
				}
			}
			cp->data_len = len;
			sym_build_sge(np, data, baddr, len);
			segment = 1;
		} else {
			segment = -2;
		}
	} else {
		segment = 0;
	}

	return segment;
}

static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
{
	int segment;
	int use_sg = (int) cmd->use_sg;

	cp->data_len = 0;

	if (!use_sg)
		segment = sym_scatter_no_sglist(np, cp, cmd);
	else if ((use_sg = map_scsi_sg_data(np, cmd)) > 0) {
		struct scatterlist *scatter = (struct scatterlist *)cmd->request_buffer;
		struct sym_tcb *tp = &np->target[cp->target];
		struct sym_tblmove *data;

		if (use_sg > SYM_CONF_MAX_SG) {
			unmap_scsi_data(np, cmd);
			return -1;
		}

		data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg];

		for (segment = 0; segment < use_sg; segment++) {
			dma_addr_t baddr = sg_dma_address(&scatter[segment]);
			unsigned int len = sg_dma_len(&scatter[segment]);

			if ((len & 1) && (tp->head.wval & EWS)) {
				len++;
				cp->odd_byte_adjustment++;
			}

			sym_build_sge(np, &data[segment], baddr, len);
			cp->data_len += len;
		}
	} else {
		segment = -2;
	}

	return segment;
}

/*
 *  Queue a SCSI command.
 */
static int sym_queue_command(struct sym_hcb *np, struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct sym_tcb *tp;
	struct sym_lcb *lp;
	struct sym_ccb *cp;
	int order;

	/*
	 *  Minimal checking, so that we will not
	 *  go outside our tables.
	 */
	if (sdev->id == np->myaddr) {
		sym_xpt_done2(np, cmd, DID_NO_CONNECT);
		return 0;
	}

	/*
	 *  Retrieve the target descriptor.
	 */
	tp = &np->target[sdev->id];

	/*
	 *  Select tagged/untagged.
	 */
	lp = sym_lp(tp, sdev->lun);
	order = (lp && lp->s.reqtags) ? M_SIMPLE_TAG : 0;

	/*
	 *  Queue the SCSI IO.
	 */
	cp = sym_get_ccb(np, cmd, order);
	if (!cp)
		return 1;	/* Means resource shortage */
	sym_queue_scsiio(np, cmd, cp);
	return 0;
}

/*
 *  Setup buffers and pointers that address the CDB.
 */
static inline int sym_setup_cdb(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
{
	memcpy(cp->cdb_buf, cmd->cmnd, cmd->cmd_len);

	cp->phys.cmd.addr = CCB_BA(cp, cdb_buf[0]);
	cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len);

	return 0;
}

/*
 *  Setup pointers that address the data and start the I/O.
 */
int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
{
	u32 lastp, goalp;
	int dir;

	/*
	 *  Build the CDB.
	 */
	if (sym_setup_cdb(np, cmd, cp))
		goto out_abort;

	/*
	 *  No direction means no data.
	 */
	dir = cmd->sc_data_direction;
	if (dir != DMA_NONE) {
		cp->segments = sym_scatter(np, cp, cmd);
		if (cp->segments < 0) {
			sym_set_cam_status(cmd, DID_ERROR);
			goto out_abort;
		}

		/*
		 *  No segments means no data.
		 */
		if (!cp->segments)
			dir = DMA_NONE;
	} else {
		cp->data_len = 0;
		cp->segments = 0;
	}

	/*
	 *  Set the data pointer.
	 */
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		printk("%s: got DMA_BIDIRECTIONAL command", sym_name(np));
		sym_set_cam_status(cmd, DID_ERROR);
		goto out_abort;
	case DMA_TO_DEVICE:
		goalp = SCRIPTA_BA(np, data_out2) + 8;
		lastp = goalp - 8 - (cp->segments * (2*4));
		break;
	case DMA_FROM_DEVICE:
		cp->host_flags |= HF_DATA_IN;
		goalp = SCRIPTA_BA(np, data_in2) + 8;
		lastp = goalp - 8 - (cp->segments * (2*4));
		break;
	case DMA_NONE:
	default:
		lastp = goalp = SCRIPTB_BA(np, no_data);
		break;
	}

	/*
	 *  Set all pointer values needed by SCRIPTS.
	 */
	cp->phys.head.lastp = cpu_to_scr(lastp);
	cp->phys.head.savep = cpu_to_scr(lastp);
	cp->startp	    = cp->phys.head.savep;
	cp->goalp	    = cpu_to_scr(goalp);

	/*
	 *  When changed to `#if 1', the code below makes the driver
	 *  panic on the first attempt to write to a SCSI device.
	 *  It is the first test we want to do after a driver
	 *  change that does not seem obviously safe. :)
	 */
#if 0
	switch (cp->cdb_buf[0]) {
	case 0x0A: case 0x2A: case 0xAA:
		panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n");
		break;
	default:
		break;
	}
#endif

	/*
	 *  Activate this job.
	 */
	sym_put_start_queue(np, cp);
	return 0;

out_abort:
	sym_free_ccb(np, cp);
	sym_xpt_done(np, cmd);
	return 0;
}


/*
 *  Timer daemon.
 *
 *  Misused to keep the driver running when
 *  interrupts are not configured correctly.
 */
static void sym_timer(struct sym_hcb *np)
{
	unsigned long thistime = jiffies;

	/*
	 *  Restart the timer.
	 */
	np->s.timer.expires = thistime + SYM_CONF_TIMER_INTERVAL;
	add_timer(&np->s.timer);

	/*
	 *  If we are resetting the ncr, wait for settle_time before
	 *  clearing it. Then command processing will be resumed.
	 */
	if (np->s.settle_time_valid) {
		if (time_before_eq(np->s.settle_time, thistime)) {
			if (sym_verbose >= 2)
				printk("%s: command processing resumed\n",
				       sym_name(np));
			np->s.settle_time_valid = 0;
		}
		return;
	}

	/*
	 *  Nothing to do for now, but that may come.
	 */
	if (np->s.lasttime + 4*HZ < thistime) {
		np->s.lasttime = thistime;
	}

#ifdef SYM_CONF_PCIQ_MAY_MISS_COMPLETIONS
	/*
	 *  Some way-broken PCI bridges may lead to
	 *  completions being lost when the clearing
	 *  of the INTFLY flag by the CPU occurs
	 *  concurrently with the chip raising this flag.
	 *  If this ever happens, lost completions will
	 *  be reaped here.
	 */
	sym_wakeup_done(np);
#endif
}


/*
 *  PCI BUS error handler.
 */
void sym_log_bus_error(struct sym_hcb *np)
{
	u_short pci_sts;
	pci_read_config_word(np->s.device, PCI_STATUS, &pci_sts);
	if (pci_sts & 0xf900) {
		pci_write_config_word(np->s.device, PCI_STATUS, pci_sts);
		printf("%s: PCI STATUS = 0x%04x\n",
			sym_name(np), pci_sts & 0xf900);
	}
}
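
/*
 * For reference (descriptive note only): the 0xf900 mask above covers the
 * error bits of the PCI status register - detected parity error, signalled
 * system error, received master abort, received target abort, signalled
 * target abort and master data parity error - which are the
 * write-one-to-clear conditions this handler reports and acknowledges.
 */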

/*
 *  queuecommand method.  Entered with the host adapter lock held and
 *  interrupts disabled.
 */
static int sym53c8xx_queue_command(struct scsi_cmnd *cmd,
					void (*done)(struct scsi_cmnd *))
{
	struct sym_hcb *np = SYM_SOFTC_PTR(cmd);
	struct sym_ucmd *ucp = SYM_UCMD_PTR(cmd);
	int sts = 0;

	cmd->scsi_done = done;
	memset(ucp, 0, sizeof(*ucp));

	/*
	 *  Shorten our settle_time if needed for
	 *  this command not to time out.
	 */
	if (np->s.settle_time_valid && cmd->timeout_per_command) {
		unsigned long tlimit = jiffies + cmd->timeout_per_command;
		tlimit -= SYM_CONF_TIMER_INTERVAL*2;
		if (time_after(np->s.settle_time, tlimit)) {
			np->s.settle_time = tlimit;
		}
	}

	if (np->s.settle_time_valid)
		return SCSI_MLQUEUE_HOST_BUSY;

	sts = sym_queue_command(np, cmd);
	if (sts)
		return SCSI_MLQUEUE_HOST_BUSY;
	return 0;
}

/*
 *  Linux entry point of the interrupt handler.
 */
static irqreturn_t sym53c8xx_intr(int irq, void *dev_id)
{
	unsigned long flags;
	struct sym_hcb *np = (struct sym_hcb *)dev_id;

	if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("[");

	spin_lock_irqsave(np->s.host->host_lock, flags);
	sym_interrupt(np);
	spin_unlock_irqrestore(np->s.host->host_lock, flags);

	if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("]\n");

	return IRQ_HANDLED;
}

/*
 *  Linux entry point of the timer handler
 */
static void sym53c8xx_timer(unsigned long npref)
{
	struct sym_hcb *np = (struct sym_hcb *)npref;
	unsigned long flags;

	spin_lock_irqsave(np->s.host->host_lock, flags);
	sym_timer(np);
	spin_unlock_irqrestore(np->s.host->host_lock, flags);
}


/*
 *  What the eh thread wants us to perform.
 */
#define SYM_EH_ABORT		0
#define SYM_EH_DEVICE_RESET	1
#define SYM_EH_BUS_RESET	2
#define SYM_EH_HOST_RESET	3

/*
 *  What we will do regarding the involved SCSI command.
 */
#define SYM_EH_DO_IGNORE	0
#define SYM_EH_DO_WAIT		2

/*
 *  scsi_done() alias when error recovery is in progress.
 */
static void sym_eh_done(struct scsi_cmnd *cmd)
{
	struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);
	BUILD_BUG_ON(sizeof(struct scsi_pointer) < sizeof(struct sym_ucmd));

	cmd->scsi_done = ucmd->old_done;

	if (ucmd->to_do == SYM_EH_DO_WAIT)
		complete(ucmd->eh_done);
}

/*
 *  Generic method for our eh processing.
 *  The 'op' argument tells what we have to do.
 */
static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd)
{
	struct sym_hcb *np = SYM_SOFTC_PTR(cmd);
	struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);
	struct Scsi_Host *host = cmd->device->host;
	SYM_QUEHEAD *qp;
	int to_do = SYM_EH_DO_IGNORE;
	int sts = -1;
	struct completion eh_done;

	dev_warn(&cmd->device->sdev_gendev, "%s operation started.\n", opname);

	spin_lock_irq(host->host_lock);
	/* This one is queued in some place -> to wait for completion */
	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
		struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		if (cp->cmd == cmd) {
			to_do = SYM_EH_DO_WAIT;
			break;
		}
	}

	if (to_do == SYM_EH_DO_WAIT) {
		init_completion(&eh_done);
		ucmd->old_done = cmd->scsi_done;
		ucmd->eh_done = &eh_done;
		wmb();
		cmd->scsi_done = sym_eh_done;
	}

	/* Try to perform the operation we have been asked for */
	sts = -1;
	switch(op) {
	case SYM_EH_ABORT:
		sts = sym_abort_scsiio(np, cmd, 1);
		break;
	case SYM_EH_DEVICE_RESET:
		sts = sym_reset_scsi_target(np, cmd->device->id);
		break;
	case SYM_EH_BUS_RESET:
		sym_reset_scsi_bus(np, 1);
		sts = 0;
		break;
	case SYM_EH_HOST_RESET:
		sym_reset_scsi_bus(np, 0);
		sym_start_up(np, 1);
		sts = 0;
		break;
	default:
		break;
	}

	/* On error, restore everything and cross fingers :) */
	if (sts) {
		cmd->scsi_done = ucmd->old_done;
		to_do = SYM_EH_DO_IGNORE;
	}

	ucmd->to_do = to_do;
	spin_unlock_irq(host->host_lock);

	if (to_do == SYM_EH_DO_WAIT) {
		if (!wait_for_completion_timeout(&eh_done, 5*HZ)) {
			ucmd->to_do = SYM_EH_DO_IGNORE;
			wmb();
			sts = -2;
		}
	}
	dev_warn(&cmd->device->sdev_gendev, "%s operation %s.\n", opname,
			sts==0 ? "complete" : sts==-2 ? "timed-out" : "failed");
	return sts ? SCSI_FAILED : SCSI_SUCCESS;
}


/*
 *  Error handlers called from the eh thread (one thread per HBA).
 */
static int sym53c8xx_eh_abort_handler(struct scsi_cmnd *cmd)
{
	return sym_eh_handler(SYM_EH_ABORT, "ABORT", cmd);
}

static int sym53c8xx_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	return sym_eh_handler(SYM_EH_DEVICE_RESET, "DEVICE RESET", cmd);
}

static int sym53c8xx_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	return sym_eh_handler(SYM_EH_BUS_RESET, "BUS RESET", cmd);
}

static int sym53c8xx_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	return sym_eh_handler(SYM_EH_HOST_RESET, "HOST RESET", cmd);
}

/*
 *  Tune device queuing depth, according to various limits.
 */
static void sym_tune_dev_queuing(struct sym_tcb *tp, int lun, u_short reqtags)
{
	struct sym_lcb *lp = sym_lp(tp, lun);
	u_short	oldtags;

	if (!lp)
		return;

	oldtags = lp->s.reqtags;

	if (reqtags > lp->s.scdev_depth)
		reqtags = lp->s.scdev_depth;

	lp->s.reqtags = reqtags;

	if (reqtags != oldtags) {
		dev_info(&tp->starget->dev,
			 "tagged command queuing %s, command queue depth %d.\n",
			 lp->s.reqtags ? "enabled" : "disabled", reqtags);
	}
}
"enabled" : "disabled", reqtags); 836 } 837 } 838 839 /* 840 * Linux select queue depths function 841 */ 842 #define DEF_DEPTH (sym_driver_setup.max_tag) 843 #define ALL_TARGETS -2 844 #define NO_TARGET -1 845 #define ALL_LUNS -2 846 #define NO_LUN -1 847 848 static int device_queue_depth(struct sym_hcb *np, int target, int lun) 849 { 850 int c, h, t, u, v; 851 char *p = sym_driver_setup.tag_ctrl; 852 char *ep; 853 854 h = -1; 855 t = NO_TARGET; 856 u = NO_LUN; 857 while ((c = *p++) != 0) { 858 v = simple_strtoul(p, &ep, 0); 859 switch(c) { 860 case '/': 861 ++h; 862 t = ALL_TARGETS; 863 u = ALL_LUNS; 864 break; 865 case 't': 866 if (t != target) 867 t = (target == v) ? v : NO_TARGET; 868 u = ALL_LUNS; 869 break; 870 case 'u': 871 if (u != lun) 872 u = (lun == v) ? v : NO_LUN; 873 break; 874 case 'q': 875 if (h == np->s.unit && 876 (t == ALL_TARGETS || t == target) && 877 (u == ALL_LUNS || u == lun)) 878 return v; 879 break; 880 case '-': 881 t = ALL_TARGETS; 882 u = ALL_LUNS; 883 break; 884 default: 885 break; 886 } 887 p = ep; 888 } 889 return DEF_DEPTH; 890 } 891 892 static int sym53c8xx_slave_alloc(struct scsi_device *sdev) 893 { 894 struct sym_hcb *np = sym_get_hcb(sdev->host); 895 struct sym_tcb *tp = &np->target[sdev->id]; 896 struct sym_lcb *lp; 897 898 if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN) 899 return -ENXIO; 900 901 tp->starget = sdev->sdev_target; 902 /* 903 * Fail the device init if the device is flagged NOSCAN at BOOT in 904 * the NVRAM. This may speed up boot and maintain coherency with 905 * BIOS device numbering. Clearing the flag allows the user to 906 * rescan skipped devices later. We also return an error for 907 * devices not flagged for SCAN LUNS in the NVRAM since some single 908 * lun devices behave badly when asked for a non zero LUN. 909 */ 910 911 if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) { 912 tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED; 913 starget_printk(KERN_INFO, tp->starget, 914 "Scan at boot disabled in NVRAM\n"); 915 return -ENXIO; 916 } 917 918 if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) { 919 if (sdev->lun != 0) 920 return -ENXIO; 921 starget_printk(KERN_INFO, tp->starget, 922 "Multiple LUNs disabled in NVRAM\n"); 923 } 924 925 lp = sym_alloc_lcb(np, sdev->id, sdev->lun); 926 if (!lp) 927 return -ENOMEM; 928 929 spi_min_period(tp->starget) = tp->usr_period; 930 spi_max_width(tp->starget) = tp->usr_width; 931 932 return 0; 933 } 934 935 /* 936 * Linux entry point for device queue sizing. 937 */ 938 static int sym53c8xx_slave_configure(struct scsi_device *sdev) 939 { 940 struct sym_hcb *np = sym_get_hcb(sdev->host); 941 struct sym_tcb *tp = &np->target[sdev->id]; 942 struct sym_lcb *lp = sym_lp(tp, sdev->lun); 943 int reqtags, depth_to_use; 944 945 /* 946 * Get user flags. 947 */ 948 lp->curr_flags = lp->user_flags; 949 950 /* 951 * Select queue depth from driver setup. 952 * Donnot use more than configured by user. 953 * Use at least 2. 954 * Donnot use more than our maximum. 955 */ 956 reqtags = device_queue_depth(np, sdev->id, sdev->lun); 957 if (reqtags > tp->usrtags) 958 reqtags = tp->usrtags; 959 if (!sdev->tagged_supported) 960 reqtags = 0; 961 #if 1 /* Avoid to locally queue commands for no good reasons */ 962 if (reqtags > SYM_CONF_MAX_TAG) 963 reqtags = SYM_CONF_MAX_TAG; 964 depth_to_use = (reqtags ? reqtags : 2); 965 #else 966 depth_to_use = (reqtags ? SYM_CONF_MAX_TAG : 2); 967 #endif 968 scsi_adjust_queue_depth(sdev, 969 (sdev->tagged_supported ? 

static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
{
	struct sym_hcb *np = sym_get_hcb(sdev->host);
	struct sym_tcb *tp = &np->target[sdev->id];
	struct sym_lcb *lp;

	if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN)
		return -ENXIO;

	tp->starget = sdev->sdev_target;
	/*
	 * Fail the device init if the device is flagged NOSCAN at BOOT in
	 * the NVRAM.  This may speed up boot and maintain coherency with
	 * BIOS device numbering.  Clearing the flag allows the user to
	 * rescan skipped devices later.  We also return an error for
	 * devices not flagged for SCAN LUNS in the NVRAM since some single
	 * lun devices behave badly when asked for a nonzero LUN.
	 */

	if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) {
		tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
		starget_printk(KERN_INFO, tp->starget,
				"Scan at boot disabled in NVRAM\n");
		return -ENXIO;
	}

	if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) {
		if (sdev->lun != 0)
			return -ENXIO;
		starget_printk(KERN_INFO, tp->starget,
				"Multiple LUNs disabled in NVRAM\n");
	}

	lp = sym_alloc_lcb(np, sdev->id, sdev->lun);
	if (!lp)
		return -ENOMEM;

	spi_min_period(tp->starget) = tp->usr_period;
	spi_max_width(tp->starget) = tp->usr_width;

	return 0;
}

/*
 *  Linux entry point for device queue sizing.
 */
static int sym53c8xx_slave_configure(struct scsi_device *sdev)
{
	struct sym_hcb *np = sym_get_hcb(sdev->host);
	struct sym_tcb *tp = &np->target[sdev->id];
	struct sym_lcb *lp = sym_lp(tp, sdev->lun);
	int reqtags, depth_to_use;

	/*
	 *  Get user flags.
	 */
	lp->curr_flags = lp->user_flags;

	/*
	 *  Select queue depth from driver setup.
	 *  Do not use more than configured by user.
	 *  Use at least 2.
	 *  Do not use more than our maximum.
	 */
	reqtags = device_queue_depth(np, sdev->id, sdev->lun);
	if (reqtags > tp->usrtags)
		reqtags = tp->usrtags;
	if (!sdev->tagged_supported)
		reqtags = 0;
#if 1 /* Avoid queueing commands locally for no good reason */
	if (reqtags > SYM_CONF_MAX_TAG)
		reqtags = SYM_CONF_MAX_TAG;
	depth_to_use = (reqtags ? reqtags : 2);
#else
	depth_to_use = (reqtags ? SYM_CONF_MAX_TAG : 2);
#endif
	scsi_adjust_queue_depth(sdev,
				(sdev->tagged_supported ? MSG_SIMPLE_TAG : 0),
				depth_to_use);
	lp->s.scdev_depth = depth_to_use;
	sym_tune_dev_queuing(tp, sdev->lun, reqtags);

	if (!spi_initial_dv(sdev->sdev_target))
		spi_dv_device(sdev);

	return 0;
}

static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
{
	struct sym_hcb *np = sym_get_hcb(sdev->host);
	struct sym_lcb *lp = sym_lp(&np->target[sdev->id], sdev->lun);

	if (lp->itlq_tbl)
		sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK * 4, "ITLQ_TBL");
	kfree(lp->cb_tags);
	sym_mfree_dma(lp, sizeof(*lp), "LCB");
}

/*
 *  Linux entry point for info() function
 */
static const char *sym53c8xx_info (struct Scsi_Host *host)
{
	return SYM_DRIVER_NAME;
}


#ifdef SYM_LINUX_PROC_INFO_SUPPORT
/*
 *  Proc file system stuff
 *
 *  A read operation returns adapter information.
 *  A write operation is a control command.
 *  The string is parsed in the driver code and the command is passed
 *  to the sym_usercmd() function.
 */

#ifdef SYM_LINUX_USER_COMMAND_SUPPORT

struct	sym_usrcmd {
	u_long	target;
	u_long	lun;
	u_long	data;
	u_long	cmd;
};

#define UC_SETSYNC	10
#define UC_SETTAGS	11
#define UC_SETDEBUG	12
#define UC_SETWIDE	14
#define UC_SETFLAG	15
#define UC_SETVERBOSE	17
#define UC_RESETDEV	18
#define UC_CLEARDEV	19

static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc)
{
	struct sym_tcb *tp;
	int t, l;

	switch (uc->cmd) {
	case 0: return;

#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
	case UC_SETDEBUG:
		sym_debug_flags = uc->data;
		break;
#endif
	case UC_SETVERBOSE:
		np->verbose = uc->data;
		break;
	default:
		/*
		 * We assume that other commands apply to targets.
		 * This should always be the case and avoids repeating
		 * the 4 lines below 6 times.
		 */
		for (t = 0; t < SYM_CONF_MAX_TARGET; t++) {
			if (!((uc->target >> t) & 1))
				continue;
			tp = &np->target[t];

			switch (uc->cmd) {

			case UC_SETSYNC:
				if (!uc->data || uc->data >= 255) {
					tp->tgoal.iu = tp->tgoal.dt =
						tp->tgoal.qas = 0;
					tp->tgoal.offset = 0;
				} else if (uc->data <= 9 && np->minsync_dt) {
					if (uc->data < np->minsync_dt)
						uc->data = np->minsync_dt;
					tp->tgoal.iu = tp->tgoal.dt =
						tp->tgoal.qas = 1;
					tp->tgoal.width = 1;
					tp->tgoal.period = uc->data;
					tp->tgoal.offset = np->maxoffs_dt;
				} else {
					if (uc->data < np->minsync)
						uc->data = np->minsync;
					tp->tgoal.iu = tp->tgoal.dt =
						tp->tgoal.qas = 0;
					tp->tgoal.period = uc->data;
					tp->tgoal.offset = np->maxoffs;
				}
				tp->tgoal.check_nego = 1;
				break;
			case UC_SETWIDE:
				tp->tgoal.width = uc->data ? 1 : 0;
				tp->tgoal.check_nego = 1;
				break;
			case UC_SETTAGS:
				for (l = 0; l < SYM_CONF_MAX_LUN; l++)
					sym_tune_dev_queuing(tp, l, uc->data);
				break;
			case UC_RESETDEV:
				tp->to_reset = 1;
				np->istat_sem = SEM;
				OUTB(np, nc_istat, SIGP|SEM);
				break;
			case UC_CLEARDEV:
				for (l = 0; l < SYM_CONF_MAX_LUN; l++) {
					struct sym_lcb *lp = sym_lp(tp, l);
					if (lp) lp->to_clear = 1;
				}
				np->istat_sem = SEM;
				OUTB(np, nc_istat, SIGP|SEM);
				break;
			case UC_SETFLAG:
				tp->usrflags = uc->data;
				break;
			}
		}
		break;
	}
}

static int skip_spaces(char *ptr, int len)
{
	int cnt, c;

	for (cnt = len; cnt > 0 && (c = *ptr++) && isspace(c); cnt--);

	return (len - cnt);
}

static int get_int_arg(char *ptr, int len, u_long *pv)
{
	char *end;

	*pv = simple_strtoul(ptr, &end, 10);
	return (end - ptr);
}

static int is_keyword(char *ptr, int len, char *verb)
{
	int verb_len = strlen(verb);

	if (len >= verb_len && !memcmp(verb, ptr, verb_len))
		return verb_len;
	else
		return 0;
}

#define SKIP_SPACES(ptr, len)						\
	if ((arg_len = skip_spaces(ptr, len)) < 1)			\
		return -EINVAL;						\
	ptr += arg_len; len -= arg_len;

#define GET_INT_ARG(ptr, len, v)					\
	if (!(arg_len = get_int_arg(ptr, len, &(v))))			\
		return -EINVAL;						\
	ptr += arg_len; len -= arg_len;


/*
 * Parse a control command
 */

static int sym_user_command(struct sym_hcb *np, char *buffer, int length)
{
	char *ptr	= buffer;
	int len		= length;
	struct sym_usrcmd cmd, *uc = &cmd;
	int		arg_len;
	u_long		target;

	memset(uc, 0, sizeof(*uc));

	if (len > 0 && ptr[len-1] == '\n')
		--len;

	if ((arg_len = is_keyword(ptr, len, "setsync")) != 0)
		uc->cmd = UC_SETSYNC;
	else if ((arg_len = is_keyword(ptr, len, "settags")) != 0)
		uc->cmd = UC_SETTAGS;
	else if ((arg_len = is_keyword(ptr, len, "setverbose")) != 0)
		uc->cmd = UC_SETVERBOSE;
	else if ((arg_len = is_keyword(ptr, len, "setwide")) != 0)
		uc->cmd = UC_SETWIDE;
#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
	else if ((arg_len = is_keyword(ptr, len, "setdebug")) != 0)
		uc->cmd = UC_SETDEBUG;
#endif
	else if ((arg_len = is_keyword(ptr, len, "setflag")) != 0)
		uc->cmd = UC_SETFLAG;
	else if ((arg_len = is_keyword(ptr, len, "resetdev")) != 0)
		uc->cmd = UC_RESETDEV;
	else if ((arg_len = is_keyword(ptr, len, "cleardev")) != 0)
		uc->cmd = UC_CLEARDEV;
	else
		arg_len = 0;

#ifdef DEBUG_PROC_INFO
	printk("sym_user_command: arg_len=%d, cmd=%ld\n", arg_len, uc->cmd);
#endif

	if (!arg_len)
		return -EINVAL;
	ptr += arg_len; len -= arg_len;

	switch(uc->cmd) {
	case UC_SETSYNC:
	case UC_SETTAGS:
	case UC_SETWIDE:
	case UC_SETFLAG:
	case UC_RESETDEV:
	case UC_CLEARDEV:
		SKIP_SPACES(ptr, len);
		if ((arg_len = is_keyword(ptr, len, "all")) != 0) {
			ptr += arg_len; len -= arg_len;
			uc->target = ~0;
		} else {
			GET_INT_ARG(ptr, len, target);
			uc->target = (1<<target);
#ifdef DEBUG_PROC_INFO
			printk("sym_user_command: target=%ld\n", target);
#endif
		}
		break;
	}

	switch(uc->cmd) {
	case UC_SETVERBOSE:
	case UC_SETSYNC:
	case UC_SETTAGS:
	case UC_SETWIDE:
		SKIP_SPACES(ptr, len);
		GET_INT_ARG(ptr, len, uc->data);
#ifdef DEBUG_PROC_INFO
		printk("sym_user_command: data=%ld\n", uc->data);
#endif
		break;
#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
	case UC_SETDEBUG:
		while (len > 0) {
			SKIP_SPACES(ptr, len);
			if ((arg_len = is_keyword(ptr, len, "alloc")))
				uc->data |= DEBUG_ALLOC;
			else if ((arg_len = is_keyword(ptr, len, "phase")))
				uc->data |= DEBUG_PHASE;
			else if ((arg_len = is_keyword(ptr, len, "queue")))
				uc->data |= DEBUG_QUEUE;
			else if ((arg_len = is_keyword(ptr, len, "result")))
				uc->data |= DEBUG_RESULT;
			else if ((arg_len = is_keyword(ptr, len, "scatter")))
				uc->data |= DEBUG_SCATTER;
			else if ((arg_len = is_keyword(ptr, len, "script")))
				uc->data |= DEBUG_SCRIPT;
			else if ((arg_len = is_keyword(ptr, len, "tiny")))
				uc->data |= DEBUG_TINY;
			else if ((arg_len = is_keyword(ptr, len, "timing")))
				uc->data |= DEBUG_TIMING;
			else if ((arg_len = is_keyword(ptr, len, "nego")))
				uc->data |= DEBUG_NEGO;
			else if ((arg_len = is_keyword(ptr, len, "tags")))
				uc->data |= DEBUG_TAGS;
			else if ((arg_len = is_keyword(ptr, len, "pointer")))
				uc->data |= DEBUG_POINTER;
			else
				return -EINVAL;
			ptr += arg_len; len -= arg_len;
		}
#ifdef DEBUG_PROC_INFO
		printk("sym_user_command: data=%ld\n", uc->data);
#endif
		break;
#endif /* SYM_LINUX_DEBUG_CONTROL_SUPPORT */
	case UC_SETFLAG:
		while (len > 0) {
			SKIP_SPACES(ptr, len);
			if ((arg_len = is_keyword(ptr, len, "no_disc")))
				uc->data &= ~SYM_DISC_ENABLED;
			else
				return -EINVAL;
			ptr += arg_len; len -= arg_len;
		}
		break;
	default:
		break;
	}

	if (len)
		return -EINVAL;
	else {
		unsigned long flags;

		spin_lock_irqsave(np->s.host->host_lock, flags);
		sym_exec_user_command(np, uc);
		spin_unlock_irqrestore(np->s.host->host_lock, flags);
	}
	return length;
}

#endif	/* SYM_LINUX_USER_COMMAND_SUPPORT */


#ifdef SYM_LINUX_USER_INFO_SUPPORT
/*
 *  Information through the proc file system.
 */
struct info_str {
	char *buffer;
	int length;
	int offset;
	int pos;
};

static void copy_mem_info(struct info_str *info, char *data, int len)
{
	if (info->pos + len > info->length)
		len = info->length - info->pos;

	if (info->pos + len < info->offset) {
		info->pos += len;
		return;
	}
	if (info->pos < info->offset) {
		data += (info->offset - info->pos);
		len  -= (info->offset - info->pos);
	}

	if (len > 0) {
		memcpy(info->buffer + info->pos, data, len);
		info->pos += len;
	}
}

static int copy_info(struct info_str *info, char *fmt, ...)
{
	va_list args;
	char buf[81];
	int len;

	va_start(args, fmt);
	len = vsprintf(buf, fmt, args);
	va_end(args);

	copy_mem_info(info, buf, len);
	return len;
}
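
/*
 * Example interaction with this proc interface (illustrative only, the
 * host number 0 is hypothetical):
 *
 *	cat /proc/scsi/sym53c8xx/0                      # adapter info (read)
 *	echo "settags 2 16" > /proc/scsi/sym53c8xx/0    # control command (write)
 *
 * The write path is only available when SYM_LINUX_USER_COMMAND_SUPPORT is
 * defined; see sym_user_command() above for the accepted keywords.
 */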

/*
 *  Copy formatted information into the input buffer.
 */
static int sym_host_info(struct sym_hcb *np, char *ptr, off_t offset, int len)
{
	struct info_str info;

	info.buffer	= ptr;
	info.length	= len;
	info.offset	= offset;
	info.pos	= 0;

	copy_info(&info, "Chip " NAME53C "%s, device id 0x%x, "
			 "revision id 0x%x\n",
			 np->s.chip_name, np->device_id, np->revision_id);
	copy_info(&info, "At PCI address %s, IRQ " IRQ_FMT "\n",
			 pci_name(np->s.device), IRQ_PRM(np->s.irq));
	copy_info(&info, "Min. period factor %d, %s SCSI BUS%s\n",
			 (int) (np->minsync_dt ? np->minsync_dt : np->minsync),
			 np->maxwide ? "Wide" : "Narrow",
			 np->minsync_dt ? ", DT capable" : "");

	copy_info(&info, "Max. started commands %d, "
			 "max. commands per LUN %d\n",
			 SYM_CONF_MAX_START, SYM_CONF_MAX_TAG);

	return info.pos > info.offset ? info.pos - info.offset : 0;
}
#endif /* SYM_LINUX_USER_INFO_SUPPORT */

/*
 *  Entry point of the scsi proc fs of the driver.
 *  - func = 0 means read  (returns adapter information)
 *  - func = 1 means write (not yet merged from sym53c8xx)
 */
static int sym53c8xx_proc_info(struct Scsi_Host *host, char *buffer,
			char **start, off_t offset, int length, int func)
{
	struct sym_hcb *np = sym_get_hcb(host);
	int retv;

	if (func) {
#ifdef	SYM_LINUX_USER_COMMAND_SUPPORT
		retv = sym_user_command(np, buffer, length);
#else
		retv = -EINVAL;
#endif
	} else {
		if (start)
			*start = buffer;
#ifdef SYM_LINUX_USER_INFO_SUPPORT
		retv = sym_host_info(np, buffer, offset, length);
#else
		retv = -EINVAL;
#endif
	}

	return retv;
}
#endif /* SYM_LINUX_PROC_INFO_SUPPORT */

/*
 *  Free controller resources.
 */
static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev)
{
	/*
	 *  Free O/S specific resources.
	 */
	if (np->s.irq)
		free_irq(np->s.irq, np);
	if (np->s.ioaddr)
		pci_iounmap(pdev, np->s.ioaddr);
	if (np->s.ramaddr)
		pci_iounmap(pdev, np->s.ramaddr);

	/*
	 *  Free O/S independent resources.
	 */
	sym_hcb_free(np);

	sym_mfree_dma(np, sizeof(*np), "HCB");
}

/*
 *  Ask/tell the system about DMA addressing.
 */
static int sym_setup_bus_dma_mask(struct sym_hcb *np)
{
#if SYM_CONF_DMA_ADDRESSING_MODE > 0
#if   SYM_CONF_DMA_ADDRESSING_MODE == 1
#define	DMA_DAC_MASK	DMA_40BIT_MASK
#elif SYM_CONF_DMA_ADDRESSING_MODE == 2
#define	DMA_DAC_MASK	DMA_64BIT_MASK
#endif
	if ((np->features & FE_DAC) &&
			!pci_set_dma_mask(np->s.device, DMA_DAC_MASK)) {
		np->use_dac = 1;
		return 0;
	}
#endif

	if (!pci_set_dma_mask(np->s.device, DMA_32BIT_MASK))
		return 0;

	printf_warning("%s: No suitable DMA available\n", sym_name(np));
	return -1;
}

/*
 *  Host attach and initialisations.
 *
 *  Allocate host data and ncb structure.
 *  Remap MMIO region.
 *  Do chip initialization.
 *  If all is OK, install interrupt handling and
 *  start the timer daemon.
 */
static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt,
		int unit, struct sym_device *dev)
{
	struct host_data *host_data;
	struct sym_hcb *np = NULL;
	struct Scsi_Host *instance = NULL;
	struct pci_dev *pdev = dev->pdev;
	unsigned long flags;
	struct sym_fw *fw;

	printk(KERN_INFO
		"sym%d: <%s> rev 0x%x at pci %s irq " IRQ_FMT "\n",
		unit, dev->chip.name, dev->chip.revision_id,
		pci_name(pdev), IRQ_PRM(pdev->irq));

	/*
	 *  Get the firmware for this chip.
	 */
	fw = sym_find_firmware(&dev->chip);
	if (!fw)
		goto attach_failed;

	/*
	 *  Allocate host_data structure
	 */
	instance = scsi_host_alloc(tpnt, sizeof(*host_data));
	if (!instance)
		goto attach_failed;
	host_data = (struct host_data *) instance->hostdata;

	/*
	 *  Allocate immediately the host control block,
	 *  since we are only expecting to succeed. :)
	 *  We keep track in the HCB of all the resources that
	 *  are to be released on error.
	 */
	np = __sym_calloc_dma(&pdev->dev, sizeof(*np), "HCB");
	if (!np)
		goto attach_failed;
	np->s.device = pdev;
	np->bus_dmat = &pdev->dev; /* Results in 1 DMA pool per HBA */
	host_data->ncb = np;
	np->s.host = instance;

	pci_set_drvdata(pdev, np);

	/*
	 *  Copy some useful info to the HCB.
	 */
	np->hcb_ba = vtobus(np);
	np->verbose = sym_driver_setup.verbose;
	np->s.device = pdev;
	np->s.unit = unit;
	np->device_id = dev->chip.device_id;
	np->revision_id = dev->chip.revision_id;
	np->features = dev->chip.features;
	np->clock_divn = dev->chip.nr_divisor;
	np->maxoffs = dev->chip.offset_max;
	np->maxburst = dev->chip.burst_max;
	np->myaddr = dev->host_id;

	/*
	 *  Edit its name.
	 */
	strlcpy(np->s.chip_name, dev->chip.name, sizeof(np->s.chip_name));
	sprintf(np->s.inst_name, "sym%d", np->s.unit);

	if (sym_setup_bus_dma_mask(np))
		goto attach_failed;

	/*
	 *  Try to map the controller chip to
	 *  virtual and physical memory.
	 */
	np->mmio_ba	= (u32)dev->mmio_base;
	np->s.ioaddr	= dev->s.ioaddr;
	np->s.ramaddr	= dev->s.ramaddr;
	np->s.io_ws	= (np->features & FE_IO256) ? 256 : 128;

	/*
	 *  Map on-chip RAM if present and supported.
	 */
	if (!(np->features & FE_RAM))
		dev->ram_base = 0;
	if (dev->ram_base) {
		np->ram_ba = (u32)dev->ram_base;
		np->ram_ws = (np->features & FE_RAM8K) ? 8192 : 4096;
	}

	if (sym_hcb_attach(instance, fw, dev->nvram))
		goto attach_failed;

	/*
	 *  Install the interrupt handler.
	 *  If we synchronize the C code with SCRIPTS on interrupt,
	 *  we do not want to share the INTR line at all.
	 */
	if (request_irq(pdev->irq, sym53c8xx_intr, IRQF_SHARED, NAME53C8XX, np)) {
		printf_err("%s: request irq %d failure\n",
			sym_name(np), pdev->irq);
		goto attach_failed;
	}
	np->s.irq = pdev->irq;

	/*
	 *  After SCSI devices have been opened, we cannot
	 *  reset the bus safely, so we do it here.
	 */
	spin_lock_irqsave(instance->host_lock, flags);
	if (sym_reset_scsi_bus(np, 0))
		goto reset_failed;

	/*
	 *  Start the SCRIPTS.
	 */
	sym_start_up(np, 1);

	/*
	 *  Start the timer daemon
	 */
	init_timer(&np->s.timer);
	np->s.timer.data     = (unsigned long) np;
	np->s.timer.function = sym53c8xx_timer;
	np->s.lasttime = 0;
	sym_timer(np);

	/*
	 *  Fill Linux host instance structure
	 *  and return success.
	 */
	instance->max_channel	= 0;
	instance->this_id	= np->myaddr;
	instance->max_id	= np->maxwide ? 16 : 8;
	instance->max_lun	= SYM_CONF_MAX_LUN;
	instance->unique_id	= pci_resource_start(pdev, 0);
	instance->cmd_per_lun	= SYM_CONF_MAX_TAG;
	instance->can_queue	= (SYM_CONF_MAX_START-2);
	instance->sg_tablesize	= SYM_CONF_MAX_SG;
	instance->max_cmd_len	= 16;
	BUG_ON(sym2_transport_template == NULL);
	instance->transportt	= sym2_transport_template;

	spin_unlock_irqrestore(instance->host_lock, flags);

	return instance;

reset_failed:
	printf_err("%s: FATAL ERROR: CHECK SCSI BUS - CABLES, "
		   "TERMINATION, DEVICE POWER etc.!\n", sym_name(np));
	spin_unlock_irqrestore(instance->host_lock, flags);
attach_failed:
	if (!instance)
		return NULL;
	printf_info("%s: giving up ...\n", sym_name(np));
	if (np)
		sym_free_resources(np, pdev);
	scsi_host_put(instance);

	return NULL;
}


/*
 *  Detect and try to read SYMBIOS and TEKRAM NVRAM.
 */
#if SYM_CONF_NVRAM_SUPPORT
static void __devinit sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
{
	devp->nvram = nvp;
	devp->device_id = devp->chip.device_id;
	nvp->type = 0;

	sym_read_nvram(devp, nvp);
}
#else
static inline void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
{
}
#endif	/* SYM_CONF_NVRAM_SUPPORT */

static int __devinit sym_check_supported(struct sym_device *device)
{
	struct sym_chip *chip;
	struct pci_dev *pdev = device->pdev;
	u_char revision;
	unsigned long io_port = pci_resource_start(pdev, 0);
	int i;

	/*
	 *  If user excluded this chip, do not initialize it.
	 *  I hate this code so much.  Must kill it.
	 */
	if (io_port) {
		for (i = 0 ; i < 8 ; i++) {
			if (sym_driver_setup.excludes[i] == io_port)
				return -ENODEV;
		}
	}

	/*
	 * Check if the chip is supported.  Then copy the chip description
	 * to our device structure so we can make it match the actual device
	 * and options.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
	chip = sym_lookup_chip_table(pdev->device, revision);
	if (!chip) {
		dev_info(&pdev->dev, "device not supported\n");
		return -ENODEV;
	}
	memcpy(&device->chip, chip, sizeof(device->chip));
	device->chip.revision_id = revision;

	return 0;
}

/*
 *  Ignore Symbios chips controlled by various RAID controllers.
 *  These controllers set value 0x52414944 at RAM end - 16.
 */
static int __devinit sym_check_raid(struct sym_device *device)
{
	unsigned int ram_size, ram_val;

	if (!device->s.ramaddr)
		return 0;

	if (device->chip.features & FE_RAM8K)
		ram_size = 8192;
	else
		ram_size = 4096;

	ram_val = readl(device->s.ramaddr + ram_size - 16);
	if (ram_val != 0x52414944)
		return 0;

	dev_info(&device->pdev->dev,
			"not initializing, driven by RAID controller.\n");
	return -ENODEV;
}

static int __devinit sym_set_workarounds(struct sym_device *device)
{
	struct sym_chip *chip = &device->chip;
	struct pci_dev *pdev = device->pdev;
	u_short status_reg;

	/*
	 *  (ITEM 12 of a DEL about the 896 I haven't yet).
	 *  We must ensure the chip will use WRITE AND INVALIDATE.
	 *  The revision number limit is for now arbitrary.
	 */
	if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && chip->revision_id < 0x4) {
		chip->features |= (FE_WRIE | FE_CLSE);
	}

	/* If the chip can do Memory Write Invalidate, enable it */
	if (chip->features & FE_WRIE) {
		if (pci_set_mwi(pdev))
			return -ENODEV;
	}

	/*
	 *  Work around for errant bit in 895A. The 66Mhz
	 *  capable bit is set erroneously. Clear this bit.
	 *  (Item 1 DEL 533)
	 *
	 *  Make sure Config space and Features agree.
	 *
	 *  Recall: writes are not normal to status register -
	 *  write a 1 to clear and a 0 to leave unchanged.
	 *  Can only reset bits.
	 */
	pci_read_config_word(pdev, PCI_STATUS, &status_reg);
	if (chip->features & FE_66MHZ) {
		if (!(status_reg & PCI_STATUS_66MHZ))
			chip->features &= ~FE_66MHZ;
	} else {
		if (status_reg & PCI_STATUS_66MHZ) {
			status_reg = PCI_STATUS_66MHZ;
			pci_write_config_word(pdev, PCI_STATUS, status_reg);
			pci_read_config_word(pdev, PCI_STATUS, &status_reg);
		}
	}

	return 0;
}

/*
 *  Read and check the PCI configuration for any detected NCR
 *  boards and save data for attaching after all boards have
 *  been detected.
 */
static void __devinit
sym_init_device(struct pci_dev *pdev, struct sym_device *device)
{
	int i = 2;
	struct pci_bus_region bus_addr;

	device->host_id = SYM_SETUP_HOST_ID;
	device->pdev = pdev;

	pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[1]);
	device->mmio_base = bus_addr.start;

	/*
	 * If the BAR is 64-bit, resource 2 will be occupied by the
	 * upper 32 bits
	 */
	if (!pdev->resource[i].flags)
		i++;
	pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[i]);
	device->ram_base = bus_addr.start;

#ifdef CONFIG_SCSI_SYM53C8XX_MMIO
	if (device->mmio_base)
		device->s.ioaddr = pci_iomap(pdev, 1,
						pci_resource_len(pdev, 1));
#endif
	if (!device->s.ioaddr)
		device->s.ioaddr = pci_iomap(pdev, 0,
						pci_resource_len(pdev, 0));
	if (device->ram_base)
		device->s.ramaddr = pci_iomap(pdev, i,
						pci_resource_len(pdev, i));
}

/*
 * The NCR PQS and PDS cards are constructed as a DEC bridge
 * behind which sits a proprietary NCR memory controller and
 * either four or two 53c875s as separate devices.  We can tell
 * if an 875 is part of a PQS/PDS or not since if it is, it will
 * be on the same bus as the memory controller.  In its usual
 * mode of operation, the 875s are slaved to the memory
 * controller for all transfers.  To operate with the Linux
 * driver, the memory controller is disabled and the 875s
 * freed to function independently.  The only wrinkle is that
 * the preset SCSI ID (which may be zero) must be read in from
 * a special configuration space register of the 875.
 */
static void sym_config_pqs(struct pci_dev *pdev, struct sym_device *sym_dev)
{
	int slot;
	u8 tmp;

	for (slot = 0; slot < 256; slot++) {
		struct pci_dev *memc = pci_get_slot(pdev->bus, slot);

		if (!memc || memc->vendor != 0x101a || memc->device == 0x0009) {
			pci_dev_put(memc);
			continue;
		}

		/* bit 1: allow individual 875 configuration */
		pci_read_config_byte(memc, 0x44, &tmp);
		if ((tmp & 0x2) == 0) {
			tmp |= 0x2;
			pci_write_config_byte(memc, 0x44, tmp);
		}

		/* bit 2: drive individual 875 interrupts to the bus */
		pci_read_config_byte(memc, 0x45, &tmp);
		if ((tmp & 0x4) == 0) {
			tmp |= 0x4;
			pci_write_config_byte(memc, 0x45, tmp);
		}

		pci_dev_put(memc);
		break;
	}

	pci_read_config_byte(pdev, 0x84, &tmp);
	sym_dev->host_id = tmp;
}

/*
 *  Called before unloading the module.
 *  Detach the host.
 *  We have to free resources and halt the NCR chip.
 */
static int sym_detach(struct sym_hcb *np, struct pci_dev *pdev)
{
	printk("%s: detaching ...\n", sym_name(np));

	del_timer_sync(&np->s.timer);

	/*
	 *  Reset NCR chip.
	 *  We should use sym_soft_reset(), but we don't want to do
	 *  so, since we may not be safe if interrupts occur.
	 */
	printk("%s: resetting chip\n", sym_name(np));
	OUTB(np, nc_istat, SRST);
	INB(np, nc_mbox1);
	udelay(10);
	OUTB(np, nc_istat, 0);

	sym_free_resources(np, pdev);

	return 1;
}

/*
 *  Driver host template.
 */
static struct scsi_host_template sym2_template = {
	.module			= THIS_MODULE,
	.name			= "sym53c8xx",
	.info			= sym53c8xx_info,
	.queuecommand		= sym53c8xx_queue_command,
	.slave_alloc		= sym53c8xx_slave_alloc,
	.slave_configure	= sym53c8xx_slave_configure,
	.slave_destroy		= sym53c8xx_slave_destroy,
	.eh_abort_handler	= sym53c8xx_eh_abort_handler,
	.eh_device_reset_handler = sym53c8xx_eh_device_reset_handler,
	.eh_bus_reset_handler	= sym53c8xx_eh_bus_reset_handler,
	.eh_host_reset_handler	= sym53c8xx_eh_host_reset_handler,
	.this_id		= 7,
	.use_clustering		= ENABLE_CLUSTERING,
	.max_sectors		= 0xFFFF,
#ifdef SYM_LINUX_PROC_INFO_SUPPORT
	.proc_info		= sym53c8xx_proc_info,
	.proc_name		= NAME53C8XX,
#endif
};

static int attach_count;

static int __devinit sym2_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct sym_device sym_dev;
	struct sym_nvram nvram;
	struct Scsi_Host *instance;

	memset(&sym_dev, 0, sizeof(sym_dev));
	memset(&nvram, 0, sizeof(nvram));

	if (pci_enable_device(pdev))
		goto leave;

	pci_set_master(pdev);

	if (pci_request_regions(pdev, NAME53C8XX))
		goto disable;

	sym_init_device(pdev, &sym_dev);
	if (sym_check_supported(&sym_dev))
		goto free;

	if (sym_check_raid(&sym_dev))
		goto leave;	/* Don't disable the device */

	if (sym_set_workarounds(&sym_dev))
		goto free;

	sym_config_pqs(pdev, &sym_dev);

	sym_get_nvram(&sym_dev, &nvram);

	instance = sym_attach(&sym2_template, attach_count, &sym_dev);
	if (!instance)
		goto free;

	if (scsi_add_host(instance, &pdev->dev))
		goto detach;
	scsi_scan_host(instance);

	attach_count++;

	return 0;

 detach:
	sym_detach(pci_get_drvdata(pdev), pdev);
 free:
	pci_release_regions(pdev);
 disable:
	pci_disable_device(pdev);
 leave:
	return -ENODEV;
}

static void __devexit sym2_remove(struct pci_dev *pdev)
{
	struct sym_hcb *np = pci_get_drvdata(pdev);
	struct Scsi_Host *host = np->s.host;

	scsi_remove_host(host);
	scsi_host_put(host);

	sym_detach(np, pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	attach_count--;
}

static void sym2_get_signalling(struct Scsi_Host *shost)
{
	struct sym_hcb *np = sym_get_hcb(shost);
	enum spi_signal_type type;

	switch (np->scsi_mode) {
	case SMODE_SE:
		type = SPI_SIGNAL_SE;
		break;
	case SMODE_LVD:
		type = SPI_SIGNAL_LVD;
		break;
	case SMODE_HVD:
		type = SPI_SIGNAL_HVD;
		break;
	default:
		type = SPI_SIGNAL_UNKNOWN;
		break;
	}
	spi_signalling(shost) = type;
}

static void sym2_set_offset(struct scsi_target *starget, int offset)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct sym_hcb *np = sym_get_hcb(shost);
	struct sym_tcb *tp = &np->target[starget->id];

	tp->tgoal.offset = offset;
	tp->tgoal.check_nego = 1;
}

static void sym2_set_period(struct scsi_target *starget, int period)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct sym_hcb *np = sym_get_hcb(shost);
	struct sym_tcb *tp = &np->target[starget->id];

	/* have to have DT for these transfers, but DT will also
	 * set width, so check that this is allowed */
	if (period <= np->minsync && spi_width(starget))
		tp->tgoal.dt = 1;

	tp->tgoal.period = period;
	tp->tgoal.check_nego = 1;
}

static void sym2_set_width(struct scsi_target *starget, int width)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct sym_hcb *np = sym_get_hcb(shost);
	struct sym_tcb *tp = &np->target[starget->id];

	/* It is illegal to have DT set on narrow transfers.  If DT is
	 * clear, we must also clear IU and QAS. */
	if (width == 0)
		tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;

	tp->tgoal.width = width;
	tp->tgoal.check_nego = 1;
}

static void sym2_set_dt(struct scsi_target *starget, int dt)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct sym_hcb *np = sym_get_hcb(shost);
	struct sym_tcb *tp = &np->target[starget->id];

	/* We must clear QAS and IU if DT is clear */
	if (dt)
		tp->tgoal.dt = 1;
	else
		tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
	tp->tgoal.check_nego = 1;
}

#if 0
static void sym2_set_iu(struct scsi_target *starget, int iu)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct sym_hcb *np = sym_get_hcb(shost);
	struct sym_tcb *tp = &np->target[starget->id];

	if (iu)
		tp->tgoal.iu = tp->tgoal.dt = 1;
	else
		tp->tgoal.iu = 0;
	tp->tgoal.check_nego = 1;
}

static void sym2_set_qas(struct scsi_target *starget, int qas)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct sym_hcb *np = sym_get_hcb(shost);
	struct sym_tcb *tp = &np->target[starget->id];

	if (qas)
		tp->tgoal.dt = tp->tgoal.qas = 1;
	else
		tp->tgoal.qas = 0;
	tp->tgoal.check_nego = 1;
}
#endif

static struct spi_function_template sym2_transport_functions = {
	.set_offset	= sym2_set_offset,
	.show_offset	= 1,
	.set_period	= sym2_set_period,
	.show_period	= 1,
	.set_width	= sym2_set_width,
	.show_width	= 1,
	.set_dt		= sym2_set_dt,
	.show_dt	= 1,
#if 0
	.set_iu		= sym2_set_iu,
	.show_iu	= 1,
	.set_qas	= sym2_set_qas,
	.show_qas	= 1,
#endif
	.get_signalling	= sym2_get_signalling,
};

static struct pci_device_id sym2_id_table[] __devinitdata = {
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C810,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C820,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C825,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C815,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C810AP,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C885,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C1510,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C895A,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C875A,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_33,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_66,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875J,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, sym2_id_table);

static struct pci_driver sym2_driver = {
	.name		= NAME53C8XX,
	.id_table	= sym2_id_table,
	.probe		= sym2_probe,
	.remove		= __devexit_p(sym2_remove),
};

static int __init sym2_init(void)
{
	int error;

	sym2_setup_params();
	sym2_transport_template = spi_attach_transport(&sym2_transport_functions);
	if (!sym2_transport_template)
		return -ENODEV;

	error = pci_register_driver(&sym2_driver);
	if (error)
		spi_release_transport(sym2_transport_template);
	return error;
}

static void __exit sym2_exit(void)
{
	pci_unregister_driver(&sym2_driver);
	spi_release_transport(sym2_transport_template);
}

module_init(sym2_init);
module_exit(sym2_exit);