/*
 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
 * of PCI-SCSI IO processors.
 *
 * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
 * Copyright (c) 2003-2005  Matthew Wilcox <matthew@wil.cx>
 *
 * This driver is derived from the Linux sym53c8xx driver.
 * Copyright (C) 1998-2000  Gerard Roudier
 *
 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
 * a port of the FreeBSD ncr driver to Linux-1.2.13.
 *
 * The original ncr driver has been written for 386bsd and FreeBSD by
 *         Wolfgang Stanglmeier        <wolf@cologne.de>
 *         Stefan Esser                <se@mi.Uni-Koeln.de>
 * Copyright (C) 1994  Wolfgang Stanglmeier
 *
 * Other major contributions:
 *
 * NVRAM detection and reading.
 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
 *
 *-----------------------------------------------------------------------------
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>

#include "sym_glue.h"
#include "sym_nvram.h"

#define NAME53C		"sym53c"
#define NAME53C8XX	"sym53c8xx"

/* SPARC just has to be different ...
*/ 58 #ifdef __sparc__ 59 #define IRQ_FMT "%s" 60 #define IRQ_PRM(x) __irq_itoa(x) 61 #else 62 #define IRQ_FMT "%d" 63 #define IRQ_PRM(x) (x) 64 #endif 65 66 struct sym_driver_setup sym_driver_setup = SYM_LINUX_DRIVER_SETUP; 67 unsigned int sym_debug_flags = 0; 68 69 static char *excl_string; 70 static char *safe_string; 71 module_param_named(cmd_per_lun, sym_driver_setup.max_tag, ushort, 0); 72 module_param_string(tag_ctrl, sym_driver_setup.tag_ctrl, 100, 0); 73 module_param_named(burst, sym_driver_setup.burst_order, byte, 0); 74 module_param_named(led, sym_driver_setup.scsi_led, byte, 0); 75 module_param_named(diff, sym_driver_setup.scsi_diff, byte, 0); 76 module_param_named(irqm, sym_driver_setup.irq_mode, byte, 0); 77 module_param_named(buschk, sym_driver_setup.scsi_bus_check, byte, 0); 78 module_param_named(hostid, sym_driver_setup.host_id, byte, 0); 79 module_param_named(verb, sym_driver_setup.verbose, byte, 0); 80 module_param_named(debug, sym_debug_flags, uint, 0); 81 module_param_named(settle, sym_driver_setup.settle_delay, byte, 0); 82 module_param_named(nvram, sym_driver_setup.use_nvram, byte, 0); 83 module_param_named(excl, excl_string, charp, 0); 84 module_param_named(safe, safe_string, charp, 0); 85 86 MODULE_PARM_DESC(cmd_per_lun, "The maximum number of tags to use by default"); 87 MODULE_PARM_DESC(tag_ctrl, "More detailed control over tags per LUN"); 88 MODULE_PARM_DESC(burst, "Maximum burst. 0 to disable, 255 to read from registers"); 89 MODULE_PARM_DESC(led, "Set to 1 to enable LED support"); 90 MODULE_PARM_DESC(diff, "0 for no differential mode, 1 for BIOS, 2 for always, 3 for not GPIO3"); 91 MODULE_PARM_DESC(irqm, "0 for open drain, 1 to leave alone, 2 for totem pole"); 92 MODULE_PARM_DESC(buschk, "0 to not check, 1 for detach on error, 2 for warn on error"); 93 MODULE_PARM_DESC(hostid, "The SCSI ID to use for the host adapters"); 94 MODULE_PARM_DESC(verb, "0 for minimal verbosity, 1 for normal, 2 for excessive"); 95 MODULE_PARM_DESC(debug, "Set bits to enable debugging"); 96 MODULE_PARM_DESC(settle, "Settle delay in seconds. Default 3"); 97 MODULE_PARM_DESC(nvram, "Option currently not used"); 98 MODULE_PARM_DESC(excl, "List ioport addresses here to prevent controllers from being attached"); 99 MODULE_PARM_DESC(safe, "Set other settings to a \"safe mode\""); 100 101 MODULE_LICENSE("GPL"); 102 MODULE_VERSION(SYM_VERSION); 103 MODULE_AUTHOR("Matthew Wilcox <matthew@wil.cx>"); 104 MODULE_DESCRIPTION("NCR, Symbios and LSI 8xx and 1010 PCI SCSI adapters"); 105 106 static void sym2_setup_params(void) 107 { 108 char *p = excl_string; 109 int xi = 0; 110 111 while (p && (xi < 8)) { 112 char *next_p; 113 int val = (int) simple_strtoul(p, &next_p, 0); 114 sym_driver_setup.excludes[xi++] = val; 115 p = next_p; 116 } 117 118 if (safe_string) { 119 if (*safe_string == 'y') { 120 sym_driver_setup.max_tag = 0; 121 sym_driver_setup.burst_order = 0; 122 sym_driver_setup.scsi_led = 0; 123 sym_driver_setup.scsi_diff = 1; 124 sym_driver_setup.irq_mode = 0; 125 sym_driver_setup.scsi_bus_check = 2; 126 sym_driver_setup.host_id = 7; 127 sym_driver_setup.verbose = 2; 128 sym_driver_setup.settle_delay = 10; 129 sym_driver_setup.use_nvram = 1; 130 } else if (*safe_string != 'n') { 131 printk(KERN_WARNING NAME53C8XX "Ignoring parameter %s" 132 " passed to safe option", safe_string); 133 } 134 } 135 } 136 137 /* 138 * We used to try to deal with 64-bit BARs here, but don't any more. 
139 * There are many parts of this driver which would need to be modified 140 * to handle a 64-bit base address, including scripts. I'm uncomfortable 141 * with making those changes when I have no way of testing it, so I'm 142 * just going to disable it. 143 * 144 * Note that some machines (eg HP rx8620 and Superdome) have bus addresses 145 * below 4GB and physical addresses above 4GB. These will continue to work. 146 */ 147 static int __devinit 148 pci_get_base_address(struct pci_dev *pdev, int index, unsigned long *basep) 149 { 150 u32 tmp; 151 unsigned long base; 152 #define PCI_BAR_OFFSET(index) (PCI_BASE_ADDRESS_0 + (index<<2)) 153 154 pci_read_config_dword(pdev, PCI_BAR_OFFSET(index++), &tmp); 155 base = tmp; 156 if ((tmp & 0x7) == PCI_BASE_ADDRESS_MEM_TYPE_64) { 157 pci_read_config_dword(pdev, PCI_BAR_OFFSET(index++), &tmp); 158 if (tmp > 0) { 159 dev_err(&pdev->dev, 160 "BAR %d is 64-bit, disabling\n", index - 1); 161 base = 0; 162 } 163 } 164 165 if ((base & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { 166 base &= PCI_BASE_ADDRESS_IO_MASK; 167 } else { 168 base &= PCI_BASE_ADDRESS_MEM_MASK; 169 } 170 171 *basep = base; 172 return index; 173 #undef PCI_BAR_OFFSET 174 } 175 176 static struct scsi_transport_template *sym2_transport_template = NULL; 177 178 /* 179 * Used by the eh thread to wait for command completion. 180 * It is allocated on the eh thread stack. 181 */ 182 struct sym_eh_wait { 183 struct completion done; 184 struct timer_list timer; 185 void (*old_done)(struct scsi_cmnd *); 186 int to_do; 187 int timed_out; 188 }; 189 190 /* 191 * Driver private area in the SCSI command structure. 192 */ 193 struct sym_ucmd { /* Override the SCSI pointer structure */ 194 dma_addr_t data_mapping; 195 u_char data_mapped; 196 struct sym_eh_wait *eh_wait; 197 }; 198 199 #define SYM_UCMD_PTR(cmd) ((struct sym_ucmd *)(&(cmd)->SCp)) 200 #define SYM_SOFTC_PTR(cmd) sym_get_hcb(cmd->device->host) 201 202 static void __unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) 203 { 204 int dma_dir = cmd->sc_data_direction; 205 206 switch(SYM_UCMD_PTR(cmd)->data_mapped) { 207 case 2: 208 pci_unmap_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir); 209 break; 210 case 1: 211 pci_unmap_single(pdev, SYM_UCMD_PTR(cmd)->data_mapping, 212 cmd->request_bufflen, dma_dir); 213 break; 214 } 215 SYM_UCMD_PTR(cmd)->data_mapped = 0; 216 } 217 218 static dma_addr_t __map_scsi_single_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) 219 { 220 dma_addr_t mapping; 221 int dma_dir = cmd->sc_data_direction; 222 223 mapping = pci_map_single(pdev, cmd->request_buffer, 224 cmd->request_bufflen, dma_dir); 225 if (mapping) { 226 SYM_UCMD_PTR(cmd)->data_mapped = 1; 227 SYM_UCMD_PTR(cmd)->data_mapping = mapping; 228 } 229 230 return mapping; 231 } 232 233 static int __map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) 234 { 235 int use_sg; 236 int dma_dir = cmd->sc_data_direction; 237 238 use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir); 239 if (use_sg > 0) { 240 SYM_UCMD_PTR(cmd)->data_mapped = 2; 241 SYM_UCMD_PTR(cmd)->data_mapping = use_sg; 242 } 243 244 return use_sg; 245 } 246 247 #define unmap_scsi_data(np, cmd) \ 248 __unmap_scsi_data(np->s.device, cmd) 249 #define map_scsi_single_data(np, cmd) \ 250 __map_scsi_single_data(np->s.device, cmd) 251 #define map_scsi_sg_data(np, cmd) \ 252 __map_scsi_sg_data(np->s.device, cmd) 253 /* 254 * Complete a pending CAM CCB. 
255 */ 256 void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *cmd) 257 { 258 unmap_scsi_data(np, cmd); 259 cmd->scsi_done(cmd); 260 } 261 262 static void sym_xpt_done2(struct sym_hcb *np, struct scsi_cmnd *cmd, int cam_status) 263 { 264 sym_set_cam_status(cmd, cam_status); 265 sym_xpt_done(np, cmd); 266 } 267 268 269 /* 270 * Tell the SCSI layer about a BUS RESET. 271 */ 272 void sym_xpt_async_bus_reset(struct sym_hcb *np) 273 { 274 printf_notice("%s: SCSI BUS has been reset.\n", sym_name(np)); 275 np->s.settle_time = jiffies + sym_driver_setup.settle_delay * HZ; 276 np->s.settle_time_valid = 1; 277 if (sym_verbose >= 2) 278 printf_info("%s: command processing suspended for %d seconds\n", 279 sym_name(np), sym_driver_setup.settle_delay); 280 } 281 282 /* 283 * Tell the SCSI layer about a BUS DEVICE RESET message sent. 284 */ 285 void sym_xpt_async_sent_bdr(struct sym_hcb *np, int target) 286 { 287 printf_notice("%s: TARGET %d has been reset.\n", sym_name(np), target); 288 } 289 290 /* 291 * Choose the more appropriate CAM status if 292 * the IO encountered an extended error. 293 */ 294 static int sym_xerr_cam_status(int cam_status, int x_status) 295 { 296 if (x_status) { 297 if (x_status & XE_PARITY_ERR) 298 cam_status = DID_PARITY; 299 else if (x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) 300 cam_status = DID_ERROR; 301 else if (x_status & XE_BAD_PHASE) 302 cam_status = DID_ERROR; 303 else 304 cam_status = DID_ERROR; 305 } 306 return cam_status; 307 } 308 309 /* 310 * Build CAM result for a failed or auto-sensed IO. 311 */ 312 void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid) 313 { 314 struct scsi_cmnd *cmd = cp->cmd; 315 u_int cam_status, scsi_status, drv_status; 316 317 drv_status = 0; 318 cam_status = DID_OK; 319 scsi_status = cp->ssss_status; 320 321 if (cp->host_flags & HF_SENSE) { 322 scsi_status = cp->sv_scsi_status; 323 resid = cp->sv_resid; 324 if (sym_verbose && cp->sv_xerr_status) 325 sym_print_xerr(cmd, cp->sv_xerr_status); 326 if (cp->host_status == HS_COMPLETE && 327 cp->ssss_status == S_GOOD && 328 cp->xerr_status == 0) { 329 cam_status = sym_xerr_cam_status(DID_OK, 330 cp->sv_xerr_status); 331 drv_status = DRIVER_SENSE; 332 /* 333 * Bounce back the sense data to user. 334 */ 335 memset(&cmd->sense_buffer, 0, sizeof(cmd->sense_buffer)); 336 memcpy(cmd->sense_buffer, cp->sns_bbuf, 337 min(sizeof(cmd->sense_buffer), 338 (size_t)SYM_SNS_BBUF_LEN)); 339 #if 0 340 /* 341 * If the device reports a UNIT ATTENTION condition 342 * due to a RESET condition, we should consider all 343 * disconnect CCBs for this unit as aborted. 344 */ 345 if (1) { 346 u_char *p; 347 p = (u_char *) cmd->sense_data; 348 if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29) 349 sym_clear_tasks(np, DID_ABORT, 350 cp->target,cp->lun, -1); 351 } 352 #endif 353 } else { 354 /* 355 * Error return from our internal request sense. This 356 * is bad: we must clear the contingent allegiance 357 * condition otherwise the device will always return 358 * BUSY. Use a big stick. 
359 */ 360 sym_reset_scsi_target(np, cmd->device->id); 361 cam_status = DID_ERROR; 362 } 363 } else if (cp->host_status == HS_COMPLETE) /* Bad SCSI status */ 364 cam_status = DID_OK; 365 else if (cp->host_status == HS_SEL_TIMEOUT) /* Selection timeout */ 366 cam_status = DID_NO_CONNECT; 367 else if (cp->host_status == HS_UNEXPECTED) /* Unexpected BUS FREE*/ 368 cam_status = DID_ERROR; 369 else { /* Extended error */ 370 if (sym_verbose) { 371 sym_print_addr(cmd, "COMMAND FAILED (%x %x %x).\n", 372 cp->host_status, cp->ssss_status, 373 cp->xerr_status); 374 } 375 /* 376 * Set the most appropriate value for CAM status. 377 */ 378 cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status); 379 } 380 cmd->resid = resid; 381 cmd->result = (drv_status << 24) + (cam_status << 16) + scsi_status; 382 } 383 384 385 /* 386 * Build the scatter/gather array for an I/O. 387 */ 388 389 static int sym_scatter_no_sglist(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd) 390 { 391 struct sym_tblmove *data = &cp->phys.data[SYM_CONF_MAX_SG-1]; 392 int segment; 393 unsigned int len = cmd->request_bufflen; 394 395 if (len) { 396 dma_addr_t baddr = map_scsi_single_data(np, cmd); 397 if (baddr) { 398 if (len & 1) { 399 struct sym_tcb *tp = &np->target[cp->target]; 400 if (tp->head.wval & EWS) { 401 len++; 402 cp->odd_byte_adjustment++; 403 } 404 } 405 cp->data_len = len; 406 sym_build_sge(np, data, baddr, len); 407 segment = 1; 408 } else { 409 segment = -2; 410 } 411 } else { 412 segment = 0; 413 } 414 415 return segment; 416 } 417 418 static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd) 419 { 420 int segment; 421 int use_sg = (int) cmd->use_sg; 422 423 cp->data_len = 0; 424 425 if (!use_sg) 426 segment = sym_scatter_no_sglist(np, cp, cmd); 427 else if ((use_sg = map_scsi_sg_data(np, cmd)) > 0) { 428 struct scatterlist *scatter = (struct scatterlist *)cmd->buffer; 429 struct sym_tcb *tp = &np->target[cp->target]; 430 struct sym_tblmove *data; 431 432 if (use_sg > SYM_CONF_MAX_SG) { 433 unmap_scsi_data(np, cmd); 434 return -1; 435 } 436 437 data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg]; 438 439 for (segment = 0; segment < use_sg; segment++) { 440 dma_addr_t baddr = sg_dma_address(&scatter[segment]); 441 unsigned int len = sg_dma_len(&scatter[segment]); 442 443 if ((len & 1) && (tp->head.wval & EWS)) { 444 len++; 445 cp->odd_byte_adjustment++; 446 } 447 448 sym_build_sge(np, &data[segment], baddr, len); 449 cp->data_len += len; 450 } 451 } else { 452 segment = -2; 453 } 454 455 return segment; 456 } 457 458 /* 459 * Queue a SCSI command. 460 */ 461 static int sym_queue_command(struct sym_hcb *np, struct scsi_cmnd *cmd) 462 { 463 struct scsi_device *sdev = cmd->device; 464 struct sym_tcb *tp; 465 struct sym_lcb *lp; 466 struct sym_ccb *cp; 467 int order; 468 469 /* 470 * Minimal checkings, so that we will not 471 * go outside our tables. 472 */ 473 if (sdev->id == np->myaddr) { 474 sym_xpt_done2(np, cmd, DID_NO_CONNECT); 475 return 0; 476 } 477 478 /* 479 * Retrieve the target descriptor. 480 */ 481 tp = &np->target[sdev->id]; 482 483 /* 484 * Select tagged/untagged. 485 */ 486 lp = sym_lp(tp, sdev->lun); 487 order = (lp && lp->s.reqtags) ? M_SIMPLE_TAG : 0; 488 489 /* 490 * Queue the SCSI IO. 491 */ 492 cp = sym_get_ccb(np, cmd, order); 493 if (!cp) 494 return 1; /* Means resource shortage */ 495 sym_queue_scsiio(np, cmd, cp); 496 return 0; 497 } 498 499 /* 500 * Setup buffers and pointers that address the CDB. 
501 */ 502 static inline int sym_setup_cdb(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) 503 { 504 memcpy(cp->cdb_buf, cmd->cmnd, cmd->cmd_len); 505 506 cp->phys.cmd.addr = CCB_BA(cp, cdb_buf[0]); 507 cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len); 508 509 return 0; 510 } 511 512 /* 513 * Setup pointers that address the data and start the I/O. 514 */ 515 int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) 516 { 517 struct sym_tcb *tp = &np->target[cp->target]; 518 struct sym_lcb *lp = sym_lp(tp, cp->lun); 519 u32 lastp, goalp; 520 int dir; 521 522 /* 523 * Build the CDB. 524 */ 525 if (sym_setup_cdb(np, cmd, cp)) 526 goto out_abort; 527 528 /* 529 * No direction means no data. 530 */ 531 dir = cmd->sc_data_direction; 532 if (dir != DMA_NONE) { 533 cp->segments = sym_scatter(np, cp, cmd); 534 if (cp->segments < 0) { 535 sym_set_cam_status(cmd, DID_ERROR); 536 goto out_abort; 537 } 538 539 /* 540 * No segments means no data. 541 */ 542 if (!cp->segments) 543 dir = DMA_NONE; 544 } else { 545 cp->data_len = 0; 546 cp->segments = 0; 547 } 548 549 /* 550 * Set the data pointer. 551 */ 552 switch (dir) { 553 case DMA_BIDIRECTIONAL: 554 printk("%s: got DMA_BIDIRECTIONAL command", sym_name(np)); 555 sym_set_cam_status(cmd, DID_ERROR); 556 goto out_abort; 557 case DMA_TO_DEVICE: 558 goalp = SCRIPTA_BA(np, data_out2) + 8; 559 lastp = goalp - 8 - (cp->segments * (2*4)); 560 break; 561 case DMA_FROM_DEVICE: 562 cp->host_flags |= HF_DATA_IN; 563 goalp = SCRIPTA_BA(np, data_in2) + 8; 564 lastp = goalp - 8 - (cp->segments * (2*4)); 565 break; 566 case DMA_NONE: 567 default: 568 lastp = goalp = SCRIPTB_BA(np, no_data); 569 break; 570 } 571 572 /* 573 * Set all pointers values needed by SCRIPTS. 574 */ 575 cp->phys.head.lastp = cpu_to_scr(lastp); 576 cp->phys.head.savep = cpu_to_scr(lastp); 577 cp->startp = cp->phys.head.savep; 578 cp->goalp = cpu_to_scr(goalp); 579 580 /* 581 * When `#ifed 1', the code below makes the driver 582 * panic on the first attempt to write to a SCSI device. 583 * It is the first test we want to do after a driver 584 * change that does not seem obviously safe. :) 585 */ 586 #if 0 587 switch (cp->cdb_buf[0]) { 588 case 0x0A: case 0x2A: case 0xAA: 589 panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n"); 590 break; 591 default: 592 break; 593 } 594 #endif 595 596 /* 597 * activate this job. 598 */ 599 sym_start_next_ccbs(np, lp, 2); 600 return 0; 601 602 out_abort: 603 sym_free_ccb(np, cp); 604 sym_xpt_done(np, cmd); 605 return 0; 606 } 607 608 609 /* 610 * timer daemon. 611 * 612 * Misused to keep the driver running when 613 * interrupts are not configured correctly. 614 */ 615 static void sym_timer(struct sym_hcb *np) 616 { 617 unsigned long thistime = jiffies; 618 619 /* 620 * Restart the timer. 621 */ 622 np->s.timer.expires = thistime + SYM_CONF_TIMER_INTERVAL; 623 add_timer(&np->s.timer); 624 625 /* 626 * If we are resetting the ncr, wait for settle_time before 627 * clearing it. Then command processing will be resumed. 628 */ 629 if (np->s.settle_time_valid) { 630 if (time_before_eq(np->s.settle_time, thistime)) { 631 if (sym_verbose >= 2 ) 632 printk("%s: command processing resumed\n", 633 sym_name(np)); 634 np->s.settle_time_valid = 0; 635 } 636 return; 637 } 638 639 /* 640 * Nothing to do for now, but that may come. 
641 */ 642 if (np->s.lasttime + 4*HZ < thistime) { 643 np->s.lasttime = thistime; 644 } 645 646 #ifdef SYM_CONF_PCIQ_MAY_MISS_COMPLETIONS 647 /* 648 * Some way-broken PCI bridges may lead to 649 * completions being lost when the clearing 650 * of the INTFLY flag by the CPU occurs 651 * concurrently with the chip raising this flag. 652 * If this ever happen, lost completions will 653 * be reaped here. 654 */ 655 sym_wakeup_done(np); 656 #endif 657 } 658 659 660 /* 661 * PCI BUS error handler. 662 */ 663 void sym_log_bus_error(struct sym_hcb *np) 664 { 665 u_short pci_sts; 666 pci_read_config_word(np->s.device, PCI_STATUS, &pci_sts); 667 if (pci_sts & 0xf900) { 668 pci_write_config_word(np->s.device, PCI_STATUS, pci_sts); 669 printf("%s: PCI STATUS = 0x%04x\n", 670 sym_name(np), pci_sts & 0xf900); 671 } 672 } 673 674 /* 675 * queuecommand method. Entered with the host adapter lock held and 676 * interrupts disabled. 677 */ 678 static int sym53c8xx_queue_command(struct scsi_cmnd *cmd, 679 void (*done)(struct scsi_cmnd *)) 680 { 681 struct sym_hcb *np = SYM_SOFTC_PTR(cmd); 682 struct sym_ucmd *ucp = SYM_UCMD_PTR(cmd); 683 int sts = 0; 684 685 cmd->scsi_done = done; 686 memset(ucp, 0, sizeof(*ucp)); 687 688 /* 689 * Shorten our settle_time if needed for 690 * this command not to time out. 691 */ 692 if (np->s.settle_time_valid && cmd->timeout_per_command) { 693 unsigned long tlimit = jiffies + cmd->timeout_per_command; 694 tlimit -= SYM_CONF_TIMER_INTERVAL*2; 695 if (time_after(np->s.settle_time, tlimit)) { 696 np->s.settle_time = tlimit; 697 } 698 } 699 700 if (np->s.settle_time_valid) 701 return SCSI_MLQUEUE_HOST_BUSY; 702 703 sts = sym_queue_command(np, cmd); 704 if (sts) 705 return SCSI_MLQUEUE_HOST_BUSY; 706 return 0; 707 } 708 709 /* 710 * Linux entry point of the interrupt handler. 711 */ 712 static irqreturn_t sym53c8xx_intr(int irq, void *dev_id, struct pt_regs * regs) 713 { 714 unsigned long flags; 715 struct sym_hcb *np = (struct sym_hcb *)dev_id; 716 717 if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("["); 718 719 spin_lock_irqsave(np->s.host->host_lock, flags); 720 sym_interrupt(np); 721 spin_unlock_irqrestore(np->s.host->host_lock, flags); 722 723 if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("]\n"); 724 725 return IRQ_HANDLED; 726 } 727 728 /* 729 * Linux entry point of the timer handler 730 */ 731 static void sym53c8xx_timer(unsigned long npref) 732 { 733 struct sym_hcb *np = (struct sym_hcb *)npref; 734 unsigned long flags; 735 736 spin_lock_irqsave(np->s.host->host_lock, flags); 737 sym_timer(np); 738 spin_unlock_irqrestore(np->s.host->host_lock, flags); 739 } 740 741 742 /* 743 * What the eh thread wants us to perform. 744 */ 745 #define SYM_EH_ABORT 0 746 #define SYM_EH_DEVICE_RESET 1 747 #define SYM_EH_BUS_RESET 2 748 #define SYM_EH_HOST_RESET 3 749 750 /* 751 * What we will do regarding the involved SCSI command. 752 */ 753 #define SYM_EH_DO_IGNORE 0 754 #define SYM_EH_DO_COMPLETE 1 755 #define SYM_EH_DO_WAIT 2 756 757 /* 758 * Our general completion handler. 
759 */ 760 static void __sym_eh_done(struct scsi_cmnd *cmd, int timed_out) 761 { 762 struct sym_eh_wait *ep = SYM_UCMD_PTR(cmd)->eh_wait; 763 if (!ep) 764 return; 765 766 /* Try to avoid a race here (not 100% safe) */ 767 if (!timed_out) { 768 ep->timed_out = 0; 769 if (ep->to_do == SYM_EH_DO_WAIT && !del_timer(&ep->timer)) 770 return; 771 } 772 773 /* Revert everything */ 774 SYM_UCMD_PTR(cmd)->eh_wait = NULL; 775 cmd->scsi_done = ep->old_done; 776 777 /* Wake up the eh thread if it wants to sleep */ 778 if (ep->to_do == SYM_EH_DO_WAIT) 779 complete(&ep->done); 780 } 781 782 /* 783 * scsi_done() alias when error recovery is in progress. 784 */ 785 static void sym_eh_done(struct scsi_cmnd *cmd) { __sym_eh_done(cmd, 0); } 786 787 /* 788 * Some timeout handler to avoid waiting too long. 789 */ 790 static void sym_eh_timeout(u_long p) { __sym_eh_done((struct scsi_cmnd *)p, 1); } 791 792 /* 793 * Generic method for our eh processing. 794 * The 'op' argument tells what we have to do. 795 */ 796 static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd) 797 { 798 struct sym_hcb *np = SYM_SOFTC_PTR(cmd); 799 SYM_QUEHEAD *qp; 800 int to_do = SYM_EH_DO_IGNORE; 801 int sts = -1; 802 struct sym_eh_wait eh, *ep = &eh; 803 804 dev_warn(&cmd->device->sdev_gendev, "%s operation started.\n", opname); 805 806 /* This one is queued in some place -> to wait for completion */ 807 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { 808 struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); 809 if (cp->cmd == cmd) { 810 to_do = SYM_EH_DO_WAIT; 811 goto prepare; 812 } 813 } 814 815 prepare: 816 /* Prepare stuff to either ignore, complete or wait for completion */ 817 switch(to_do) { 818 default: 819 case SYM_EH_DO_IGNORE: 820 break; 821 case SYM_EH_DO_WAIT: 822 init_completion(&ep->done); 823 /* fall through */ 824 case SYM_EH_DO_COMPLETE: 825 ep->old_done = cmd->scsi_done; 826 cmd->scsi_done = sym_eh_done; 827 SYM_UCMD_PTR(cmd)->eh_wait = ep; 828 } 829 830 /* Try to proceed the operation we have been asked for */ 831 sts = -1; 832 switch(op) { 833 case SYM_EH_ABORT: 834 sts = sym_abort_scsiio(np, cmd, 1); 835 break; 836 case SYM_EH_DEVICE_RESET: 837 sts = sym_reset_scsi_target(np, cmd->device->id); 838 break; 839 case SYM_EH_BUS_RESET: 840 sym_reset_scsi_bus(np, 1); 841 sts = 0; 842 break; 843 case SYM_EH_HOST_RESET: 844 sym_reset_scsi_bus(np, 0); 845 sym_start_up (np, 1); 846 sts = 0; 847 break; 848 default: 849 break; 850 } 851 852 /* On error, restore everything and cross fingers :) */ 853 if (sts) { 854 SYM_UCMD_PTR(cmd)->eh_wait = NULL; 855 cmd->scsi_done = ep->old_done; 856 to_do = SYM_EH_DO_IGNORE; 857 } 858 859 ep->to_do = to_do; 860 /* Complete the command with locks held as required by the driver */ 861 if (to_do == SYM_EH_DO_COMPLETE) 862 sym_xpt_done2(np, cmd, DID_ABORT); 863 864 /* Wait for completion with locks released, as required by kernel */ 865 if (to_do == SYM_EH_DO_WAIT) { 866 init_timer(&ep->timer); 867 ep->timer.expires = jiffies + (5*HZ); 868 ep->timer.function = sym_eh_timeout; 869 ep->timer.data = (u_long)cmd; 870 ep->timed_out = 1; /* Be pessimistic for once :) */ 871 add_timer(&ep->timer); 872 spin_unlock_irq(np->s.host->host_lock); 873 wait_for_completion(&ep->done); 874 spin_lock_irq(np->s.host->host_lock); 875 if (ep->timed_out) 876 sts = -2; 877 } 878 dev_warn(&cmd->device->sdev_gendev, "%s operation %s.\n", opname, 879 sts==0 ? "complete" :sts==-2 ? "timed-out" : "failed"); 880 return sts ? 
SCSI_FAILED : SCSI_SUCCESS; 881 } 882 883 884 /* 885 * Error handlers called from the eh thread (one thread per HBA). 886 */ 887 static int sym53c8xx_eh_abort_handler(struct scsi_cmnd *cmd) 888 { 889 int rc; 890 891 spin_lock_irq(cmd->device->host->host_lock); 892 rc = sym_eh_handler(SYM_EH_ABORT, "ABORT", cmd); 893 spin_unlock_irq(cmd->device->host->host_lock); 894 895 return rc; 896 } 897 898 static int sym53c8xx_eh_device_reset_handler(struct scsi_cmnd *cmd) 899 { 900 int rc; 901 902 spin_lock_irq(cmd->device->host->host_lock); 903 rc = sym_eh_handler(SYM_EH_DEVICE_RESET, "DEVICE RESET", cmd); 904 spin_unlock_irq(cmd->device->host->host_lock); 905 906 return rc; 907 } 908 909 static int sym53c8xx_eh_bus_reset_handler(struct scsi_cmnd *cmd) 910 { 911 int rc; 912 913 spin_lock_irq(cmd->device->host->host_lock); 914 rc = sym_eh_handler(SYM_EH_BUS_RESET, "BUS RESET", cmd); 915 spin_unlock_irq(cmd->device->host->host_lock); 916 917 return rc; 918 } 919 920 static int sym53c8xx_eh_host_reset_handler(struct scsi_cmnd *cmd) 921 { 922 int rc; 923 924 spin_lock_irq(cmd->device->host->host_lock); 925 rc = sym_eh_handler(SYM_EH_HOST_RESET, "HOST RESET", cmd); 926 spin_unlock_irq(cmd->device->host->host_lock); 927 928 return rc; 929 } 930 931 /* 932 * Tune device queuing depth, according to various limits. 933 */ 934 static void sym_tune_dev_queuing(struct sym_tcb *tp, int lun, u_short reqtags) 935 { 936 struct sym_lcb *lp = sym_lp(tp, lun); 937 u_short oldtags; 938 939 if (!lp) 940 return; 941 942 oldtags = lp->s.reqtags; 943 944 if (reqtags > lp->s.scdev_depth) 945 reqtags = lp->s.scdev_depth; 946 947 lp->started_limit = reqtags ? reqtags : 2; 948 lp->started_max = 1; 949 lp->s.reqtags = reqtags; 950 951 if (reqtags != oldtags) { 952 dev_info(&tp->starget->dev, 953 "tagged command queuing %s, command queue depth %d.\n", 954 lp->s.reqtags ? "enabled" : "disabled", 955 lp->started_limit); 956 } 957 } 958 959 /* 960 * Linux select queue depths function 961 */ 962 #define DEF_DEPTH (sym_driver_setup.max_tag) 963 #define ALL_TARGETS -2 964 #define NO_TARGET -1 965 #define ALL_LUNS -2 966 #define NO_LUN -1 967 968 static int device_queue_depth(struct sym_hcb *np, int target, int lun) 969 { 970 int c, h, t, u, v; 971 char *p = sym_driver_setup.tag_ctrl; 972 char *ep; 973 974 h = -1; 975 t = NO_TARGET; 976 u = NO_LUN; 977 while ((c = *p++) != 0) { 978 v = simple_strtoul(p, &ep, 0); 979 switch(c) { 980 case '/': 981 ++h; 982 t = ALL_TARGETS; 983 u = ALL_LUNS; 984 break; 985 case 't': 986 if (t != target) 987 t = (target == v) ? v : NO_TARGET; 988 u = ALL_LUNS; 989 break; 990 case 'u': 991 if (u != lun) 992 u = (lun == v) ? v : NO_LUN; 993 break; 994 case 'q': 995 if (h == np->s.unit && 996 (t == ALL_TARGETS || t == target) && 997 (u == ALL_LUNS || u == lun)) 998 return v; 999 break; 1000 case '-': 1001 t = ALL_TARGETS; 1002 u = ALL_LUNS; 1003 break; 1004 default: 1005 break; 1006 } 1007 p = ep; 1008 } 1009 return DEF_DEPTH; 1010 } 1011 1012 static int sym53c8xx_slave_alloc(struct scsi_device *sdev) 1013 { 1014 struct sym_hcb *np = sym_get_hcb(sdev->host); 1015 struct sym_tcb *tp = &np->target[sdev->id]; 1016 struct sym_lcb *lp; 1017 1018 if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN) 1019 return -ENXIO; 1020 1021 tp->starget = sdev->sdev_target; 1022 /* 1023 * Fail the device init if the device is flagged NOSCAN at BOOT in 1024 * the NVRAM. This may speed up boot and maintain coherency with 1025 * BIOS device numbering. 
	 * Clearing the flag allows the user to rescan skipped devices later.
	 * We also return an error for devices not flagged for SCAN LUNS in
	 * the NVRAM, since some single-LUN devices behave badly when asked
	 * for a non-zero LUN.
	 */

	if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) {
		tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
		starget_printk(KERN_INFO, tp->starget,
				"Scan at boot disabled in NVRAM\n");
		return -ENXIO;
	}

	if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) {
		if (sdev->lun != 0)
			return -ENXIO;
		starget_printk(KERN_INFO, tp->starget,
				"Multiple LUNs disabled in NVRAM\n");
	}

	lp = sym_alloc_lcb(np, sdev->id, sdev->lun);
	if (!lp)
		return -ENOMEM;

	spi_min_period(tp->starget) = tp->usr_period;
	spi_max_width(tp->starget) = tp->usr_width;

	return 0;
}

/*
 * Linux entry point for device queue sizing.
 */
static int sym53c8xx_slave_configure(struct scsi_device *sdev)
{
	struct sym_hcb *np = sym_get_hcb(sdev->host);
	struct sym_tcb *tp = &np->target[sdev->id];
	struct sym_lcb *lp = sym_lp(tp, sdev->lun);
	int reqtags, depth_to_use;

	/*
	 * Get user flags.
	 */
	lp->curr_flags = lp->user_flags;

	/*
	 * Select queue depth from driver setup.
	 * Do not use more than configured by user.
	 * Use at least 2.
	 * Do not use more than our maximum.
	 */
	reqtags = device_queue_depth(np, sdev->id, sdev->lun);
	if (reqtags > tp->usrtags)
		reqtags = tp->usrtags;
	if (!sdev->tagged_supported)
		reqtags = 0;
#if 1 /* Avoid queuing commands locally for no good reason */
	if (reqtags > SYM_CONF_MAX_TAG)
		reqtags = SYM_CONF_MAX_TAG;
	depth_to_use = (reqtags ? reqtags : 2);
#else
	depth_to_use = (reqtags ? SYM_CONF_MAX_TAG : 2);
#endif
	scsi_adjust_queue_depth(sdev,
				(sdev->tagged_supported ?
				 MSG_SIMPLE_TAG : 0),
				depth_to_use);
	lp->s.scdev_depth = depth_to_use;
	sym_tune_dev_queuing(tp, sdev->lun, reqtags);

	if (!spi_initial_dv(sdev->sdev_target))
		spi_dv_device(sdev);

	return 0;
}

static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
{
	struct sym_hcb *np = sym_get_hcb(sdev->host);
	struct sym_lcb *lp = sym_lp(&np->target[sdev->id], sdev->lun);

	if (lp->itlq_tbl)
		sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK * 4, "ITLQ_TBL");
	kfree(lp->cb_tags);
	sym_mfree_dma(lp, sizeof(*lp), "LCB");
}

/*
 * Linux entry point for info() function
 */
static const char *sym53c8xx_info (struct Scsi_Host *host)
{
	return SYM_DRIVER_NAME;
}


#ifdef SYM_LINUX_PROC_INFO_SUPPORT
/*
 * Proc file system stuff
 *
 * A read operation returns adapter information.
 * A write operation is a control command.
 * The string is parsed in the driver code and the command is passed
 * to the sym_usercmd() function.
1129 */ 1130 1131 #ifdef SYM_LINUX_USER_COMMAND_SUPPORT 1132 1133 struct sym_usrcmd { 1134 u_long target; 1135 u_long lun; 1136 u_long data; 1137 u_long cmd; 1138 }; 1139 1140 #define UC_SETSYNC 10 1141 #define UC_SETTAGS 11 1142 #define UC_SETDEBUG 12 1143 #define UC_SETWIDE 14 1144 #define UC_SETFLAG 15 1145 #define UC_SETVERBOSE 17 1146 #define UC_RESETDEV 18 1147 #define UC_CLEARDEV 19 1148 1149 static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc) 1150 { 1151 struct sym_tcb *tp; 1152 int t, l; 1153 1154 switch (uc->cmd) { 1155 case 0: return; 1156 1157 #ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT 1158 case UC_SETDEBUG: 1159 sym_debug_flags = uc->data; 1160 break; 1161 #endif 1162 case UC_SETVERBOSE: 1163 np->verbose = uc->data; 1164 break; 1165 default: 1166 /* 1167 * We assume that other commands apply to targets. 1168 * This should always be the case and avoid the below 1169 * 4 lines to be repeated 6 times. 1170 */ 1171 for (t = 0; t < SYM_CONF_MAX_TARGET; t++) { 1172 if (!((uc->target >> t) & 1)) 1173 continue; 1174 tp = &np->target[t]; 1175 1176 switch (uc->cmd) { 1177 1178 case UC_SETSYNC: 1179 if (!uc->data || uc->data >= 255) { 1180 tp->tgoal.iu = tp->tgoal.dt = 1181 tp->tgoal.qas = 0; 1182 tp->tgoal.offset = 0; 1183 } else if (uc->data <= 9 && np->minsync_dt) { 1184 if (uc->data < np->minsync_dt) 1185 uc->data = np->minsync_dt; 1186 tp->tgoal.iu = tp->tgoal.dt = 1187 tp->tgoal.qas = 1; 1188 tp->tgoal.width = 1; 1189 tp->tgoal.period = uc->data; 1190 tp->tgoal.offset = np->maxoffs_dt; 1191 } else { 1192 if (uc->data < np->minsync) 1193 uc->data = np->minsync; 1194 tp->tgoal.iu = tp->tgoal.dt = 1195 tp->tgoal.qas = 0; 1196 tp->tgoal.period = uc->data; 1197 tp->tgoal.offset = np->maxoffs; 1198 } 1199 tp->tgoal.check_nego = 1; 1200 break; 1201 case UC_SETWIDE: 1202 tp->tgoal.width = uc->data ? 
1 : 0; 1203 tp->tgoal.check_nego = 1; 1204 break; 1205 case UC_SETTAGS: 1206 for (l = 0; l < SYM_CONF_MAX_LUN; l++) 1207 sym_tune_dev_queuing(tp, l, uc->data); 1208 break; 1209 case UC_RESETDEV: 1210 tp->to_reset = 1; 1211 np->istat_sem = SEM; 1212 OUTB(np, nc_istat, SIGP|SEM); 1213 break; 1214 case UC_CLEARDEV: 1215 for (l = 0; l < SYM_CONF_MAX_LUN; l++) { 1216 struct sym_lcb *lp = sym_lp(tp, l); 1217 if (lp) lp->to_clear = 1; 1218 } 1219 np->istat_sem = SEM; 1220 OUTB(np, nc_istat, SIGP|SEM); 1221 break; 1222 case UC_SETFLAG: 1223 tp->usrflags = uc->data; 1224 break; 1225 } 1226 } 1227 break; 1228 } 1229 } 1230 1231 static int skip_spaces(char *ptr, int len) 1232 { 1233 int cnt, c; 1234 1235 for (cnt = len; cnt > 0 && (c = *ptr++) && isspace(c); cnt--); 1236 1237 return (len - cnt); 1238 } 1239 1240 static int get_int_arg(char *ptr, int len, u_long *pv) 1241 { 1242 char *end; 1243 1244 *pv = simple_strtoul(ptr, &end, 10); 1245 return (end - ptr); 1246 } 1247 1248 static int is_keyword(char *ptr, int len, char *verb) 1249 { 1250 int verb_len = strlen(verb); 1251 1252 if (len >= verb_len && !memcmp(verb, ptr, verb_len)) 1253 return verb_len; 1254 else 1255 return 0; 1256 } 1257 1258 #define SKIP_SPACES(ptr, len) \ 1259 if ((arg_len = skip_spaces(ptr, len)) < 1) \ 1260 return -EINVAL; \ 1261 ptr += arg_len; len -= arg_len; 1262 1263 #define GET_INT_ARG(ptr, len, v) \ 1264 if (!(arg_len = get_int_arg(ptr, len, &(v)))) \ 1265 return -EINVAL; \ 1266 ptr += arg_len; len -= arg_len; 1267 1268 1269 /* 1270 * Parse a control command 1271 */ 1272 1273 static int sym_user_command(struct sym_hcb *np, char *buffer, int length) 1274 { 1275 char *ptr = buffer; 1276 int len = length; 1277 struct sym_usrcmd cmd, *uc = &cmd; 1278 int arg_len; 1279 u_long target; 1280 1281 memset(uc, 0, sizeof(*uc)); 1282 1283 if (len > 0 && ptr[len-1] == '\n') 1284 --len; 1285 1286 if ((arg_len = is_keyword(ptr, len, "setsync")) != 0) 1287 uc->cmd = UC_SETSYNC; 1288 else if ((arg_len = is_keyword(ptr, len, "settags")) != 0) 1289 uc->cmd = UC_SETTAGS; 1290 else if ((arg_len = is_keyword(ptr, len, "setverbose")) != 0) 1291 uc->cmd = UC_SETVERBOSE; 1292 else if ((arg_len = is_keyword(ptr, len, "setwide")) != 0) 1293 uc->cmd = UC_SETWIDE; 1294 #ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT 1295 else if ((arg_len = is_keyword(ptr, len, "setdebug")) != 0) 1296 uc->cmd = UC_SETDEBUG; 1297 #endif 1298 else if ((arg_len = is_keyword(ptr, len, "setflag")) != 0) 1299 uc->cmd = UC_SETFLAG; 1300 else if ((arg_len = is_keyword(ptr, len, "resetdev")) != 0) 1301 uc->cmd = UC_RESETDEV; 1302 else if ((arg_len = is_keyword(ptr, len, "cleardev")) != 0) 1303 uc->cmd = UC_CLEARDEV; 1304 else 1305 arg_len = 0; 1306 1307 #ifdef DEBUG_PROC_INFO 1308 printk("sym_user_command: arg_len=%d, cmd=%ld\n", arg_len, uc->cmd); 1309 #endif 1310 1311 if (!arg_len) 1312 return -EINVAL; 1313 ptr += arg_len; len -= arg_len; 1314 1315 switch(uc->cmd) { 1316 case UC_SETSYNC: 1317 case UC_SETTAGS: 1318 case UC_SETWIDE: 1319 case UC_SETFLAG: 1320 case UC_RESETDEV: 1321 case UC_CLEARDEV: 1322 SKIP_SPACES(ptr, len); 1323 if ((arg_len = is_keyword(ptr, len, "all")) != 0) { 1324 ptr += arg_len; len -= arg_len; 1325 uc->target = ~0; 1326 } else { 1327 GET_INT_ARG(ptr, len, target); 1328 uc->target = (1<<target); 1329 #ifdef DEBUG_PROC_INFO 1330 printk("sym_user_command: target=%ld\n", target); 1331 #endif 1332 } 1333 break; 1334 } 1335 1336 switch(uc->cmd) { 1337 case UC_SETVERBOSE: 1338 case UC_SETSYNC: 1339 case UC_SETTAGS: 1340 case UC_SETWIDE: 1341 SKIP_SPACES(ptr, len); 
		GET_INT_ARG(ptr, len, uc->data);
#ifdef DEBUG_PROC_INFO
		printk("sym_user_command: data=%ld\n", uc->data);
#endif
		break;
#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
	case UC_SETDEBUG:
		while (len > 0) {
			SKIP_SPACES(ptr, len);
			if ((arg_len = is_keyword(ptr, len, "alloc")))
				uc->data |= DEBUG_ALLOC;
			else if ((arg_len = is_keyword(ptr, len, "phase")))
				uc->data |= DEBUG_PHASE;
			else if ((arg_len = is_keyword(ptr, len, "queue")))
				uc->data |= DEBUG_QUEUE;
			else if ((arg_len = is_keyword(ptr, len, "result")))
				uc->data |= DEBUG_RESULT;
			else if ((arg_len = is_keyword(ptr, len, "scatter")))
				uc->data |= DEBUG_SCATTER;
			else if ((arg_len = is_keyword(ptr, len, "script")))
				uc->data |= DEBUG_SCRIPT;
			else if ((arg_len = is_keyword(ptr, len, "tiny")))
				uc->data |= DEBUG_TINY;
			else if ((arg_len = is_keyword(ptr, len, "timing")))
				uc->data |= DEBUG_TIMING;
			else if ((arg_len = is_keyword(ptr, len, "nego")))
				uc->data |= DEBUG_NEGO;
			else if ((arg_len = is_keyword(ptr, len, "tags")))
				uc->data |= DEBUG_TAGS;
			else if ((arg_len = is_keyword(ptr, len, "pointer")))
				uc->data |= DEBUG_POINTER;
			else
				return -EINVAL;
			ptr += arg_len; len -= arg_len;
		}
#ifdef DEBUG_PROC_INFO
		printk("sym_user_command: data=%ld\n", uc->data);
#endif
		break;
#endif /* SYM_LINUX_DEBUG_CONTROL_SUPPORT */
	case UC_SETFLAG:
		while (len > 0) {
			SKIP_SPACES(ptr, len);
			if ((arg_len = is_keyword(ptr, len, "no_disc")))
				uc->data &= ~SYM_DISC_ENABLED;
			else
				return -EINVAL;
			ptr += arg_len; len -= arg_len;
		}
		break;
	default:
		break;
	}

	if (len)
		return -EINVAL;
	else {
		unsigned long flags;

		spin_lock_irqsave(np->s.host->host_lock, flags);
		sym_exec_user_command (np, uc);
		spin_unlock_irqrestore(np->s.host->host_lock, flags);
	}
	return length;
}

#endif /* SYM_LINUX_USER_COMMAND_SUPPORT */


#ifdef SYM_LINUX_USER_INFO_SUPPORT
/*
 * Information through the proc file system.
 */
struct info_str {
	char *buffer;
	int length;
	int offset;
	int pos;
};

static void copy_mem_info(struct info_str *info, char *data, int len)
{
	if (info->pos + len > info->length)
		len = info->length - info->pos;

	if (info->pos + len < info->offset) {
		info->pos += len;
		return;
	}
	if (info->pos < info->offset) {
		data += (info->offset - info->pos);
		len  -= (info->offset - info->pos);
	}

	if (len > 0) {
		memcpy(info->buffer + info->pos, data, len);
		info->pos += len;
	}
}

static int copy_info(struct info_str *info, char *fmt, ...)
{
	va_list args;
	char buf[81];
	int len;

	va_start(args, fmt);
	len = vsprintf(buf, fmt, args);
	va_end(args);

	copy_mem_info(info, buf, len);
	return len;
}

/*
 * Copy formatted information into the input buffer.
1458 */ 1459 static int sym_host_info(struct sym_hcb *np, char *ptr, off_t offset, int len) 1460 { 1461 struct info_str info; 1462 1463 info.buffer = ptr; 1464 info.length = len; 1465 info.offset = offset; 1466 info.pos = 0; 1467 1468 copy_info(&info, "Chip " NAME53C "%s, device id 0x%x, " 1469 "revision id 0x%x\n", 1470 np->s.chip_name, np->device_id, np->revision_id); 1471 copy_info(&info, "At PCI address %s, IRQ " IRQ_FMT "\n", 1472 pci_name(np->s.device), IRQ_PRM(np->s.irq)); 1473 copy_info(&info, "Min. period factor %d, %s SCSI BUS%s\n", 1474 (int) (np->minsync_dt ? np->minsync_dt : np->minsync), 1475 np->maxwide ? "Wide" : "Narrow", 1476 np->minsync_dt ? ", DT capable" : ""); 1477 1478 copy_info(&info, "Max. started commands %d, " 1479 "max. commands per LUN %d\n", 1480 SYM_CONF_MAX_START, SYM_CONF_MAX_TAG); 1481 1482 return info.pos > info.offset? info.pos - info.offset : 0; 1483 } 1484 #endif /* SYM_LINUX_USER_INFO_SUPPORT */ 1485 1486 /* 1487 * Entry point of the scsi proc fs of the driver. 1488 * - func = 0 means read (returns adapter infos) 1489 * - func = 1 means write (not yet merget from sym53c8xx) 1490 */ 1491 static int sym53c8xx_proc_info(struct Scsi_Host *host, char *buffer, 1492 char **start, off_t offset, int length, int func) 1493 { 1494 struct sym_hcb *np = sym_get_hcb(host); 1495 int retv; 1496 1497 if (func) { 1498 #ifdef SYM_LINUX_USER_COMMAND_SUPPORT 1499 retv = sym_user_command(np, buffer, length); 1500 #else 1501 retv = -EINVAL; 1502 #endif 1503 } else { 1504 if (start) 1505 *start = buffer; 1506 #ifdef SYM_LINUX_USER_INFO_SUPPORT 1507 retv = sym_host_info(np, buffer, offset, length); 1508 #else 1509 retv = -EINVAL; 1510 #endif 1511 } 1512 1513 return retv; 1514 } 1515 #endif /* SYM_LINUX_PROC_INFO_SUPPORT */ 1516 1517 /* 1518 * Free controller resources. 1519 */ 1520 static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev) 1521 { 1522 /* 1523 * Free O/S specific resources. 1524 */ 1525 if (np->s.irq) 1526 free_irq(np->s.irq, np); 1527 if (np->s.ioaddr) 1528 pci_iounmap(pdev, np->s.ioaddr); 1529 if (np->s.ramaddr) 1530 pci_iounmap(pdev, np->s.ramaddr); 1531 /* 1532 * Free O/S independent resources. 1533 */ 1534 sym_hcb_free(np); 1535 1536 sym_mfree_dma(np, sizeof(*np), "HCB"); 1537 } 1538 1539 /* 1540 * Ask/tell the system about DMA addressing. 1541 */ 1542 static int sym_setup_bus_dma_mask(struct sym_hcb *np) 1543 { 1544 #if SYM_CONF_DMA_ADDRESSING_MODE > 0 1545 #if SYM_CONF_DMA_ADDRESSING_MODE == 1 1546 #define DMA_DAC_MASK DMA_40BIT_MASK 1547 #elif SYM_CONF_DMA_ADDRESSING_MODE == 2 1548 #define DMA_DAC_MASK DMA_64BIT_MASK 1549 #endif 1550 if ((np->features & FE_DAC) && 1551 !pci_set_dma_mask(np->s.device, DMA_DAC_MASK)) { 1552 np->use_dac = 1; 1553 return 0; 1554 } 1555 #endif 1556 1557 if (!pci_set_dma_mask(np->s.device, DMA_32BIT_MASK)) 1558 return 0; 1559 1560 printf_warning("%s: No suitable DMA available\n", sym_name(np)); 1561 return -1; 1562 } 1563 1564 /* 1565 * Host attach and initialisations. 1566 * 1567 * Allocate host data and ncb structure. 1568 * Remap MMIO region. 1569 * Do chip initialization. 1570 * If all is OK, install interrupt handling and 1571 * start the timer daemon. 
1572 */ 1573 static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt, 1574 int unit, struct sym_device *dev) 1575 { 1576 struct host_data *host_data; 1577 struct sym_hcb *np = NULL; 1578 struct Scsi_Host *instance = NULL; 1579 struct pci_dev *pdev = dev->pdev; 1580 unsigned long flags; 1581 struct sym_fw *fw; 1582 1583 printk(KERN_INFO 1584 "sym%d: <%s> rev 0x%x at pci %s irq " IRQ_FMT "\n", 1585 unit, dev->chip.name, dev->chip.revision_id, 1586 pci_name(pdev), IRQ_PRM(pdev->irq)); 1587 1588 /* 1589 * Get the firmware for this chip. 1590 */ 1591 fw = sym_find_firmware(&dev->chip); 1592 if (!fw) 1593 goto attach_failed; 1594 1595 /* 1596 * Allocate host_data structure 1597 */ 1598 instance = scsi_host_alloc(tpnt, sizeof(*host_data)); 1599 if (!instance) 1600 goto attach_failed; 1601 host_data = (struct host_data *) instance->hostdata; 1602 1603 /* 1604 * Allocate immediately the host control block, 1605 * since we are only expecting to succeed. :) 1606 * We keep track in the HCB of all the resources that 1607 * are to be released on error. 1608 */ 1609 np = __sym_calloc_dma(&pdev->dev, sizeof(*np), "HCB"); 1610 if (!np) 1611 goto attach_failed; 1612 np->s.device = pdev; 1613 np->bus_dmat = &pdev->dev; /* Result in 1 DMA pool per HBA */ 1614 host_data->ncb = np; 1615 np->s.host = instance; 1616 1617 pci_set_drvdata(pdev, np); 1618 1619 /* 1620 * Copy some useful infos to the HCB. 1621 */ 1622 np->hcb_ba = vtobus(np); 1623 np->verbose = sym_driver_setup.verbose; 1624 np->s.device = pdev; 1625 np->s.unit = unit; 1626 np->device_id = dev->chip.device_id; 1627 np->revision_id = dev->chip.revision_id; 1628 np->features = dev->chip.features; 1629 np->clock_divn = dev->chip.nr_divisor; 1630 np->maxoffs = dev->chip.offset_max; 1631 np->maxburst = dev->chip.burst_max; 1632 np->myaddr = dev->host_id; 1633 1634 /* 1635 * Edit its name. 1636 */ 1637 strlcpy(np->s.chip_name, dev->chip.name, sizeof(np->s.chip_name)); 1638 sprintf(np->s.inst_name, "sym%d", np->s.unit); 1639 1640 if (sym_setup_bus_dma_mask(np)) 1641 goto attach_failed; 1642 1643 /* 1644 * Try to map the controller chip to 1645 * virtual and physical memory. 1646 */ 1647 np->mmio_ba = (u32)dev->mmio_base; 1648 np->s.ioaddr = dev->s.ioaddr; 1649 np->s.ramaddr = dev->s.ramaddr; 1650 np->s.io_ws = (np->features & FE_IO256) ? 256 : 128; 1651 1652 /* 1653 * Map on-chip RAM if present and supported. 1654 */ 1655 if (!(np->features & FE_RAM)) 1656 dev->ram_base = 0; 1657 if (dev->ram_base) { 1658 np->ram_ba = (u32)dev->ram_base; 1659 np->ram_ws = (np->features & FE_RAM8K) ? 8192 : 4096; 1660 } 1661 1662 if (sym_hcb_attach(instance, fw, dev->nvram)) 1663 goto attach_failed; 1664 1665 /* 1666 * Install the interrupt handler. 1667 * If we synchonize the C code with SCRIPTS on interrupt, 1668 * we do not want to share the INTR line at all. 1669 */ 1670 if (request_irq(pdev->irq, sym53c8xx_intr, SA_SHIRQ, NAME53C8XX, np)) { 1671 printf_err("%s: request irq %d failure\n", 1672 sym_name(np), pdev->irq); 1673 goto attach_failed; 1674 } 1675 np->s.irq = pdev->irq; 1676 1677 /* 1678 * After SCSI devices have been opened, we cannot 1679 * reset the bus safely, so we do it here. 1680 */ 1681 spin_lock_irqsave(instance->host_lock, flags); 1682 if (sym_reset_scsi_bus(np, 0)) 1683 goto reset_failed; 1684 1685 /* 1686 * Start the SCRIPTS. 
1687 */ 1688 sym_start_up (np, 1); 1689 1690 /* 1691 * Start the timer daemon 1692 */ 1693 init_timer(&np->s.timer); 1694 np->s.timer.data = (unsigned long) np; 1695 np->s.timer.function = sym53c8xx_timer; 1696 np->s.lasttime=0; 1697 sym_timer (np); 1698 1699 /* 1700 * Fill Linux host instance structure 1701 * and return success. 1702 */ 1703 instance->max_channel = 0; 1704 instance->this_id = np->myaddr; 1705 instance->max_id = np->maxwide ? 16 : 8; 1706 instance->max_lun = SYM_CONF_MAX_LUN; 1707 instance->unique_id = pci_resource_start(pdev, 0); 1708 instance->cmd_per_lun = SYM_CONF_MAX_TAG; 1709 instance->can_queue = (SYM_CONF_MAX_START-2); 1710 instance->sg_tablesize = SYM_CONF_MAX_SG; 1711 instance->max_cmd_len = 16; 1712 BUG_ON(sym2_transport_template == NULL); 1713 instance->transportt = sym2_transport_template; 1714 1715 spin_unlock_irqrestore(instance->host_lock, flags); 1716 1717 return instance; 1718 1719 reset_failed: 1720 printf_err("%s: FATAL ERROR: CHECK SCSI BUS - CABLES, " 1721 "TERMINATION, DEVICE POWER etc.!\n", sym_name(np)); 1722 spin_unlock_irqrestore(instance->host_lock, flags); 1723 attach_failed: 1724 if (!instance) 1725 return NULL; 1726 printf_info("%s: giving up ...\n", sym_name(np)); 1727 if (np) 1728 sym_free_resources(np, pdev); 1729 scsi_host_put(instance); 1730 1731 return NULL; 1732 } 1733 1734 1735 /* 1736 * Detect and try to read SYMBIOS and TEKRAM NVRAM. 1737 */ 1738 #if SYM_CONF_NVRAM_SUPPORT 1739 static void __devinit sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp) 1740 { 1741 devp->nvram = nvp; 1742 devp->device_id = devp->chip.device_id; 1743 nvp->type = 0; 1744 1745 sym_read_nvram(devp, nvp); 1746 } 1747 #else 1748 static inline void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp) 1749 { 1750 } 1751 #endif /* SYM_CONF_NVRAM_SUPPORT */ 1752 1753 static int __devinit sym_check_supported(struct sym_device *device) 1754 { 1755 struct sym_chip *chip; 1756 struct pci_dev *pdev = device->pdev; 1757 u_char revision; 1758 unsigned long io_port = pci_resource_start(pdev, 0); 1759 int i; 1760 1761 /* 1762 * If user excluded this chip, do not initialize it. 1763 * I hate this code so much. Must kill it. 1764 */ 1765 if (io_port) { 1766 for (i = 0 ; i < 8 ; i++) { 1767 if (sym_driver_setup.excludes[i] == io_port) 1768 return -ENODEV; 1769 } 1770 } 1771 1772 /* 1773 * Check if the chip is supported. Then copy the chip description 1774 * to our device structure so we can make it match the actual device 1775 * and options. 1776 */ 1777 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision); 1778 chip = sym_lookup_chip_table(pdev->device, revision); 1779 if (!chip) { 1780 dev_info(&pdev->dev, "device not supported\n"); 1781 return -ENODEV; 1782 } 1783 memcpy(&device->chip, chip, sizeof(device->chip)); 1784 device->chip.revision_id = revision; 1785 1786 return 0; 1787 } 1788 1789 /* 1790 * Ignore Symbios chips controlled by various RAID controllers. 1791 * These controllers set value 0x52414944 at RAM end - 16. 
1792 */ 1793 static int __devinit sym_check_raid(struct sym_device *device) 1794 { 1795 unsigned int ram_size, ram_val; 1796 1797 if (!device->s.ramaddr) 1798 return 0; 1799 1800 if (device->chip.features & FE_RAM8K) 1801 ram_size = 8192; 1802 else 1803 ram_size = 4096; 1804 1805 ram_val = readl(device->s.ramaddr + ram_size - 16); 1806 if (ram_val != 0x52414944) 1807 return 0; 1808 1809 dev_info(&device->pdev->dev, 1810 "not initializing, driven by RAID controller.\n"); 1811 return -ENODEV; 1812 } 1813 1814 static int __devinit sym_set_workarounds(struct sym_device *device) 1815 { 1816 struct sym_chip *chip = &device->chip; 1817 struct pci_dev *pdev = device->pdev; 1818 u_short status_reg; 1819 1820 /* 1821 * (ITEM 12 of a DEL about the 896 I haven't yet). 1822 * We must ensure the chip will use WRITE AND INVALIDATE. 1823 * The revision number limit is for now arbitrary. 1824 */ 1825 if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && chip->revision_id < 0x4) { 1826 chip->features |= (FE_WRIE | FE_CLSE); 1827 } 1828 1829 /* If the chip can do Memory Write Invalidate, enable it */ 1830 if (chip->features & FE_WRIE) { 1831 if (pci_set_mwi(pdev)) 1832 return -ENODEV; 1833 } 1834 1835 /* 1836 * Work around for errant bit in 895A. The 66Mhz 1837 * capable bit is set erroneously. Clear this bit. 1838 * (Item 1 DEL 533) 1839 * 1840 * Make sure Config space and Features agree. 1841 * 1842 * Recall: writes are not normal to status register - 1843 * write a 1 to clear and a 0 to leave unchanged. 1844 * Can only reset bits. 1845 */ 1846 pci_read_config_word(pdev, PCI_STATUS, &status_reg); 1847 if (chip->features & FE_66MHZ) { 1848 if (!(status_reg & PCI_STATUS_66MHZ)) 1849 chip->features &= ~FE_66MHZ; 1850 } else { 1851 if (status_reg & PCI_STATUS_66MHZ) { 1852 status_reg = PCI_STATUS_66MHZ; 1853 pci_write_config_word(pdev, PCI_STATUS, status_reg); 1854 pci_read_config_word(pdev, PCI_STATUS, &status_reg); 1855 } 1856 } 1857 1858 return 0; 1859 } 1860 1861 /* 1862 * Read and check the PCI configuration for any detected NCR 1863 * boards and save data for attaching after all boards have 1864 * been detected. 1865 */ 1866 static void __devinit 1867 sym_init_device(struct pci_dev *pdev, struct sym_device *device) 1868 { 1869 int i; 1870 1871 device->host_id = SYM_SETUP_HOST_ID; 1872 device->pdev = pdev; 1873 1874 i = pci_get_base_address(pdev, 1, &device->mmio_base); 1875 pci_get_base_address(pdev, i, &device->ram_base); 1876 1877 #ifndef CONFIG_SCSI_SYM53C8XX_IOMAPPED 1878 if (device->mmio_base) 1879 device->s.ioaddr = pci_iomap(pdev, 1, 1880 pci_resource_len(pdev, 1)); 1881 #endif 1882 if (!device->s.ioaddr) 1883 device->s.ioaddr = pci_iomap(pdev, 0, 1884 pci_resource_len(pdev, 0)); 1885 if (device->ram_base) 1886 device->s.ramaddr = pci_iomap(pdev, i, 1887 pci_resource_len(pdev, i)); 1888 } 1889 1890 /* 1891 * The NCR PQS and PDS cards are constructed as a DEC bridge 1892 * behind which sits a proprietary NCR memory controller and 1893 * either four or two 53c875s as separate devices. We can tell 1894 * if an 875 is part of a PQS/PDS or not since if it is, it will 1895 * be on the same bus as the memory controller. In its usual 1896 * mode of operation, the 875s are slaved to the memory 1897 * controller for all transfers. To operate with the Linux 1898 * driver, the memory controller is disabled and the 875s 1899 * freed to function independently. The only wrinkle is that 1900 * the preset SCSI ID (which may be zero) must be read in from 1901 * a special configuration space register of the 875. 
1902 */ 1903 static void sym_config_pqs(struct pci_dev *pdev, struct sym_device *sym_dev) 1904 { 1905 int slot; 1906 u8 tmp; 1907 1908 for (slot = 0; slot < 256; slot++) { 1909 struct pci_dev *memc = pci_get_slot(pdev->bus, slot); 1910 1911 if (!memc || memc->vendor != 0x101a || memc->device == 0x0009) { 1912 pci_dev_put(memc); 1913 continue; 1914 } 1915 1916 /* bit 1: allow individual 875 configuration */ 1917 pci_read_config_byte(memc, 0x44, &tmp); 1918 if ((tmp & 0x2) == 0) { 1919 tmp |= 0x2; 1920 pci_write_config_byte(memc, 0x44, tmp); 1921 } 1922 1923 /* bit 2: drive individual 875 interrupts to the bus */ 1924 pci_read_config_byte(memc, 0x45, &tmp); 1925 if ((tmp & 0x4) == 0) { 1926 tmp |= 0x4; 1927 pci_write_config_byte(memc, 0x45, tmp); 1928 } 1929 1930 pci_dev_put(memc); 1931 break; 1932 } 1933 1934 pci_read_config_byte(pdev, 0x84, &tmp); 1935 sym_dev->host_id = tmp; 1936 } 1937 1938 /* 1939 * Called before unloading the module. 1940 * Detach the host. 1941 * We have to free resources and halt the NCR chip. 1942 */ 1943 static int sym_detach(struct sym_hcb *np, struct pci_dev *pdev) 1944 { 1945 printk("%s: detaching ...\n", sym_name(np)); 1946 1947 del_timer_sync(&np->s.timer); 1948 1949 /* 1950 * Reset NCR chip. 1951 * We should use sym_soft_reset(), but we don't want to do 1952 * so, since we may not be safe if interrupts occur. 1953 */ 1954 printk("%s: resetting chip\n", sym_name(np)); 1955 OUTB(np, nc_istat, SRST); 1956 INB(np, nc_mbox1); 1957 udelay(10); 1958 OUTB(np, nc_istat, 0); 1959 1960 sym_free_resources(np, pdev); 1961 1962 return 1; 1963 } 1964 1965 /* 1966 * Driver host template. 1967 */ 1968 static struct scsi_host_template sym2_template = { 1969 .module = THIS_MODULE, 1970 .name = "sym53c8xx", 1971 .info = sym53c8xx_info, 1972 .queuecommand = sym53c8xx_queue_command, 1973 .slave_alloc = sym53c8xx_slave_alloc, 1974 .slave_configure = sym53c8xx_slave_configure, 1975 .slave_destroy = sym53c8xx_slave_destroy, 1976 .eh_abort_handler = sym53c8xx_eh_abort_handler, 1977 .eh_device_reset_handler = sym53c8xx_eh_device_reset_handler, 1978 .eh_bus_reset_handler = sym53c8xx_eh_bus_reset_handler, 1979 .eh_host_reset_handler = sym53c8xx_eh_host_reset_handler, 1980 .this_id = 7, 1981 .use_clustering = DISABLE_CLUSTERING, 1982 #ifdef SYM_LINUX_PROC_INFO_SUPPORT 1983 .proc_info = sym53c8xx_proc_info, 1984 .proc_name = NAME53C8XX, 1985 #endif 1986 }; 1987 1988 static int attach_count; 1989 1990 static int __devinit sym2_probe(struct pci_dev *pdev, 1991 const struct pci_device_id *ent) 1992 { 1993 struct sym_device sym_dev; 1994 struct sym_nvram nvram; 1995 struct Scsi_Host *instance; 1996 1997 memset(&sym_dev, 0, sizeof(sym_dev)); 1998 memset(&nvram, 0, sizeof(nvram)); 1999 2000 if (pci_enable_device(pdev)) 2001 goto leave; 2002 2003 pci_set_master(pdev); 2004 2005 if (pci_request_regions(pdev, NAME53C8XX)) 2006 goto disable; 2007 2008 sym_init_device(pdev, &sym_dev); 2009 if (sym_check_supported(&sym_dev)) 2010 goto free; 2011 2012 if (sym_check_raid(&sym_dev)) 2013 goto leave; /* Don't disable the device */ 2014 2015 if (sym_set_workarounds(&sym_dev)) 2016 goto free; 2017 2018 sym_config_pqs(pdev, &sym_dev); 2019 2020 sym_get_nvram(&sym_dev, &nvram); 2021 2022 instance = sym_attach(&sym2_template, attach_count, &sym_dev); 2023 if (!instance) 2024 goto free; 2025 2026 if (scsi_add_host(instance, &pdev->dev)) 2027 goto detach; 2028 scsi_scan_host(instance); 2029 2030 attach_count++; 2031 2032 return 0; 2033 2034 detach: 2035 sym_detach(pci_get_drvdata(pdev), pdev); 2036 free: 2037 
pci_release_regions(pdev); 2038 disable: 2039 pci_disable_device(pdev); 2040 leave: 2041 return -ENODEV; 2042 } 2043 2044 static void __devexit sym2_remove(struct pci_dev *pdev) 2045 { 2046 struct sym_hcb *np = pci_get_drvdata(pdev); 2047 struct Scsi_Host *host = np->s.host; 2048 2049 scsi_remove_host(host); 2050 scsi_host_put(host); 2051 2052 sym_detach(np, pdev); 2053 2054 pci_release_regions(pdev); 2055 pci_disable_device(pdev); 2056 2057 attach_count--; 2058 } 2059 2060 static void sym2_get_signalling(struct Scsi_Host *shost) 2061 { 2062 struct sym_hcb *np = sym_get_hcb(shost); 2063 enum spi_signal_type type; 2064 2065 switch (np->scsi_mode) { 2066 case SMODE_SE: 2067 type = SPI_SIGNAL_SE; 2068 break; 2069 case SMODE_LVD: 2070 type = SPI_SIGNAL_LVD; 2071 break; 2072 case SMODE_HVD: 2073 type = SPI_SIGNAL_HVD; 2074 break; 2075 default: 2076 type = SPI_SIGNAL_UNKNOWN; 2077 break; 2078 } 2079 spi_signalling(shost) = type; 2080 } 2081 2082 static void sym2_set_offset(struct scsi_target *starget, int offset) 2083 { 2084 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 2085 struct sym_hcb *np = sym_get_hcb(shost); 2086 struct sym_tcb *tp = &np->target[starget->id]; 2087 2088 tp->tgoal.offset = offset; 2089 tp->tgoal.check_nego = 1; 2090 } 2091 2092 static void sym2_set_period(struct scsi_target *starget, int period) 2093 { 2094 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 2095 struct sym_hcb *np = sym_get_hcb(shost); 2096 struct sym_tcb *tp = &np->target[starget->id]; 2097 2098 /* have to have DT for these transfers, but DT will also 2099 * set width, so check that this is allowed */ 2100 if (period <= np->minsync && spi_width(starget)) 2101 tp->tgoal.dt = 1; 2102 2103 tp->tgoal.period = period; 2104 tp->tgoal.check_nego = 1; 2105 } 2106 2107 static void sym2_set_width(struct scsi_target *starget, int width) 2108 { 2109 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 2110 struct sym_hcb *np = sym_get_hcb(shost); 2111 struct sym_tcb *tp = &np->target[starget->id]; 2112 2113 /* It is illegal to have DT set on narrow transfers. If DT is 2114 * clear, we must also clear IU and QAS. 
*/ 2115 if (width == 0) 2116 tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0; 2117 2118 tp->tgoal.width = width; 2119 tp->tgoal.check_nego = 1; 2120 } 2121 2122 static void sym2_set_dt(struct scsi_target *starget, int dt) 2123 { 2124 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 2125 struct sym_hcb *np = sym_get_hcb(shost); 2126 struct sym_tcb *tp = &np->target[starget->id]; 2127 2128 /* We must clear QAS and IU if DT is clear */ 2129 if (dt) 2130 tp->tgoal.dt = 1; 2131 else 2132 tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0; 2133 tp->tgoal.check_nego = 1; 2134 } 2135 2136 #if 0 2137 static void sym2_set_iu(struct scsi_target *starget, int iu) 2138 { 2139 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 2140 struct sym_hcb *np = sym_get_hcb(shost); 2141 struct sym_tcb *tp = &np->target[starget->id]; 2142 2143 if (iu) 2144 tp->tgoal.iu = tp->tgoal.dt = 1; 2145 else 2146 tp->tgoal.iu = 0; 2147 tp->tgoal.check_nego = 1; 2148 } 2149 2150 static void sym2_set_qas(struct scsi_target *starget, int qas) 2151 { 2152 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 2153 struct sym_hcb *np = sym_get_hcb(shost); 2154 struct sym_tcb *tp = &np->target[starget->id]; 2155 2156 if (qas) 2157 tp->tgoal.dt = tp->tgoal.qas = 1; 2158 else 2159 tp->tgoal.qas = 0; 2160 tp->tgoal.check_nego = 1; 2161 } 2162 #endif 2163 2164 static struct spi_function_template sym2_transport_functions = { 2165 .set_offset = sym2_set_offset, 2166 .show_offset = 1, 2167 .set_period = sym2_set_period, 2168 .show_period = 1, 2169 .set_width = sym2_set_width, 2170 .show_width = 1, 2171 .set_dt = sym2_set_dt, 2172 .show_dt = 1, 2173 #if 0 2174 .set_iu = sym2_set_iu, 2175 .show_iu = 1, 2176 .set_qas = sym2_set_qas, 2177 .show_qas = 1, 2178 #endif 2179 .get_signalling = sym2_get_signalling, 2180 }; 2181 2182 static struct pci_device_id sym2_id_table[] __devinitdata = { 2183 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C810, 2184 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2185 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C820, 2186 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */ 2187 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C825, 2188 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2189 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C815, 2190 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2191 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C810AP, 2192 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */ 2193 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860, 2194 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2195 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510, 2196 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2197 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896, 2198 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2199 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895, 2200 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2201 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C885, 2202 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2203 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875, 2204 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2205 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C1510, 2206 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */ 2207 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C895A, 2208 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2209 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C875A, 2210 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2211 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_33, 2212 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2213 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_66, 2214 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2215 { 
PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875J, 2216 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2217 { 0, } 2218 }; 2219 2220 MODULE_DEVICE_TABLE(pci, sym2_id_table); 2221 2222 static struct pci_driver sym2_driver = { 2223 .name = NAME53C8XX, 2224 .id_table = sym2_id_table, 2225 .probe = sym2_probe, 2226 .remove = __devexit_p(sym2_remove), 2227 }; 2228 2229 static int __init sym2_init(void) 2230 { 2231 int error; 2232 2233 sym2_setup_params(); 2234 sym2_transport_template = spi_attach_transport(&sym2_transport_functions); 2235 if (!sym2_transport_template) 2236 return -ENODEV; 2237 2238 error = pci_register_driver(&sym2_driver); 2239 if (error) 2240 spi_release_transport(sym2_transport_template); 2241 return error; 2242 } 2243 2244 static void __exit sym2_exit(void) 2245 { 2246 pci_unregister_driver(&sym2_driver); 2247 spi_release_transport(sym2_transport_template); 2248 } 2249 2250 module_init(sym2_init); 2251 module_exit(sym2_exit); 2252
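
/*
 * Illustrative module load example (not part of the original sources; the
 * parameter values below are arbitrary and should be tuned per setup):
 *
 *	modprobe sym53c8xx cmd_per_lun=16 burst=7 led=1 verb=1 settle=3
 *
 * Each option corresponds to one of the module_param_named() /
 * module_param_string() declarations near the top of this file.
 */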