/******************************************************************************
*                  QLOGIC LINUX SOFTWARE
*
* QLogic QLA1280 (Ultra2) and QLA12160 (Ultra3) SCSI driver
* Copyright (C) 2000 Qlogic Corporation (www.qlogic.com)
* Copyright (C) 2001-2004 Jes Sorensen, Wild Open Source Inc.
* Copyright (C) 2003-2004 Christoph Hellwig
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
* General Public License for more details.
*
******************************************************************************/
#define QLA1280_VERSION      "3.25"
/*****************************************************************************
    Revision History:
    Rev  3.25.1, February 10, 2005 Christoph Hellwig
	- use pci_map_single to map non-S/G requests
	- remove qla1280_proc_info
    Rev  3.25, September 28, 2004, Christoph Hellwig
	- add support for ISP1020/1040
	- don't include "scsi.h" anymore for 2.6.x
    Rev  3.24.4 June 7, 2004 Christoph Hellwig
	- restructure firmware loading, cleanup initialization code
	- prepare support for ISP1020/1040 chips
    Rev  3.24.3 January 19, 2004, Jes Sorensen
	- Handle PCI DMA mask settings correctly
	- Correct order of error handling in probe_one, free_irq should not
	  be called if request_irq failed
    Rev  3.24.2 January 19, 2004, James Bottomley & Andrew Vasquez
	- Big endian fixes (James)
	- Remove bogus IOCB content on zero data transfer commands (Andrew)
    Rev  3.24.1 January 5, 2004, Jes Sorensen
	- Initialize completion queue to avoid OOPS on probe
	- Handle interrupts during mailbox testing
    Rev  3.24 November 17, 2003, Christoph Hellwig
	- use struct list_head for completion queue
	- avoid old Scsi_FOO typedefs
	- cleanup 2.4 compat glue a bit
	- use <scsi/scsi_*.h> headers on 2.6 instead of "scsi.h"
	- make initialization for memory mapped vs port I/O more similar
	- remove broken pci config space manipulation
	- kill more cruft
	- this is an almost perfect 2.6 scsi driver now! ;)
    Rev  3.23.39 December 17, 2003, Jes Sorensen
	- Delete completion queue from srb if mailbox command failed,
	  to avoid qla1280_done completing qla1280_error_action's
	  obsolete context
	- Reduce arguments for qla1280_done
    Rev  3.23.38 October 18, 2003, Christoph Hellwig
	- Convert to new-style hotplugable driver for 2.6
	- Fix missing scsi_unregister/scsi_host_put on HBA removal
	- Kill some more cruft
    Rev  3.23.37 October 1, 2003, Jes Sorensen
	- Make MMIO depend on CONFIG_X86_VISWS instead of yet another
	  random CONFIG option
	- Clean up locking in probe path
    Rev  3.23.36 October 1, 2003, Christoph Hellwig
	- queuecommand only ever receives new commands - clear flags
	- Reintegrate lost fixes from Linux 2.5
    Rev  3.23.35 August 14, 2003, Jes Sorensen
	- Build against 2.6
    Rev  3.23.34 July 23, 2003, Jes Sorensen
	- Remove pointless TRUE/FALSE macros
	- Clean up vchan handling
    Rev  3.23.33 July 3, 2003, Jes Sorensen
	- Don't define register access macros before define determining MMIO.
	  This just happened to work out on ia64 but not elsewhere.
	- Don't try and read from the card while it is in reset as
	  it won't respond and causes an MCA
    Rev  3.23.32 June 23, 2003, Jes Sorensen
	- Basic support for boot time arguments
    Rev  3.23.31 June 8, 2003, Jes Sorensen
	- Reduce boot time messages
    Rev  3.23.30 June 6, 2003, Jes Sorensen
	- Do not enable sync/wide/ppr before it has been determined
	  that the target device actually supports it
	- Enable DMA arbitration for multi channel controllers
    Rev  3.23.29 June 3, 2003, Jes Sorensen
	- Port to 2.5.69
    Rev  3.23.28 June 3, 2003, Jes Sorensen
	- Eliminate duplicate marker commands on bus resets
	- Handle outstanding commands appropriately on bus/device resets
    Rev  3.23.27 May 28, 2003, Jes Sorensen
	- Remove bogus input queue code, let the Linux SCSI layer do the work
	- Clean up NVRAM handling, only read it once from the card
	- Add a number of missing default nvram parameters
    Rev  3.23.26 Beta May 28, 2003, Jes Sorensen
	- Use completion queue for mailbox commands instead of busy wait
    Rev  3.23.25 Beta May 27, 2003, James Bottomley
	- Migrate to use new error handling code
    Rev  3.23.24 Beta May 21, 2003, James Bottomley
	- Big endian support
	- Cleanup data direction code
    Rev  3.23.23 Beta May 12, 2003, Jes Sorensen
	- Switch to using MMIO instead of PIO
    Rev  3.23.22 Beta April 15, 2003, Jes Sorensen
	- Fix PCI parity problem with 12160 during reset.
    Rev  3.23.21 Beta April 14, 2003, Jes Sorensen
	- Use pci_map_page()/pci_unmap_page() instead of map_single version.
    Rev  3.23.20 Beta April 9, 2003, Jes Sorensen
	- Remove < 2.4.x support
	- Introduce HOST_LOCK to make the spin lock changes portable.
	- Remove a bunch of idiotic and unnecessary typedef's
	- Kill all leftovers of target-mode support which never worked anyway
    Rev  3.23.19 Beta April 11, 2002, Linus Torvalds
	- Do qla1280_pci_config() before calling request_irq() and
	  request_region()
	- Use pci_dma_hi32() to handle upper word of DMA addresses instead
	  of large shifts
	- Hand correct arguments to free_irq() in case of failure
    Rev  3.23.18 Beta April 11, 2002, Jes Sorensen
	- Run source through Lindent and clean up the output
    Rev  3.23.17 Beta April 11, 2002, Jes Sorensen
	- Update SCSI firmware to qla1280 v8.15.00 and qla12160 v10.04.32
    Rev  3.23.16 Beta March 19, 2002, Jes Sorensen
	- Rely on mailbox commands generating interrupts - do not
	  run qla1280_isr() from ql1280_mailbox_command()
	- Remove device_reg_t
	- Integrate ql12160_set_target_parameters() with 1280 version
	- Make qla1280_setup() non static
	- Do not call qla1280_check_for_dead_scsi_bus() on every I/O request
	  sent to the card - this command pauses the firmware!!!
    Rev  3.23.15 Beta March 19, 2002, Jes Sorensen
	- Clean up qla1280.h - remove obsolete QL_DEBUG_LEVEL_x definitions
	- Remove a pile of pointless and confusing (srb_t **) and
	  (scsi_lu_t *) typecasts
	- Explicitly mark that we do not use the new error handling (for now)
	- Remove scsi_qla_host_t and use 'struct' instead
	- Remove in_abort, watchdog_enabled, dpc, dpc_sched, bios_enabled,
	  pci_64bit_slot flags which weren't used for anything anyway
	- Grab host->host_lock while calling qla1280_isr() from abort()
	- Use spin_lock()/spin_unlock() in qla1280_intr_handler() - we
	  do not need to save/restore flags in the interrupt handler
	- Enable interrupts early (before any mailbox access) in preparation
	  for cleaning up the mailbox handling
    Rev  3.23.14 Beta March 14, 2002, Jes Sorensen
	- Further cleanups. Remove all trace of QL_DEBUG_LEVEL_x and replace
	  it with proper use of dprintk().
	- Make qla1280_print_scsi_cmd() and qla1280_dump_buffer() both take
	  a debug level argument to determine if data is to be printed
	- Add KERN_* info to printk()
    Rev  3.23.13 Beta March 14, 2002, Jes Sorensen
	- Significant cosmetic cleanups
	- Change debug code to use dprintk() and remove #if mess
    Rev  3.23.12 Beta March 13, 2002, Jes Sorensen
	- More cosmetic cleanups, fix places treating return as function
	- use cpu_relax() in qla1280_debounce_register()
    Rev  3.23.11 Beta March 13, 2002, Jes Sorensen
	- Make it compile under 2.5.5
    Rev  3.23.10 Beta October 1, 2001, Jes Sorensen
	- Do not typecast short * to long * in QL1280BoardTbl, this
	  broke miserably on big endian boxes
    Rev  3.23.9 Beta September 30, 2001, Jes Sorensen
	- Remove pre 2.2 hack for checking for reentrance in interrupt handler
	- Make data types used to receive from SCSI_{BUS,TCN,LUN}_32
	  unsigned int to match the types from struct scsi_cmnd
    Rev  3.23.8 Beta September 29, 2001, Jes Sorensen
	- Remove bogus timer_t typedef from qla1280.h
	- Remove obsolete pre 2.2 PCI setup code, use proper #define's
	  for PCI_ values, call pci_set_master()
	- Fix memleak of qla1280_buffer on module unload
	- Only compile module parsing code #ifdef MODULE - should be
	  changed to use individual MODULE_PARM's later
	- Remove dummy_buffer that was never modified nor printed
	- ENTER()/LEAVE() are noops unless QL_DEBUG_LEVEL_3, hence remove
	  #ifdef QL_DEBUG_LEVEL_3/#endif around ENTER()/LEAVE() calls
	- Remove \r from print statements, this is Linux, not DOS
	- Remove obsolete QLA1280_{SCSILU,INTR,RING}_{LOCK,UNLOCK}
	  dummy macros
	- Remove C++ compile hack in header file as Linux drivers are not
	  supposed to be compiled as C++
	- Kill MS_64BITS macro as it makes the code more readable
	- Remove unnecessary flags.in_interrupts bit
    Rev  3.23.7 Beta August 20, 2001, Jes Sorensen
	- Don't check for set flags on q->q_flag one by one in qla1280_next()
	- Check whether the interrupt was generated by the QLA1280 before
	  doing any processing
	- qla1280_status_entry(): Only zero out part of sense_buffer that
	  is not being copied into
	- Remove more superfluous typecasts
	- qla1280_32bit_start_scsi() replace home-brew memcpy() with memcpy()
    Rev  3.23.6 Beta August 20, 2001, Tony Luck, Intel
	- Don't walk the entire list in qla1280_putq_t() just to directly
	  grab the pointer to the last element afterwards
    Rev  3.23.5 Beta August 9, 2001, Jes Sorensen
	- Don't use SA_INTERRUPT, its use is deprecated for this kinda driver
    Rev  3.23.4 Beta August 8, 2001, Jes Sorensen
	- Set dev->max_sectors to 1024
    Rev  3.23.3 Beta August 6, 2001, Jes Sorensen
	- Provide compat macros for pci_enable_device(), pci_find_subsys()
	  and scsi_set_pci_device()
	- Call scsi_set_pci_device() for all devices
	- Reduce size of kernel version dependent device probe code
	- Move duplicate probe/init code to separate function
	- Handle error if qla1280_mem_alloc() fails
	- Kill OFFSET() macro and use Linux's PCI definitions instead
	- Kill private structure defining PCI config space (struct config_reg)
	- Only allocate I/O port region if not in MMIO mode
	- Remove duplicate (unused) sanity check of size of srb_t
    Rev  3.23.2 Beta August 6, 2001, Jes Sorensen
	- Change home-brew memset() implementations to use memset()
	- Remove all references to COMTRACE() - accessing a PC's COM2 serial
	  port directly is not legal under Linux.
    Rev  3.23.1 Beta April 24, 2001, Jes Sorensen
	- Remove pre 2.2 kernel support
	- clean up 64 bit DMA setting to use 2.4 API (provide backwards compat)
	- Fix MMIO access to use readl/writel instead of directly
	  dereferencing pointers
	- Nuke MSDOS debugging code
	- Change true/false data types to int from uint8_t
	- Use int for counters instead of uint8_t etc.
	- Clean up size & byte order conversion macro usage
    Rev  3.23 Beta January 11, 2001 BN Qlogic
	- Added check of device_id when handling non
	  QLA12160s during detect().
    Rev  3.22 Beta January 5, 2001 BN Qlogic
	- Changed queue_task() to schedule_task()
	  for kernels 2.4.0 and higher.
	  Note: 2.4.0-testxx kernels released prior to
		the actual 2.4.0 kernel release on January 2001
		will get compile/link errors with schedule_task().
		Please update your kernel to released 2.4.0 level,
		or comment lines in this file flagged with 3.22
		to resolve compile/link error of schedule_task().
	- Added -DCONFIG_SMP in addition to -D__SMP__
	  in Makefile for 2.4.0 builds of driver as module.
    Rev  3.21 Beta January 4, 2001 BN Qlogic
	- Changed criteria of 64/32 Bit mode of HBA
	  operation according to BITS_PER_LONG rather
	  than HBA's NVRAM setting of >4Gig memory bit;
	  so that the HBA auto-configures without the need
	  to setup each system individually.
    Rev  3.20 Beta December 5, 2000 BN Qlogic
	- Added priority handling to IA-64 onboard SCSI
	  ISP12160 chip for kernels greater than 2.3.18.
	- Added irqrestore for qla1280_intr_handler.
	- Enabled /proc/scsi/qla1280 interface.
	- Clear /proc/scsi/qla1280 counters in detect().
    Rev  3.19 Beta October 13, 2000 BN Qlogic
	- Declare driver_template for new kernel
	  (2.4.0 and greater) scsi initialization scheme.
	- Update /proc/scsi entry for 2.3.18 kernels and
	  above as qla1280
    Rev  3.18 Beta October 10, 2000 BN Qlogic
	- Changed scan order of adapters to map
	  the QLA12160 followed by the QLA1280.
    Rev  3.17 Beta September 18, 2000 BN Qlogic
	- Removed warnings for 32 bit 2.4.x compiles
	- Corrected declared size for request and response
	  DMA addresses that are kept in each ha
    Rev. 3.16 Beta August 25, 2000 BN Qlogic
	- Corrected 64 bit addressing issue on IA-64
	  where the upper 32 bits were not properly
	  passed to the RISC engine.
    Rev. 3.15 Beta August 22, 2000 BN Qlogic
	- Modified qla1280_setup_chip to properly load
	  ISP firmware for greater than 4 Gig memory on IA-64
    Rev. 3.14 Beta August 16, 2000 BN Qlogic
	- Added setting of dma_mask to full 64 bit
	  if flags.enable_64bit_addressing is set in NVRAM
    Rev. 3.13 Beta August 16, 2000 BN Qlogic
	- Use new PCI DMA mapping APIs for 2.4.x kernel
    Rev. 3.12 July 18, 2000 Redhat & BN Qlogic
	- Added check of pci_enable_device to detect() for 2.3.x
	- Use pci_resource_start() instead of
	  pdev->resource[0].start in detect() for 2.3.x
	- Updated driver version
    Rev. 3.11 July 14, 2000 BN Qlogic
	- Updated SCSI Firmware to following versions:
	  qla1x80:   8.13.08
	  qla1x160: 10.04.08
	- Updated driver version to 3.11
    Rev. 3.10 June 23, 2000 BN Qlogic
	- Added filtering of AMI SubSys Vendor ID devices
    Rev. 3.9
	- DEBUG_QLA1280 undefined and new version BN Qlogic
    Rev. 3.08b May 9, 2000 MD Dell
	- Added logic to check against AMI subsystem vendor ID
    Rev. 3.08 May 4, 2000 DG Qlogic
	- Added logic to check for PCI subsystem ID.
    Rev. 3.07 Apr 24, 2000 DG & BN Qlogic
	- Updated SCSI Firmware to following versions:
	  qla12160: 10.01.19
	  qla1280:   8.09.00
    Rev. 3.06 Apr 12, 2000 DG & BN Qlogic
	- Internal revision; not released
    Rev. 3.05 Mar 28, 2000 DG & BN Qlogic
	- Edit correction for virt_to_bus and PROC.
    Rev. 3.04 Mar 28, 2000 DG & BN Qlogic
	- Merge changes from ia64 port.
    Rev. 3.03 Mar 28, 2000 BN Qlogic
	- Increase version to reflect new code drop with compile fix
	  of issue with inclusion of linux/spinlock for 2.3 kernels
    Rev. 3.02 Mar 15, 2000 BN Qlogic
	- Merge qla1280_proc_info from 2.10 code base
    Rev. 3.01 Feb 10, 2000 BN Qlogic
	- Corrected code to compile on a 2.2.x kernel.
    Rev. 3.00 Jan 17, 2000 DG Qlogic
	- Added 64-bit support.
    Rev. 2.07 Nov 9, 1999 DG Qlogic
	- Added new routine to set target parameters for ISP12160.
    Rev. 2.06 Sept 10, 1999 DG Qlogic
	- Added support for ISP12160 Ultra 3 chip.
    Rev. 2.03 August 3, 1999 Fred Lewis, Intel DuPont
	- Modified code to remove errors generated when compiling with
	  Cygnus IA64 Compiler.
	- Changed conversion of pointers to unsigned longs instead of integers.
	- Changed type of I/O port variables from uint32_t to unsigned long.
	- Modified OFFSET macro to work with 64-bit as well as 32-bit.
	- Changed sprintf and printk format specifiers for pointers to %p.
	- Changed some int to long type casts where needed in sprintf & printk.
	- Added l modifiers to sprintf and printk format specifiers for longs.
	- Removed unused local variables.
    Rev. 1.20 June 8, 1999 DG, Qlogic
	Changes to support RedHat release 6.0 (kernel 2.2.5).
	- Added SCSI exclusive access lock (io_request_lock) when accessing
	  the adapter.
	- Added changes for the new LINUX interface template. Some new error
	  handling routines have been added to the template, but for now we
	  will use the old ones.
	- Initial Beta Release.
*****************************************************************************/


#include <linux/config.h>
#include <linux/module.h>

#include <linux/version.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/init.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/system.h>

#if LINUX_VERSION_CODE >= 0x020545
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#else
#include <linux/blk.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "sd.h"
#endif

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
#include <asm/sn/io.h>
#endif

#if LINUX_VERSION_CODE < 0x020407
#error "Kernels older than 2.4.7 are no longer supported"
#endif


/*
 * Compile time Options:
 *            0 - Disable and 1 - Enable
 */
#define  DEBUG_QLA1280_INTR	0
#define  DEBUG_PRINT_NVRAM	0
#define  DEBUG_QLA1280		0

/*
 * The SGI VISWS is broken and doesn't support MMIO ;-(
 */
#ifdef CONFIG_X86_VISWS
#define	MEMORY_MAPPED_IO	0
#else
#define	MEMORY_MAPPED_IO	1
#endif

#define UNIQUE_FW_NAME
#include "qla1280.h"
#include "ql12160_fw.h"		/* ISP RISC codes */
#include "ql1280_fw.h"
#include "ql1040_fw.h"


/*
 * Missing PCI ID's
 */
#ifndef PCI_DEVICE_ID_QLOGIC_ISP1080
#define PCI_DEVICE_ID_QLOGIC_ISP1080	0x1080
#endif
#ifndef PCI_DEVICE_ID_QLOGIC_ISP1240
#define PCI_DEVICE_ID_QLOGIC_ISP1240	0x1240
#endif
#ifndef PCI_DEVICE_ID_QLOGIC_ISP1280
#define PCI_DEVICE_ID_QLOGIC_ISP1280	0x1280
#endif
#ifndef PCI_DEVICE_ID_QLOGIC_ISP10160
#define PCI_DEVICE_ID_QLOGIC_ISP10160	0x1016
#endif
#ifndef PCI_DEVICE_ID_QLOGIC_ISP12160
#define PCI_DEVICE_ID_QLOGIC_ISP12160	0x1216
#endif

#ifndef PCI_VENDOR_ID_AMI
#define PCI_VENDOR_ID_AMI	0x101e
#endif

#ifndef BITS_PER_LONG
#error "BITS_PER_LONG not defined!"
#endif
#if (BITS_PER_LONG == 64) || defined CONFIG_HIGHMEM
#define QLA_64BIT_PTR	1
#endif
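
/*
 * Split a (possibly 64 bit) bus address into the two 32 bit halves the
 * request IOCBs expect.  pci_dma_hi32() uses two 16 bit shifts rather
 * than a single ">> 32" so the expression stays well defined even when
 * dma_addr_t is only 32 bits wide.
 */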
#ifdef QLA_64BIT_PTR
#define pci_dma_hi32(a)		((a >> 16) >> 16)
#else
#define pci_dma_hi32(a)		0
#endif
#define pci_dma_lo32(a)		(a & 0xffffffff)

#define NVRAM_DELAY()		udelay(500)	/* 500 microseconds */

#if LINUX_VERSION_CODE < 0x020500
#define HOST_LOCK		&io_request_lock
#define irqreturn_t		void
#define IRQ_RETVAL(foo)
#define MSG_ORDERED_TAG		1

#define DMA_BIDIRECTIONAL	SCSI_DATA_UNKNOWN
#define DMA_TO_DEVICE		SCSI_DATA_WRITE
#define DMA_FROM_DEVICE		SCSI_DATA_READ
#define DMA_NONE		SCSI_DATA_NONE

#ifndef HAVE_SECTOR_T
typedef unsigned int sector_t;
#endif

static inline void
scsi_adjust_queue_depth(struct scsi_device *device, int tag, int depth)
{
	if (tag) {
		device->tagged_queue = tag;
		device->current_tag = 0;
	}
	device->queue_depth = depth;
}
static inline struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *t, size_t s)
{
	return scsi_register(t, s);
}
static inline void scsi_host_put(struct Scsi_Host *h)
{
	scsi_unregister(h);
}
#else
#define HOST_LOCK		ha->host->host_lock
#endif
#if LINUX_VERSION_CODE < 0x020600
#define DEV_SIMPLE_TAGS(device)	device->tagged_queue
/*
 * Hack around that qla1280_remove_one is called from
 * qla1280_release in 2.4
 */
#undef __devexit
#define __devexit
#else
#define DEV_SIMPLE_TAGS(device)	device->simple_tags
#endif
#if defined(__ia64__) && !defined(ia64_platform_is)
#define ia64_platform_is(foo)	(!strcmp(foo, platform_name))
#endif


#define IS_ISP1040(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
#define IS_ISP1x40(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
			 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
#define IS_ISP1x160(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
			 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)


static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
static void qla1280_remove_one(struct pci_dev *);

/*
 * QLogic Driver Support Function Prototypes.
 */
static void qla1280_done(struct scsi_qla_host *);
#if LINUX_VERSION_CODE < 0x020545
static void qla1280_get_target_options(struct scsi_cmnd *, struct scsi_qla_host *);
#endif
static int qla1280_get_token(char *);
static int qla1280_setup(char *s) __init;

/*
 * QLogic ISP1280 Hardware Support Function Prototypes.
 */
static int qla1280_load_firmware(struct scsi_qla_host *);
static int qla1280_init_rings(struct scsi_qla_host *);
static int qla1280_nvram_config(struct scsi_qla_host *);
static int qla1280_mailbox_command(struct scsi_qla_host *,
				   uint8_t, uint16_t *);
static int qla1280_bus_reset(struct scsi_qla_host *, int);
static int qla1280_device_reset(struct scsi_qla_host *, int, int);
static int qla1280_abort_device(struct scsi_qla_host *, int, int, int);
static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
static int qla1280_abort_isp(struct scsi_qla_host *);
#ifdef QLA_64BIT_PTR
static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
#else
static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
#endif
static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
static void qla1280_poll(struct scsi_qla_host *);
static void qla1280_reset_adapter(struct scsi_qla_host *);
static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
static void qla1280_isp_cmd(struct scsi_qla_host *);
static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
static void qla1280_rst_aen(struct scsi_qla_host *);
static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
				 struct list_head *);
static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
				struct list_head *);
static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
static request_t *qla1280_req_pkt(struct scsi_qla_host *);
static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
					   unsigned int);
static void qla1280_get_target_parameters(struct scsi_qla_host *,
					  struct scsi_device *);
static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);


static struct qla_driver_setup driver_setup;

/*
 * convert scsi data direction to request_t control flags
 */
static inline uint16_t
qla1280_data_direction(struct scsi_cmnd *cmnd)
{
	switch(cmnd->sc_data_direction) {
	case DMA_FROM_DEVICE:
		return BIT_5;
	case DMA_TO_DEVICE:
		return BIT_6;
	case DMA_BIDIRECTIONAL:
		return BIT_5 | BIT_6;
	/*
	 * We could BUG() on default here if one of the four cases isn't
	 * met, but then again if we receive something like that from the
	 * SCSI layer we have more serious problems. This shuts up GCC.
	 */
	case DMA_NONE:
	default:
		return 0;
	}
}

#if DEBUG_QLA1280
static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd);
static void __qla1280_dump_buffer(char *, int);
#endif


/*
 * insmod needs to find the variable and make it point to something
 */
#ifdef MODULE
static char *qla1280;

/* insmod qla1280 qla1280=verbose */
module_param(qla1280, charp, 0);
#else
__setup("qla1280=", qla1280_setup);
#endif
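
/*
 * Illustrative option strings (the full token list is parsed by
 * qla1280_setup(); see the lilo example in qla1280_detect() below):
 *
 *	modprobe qla1280 qla1280=verbose
 *	append="qla1280=verbose,max_tags:{{255,255,255,255},{255,255,255,255}}"
 */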

/*
 * We use the scsi_pointer structure that's included with each scsi_command
 * to overlay our struct srb over it. qla1280_init() checks that a srb is not
 * bigger than a scsi_pointer.
 */

#define	CMD_SP(Cmnd)		&Cmnd->SCp
#define	CMD_CDBLEN(Cmnd)	Cmnd->cmd_len
#define	CMD_CDBP(Cmnd)		Cmnd->cmnd
#define	CMD_SNSP(Cmnd)		Cmnd->sense_buffer
#define	CMD_SNSLEN(Cmnd)	sizeof(Cmnd->sense_buffer)
#define	CMD_RESULT(Cmnd)	Cmnd->result
#define	CMD_HANDLE(Cmnd)	Cmnd->host_scribble
#if LINUX_VERSION_CODE < 0x020545
#define CMD_REQUEST(Cmnd)	Cmnd->request.cmd
#else
#define CMD_REQUEST(Cmnd)	Cmnd->request->cmd
#endif

#define CMD_HOST(Cmnd)		Cmnd->device->host
#define SCSI_BUS_32(Cmnd)	Cmnd->device->channel
#define SCSI_TCN_32(Cmnd)	Cmnd->device->id
#define SCSI_LUN_32(Cmnd)	Cmnd->device->lun


/*****************************************/
/*   ISP Boards supported by this driver */
/*****************************************/

struct qla_boards {
	unsigned char name[9];	/* Board ID String */
	int numPorts;		/* Number of SCSI ports */
	unsigned short *fwcode;	/* pointer to FW array */
	unsigned short *fwlen;	/* number of words in array */
	unsigned short *fwstart;	/* start address for F/W */
	unsigned char *fwver;	/* Ptr to F/W version array */
};

/* NOTE: the last argument in each entry is used to index ql1280_board_tbl */
static struct pci_device_id qla1280_pci_tbl[] = {
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
	{0,}
};
MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);

static struct qla_boards ql1280_board_tbl[] = {
	/* Name ,  Number of ports, FW details */
	{"QLA12160", 2, &fw12160i_code01[0], &fw12160i_length01,
	 &fw12160i_addr01, &fw12160i_version_str[0]},
	{"QLA1040", 1, &risc_code01[0], &risc_code_length01,
	 &risc_code_addr01, &firmware_version[0]},
	{"QLA1080", 1, &fw1280ei_code01[0], &fw1280ei_length01,
	 &fw1280ei_addr01, &fw1280ei_version_str[0]},
	{"QLA1240", 2, &fw1280ei_code01[0], &fw1280ei_length01,
	 &fw1280ei_addr01, &fw1280ei_version_str[0]},
	{"QLA1280", 2, &fw1280ei_code01[0], &fw1280ei_length01,
	 &fw1280ei_addr01, &fw1280ei_version_str[0]},
	{"QLA10160", 1, &fw12160i_code01[0], &fw12160i_length01,
	 &fw12160i_addr01, &fw12160i_version_str[0]},
	{" ", 0}
};

static int qla1280_verbose = 1;

#if DEBUG_QLA1280
static int ql_debug_level = 1;
#define dprintk(level, format, a...)	\
	do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0)
#define qla1280_dump_buffer(level, buf, size)	\
	if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size)
#define qla1280_print_scsi_cmd(level, cmd)	\
	if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd)
#else
#define ql_debug_level			0
#define dprintk(level, format, a...)	do{}while(0)
#define qla1280_dump_buffer(a, b, c)	do{}while(0)
#define qla1280_print_scsi_cmd(a, b)	do{}while(0)
#endif

#define ENTER(x)	dprintk(3, "qla1280 : Entering %s()\n", x);
#define LEAVE(x)	dprintk(3, "qla1280 : Leaving %s()\n", x);
#define ENTER_INTR(x)	dprintk(4, "qla1280 : Entering %s()\n", x);
#define LEAVE_INTR(x)	dprintk(4, "qla1280 : Leaving %s()\n", x);

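/*
 * qla1280_read_nvram
 *	Read the adapter's NVRAM one 16 bit word at a time via
 *	qla1280_get_nvram_word(), verify the "ISP " signature and the
 *	byte-wise checksum, and fix up the byte order of the multi-byte
 *	fields used later on.
 *
 * Returns:
 *	0 if the NVRAM contents are usable, non-zero otherwise (the
 *	caller then falls back to the default settings).
 */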
static int qla1280_read_nvram(struct scsi_qla_host *ha)
{
	uint16_t *wptr;
	uint8_t chksum;
	int cnt, i;
	struct nvram *nv;

	ENTER("qla1280_read_nvram");

	if (driver_setup.no_nvram)
		return 1;

	printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);

	wptr = (uint16_t *)&ha->nvram;
	nv = &ha->nvram;
	chksum = 0;
	for (cnt = 0; cnt < 3; cnt++) {
		*wptr = qla1280_get_nvram_word(ha, cnt);
		chksum += *wptr & 0xff;
		chksum += (*wptr >> 8) & 0xff;
		wptr++;
	}

	if (nv->id0 != 'I' || nv->id1 != 'S' ||
	    nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
		dprintk(2, "Invalid nvram ID or version!\n");
		chksum = 1;
	} else {
		for (; cnt < sizeof(struct nvram); cnt++) {
			*wptr = qla1280_get_nvram_word(ha, cnt);
			chksum += *wptr & 0xff;
			chksum += (*wptr >> 8) & 0xff;
			wptr++;
		}
	}

	dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
		" version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
		nv->version);


	if (chksum) {
		if (!driver_setup.no_nvram)
			printk(KERN_WARNING "scsi(%ld): Unable to identify or "
			       "validate NVRAM checksum, using default "
			       "settings\n", ha->host_no);
		ha->nvram_valid = 0;
	} else
		ha->nvram_valid = 1;

	/* The firmware interface is, um, interesting, in that the
	 * actual firmware image on the chip is little endian, thus,
	 * the process of taking that image to the CPU would end up
	 * little endian.  However, the firmware interface requires it
	 * to be read a word (two bytes) at a time.
	 *
	 * The net result of this would be that the word (and
	 * doubleword) quantities in the firmware would be correct, but
	 * the bytes would be pairwise reversed.  Since most of the
	 * firmware quantities are, in fact, bytes, we do an extra
	 * le16_to_cpu() in the firmware read routine.
	 *
	 * The upshot of all this is that the bytes in the firmware
	 * are in the correct places, but the 16 and 32 bit quantities
	 * are still in little endian format.  We fix that up below by
	 * doing extra reverses on them */
	nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
	nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
	for(i = 0; i < MAX_BUSES; i++) {
		nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
		nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
	}
	dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
	LEAVE("qla1280_read_nvram");

	return chksum;
}

/**************************************************************************
 * qla1280_info
 *   Return a string describing the driver.
 **************************************************************************/
static const char *
qla1280_info(struct Scsi_Host *host)
{
	static char qla1280_scsi_name_buffer[125];
	char *bp;
	struct scsi_qla_host *ha;
	struct qla_boards *bdp;

	bp = &qla1280_scsi_name_buffer[0];
	ha = (struct scsi_qla_host *)host->hostdata;
	bdp = &ql1280_board_tbl[ha->devnum];
	memset(bp, 0, sizeof(qla1280_scsi_name_buffer));

	sprintf (bp,
		 "QLogic %s PCI to SCSI Host Adapter\n"
		 " Firmware version: %2d.%02d.%02d, Driver version %s",
		 &bdp->name[0], bdp->fwver[0], bdp->fwver[1], bdp->fwver[2],
		 QLA1280_VERSION);
	return bp;
}

/**************************************************************************
 * qla1280_queuecommand
 *   Queue a command to the controller.
 *
 * Note:
 * The mid-level driver tries to ensure that queuecommand never gets invoked
 * concurrently with itself or the interrupt handler (although the
 * interrupt handler may call this routine as part of request-completion
 * handling).  Unfortunately, it sometimes calls the scheduler in interrupt
 * context which is a big NO! NO!.
 **************************************************************************/
static int
qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
	struct srb *sp = (struct srb *)&cmd->SCp;
	int status;

	cmd->scsi_done = fn;
	sp->cmd = cmd;
	sp->flags = 0;

	qla1280_print_scsi_cmd(5, cmd);

#ifdef QLA_64BIT_PTR
	/*
	 * Using 64 bit commands if the PCI bridge doesn't support it is a
	 * bit wasteful, however this should really only happen if one's
	 * PCI controller is completely broken, like the BCM1250. For
	 * sane hardware this is not an issue.
	 */
	status = qla1280_64bit_start_scsi(ha, sp);
#else
	status = qla1280_32bit_start_scsi(ha, sp);
#endif
	return status;
}

enum action {
	ABORT_COMMAND,
	ABORT_DEVICE,
	DEVICE_RESET,
	BUS_RESET,
	ADAPTER_RESET,
	FAIL
};

/* timer action for error action processor */
static void qla1280_error_wait_timeout(unsigned long __data)
{
	struct scsi_cmnd *cmd = (struct scsi_cmnd *)__data;
	struct srb *sp = (struct srb *)CMD_SP(cmd);

	complete(sp->wait);
}

static void qla1280_mailbox_timeout(unsigned long __data)
{
	struct scsi_qla_host *ha = (struct scsi_qla_host *)__data;
	struct device_reg __iomem *reg;
	reg = ha->iobase;

	ha->mailbox_out[0] = RD_REG_WORD(&reg->mailbox0);
	printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
	       "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
	       RD_REG_WORD(&reg->ictrl), RD_REG_WORD(&reg->istatus));
	complete(ha->mailbox_wait);
}

/**************************************************************************
 * qla1280_error_action
 *    The function will attempt to perform a specified error action and
 *    wait for the results (or time out).
 *
 * Input:
 *      cmd = Linux SCSI command packet of the command that caused the
 *            bus reset.
 *      action = error action to take (see action_t)
 *
 * Returns:
 *      SUCCESS or FAILED
 *
 * Note:
 *      Resetting the bus always succeeds - it has to, otherwise the
 *      kernel will panic! Try a surgical technique - sending a BUS
 *      DEVICE RESET message - on the offending target before pulling
 *      the SCSI bus reset line.
 **************************************************************************/
static int
qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
{
	struct scsi_qla_host *ha;
	int bus, target, lun;
	struct srb *sp;
	uint16_t data;
	unsigned char *handle;
	int result, i;
	DECLARE_COMPLETION(wait);
	struct timer_list timer;

	ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);

	dprintk(4, "error_action %i, istatus 0x%04x\n", action,
		RD_REG_WORD(&ha->iobase->istatus));

	dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
		RD_REG_WORD(&ha->iobase->host_cmd),
		RD_REG_WORD(&ha->iobase->ictrl), jiffies);

	ENTER("qla1280_error_action");
	if (qla1280_verbose)
		printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
		       "Handle=0x%p, action=0x%x\n",
		       ha->host_no, cmd, CMD_HANDLE(cmd), action);

	if (cmd == NULL) {
		printk(KERN_WARNING "(scsi?:?:?:?) Reset called with NULL "
		       "Scsi_Cmnd pointer, failing.\n");
		LEAVE("qla1280_error_action");
		return FAILED;
	}

	ha = (struct scsi_qla_host *)cmd->device->host->hostdata;
	sp = (struct srb *)CMD_SP(cmd);
	handle = CMD_HANDLE(cmd);

	/* Check for pending interrupts. */
	data = qla1280_debounce_register(&ha->iobase->istatus);
	/*
	 * The io_request_lock is held when the reset handler is called, hence
	 * the interrupt handler cannot be running in parallel as it also
	 * grabs the lock. /Jes
	 */
	if (data & RISC_INT)
		qla1280_isr(ha, &ha->done_q);

	/*
	 * Determine the suggested action that the mid-level driver wants
	 * us to perform.
	 */
	if (handle == (unsigned char *)INVALID_HANDLE || handle == NULL) {
		if(action == ABORT_COMMAND) {
			/* we never got this command */
			printk(KERN_INFO "qla1280: Aborting a NULL handle\n");
			return SUCCESS;	/* no action - we don't have command */
		}
	} else {
		sp->wait = &wait;
	}

	bus = SCSI_BUS_32(cmd);
	target = SCSI_TCN_32(cmd);
	lun = SCSI_LUN_32(cmd);

	/* Overloading result.  Here it means the success or fail of the
	 * *issue* of the action.  When we return from the routine, it must
	 * mean the actual success or fail of the action */
	result = FAILED;
	switch (action) {
	case FAIL:
		break;

	case ABORT_COMMAND:
		if ((sp->flags & SRB_ABORT_PENDING)) {
			printk(KERN_WARNING
			       "scsi(): Command has a pending abort "
			       "message - ABORT_PENDING.\n");
			/* This should technically be impossible since we
			 * now wait for abort completion */
			break;
		}

		for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
			if (sp == ha->outstanding_cmds[i]) {
				dprintk(1, "qla1280: RISC aborting command\n");
				if (qla1280_abort_command(ha, sp, i) == 0)
					result = SUCCESS;
				else {
					/*
					 * Since we don't know what might
					 * have happened to the command, it
					 * is unsafe to remove it from the
					 * device's queue at this point.
					 * Wait and let the escalation
					 * process take care of it.
					 */
					printk(KERN_WARNING
					       "scsi(%li:%i:%i:%i): Unable"
					       " to abort command!\n",
					       ha->host_no, bus, target, lun);
				}
			}
		}
		break;

	case ABORT_DEVICE:
		if (qla1280_verbose)
			printk(KERN_INFO
			       "scsi(%ld:%d:%d:%d): Queueing abort device "
			       "command.\n", ha->host_no, bus, target, lun);
		if (qla1280_abort_device(ha, bus, target, lun) == 0)
			result = SUCCESS;
		break;

	case DEVICE_RESET:
		if (qla1280_verbose)
			printk(KERN_INFO
			       "scsi(%ld:%d:%d:%d): Queueing device reset "
			       "command.\n", ha->host_no, bus, target, lun);
		if (qla1280_device_reset(ha, bus, target) == 0)
			result = SUCCESS;
		break;

	case BUS_RESET:
		if (qla1280_verbose)
			printk(KERN_INFO "qla1280(%ld:%d): Issuing BUS "
			       "DEVICE RESET\n", ha->host_no, bus);
		if (qla1280_bus_reset(ha, bus) == 0)
			result = SUCCESS;

		break;

	case ADAPTER_RESET:
	default:
		if (qla1280_verbose) {
			printk(KERN_INFO
			       "scsi(%ld): Issued ADAPTER RESET\n",
			       ha->host_no);
			printk(KERN_INFO "scsi(%ld): I/O processing will "
			       "continue automatically\n", ha->host_no);
		}
		ha->flags.reset_active = 1;
		/*
		 * We restarted all of the commands automatically, so the
		 * mid-level code can expect completions momentarily.
		 */
		if (qla1280_abort_isp(ha) == 0)
			result = SUCCESS;

		ha->flags.reset_active = 0;
	}

	if (!list_empty(&ha->done_q))
		qla1280_done(ha);

	/* If we didn't manage to issue the action, or we have no
	 * command to wait for, exit here */
	if (result == FAILED || handle == NULL ||
	    handle == (unsigned char *)INVALID_HANDLE) {
		/*
		 * Clear completion queue to avoid qla1280_done() trying
		 * to complete the command at a later stage after we
		 * have exited the current context
		 */
		sp->wait = NULL;
		goto leave;
	}

	/* set up a timer just in case we're really jammed */
	init_timer(&timer);
	timer.expires = jiffies + 4*HZ;
	timer.data = (unsigned long)cmd;
	timer.function = qla1280_error_wait_timeout;
	add_timer(&timer);

	/* wait for the action to complete (or the timer to expire) */
	spin_unlock_irq(HOST_LOCK);
	wait_for_completion(&wait);
	del_timer_sync(&timer);
	spin_lock_irq(HOST_LOCK);
	sp->wait = NULL;

	/* the only action we might get a fail for is abort */
	if (action == ABORT_COMMAND) {
		if(sp->flags & SRB_ABORTED)
			result = SUCCESS;
		else
			result = FAILED;
	}

 leave:
	dprintk(1, "RESET returning %d\n", result);

	LEAVE("qla1280_error_action");
	return result;
}
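
/*
 * The error handler entry points below are thin wrappers: they take the
 * host lock and funnel the command into qla1280_error_action() with the
 * appropriate action code.  qla1280_error_action() drops the lock again
 * around wait_for_completion().
 */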

/**************************************************************************
 * qla1280_eh_abort
 *    Abort the specified SCSI command(s).
 **************************************************************************/
static int
qla1280_eh_abort(struct scsi_cmnd * cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = qla1280_error_action(cmd, ABORT_COMMAND);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**************************************************************************
 * qla1280_eh_device_reset
 *    Reset the specified SCSI device
 **************************************************************************/
static int
qla1280_eh_device_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = qla1280_error_action(cmd, DEVICE_RESET);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**************************************************************************
 * qla1280_eh_bus_reset
 *    Reset the specified bus.
 **************************************************************************/
static int
qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = qla1280_error_action(cmd, BUS_RESET);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**************************************************************************
 * qla1280_eh_adapter_reset
 *    Reset the specified adapter (both channels)
 **************************************************************************/
static int
qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = qla1280_error_action(cmd, ADAPTER_RESET);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

static int
qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
		  sector_t capacity, int geom[])
{
	int heads, sectors, cylinders;

	heads = 64;
	sectors = 32;
	cylinders = (unsigned long)capacity / (heads * sectors);
	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = (unsigned long)capacity / (heads * sectors);
		/* if (cylinders > 1023)
		   cylinders = 1023; */
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	return 0;
}

#if LINUX_VERSION_CODE < 0x020600
static int
qla1280_detect(struct scsi_host_template *template)
{
	struct pci_device_id *id = &qla1280_pci_tbl[0];
	struct pci_dev *pdev = NULL;
	int num_hosts = 0;

	if (sizeof(struct srb) > sizeof(Scsi_Pointer)) {
		printk(KERN_WARNING
		       "qla1280: struct srb too big, aborting\n");
		return 0;
	}

	if ((DMA_BIDIRECTIONAL != PCI_DMA_BIDIRECTIONAL) ||
	    (DMA_TO_DEVICE != PCI_DMA_TODEVICE) ||
	    (DMA_FROM_DEVICE != PCI_DMA_FROMDEVICE) ||
	    (DMA_NONE != PCI_DMA_NONE)) {
		printk(KERN_WARNING
		       "qla1280: dma direction bits don't match\n");
		return 0;
	}

#ifdef MODULE
	/*
	 * If we are called as a module, the qla1280 pointer may not be null
	 * and it would point to our bootup string, just like on the lilo
	 * command line.  If not NULL, then process this config string with
	 * qla1280_setup
	 *
	 * Boot time Options
	 * To add options at boot time add a line to your lilo.conf file like:
	 * append="qla1280=verbose,max_tags:{{255,255,255,255},{255,255,255,255}}"
	 * which will result in the first four devices on the first two
	 * controllers being set to a tagged queue depth of 255.
	 */
	if (qla1280)
		qla1280_setup(qla1280);
#endif

	/* First Initialize QLA12160 on PCI Bus 1 Dev 2 */
	while ((pdev = pci_find_device(id->vendor, id->device, pdev))) {
		if (pdev->bus->number == 1 && PCI_SLOT(pdev->devfn) == 2) {
			if (!qla1280_probe_one(pdev, id))
				num_hosts++;
		}
	}

	pdev = NULL;
	/* Try and find each different type of adapter we support */
	for (id = &qla1280_pci_tbl[0]; id->device; id++) {
		while ((pdev = pci_find_device(id->vendor, id->device, pdev))) {
			/*
			 * skip QLA12160 already initialized on
			 * PCI Bus 1 Dev 2 since we already initialized
			 * and presented it
			 */
			if (id->device == PCI_DEVICE_ID_QLOGIC_ISP12160 &&
			    pdev->bus->number == 1 &&
			    PCI_SLOT(pdev->devfn) == 2)
				continue;

			if (!qla1280_probe_one(pdev, id))
				num_hosts++;
		}
	}

	return num_hosts;
}

/*
 * This looks a bit ugly as we could just pass down host to
 * qla1280_remove_one, but I want to keep qla1280_release purely a wrapper
 * around pci_driver::remove as used from 2.6 onwards.
 */
static int
qla1280_release(struct Scsi_Host *host)
{
	struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;

	qla1280_remove_one(ha->pdev);
	return 0;
}

static int
qla1280_biosparam_old(Disk * disk, kdev_t dev, int geom[])
{
	return qla1280_biosparam(disk->device, NULL, disk->capacity, geom);
}
#endif

/* disable risc and host interrupts */
static inline void
qla1280_disable_intrs(struct scsi_qla_host *ha)
{
	WRT_REG_WORD(&ha->iobase->ictrl, 0);
	RD_REG_WORD(&ha->iobase->ictrl);	/* PCI Posted Write flush */
}

/* enable risc and host interrupts */
static inline void
qla1280_enable_intrs(struct scsi_qla_host *ha)
{
	WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
	RD_REG_WORD(&ha->iobase->ictrl);	/* PCI Posted Write flush */
}

/**************************************************************************
 * qla1280_intr_handler
 *   Handles the H/W interrupt
 **************************************************************************/
static irqreturn_t
qla1280_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	struct scsi_qla_host *ha;
	struct device_reg __iomem *reg;
	u16 data;
	int handled = 0;

	ENTER_INTR ("qla1280_intr_handler");
	ha = (struct scsi_qla_host *)dev_id;

	spin_lock(HOST_LOCK);

	ha->isr_count++;
	reg = ha->iobase;

	qla1280_disable_intrs(ha);
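
	/*
	 * qla1280_debounce_register() re-reads the register until two
	 * successive reads return the same value, so we do not act on a
	 * half-updated istatus.
	 */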
	data = qla1280_debounce_register(&reg->istatus);
	/* Check for pending interrupts. */
	if (data & RISC_INT) {
		qla1280_isr(ha, &ha->done_q);
		handled = 1;
	}
	if (!list_empty(&ha->done_q))
		qla1280_done(ha);

	spin_unlock(HOST_LOCK);

	qla1280_enable_intrs(ha);

	LEAVE_INTR("qla1280_intr_handler");
	return IRQ_RETVAL(handled);
}


static int
qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
{
	uint8_t mr;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct nvram *nv;
	int status, lun;

	nv = &ha->nvram;

	mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;

	/* Set Target Parameters. */
	mb[0] = MBC_SET_TARGET_PARAMETERS;
	mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
	mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
	mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
	mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
	mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
	mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
	mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
	mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
	mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;

	if (IS_ISP1x160(ha)) {
		mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
		mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
		mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
			 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
		mr |= BIT_6;
	} else {
		mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
	}
	mb[3] |= nv->bus[bus].target[target].sync_period;

	status = qla1280_mailbox_command(ha, mr, mb);

	/* Set Device Queue Parameters. */
	for (lun = 0; lun < MAX_LUNS; lun++) {
		mb[0] = MBC_SET_DEVICE_QUEUE;
		mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
		mb[1] |= lun;
		mb[2] = nv->bus[bus].max_queue_depth;
		mb[3] = nv->bus[bus].target[target].execution_throttle;
		status |= qla1280_mailbox_command(ha, 0x0f, mb);
	}

	if (status)
		printk(KERN_WARNING "scsi(%ld:%i:%i): "
		       "qla1280_set_target_parameters() failed\n",
		       ha->host_no, bus, target);
	return status;
}


/**************************************************************************
 * qla1280_slave_configure
 *
 * Description:
 *   Determines the queue depth for a given device.  There are two ways
 *   a queue depth can be obtained for a tagged queueing device.  One
 *   way is the default queue depth, which is used if it is defined.
 *   Otherwise, we use either 4 or 8 as the
 *   default queue depth (dependent on the number of hardware SCBs).
 **************************************************************************/
static int
qla1280_slave_configure(struct scsi_device *device)
{
	struct scsi_qla_host *ha;
	int default_depth = 3;
	int bus = device->channel;
	int target = device->id;
	int status = 0;
	struct nvram *nv;
	unsigned long flags;

	ha = (struct scsi_qla_host *)device->host->hostdata;
	nv = &ha->nvram;

	if (qla1280_check_for_dead_scsi_bus(ha, bus))
		return 1;

	if (device->tagged_supported &&
	    (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
		scsi_adjust_queue_depth(device, MSG_ORDERED_TAG,
					ha->bus_settings[bus].hiwat);
	} else {
		scsi_adjust_queue_depth(device, 0, default_depth);
	}

#if LINUX_VERSION_CODE > 0x020500
	nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
	nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
	nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
#endif

	if (driver_setup.no_sync ||
	    (driver_setup.sync_mask &&
	     (~driver_setup.sync_mask & (1 << target))))
		nv->bus[bus].target[target].parameter.enable_sync = 0;
	if (driver_setup.no_wide ||
	    (driver_setup.wide_mask &&
	     (~driver_setup.wide_mask & (1 << target))))
		nv->bus[bus].target[target].parameter.enable_wide = 0;
	if (IS_ISP1x160(ha)) {
		if (driver_setup.no_ppr ||
		    (driver_setup.ppr_mask &&
		     (~driver_setup.ppr_mask & (1 << target))))
			nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
	}

	spin_lock_irqsave(HOST_LOCK, flags);
	if (nv->bus[bus].target[target].parameter.enable_sync)
		status = qla1280_set_target_parameters(ha, bus, target);
	qla1280_get_target_parameters(ha, device);
	spin_unlock_irqrestore(HOST_LOCK, flags);
	return status;
}

#if LINUX_VERSION_CODE < 0x020545
/**************************************************************************
 * qla1280_select_queue_depth
 *
 *   Sets the queue depth for each SCSI device hanging off the input
 *   host adapter.  We use a queue depth of 2 for devices that do not
 *   support tagged queueing.
 **************************************************************************/
static void
qla1280_select_queue_depth(struct Scsi_Host *host, struct scsi_device *sdev_q)
{
	struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
	struct scsi_device *sdev;

	ENTER("qla1280_select_queue_depth");
	for (sdev = sdev_q; sdev; sdev = sdev->next)
		if (sdev->host == host)
			qla1280_slave_configure(sdev);

	if (sdev_q)
		qla1280_check_for_dead_scsi_bus(ha, sdev_q->channel);
	LEAVE("qla1280_select_queue_depth");
}
#endif

/*
 * qla1280_done
 *      Process completed commands.
 *
 * Input:
 *      ha = adapter block pointer.
 */
static void
qla1280_done(struct scsi_qla_host *ha)
{
	struct srb *sp;
	struct list_head *done_q;
	int bus, target, lun;
	struct scsi_cmnd *cmd;

	ENTER("qla1280_done");

	done_q = &ha->done_q;

	while (!list_empty(done_q)) {
		sp = list_entry(done_q->next, struct srb, list);

		list_del(&sp->list);

		cmd = sp->cmd;
		bus = SCSI_BUS_32(cmd);
		target = SCSI_TCN_32(cmd);
		lun = SCSI_LUN_32(cmd);

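		/*
		 * The host byte of the result (set while servicing the
		 * response queue) decides what extra bookkeeping is needed
		 * before the command is handed back to the midlayer.
		 */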
		switch ((CMD_RESULT(cmd) >> 16)) {
		case DID_RESET:
			/* Issue marker command. */
			qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
			break;
		case DID_ABORT:
			sp->flags &= ~SRB_ABORT_PENDING;
			sp->flags |= SRB_ABORTED;
			if (sp->flags & SRB_TIMEOUT)
				CMD_RESULT(sp->cmd) = DID_TIME_OUT << 16;
			break;
		default:
			break;
		}

		/* Release memory used for this I/O */
		if (cmd->use_sg) {
			pci_unmap_sg(ha->pdev, cmd->request_buffer,
				     cmd->use_sg, cmd->sc_data_direction);
		} else if (cmd->request_bufflen) {
			pci_unmap_single(ha->pdev, sp->saved_dma_handle,
					 cmd->request_bufflen,
					 cmd->sc_data_direction);
		}

		/* Call the mid-level driver interrupt handler */
		CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE;
		ha->actthreads--;

#if LINUX_VERSION_CODE < 0x020500
		if (cmd->cmnd[0] == INQUIRY)
			qla1280_get_target_options(cmd, ha);
#endif
		(*(cmd)->scsi_done)(cmd);

		if(sp->wait != NULL)
			complete(sp->wait);
	}
	LEAVE("qla1280_done");
}

/*
 * Translates an ISP error to a Linux SCSI error
 */
static int
qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
{
	int host_status = DID_ERROR;
	uint16_t comp_status = le16_to_cpu(sts->comp_status);
	uint16_t state_flags = le16_to_cpu(sts->state_flags);
	uint32_t residual_length = le32_to_cpu(sts->residual_length);
	uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
#if DEBUG_QLA1280_INTR
	static char *reason[] = {
		"DID_OK",
		"DID_NO_CONNECT",
		"DID_BUS_BUSY",
		"DID_TIME_OUT",
		"DID_BAD_TARGET",
		"DID_ABORT",
		"DID_PARITY",
		"DID_ERROR",
		"DID_RESET",
		"DID_BAD_INTR"
	};
#endif				/* DEBUG_QLA1280_INTR */

	ENTER("qla1280_return_status");

#if DEBUG_QLA1280_INTR
	/*
	   dprintk(1, "qla1280_return_status: compl status = 0x%04x\n",
	   comp_status);
	 */
#endif

	switch (comp_status) {
	case CS_COMPLETE:
		host_status = DID_OK;
		break;

	case CS_INCOMPLETE:
		if (!(state_flags & SF_GOT_BUS))
			host_status = DID_NO_CONNECT;
		else if (!(state_flags & SF_GOT_TARGET))
			host_status = DID_BAD_TARGET;
		else if (!(state_flags & SF_SENT_CDB))
			host_status = DID_ERROR;
		else if (!(state_flags & SF_TRANSFERRED_DATA))
			host_status = DID_ERROR;
		else if (!(state_flags & SF_GOT_STATUS))
			host_status = DID_ERROR;
		else if (!(state_flags & SF_GOT_SENSE))
			host_status = DID_ERROR;
		break;

	case CS_RESET:
		host_status = DID_RESET;
		break;

	case CS_ABORTED:
		host_status = DID_ABORT;
		break;

	case CS_TIMEOUT:
		host_status = DID_TIME_OUT;
		break;

	case CS_DATA_OVERRUN:
		dprintk(2, "Data overrun 0x%x\n", residual_length);
		dprintk(2, "qla1280_return_status: response packet data\n");
		qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
		host_status = DID_ERROR;
		break;

	case CS_DATA_UNDERRUN:
		if ((cp->request_bufflen - residual_length) <
		    cp->underflow) {
			printk(KERN_WARNING
			       "scsi: Underflow detected - retrying "
			       "command.\n");
			host_status = DID_ERROR;
		} else
			host_status = DID_OK;
		break;

	default:
		host_status = DID_ERROR;
		break;
	}

#if DEBUG_QLA1280_INTR
	dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
		reason[host_status], scsi_status);
#endif

	LEAVE("qla1280_return_status");
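	/*
	 * Result word layout expected by the midlayer: SCSI status in
	 * the low byte, host (DID_*) code in bits 16-23.
	 */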
1634 1635 return (scsi_status & 0xff) | (host_status << 16); 1636 } 1637 1638 /****************************************************************************/ 1639 /* QLogic ISP1280 Hardware Support Functions. */ 1640 /****************************************************************************/ 1641 1642 /* 1643 * qla1280_initialize_adapter 1644 * Initialize board. 1645 * 1646 * Input: 1647 * ha = adapter block pointer. 1648 * 1649 * Returns: 1650 * 0 = success 1651 */ 1652 static int __devinit 1653 qla1280_initialize_adapter(struct scsi_qla_host *ha) 1654 { 1655 struct device_reg __iomem *reg; 1656 int status; 1657 int bus; 1658 #if LINUX_VERSION_CODE > 0x020500 1659 unsigned long flags; 1660 #endif 1661 1662 ENTER("qla1280_initialize_adapter"); 1663 1664 /* Clear adapter flags. */ 1665 ha->flags.online = 0; 1666 ha->flags.disable_host_adapter = 0; 1667 ha->flags.reset_active = 0; 1668 ha->flags.abort_isp_active = 0; 1669 1670 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 1671 if (ia64_platform_is("sn2")) { 1672 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA " 1673 "dual channel lockup workaround\n", ha->host_no); 1674 ha->flags.use_pci_vchannel = 1; 1675 driver_setup.no_nvram = 1; 1676 } 1677 #endif 1678 1679 /* TODO: implement support for the 1040 nvram format */ 1680 if (IS_ISP1040(ha)) 1681 driver_setup.no_nvram = 1; 1682 1683 dprintk(1, "Configure PCI space for adapter...\n"); 1684 1685 reg = ha->iobase; 1686 1687 /* Insure mailbox registers are free. */ 1688 WRT_REG_WORD(®->semaphore, 0); 1689 WRT_REG_WORD(®->host_cmd, HC_CLR_RISC_INT); 1690 WRT_REG_WORD(®->host_cmd, HC_CLR_HOST_INT); 1691 RD_REG_WORD(®->host_cmd); 1692 1693 if (qla1280_read_nvram(ha)) { 1694 dprintk(2, "qla1280_initialize_adapter: failed to read " 1695 "NVRAM\n"); 1696 } 1697 1698 #if LINUX_VERSION_CODE >= 0x020500 1699 /* 1700 * It's necessary to grab the spin here as qla1280_mailbox_command 1701 * needs to be able to drop the lock unconditionally to wait 1702 * for completion. 1703 * In 2.4 ->detect is called with the io_request_lock held. 1704 */ 1705 spin_lock_irqsave(HOST_LOCK, flags); 1706 #endif 1707 1708 status = qla1280_load_firmware(ha); 1709 if (status) { 1710 printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n", 1711 ha->host_no); 1712 goto out; 1713 } 1714 1715 /* Setup adapter based on NVRAM parameters. */ 1716 dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no); 1717 qla1280_nvram_config(ha); 1718 1719 if (ha->flags.disable_host_adapter) { 1720 status = 1; 1721 goto out; 1722 } 1723 1724 status = qla1280_init_rings(ha); 1725 if (status) 1726 goto out; 1727 1728 /* Issue SCSI reset, if we can't reset twice then bus is dead */ 1729 for (bus = 0; bus < ha->ports; bus++) { 1730 if (!ha->bus_settings[bus].disable_scsi_reset && 1731 qla1280_bus_reset(ha, bus) && 1732 qla1280_bus_reset(ha, bus)) 1733 ha->bus_settings[bus].scsi_bus_dead = 1; 1734 } 1735 1736 ha->flags.online = 1; 1737 out: 1738 #if LINUX_VERSION_CODE >= 0x020500 1739 spin_unlock_irqrestore(HOST_LOCK, flags); 1740 #endif 1741 if (status) 1742 dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n"); 1743 1744 LEAVE("qla1280_initialize_adapter"); 1745 return status; 1746 } 1747 1748 /* 1749 * Chip diagnostics 1750 * Test chip for proper operation. 1751 * 1752 * Input: 1753 * ha = adapter block pointer. 1754 * 1755 * Returns: 1756 * 0 = success. 
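 *
 * Rough sequence of the body below: soft-reset the ISP and poll ictrl
 * until ISP_RESET clears, release the RISC with the BIOS disabled,
 * verify the product ID words in mailbox1-4, then run the mailbox
 * register wrap test (the 0xAAAA/0x5555/... pattern) to prove the
 * host<->RISC mailbox path works before any firmware is loaded.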
1757 */ 1758 static int 1759 qla1280_chip_diag(struct scsi_qla_host *ha) 1760 { 1761 uint16_t mb[MAILBOX_REGISTER_COUNT]; 1762 struct device_reg __iomem *reg = ha->iobase; 1763 int status = 0; 1764 int cnt; 1765 uint16_t data; 1766 dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", ®->id_l); 1767 1768 dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no); 1769 1770 /* Soft reset chip and wait for it to finish. */ 1771 WRT_REG_WORD(®->ictrl, ISP_RESET); 1772 1773 /* 1774 * We can't do a traditional PCI write flush here by reading 1775 * back the register. The card will not respond once the reset 1776 * is in action and we end up with a machine check exception 1777 * instead. Nothing to do but wait and hope for the best. 1778 * A portable pci_write_flush(pdev) call would be very useful here. 1779 */ 1780 udelay(20); 1781 data = qla1280_debounce_register(®->ictrl); 1782 /* 1783 * Yet another QLogic gem ;-( 1784 */ 1785 for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) { 1786 udelay(5); 1787 data = RD_REG_WORD(®->ictrl); 1788 } 1789 1790 if (!cnt) 1791 goto fail; 1792 1793 /* Reset register cleared by chip reset. */ 1794 dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n"); 1795 1796 WRT_REG_WORD(®->cfg_1, 0); 1797 1798 /* Reset RISC and disable BIOS which 1799 allows RISC to execute out of RAM. */ 1800 WRT_REG_WORD(®->host_cmd, HC_RESET_RISC | 1801 HC_RELEASE_RISC | HC_DISABLE_BIOS); 1802 1803 RD_REG_WORD(®->id_l); /* Flush PCI write */ 1804 data = qla1280_debounce_register(®->mailbox0); 1805 1806 /* 1807 * I *LOVE* this code! 1808 */ 1809 for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) { 1810 udelay(5); 1811 data = RD_REG_WORD(®->mailbox0); 1812 } 1813 1814 if (!cnt) 1815 goto fail; 1816 1817 /* Check product ID of chip */ 1818 dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n"); 1819 1820 if (RD_REG_WORD(®->mailbox1) != PROD_ID_1 || 1821 (RD_REG_WORD(®->mailbox2) != PROD_ID_2 && 1822 RD_REG_WORD(®->mailbox2) != PROD_ID_2a) || 1823 RD_REG_WORD(®->mailbox3) != PROD_ID_3 || 1824 RD_REG_WORD(®->mailbox4) != PROD_ID_4) { 1825 printk(KERN_INFO "qla1280: Wrong product ID = " 1826 "0x%x,0x%x,0x%x,0x%x\n", 1827 RD_REG_WORD(®->mailbox1), 1828 RD_REG_WORD(®->mailbox2), 1829 RD_REG_WORD(®->mailbox3), 1830 RD_REG_WORD(®->mailbox4)); 1831 goto fail; 1832 } 1833 1834 /* 1835 * Enable ints early!!! 1836 */ 1837 qla1280_enable_intrs(ha); 1838 1839 dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n"); 1840 /* Wrap Incoming Mailboxes Test. */ 1841 mb[0] = MBC_MAILBOX_REGISTER_TEST; 1842 mb[1] = 0xAAAA; 1843 mb[2] = 0x5555; 1844 mb[3] = 0xAA55; 1845 mb[4] = 0x55AA; 1846 mb[5] = 0xA5A5; 1847 mb[6] = 0x5A5A; 1848 mb[7] = 0x2525; 1849 1850 status = qla1280_mailbox_command(ha, 0xff, mb); 1851 if (status) 1852 goto fail; 1853 1854 if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 || 1855 mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A || 1856 mb[7] != 0x2525) { 1857 printk(KERN_INFO "qla1280: Failed mbox check\n"); 1858 goto fail; 1859 } 1860 1861 dprintk(3, "qla1280_chip_diag: exiting normally\n"); 1862 return 0; 1863 fail: 1864 dprintk(2, "qla1280_chip_diag: **** FAILED ****\n"); 1865 return status; 1866 } 1867 1868 static int 1869 qla1280_load_firmware_pio(struct scsi_qla_host *ha) 1870 { 1871 uint16_t risc_address, *risc_code_address, risc_code_size; 1872 uint16_t mb[MAILBOX_REGISTER_COUNT], i; 1873 int err; 1874 1875 /* Load RISC code. 
*/ 1876 risc_address = *ql1280_board_tbl[ha->devnum].fwstart; 1877 risc_code_address = ql1280_board_tbl[ha->devnum].fwcode; 1878 risc_code_size = *ql1280_board_tbl[ha->devnum].fwlen; 1879 1880 for (i = 0; i < risc_code_size; i++) { 1881 mb[0] = MBC_WRITE_RAM_WORD; 1882 mb[1] = risc_address + i; 1883 mb[2] = risc_code_address[i]; 1884 1885 err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb); 1886 if (err) { 1887 printk(KERN_ERR "scsi(%li): Failed to load firmware\n", 1888 ha->host_no); 1889 return err; 1890 } 1891 } 1892 1893 return 0; 1894 } 1895 1896 #define DUMP_IT_BACK 0 /* for debug of RISC loading */ 1897 static int 1898 qla1280_load_firmware_dma(struct scsi_qla_host *ha) 1899 { 1900 uint16_t risc_address, *risc_code_address, risc_code_size; 1901 uint16_t mb[MAILBOX_REGISTER_COUNT], cnt; 1902 int err = 0, num, i; 1903 #if DUMP_IT_BACK 1904 uint8_t *sp, *tbuf; 1905 dma_addr_t p_tbuf; 1906 1907 tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf); 1908 if (!tbuf) 1909 return -ENOMEM; 1910 #endif 1911 1912 /* Load RISC code. */ 1913 risc_address = *ql1280_board_tbl[ha->devnum].fwstart; 1914 risc_code_address = ql1280_board_tbl[ha->devnum].fwcode; 1915 risc_code_size = *ql1280_board_tbl[ha->devnum].fwlen; 1916 1917 dprintk(1, "%s: DMA RISC code (%i) words\n", 1918 __FUNCTION__, risc_code_size); 1919 1920 num = 0; 1921 while (risc_code_size > 0) { 1922 int warn __attribute__((unused)) = 0; 1923 1924 cnt = 2000 >> 1; 1925 1926 if (cnt > risc_code_size) 1927 cnt = risc_code_size; 1928 1929 dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p)," 1930 "%d,%d(0x%x)\n", 1931 risc_code_address, cnt, num, risc_address); 1932 for(i = 0; i < cnt; i++) 1933 ((__le16 *)ha->request_ring)[i] = 1934 cpu_to_le16(risc_code_address[i]); 1935 1936 mb[0] = MBC_LOAD_RAM; 1937 mb[1] = risc_address; 1938 mb[4] = cnt; 1939 mb[3] = ha->request_dma & 0xffff; 1940 mb[2] = (ha->request_dma >> 16) & 0xffff; 1941 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff; 1942 mb[6] = pci_dma_hi32(ha->request_dma) >> 16; 1943 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n", 1944 __FUNCTION__, mb[0], 1945 (void *)(long)ha->request_dma, 1946 mb[6], mb[7], mb[2], mb[3]); 1947 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 | 1948 BIT_1 | BIT_0, mb); 1949 if (err) { 1950 printk(KERN_ERR "scsi(%li): Failed to load partial " 1951 "segment of f\n", ha->host_no); 1952 goto out; 1953 } 1954 1955 #if DUMP_IT_BACK 1956 mb[0] = MBC_DUMP_RAM; 1957 mb[1] = risc_address; 1958 mb[4] = cnt; 1959 mb[3] = p_tbuf & 0xffff; 1960 mb[2] = (p_tbuf >> 16) & 0xffff; 1961 mb[7] = pci_dma_hi32(p_tbuf) & 0xffff; 1962 mb[6] = pci_dma_hi32(p_tbuf) >> 16; 1963 1964 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 | 1965 BIT_1 | BIT_0, mb); 1966 if (err) { 1967 printk(KERN_ERR 1968 "Failed to dump partial segment of f/w\n"); 1969 goto out; 1970 } 1971 sp = (uint8_t *)ha->request_ring; 1972 for (i = 0; i < (cnt << 1); i++) { 1973 if (tbuf[i] != sp[i] && warn++ < 10) { 1974 printk(KERN_ERR "%s: FW compare error @ " 1975 "byte(0x%x) loop#=%x\n", 1976 __FUNCTION__, i, num); 1977 printk(KERN_ERR "%s: FWbyte=%x " 1978 "FWfromChip=%x\n", 1979 __FUNCTION__, sp[i], tbuf[i]); 1980 /*break; */ 1981 } 1982 } 1983 #endif 1984 risc_address += cnt; 1985 risc_code_size = risc_code_size - cnt; 1986 risc_code_address = risc_code_address + cnt; 1987 num++; 1988 } 1989 1990 out: 1991 #if DUMP_IT_BACK 1992 pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf); 1993 #endif 1994 return err; 1995 } 1996 1997 static int 1998 qla1280_start_firmware(struct 
scsi_qla_host *ha) 1999 { 2000 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2001 int err; 2002 2003 dprintk(1, "%s: Verifying checksum of loaded RISC code.\n", 2004 __FUNCTION__); 2005 2006 /* Verify checksum of loaded RISC code. */ 2007 mb[0] = MBC_VERIFY_CHECKSUM; 2008 /* mb[1] = ql12_risc_code_addr01; */ 2009 mb[1] = *ql1280_board_tbl[ha->devnum].fwstart; 2010 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); 2011 if (err) { 2012 printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no); 2013 return err; 2014 } 2015 2016 /* Start firmware execution. */ 2017 dprintk(1, "%s: start firmware running.\n", __FUNCTION__); 2018 mb[0] = MBC_EXECUTE_FIRMWARE; 2019 mb[1] = *ql1280_board_tbl[ha->devnum].fwstart; 2020 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); 2021 if (err) { 2022 printk(KERN_ERR "scsi(%li): Failed to start firmware\n", 2023 ha->host_no); 2024 } 2025 2026 return err; 2027 } 2028 2029 static int 2030 qla1280_load_firmware(struct scsi_qla_host *ha) 2031 { 2032 int err; 2033 2034 err = qla1280_chip_diag(ha); 2035 if (err) 2036 goto out; 2037 if (IS_ISP1040(ha)) 2038 err = qla1280_load_firmware_pio(ha); 2039 else 2040 err = qla1280_load_firmware_dma(ha); 2041 if (err) 2042 goto out; 2043 err = qla1280_start_firmware(ha); 2044 out: 2045 return err; 2046 } 2047 2048 /* 2049 * Initialize rings 2050 * 2051 * Input: 2052 * ha = adapter block pointer. 2053 * ha->request_ring = request ring virtual address 2054 * ha->response_ring = response ring virtual address 2055 * ha->request_dma = request ring physical address 2056 * ha->response_dma = response ring physical address 2057 * 2058 * Returns: 2059 * 0 = success. 2060 */ 2061 static int 2062 qla1280_init_rings(struct scsi_qla_host *ha) 2063 { 2064 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2065 int status = 0; 2066 2067 ENTER("qla1280_init_rings"); 2068 2069 /* Clear outstanding commands array. */ 2070 memset(ha->outstanding_cmds, 0, 2071 sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS); 2072 2073 /* Initialize request queue. */ 2074 ha->request_ring_ptr = ha->request_ring; 2075 ha->req_ring_index = 0; 2076 ha->req_q_cnt = REQUEST_ENTRY_CNT; 2077 /* mb[0] = MBC_INIT_REQUEST_QUEUE; */ 2078 mb[0] = MBC_INIT_REQUEST_QUEUE_A64; 2079 mb[1] = REQUEST_ENTRY_CNT; 2080 mb[3] = ha->request_dma & 0xffff; 2081 mb[2] = (ha->request_dma >> 16) & 0xffff; 2082 mb[4] = 0; 2083 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff; 2084 mb[6] = pci_dma_hi32(ha->request_dma) >> 16; 2085 if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 | 2086 BIT_3 | BIT_2 | BIT_1 | BIT_0, 2087 &mb[0]))) { 2088 /* Initialize response queue. 
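 * The layout mirrors the request queue init above: mb[1] is the entry
 * count and the 64-bit ring address is split over four mailboxes,
 * mb[3]/mb[2] holding the low dword (low word first) and mb[7]/mb[6]
 * the high dword.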
*/ 2089 ha->response_ring_ptr = ha->response_ring; 2090 ha->rsp_ring_index = 0; 2091 /* mb[0] = MBC_INIT_RESPONSE_QUEUE; */ 2092 mb[0] = MBC_INIT_RESPONSE_QUEUE_A64; 2093 mb[1] = RESPONSE_ENTRY_CNT; 2094 mb[3] = ha->response_dma & 0xffff; 2095 mb[2] = (ha->response_dma >> 16) & 0xffff; 2096 mb[5] = 0; 2097 mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff; 2098 mb[6] = pci_dma_hi32(ha->response_dma) >> 16; 2099 status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 | 2100 BIT_3 | BIT_2 | BIT_1 | BIT_0, 2101 &mb[0]); 2102 } 2103 2104 if (status) 2105 dprintk(2, "qla1280_init_rings: **** FAILED ****\n"); 2106 2107 LEAVE("qla1280_init_rings"); 2108 return status; 2109 } 2110 2111 static void 2112 qla1280_print_settings(struct nvram *nv) 2113 { 2114 dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n", 2115 nv->bus[0].config_1.initiator_id); 2116 dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n", 2117 nv->bus[1].config_1.initiator_id); 2118 2119 dprintk(1, "qla1280 : bus reset delay[0]=%d\n", 2120 nv->bus[0].bus_reset_delay); 2121 dprintk(1, "qla1280 : bus reset delay[1]=%d\n", 2122 nv->bus[1].bus_reset_delay); 2123 2124 dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count); 2125 dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay); 2126 dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count); 2127 dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay); 2128 2129 dprintk(1, "qla1280 : async data setup time[0]=%d\n", 2130 nv->bus[0].config_2.async_data_setup_time); 2131 dprintk(1, "qla1280 : async data setup time[1]=%d\n", 2132 nv->bus[1].config_2.async_data_setup_time); 2133 2134 dprintk(1, "qla1280 : req/ack active negation[0]=%d\n", 2135 nv->bus[0].config_2.req_ack_active_negation); 2136 dprintk(1, "qla1280 : req/ack active negation[1]=%d\n", 2137 nv->bus[1].config_2.req_ack_active_negation); 2138 2139 dprintk(1, "qla1280 : data line active negation[0]=%d\n", 2140 nv->bus[0].config_2.data_line_active_negation); 2141 dprintk(1, "qla1280 : data line active negation[1]=%d\n", 2142 nv->bus[1].config_2.data_line_active_negation); 2143 2144 dprintk(1, "qla1280 : disable loading risc code=%d\n", 2145 nv->cntr_flags_1.disable_loading_risc_code); 2146 2147 dprintk(1, "qla1280 : enable 64bit addressing=%d\n", 2148 nv->cntr_flags_1.enable_64bit_addressing); 2149 2150 dprintk(1, "qla1280 : selection timeout limit[0]=%d\n", 2151 nv->bus[0].selection_timeout); 2152 dprintk(1, "qla1280 : selection timeout limit[1]=%d\n", 2153 nv->bus[1].selection_timeout); 2154 2155 dprintk(1, "qla1280 : max queue depth[0]=%d\n", 2156 nv->bus[0].max_queue_depth); 2157 dprintk(1, "qla1280 : max queue depth[1]=%d\n", 2158 nv->bus[1].max_queue_depth); 2159 } 2160 2161 static void 2162 qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target) 2163 { 2164 struct nvram *nv = &ha->nvram; 2165 2166 nv->bus[bus].target[target].parameter.renegotiate_on_error = 1; 2167 nv->bus[bus].target[target].parameter.auto_request_sense = 1; 2168 nv->bus[bus].target[target].parameter.tag_queuing = 1; 2169 nv->bus[bus].target[target].parameter.enable_sync = 1; 2170 #if 1 /* Some SCSI Processors do not seem to like this */ 2171 nv->bus[bus].target[target].parameter.enable_wide = 1; 2172 #endif 2173 nv->bus[bus].target[target].execution_throttle = 2174 nv->bus[bus].max_queue_depth - 1; 2175 nv->bus[bus].target[target].parameter.parity_checking = 1; 2176 nv->bus[bus].target[target].parameter.disconnect_allowed = 1; 2177 2178 if (IS_ISP1x160(ha)) { 2179 
nv->bus[bus].target[target].flags.flags1x160.device_enable = 1; 2180 nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e; 2181 nv->bus[bus].target[target].sync_period = 9; 2182 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1; 2183 nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2; 2184 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1; 2185 } else { 2186 nv->bus[bus].target[target].flags.flags1x80.device_enable = 1; 2187 nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12; 2188 nv->bus[bus].target[target].sync_period = 10; 2189 } 2190 } 2191 2192 static void 2193 qla1280_set_defaults(struct scsi_qla_host *ha) 2194 { 2195 struct nvram *nv = &ha->nvram; 2196 int bus, target; 2197 2198 dprintk(1, "Using defaults for NVRAM: \n"); 2199 memset(nv, 0, sizeof(struct nvram)); 2200 2201 /* nv->cntr_flags_1.disable_loading_risc_code = 1; */ 2202 nv->firmware_feature.f.enable_fast_posting = 1; 2203 nv->firmware_feature.f.disable_synchronous_backoff = 1; 2204 nv->termination.scsi_bus_0_control = 3; 2205 nv->termination.scsi_bus_1_control = 3; 2206 nv->termination.auto_term_support = 1; 2207 2208 /* 2209 * Set default FIFO magic - What appropriate values would be here 2210 * is unknown. This is what I have found testing with 12160s. 2211 * 2212 * Now, I would love the magic decoder ring for this one, the 2213 * header file provided by QLogic seems to be bogus or incomplete 2214 * at best. 2215 */ 2216 nv->isp_config.burst_enable = 1; 2217 if (IS_ISP1040(ha)) 2218 nv->isp_config.fifo_threshold |= 3; 2219 else 2220 nv->isp_config.fifo_threshold |= 4; 2221 2222 if (IS_ISP1x160(ha)) 2223 nv->isp_parameter = 0x01; /* fast memory enable */ 2224 2225 for (bus = 0; bus < MAX_BUSES; bus++) { 2226 nv->bus[bus].config_1.initiator_id = 7; 2227 nv->bus[bus].config_2.req_ack_active_negation = 1; 2228 nv->bus[bus].config_2.data_line_active_negation = 1; 2229 nv->bus[bus].selection_timeout = 250; 2230 nv->bus[bus].max_queue_depth = 256; 2231 2232 if (IS_ISP1040(ha)) { 2233 nv->bus[bus].bus_reset_delay = 3; 2234 nv->bus[bus].config_2.async_data_setup_time = 6; 2235 nv->bus[bus].retry_delay = 1; 2236 } else { 2237 nv->bus[bus].bus_reset_delay = 5; 2238 nv->bus[bus].config_2.async_data_setup_time = 8; 2239 } 2240 2241 for (target = 0; target < MAX_TARGETS; target++) 2242 qla1280_set_target_defaults(ha, bus, target); 2243 } 2244 } 2245 2246 static int 2247 qla1280_config_target(struct scsi_qla_host *ha, int bus, int target) 2248 { 2249 struct nvram *nv = &ha->nvram; 2250 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2251 int status, lun; 2252 uint16_t flag; 2253 2254 /* Set Target Parameters. */ 2255 mb[0] = MBC_SET_TARGET_PARAMETERS; 2256 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8); 2257 2258 /* 2259 * Do not enable sync and ppr for the initial INQUIRY run. We 2260 * enable this later if we determine the target actually 2261 * supports it. 2262 */ 2263 mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE 2264 | TP_WIDE | TP_PARITY | TP_DISCONNECT); 2265 2266 if (IS_ISP1x160(ha)) 2267 mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8; 2268 else 2269 mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8; 2270 mb[3] |= nv->bus[bus].target[target].sync_period; 2271 status = qla1280_mailbox_command(ha, 0x0f, mb); 2272 2273 /* Save Tag queuing enable flag. 
*/ 2274 flag = (BIT_0 << target) & mb[0]; 2275 if (nv->bus[bus].target[target].parameter.tag_queuing) 2276 ha->bus_settings[bus].qtag_enables |= flag; 2277 2278 /* Save Device enable flag. */ 2279 if (IS_ISP1x160(ha)) { 2280 if (nv->bus[bus].target[target].flags.flags1x160.device_enable) 2281 ha->bus_settings[bus].device_enables |= flag; 2282 ha->bus_settings[bus].lun_disables |= 0; 2283 } else { 2284 if (nv->bus[bus].target[target].flags.flags1x80.device_enable) 2285 ha->bus_settings[bus].device_enables |= flag; 2286 /* Save LUN disable flag. */ 2287 if (nv->bus[bus].target[target].flags.flags1x80.lun_disable) 2288 ha->bus_settings[bus].lun_disables |= flag; 2289 } 2290 2291 /* Set Device Queue Parameters. */ 2292 for (lun = 0; lun < MAX_LUNS; lun++) { 2293 mb[0] = MBC_SET_DEVICE_QUEUE; 2294 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8); 2295 mb[1] |= lun; 2296 mb[2] = nv->bus[bus].max_queue_depth; 2297 mb[3] = nv->bus[bus].target[target].execution_throttle; 2298 status |= qla1280_mailbox_command(ha, 0x0f, mb); 2299 } 2300 2301 return status; 2302 } 2303 2304 static int 2305 qla1280_config_bus(struct scsi_qla_host *ha, int bus) 2306 { 2307 struct nvram *nv = &ha->nvram; 2308 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2309 int target, status; 2310 2311 /* SCSI Reset Disable. */ 2312 ha->bus_settings[bus].disable_scsi_reset = 2313 nv->bus[bus].config_1.scsi_reset_disable; 2314 2315 /* Initiator ID. */ 2316 ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id; 2317 mb[0] = MBC_SET_INITIATOR_ID; 2318 mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 : 2319 ha->bus_settings[bus].id; 2320 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); 2321 2322 /* Reset Delay. */ 2323 ha->bus_settings[bus].bus_reset_delay = 2324 nv->bus[bus].bus_reset_delay; 2325 2326 /* Command queue depth per device. */ 2327 ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1; 2328 2329 /* Set target parameters. */ 2330 for (target = 0; target < MAX_TARGETS; target++) 2331 status |= qla1280_config_target(ha, bus, target); 2332 2333 return status; 2334 } 2335 2336 static int 2337 qla1280_nvram_config(struct scsi_qla_host *ha) 2338 { 2339 struct device_reg __iomem *reg = ha->iobase; 2340 struct nvram *nv = &ha->nvram; 2341 int bus, target, status = 0; 2342 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2343 2344 ENTER("qla1280_nvram_config"); 2345 2346 if (ha->nvram_valid) { 2347 /* Always force AUTO sense for LINUX SCSI */ 2348 for (bus = 0; bus < MAX_BUSES; bus++) 2349 for (target = 0; target < MAX_TARGETS; target++) { 2350 nv->bus[bus].target[target].parameter. 2351 auto_request_sense = 1; 2352 } 2353 } else { 2354 qla1280_set_defaults(ha); 2355 } 2356 2357 qla1280_print_settings(nv); 2358 2359 /* Disable RISC load of firmware. */ 2360 ha->flags.disable_risc_code_load = 2361 nv->cntr_flags_1.disable_loading_risc_code; 2362 2363 if (IS_ISP1040(ha)) { 2364 uint16_t hwrev, cfg1, cdma_conf, ddma_conf; 2365 2366 hwrev = RD_REG_WORD(®->cfg_0) & ISP_CFG0_HWMSK; 2367 2368 cfg1 = RD_REG_WORD(®->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6); 2369 cdma_conf = RD_REG_WORD(®->cdma_cfg); 2370 ddma_conf = RD_REG_WORD(®->ddma_cfg); 2371 2372 /* Busted fifo, says mjacob. 
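 * On the original 1040A the FIFO threshold bits are therefore left at
 * their reset value; later revisions get the NVRAM fifo_threshold
 * shifted into cfg_1 bits 4-6 (with the default of 3 set in
 * qla1280_set_defaults() that is 0x30).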
 */
2373 		if (hwrev != ISP_CFG0_1040A)
2374 			cfg1 |= nv->isp_config.fifo_threshold << 4;
2375 
2376 		cfg1 |= nv->isp_config.burst_enable << 2;
2377 		WRT_REG_WORD(&reg->cfg_1, cfg1);
2378 
2379 		WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
2380 		WRT_REG_WORD(&reg->ddma_cfg, ddma_conf | DDMA_CONF_BENAB);
2381 	} else {
2382 		uint16_t cfg1, term;
2383 
2384 		/* Set ISP hardware DMA burst */
2385 		cfg1 = nv->isp_config.fifo_threshold << 4;
2386 		cfg1 |= nv->isp_config.burst_enable << 2;
2387 		/* Enable DMA arbitration on dual channel controllers */
2388 		if (ha->ports > 1)
2389 			cfg1 |= BIT_13;
2390 		WRT_REG_WORD(&reg->cfg_1, cfg1);
2391 
2392 		/* Set SCSI termination. */
2393 		WRT_REG_WORD(&reg->gpio_enable,
2394 			     BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
2395 		term = nv->termination.scsi_bus_1_control;
2396 		term |= nv->termination.scsi_bus_0_control << 2;
2397 		term |= nv->termination.auto_term_support << 7;
2398 		RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2399 		WRT_REG_WORD(&reg->gpio_data, term);
2400 	}
2401 	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
2402 
2403 	/* ISP parameter word. */
2404 	mb[0] = MBC_SET_SYSTEM_PARAMETER;
2405 	mb[1] = nv->isp_parameter;
2406 	status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2407 
2408 	if (IS_ISP1x40(ha)) {
2409 		/* clock rate - for qla1240 and older only */
2410 		mb[0] = MBC_SET_CLOCK_RATE;
2411 		mb[1] = 40;
2412 		status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2413 	}
2414 
2415 	/* Firmware feature word. */
2416 	mb[0] = MBC_SET_FIRMWARE_FEATURES;
2417 	mb[1] = nv->firmware_feature.f.enable_fast_posting;
2418 	mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
2419 	mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
2420 #if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
2421 	if (ia64_platform_is("sn2")) {
2422 		printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
2423 		       "workaround\n", ha->host_no);
2424 		mb[1] |= nv->firmware_feature.f.unused_9 << 9; /* XXX */
2425 	}
2426 #endif
2427 	status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2428 
2429 	/* Retry count and delay. */
2430 	mb[0] = MBC_SET_RETRY_COUNT;
2431 	mb[1] = nv->bus[0].retry_count;
2432 	mb[2] = nv->bus[0].retry_delay;
2433 	mb[6] = nv->bus[1].retry_count;
2434 	mb[7] = nv->bus[1].retry_delay;
2435 	status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
2436 					  BIT_1 | BIT_0, &mb[0]);
2437 
2438 	/* ASYNC data setup time. */
2439 	mb[0] = MBC_SET_ASYNC_DATA_SETUP;
2440 	mb[1] = nv->bus[0].config_2.async_data_setup_time;
2441 	mb[2] = nv->bus[1].config_2.async_data_setup_time;
2442 	status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2443 
2444 	/* Active negation states.
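 * mb[1] covers bus 0 and mb[2] bus 1: BIT_5 enables req/ack active
 * negation and BIT_4 data line active negation, so with the NVRAM
 * defaults from qla1280_set_defaults() both words end up as 0x30.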
*/ 2445 mb[0] = MBC_SET_ACTIVE_NEGATION; 2446 mb[1] = 0; 2447 if (nv->bus[0].config_2.req_ack_active_negation) 2448 mb[1] |= BIT_5; 2449 if (nv->bus[0].config_2.data_line_active_negation) 2450 mb[1] |= BIT_4; 2451 mb[2] = 0; 2452 if (nv->bus[1].config_2.req_ack_active_negation) 2453 mb[2] |= BIT_5; 2454 if (nv->bus[1].config_2.data_line_active_negation) 2455 mb[2] |= BIT_4; 2456 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb); 2457 2458 mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY; 2459 mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */ 2460 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); 2461 2462 /* thingy */ 2463 mb[0] = MBC_SET_PCI_CONTROL; 2464 mb[1] = BIT_1; /* Data DMA Channel Burst Enable */ 2465 mb[2] = BIT_1; /* Command DMA Channel Burst Enable */ 2466 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb); 2467 2468 mb[0] = MBC_SET_TAG_AGE_LIMIT; 2469 mb[1] = 8; 2470 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); 2471 2472 /* Selection timeout. */ 2473 mb[0] = MBC_SET_SELECTION_TIMEOUT; 2474 mb[1] = nv->bus[0].selection_timeout; 2475 mb[2] = nv->bus[1].selection_timeout; 2476 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb); 2477 2478 for (bus = 0; bus < ha->ports; bus++) 2479 status |= qla1280_config_bus(ha, bus); 2480 2481 if (status) 2482 dprintk(2, "qla1280_nvram_config: **** FAILED ****\n"); 2483 2484 LEAVE("qla1280_nvram_config"); 2485 return status; 2486 } 2487 2488 /* 2489 * Get NVRAM data word 2490 * Calculates word position in NVRAM and calls request routine to 2491 * get the word from NVRAM. 2492 * 2493 * Input: 2494 * ha = adapter block pointer. 2495 * address = NVRAM word address. 2496 * 2497 * Returns: 2498 * data word. 2499 */ 2500 static uint16_t 2501 qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address) 2502 { 2503 uint32_t nv_cmd; 2504 uint16_t data; 2505 2506 nv_cmd = address << 16; 2507 nv_cmd |= NV_READ_OP; 2508 2509 data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd)); 2510 2511 dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = " 2512 "0x%x", data); 2513 2514 return data; 2515 } 2516 2517 /* 2518 * NVRAM request 2519 * Sends read command to NVRAM and gets data from NVRAM. 2520 * 2521 * Input: 2522 * ha = adapter block pointer. 2523 * nv_cmd = Bit 26 = start bit 2524 * Bit 25, 24 = opcode 2525 * Bit 23-16 = address 2526 * Bit 15-0 = write data 2527 * 2528 * Returns: 2529 * data word. 2530 */ 2531 static uint16_t 2532 qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd) 2533 { 2534 struct device_reg __iomem *reg = ha->iobase; 2535 int cnt; 2536 uint16_t data = 0; 2537 uint16_t reg_data; 2538 2539 /* Send command to NVRAM. */ 2540 2541 nv_cmd <<= 5; 2542 for (cnt = 0; cnt < 11; cnt++) { 2543 if (nv_cmd & BIT_31) 2544 qla1280_nv_write(ha, NV_DATA_OUT); 2545 else 2546 qla1280_nv_write(ha, 0); 2547 nv_cmd <<= 1; 2548 } 2549 2550 /* Read data from NVRAM. */ 2551 2552 for (cnt = 0; cnt < 16; cnt++) { 2553 WRT_REG_WORD(®->nvram, (NV_SELECT | NV_CLOCK)); 2554 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2555 NVRAM_DELAY(); 2556 data <<= 1; 2557 reg_data = RD_REG_WORD(®->nvram); 2558 if (reg_data & NV_DATA_IN) 2559 data |= BIT_0; 2560 WRT_REG_WORD(®->nvram, NV_SELECT); 2561 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2562 NVRAM_DELAY(); 2563 } 2564 2565 /* Deselect chip. 
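 * A note on the command phase above: nv_cmd <<= 5 moves the start bit
 * (bit 26) up to bit 31, so the 11-bit loop clocks out exactly the
 * start bit, the two opcode bits and the eight address bits, MSB
 * first; the 16 clocks that follow then shift the selected data word
 * back in, again MSB first.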
*/ 2566 2567 WRT_REG_WORD(®->nvram, NV_DESELECT); 2568 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2569 NVRAM_DELAY(); 2570 2571 return data; 2572 } 2573 2574 static void 2575 qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data) 2576 { 2577 struct device_reg __iomem *reg = ha->iobase; 2578 2579 WRT_REG_WORD(®->nvram, data | NV_SELECT); 2580 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2581 NVRAM_DELAY(); 2582 WRT_REG_WORD(®->nvram, data | NV_SELECT | NV_CLOCK); 2583 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2584 NVRAM_DELAY(); 2585 WRT_REG_WORD(®->nvram, data | NV_SELECT); 2586 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2587 NVRAM_DELAY(); 2588 } 2589 2590 /* 2591 * Mailbox Command 2592 * Issue mailbox command and waits for completion. 2593 * 2594 * Input: 2595 * ha = adapter block pointer. 2596 * mr = mailbox registers to load. 2597 * mb = data pointer for mailbox registers. 2598 * 2599 * Output: 2600 * mb[MAILBOX_REGISTER_COUNT] = returned mailbox data. 2601 * 2602 * Returns: 2603 * 0 = success 2604 */ 2605 static int 2606 qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) 2607 { 2608 struct device_reg __iomem *reg = ha->iobase; 2609 #if 0 2610 LIST_HEAD(done_q); 2611 #endif 2612 int status = 0; 2613 int cnt; 2614 uint16_t *optr, *iptr; 2615 uint16_t __iomem *mptr; 2616 uint16_t data; 2617 DECLARE_COMPLETION(wait); 2618 struct timer_list timer; 2619 2620 ENTER("qla1280_mailbox_command"); 2621 2622 if (ha->mailbox_wait) { 2623 printk(KERN_ERR "Warning mailbox wait already in use!\n"); 2624 } 2625 ha->mailbox_wait = &wait; 2626 2627 /* 2628 * We really should start out by verifying that the mailbox is 2629 * available before starting sending the command data 2630 */ 2631 /* Load mailbox registers. */ 2632 mptr = (uint16_t __iomem *) ®->mailbox0; 2633 iptr = mb; 2634 for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) { 2635 if (mr & BIT_0) { 2636 WRT_REG_WORD(mptr, (*iptr)); 2637 } 2638 2639 mr >>= 1; 2640 mptr++; 2641 iptr++; 2642 } 2643 2644 /* Issue set host interrupt command. */ 2645 2646 /* set up a timer just in case we're really jammed */ 2647 init_timer(&timer); 2648 timer.expires = jiffies + 20*HZ; 2649 timer.data = (unsigned long)ha; 2650 timer.function = qla1280_mailbox_timeout; 2651 add_timer(&timer); 2652 2653 spin_unlock_irq(HOST_LOCK); 2654 WRT_REG_WORD(®->host_cmd, HC_SET_HOST_INT); 2655 data = qla1280_debounce_register(®->istatus); 2656 2657 wait_for_completion(&wait); 2658 del_timer_sync(&timer); 2659 2660 spin_lock_irq(HOST_LOCK); 2661 2662 ha->mailbox_wait = NULL; 2663 2664 /* Check for mailbox command timeout. */ 2665 if (ha->mailbox_out[0] != MBS_CMD_CMP) { 2666 printk(KERN_WARNING "qla1280_mailbox_command: Command failed, " 2667 "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = " 2668 "0x%04x\n", 2669 mb[0], ha->mailbox_out[0], RD_REG_WORD(®->istatus)); 2670 printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n", 2671 RD_REG_WORD(®->mailbox0), RD_REG_WORD(®->mailbox1), 2672 RD_REG_WORD(®->mailbox2), RD_REG_WORD(®->mailbox3)); 2673 printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n", 2674 RD_REG_WORD(®->mailbox4), RD_REG_WORD(®->mailbox5), 2675 RD_REG_WORD(®->mailbox6), RD_REG_WORD(®->mailbox7)); 2676 status = 1; 2677 } 2678 2679 /* Load return mailbox registers. */ 2680 optr = mb; 2681 iptr = (uint16_t *) &ha->mailbox_out[0]; 2682 mr = MAILBOX_REGISTER_COUNT; 2683 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t)); 2684 2685 #if 0 2686 /* Go check for any response interrupts pending. 
*/ 2687 qla1280_isr(ha, &done_q); 2688 #endif 2689 2690 if (ha->flags.reset_marker) 2691 qla1280_rst_aen(ha); 2692 2693 #if 0 2694 if (!list_empty(&done_q)) 2695 qla1280_done(ha, &done_q); 2696 #endif 2697 2698 if (status) 2699 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = " 2700 "0x%x ****\n", mb[0]); 2701 2702 LEAVE("qla1280_mailbox_command"); 2703 return status; 2704 } 2705 2706 /* 2707 * qla1280_poll 2708 * Polls ISP for interrupts. 2709 * 2710 * Input: 2711 * ha = adapter block pointer. 2712 */ 2713 static void 2714 qla1280_poll(struct scsi_qla_host *ha) 2715 { 2716 struct device_reg __iomem *reg = ha->iobase; 2717 uint16_t data; 2718 LIST_HEAD(done_q); 2719 2720 /* ENTER("qla1280_poll"); */ 2721 2722 /* Check for pending interrupts. */ 2723 data = RD_REG_WORD(®->istatus); 2724 if (data & RISC_INT) 2725 qla1280_isr(ha, &done_q); 2726 2727 if (!ha->mailbox_wait) { 2728 if (ha->flags.reset_marker) 2729 qla1280_rst_aen(ha); 2730 } 2731 2732 if (!list_empty(&done_q)) 2733 qla1280_done(ha); 2734 2735 /* LEAVE("qla1280_poll"); */ 2736 } 2737 2738 /* 2739 * qla1280_bus_reset 2740 * Issue SCSI bus reset. 2741 * 2742 * Input: 2743 * ha = adapter block pointer. 2744 * bus = SCSI bus number. 2745 * 2746 * Returns: 2747 * 0 = success 2748 */ 2749 static int 2750 qla1280_bus_reset(struct scsi_qla_host *ha, int bus) 2751 { 2752 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2753 uint16_t reset_delay; 2754 int status; 2755 2756 dprintk(3, "qla1280_bus_reset: entered\n"); 2757 2758 if (qla1280_verbose) 2759 printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n", 2760 ha->host_no, bus); 2761 2762 reset_delay = ha->bus_settings[bus].bus_reset_delay; 2763 mb[0] = MBC_BUS_RESET; 2764 mb[1] = reset_delay; 2765 mb[2] = (uint16_t) bus; 2766 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]); 2767 2768 if (status) { 2769 if (ha->bus_settings[bus].failed_reset_count > 2) 2770 ha->bus_settings[bus].scsi_bus_dead = 1; 2771 ha->bus_settings[bus].failed_reset_count++; 2772 } else { 2773 spin_unlock_irq(HOST_LOCK); 2774 ssleep(reset_delay); 2775 spin_lock_irq(HOST_LOCK); 2776 2777 ha->bus_settings[bus].scsi_bus_dead = 0; 2778 ha->bus_settings[bus].failed_reset_count = 0; 2779 ha->bus_settings[bus].reset_marker = 0; 2780 /* Issue marker command. */ 2781 qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL); 2782 } 2783 2784 /* 2785 * We should probably call qla1280_set_target_parameters() 2786 * here as well for all devices on the bus. 2787 */ 2788 2789 if (status) 2790 dprintk(2, "qla1280_bus_reset: **** FAILED ****\n"); 2791 else 2792 dprintk(3, "qla1280_bus_reset: exiting normally\n"); 2793 2794 return status; 2795 } 2796 2797 /* 2798 * qla1280_device_reset 2799 * Issue bus device reset message to the target. 2800 * 2801 * Input: 2802 * ha = adapter block pointer. 2803 * bus = SCSI BUS number. 2804 * target = SCSI ID. 2805 * 2806 * Returns: 2807 * 0 = success 2808 */ 2809 static int 2810 qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target) 2811 { 2812 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2813 int status; 2814 2815 ENTER("qla1280_device_reset"); 2816 2817 mb[0] = MBC_ABORT_TARGET; 2818 mb[1] = (bus ? (target | BIT_7) : target) << 8; 2819 mb[2] = 1; 2820 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]); 2821 2822 /* Issue marker command. 
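 * Same pattern as the bus reset path above: every reset or abort is
 * followed by a marker IOCB (MK_SYNC_ID here, MK_SYNC_ALL after a full
 * bus reset), presumably so the firmware resynchronises with the
 * target before new commands go out.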
*/ 2823 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID); 2824 2825 if (status) 2826 dprintk(2, "qla1280_device_reset: **** FAILED ****\n"); 2827 2828 LEAVE("qla1280_device_reset"); 2829 return status; 2830 } 2831 2832 /* 2833 * qla1280_abort_device 2834 * Issue an abort message to the device 2835 * 2836 * Input: 2837 * ha = adapter block pointer. 2838 * bus = SCSI BUS. 2839 * target = SCSI ID. 2840 * lun = SCSI LUN. 2841 * 2842 * Returns: 2843 * 0 = success 2844 */ 2845 static int 2846 qla1280_abort_device(struct scsi_qla_host *ha, int bus, int target, int lun) 2847 { 2848 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2849 int status; 2850 2851 ENTER("qla1280_abort_device"); 2852 2853 mb[0] = MBC_ABORT_DEVICE; 2854 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun; 2855 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); 2856 2857 /* Issue marker command. */ 2858 qla1280_marker(ha, bus, target, lun, MK_SYNC_ID_LUN); 2859 2860 if (status) 2861 dprintk(2, "qla1280_abort_device: **** FAILED ****\n"); 2862 2863 LEAVE("qla1280_abort_device"); 2864 return status; 2865 } 2866 2867 /* 2868 * qla1280_abort_command 2869 * Abort command aborts a specified IOCB. 2870 * 2871 * Input: 2872 * ha = adapter block pointer. 2873 * sp = SB structure pointer. 2874 * 2875 * Returns: 2876 * 0 = success 2877 */ 2878 static int 2879 qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle) 2880 { 2881 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2882 unsigned int bus, target, lun; 2883 int status; 2884 2885 ENTER("qla1280_abort_command"); 2886 2887 bus = SCSI_BUS_32(sp->cmd); 2888 target = SCSI_TCN_32(sp->cmd); 2889 lun = SCSI_LUN_32(sp->cmd); 2890 2891 sp->flags |= SRB_ABORT_PENDING; 2892 2893 mb[0] = MBC_ABORT_COMMAND; 2894 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun; 2895 mb[2] = handle >> 16; 2896 mb[3] = handle & 0xffff; 2897 status = qla1280_mailbox_command(ha, 0x0f, &mb[0]); 2898 2899 if (status) { 2900 dprintk(2, "qla1280_abort_command: **** FAILED ****\n"); 2901 sp->flags &= ~SRB_ABORT_PENDING; 2902 } 2903 2904 2905 LEAVE("qla1280_abort_command"); 2906 return status; 2907 } 2908 2909 /* 2910 * qla1280_reset_adapter 2911 * Reset adapter. 2912 * 2913 * Input: 2914 * ha = adapter block pointer. 2915 */ 2916 static void 2917 qla1280_reset_adapter(struct scsi_qla_host *ha) 2918 { 2919 struct device_reg __iomem *reg = ha->iobase; 2920 2921 ENTER("qla1280_reset_adapter"); 2922 2923 /* Disable ISP chip */ 2924 ha->flags.online = 0; 2925 WRT_REG_WORD(®->ictrl, ISP_RESET); 2926 WRT_REG_WORD(®->host_cmd, 2927 HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS); 2928 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2929 2930 LEAVE("qla1280_reset_adapter"); 2931 } 2932 2933 /* 2934 * Issue marker command. 2935 * Function issues marker IOCB. 2936 * 2937 * Input: 2938 * ha = adapter block pointer. 2939 * bus = SCSI BUS number 2940 * id = SCSI ID 2941 * lun = SCSI LUN 2942 * type = marker modifier 2943 */ 2944 static void 2945 qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type) 2946 { 2947 struct mrk_entry *pkt; 2948 2949 ENTER("qla1280_marker"); 2950 2951 /* Get request packet. */ 2952 if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) { 2953 pkt->entry_type = MARKER_TYPE; 2954 pkt->lun = (uint8_t) lun; 2955 pkt->target = (uint8_t) (bus ? 
(id | BIT_7) : id); 2956 pkt->modifier = type; 2957 pkt->entry_status = 0; 2958 2959 /* Issue command to ISP */ 2960 qla1280_isp_cmd(ha); 2961 } 2962 2963 LEAVE("qla1280_marker"); 2964 } 2965 2966 2967 /* 2968 * qla1280_64bit_start_scsi 2969 * The start SCSI is responsible for building request packets on 2970 * request ring and modifying ISP input pointer. 2971 * 2972 * Input: 2973 * ha = adapter block pointer. 2974 * sp = SB structure pointer. 2975 * 2976 * Returns: 2977 * 0 = success, was able to issue command. 2978 */ 2979 #ifdef QLA_64BIT_PTR 2980 static int 2981 qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) 2982 { 2983 struct device_reg __iomem *reg = ha->iobase; 2984 struct scsi_cmnd *cmd = sp->cmd; 2985 cmd_a64_entry_t *pkt; 2986 struct scatterlist *sg = NULL; 2987 __le32 *dword_ptr; 2988 dma_addr_t dma_handle; 2989 int status = 0; 2990 int cnt; 2991 int req_cnt; 2992 u16 seg_cnt; 2993 u8 dir; 2994 2995 ENTER("qla1280_64bit_start_scsi:"); 2996 2997 /* Calculate number of entries and segments required. */ 2998 req_cnt = 1; 2999 if (cmd->use_sg) { 3000 sg = (struct scatterlist *) cmd->request_buffer; 3001 seg_cnt = pci_map_sg(ha->pdev, sg, cmd->use_sg, 3002 cmd->sc_data_direction); 3003 3004 if (seg_cnt > 2) { 3005 req_cnt += (seg_cnt - 2) / 5; 3006 if ((seg_cnt - 2) % 5) 3007 req_cnt++; 3008 } 3009 } else if (cmd->request_bufflen) { /* If data transfer. */ 3010 seg_cnt = 1; 3011 } else { 3012 seg_cnt = 0; 3013 } 3014 3015 if ((req_cnt + 2) >= ha->req_q_cnt) { 3016 /* Calculate number of free request entries. */ 3017 cnt = RD_REG_WORD(®->mailbox4); 3018 if (ha->req_ring_index < cnt) 3019 ha->req_q_cnt = cnt - ha->req_ring_index; 3020 else 3021 ha->req_q_cnt = 3022 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt); 3023 } 3024 3025 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n", 3026 ha->req_q_cnt, seg_cnt); 3027 3028 /* If room for request in request ring. */ 3029 if ((req_cnt + 2) >= ha->req_q_cnt) { 3030 status = 1; 3031 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt=" 3032 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt, 3033 req_cnt); 3034 goto out; 3035 } 3036 3037 /* Check for room in outstanding command list. */ 3038 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS && 3039 ha->outstanding_cmds[cnt] != 0; cnt++); 3040 3041 if (cnt >= MAX_OUTSTANDING_COMMANDS) { 3042 status = 1; 3043 dprintk(2, "qla1280_start_scsi: NO ROOM IN " 3044 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt); 3045 goto out; 3046 } 3047 3048 ha->outstanding_cmds[cnt] = sp; 3049 ha->req_q_cnt -= req_cnt; 3050 CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1); 3051 3052 dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp, 3053 cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd)); 3054 dprintk(2, " bus %i, target %i, lun %i\n", 3055 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); 3056 qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE); 3057 3058 /* 3059 * Build command packet. 3060 */ 3061 pkt = (cmd_a64_entry_t *) ha->request_ring_ptr; 3062 3063 pkt->entry_type = COMMAND_A64_TYPE; 3064 pkt->entry_count = (uint8_t) req_cnt; 3065 pkt->sys_define = (uint8_t) ha->req_ring_index; 3066 pkt->entry_status = 0; 3067 pkt->handle = cpu_to_le32(cnt); 3068 3069 /* Zero out remaining portion of packet. */ 3070 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); 3071 3072 /* Set ISP command timeout. */ 3073 pkt->timeout = cpu_to_le16(30); 3074 3075 /* Set device target ID and LUN */ 3076 pkt->lun = SCSI_LUN_32(cmd); 3077 pkt->target = SCSI_BUS_32(cmd) ? 
3078 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd); 3079 3080 /* Enable simple tag queuing if device supports it. */ 3081 if (DEV_SIMPLE_TAGS(cmd->device)) 3082 pkt->control_flags |= cpu_to_le16(BIT_3); 3083 3084 /* Load SCSI command packet. */ 3085 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd)); 3086 memcpy(pkt->scsi_cdb, &(CMD_CDBP(cmd)), CMD_CDBLEN(cmd)); 3087 /* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */ 3088 3089 /* Set transfer direction. */ 3090 dir = qla1280_data_direction(cmd); 3091 pkt->control_flags |= cpu_to_le16(dir); 3092 3093 /* Set total data segment count. */ 3094 pkt->dseg_count = cpu_to_le16(seg_cnt); 3095 3096 /* 3097 * Load data segments. 3098 */ 3099 if (seg_cnt) { /* If data transfer. */ 3100 /* Setup packet address segment pointer. */ 3101 dword_ptr = (u32 *)&pkt->dseg_0_address; 3102 3103 if (cmd->use_sg) { /* If scatter gather */ 3104 /* Load command entry data segments. */ 3105 for (cnt = 0; cnt < 2 && seg_cnt; cnt++, seg_cnt--) { 3106 dma_handle = sg_dma_address(sg); 3107 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 3108 if (ha->flags.use_pci_vchannel) 3109 sn_pci_set_vchan(ha->pdev, 3110 (unsigned long *)&dma_handle, 3111 SCSI_BUS_32(cmd)); 3112 #endif 3113 *dword_ptr++ = 3114 cpu_to_le32(pci_dma_lo32(dma_handle)); 3115 *dword_ptr++ = 3116 cpu_to_le32(pci_dma_hi32(dma_handle)); 3117 *dword_ptr++ = cpu_to_le32(sg_dma_len(sg)); 3118 sg++; 3119 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n", 3120 cpu_to_le32(pci_dma_hi32(dma_handle)), 3121 cpu_to_le32(pci_dma_lo32(dma_handle)), 3122 cpu_to_le32(sg_dma_len(sg))); 3123 } 3124 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather " 3125 "command packet data - b %i, t %i, l %i \n", 3126 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), 3127 SCSI_LUN_32(cmd)); 3128 qla1280_dump_buffer(5, (char *)pkt, 3129 REQUEST_ENTRY_SIZE); 3130 3131 /* 3132 * Build continuation packets. 3133 */ 3134 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x " 3135 "remains\n", seg_cnt); 3136 3137 while (seg_cnt > 0) { 3138 /* Adjust ring index. */ 3139 ha->req_ring_index++; 3140 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 3141 ha->req_ring_index = 0; 3142 ha->request_ring_ptr = 3143 ha->request_ring; 3144 } else 3145 ha->request_ring_ptr++; 3146 3147 pkt = (cmd_a64_entry_t *)ha->request_ring_ptr; 3148 3149 /* Zero out packet. */ 3150 memset(pkt, 0, REQUEST_ENTRY_SIZE); 3151 3152 /* Load packet defaults. */ 3153 ((struct cont_a64_entry *) pkt)->entry_type = 3154 CONTINUE_A64_TYPE; 3155 ((struct cont_a64_entry *) pkt)->entry_count = 1; 3156 ((struct cont_a64_entry *) pkt)->sys_define = 3157 (uint8_t)ha->req_ring_index; 3158 /* Setup packet address segment pointer. */ 3159 dword_ptr = 3160 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address; 3161 3162 /* Load continuation entry data segments. */ 3163 for (cnt = 0; cnt < 5 && seg_cnt; 3164 cnt++, seg_cnt--) { 3165 dma_handle = sg_dma_address(sg); 3166 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 3167 if (ha->flags.use_pci_vchannel) 3168 sn_pci_set_vchan(ha->pdev, 3169 (unsigned long *)&dma_handle, 3170 SCSI_BUS_32(cmd)); 3171 #endif 3172 *dword_ptr++ = 3173 cpu_to_le32(pci_dma_lo32(dma_handle)); 3174 *dword_ptr++ = 3175 cpu_to_le32(pci_dma_hi32(dma_handle)); 3176 *dword_ptr++ = 3177 cpu_to_le32(sg_dma_len(sg)); 3178 dprintk(3, "S/G Segment Cont. 
phys_addr=%x %x, len=0x%x\n", 3179 cpu_to_le32(pci_dma_hi32(dma_handle)), 3180 cpu_to_le32(pci_dma_lo32(dma_handle)), 3181 cpu_to_le32(sg_dma_len(sg))); 3182 sg++; 3183 } 3184 dprintk(5, "qla1280_64bit_start_scsi: " 3185 "continuation packet data - b %i, t " 3186 "%i, l %i \n", SCSI_BUS_32(cmd), 3187 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); 3188 qla1280_dump_buffer(5, (char *)pkt, 3189 REQUEST_ENTRY_SIZE); 3190 } 3191 } else { /* No scatter gather data transfer */ 3192 dma_handle = pci_map_single(ha->pdev, 3193 cmd->request_buffer, 3194 cmd->request_bufflen, 3195 cmd->sc_data_direction); 3196 3197 sp->saved_dma_handle = dma_handle; 3198 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 3199 if (ha->flags.use_pci_vchannel) 3200 sn_pci_set_vchan(ha->pdev, 3201 (unsigned long *)&dma_handle, 3202 SCSI_BUS_32(cmd)); 3203 #endif 3204 *dword_ptr++ = cpu_to_le32(pci_dma_lo32(dma_handle)); 3205 *dword_ptr++ = cpu_to_le32(pci_dma_hi32(dma_handle)); 3206 *dword_ptr = cpu_to_le32(cmd->request_bufflen); 3207 3208 dprintk(5, "qla1280_64bit_start_scsi: No scatter/" 3209 "gather command packet data - b %i, t %i, " 3210 "l %i \n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), 3211 SCSI_LUN_32(cmd)); 3212 qla1280_dump_buffer(5, (char *)pkt, 3213 REQUEST_ENTRY_SIZE); 3214 } 3215 } else { /* No data transfer */ 3216 dprintk(5, "qla1280_64bit_start_scsi: No data, command " 3217 "packet data - b %i, t %i, l %i \n", 3218 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); 3219 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE); 3220 } 3221 /* Adjust ring index. */ 3222 ha->req_ring_index++; 3223 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 3224 ha->req_ring_index = 0; 3225 ha->request_ring_ptr = ha->request_ring; 3226 } else 3227 ha->request_ring_ptr++; 3228 3229 /* Set chip new ring index. */ 3230 dprintk(2, 3231 "qla1280_64bit_start_scsi: Wakeup RISC for pending command\n"); 3232 sp->flags |= SRB_SENT; 3233 ha->actthreads++; 3234 WRT_REG_WORD(®->mailbox4, ha->req_ring_index); 3235 /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */ 3236 mmiowb(); 3237 3238 out: 3239 if (status) 3240 dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n"); 3241 else 3242 dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n"); 3243 3244 return status; 3245 } 3246 #else /* !QLA_64BIT_PTR */ 3247 3248 /* 3249 * qla1280_32bit_start_scsi 3250 * The start SCSI is responsible for building request packets on 3251 * request ring and modifying ISP input pointer. 3252 * 3253 * The Qlogic firmware interface allows every queue slot to have a SCSI 3254 * command and up to 4 scatter/gather (SG) entries. If we need more 3255 * than 4 SG entries, then continuation entries are used that can 3256 * hold another 7 entries each. The start routine determines if there 3257 * is eought empty slots then build the combination of requests to 3258 * fulfill the OS request. 3259 * 3260 * Input: 3261 * ha = adapter block pointer. 3262 * sp = SCSI Request Block structure pointer. 3263 * 3264 * Returns: 3265 * 0 = success, was able to issue command. 
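 *
 * For example, a 20 segment transfer needs the command entry (4 SG
 * slots) plus ceil((20 - 4) / 7) = 3 continuation entries, which is
 * exactly what the (seg_cnt - 4) / 7 arithmetic below computes.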
3266 */ 3267 static int 3268 qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) 3269 { 3270 struct device_reg __iomem *reg = ha->iobase; 3271 struct scsi_cmnd *cmd = sp->cmd; 3272 struct cmd_entry *pkt; 3273 struct scatterlist *sg = NULL; 3274 __le32 *dword_ptr; 3275 int status = 0; 3276 int cnt; 3277 int req_cnt; 3278 uint16_t seg_cnt; 3279 dma_addr_t dma_handle; 3280 u8 dir; 3281 3282 ENTER("qla1280_32bit_start_scsi"); 3283 3284 dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp, 3285 cmd->cmnd[0]); 3286 3287 /* Calculate number of entries and segments required. */ 3288 req_cnt = 1; 3289 if (cmd->use_sg) { 3290 /* 3291 * We must build an SG list in adapter format, as the kernel's 3292 * SG list cannot be used directly because of data field size 3293 * (__alpha__) differences and the kernel SG list uses virtual 3294 * addresses where we need physical addresses. 3295 */ 3296 sg = (struct scatterlist *) cmd->request_buffer; 3297 seg_cnt = pci_map_sg(ha->pdev, sg, cmd->use_sg, 3298 cmd->sc_data_direction); 3299 3300 /* 3301 * if greater than four sg entries then we need to allocate 3302 * continuation entries 3303 */ 3304 if (seg_cnt > 4) { 3305 req_cnt += (seg_cnt - 4) / 7; 3306 if ((seg_cnt - 4) % 7) 3307 req_cnt++; 3308 } 3309 dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n", 3310 cmd, seg_cnt, req_cnt); 3311 } else if (cmd->request_bufflen) { /* If data transfer. */ 3312 dprintk(3, "No S/G transfer t=%x cmd=%p len=%x CDB=%x\n", 3313 SCSI_TCN_32(cmd), cmd, cmd->request_bufflen, 3314 cmd->cmnd[0]); 3315 seg_cnt = 1; 3316 } else { 3317 /* dprintk(1, "No data transfer \n"); */ 3318 seg_cnt = 0; 3319 } 3320 3321 if ((req_cnt + 2) >= ha->req_q_cnt) { 3322 /* Calculate number of free request entries. */ 3323 cnt = RD_REG_WORD(®->mailbox4); 3324 if (ha->req_ring_index < cnt) 3325 ha->req_q_cnt = cnt - ha->req_ring_index; 3326 else 3327 ha->req_q_cnt = 3328 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt); 3329 } 3330 3331 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n", 3332 ha->req_q_cnt, seg_cnt); 3333 /* If room for request in request ring. */ 3334 if ((req_cnt + 2) >= ha->req_q_cnt) { 3335 status = 1; 3336 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, " 3337 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index, 3338 ha->req_q_cnt, req_cnt); 3339 goto out; 3340 } 3341 3342 /* Check for empty slot in outstanding command list. */ 3343 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS && 3344 (ha->outstanding_cmds[cnt] != 0); cnt++) ; 3345 3346 if (cnt >= MAX_OUTSTANDING_COMMANDS) { 3347 status = 1; 3348 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING " 3349 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt); 3350 goto out; 3351 } 3352 3353 CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1); 3354 ha->outstanding_cmds[cnt] = sp; 3355 ha->req_q_cnt -= req_cnt; 3356 3357 /* 3358 * Build command packet. 3359 */ 3360 pkt = (struct cmd_entry *) ha->request_ring_ptr; 3361 3362 pkt->entry_type = COMMAND_TYPE; 3363 pkt->entry_count = (uint8_t) req_cnt; 3364 pkt->sys_define = (uint8_t) ha->req_ring_index; 3365 pkt->entry_status = 0; 3366 pkt->handle = cpu_to_le32(cnt); 3367 3368 /* Zero out remaining portion of packet. */ 3369 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); 3370 3371 /* Set ISP command timeout. */ 3372 pkt->timeout = cpu_to_le16(30); 3373 3374 /* Set device target ID and LUN */ 3375 pkt->lun = SCSI_LUN_32(cmd); 3376 pkt->target = SCSI_BUS_32(cmd) ? 
3377 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd); 3378 3379 /* Enable simple tag queuing if device supports it. */ 3380 if (DEV_SIMPLE_TAGS(cmd->device)) 3381 pkt->control_flags |= cpu_to_le16(BIT_3); 3382 3383 /* Load SCSI command packet. */ 3384 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd)); 3385 memcpy(pkt->scsi_cdb, &(CMD_CDBP(cmd)), CMD_CDBLEN(cmd)); 3386 3387 /*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */ 3388 /* Set transfer direction. */ 3389 dir = qla1280_data_direction(cmd); 3390 pkt->control_flags |= cpu_to_le16(dir); 3391 3392 /* Set total data segment count. */ 3393 pkt->dseg_count = cpu_to_le16(seg_cnt); 3394 3395 /* 3396 * Load data segments. 3397 */ 3398 if (seg_cnt) { 3399 /* Setup packet address segment pointer. */ 3400 dword_ptr = &pkt->dseg_0_address; 3401 3402 if (cmd->use_sg) { /* If scatter gather */ 3403 dprintk(3, "Building S/G data segments..\n"); 3404 qla1280_dump_buffer(1, (char *)sg, 4 * 16); 3405 3406 /* Load command entry data segments. */ 3407 for (cnt = 0; cnt < 4 && seg_cnt; cnt++, seg_cnt--) { 3408 *dword_ptr++ = 3409 cpu_to_le32(pci_dma_lo32(sg_dma_address(sg))); 3410 *dword_ptr++ = 3411 cpu_to_le32(sg_dma_len(sg)); 3412 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n", 3413 (pci_dma_lo32(sg_dma_address(sg))), 3414 (sg_dma_len(sg))); 3415 sg++; 3416 } 3417 /* 3418 * Build continuation packets. 3419 */ 3420 dprintk(3, "S/G Building Continuation" 3421 "...seg_cnt=0x%x remains\n", seg_cnt); 3422 while (seg_cnt > 0) { 3423 /* Adjust ring index. */ 3424 ha->req_ring_index++; 3425 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 3426 ha->req_ring_index = 0; 3427 ha->request_ring_ptr = 3428 ha->request_ring; 3429 } else 3430 ha->request_ring_ptr++; 3431 3432 pkt = (struct cmd_entry *)ha->request_ring_ptr; 3433 3434 /* Zero out packet. */ 3435 memset(pkt, 0, REQUEST_ENTRY_SIZE); 3436 3437 /* Load packet defaults. */ 3438 ((struct cont_entry *) pkt)-> 3439 entry_type = CONTINUE_TYPE; 3440 ((struct cont_entry *) pkt)->entry_count = 1; 3441 3442 ((struct cont_entry *) pkt)->sys_define = 3443 (uint8_t) ha->req_ring_index; 3444 3445 /* Setup packet address segment pointer. */ 3446 dword_ptr = 3447 &((struct cont_entry *) pkt)->dseg_0_address; 3448 3449 /* Load continuation entry data segments. */ 3450 for (cnt = 0; cnt < 7 && seg_cnt; 3451 cnt++, seg_cnt--) { 3452 *dword_ptr++ = 3453 cpu_to_le32(pci_dma_lo32(sg_dma_address(sg))); 3454 *dword_ptr++ = 3455 cpu_to_le32(sg_dma_len(sg)); 3456 dprintk(1, 3457 "S/G Segment Cont. 
phys_addr=0x%x, " 3458 "len=0x%x\n", 3459 cpu_to_le32(pci_dma_lo32(sg_dma_address(sg))), 3460 cpu_to_le32(sg_dma_len(sg))); 3461 sg++; 3462 } 3463 dprintk(5, "qla1280_32bit_start_scsi: " 3464 "continuation packet data - " 3465 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd), 3466 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); 3467 qla1280_dump_buffer(5, (char *)pkt, 3468 REQUEST_ENTRY_SIZE); 3469 } 3470 } else { /* No S/G data transfer */ 3471 dma_handle = pci_map_single(ha->pdev, 3472 cmd->request_buffer, 3473 cmd->request_bufflen, 3474 cmd->sc_data_direction); 3475 sp->saved_dma_handle = dma_handle; 3476 3477 *dword_ptr++ = cpu_to_le32(pci_dma_lo32(dma_handle)); 3478 *dword_ptr = cpu_to_le32(cmd->request_bufflen); 3479 } 3480 } else { /* No data transfer at all */ 3481 dprintk(5, "qla1280_32bit_start_scsi: No data, command " 3482 "packet data - \n"); 3483 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE); 3484 } 3485 dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n"); 3486 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr, 3487 REQUEST_ENTRY_SIZE); 3488 3489 /* Adjust ring index. */ 3490 ha->req_ring_index++; 3491 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 3492 ha->req_ring_index = 0; 3493 ha->request_ring_ptr = ha->request_ring; 3494 } else 3495 ha->request_ring_ptr++; 3496 3497 /* Set chip new ring index. */ 3498 dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC " 3499 "for pending command\n"); 3500 sp->flags |= SRB_SENT; 3501 ha->actthreads++; 3502 WRT_REG_WORD(®->mailbox4, ha->req_ring_index); 3503 /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */ 3504 mmiowb(); 3505 3506 out: 3507 if (status) 3508 dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n"); 3509 3510 LEAVE("qla1280_32bit_start_scsi"); 3511 3512 return status; 3513 } 3514 #endif 3515 3516 /* 3517 * qla1280_req_pkt 3518 * Function is responsible for locking ring and 3519 * getting a zeroed out request packet. 3520 * 3521 * Input: 3522 * ha = adapter block pointer. 3523 * 3524 * Returns: 3525 * 0 = failed to get slot. 3526 */ 3527 static request_t * 3528 qla1280_req_pkt(struct scsi_qla_host *ha) 3529 { 3530 struct device_reg __iomem *reg = ha->iobase; 3531 request_t *pkt = NULL; 3532 int cnt; 3533 uint32_t timer; 3534 3535 ENTER("qla1280_req_pkt"); 3536 3537 /* 3538 * This can be called from interrupt context, damn it!!! 3539 */ 3540 /* Wait for 30 seconds for slot. */ 3541 for (timer = 15000000; timer; timer--) { 3542 if (ha->req_q_cnt > 0) { 3543 /* Calculate number of free request entries. */ 3544 cnt = RD_REG_WORD(®->mailbox4); 3545 if (ha->req_ring_index < cnt) 3546 ha->req_q_cnt = cnt - ha->req_ring_index; 3547 else 3548 ha->req_q_cnt = 3549 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt); 3550 } 3551 3552 /* Found empty request ring slot? */ 3553 if (ha->req_q_cnt > 0) { 3554 ha->req_q_cnt--; 3555 pkt = ha->request_ring_ptr; 3556 3557 /* Zero out packet. */ 3558 memset(pkt, 0, REQUEST_ENTRY_SIZE); 3559 3560 /* 3561 * How can this be right when we have a ring 3562 * size of 512??? 3563 */ 3564 /* Set system defined field. */ 3565 pkt->sys_define = (uint8_t) ha->req_ring_index; 3566 3567 /* Set entry count. */ 3568 pkt->entry_count = 1; 3569 3570 break; 3571 } 3572 3573 udelay(2); /* 10 */ 3574 3575 /* Check for pending interrupts. 
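 * qla1280_poll() below keeps response queue completions and mailbox
 * events moving while we busy-wait here (15000000 passes of udelay(2),
 * i.e. the ~30 seconds mentioned above), so a full request ring gets a
 * chance to drain and free up a slot.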
*/ 3576 qla1280_poll(ha); 3577 } 3578 3579 if (!pkt) 3580 dprintk(2, "qla1280_req_pkt: **** FAILED ****\n"); 3581 else 3582 dprintk(3, "qla1280_req_pkt: exiting normally\n"); 3583 3584 return pkt; 3585 } 3586 3587 /* 3588 * qla1280_isp_cmd 3589 * Function is responsible for modifying ISP input pointer. 3590 * Releases ring lock. 3591 * 3592 * Input: 3593 * ha = adapter block pointer. 3594 */ 3595 static void 3596 qla1280_isp_cmd(struct scsi_qla_host *ha) 3597 { 3598 struct device_reg __iomem *reg = ha->iobase; 3599 3600 ENTER("qla1280_isp_cmd"); 3601 3602 dprintk(5, "qla1280_isp_cmd: IOCB data:\n"); 3603 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr, 3604 REQUEST_ENTRY_SIZE); 3605 3606 /* Adjust ring index. */ 3607 ha->req_ring_index++; 3608 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 3609 ha->req_ring_index = 0; 3610 ha->request_ring_ptr = ha->request_ring; 3611 } else 3612 ha->request_ring_ptr++; 3613 3614 /* 3615 * Update request index to mailbox4 (Request Queue In). 3616 * The mmiowb() ensures that this write is ordered with writes by other 3617 * CPUs. Without the mmiowb(), it is possible for the following: 3618 * CPUA posts write of index 5 to mailbox4 3619 * CPUA releases host lock 3620 * CPUB acquires host lock 3621 * CPUB posts write of index 6 to mailbox4 3622 * On PCI bus, order reverses and write of 6 posts, then index 5, 3623 * causing chip to issue full queue of stale commands 3624 * The mmiowb() prevents future writes from crossing the barrier. 3625 * See Documentation/DocBook/deviceiobook.tmpl for more information. 3626 */ 3627 WRT_REG_WORD(®->mailbox4, ha->req_ring_index); 3628 mmiowb(); 3629 3630 LEAVE("qla1280_isp_cmd"); 3631 } 3632 3633 /****************************************************************************/ 3634 /* Interrupt Service Routine. */ 3635 /****************************************************************************/ 3636 3637 /**************************************************************************** 3638 * qla1280_isr 3639 * Calls I/O done on command completion. 3640 * 3641 * Input: 3642 * ha = adapter block pointer. 3643 * done_q = done queue. 3644 ****************************************************************************/ 3645 static void 3646 qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q) 3647 { 3648 struct device_reg __iomem *reg = ha->iobase; 3649 struct response *pkt; 3650 struct srb *sp = NULL; 3651 uint16_t mailbox[MAILBOX_REGISTER_COUNT]; 3652 uint16_t *wptr; 3653 uint32_t index; 3654 u16 istatus; 3655 3656 ENTER("qla1280_isr"); 3657 3658 istatus = RD_REG_WORD(®->istatus); 3659 if (!(istatus & (RISC_INT | PCI_INT))) 3660 return; 3661 3662 /* Save mailbox register 5 */ 3663 mailbox[5] = RD_REG_WORD(®->mailbox5); 3664 3665 /* Check for mailbox interrupt. */ 3666 3667 mailbox[0] = RD_REG_WORD_dmasync(®->semaphore); 3668 3669 if (mailbox[0] & BIT_0) { 3670 /* Get mailbox data. */ 3671 /* dprintk(1, "qla1280_isr: In Get mailbox data \n"); */ 3672 3673 wptr = &mailbox[0]; 3674 *wptr++ = RD_REG_WORD(®->mailbox0); 3675 *wptr++ = RD_REG_WORD(®->mailbox1); 3676 *wptr = RD_REG_WORD(®->mailbox2); 3677 if (mailbox[0] != MBA_SCSI_COMPLETION) { 3678 wptr++; 3679 *wptr++ = RD_REG_WORD(®->mailbox3); 3680 *wptr++ = RD_REG_WORD(®->mailbox4); 3681 wptr++; 3682 *wptr++ = RD_REG_WORD(®->mailbox6); 3683 *wptr = RD_REG_WORD(®->mailbox7); 3684 } 3685 3686 /* Release mailbox registers. 
*/ 3687 3688 WRT_REG_WORD(®->semaphore, 0); 3689 WRT_REG_WORD(®->host_cmd, HC_CLR_RISC_INT); 3690 3691 dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x", 3692 mailbox[0]); 3693 3694 /* Handle asynchronous event */ 3695 switch (mailbox[0]) { 3696 case MBA_SCSI_COMPLETION: /* Response completion */ 3697 dprintk(5, "qla1280_isr: mailbox SCSI response " 3698 "completion\n"); 3699 3700 if (ha->flags.online) { 3701 /* Get outstanding command index. */ 3702 index = mailbox[2] << 16 | mailbox[1]; 3703 3704 /* Validate handle. */ 3705 if (index < MAX_OUTSTANDING_COMMANDS) 3706 sp = ha->outstanding_cmds[index]; 3707 else 3708 sp = NULL; 3709 3710 if (sp) { 3711 /* Free outstanding command slot. */ 3712 ha->outstanding_cmds[index] = NULL; 3713 3714 /* Save ISP completion status */ 3715 CMD_RESULT(sp->cmd) = 0; 3716 3717 /* Place block on done queue */ 3718 list_add_tail(&sp->list, done_q); 3719 } else { 3720 /* 3721 * If we get here we have a real problem! 3722 */ 3723 printk(KERN_WARNING 3724 "qla1280: ISP invalid handle"); 3725 } 3726 } 3727 break; 3728 3729 case MBA_BUS_RESET: /* SCSI Bus Reset */ 3730 ha->flags.reset_marker = 1; 3731 index = mailbox[6] & BIT_0; 3732 ha->bus_settings[index].reset_marker = 1; 3733 3734 printk(KERN_DEBUG "qla1280_isr(): index %i " 3735 "asynchronous BUS_RESET\n", index); 3736 break; 3737 3738 case MBA_SYSTEM_ERR: /* System Error */ 3739 printk(KERN_WARNING 3740 "qla1280: ISP System Error - mbx1=%xh, mbx2=" 3741 "%xh, mbx3=%xh\n", mailbox[1], mailbox[2], 3742 mailbox[3]); 3743 break; 3744 3745 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 3746 printk(KERN_WARNING 3747 "qla1280: ISP Request Transfer Error\n"); 3748 break; 3749 3750 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 3751 printk(KERN_WARNING 3752 "qla1280: ISP Response Transfer Error\n"); 3753 break; 3754 3755 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 3756 dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n"); 3757 break; 3758 3759 case MBA_TIMEOUT_RESET: /* Execution Timeout Reset */ 3760 dprintk(2, 3761 "qla1280_isr: asynchronous TIMEOUT_RESET\n"); 3762 break; 3763 3764 case MBA_DEVICE_RESET: /* Bus Device Reset */ 3765 printk(KERN_INFO "qla1280_isr(): asynchronous " 3766 "BUS_DEVICE_RESET\n"); 3767 3768 ha->flags.reset_marker = 1; 3769 index = mailbox[6] & BIT_0; 3770 ha->bus_settings[index].reset_marker = 1; 3771 break; 3772 3773 case MBA_BUS_MODE_CHANGE: 3774 dprintk(2, 3775 "qla1280_isr: asynchronous BUS_MODE_CHANGE\n"); 3776 break; 3777 3778 default: 3779 /* dprintk(1, "qla1280_isr: default case of switch MB \n"); */ 3780 if (mailbox[0] < MBA_ASYNC_EVENT) { 3781 wptr = &mailbox[0]; 3782 memcpy((uint16_t *) ha->mailbox_out, wptr, 3783 MAILBOX_REGISTER_COUNT * 3784 sizeof(uint16_t)); 3785 3786 if(ha->mailbox_wait != NULL) 3787 complete(ha->mailbox_wait); 3788 } 3789 break; 3790 } 3791 } else { 3792 WRT_REG_WORD(®->host_cmd, HC_CLR_RISC_INT); 3793 } 3794 3795 /* 3796 * We will receive interrupts during mailbox testing prior to 3797 * the card being marked online, hence the double check. 
3798 */ 3799 if (!(ha->flags.online && !ha->mailbox_wait)) { 3800 dprintk(2, "qla1280_isr: Response pointer Error\n"); 3801 goto out; 3802 } 3803 3804 if (mailbox[5] >= RESPONSE_ENTRY_CNT) 3805 goto out; 3806 3807 while (ha->rsp_ring_index != mailbox[5]) { 3808 pkt = ha->response_ring_ptr; 3809 3810 dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]" 3811 " = 0x%x\n", ha->rsp_ring_index, mailbox[5]); 3812 dprintk(5,"qla1280_isr: response packet data\n"); 3813 qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE); 3814 3815 if (pkt->entry_type == STATUS_TYPE) { 3816 if ((le16_to_cpu(pkt->scsi_status) & 0xff) 3817 || pkt->comp_status || pkt->entry_status) { 3818 dprintk(2, "qla1280_isr: ha->rsp_ring_index = " 3819 "0x%x mailbox[5] = 0x%x, comp_status " 3820 "= 0x%x, scsi_status = 0x%x\n", 3821 ha->rsp_ring_index, mailbox[5], 3822 le16_to_cpu(pkt->comp_status), 3823 le16_to_cpu(pkt->scsi_status)); 3824 } 3825 } else { 3826 dprintk(2, "qla1280_isr: ha->rsp_ring_index = " 3827 "0x%x, mailbox[5] = 0x%x\n", 3828 ha->rsp_ring_index, mailbox[5]); 3829 dprintk(2, "qla1280_isr: response packet data\n"); 3830 qla1280_dump_buffer(2, (char *)pkt, 3831 RESPONSE_ENTRY_SIZE); 3832 } 3833 3834 if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) { 3835 dprintk(2, "status: Cmd %p, handle %i\n", 3836 ha->outstanding_cmds[pkt->handle]->cmd, 3837 pkt->handle); 3838 if (pkt->entry_type == STATUS_TYPE) 3839 qla1280_status_entry(ha, pkt, done_q); 3840 else 3841 qla1280_error_entry(ha, pkt, done_q); 3842 /* Adjust ring index. */ 3843 ha->rsp_ring_index++; 3844 if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) { 3845 ha->rsp_ring_index = 0; 3846 ha->response_ring_ptr = ha->response_ring; 3847 } else 3848 ha->response_ring_ptr++; 3849 WRT_REG_WORD(®->mailbox5, ha->rsp_ring_index); 3850 } 3851 } 3852 3853 out: 3854 LEAVE("qla1280_isr"); 3855 } 3856 3857 /* 3858 * qla1280_rst_aen 3859 * Processes asynchronous reset. 3860 * 3861 * Input: 3862 * ha = adapter block pointer. 3863 */ 3864 static void 3865 qla1280_rst_aen(struct scsi_qla_host *ha) 3866 { 3867 uint8_t bus; 3868 3869 ENTER("qla1280_rst_aen"); 3870 3871 if (ha->flags.online && !ha->flags.reset_active && 3872 !ha->flags.abort_isp_active) { 3873 ha->flags.reset_active = 1; 3874 while (ha->flags.reset_marker) { 3875 /* Issue marker command. */ 3876 ha->flags.reset_marker = 0; 3877 for (bus = 0; bus < ha->ports && 3878 !ha->flags.reset_marker; bus++) { 3879 if (ha->bus_settings[bus].reset_marker) { 3880 ha->bus_settings[bus].reset_marker = 0; 3881 qla1280_marker(ha, bus, 0, 0, 3882 MK_SYNC_ALL); 3883 } 3884 } 3885 } 3886 } 3887 3888 LEAVE("qla1280_rst_aen"); 3889 } 3890 3891 3892 #if LINUX_VERSION_CODE < 0x020500 3893 /* 3894 * 3895 */ 3896 static void 3897 qla1280_get_target_options(struct scsi_cmnd *cmd, struct scsi_qla_host *ha) 3898 { 3899 unsigned char *result; 3900 struct nvram *n; 3901 int bus, target, lun; 3902 3903 bus = SCSI_BUS_32(cmd); 3904 target = SCSI_TCN_32(cmd); 3905 lun = SCSI_LUN_32(cmd); 3906 3907 /* 3908 * Make sure to not touch anything if someone is using the 3909 * sg interface. 
3910 */ 3911 if (cmd->use_sg || (CMD_RESULT(cmd) >> 16) != DID_OK || lun) 3912 return; 3913 3914 result = cmd->request_buffer; 3915 n = &ha->nvram; 3916 3917 n->bus[bus].target[target].parameter.enable_wide = 0; 3918 n->bus[bus].target[target].parameter.enable_sync = 0; 3919 n->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0; 3920 3921 if (result[7] & 0x60) 3922 n->bus[bus].target[target].parameter.enable_wide = 1; 3923 if (result[7] & 0x10) 3924 n->bus[bus].target[target].parameter.enable_sync = 1; 3925 if ((result[2] >= 3) && (result[4] + 5 > 56) && 3926 (result[56] & 0x4)) 3927 n->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1; 3928 3929 dprintk(2, "get_target_options(): wide %i, sync %i, ppr %i\n", 3930 n->bus[bus].target[target].parameter.enable_wide, 3931 n->bus[bus].target[target].parameter.enable_sync, 3932 n->bus[bus].target[target].ppr_1x160.flags.enable_ppr); 3933 } 3934 #endif 3935 3936 /* 3937 * qla1280_status_entry 3938 * Processes received ISP status entry. 3939 * 3940 * Input: 3941 * ha = adapter block pointer. 3942 * pkt = entry pointer. 3943 * done_q = done queue. 3944 */ 3945 static void 3946 qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt, 3947 struct list_head *done_q) 3948 { 3949 unsigned int bus, target, lun; 3950 int sense_sz; 3951 struct srb *sp; 3952 struct scsi_cmnd *cmd; 3953 uint32_t handle = le32_to_cpu(pkt->handle); 3954 uint16_t scsi_status = le16_to_cpu(pkt->scsi_status); 3955 uint16_t comp_status = le16_to_cpu(pkt->comp_status); 3956 3957 ENTER("qla1280_status_entry"); 3958 3959 /* Validate handle. */ 3960 if (handle < MAX_OUTSTANDING_COMMANDS) 3961 sp = ha->outstanding_cmds[handle]; 3962 else 3963 sp = NULL; 3964 3965 if (!sp) { 3966 printk(KERN_WARNING "qla1280: Status Entry invalid handle\n"); 3967 goto out; 3968 } 3969 3970 /* Free outstanding command slot. */ 3971 ha->outstanding_cmds[handle] = NULL; 3972 3973 cmd = sp->cmd; 3974 3975 /* Generate LU queue on cntrl, target, LUN */ 3976 bus = SCSI_BUS_32(cmd); 3977 target = SCSI_TCN_32(cmd); 3978 lun = SCSI_LUN_32(cmd); 3979 3980 if (comp_status || scsi_status) { 3981 dprintk(3, "scsi: comp_status = 0x%x, scsi_status = " 3982 "0x%x, handle = 0x%x\n", comp_status, 3983 scsi_status, handle); 3984 } 3985 3986 /* Target busy or queue full */ 3987 if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL || 3988 (scsi_status & 0xFF) == SAM_STAT_BUSY) { 3989 CMD_RESULT(cmd) = scsi_status & 0xff; 3990 } else { 3991 3992 /* Save ISP completion status */ 3993 CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd); 3994 3995 if (scsi_status & SAM_STAT_CHECK_CONDITION) { 3996 if (comp_status != CS_ARS_FAILED) { 3997 uint16_t req_sense_length = 3998 le16_to_cpu(pkt->req_sense_length); 3999 if (req_sense_length < CMD_SNSLEN(cmd)) 4000 sense_sz = req_sense_length; 4001 else 4002 /* 4003 * scsi_cmnd->sense_buffer is 4004 * 64 bytes, why only copy 63? 4005 * This looks wrong! /Jes 4006 */ 4007 sense_sz = CMD_SNSLEN(cmd) - 1; 4008 4009 memcpy(cmd->sense_buffer, 4010 &pkt->req_sense_data, sense_sz); 4011 } else 4012 sense_sz = 0; 4013 memset(cmd->sense_buffer + sense_sz, 0, 4014 sizeof(cmd->sense_buffer) - sense_sz); 4015 4016 dprintk(2, "qla1280_status_entry: Check " 4017 "condition Sense data, b %i, t %i, " 4018 "l %i\n", bus, target, lun); 4019 if (sense_sz) 4020 qla1280_dump_buffer(2, 4021 (char *)cmd->sense_buffer, 4022 sense_sz); 4023 } 4024 } 4025 4026 /* Place command on done queue. 
*/ 4027 list_add_tail(&sp->list, done_q); 4028 out: 4029 LEAVE("qla1280_status_entry"); 4030 } 4031 4032 /* 4033 * qla1280_error_entry 4034 * Processes error entry. 4035 * 4036 * Input: 4037 * ha = adapter block pointer. 4038 * pkt = entry pointer. 4039 * done_q = done queue. 4040 */ 4041 static void 4042 qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt, 4043 struct list_head *done_q) 4044 { 4045 struct srb *sp; 4046 uint32_t handle = le32_to_cpu(pkt->handle); 4047 4048 ENTER("qla1280_error_entry"); 4049 4050 if (pkt->entry_status & BIT_3) 4051 dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n"); 4052 else if (pkt->entry_status & BIT_2) 4053 dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n"); 4054 else if (pkt->entry_status & BIT_1) 4055 dprintk(2, "qla1280_error_entry: FULL flag error\n"); 4056 else 4057 dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n"); 4058 4059 /* Validate handle. */ 4060 if (handle < MAX_OUTSTANDING_COMMANDS) 4061 sp = ha->outstanding_cmds[handle]; 4062 else 4063 sp = NULL; 4064 4065 if (sp) { 4066 /* Free outstanding command slot. */ 4067 ha->outstanding_cmds[handle] = NULL; 4068 4069 /* Bad payload or header */ 4070 if (pkt->entry_status & (BIT_3 + BIT_2)) { 4071 /* Bad payload or header, set error status. */ 4072 /* CMD_RESULT(sp->cmd) = CS_BAD_PAYLOAD; */ 4073 CMD_RESULT(sp->cmd) = DID_ERROR << 16; 4074 } else if (pkt->entry_status & BIT_1) { /* FULL flag */ 4075 CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16; 4076 } else { 4077 /* Set error status. */ 4078 CMD_RESULT(sp->cmd) = DID_ERROR << 16; 4079 } 4080 4081 /* Place command on done queue. */ 4082 list_add_tail(&sp->list, done_q); 4083 } 4084 #ifdef QLA_64BIT_PTR 4085 else if (pkt->entry_type == COMMAND_A64_TYPE) { 4086 printk(KERN_WARNING "!qla1280: Error Entry invalid handle"); 4087 } 4088 #endif 4089 4090 LEAVE("qla1280_error_entry"); 4091 } 4092 4093 /* 4094 * qla1280_abort_isp 4095 * Resets ISP and aborts all outstanding commands. 4096 * 4097 * Input: 4098 * ha = adapter block pointer. 4099 * 4100 * Returns: 4101 * 0 = success 4102 */ 4103 static int 4104 qla1280_abort_isp(struct scsi_qla_host *ha) 4105 { 4106 struct device_reg __iomem *reg = ha->iobase; 4107 struct srb *sp; 4108 int status = 0; 4109 int cnt; 4110 int bus; 4111 4112 ENTER("qla1280_abort_isp"); 4113 4114 if (ha->flags.abort_isp_active || !ha->flags.online) 4115 goto out; 4116 4117 ha->flags.abort_isp_active = 1; 4118 4119 /* Disable ISP interrupts. */ 4120 qla1280_disable_intrs(ha); 4121 WRT_REG_WORD(®->host_cmd, HC_PAUSE_RISC); 4122 RD_REG_WORD(®->id_l); 4123 4124 printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n", 4125 ha->host_no); 4126 /* Dequeue all commands in outstanding command list. */ 4127 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 4128 struct scsi_cmnd *cmd; 4129 sp = ha->outstanding_cmds[cnt]; 4130 if (sp) { 4131 4132 cmd = sp->cmd; 4133 CMD_RESULT(cmd) = DID_RESET << 16; 4134 4135 sp->cmd = NULL; 4136 ha->outstanding_cmds[cnt] = NULL; 4137 4138 (*cmd->scsi_done)(cmd); 4139 4140 sp->flags = 0; 4141 } 4142 } 4143 4144 status = qla1280_load_firmware(ha); 4145 if (status) 4146 goto out; 4147 4148 /* Setup adapter based on NVRAM parameters. */ 4149 qla1280_nvram_config (ha); 4150 4151 status = qla1280_init_rings(ha); 4152 if (status) 4153 goto out; 4154 4155 /* Issue SCSI reset. 
*/ 4156 for (bus = 0; bus < ha->ports; bus++) 4157 qla1280_bus_reset(ha, bus); 4158 4159 ha->flags.abort_isp_active = 0; 4160 out: 4161 if (status) { 4162 printk(KERN_WARNING 4163 "qla1280: ISP error recovery failed, board disabled"); 4164 qla1280_reset_adapter(ha); 4165 dprintk(2, "qla1280_abort_isp: **** FAILED ****\n"); 4166 } 4167 4168 LEAVE("qla1280_abort_isp"); 4169 return status; 4170 } 4171 4172 4173 /* 4174 * qla1280_debounce_register 4175 * Debounce register. 4176 * 4177 * Input: 4178 * port = register address. 4179 * 4180 * Returns: 4181 * register value. 4182 */ 4183 static u16 4184 qla1280_debounce_register(volatile u16 __iomem * addr) 4185 { 4186 volatile u16 ret; 4187 volatile u16 ret2; 4188 4189 ret = RD_REG_WORD(addr); 4190 ret2 = RD_REG_WORD(addr); 4191 4192 if (ret == ret2) 4193 return ret; 4194 4195 do { 4196 cpu_relax(); 4197 ret = RD_REG_WORD(addr); 4198 ret2 = RD_REG_WORD(addr); 4199 } while (ret != ret2); 4200 4201 return ret; 4202 } 4203 4204 4205 /************************************************************************ 4206 * qla1280_check_for_dead_scsi_bus * 4207 * * 4208 * This routine checks for a dead SCSI bus * 4209 ************************************************************************/ 4210 #define SET_SXP_BANK 0x0100 4211 #define SCSI_PHASE_INVALID 0x87FF 4212 static int 4213 qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus) 4214 { 4215 uint16_t config_reg, scsi_control; 4216 struct device_reg __iomem *reg = ha->iobase; 4217 4218 if (ha->bus_settings[bus].scsi_bus_dead) { 4219 WRT_REG_WORD(®->host_cmd, HC_PAUSE_RISC); 4220 config_reg = RD_REG_WORD(®->cfg_1); 4221 WRT_REG_WORD(®->cfg_1, SET_SXP_BANK); 4222 scsi_control = RD_REG_WORD(®->scsiControlPins); 4223 WRT_REG_WORD(®->cfg_1, config_reg); 4224 WRT_REG_WORD(®->host_cmd, HC_RELEASE_RISC); 4225 4226 if (scsi_control == SCSI_PHASE_INVALID) { 4227 ha->bus_settings[bus].scsi_bus_dead = 1; 4228 #if 0 4229 CMD_RESULT(cp) = DID_NO_CONNECT << 16; 4230 CMD_HANDLE(cp) = INVALID_HANDLE; 4231 /* ha->actthreads--; */ 4232 4233 (*(cp)->scsi_done)(cp); 4234 #endif 4235 return 1; /* bus is dead */ 4236 } else { 4237 ha->bus_settings[bus].scsi_bus_dead = 0; 4238 ha->bus_settings[bus].failed_reset_count = 0; 4239 } 4240 } 4241 return 0; /* bus is not dead */ 4242 } 4243 4244 static void 4245 qla1280_get_target_parameters(struct scsi_qla_host *ha, 4246 struct scsi_device *device) 4247 { 4248 uint16_t mb[MAILBOX_REGISTER_COUNT]; 4249 int bus, target, lun; 4250 4251 bus = device->channel; 4252 target = device->id; 4253 lun = device->lun; 4254 4255 4256 mb[0] = MBC_GET_TARGET_PARAMETERS; 4257 mb[1] = (uint16_t) (bus ? 
target | BIT_7 : target); 4258 mb[1] <<= 8; 4259 qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0, 4260 &mb[0]); 4261 4262 printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun); 4263 4264 if (mb[3] != 0) { 4265 printk(" Sync: period %d, offset %d", 4266 (mb[3] & 0xff), (mb[3] >> 8)); 4267 if (mb[2] & BIT_13) 4268 printk(", Wide"); 4269 if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2) 4270 printk(", DT"); 4271 } else 4272 printk(" Async"); 4273 4274 if (DEV_SIMPLE_TAGS(device)) 4275 printk(", Tagged queuing: depth %d", device->queue_depth); 4276 printk("\n"); 4277 } 4278 4279 4280 #if DEBUG_QLA1280 4281 static void 4282 __qla1280_dump_buffer(char *b, int size) 4283 { 4284 int cnt; 4285 u8 c; 4286 4287 printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah " 4288 "Bh Ch Dh Eh Fh\n"); 4289 printk(KERN_DEBUG "---------------------------------------------" 4290 "------------------\n"); 4291 4292 for (cnt = 0; cnt < size;) { 4293 c = *b++; 4294 4295 printk("0x%02x", c); 4296 cnt++; 4297 if (!(cnt % 16)) 4298 printk("\n"); 4299 else 4300 printk(" "); 4301 } 4302 if (cnt % 16) 4303 printk("\n"); 4304 } 4305 4306 /************************************************************************** 4307 * ql1280_print_scsi_cmd 4308 * 4309 **************************************************************************/ 4310 static void 4311 __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd) 4312 { 4313 struct scsi_qla_host *ha; 4314 struct Scsi_Host *host = CMD_HOST(cmd); 4315 struct srb *sp; 4316 /* struct scatterlist *sg; */ 4317 4318 int i; 4319 ha = (struct scsi_qla_host *)host->hostdata; 4320 4321 sp = (struct srb *)CMD_SP(cmd); 4322 printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd)); 4323 printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n", 4324 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd), 4325 CMD_CDBLEN(cmd)); 4326 printk(" CDB = "); 4327 for (i = 0; i < cmd->cmd_len; i++) { 4328 printk("0x%02x ", cmd->cmnd[i]); 4329 } 4330 printk(" seg_cnt =%d\n", cmd->use_sg); 4331 printk(" request buffer=0x%p, request buffer len=0x%x\n", 4332 cmd->request_buffer, cmd->request_bufflen); 4333 /* if (cmd->use_sg) 4334 { 4335 sg = (struct scatterlist *) cmd->request_buffer; 4336 printk(" SG buffer: \n"); 4337 qla1280_dump_buffer(1, (char *)sg, (cmd->use_sg*sizeof(struct scatterlist))); 4338 } */ 4339 printk(" tag=%d, transfersize=0x%x \n", 4340 cmd->tag, cmd->transfersize); 4341 printk(" Pid=%li, SP=0x%p\n", cmd->pid, CMD_SP(cmd)); 4342 printk(" underflow size = 0x%x, direction=0x%x\n", 4343 cmd->underflow, cmd->sc_data_direction); 4344 } 4345 4346 /************************************************************************** 4347 * ql1280_dump_device 4348 * 4349 **************************************************************************/ 4350 static void 4351 ql1280_dump_device(struct scsi_qla_host *ha) 4352 { 4353 4354 struct scsi_cmnd *cp; 4355 struct srb *sp; 4356 int i; 4357 4358 printk(KERN_DEBUG "Outstanding Commands on controller:\n"); 4359 4360 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) { 4361 if ((sp = ha->outstanding_cmds[i]) == NULL) 4362 continue; 4363 if ((cp = sp->cmd) == NULL) 4364 continue; 4365 qla1280_print_scsi_cmd(1, cp); 4366 } 4367 } 4368 #endif 4369 4370 4371 enum tokens { 4372 TOKEN_NVRAM, 4373 TOKEN_SYNC, 4374 TOKEN_WIDE, 4375 TOKEN_PPR, 4376 TOKEN_VERBOSE, 4377 TOKEN_DEBUG, 4378 }; 4379 4380 struct setup_tokens { 4381 char *token; 4382 int val; 4383 }; 4384 4385 static struct setup_tokens setup_token[] __initdata = 4386 { 4387 { 
"nvram", TOKEN_NVRAM }, 4388 { "sync", TOKEN_SYNC }, 4389 { "wide", TOKEN_WIDE }, 4390 { "ppr", TOKEN_PPR }, 4391 { "verbose", TOKEN_VERBOSE }, 4392 { "debug", TOKEN_DEBUG }, 4393 }; 4394 4395 4396 /************************************************************************** 4397 * qla1280_setup 4398 * 4399 * Handle boot parameters. This really needs to be changed so one 4400 * can specify per adapter parameters. 4401 **************************************************************************/ 4402 static int __init 4403 qla1280_setup(char *s) 4404 { 4405 char *cp, *ptr; 4406 unsigned long val; 4407 int toke; 4408 4409 cp = s; 4410 4411 while (cp && (ptr = strchr(cp, ':'))) { 4412 ptr++; 4413 if (!strcmp(ptr, "yes")) { 4414 val = 0x10000; 4415 ptr += 3; 4416 } else if (!strcmp(ptr, "no")) { 4417 val = 0; 4418 ptr += 2; 4419 } else 4420 val = simple_strtoul(ptr, &ptr, 0); 4421 4422 switch ((toke = qla1280_get_token(cp))) { 4423 case TOKEN_NVRAM: 4424 if (!val) 4425 driver_setup.no_nvram = 1; 4426 break; 4427 case TOKEN_SYNC: 4428 if (!val) 4429 driver_setup.no_sync = 1; 4430 else if (val != 0x10000) 4431 driver_setup.sync_mask = val; 4432 break; 4433 case TOKEN_WIDE: 4434 if (!val) 4435 driver_setup.no_wide = 1; 4436 else if (val != 0x10000) 4437 driver_setup.wide_mask = val; 4438 break; 4439 case TOKEN_PPR: 4440 if (!val) 4441 driver_setup.no_ppr = 1; 4442 else if (val != 0x10000) 4443 driver_setup.ppr_mask = val; 4444 break; 4445 case TOKEN_VERBOSE: 4446 qla1280_verbose = val; 4447 break; 4448 default: 4449 printk(KERN_INFO "qla1280: unknown boot option %s\n", 4450 cp); 4451 } 4452 4453 cp = strchr(ptr, ';'); 4454 if (cp) 4455 cp++; 4456 else { 4457 break; 4458 } 4459 } 4460 return 1; 4461 } 4462 4463 4464 static int 4465 qla1280_get_token(char *str) 4466 { 4467 char *sep; 4468 long ret = -1; 4469 int i, len; 4470 4471 len = sizeof(setup_token)/sizeof(struct setup_tokens); 4472 4473 sep = strchr(str, ':'); 4474 4475 if (sep) { 4476 for (i = 0; i < len; i++){ 4477 4478 if (!strncmp(setup_token[i].token, str, (sep - str))) { 4479 ret = setup_token[i].val; 4480 break; 4481 } 4482 } 4483 } 4484 4485 return ret; 4486 } 4487 4488 #if LINUX_VERSION_CODE >= 0x020600 4489 static struct scsi_host_template qla1280_driver_template = { 4490 .module = THIS_MODULE, 4491 .proc_name = "qla1280", 4492 .name = "Qlogic ISP 1280/12160", 4493 .info = qla1280_info, 4494 .slave_configure = qla1280_slave_configure, 4495 .queuecommand = qla1280_queuecommand, 4496 .eh_abort_handler = qla1280_eh_abort, 4497 .eh_device_reset_handler= qla1280_eh_device_reset, 4498 .eh_bus_reset_handler = qla1280_eh_bus_reset, 4499 .eh_host_reset_handler = qla1280_eh_adapter_reset, 4500 .bios_param = qla1280_biosparam, 4501 .can_queue = 0xfffff, 4502 .this_id = -1, 4503 .sg_tablesize = SG_ALL, 4504 .cmd_per_lun = 1, 4505 .use_clustering = ENABLE_CLUSTERING, 4506 }; 4507 #else 4508 static struct scsi_host_template qla1280_driver_template = { 4509 .proc_name = "qla1280", 4510 .name = "Qlogic ISP 1280/12160", 4511 .detect = qla1280_detect, 4512 .release = qla1280_release, 4513 .info = qla1280_info, 4514 .queuecommand = qla1280_queuecommand, 4515 .eh_abort_handler = qla1280_eh_abort, 4516 .eh_device_reset_handler= qla1280_eh_device_reset, 4517 .eh_bus_reset_handler = qla1280_eh_bus_reset, 4518 .eh_host_reset_handler = qla1280_eh_adapter_reset, 4519 .bios_param = qla1280_biosparam_old, 4520 .can_queue = 0xfffff, 4521 .this_id = -1, 4522 .sg_tablesize = SG_ALL, 4523 .cmd_per_lun = 1, 4524 .use_clustering = ENABLE_CLUSTERING, 4525 
	.use_new_eh_code	= 1,
};
#endif

static int __devinit
qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int devnum = id->driver_data;
	struct qla_boards *bdp = &ql1280_board_tbl[devnum];
	struct Scsi_Host *host;
	struct scsi_qla_host *ha;
	int error = -ENODEV;

	/* Bypass all AMI SUBSYS VENDOR IDs */
	if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) {
		printk(KERN_INFO
		       "qla1280: Skipping AMI SubSys Vendor ID Chip\n");
		goto error;
	}

	printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n",
	       bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn));

	if (pci_enable_device(pdev)) {
		printk(KERN_WARNING
		       "qla1280: Failed to enable PCI device, aborting.\n");
		goto error;
	}

	pci_set_master(pdev);

	error = -ENOMEM;
	host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha));
	if (!host) {
		printk(KERN_WARNING
		       "qla1280: Failed to register host, aborting.\n");
		goto error_disable_device;
	}

	ha = (struct scsi_qla_host *)host->hostdata;
	memset(ha, 0, sizeof(struct scsi_qla_host));

	ha->pdev = pdev;
	ha->devnum = devnum;	/* specifies microcode load address */

#ifdef QLA_64BIT_PTR
	if (pci_set_dma_mask(ha->pdev, (dma_addr_t) ~ 0ULL)) {
		if (pci_set_dma_mask(ha->pdev, 0xffffffff)) {
			printk(KERN_WARNING "scsi(%li): Unable to set a "
			       "suitable DMA mask - aborting\n", ha->host_no);
			error = -ENODEV;
			goto error_free_irq;
		}
	} else
		dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
			ha->host_no);
#else
	if (pci_set_dma_mask(ha->pdev, 0xffffffff)) {
		printk(KERN_WARNING "scsi(%li): Unable to set a "
		       "suitable DMA mask - aborting\n", ha->host_no);
		error = -ENODEV;
		goto error_free_irq;
	}
#endif

	ha->request_ring = pci_alloc_consistent(ha->pdev,
			((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
			&ha->request_dma);
	if (!ha->request_ring) {
		printk(KERN_INFO "qla1280: Failed to get request memory\n");
		goto error_put_host;
	}

	ha->response_ring = pci_alloc_consistent(ha->pdev,
			((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
			&ha->response_dma);
	if (!ha->response_ring) {
		printk(KERN_INFO "qla1280: Failed to get response memory\n");
		goto error_free_request_ring;
	}

	ha->ports = bdp->numPorts;

	ha->host = host;
	ha->host_no = host->host_no;

	host->irq = pdev->irq;
	host->max_channel = bdp->numPorts - 1;
	host->max_lun = MAX_LUNS - 1;
	host->max_id = MAX_TARGETS;
	host->max_sectors = 1024;
	host->unique_id = host->host_no;

#if LINUX_VERSION_CODE < 0x020545
	host->select_queue_depths = qla1280_select_queue_depth;
#endif

	error = -ENODEV;

#if MEMORY_MAPPED_IO
	ha->mmpbase = ioremap(pci_resource_start(ha->pdev, 1),
			      pci_resource_len(ha->pdev, 1));
	if (!ha->mmpbase) {
		printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
		goto error_free_response_ring;
	}

	host->base = (unsigned long)ha->mmpbase;
	ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
#else
	host->io_port = pci_resource_start(ha->pdev, 0);
	if (!request_region(host->io_port, 0xff, "qla1280")) {
		printk(KERN_INFO "qla1280: Failed to reserve i/o region "
		       "0x%04lx-0x%04lx - already in use\n",
		       host->io_port, host->io_port + 0xff);
		goto error_free_response_ring;
	}

	ha->iobase = (struct device_reg *)host->io_port;
#endif

	INIT_LIST_HEAD(&ha->done_q);

	/* Disable ISP interrupts. */
	qla1280_disable_intrs(ha);

	if (request_irq(pdev->irq, qla1280_intr_handler, SA_SHIRQ,
			"qla1280", ha)) {
		printk(KERN_WARNING "qla1280: Failed to reserve interrupt %d "
		       "- already in use\n", pdev->irq);
		goto error_release_region;
	}

	/* load the F/W, read parameters, and init the H/W */
	if (qla1280_initialize_adapter(ha)) {
		printk(KERN_INFO "qla1x160: Failed to initialize adapter\n");
		goto error_free_irq;
	}

	/* set our host ID (need to do something about our two IDs) */
	host->this_id = ha->bus_settings[0].id;

	pci_set_drvdata(pdev, host);

#if LINUX_VERSION_CODE >= 0x020600
	error = scsi_add_host(host, &pdev->dev);
	if (error)
		goto error_disable_adapter;
	scsi_scan_host(host);
#else
	scsi_set_pci_device(host, pdev);
#endif

	return 0;

#if LINUX_VERSION_CODE >= 0x020600
 error_disable_adapter:
	qla1280_disable_intrs(ha);
#endif
 error_free_irq:
	free_irq(pdev->irq, ha);
 error_release_region:
#if MEMORY_MAPPED_IO
	iounmap(ha->mmpbase);
#else
	release_region(host->io_port, 0xff);
#endif
 error_free_response_ring:
	pci_free_consistent(ha->pdev,
			((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
			ha->response_ring, ha->response_dma);
 error_free_request_ring:
	pci_free_consistent(ha->pdev,
			((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
			ha->request_ring, ha->request_dma);
 error_put_host:
	scsi_host_put(host);
 error_disable_device:
	pci_disable_device(pdev);
 error:
	return error;
}


static void __devexit
qla1280_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;

#if LINUX_VERSION_CODE >= 0x020600
	scsi_remove_host(host);
#endif

	qla1280_disable_intrs(ha);

	free_irq(pdev->irq, ha);

#if MEMORY_MAPPED_IO
	iounmap(ha->mmpbase);
#else
	release_region(host->io_port, 0xff);
#endif

	pci_free_consistent(ha->pdev,
			((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
			ha->request_ring, ha->request_dma);
	pci_free_consistent(ha->pdev,
			((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
			ha->response_ring, ha->response_dma);

	pci_disable_device(pdev);

	scsi_host_put(host);
}

#if LINUX_VERSION_CODE >= 0x020600
static struct pci_driver qla1280_pci_driver = {
	.name		= "qla1280",
	.id_table	= qla1280_pci_tbl,
	.probe		= qla1280_probe_one,
	.remove		= __devexit_p(qla1280_remove_one),
};

static int __init
qla1280_init(void)
{
	if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
		printk(KERN_WARNING
		       "qla1280: struct srb too big, aborting\n");
		return -EINVAL;
	}

#ifdef MODULE
	/*
	 * If we are called as a module, the qla1280 pointer may be non-NULL;
	 * it then points to our bootup string, just like on the lilo
	 * command line.
	 * If so, process the config string with qla1280_setup().
	 *
	 * Boot time options
	 * To add options at boot time, add a line to your lilo.conf file like:
	 *	append="qla1280=verbose:1;sync:no"
	 * which enables verbose messages and disables synchronous negotiation
	 * on all controllers.  The recognized option names are those listed
	 * in the setup_token table above, parsed by qla1280_setup().
	 */
	if (qla1280)
		qla1280_setup(qla1280);
#endif

	return pci_module_init(&qla1280_pci_driver);
}

static void __exit
qla1280_exit(void)
{
	pci_unregister_driver(&qla1280_pci_driver);
}

module_init(qla1280_init);
module_exit(qla1280_exit);

#else
# define driver_template qla1280_driver_template
# include "scsi_module.c"
#endif

MODULE_AUTHOR("Qlogic & Jes Sorensen");
MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA1280_VERSION);

/*
 * Overrides for Emacs so that we almost follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only. This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-basic-offset: 8
 * tab-width: 8
 * End:
 */
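
/*
 * Illustrative sketch only, kept under "#if 0" so it is never built: the
 * request ring accounting used by qla1280_req_pkt(), qla1280_isp_cmd() and
 * qla1280_32bit_start_scsi() above.  The driver advances a software "in"
 * index with wrap at REQUEST_ENTRY_CNT and reads the chip's "out" index
 * back from mailbox4; the free-entry count is the distance between the two
 * on the ring.  The names and the ring size below are local to this sketch
 * and are not part of the driver.
 */
#if 0
#include <stdio.h>

#define SKETCH_RING_CNT	256	/* stands in for REQUEST_ENTRY_CNT */

/* Free slots when software has filled up to 'in' and the chip consumed 'out'. */
static unsigned int sketch_free_entries(unsigned int in, unsigned int out)
{
	if (in < out)
		return out - in;
	return SKETCH_RING_CNT - (in - out);
}

/* Advance the software index with wrap, as done after posting each IOCB. */
static unsigned int sketch_advance(unsigned int in)
{
	return (in + 1 == SKETCH_RING_CNT) ? 0 : in + 1;
}

int main(void)
{
	unsigned int in = 250, out = 4;

	/* 256 - (250 - 4) = 10 entries still free */
	printf("free=%u\n", sketch_free_entries(in, out));

	in = sketch_advance(in);	/* post one more IOCB */
	printf("in=%u free=%u\n", in, sketch_free_entries(in, out));
	return 0;
}
#endif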