1 /****************************************************************************** 2 * QLOGIC LINUX SOFTWARE 3 * 4 * QLogic QLA1280 (Ultra2) and QLA12160 (Ultra3) SCSI driver 5 * Copyright (C) 2000 Qlogic Corporation (www.qlogic.com) 6 * Copyright (C) 2001-2004 Jes Sorensen, Wild Open Source Inc. 7 * Copyright (C) 2003-2004 Christoph Hellwig 8 * 9 * This program is free software; you can redistribute it and/or modify it 10 * under the terms of the GNU General Public License as published by the 11 * Free Software Foundation; either version 2, or (at your option) any 12 * later version. 13 * 14 * This program is distributed in the hope that it will be useful, but 15 * WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 * General Public License for more details. 18 * 19 ******************************************************************************/ 20 #define QLA1280_VERSION "3.27.1" 21 /***************************************************************************** 22 Revision History: 23 Rev 3.27.1, February 8, 2010, Michael Reed 24 - Retain firmware image for error recovery. 25 Rev 3.27, February 10, 2009, Michael Reed 26 - General code cleanup. 27 - Improve error recovery. 28 Rev 3.26, January 16, 2006 Jes Sorensen 29 - Ditch all < 2.6 support 30 Rev 3.25.1, February 10, 2005 Christoph Hellwig 31 - use pci_map_single to map non-S/G requests 32 - remove qla1280_proc_info 33 Rev 3.25, September 28, 2004, Christoph Hellwig 34 - add support for ISP1020/1040 35 - don't include "scsi.h" anymore for 2.6.x 36 Rev 3.24.4 June 7, 2004 Christoph Hellwig 37 - restructure firmware loading, cleanup initialization code 38 - prepare support for ISP1020/1040 chips 39 Rev 3.24.3 January 19, 2004, Jes Sorensen 40 - Handle PCI DMA mask settings correctly 41 - Correct order of error handling in probe_one, free_irq should not 42 be called if request_irq failed 43 Rev 3.24.2 January 19, 2004, James Bottomley & Andrew Vasquez 44 - Big endian fixes (James) 45 - Remove bogus IOCB content on zero data transfer commands (Andrew) 46 Rev 3.24.1 January 5, 2004, Jes Sorensen 47 - Initialize completion queue to avoid OOPS on probe 48 - Handle interrupts during mailbox testing 49 Rev 3.24 November 17, 2003, Christoph Hellwig 50 - use struct list_head for completion queue 51 - avoid old Scsi_FOO typedefs 52 - cleanup 2.4 compat glue a bit 53 - use <scsi/scsi_*.h> headers on 2.6 instead of "scsi.h" 54 - make initialization for memory mapped vs port I/O more similar 55 - remove broken pci config space manipulation 56 - kill more cruft 57 - this is an almost perfect 2.6 scsi driver now! 
;)
	Rev  3.23.39 December 17, 2003, Jes Sorensen
	- Delete completion queue from srb if mailbox command failed,
	  to avoid qla1280_done completing qla1280_error_action's
	  obsolete context
	- Reduce arguments for qla1280_done
	Rev  3.23.38 October 18, 2003, Christoph Hellwig
	- Convert to new-style hotpluggable driver for 2.6
	- Fix missing scsi_unregister/scsi_host_put on HBA removal
	- Kill some more cruft
	Rev  3.23.37 October 1, 2003, Jes Sorensen
	- Make MMIO depend on CONFIG_X86_VISWS instead of yet another
	  random CONFIG option
	- Clean up locking in probe path
	Rev  3.23.36 October 1, 2003, Christoph Hellwig
	- queuecommand only ever receives new commands - clear flags
	- Reintegrate lost fixes from Linux 2.5
	Rev  3.23.35 August 14, 2003, Jes Sorensen
	- Build against 2.6
	Rev  3.23.34 July 23, 2003, Jes Sorensen
	- Remove pointless TRUE/FALSE macros
	- Clean up vchan handling
	Rev  3.23.33 July 3, 2003, Jes Sorensen
	- Don't define register access macros before the define determining MMIO.
	  This just happened to work out on ia64 but not elsewhere.
	- Don't try to read from the card while it is in reset as
	  it won't respond and causes an MCA
	Rev  3.23.32 June 23, 2003, Jes Sorensen
	- Basic support for boot time arguments
	Rev  3.23.31 June 8, 2003, Jes Sorensen
	- Reduce boot time messages
	Rev  3.23.30 June 6, 2003, Jes Sorensen
	- Do not enable sync/wide/ppr before it has been determined
	  that the target device actually supports it
	- Enable DMA arbitration for multi channel controllers
	Rev  3.23.29 June 3, 2003, Jes Sorensen
	- Port to 2.5.69
	Rev  3.23.28 June 3, 2003, Jes Sorensen
	- Eliminate duplicate marker commands on bus resets
	- Handle outstanding commands appropriately on bus/device resets
	Rev  3.23.27 May 28, 2003, Jes Sorensen
	- Remove bogus input queue code, let the Linux SCSI layer do the work
	- Clean up NVRAM handling, only read it once from the card
	- Add a number of missing default nvram parameters
	Rev  3.23.26 Beta May 28, 2003, Jes Sorensen
	- Use completion queue for mailbox commands instead of busy wait
	Rev  3.23.25 Beta May 27, 2003, James Bottomley
	- Migrate to use new error handling code
	Rev  3.23.24 Beta May 21, 2003, James Bottomley
	- Big endian support
	- Cleanup data direction code
	Rev  3.23.23 Beta May 12, 2003, Jes Sorensen
	- Switch to using MMIO instead of PIO
	Rev  3.23.22 Beta April 15, 2003, Jes Sorensen
	- Fix PCI parity problem with 12160 during reset.
	Rev  3.23.21 Beta April 14, 2003, Jes Sorensen
	- Use pci_map_page()/pci_unmap_page() instead of map_single version.
	Rev  3.23.20 Beta April 9, 2003, Jes Sorensen
	- Remove < 2.4.x support
	- Introduce HOST_LOCK to make the spin lock changes portable.
117 - Remove a bunch of idiotic and unnecessary typedef's 118 - Kill all leftovers of target-mode support which never worked anyway 119 Rev 3.23.19 Beta April 11, 2002, Linus Torvalds 120 - Do qla1280_pci_config() before calling request_irq() and 121 request_region() 122 - Use pci_dma_hi32() to handle upper word of DMA addresses instead 123 of large shifts 124 - Hand correct arguments to free_irq() in case of failure 125 Rev 3.23.18 Beta April 11, 2002, Jes Sorensen 126 - Run source through Lindent and clean up the output 127 Rev 3.23.17 Beta April 11, 2002, Jes Sorensen 128 - Update SCSI firmware to qla1280 v8.15.00 and qla12160 v10.04.32 129 Rev 3.23.16 Beta March 19, 2002, Jes Sorensen 130 - Rely on mailbox commands generating interrupts - do not 131 run qla1280_isr() from ql1280_mailbox_command() 132 - Remove device_reg_t 133 - Integrate ql12160_set_target_parameters() with 1280 version 134 - Make qla1280_setup() non static 135 - Do not call qla1280_check_for_dead_scsi_bus() on every I/O request 136 sent to the card - this command pauses the firmware!!! 137 Rev 3.23.15 Beta March 19, 2002, Jes Sorensen 138 - Clean up qla1280.h - remove obsolete QL_DEBUG_LEVEL_x definitions 139 - Remove a pile of pointless and confusing (srb_t **) and 140 (scsi_lu_t *) typecasts 141 - Explicit mark that we do not use the new error handling (for now) 142 - Remove scsi_qla_host_t and use 'struct' instead 143 - Remove in_abort, watchdog_enabled, dpc, dpc_sched, bios_enabled, 144 pci_64bit_slot flags which weren't used for anything anyway 145 - Grab host->host_lock while calling qla1280_isr() from abort() 146 - Use spin_lock()/spin_unlock() in qla1280_intr_handler() - we 147 do not need to save/restore flags in the interrupt handler 148 - Enable interrupts early (before any mailbox access) in preparation 149 for cleaning up the mailbox handling 150 Rev 3.23.14 Beta March 14, 2002, Jes Sorensen 151 - Further cleanups. Remove all trace of QL_DEBUG_LEVEL_x and replace 152 it with proper use of dprintk(). 
	- Make qla1280_print_scsi_cmd() and qla1280_dump_buffer() both take
	  a debug level argument to determine if data is to be printed
	- Add KERN_* info to printk()
	Rev  3.23.13 Beta March 14, 2002, Jes Sorensen
	- Significant cosmetic cleanups
	- Change debug code to use dprintk() and remove #if mess
	Rev  3.23.12 Beta March 13, 2002, Jes Sorensen
	- More cosmetic cleanups, fix places treating return as function
	- use cpu_relax() in qla1280_debounce_register()
	Rev  3.23.11 Beta March 13, 2002, Jes Sorensen
	- Make it compile under 2.5.5
	Rev  3.23.10 Beta October 1, 2001, Jes Sorensen
	- Do not typecast short * to long * in QL1280BoardTbl, this
	  broke miserably on big endian boxes
	Rev  3.23.9 Beta September 30, 2001, Jes Sorensen
	- Remove pre 2.2 hack for checking for reentrance in interrupt handler
	- Make data types used to receive from SCSI_{BUS,TCN,LUN}_32
	  unsigned int to match the types from struct scsi_cmnd
	Rev  3.23.8 Beta September 29, 2001, Jes Sorensen
	- Remove bogus timer_t typedef from qla1280.h
	- Remove obsolete pre 2.2 PCI setup code, use proper #define's
	  for PCI_ values, call pci_set_master()
	- Fix memleak of qla1280_buffer on module unload
	- Only compile module parsing code #ifdef MODULE - should be
	  changed to use individual MODULE_PARM's later
	- Remove dummy_buffer that was never modified nor printed
	- ENTER()/LEAVE() are noops unless QL_DEBUG_LEVEL_3, hence remove
	  #ifdef QL_DEBUG_LEVEL_3/#endif around ENTER()/LEAVE() calls
	- Remove \r from print statements, this is Linux, not DOS
	- Remove obsolete QLA1280_{SCSILU,INTR,RING}_{LOCK,UNLOCK}
	  dummy macros
	- Remove C++ compile hack in header file as Linux drivers are not
	  supposed to be compiled as C++
	- Kill MS_64BITS macro as it makes the code more readable
	- Remove unnecessary flags.in_interrupts bit
	Rev  3.23.7 Beta August 20, 2001, Jes Sorensen
	- Don't check for set flags on q->q_flag one by one in qla1280_next()
	- Check whether the interrupt was generated by the QLA1280 before
	  doing any processing
	- qla1280_status_entry(): Only zero out part of sense_buffer that
	  is not being copied into
	- Remove more superfluous typecasts
	- qla1280_32bit_start_scsi() replace home-brew memcpy() with memcpy()
	Rev  3.23.6 Beta August 20, 2001, Tony Luck, Intel
	- Don't walk the entire list in qla1280_putq_t() just to directly
	  grab the pointer to the last element afterwards
	Rev  3.23.5 Beta August 9, 2001, Jes Sorensen
	- Don't use IRQF_DISABLED, its use is deprecated for this kind of driver
	Rev  3.23.4 Beta August 8, 2001, Jes Sorensen
	- Set dev->max_sectors to 1024
	Rev  3.23.3 Beta August 6, 2001, Jes Sorensen
	- Provide compat macros for pci_enable_device(), pci_find_subsys()
	  and scsi_set_pci_device()
	- Call scsi_set_pci_device() for all devices
	- Reduce size of kernel version dependent device probe code
	- Move duplicate probe/init code to separate function
	- Handle error if qla1280_mem_alloc() fails
	- Kill OFFSET() macro and use Linux's PCI definitions instead
	- Kill private structure defining PCI config space (struct config_reg)
	- Only allocate I/O port region if not in MMIO mode
	- Remove duplicate (unused) sanity check of size of srb_t
	Rev  3.23.2 Beta August 6, 2001, Jes Sorensen
	- Change home-brew memset() implementations to use memset()
	- Remove all references to COMTRACE() - accessing a PC's COM2 serial port
directly is not legal under Linux. 218 Rev 3.23.1 Beta April 24, 2001, Jes Sorensen 219 - Remove pre 2.2 kernel support 220 - clean up 64 bit DMA setting to use 2.4 API (provide backwards compat) 221 - Fix MMIO access to use readl/writel instead of directly 222 dereferencing pointers 223 - Nuke MSDOS debugging code 224 - Change true/false data types to int from uint8_t 225 - Use int for counters instead of uint8_t etc. 226 - Clean up size & byte order conversion macro usage 227 Rev 3.23 Beta January 11, 2001 BN Qlogic 228 - Added check of device_id when handling non 229 QLA12160s during detect(). 230 Rev 3.22 Beta January 5, 2001 BN Qlogic 231 - Changed queue_task() to schedule_task() 232 for kernels 2.4.0 and higher. 233 Note: 2.4.0-testxx kernels released prior to 234 the actual 2.4.0 kernel release on January 2001 235 will get compile/link errors with schedule_task(). 236 Please update your kernel to released 2.4.0 level, 237 or comment lines in this file flagged with 3.22 238 to resolve compile/link error of schedule_task(). 239 - Added -DCONFIG_SMP in addition to -D__SMP__ 240 in Makefile for 2.4.0 builds of driver as module. 241 Rev 3.21 Beta January 4, 2001 BN Qlogic 242 - Changed criteria of 64/32 Bit mode of HBA 243 operation according to BITS_PER_LONG rather 244 than HBA's NVRAM setting of >4Gig memory bit; 245 so that the HBA auto-configures without the need 246 to setup each system individually. 247 Rev 3.20 Beta December 5, 2000 BN Qlogic 248 - Added priority handling to IA-64 onboard SCSI 249 ISP12160 chip for kernels greater than 2.3.18. 250 - Added irqrestore for qla1280_intr_handler. 251 - Enabled /proc/scsi/qla1280 interface. 252 - Clear /proc/scsi/qla1280 counters in detect(). 253 Rev 3.19 Beta October 13, 2000 BN Qlogic 254 - Declare driver_template for new kernel 255 (2.4.0 and greater) scsi initialization scheme. 256 - Update /proc/scsi entry for 2.3.18 kernels and 257 above as qla1280 258 Rev 3.18 Beta October 10, 2000 BN Qlogic 259 - Changed scan order of adapters to map 260 the QLA12160 followed by the QLA1280. 261 Rev 3.17 Beta September 18, 2000 BN Qlogic 262 - Removed warnings for 32 bit 2.4.x compiles 263 - Corrected declared size for request and response 264 DMA addresses that are kept in each ha 265 Rev. 3.16 Beta August 25, 2000 BN Qlogic 266 - Corrected 64 bit addressing issue on IA-64 267 where the upper 32 bits were not properly 268 passed to the RISC engine. 269 Rev. 3.15 Beta August 22, 2000 BN Qlogic 270 - Modified qla1280_setup_chip to properly load 271 ISP firmware for greater that 4 Gig memory on IA-64 272 Rev. 3.14 Beta August 16, 2000 BN Qlogic 273 - Added setting of dma_mask to full 64 bit 274 if flags.enable_64bit_addressing is set in NVRAM 275 Rev. 3.13 Beta August 16, 2000 BN Qlogic 276 - Use new PCI DMA mapping APIs for 2.4.x kernel 277 Rev. 3.12 July 18, 2000 Redhat & BN Qlogic 278 - Added check of pci_enable_device to detect() for 2.3.x 279 - Use pci_resource_start() instead of 280 pdev->resource[0].start in detect() for 2.3.x 281 - Updated driver version 282 Rev. 3.11 July 14, 2000 BN Qlogic 283 - Updated SCSI Firmware to following versions: 284 qla1x80: 8.13.08 285 qla1x160: 10.04.08 286 - Updated driver version to 3.11 287 Rev. 3.10 June 23, 2000 BN Qlogic 288 - Added filtering of AMI SubSys Vendor ID devices 289 Rev. 3.9 290 - DEBUG_QLA1280 undefined and new version BN Qlogic 291 Rev. 3.08b May 9, 2000 MD Dell 292 - Added logic to check against AMI subsystem vendor ID 293 Rev. 
3.08 May 4, 2000 DG Qlogic 294 - Added logic to check for PCI subsystem ID. 295 Rev. 3.07 Apr 24, 2000 DG & BN Qlogic 296 - Updated SCSI Firmware to following versions: 297 qla12160: 10.01.19 298 qla1280: 8.09.00 299 Rev. 3.06 Apr 12, 2000 DG & BN Qlogic 300 - Internal revision; not released 301 Rev. 3.05 Mar 28, 2000 DG & BN Qlogic 302 - Edit correction for virt_to_bus and PROC. 303 Rev. 3.04 Mar 28, 2000 DG & BN Qlogic 304 - Merge changes from ia64 port. 305 Rev. 3.03 Mar 28, 2000 BN Qlogic 306 - Increase version to reflect new code drop with compile fix 307 of issue with inclusion of linux/spinlock for 2.3 kernels 308 Rev. 3.02 Mar 15, 2000 BN Qlogic 309 - Merge qla1280_proc_info from 2.10 code base 310 Rev. 3.01 Feb 10, 2000 BN Qlogic 311 - Corrected code to compile on a 2.2.x kernel. 312 Rev. 3.00 Jan 17, 2000 DG Qlogic 313 - Added 64-bit support. 314 Rev. 2.07 Nov 9, 1999 DG Qlogic 315 - Added new routine to set target parameters for ISP12160. 316 Rev. 2.06 Sept 10, 1999 DG Qlogic 317 - Added support for ISP12160 Ultra 3 chip. 318 Rev. 2.03 August 3, 1999 Fred Lewis, Intel DuPont 319 - Modified code to remove errors generated when compiling with 320 Cygnus IA64 Compiler. 321 - Changed conversion of pointers to unsigned longs instead of integers. 322 - Changed type of I/O port variables from uint32_t to unsigned long. 323 - Modified OFFSET macro to work with 64-bit as well as 32-bit. 324 - Changed sprintf and printk format specifiers for pointers to %p. 325 - Changed some int to long type casts where needed in sprintf & printk. 326 - Added l modifiers to sprintf and printk format specifiers for longs. 327 - Removed unused local variables. 328 Rev. 1.20 June 8, 1999 DG, Qlogic 329 Changes to support RedHat release 6.0 (kernel 2.2.5). 330 - Added SCSI exclusive access lock (io_request_lock) when accessing 331 the adapter. 332 - Added changes for the new LINUX interface template. Some new error 333 handling routines have been added to the template, but for now we 334 will use the old ones. 335 - Initial Beta Release. 336 *****************************************************************************/ 337 338 339 #include <linux/module.h> 340 341 #include <linux/types.h> 342 #include <linux/string.h> 343 #include <linux/errno.h> 344 #include <linux/kernel.h> 345 #include <linux/ioport.h> 346 #include <linux/delay.h> 347 #include <linux/timer.h> 348 #include <linux/pci.h> 349 #include <linux/proc_fs.h> 350 #include <linux/stat.h> 351 #include <linux/pci_ids.h> 352 #include <linux/interrupt.h> 353 #include <linux/init.h> 354 #include <linux/dma-mapping.h> 355 #include <linux/firmware.h> 356 357 #include <asm/io.h> 358 #include <asm/irq.h> 359 #include <asm/byteorder.h> 360 #include <asm/processor.h> 361 #include <asm/types.h> 362 363 #include <scsi/scsi.h> 364 #include <scsi/scsi_cmnd.h> 365 #include <scsi/scsi_device.h> 366 #include <scsi/scsi_host.h> 367 #include <scsi/scsi_tcq.h> 368 369 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 370 #include <asm/sn/io.h> 371 #endif 372 373 374 /* 375 * Compile time Options: 376 * 0 - Disable and 1 - Enable 377 */ 378 #define DEBUG_QLA1280_INTR 0 379 #define DEBUG_PRINT_NVRAM 0 380 #define DEBUG_QLA1280 0 381 382 #define MEMORY_MAPPED_IO 1 383 384 #include "qla1280.h" 385 386 #ifndef BITS_PER_LONG 387 #error "BITS_PER_LONG not defined!" 
#endif

#if (BITS_PER_LONG == 64) || defined CONFIG_HIGHMEM
#define QLA_64BIT_PTR	1
#endif

#ifdef QLA_64BIT_PTR
#define pci_dma_hi32(a)			((a >> 16) >> 16)
#else
#define pci_dma_hi32(a)			0
#endif
#define pci_dma_lo32(a)			(a & 0xffffffff)

#define NVRAM_DELAY()			udelay(500)	/* 500 microseconds */

#if defined(__ia64__) && !defined(ia64_platform_is)
#define ia64_platform_is(x)		(!strcmp(x, platform_name))
#endif


#define IS_ISP1040(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
#define IS_ISP1x40(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
			 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
#define IS_ISP1x160(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
			 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)


static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
static void qla1280_remove_one(struct pci_dev *);

/*
 *  QLogic Driver Support Function Prototypes.
 */
static void qla1280_done(struct scsi_qla_host *);
static int qla1280_get_token(char *);
static int qla1280_setup(char *s) __init;

/*
 *  QLogic ISP1280 Hardware Support Function Prototypes.
 */
static int qla1280_load_firmware(struct scsi_qla_host *);
static int qla1280_init_rings(struct scsi_qla_host *);
static int qla1280_nvram_config(struct scsi_qla_host *);
static int qla1280_mailbox_command(struct scsi_qla_host *,
				   uint8_t, uint16_t *);
static int qla1280_bus_reset(struct scsi_qla_host *, int);
static int qla1280_device_reset(struct scsi_qla_host *, int, int);
static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
static int qla1280_abort_isp(struct scsi_qla_host *);
#ifdef QLA_64BIT_PTR
static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
#else
static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
#endif
static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
static void qla1280_poll(struct scsi_qla_host *);
static void qla1280_reset_adapter(struct scsi_qla_host *);
static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
static void qla1280_isp_cmd(struct scsi_qla_host *);
static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
static void qla1280_rst_aen(struct scsi_qla_host *);
static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
				 struct list_head *);
static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
				struct list_head *);
static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
static request_t *qla1280_req_pkt(struct scsi_qla_host *);
static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
					   unsigned int);
static void qla1280_get_target_parameters(struct scsi_qla_host *,
					  struct scsi_device *);
static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);


static struct qla_driver_setup driver_setup;

/*
 * convert scsi data direction to request_t control flags
 */
static inline uint16_t
qla1280_data_direction(struct scsi_cmnd *cmnd)
{
	switch(cmnd->sc_data_direction) {
	case DMA_FROM_DEVICE:
		return BIT_5;
	case DMA_TO_DEVICE:
475 return BIT_6; 476 case DMA_BIDIRECTIONAL: 477 return BIT_5 | BIT_6; 478 /* 479 * We could BUG() on default here if one of the four cases aren't 480 * met, but then again if we receive something like that from the 481 * SCSI layer we have more serious problems. This shuts up GCC. 482 */ 483 case DMA_NONE: 484 default: 485 return 0; 486 } 487 } 488 489 #if DEBUG_QLA1280 490 static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd); 491 static void __qla1280_dump_buffer(char *, int); 492 #endif 493 494 495 /* 496 * insmod needs to find the variable and make it point to something 497 */ 498 #ifdef MODULE 499 static char *qla1280; 500 501 /* insmod qla1280 options=verbose" */ 502 module_param(qla1280, charp, 0); 503 #else 504 __setup("qla1280=", qla1280_setup); 505 #endif 506 507 508 /* 509 * We use the scsi_pointer structure that's included with each scsi_command 510 * to overlay our struct srb over it. qla1280_init() checks that a srb is not 511 * bigger than a scsi_pointer. 512 */ 513 514 #define CMD_SP(Cmnd) &Cmnd->SCp 515 #define CMD_CDBLEN(Cmnd) Cmnd->cmd_len 516 #define CMD_CDBP(Cmnd) Cmnd->cmnd 517 #define CMD_SNSP(Cmnd) Cmnd->sense_buffer 518 #define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE 519 #define CMD_RESULT(Cmnd) Cmnd->result 520 #define CMD_HANDLE(Cmnd) Cmnd->host_scribble 521 #define CMD_REQUEST(Cmnd) Cmnd->request->cmd 522 523 #define CMD_HOST(Cmnd) Cmnd->device->host 524 #define SCSI_BUS_32(Cmnd) Cmnd->device->channel 525 #define SCSI_TCN_32(Cmnd) Cmnd->device->id 526 #define SCSI_LUN_32(Cmnd) Cmnd->device->lun 527 528 529 /*****************************************/ 530 /* ISP Boards supported by this driver */ 531 /*****************************************/ 532 533 struct qla_boards { 534 char *name; /* Board ID String */ 535 int numPorts; /* Number of SCSI ports */ 536 int fw_index; /* index into qla1280_fw_tbl for firmware */ 537 }; 538 539 /* NOTE: the last argument in each entry is used to index ql1280_board_tbl */ 540 static struct pci_device_id qla1280_pci_tbl[] = { 541 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160, 542 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 543 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020, 544 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1}, 545 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080, 546 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2}, 547 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240, 548 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3}, 549 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280, 550 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4}, 551 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160, 552 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5}, 553 {0,} 554 }; 555 MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl); 556 557 DEFINE_MUTEX(qla1280_firmware_mutex); 558 559 struct qla_fw { 560 char *fwname; 561 const struct firmware *fw; 562 }; 563 564 #define QL_NUM_FW_IMAGES 3 565 566 struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = { 567 {"qlogic/1040.bin", NULL}, /* image 0 */ 568 {"qlogic/1280.bin", NULL}, /* image 1 */ 569 {"qlogic/12160.bin", NULL}, /* image 2 */ 570 }; 571 572 /* NOTE: Order of boards in this table must match order in qla1280_pci_tbl */ 573 static struct qla_boards ql1280_board_tbl[] = { 574 {.name = "QLA12160", .numPorts = 2, .fw_index = 2}, 575 {.name = "QLA1040" , .numPorts = 1, .fw_index = 0}, 576 {.name = "QLA1080" , .numPorts = 1, .fw_index = 1}, 577 {.name = "QLA1240" , .numPorts = 2, .fw_index = 1}, 578 {.name = "QLA1280" , .numPorts = 2, .fw_index = 1}, 579 {.name = "QLA10160", .numPorts = 1, .fw_index = 2}, 580 {.name = " ", .numPorts = 0, .fw_index = -1}, 581 
}; 582 583 static int qla1280_verbose = 1; 584 585 #if DEBUG_QLA1280 586 static int ql_debug_level = 1; 587 #define dprintk(level, format, a...) \ 588 do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0) 589 #define qla1280_dump_buffer(level, buf, size) \ 590 if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size) 591 #define qla1280_print_scsi_cmd(level, cmd) \ 592 if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd) 593 #else 594 #define ql_debug_level 0 595 #define dprintk(level, format, a...) do{}while(0) 596 #define qla1280_dump_buffer(a, b, c) do{}while(0) 597 #define qla1280_print_scsi_cmd(a, b) do{}while(0) 598 #endif 599 600 #define ENTER(x) dprintk(3, "qla1280 : Entering %s()\n", x); 601 #define LEAVE(x) dprintk(3, "qla1280 : Leaving %s()\n", x); 602 #define ENTER_INTR(x) dprintk(4, "qla1280 : Entering %s()\n", x); 603 #define LEAVE_INTR(x) dprintk(4, "qla1280 : Leaving %s()\n", x); 604 605 606 static int qla1280_read_nvram(struct scsi_qla_host *ha) 607 { 608 uint16_t *wptr; 609 uint8_t chksum; 610 int cnt, i; 611 struct nvram *nv; 612 613 ENTER("qla1280_read_nvram"); 614 615 if (driver_setup.no_nvram) 616 return 1; 617 618 printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no); 619 620 wptr = (uint16_t *)&ha->nvram; 621 nv = &ha->nvram; 622 chksum = 0; 623 for (cnt = 0; cnt < 3; cnt++) { 624 *wptr = qla1280_get_nvram_word(ha, cnt); 625 chksum += *wptr & 0xff; 626 chksum += (*wptr >> 8) & 0xff; 627 wptr++; 628 } 629 630 if (nv->id0 != 'I' || nv->id1 != 'S' || 631 nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) { 632 dprintk(2, "Invalid nvram ID or version!\n"); 633 chksum = 1; 634 } else { 635 for (; cnt < sizeof(struct nvram); cnt++) { 636 *wptr = qla1280_get_nvram_word(ha, cnt); 637 chksum += *wptr & 0xff; 638 chksum += (*wptr >> 8) & 0xff; 639 wptr++; 640 } 641 } 642 643 dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x" 644 " version %i\n", nv->id0, nv->id1, nv->id2, nv->id3, 645 nv->version); 646 647 648 if (chksum) { 649 if (!driver_setup.no_nvram) 650 printk(KERN_WARNING "scsi(%ld): Unable to identify or " 651 "validate NVRAM checksum, using default " 652 "settings\n", ha->host_no); 653 ha->nvram_valid = 0; 654 } else 655 ha->nvram_valid = 1; 656 657 /* The firmware interface is, um, interesting, in that the 658 * actual firmware image on the chip is little endian, thus, 659 * the process of taking that image to the CPU would end up 660 * little endian. However, the firmware interface requires it 661 * to be read a word (two bytes) at a time. 662 * 663 * The net result of this would be that the word (and 664 * doubleword) quantites in the firmware would be correct, but 665 * the bytes would be pairwise reversed. Since most of the 666 * firmware quantites are, in fact, bytes, we do an extra 667 * le16_to_cpu() in the firmware read routine. 668 * 669 * The upshot of all this is that the bytes in the firmware 670 * are in the correct places, but the 16 and 32 bit quantites 671 * are still in little endian format. 
We fix that up below by
	 * doing extra reverses on them */
	nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
	nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
	for(i = 0; i < MAX_BUSES; i++) {
		nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
		nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
	}
	dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
	LEAVE("qla1280_read_nvram");

	return chksum;
}

/**************************************************************************
 *   qla1280_info
 *     Return a string describing the driver.
 **************************************************************************/
static const char *
qla1280_info(struct Scsi_Host *host)
{
	static char qla1280_scsi_name_buffer[125];
	char *bp;
	struct scsi_qla_host *ha;
	struct qla_boards *bdp;

	bp = &qla1280_scsi_name_buffer[0];
	ha = (struct scsi_qla_host *)host->hostdata;
	bdp = &ql1280_board_tbl[ha->devnum];
	memset(bp, 0, sizeof(qla1280_scsi_name_buffer));

	sprintf (bp,
		 "QLogic %s PCI to SCSI Host Adapter\n"
		 "       Firmware version: %2d.%02d.%02d, Driver version %s",
		 &bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3,
		 QLA1280_VERSION);
	return bp;
}

/**************************************************************************
 *   qla1280_queuecommand
 *     Queue a command to the controller.
 *
 * Note:
 * The mid-level driver tries to ensure that queuecommand never gets invoked
 * concurrently with itself or the interrupt handler (although the
 * interrupt handler may call this routine as part of request-completion
 * handling).  Unfortunately, it sometimes calls the scheduler in interrupt
 * context which is a big NO! NO!.
 **************************************************************************/
static int
qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
	struct srb *sp = (struct srb *)CMD_SP(cmd);
	int status;

	cmd->scsi_done = fn;
	sp->cmd = cmd;
	sp->flags = 0;
	sp->wait = NULL;
	CMD_HANDLE(cmd) = (unsigned char *)NULL;

	qla1280_print_scsi_cmd(5, cmd);

#ifdef QLA_64BIT_PTR
	/*
	 * Using 64 bit commands if the PCI bridge doesn't support it is a
	 * bit wasteful, however this should really only happen if one's
	 * PCI controller is completely broken, like the BCM1250. For
	 * sane hardware this is not an issue.
743 */ 744 status = qla1280_64bit_start_scsi(ha, sp); 745 #else 746 status = qla1280_32bit_start_scsi(ha, sp); 747 #endif 748 return status; 749 } 750 751 static DEF_SCSI_QCMD(qla1280_queuecommand) 752 753 enum action { 754 ABORT_COMMAND, 755 DEVICE_RESET, 756 BUS_RESET, 757 ADAPTER_RESET, 758 }; 759 760 761 static void qla1280_mailbox_timeout(struct timer_list *t) 762 { 763 struct scsi_qla_host *ha = from_timer(ha, t, mailbox_timer); 764 struct device_reg __iomem *reg; 765 reg = ha->iobase; 766 767 ha->mailbox_out[0] = RD_REG_WORD(®->mailbox0); 768 printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, " 769 "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0], 770 RD_REG_WORD(®->ictrl), RD_REG_WORD(®->istatus)); 771 complete(ha->mailbox_wait); 772 } 773 774 static int 775 _qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp, 776 struct completion *wait) 777 { 778 int status = FAILED; 779 struct scsi_cmnd *cmd = sp->cmd; 780 781 spin_unlock_irq(ha->host->host_lock); 782 wait_for_completion_timeout(wait, 4*HZ); 783 spin_lock_irq(ha->host->host_lock); 784 sp->wait = NULL; 785 if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) { 786 status = SUCCESS; 787 (*cmd->scsi_done)(cmd); 788 } 789 return status; 790 } 791 792 static int 793 qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp) 794 { 795 DECLARE_COMPLETION_ONSTACK(wait); 796 797 sp->wait = &wait; 798 return _qla1280_wait_for_single_command(ha, sp, &wait); 799 } 800 801 static int 802 qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target) 803 { 804 int cnt; 805 int status; 806 struct srb *sp; 807 struct scsi_cmnd *cmd; 808 809 status = SUCCESS; 810 811 /* 812 * Wait for all commands with the designated bus/target 813 * to be completed by the firmware 814 */ 815 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 816 sp = ha->outstanding_cmds[cnt]; 817 if (sp) { 818 cmd = sp->cmd; 819 820 if (bus >= 0 && SCSI_BUS_32(cmd) != bus) 821 continue; 822 if (target >= 0 && SCSI_TCN_32(cmd) != target) 823 continue; 824 825 status = qla1280_wait_for_single_command(ha, sp); 826 if (status == FAILED) 827 break; 828 } 829 } 830 return status; 831 } 832 833 /************************************************************************** 834 * qla1280_error_action 835 * The function will attempt to perform a specified error action and 836 * wait for the results (or time out). 837 * 838 * Input: 839 * cmd = Linux SCSI command packet of the command that cause the 840 * bus reset. 
841 * action = error action to take (see action_t) 842 * 843 * Returns: 844 * SUCCESS or FAILED 845 * 846 **************************************************************************/ 847 static int 848 qla1280_error_action(struct scsi_cmnd *cmd, enum action action) 849 { 850 struct scsi_qla_host *ha; 851 int bus, target, lun; 852 struct srb *sp; 853 int i, found; 854 int result=FAILED; 855 int wait_for_bus=-1; 856 int wait_for_target = -1; 857 DECLARE_COMPLETION_ONSTACK(wait); 858 859 ENTER("qla1280_error_action"); 860 861 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata); 862 sp = (struct srb *)CMD_SP(cmd); 863 bus = SCSI_BUS_32(cmd); 864 target = SCSI_TCN_32(cmd); 865 lun = SCSI_LUN_32(cmd); 866 867 dprintk(4, "error_action %i, istatus 0x%04x\n", action, 868 RD_REG_WORD(&ha->iobase->istatus)); 869 870 dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n", 871 RD_REG_WORD(&ha->iobase->host_cmd), 872 RD_REG_WORD(&ha->iobase->ictrl), jiffies); 873 874 if (qla1280_verbose) 875 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, " 876 "Handle=0x%p, action=0x%x\n", 877 ha->host_no, cmd, CMD_HANDLE(cmd), action); 878 879 /* 880 * Check to see if we have the command in the outstanding_cmds[] 881 * array. If not then it must have completed before this error 882 * action was initiated. If the error_action isn't ABORT_COMMAND 883 * then the driver must proceed with the requested action. 884 */ 885 found = -1; 886 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) { 887 if (sp == ha->outstanding_cmds[i]) { 888 found = i; 889 sp->wait = &wait; /* we'll wait for it to complete */ 890 break; 891 } 892 } 893 894 if (found < 0) { /* driver doesn't have command */ 895 result = SUCCESS; 896 if (qla1280_verbose) { 897 printk(KERN_INFO 898 "scsi(%ld:%d:%d:%d): specified command has " 899 "already completed.\n", ha->host_no, bus, 900 target, lun); 901 } 902 } 903 904 switch (action) { 905 906 case ABORT_COMMAND: 907 dprintk(1, "qla1280: RISC aborting command\n"); 908 /* 909 * The abort might fail due to race when the host_lock 910 * is released to issue the abort. As such, we 911 * don't bother to check the return status. 912 */ 913 if (found >= 0) 914 qla1280_abort_command(ha, sp, found); 915 break; 916 917 case DEVICE_RESET: 918 if (qla1280_verbose) 919 printk(KERN_INFO 920 "scsi(%ld:%d:%d:%d): Queueing device reset " 921 "command.\n", ha->host_no, bus, target, lun); 922 if (qla1280_device_reset(ha, bus, target) == 0) { 923 /* issued device reset, set wait conditions */ 924 wait_for_bus = bus; 925 wait_for_target = target; 926 } 927 break; 928 929 case BUS_RESET: 930 if (qla1280_verbose) 931 printk(KERN_INFO "qla1280(%ld:%d): Issued bus " 932 "reset.\n", ha->host_no, bus); 933 if (qla1280_bus_reset(ha, bus) == 0) { 934 /* issued bus reset, set wait conditions */ 935 wait_for_bus = bus; 936 } 937 break; 938 939 case ADAPTER_RESET: 940 default: 941 if (qla1280_verbose) { 942 printk(KERN_INFO 943 "scsi(%ld): Issued ADAPTER RESET\n", 944 ha->host_no); 945 printk(KERN_INFO "scsi(%ld): I/O processing will " 946 "continue automatically\n", ha->host_no); 947 } 948 ha->flags.reset_active = 1; 949 950 if (qla1280_abort_isp(ha) != 0) { /* it's dead */ 951 result = FAILED; 952 } 953 954 ha->flags.reset_active = 0; 955 } 956 957 /* 958 * At this point, the host_lock has been released and retaken 959 * by the issuance of the mailbox command. 960 * Wait for the command passed in by the mid-layer if it 961 * was found by the driver. 
It might have been returned 962 * between eh recovery steps, hence the check of the "found" 963 * variable. 964 */ 965 966 if (found >= 0) 967 result = _qla1280_wait_for_single_command(ha, sp, &wait); 968 969 if (action == ABORT_COMMAND && result != SUCCESS) { 970 printk(KERN_WARNING 971 "scsi(%li:%i:%i:%i): " 972 "Unable to abort command!\n", 973 ha->host_no, bus, target, lun); 974 } 975 976 /* 977 * If the command passed in by the mid-layer has been 978 * returned by the board, then wait for any additional 979 * commands which are supposed to complete based upon 980 * the error action. 981 * 982 * All commands are unconditionally returned during a 983 * call to qla1280_abort_isp(), ADAPTER_RESET. No need 984 * to wait for them. 985 */ 986 if (result == SUCCESS && wait_for_bus >= 0) { 987 result = qla1280_wait_for_pending_commands(ha, 988 wait_for_bus, wait_for_target); 989 } 990 991 dprintk(1, "RESET returning %d\n", result); 992 993 LEAVE("qla1280_error_action"); 994 return result; 995 } 996 997 /************************************************************************** 998 * qla1280_abort 999 * Abort the specified SCSI command(s). 1000 **************************************************************************/ 1001 static int 1002 qla1280_eh_abort(struct scsi_cmnd * cmd) 1003 { 1004 int rc; 1005 1006 spin_lock_irq(cmd->device->host->host_lock); 1007 rc = qla1280_error_action(cmd, ABORT_COMMAND); 1008 spin_unlock_irq(cmd->device->host->host_lock); 1009 1010 return rc; 1011 } 1012 1013 /************************************************************************** 1014 * qla1280_device_reset 1015 * Reset the specified SCSI device 1016 **************************************************************************/ 1017 static int 1018 qla1280_eh_device_reset(struct scsi_cmnd *cmd) 1019 { 1020 int rc; 1021 1022 spin_lock_irq(cmd->device->host->host_lock); 1023 rc = qla1280_error_action(cmd, DEVICE_RESET); 1024 spin_unlock_irq(cmd->device->host->host_lock); 1025 1026 return rc; 1027 } 1028 1029 /************************************************************************** 1030 * qla1280_bus_reset 1031 * Reset the specified bus. 
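 *    The bus reset is issued on the channel that the command passed in
 *    by the midlayer was queued to.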
1032 **************************************************************************/ 1033 static int 1034 qla1280_eh_bus_reset(struct scsi_cmnd *cmd) 1035 { 1036 int rc; 1037 1038 spin_lock_irq(cmd->device->host->host_lock); 1039 rc = qla1280_error_action(cmd, BUS_RESET); 1040 spin_unlock_irq(cmd->device->host->host_lock); 1041 1042 return rc; 1043 } 1044 1045 /************************************************************************** 1046 * qla1280_adapter_reset 1047 * Reset the specified adapter (both channels) 1048 **************************************************************************/ 1049 static int 1050 qla1280_eh_adapter_reset(struct scsi_cmnd *cmd) 1051 { 1052 int rc; 1053 1054 spin_lock_irq(cmd->device->host->host_lock); 1055 rc = qla1280_error_action(cmd, ADAPTER_RESET); 1056 spin_unlock_irq(cmd->device->host->host_lock); 1057 1058 return rc; 1059 } 1060 1061 static int 1062 qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev, 1063 sector_t capacity, int geom[]) 1064 { 1065 int heads, sectors, cylinders; 1066 1067 heads = 64; 1068 sectors = 32; 1069 cylinders = (unsigned long)capacity / (heads * sectors); 1070 if (cylinders > 1024) { 1071 heads = 255; 1072 sectors = 63; 1073 cylinders = (unsigned long)capacity / (heads * sectors); 1074 /* if (cylinders > 1023) 1075 cylinders = 1023; */ 1076 } 1077 1078 geom[0] = heads; 1079 geom[1] = sectors; 1080 geom[2] = cylinders; 1081 1082 return 0; 1083 } 1084 1085 1086 /* disable risc and host interrupts */ 1087 static inline void 1088 qla1280_disable_intrs(struct scsi_qla_host *ha) 1089 { 1090 WRT_REG_WORD(&ha->iobase->ictrl, 0); 1091 RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */ 1092 } 1093 1094 /* enable risc and host interrupts */ 1095 static inline void 1096 qla1280_enable_intrs(struct scsi_qla_host *ha) 1097 { 1098 WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC)); 1099 RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */ 1100 } 1101 1102 /************************************************************************** 1103 * qla1280_intr_handler 1104 * Handles the H/W interrupt 1105 **************************************************************************/ 1106 static irqreturn_t 1107 qla1280_intr_handler(int irq, void *dev_id) 1108 { 1109 struct scsi_qla_host *ha; 1110 struct device_reg __iomem *reg; 1111 u16 data; 1112 int handled = 0; 1113 1114 ENTER_INTR ("qla1280_intr_handler"); 1115 ha = (struct scsi_qla_host *)dev_id; 1116 1117 spin_lock(ha->host->host_lock); 1118 1119 ha->isr_count++; 1120 reg = ha->iobase; 1121 1122 qla1280_disable_intrs(ha); 1123 1124 data = qla1280_debounce_register(®->istatus); 1125 /* Check for pending interrupts. */ 1126 if (data & RISC_INT) { 1127 qla1280_isr(ha, &ha->done_q); 1128 handled = 1; 1129 } 1130 if (!list_empty(&ha->done_q)) 1131 qla1280_done(ha); 1132 1133 spin_unlock(ha->host->host_lock); 1134 1135 qla1280_enable_intrs(ha); 1136 1137 LEAVE_INTR("qla1280_intr_handler"); 1138 return IRQ_RETVAL(handled); 1139 } 1140 1141 1142 static int 1143 qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target) 1144 { 1145 uint8_t mr; 1146 uint16_t mb[MAILBOX_REGISTER_COUNT]; 1147 struct nvram *nv; 1148 int status, lun; 1149 1150 nv = &ha->nvram; 1151 1152 mr = BIT_3 | BIT_2 | BIT_1 | BIT_0; 1153 1154 /* Set Target Parameters. */ 1155 mb[0] = MBC_SET_TARGET_PARAMETERS; 1156 mb[1] = (uint16_t)((bus ? 
target | BIT_7 : target) << 8);
	mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
	mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
	mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
	mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
	mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
	mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
	mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
	mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;

	if (IS_ISP1x160(ha)) {
		mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
		mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
		mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
			 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
		mr |= BIT_6;
	} else {
		mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
	}
	mb[3] |= nv->bus[bus].target[target].sync_period;

	status = qla1280_mailbox_command(ha, mr, mb);

	/* Set Device Queue Parameters. */
	for (lun = 0; lun < MAX_LUNS; lun++) {
		mb[0] = MBC_SET_DEVICE_QUEUE;
		mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
		mb[1] |= lun;
		mb[2] = nv->bus[bus].max_queue_depth;
		mb[3] = nv->bus[bus].target[target].execution_throttle;
		status |= qla1280_mailbox_command(ha, 0x0f, mb);
	}

	if (status)
		printk(KERN_WARNING "scsi(%ld:%i:%i): "
		       "qla1280_set_target_parameters() failed\n",
		       ha->host_no, bus, target);
	return status;
}


/**************************************************************************
 *   qla1280_slave_configure
 *
 * Description:
 *   Determines the queue depth for a given device.  If the device
 *   supports tagged queueing and tagged queueing is enabled for its bus
 *   in the adapter settings, the firmware high-water mark for that bus
 *   (bus_settings[bus].hiwat) is used as the queue depth; otherwise a
 *   small default depth suitable for untagged operation is used.
1207 **************************************************************************/ 1208 static int 1209 qla1280_slave_configure(struct scsi_device *device) 1210 { 1211 struct scsi_qla_host *ha; 1212 int default_depth = 3; 1213 int bus = device->channel; 1214 int target = device->id; 1215 int status = 0; 1216 struct nvram *nv; 1217 unsigned long flags; 1218 1219 ha = (struct scsi_qla_host *)device->host->hostdata; 1220 nv = &ha->nvram; 1221 1222 if (qla1280_check_for_dead_scsi_bus(ha, bus)) 1223 return 1; 1224 1225 if (device->tagged_supported && 1226 (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) { 1227 scsi_change_queue_depth(device, ha->bus_settings[bus].hiwat); 1228 } else { 1229 scsi_change_queue_depth(device, default_depth); 1230 } 1231 1232 nv->bus[bus].target[target].parameter.enable_sync = device->sdtr; 1233 nv->bus[bus].target[target].parameter.enable_wide = device->wdtr; 1234 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr; 1235 1236 if (driver_setup.no_sync || 1237 (driver_setup.sync_mask && 1238 (~driver_setup.sync_mask & (1 << target)))) 1239 nv->bus[bus].target[target].parameter.enable_sync = 0; 1240 if (driver_setup.no_wide || 1241 (driver_setup.wide_mask && 1242 (~driver_setup.wide_mask & (1 << target)))) 1243 nv->bus[bus].target[target].parameter.enable_wide = 0; 1244 if (IS_ISP1x160(ha)) { 1245 if (driver_setup.no_ppr || 1246 (driver_setup.ppr_mask && 1247 (~driver_setup.ppr_mask & (1 << target)))) 1248 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0; 1249 } 1250 1251 spin_lock_irqsave(ha->host->host_lock, flags); 1252 if (nv->bus[bus].target[target].parameter.enable_sync) 1253 status = qla1280_set_target_parameters(ha, bus, target); 1254 qla1280_get_target_parameters(ha, device); 1255 spin_unlock_irqrestore(ha->host->host_lock, flags); 1256 return status; 1257 } 1258 1259 1260 /* 1261 * qla1280_done 1262 * Process completed commands. 1263 * 1264 * Input: 1265 * ha = adapter block pointer. 1266 */ 1267 static void 1268 qla1280_done(struct scsi_qla_host *ha) 1269 { 1270 struct srb *sp; 1271 struct list_head *done_q; 1272 int bus, target, lun; 1273 struct scsi_cmnd *cmd; 1274 1275 ENTER("qla1280_done"); 1276 1277 done_q = &ha->done_q; 1278 1279 while (!list_empty(done_q)) { 1280 sp = list_entry(done_q->next, struct srb, list); 1281 1282 list_del(&sp->list); 1283 1284 cmd = sp->cmd; 1285 bus = SCSI_BUS_32(cmd); 1286 target = SCSI_TCN_32(cmd); 1287 lun = SCSI_LUN_32(cmd); 1288 1289 switch ((CMD_RESULT(cmd) >> 16)) { 1290 case DID_RESET: 1291 /* Issue marker command. 
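			 * to resynchronize the firmware's transfer state for
			 * this target; it is skipped while a full ISP abort is
			 * in progress, since that path resynchronizes all
			 * targets on its own.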
*/ 1292 if (!ha->flags.abort_isp_active) 1293 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID); 1294 break; 1295 case DID_ABORT: 1296 sp->flags &= ~SRB_ABORT_PENDING; 1297 sp->flags |= SRB_ABORTED; 1298 break; 1299 default: 1300 break; 1301 } 1302 1303 /* Release memory used for this I/O */ 1304 scsi_dma_unmap(cmd); 1305 1306 /* Call the mid-level driver interrupt handler */ 1307 ha->actthreads--; 1308 1309 if (sp->wait == NULL) 1310 (*(cmd)->scsi_done)(cmd); 1311 else 1312 complete(sp->wait); 1313 } 1314 LEAVE("qla1280_done"); 1315 } 1316 1317 /* 1318 * Translates a ISP error to a Linux SCSI error 1319 */ 1320 static int 1321 qla1280_return_status(struct response * sts, struct scsi_cmnd *cp) 1322 { 1323 int host_status = DID_ERROR; 1324 uint16_t comp_status = le16_to_cpu(sts->comp_status); 1325 uint16_t state_flags = le16_to_cpu(sts->state_flags); 1326 uint32_t residual_length = le32_to_cpu(sts->residual_length); 1327 uint16_t scsi_status = le16_to_cpu(sts->scsi_status); 1328 #if DEBUG_QLA1280_INTR 1329 static char *reason[] = { 1330 "DID_OK", 1331 "DID_NO_CONNECT", 1332 "DID_BUS_BUSY", 1333 "DID_TIME_OUT", 1334 "DID_BAD_TARGET", 1335 "DID_ABORT", 1336 "DID_PARITY", 1337 "DID_ERROR", 1338 "DID_RESET", 1339 "DID_BAD_INTR" 1340 }; 1341 #endif /* DEBUG_QLA1280_INTR */ 1342 1343 ENTER("qla1280_return_status"); 1344 1345 #if DEBUG_QLA1280_INTR 1346 /* 1347 dprintk(1, "qla1280_return_status: compl status = 0x%04x\n", 1348 comp_status); 1349 */ 1350 #endif 1351 1352 switch (comp_status) { 1353 case CS_COMPLETE: 1354 host_status = DID_OK; 1355 break; 1356 1357 case CS_INCOMPLETE: 1358 if (!(state_flags & SF_GOT_BUS)) 1359 host_status = DID_NO_CONNECT; 1360 else if (!(state_flags & SF_GOT_TARGET)) 1361 host_status = DID_BAD_TARGET; 1362 else if (!(state_flags & SF_SENT_CDB)) 1363 host_status = DID_ERROR; 1364 else if (!(state_flags & SF_TRANSFERRED_DATA)) 1365 host_status = DID_ERROR; 1366 else if (!(state_flags & SF_GOT_STATUS)) 1367 host_status = DID_ERROR; 1368 else if (!(state_flags & SF_GOT_SENSE)) 1369 host_status = DID_ERROR; 1370 break; 1371 1372 case CS_RESET: 1373 host_status = DID_RESET; 1374 break; 1375 1376 case CS_ABORTED: 1377 host_status = DID_ABORT; 1378 break; 1379 1380 case CS_TIMEOUT: 1381 host_status = DID_TIME_OUT; 1382 break; 1383 1384 case CS_DATA_OVERRUN: 1385 dprintk(2, "Data overrun 0x%x\n", residual_length); 1386 dprintk(2, "qla1280_return_status: response packet data\n"); 1387 qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE); 1388 host_status = DID_ERROR; 1389 break; 1390 1391 case CS_DATA_UNDERRUN: 1392 if ((scsi_bufflen(cp) - residual_length) < 1393 cp->underflow) { 1394 printk(KERN_WARNING 1395 "scsi: Underflow detected - retrying " 1396 "command.\n"); 1397 host_status = DID_ERROR; 1398 } else { 1399 scsi_set_resid(cp, residual_length); 1400 host_status = DID_OK; 1401 } 1402 break; 1403 1404 default: 1405 host_status = DID_ERROR; 1406 break; 1407 } 1408 1409 #if DEBUG_QLA1280_INTR 1410 dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n", 1411 reason[host_status], scsi_status); 1412 #endif 1413 1414 LEAVE("qla1280_return_status"); 1415 1416 return (scsi_status & 0xff) | (host_status << 16); 1417 } 1418 1419 /****************************************************************************/ 1420 /* QLogic ISP1280 Hardware Support Functions. */ 1421 /****************************************************************************/ 1422 1423 /* 1424 * qla1280_initialize_adapter 1425 * Initialize board. 1426 * 1427 * Input: 1428 * ha = adapter block pointer. 
1429 * 1430 * Returns: 1431 * 0 = success 1432 */ 1433 static int 1434 qla1280_initialize_adapter(struct scsi_qla_host *ha) 1435 { 1436 struct device_reg __iomem *reg; 1437 int status; 1438 int bus; 1439 unsigned long flags; 1440 1441 ENTER("qla1280_initialize_adapter"); 1442 1443 /* Clear adapter flags. */ 1444 ha->flags.online = 0; 1445 ha->flags.disable_host_adapter = 0; 1446 ha->flags.reset_active = 0; 1447 ha->flags.abort_isp_active = 0; 1448 1449 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 1450 if (ia64_platform_is("sn2")) { 1451 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA " 1452 "dual channel lockup workaround\n", ha->host_no); 1453 ha->flags.use_pci_vchannel = 1; 1454 driver_setup.no_nvram = 1; 1455 } 1456 #endif 1457 1458 /* TODO: implement support for the 1040 nvram format */ 1459 if (IS_ISP1040(ha)) 1460 driver_setup.no_nvram = 1; 1461 1462 dprintk(1, "Configure PCI space for adapter...\n"); 1463 1464 reg = ha->iobase; 1465 1466 /* Insure mailbox registers are free. */ 1467 WRT_REG_WORD(®->semaphore, 0); 1468 WRT_REG_WORD(®->host_cmd, HC_CLR_RISC_INT); 1469 WRT_REG_WORD(®->host_cmd, HC_CLR_HOST_INT); 1470 RD_REG_WORD(®->host_cmd); 1471 1472 if (qla1280_read_nvram(ha)) { 1473 dprintk(2, "qla1280_initialize_adapter: failed to read " 1474 "NVRAM\n"); 1475 } 1476 1477 /* 1478 * It's necessary to grab the spin here as qla1280_mailbox_command 1479 * needs to be able to drop the lock unconditionally to wait 1480 * for completion. 1481 */ 1482 spin_lock_irqsave(ha->host->host_lock, flags); 1483 1484 status = qla1280_load_firmware(ha); 1485 if (status) { 1486 printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n", 1487 ha->host_no); 1488 goto out; 1489 } 1490 1491 /* Setup adapter based on NVRAM parameters. */ 1492 dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no); 1493 qla1280_nvram_config(ha); 1494 1495 if (ha->flags.disable_host_adapter) { 1496 status = 1; 1497 goto out; 1498 } 1499 1500 status = qla1280_init_rings(ha); 1501 if (status) 1502 goto out; 1503 1504 /* Issue SCSI reset, if we can't reset twice then bus is dead */ 1505 for (bus = 0; bus < ha->ports; bus++) { 1506 if (!ha->bus_settings[bus].disable_scsi_reset && 1507 qla1280_bus_reset(ha, bus) && 1508 qla1280_bus_reset(ha, bus)) 1509 ha->bus_settings[bus].scsi_bus_dead = 1; 1510 } 1511 1512 ha->flags.online = 1; 1513 out: 1514 spin_unlock_irqrestore(ha->host->host_lock, flags); 1515 1516 if (status) 1517 dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n"); 1518 1519 LEAVE("qla1280_initialize_adapter"); 1520 return status; 1521 } 1522 1523 /* 1524 * qla1280_request_firmware 1525 * Acquire firmware for chip. Retain in memory 1526 * for error recovery. 1527 * 1528 * Input: 1529 * ha = adapter block pointer. 1530 * 1531 * Returns: 1532 * Pointer to firmware image or an error code 1533 * cast to pointer via ERR_PTR(). 
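 *
 *      The loaded image is cached in qla1280_fw_tbl so that subsequent
 *      resets and error recovery can reuse it without calling
 *      request_firmware() again.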
1534 */ 1535 static const struct firmware * 1536 qla1280_request_firmware(struct scsi_qla_host *ha) 1537 { 1538 const struct firmware *fw; 1539 int err; 1540 int index; 1541 char *fwname; 1542 1543 spin_unlock_irq(ha->host->host_lock); 1544 mutex_lock(&qla1280_firmware_mutex); 1545 1546 index = ql1280_board_tbl[ha->devnum].fw_index; 1547 fw = qla1280_fw_tbl[index].fw; 1548 if (fw) 1549 goto out; 1550 1551 fwname = qla1280_fw_tbl[index].fwname; 1552 err = request_firmware(&fw, fwname, &ha->pdev->dev); 1553 1554 if (err) { 1555 printk(KERN_ERR "Failed to load image \"%s\" err %d\n", 1556 fwname, err); 1557 fw = ERR_PTR(err); 1558 goto unlock; 1559 } 1560 if ((fw->size % 2) || (fw->size < 6)) { 1561 printk(KERN_ERR "Invalid firmware length %zu in image \"%s\"\n", 1562 fw->size, fwname); 1563 release_firmware(fw); 1564 fw = ERR_PTR(-EINVAL); 1565 goto unlock; 1566 } 1567 1568 qla1280_fw_tbl[index].fw = fw; 1569 1570 out: 1571 ha->fwver1 = fw->data[0]; 1572 ha->fwver2 = fw->data[1]; 1573 ha->fwver3 = fw->data[2]; 1574 unlock: 1575 mutex_unlock(&qla1280_firmware_mutex); 1576 spin_lock_irq(ha->host->host_lock); 1577 return fw; 1578 } 1579 1580 /* 1581 * Chip diagnostics 1582 * Test chip for proper operation. 1583 * 1584 * Input: 1585 * ha = adapter block pointer. 1586 * 1587 * Returns: 1588 * 0 = success. 1589 */ 1590 static int 1591 qla1280_chip_diag(struct scsi_qla_host *ha) 1592 { 1593 uint16_t mb[MAILBOX_REGISTER_COUNT]; 1594 struct device_reg __iomem *reg = ha->iobase; 1595 int status = 0; 1596 int cnt; 1597 uint16_t data; 1598 dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", ®->id_l); 1599 1600 dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no); 1601 1602 /* Soft reset chip and wait for it to finish. */ 1603 WRT_REG_WORD(®->ictrl, ISP_RESET); 1604 1605 /* 1606 * We can't do a traditional PCI write flush here by reading 1607 * back the register. The card will not respond once the reset 1608 * is in action and we end up with a machine check exception 1609 * instead. Nothing to do but wait and hope for the best. 1610 * A portable pci_write_flush(pdev) call would be very useful here. 1611 */ 1612 udelay(20); 1613 data = qla1280_debounce_register(®->ictrl); 1614 /* 1615 * Yet another QLogic gem ;-( 1616 */ 1617 for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) { 1618 udelay(5); 1619 data = RD_REG_WORD(®->ictrl); 1620 } 1621 1622 if (!cnt) 1623 goto fail; 1624 1625 /* Reset register cleared by chip reset. */ 1626 dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n"); 1627 1628 WRT_REG_WORD(®->cfg_1, 0); 1629 1630 /* Reset RISC and disable BIOS which 1631 allows RISC to execute out of RAM. */ 1632 WRT_REG_WORD(®->host_cmd, HC_RESET_RISC | 1633 HC_RELEASE_RISC | HC_DISABLE_BIOS); 1634 1635 RD_REG_WORD(®->id_l); /* Flush PCI write */ 1636 data = qla1280_debounce_register(®->mailbox0); 1637 1638 /* 1639 * I *LOVE* this code! 
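	 *
	 * (mailbox0 reads back MBS_BUSY until the RISC has finished its
	 * reset/boot sequence, so poll it with a bounded retry count)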
1640 */ 1641 for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) { 1642 udelay(5); 1643 data = RD_REG_WORD(®->mailbox0); 1644 } 1645 1646 if (!cnt) 1647 goto fail; 1648 1649 /* Check product ID of chip */ 1650 dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n"); 1651 1652 if (RD_REG_WORD(®->mailbox1) != PROD_ID_1 || 1653 (RD_REG_WORD(®->mailbox2) != PROD_ID_2 && 1654 RD_REG_WORD(®->mailbox2) != PROD_ID_2a) || 1655 RD_REG_WORD(®->mailbox3) != PROD_ID_3 || 1656 RD_REG_WORD(®->mailbox4) != PROD_ID_4) { 1657 printk(KERN_INFO "qla1280: Wrong product ID = " 1658 "0x%x,0x%x,0x%x,0x%x\n", 1659 RD_REG_WORD(®->mailbox1), 1660 RD_REG_WORD(®->mailbox2), 1661 RD_REG_WORD(®->mailbox3), 1662 RD_REG_WORD(®->mailbox4)); 1663 goto fail; 1664 } 1665 1666 /* 1667 * Enable ints early!!! 1668 */ 1669 qla1280_enable_intrs(ha); 1670 1671 dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n"); 1672 /* Wrap Incoming Mailboxes Test. */ 1673 mb[0] = MBC_MAILBOX_REGISTER_TEST; 1674 mb[1] = 0xAAAA; 1675 mb[2] = 0x5555; 1676 mb[3] = 0xAA55; 1677 mb[4] = 0x55AA; 1678 mb[5] = 0xA5A5; 1679 mb[6] = 0x5A5A; 1680 mb[7] = 0x2525; 1681 1682 status = qla1280_mailbox_command(ha, 0xff, mb); 1683 if (status) 1684 goto fail; 1685 1686 if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 || 1687 mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A || 1688 mb[7] != 0x2525) { 1689 printk(KERN_INFO "qla1280: Failed mbox check\n"); 1690 goto fail; 1691 } 1692 1693 dprintk(3, "qla1280_chip_diag: exiting normally\n"); 1694 return 0; 1695 fail: 1696 dprintk(2, "qla1280_chip_diag: **** FAILED ****\n"); 1697 return status; 1698 } 1699 1700 static int 1701 qla1280_load_firmware_pio(struct scsi_qla_host *ha) 1702 { 1703 /* enter with host_lock acquired */ 1704 1705 const struct firmware *fw; 1706 const __le16 *fw_data; 1707 uint16_t risc_address, risc_code_size; 1708 uint16_t mb[MAILBOX_REGISTER_COUNT], i; 1709 int err = 0; 1710 1711 fw = qla1280_request_firmware(ha); 1712 if (IS_ERR(fw)) 1713 return PTR_ERR(fw); 1714 1715 fw_data = (const __le16 *)&fw->data[0]; 1716 ha->fwstart = __le16_to_cpu(fw_data[2]); 1717 1718 /* Load RISC code. */ 1719 risc_address = ha->fwstart; 1720 fw_data = (const __le16 *)&fw->data[6]; 1721 risc_code_size = (fw->size - 6) / 2; 1722 1723 for (i = 0; i < risc_code_size; i++) { 1724 mb[0] = MBC_WRITE_RAM_WORD; 1725 mb[1] = risc_address + i; 1726 mb[2] = __le16_to_cpu(fw_data[i]); 1727 1728 err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb); 1729 if (err) { 1730 printk(KERN_ERR "scsi(%li): Failed to load firmware\n", 1731 ha->host_no); 1732 break; 1733 } 1734 } 1735 1736 return err; 1737 } 1738 1739 #define DUMP_IT_BACK 0 /* for debug of RISC loading */ 1740 static int 1741 qla1280_load_firmware_dma(struct scsi_qla_host *ha) 1742 { 1743 /* enter with host_lock acquired */ 1744 const struct firmware *fw; 1745 const __le16 *fw_data; 1746 uint16_t risc_address, risc_code_size; 1747 uint16_t mb[MAILBOX_REGISTER_COUNT], cnt; 1748 int err = 0, num, i; 1749 #if DUMP_IT_BACK 1750 uint8_t *sp, *tbuf; 1751 dma_addr_t p_tbuf; 1752 1753 tbuf = dma_alloc_coherent(&ha->pdev->dev, 8000, &p_tbuf, GFP_KERNEL); 1754 if (!tbuf) 1755 return -ENOMEM; 1756 #endif 1757 1758 fw = qla1280_request_firmware(ha); 1759 if (IS_ERR(fw)) 1760 return PTR_ERR(fw); 1761 1762 fw_data = (const __le16 *)&fw->data[0]; 1763 ha->fwstart = __le16_to_cpu(fw_data[2]); 1764 1765 /* Load RISC code. 
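	 * The image is copied chunk by chunk into the request ring, which
	 * doubles as a DMA bounce buffer here, and transferred to RISC RAM
	 * with the MBC_LOAD_RAM mailbox command.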
*/ 1766 risc_address = ha->fwstart; 1767 fw_data = (const __le16 *)&fw->data[6]; 1768 risc_code_size = (fw->size - 6) / 2; 1769 1770 dprintk(1, "%s: DMA RISC code (%i) words\n", 1771 __func__, risc_code_size); 1772 1773 num = 0; 1774 while (risc_code_size > 0) { 1775 int warn __attribute__((unused)) = 0; 1776 1777 cnt = 2000 >> 1; 1778 1779 if (cnt > risc_code_size) 1780 cnt = risc_code_size; 1781 1782 dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p)," 1783 "%d,%d(0x%x)\n", 1784 fw_data, cnt, num, risc_address); 1785 for(i = 0; i < cnt; i++) 1786 ((__le16 *)ha->request_ring)[i] = fw_data[i]; 1787 1788 mb[0] = MBC_LOAD_RAM; 1789 mb[1] = risc_address; 1790 mb[4] = cnt; 1791 mb[3] = ha->request_dma & 0xffff; 1792 mb[2] = (ha->request_dma >> 16) & 0xffff; 1793 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff; 1794 mb[6] = pci_dma_hi32(ha->request_dma) >> 16; 1795 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n", 1796 __func__, mb[0], 1797 (void *)(long)ha->request_dma, 1798 mb[6], mb[7], mb[2], mb[3]); 1799 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 | 1800 BIT_1 | BIT_0, mb); 1801 if (err) { 1802 printk(KERN_ERR "scsi(%li): Failed to load partial " 1803 "segment of f\n", ha->host_no); 1804 goto out; 1805 } 1806 1807 #if DUMP_IT_BACK 1808 mb[0] = MBC_DUMP_RAM; 1809 mb[1] = risc_address; 1810 mb[4] = cnt; 1811 mb[3] = p_tbuf & 0xffff; 1812 mb[2] = (p_tbuf >> 16) & 0xffff; 1813 mb[7] = pci_dma_hi32(p_tbuf) & 0xffff; 1814 mb[6] = pci_dma_hi32(p_tbuf) >> 16; 1815 1816 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 | 1817 BIT_1 | BIT_0, mb); 1818 if (err) { 1819 printk(KERN_ERR 1820 "Failed to dump partial segment of f/w\n"); 1821 goto out; 1822 } 1823 sp = (uint8_t *)ha->request_ring; 1824 for (i = 0; i < (cnt << 1); i++) { 1825 if (tbuf[i] != sp[i] && warn++ < 10) { 1826 printk(KERN_ERR "%s: FW compare error @ " 1827 "byte(0x%x) loop#=%x\n", 1828 __func__, i, num); 1829 printk(KERN_ERR "%s: FWbyte=%x " 1830 "FWfromChip=%x\n", 1831 __func__, sp[i], tbuf[i]); 1832 /*break; */ 1833 } 1834 } 1835 #endif 1836 risc_address += cnt; 1837 risc_code_size = risc_code_size - cnt; 1838 fw_data = fw_data + cnt; 1839 num++; 1840 } 1841 1842 out: 1843 #if DUMP_IT_BACK 1844 dma_free_coherent(&ha->pdev->dev, 8000, tbuf, p_tbuf); 1845 #endif 1846 return err; 1847 } 1848 1849 static int 1850 qla1280_start_firmware(struct scsi_qla_host *ha) 1851 { 1852 uint16_t mb[MAILBOX_REGISTER_COUNT]; 1853 int err; 1854 1855 dprintk(1, "%s: Verifying checksum of loaded RISC code.\n", 1856 __func__); 1857 1858 /* Verify checksum of loaded RISC code. */ 1859 mb[0] = MBC_VERIFY_CHECKSUM; 1860 /* mb[1] = ql12_risc_code_addr01; */ 1861 mb[1] = ha->fwstart; 1862 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); 1863 if (err) { 1864 printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no); 1865 return err; 1866 } 1867 1868 /* Start firmware execution. 
*/ 1869 dprintk(1, "%s: start firmware running.\n", __func__); 1870 mb[0] = MBC_EXECUTE_FIRMWARE; 1871 mb[1] = ha->fwstart; 1872 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); 1873 if (err) { 1874 printk(KERN_ERR "scsi(%li): Failed to start firmware\n", 1875 ha->host_no); 1876 } 1877 1878 return err; 1879 } 1880 1881 static int 1882 qla1280_load_firmware(struct scsi_qla_host *ha) 1883 { 1884 /* enter with host_lock taken */ 1885 int err; 1886 1887 err = qla1280_chip_diag(ha); 1888 if (err) 1889 goto out; 1890 if (IS_ISP1040(ha)) 1891 err = qla1280_load_firmware_pio(ha); 1892 else 1893 err = qla1280_load_firmware_dma(ha); 1894 if (err) 1895 goto out; 1896 err = qla1280_start_firmware(ha); 1897 out: 1898 return err; 1899 } 1900 1901 /* 1902 * Initialize rings 1903 * 1904 * Input: 1905 * ha = adapter block pointer. 1906 * ha->request_ring = request ring virtual address 1907 * ha->response_ring = response ring virtual address 1908 * ha->request_dma = request ring physical address 1909 * ha->response_dma = response ring physical address 1910 * 1911 * Returns: 1912 * 0 = success. 1913 */ 1914 static int 1915 qla1280_init_rings(struct scsi_qla_host *ha) 1916 { 1917 uint16_t mb[MAILBOX_REGISTER_COUNT]; 1918 int status = 0; 1919 1920 ENTER("qla1280_init_rings"); 1921 1922 /* Clear outstanding commands array. */ 1923 memset(ha->outstanding_cmds, 0, 1924 sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS); 1925 1926 /* Initialize request queue. */ 1927 ha->request_ring_ptr = ha->request_ring; 1928 ha->req_ring_index = 0; 1929 ha->req_q_cnt = REQUEST_ENTRY_CNT; 1930 /* mb[0] = MBC_INIT_REQUEST_QUEUE; */ 1931 mb[0] = MBC_INIT_REQUEST_QUEUE_A64; 1932 mb[1] = REQUEST_ENTRY_CNT; 1933 mb[3] = ha->request_dma & 0xffff; 1934 mb[2] = (ha->request_dma >> 16) & 0xffff; 1935 mb[4] = 0; 1936 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff; 1937 mb[6] = pci_dma_hi32(ha->request_dma) >> 16; 1938 if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 | 1939 BIT_3 | BIT_2 | BIT_1 | BIT_0, 1940 &mb[0]))) { 1941 /* Initialize response queue. 
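 * This mirrors the request-queue setup above: the 64-bit ring base is
 * split across mb[3]/mb[2] (low dword) and mb[7]/mb[6] (high dword),
 * and mb[5] is cleared, presumably seeding the index that is later
 * exchanged through mailbox register 5 on completions.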
*/ 1942 ha->response_ring_ptr = ha->response_ring; 1943 ha->rsp_ring_index = 0; 1944 /* mb[0] = MBC_INIT_RESPONSE_QUEUE; */ 1945 mb[0] = MBC_INIT_RESPONSE_QUEUE_A64; 1946 mb[1] = RESPONSE_ENTRY_CNT; 1947 mb[3] = ha->response_dma & 0xffff; 1948 mb[2] = (ha->response_dma >> 16) & 0xffff; 1949 mb[5] = 0; 1950 mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff; 1951 mb[6] = pci_dma_hi32(ha->response_dma) >> 16; 1952 status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 | 1953 BIT_3 | BIT_2 | BIT_1 | BIT_0, 1954 &mb[0]); 1955 } 1956 1957 if (status) 1958 dprintk(2, "qla1280_init_rings: **** FAILED ****\n"); 1959 1960 LEAVE("qla1280_init_rings"); 1961 return status; 1962 } 1963 1964 static void 1965 qla1280_print_settings(struct nvram *nv) 1966 { 1967 dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n", 1968 nv->bus[0].config_1.initiator_id); 1969 dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n", 1970 nv->bus[1].config_1.initiator_id); 1971 1972 dprintk(1, "qla1280 : bus reset delay[0]=%d\n", 1973 nv->bus[0].bus_reset_delay); 1974 dprintk(1, "qla1280 : bus reset delay[1]=%d\n", 1975 nv->bus[1].bus_reset_delay); 1976 1977 dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count); 1978 dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay); 1979 dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count); 1980 dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay); 1981 1982 dprintk(1, "qla1280 : async data setup time[0]=%d\n", 1983 nv->bus[0].config_2.async_data_setup_time); 1984 dprintk(1, "qla1280 : async data setup time[1]=%d\n", 1985 nv->bus[1].config_2.async_data_setup_time); 1986 1987 dprintk(1, "qla1280 : req/ack active negation[0]=%d\n", 1988 nv->bus[0].config_2.req_ack_active_negation); 1989 dprintk(1, "qla1280 : req/ack active negation[1]=%d\n", 1990 nv->bus[1].config_2.req_ack_active_negation); 1991 1992 dprintk(1, "qla1280 : data line active negation[0]=%d\n", 1993 nv->bus[0].config_2.data_line_active_negation); 1994 dprintk(1, "qla1280 : data line active negation[1]=%d\n", 1995 nv->bus[1].config_2.data_line_active_negation); 1996 1997 dprintk(1, "qla1280 : disable loading risc code=%d\n", 1998 nv->cntr_flags_1.disable_loading_risc_code); 1999 2000 dprintk(1, "qla1280 : enable 64bit addressing=%d\n", 2001 nv->cntr_flags_1.enable_64bit_addressing); 2002 2003 dprintk(1, "qla1280 : selection timeout limit[0]=%d\n", 2004 nv->bus[0].selection_timeout); 2005 dprintk(1, "qla1280 : selection timeout limit[1]=%d\n", 2006 nv->bus[1].selection_timeout); 2007 2008 dprintk(1, "qla1280 : max queue depth[0]=%d\n", 2009 nv->bus[0].max_queue_depth); 2010 dprintk(1, "qla1280 : max queue depth[1]=%d\n", 2011 nv->bus[1].max_queue_depth); 2012 } 2013 2014 static void 2015 qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target) 2016 { 2017 struct nvram *nv = &ha->nvram; 2018 2019 nv->bus[bus].target[target].parameter.renegotiate_on_error = 1; 2020 nv->bus[bus].target[target].parameter.auto_request_sense = 1; 2021 nv->bus[bus].target[target].parameter.tag_queuing = 1; 2022 nv->bus[bus].target[target].parameter.enable_sync = 1; 2023 #if 1 /* Some SCSI Processors do not seem to like this */ 2024 nv->bus[bus].target[target].parameter.enable_wide = 1; 2025 #endif 2026 nv->bus[bus].target[target].execution_throttle = 2027 nv->bus[bus].max_queue_depth - 1; 2028 nv->bus[bus].target[target].parameter.parity_checking = 1; 2029 nv->bus[bus].target[target].parameter.disconnect_allowed = 1; 2030 2031 if (IS_ISP1x160(ha)) { 2032 
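		/*
		 * Ultra3-capable boards (1x160): enable the device with a
		 * sync offset of 0x0e and period factor 9, and turn on PPR
		 * with wide transfers - presumably requesting Ultra160 DT
		 * timing.  The plain Ultra2 (1x80) branch below settles for
		 * offset 12 and period factor 10.
		 */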
nv->bus[bus].target[target].flags.flags1x160.device_enable = 1; 2033 nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e; 2034 nv->bus[bus].target[target].sync_period = 9; 2035 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1; 2036 nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2; 2037 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1; 2038 } else { 2039 nv->bus[bus].target[target].flags.flags1x80.device_enable = 1; 2040 nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12; 2041 nv->bus[bus].target[target].sync_period = 10; 2042 } 2043 } 2044 2045 static void 2046 qla1280_set_defaults(struct scsi_qla_host *ha) 2047 { 2048 struct nvram *nv = &ha->nvram; 2049 int bus, target; 2050 2051 dprintk(1, "Using defaults for NVRAM: \n"); 2052 memset(nv, 0, sizeof(struct nvram)); 2053 2054 /* nv->cntr_flags_1.disable_loading_risc_code = 1; */ 2055 nv->firmware_feature.f.enable_fast_posting = 1; 2056 nv->firmware_feature.f.disable_synchronous_backoff = 1; 2057 nv->termination.scsi_bus_0_control = 3; 2058 nv->termination.scsi_bus_1_control = 3; 2059 nv->termination.auto_term_support = 1; 2060 2061 /* 2062 * Set default FIFO magic - What appropriate values would be here 2063 * is unknown. This is what I have found testing with 12160s. 2064 * 2065 * Now, I would love the magic decoder ring for this one, the 2066 * header file provided by QLogic seems to be bogus or incomplete 2067 * at best. 2068 */ 2069 nv->isp_config.burst_enable = 1; 2070 if (IS_ISP1040(ha)) 2071 nv->isp_config.fifo_threshold |= 3; 2072 else 2073 nv->isp_config.fifo_threshold |= 4; 2074 2075 if (IS_ISP1x160(ha)) 2076 nv->isp_parameter = 0x01; /* fast memory enable */ 2077 2078 for (bus = 0; bus < MAX_BUSES; bus++) { 2079 nv->bus[bus].config_1.initiator_id = 7; 2080 nv->bus[bus].config_2.req_ack_active_negation = 1; 2081 nv->bus[bus].config_2.data_line_active_negation = 1; 2082 nv->bus[bus].selection_timeout = 250; 2083 nv->bus[bus].max_queue_depth = 32; 2084 2085 if (IS_ISP1040(ha)) { 2086 nv->bus[bus].bus_reset_delay = 3; 2087 nv->bus[bus].config_2.async_data_setup_time = 6; 2088 nv->bus[bus].retry_delay = 1; 2089 } else { 2090 nv->bus[bus].bus_reset_delay = 5; 2091 nv->bus[bus].config_2.async_data_setup_time = 8; 2092 } 2093 2094 for (target = 0; target < MAX_TARGETS; target++) 2095 qla1280_set_target_defaults(ha, bus, target); 2096 } 2097 } 2098 2099 static int 2100 qla1280_config_target(struct scsi_qla_host *ha, int bus, int target) 2101 { 2102 struct nvram *nv = &ha->nvram; 2103 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2104 int status, lun; 2105 uint16_t flag; 2106 2107 /* Set Target Parameters. */ 2108 mb[0] = MBC_SET_TARGET_PARAMETERS; 2109 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8); 2110 2111 /* 2112 * Do not enable sync and ppr for the initial INQUIRY run. We 2113 * enable this later if we determine the target actually 2114 * supports it. 2115 */ 2116 mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE 2117 | TP_WIDE | TP_PARITY | TP_DISCONNECT); 2118 2119 if (IS_ISP1x160(ha)) 2120 mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8; 2121 else 2122 mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8; 2123 mb[3] |= nv->bus[bus].target[target].sync_period; 2124 status = qla1280_mailbox_command(ha, 0x0f, mb); 2125 2126 /* Save Tag queuing enable flag. 
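 * Per-target settings are collected as simple bit masks: the qtag_enables,
 * device_enables and lun_disables fields below all use the same
 * "flag = BIT_0 << target" encoding.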
*/ 2127 flag = (BIT_0 << target); 2128 if (nv->bus[bus].target[target].parameter.tag_queuing) 2129 ha->bus_settings[bus].qtag_enables |= flag; 2130 2131 /* Save Device enable flag. */ 2132 if (IS_ISP1x160(ha)) { 2133 if (nv->bus[bus].target[target].flags.flags1x160.device_enable) 2134 ha->bus_settings[bus].device_enables |= flag; 2135 ha->bus_settings[bus].lun_disables |= 0; 2136 } else { 2137 if (nv->bus[bus].target[target].flags.flags1x80.device_enable) 2138 ha->bus_settings[bus].device_enables |= flag; 2139 /* Save LUN disable flag. */ 2140 if (nv->bus[bus].target[target].flags.flags1x80.lun_disable) 2141 ha->bus_settings[bus].lun_disables |= flag; 2142 } 2143 2144 /* Set Device Queue Parameters. */ 2145 for (lun = 0; lun < MAX_LUNS; lun++) { 2146 mb[0] = MBC_SET_DEVICE_QUEUE; 2147 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8); 2148 mb[1] |= lun; 2149 mb[2] = nv->bus[bus].max_queue_depth; 2150 mb[3] = nv->bus[bus].target[target].execution_throttle; 2151 status |= qla1280_mailbox_command(ha, 0x0f, mb); 2152 } 2153 2154 return status; 2155 } 2156 2157 static int 2158 qla1280_config_bus(struct scsi_qla_host *ha, int bus) 2159 { 2160 struct nvram *nv = &ha->nvram; 2161 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2162 int target, status; 2163 2164 /* SCSI Reset Disable. */ 2165 ha->bus_settings[bus].disable_scsi_reset = 2166 nv->bus[bus].config_1.scsi_reset_disable; 2167 2168 /* Initiator ID. */ 2169 ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id; 2170 mb[0] = MBC_SET_INITIATOR_ID; 2171 mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 : 2172 ha->bus_settings[bus].id; 2173 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); 2174 2175 /* Reset Delay. */ 2176 ha->bus_settings[bus].bus_reset_delay = 2177 nv->bus[bus].bus_reset_delay; 2178 2179 /* Command queue depth per device. */ 2180 ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1; 2181 2182 /* Set target parameters. */ 2183 for (target = 0; target < MAX_TARGETS; target++) 2184 status |= qla1280_config_target(ha, bus, target); 2185 2186 return status; 2187 } 2188 2189 static int 2190 qla1280_nvram_config(struct scsi_qla_host *ha) 2191 { 2192 struct device_reg __iomem *reg = ha->iobase; 2193 struct nvram *nv = &ha->nvram; 2194 int bus, target, status = 0; 2195 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2196 2197 ENTER("qla1280_nvram_config"); 2198 2199 if (ha->nvram_valid) { 2200 /* Always force AUTO sense for LINUX SCSI */ 2201 for (bus = 0; bus < MAX_BUSES; bus++) 2202 for (target = 0; target < MAX_TARGETS; target++) { 2203 nv->bus[bus].target[target].parameter. 2204 auto_request_sense = 1; 2205 } 2206 } else { 2207 qla1280_set_defaults(ha); 2208 } 2209 2210 qla1280_print_settings(nv); 2211 2212 /* Disable RISC load of firmware. */ 2213 ha->flags.disable_risc_code_load = 2214 nv->cntr_flags_1.disable_loading_risc_code; 2215 2216 if (IS_ISP1040(ha)) { 2217 uint16_t hwrev, cfg1, cdma_conf, ddma_conf; 2218 2219 hwrev = RD_REG_WORD(®->cfg_0) & ISP_CFG0_HWMSK; 2220 2221 cfg1 = RD_REG_WORD(®->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6); 2222 cdma_conf = RD_REG_WORD(®->cdma_cfg); 2223 ddma_conf = RD_REG_WORD(®->ddma_cfg); 2224 2225 /* Busted fifo, says mjacob. 
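 * On the 1040A the FIFO threshold field is left at its reset value; for
 * the other ISP1040 revisions the NVRAM threshold is OR'd into the cfg_1
 * bits that were masked off above.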
 */
		if (hwrev != ISP_CFG0_1040A)
			cfg1 |= nv->isp_config.fifo_threshold << 4;

		cfg1 |= nv->isp_config.burst_enable << 2;
		WRT_REG_WORD(&reg->cfg_1, cfg1);

		WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
		WRT_REG_WORD(&reg->ddma_cfg, ddma_conf | DDMA_CONF_BENAB);
	} else {
		uint16_t cfg1, term;

		/* Set ISP hardware DMA burst */
		cfg1 = nv->isp_config.fifo_threshold << 4;
		cfg1 |= nv->isp_config.burst_enable << 2;
		/* Enable DMA arbitration on dual channel controllers */
		if (ha->ports > 1)
			cfg1 |= BIT_13;
		WRT_REG_WORD(&reg->cfg_1, cfg1);

		/* Set SCSI termination. */
		WRT_REG_WORD(&reg->gpio_enable,
			     BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
		term = nv->termination.scsi_bus_1_control;
		term |= nv->termination.scsi_bus_0_control << 2;
		term |= nv->termination.auto_term_support << 7;
		RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
		WRT_REG_WORD(&reg->gpio_data, term);
	}
	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */

	/* ISP parameter word. */
	mb[0] = MBC_SET_SYSTEM_PARAMETER;
	mb[1] = nv->isp_parameter;
	status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);

	if (IS_ISP1x40(ha)) {
		/* clock rate - for qla1240 and older, only */
		mb[0] = MBC_SET_CLOCK_RATE;
		mb[1] = 40;
		status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
	}

	/* Firmware feature word. */
	mb[0] = MBC_SET_FIRMWARE_FEATURES;
	mb[1] = nv->firmware_feature.f.enable_fast_posting;
	mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
	mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
#if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
	if (ia64_platform_is("sn2")) {
		printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
		       "workaround\n", ha->host_no);
		mb[1] |= nv->firmware_feature.f.unused_9 << 9; /* XXX */
	}
#endif
	status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);

	/* Retry count and delay. */
	mb[0] = MBC_SET_RETRY_COUNT;
	mb[1] = nv->bus[0].retry_count;
	mb[2] = nv->bus[0].retry_delay;
	mb[6] = nv->bus[1].retry_count;
	mb[7] = nv->bus[1].retry_delay;
	status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
					  BIT_1 | BIT_0, &mb[0]);

	/* ASYNC data setup time. */
	mb[0] = MBC_SET_ASYNC_DATA_SETUP;
	mb[1] = nv->bus[0].config_2.async_data_setup_time;
	mb[2] = nv->bus[1].config_2.async_data_setup_time;
	status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);

	/* Active negation states.
*/ 2298 mb[0] = MBC_SET_ACTIVE_NEGATION; 2299 mb[1] = 0; 2300 if (nv->bus[0].config_2.req_ack_active_negation) 2301 mb[1] |= BIT_5; 2302 if (nv->bus[0].config_2.data_line_active_negation) 2303 mb[1] |= BIT_4; 2304 mb[2] = 0; 2305 if (nv->bus[1].config_2.req_ack_active_negation) 2306 mb[2] |= BIT_5; 2307 if (nv->bus[1].config_2.data_line_active_negation) 2308 mb[2] |= BIT_4; 2309 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb); 2310 2311 mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY; 2312 mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */ 2313 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); 2314 2315 /* thingy */ 2316 mb[0] = MBC_SET_PCI_CONTROL; 2317 mb[1] = BIT_1; /* Data DMA Channel Burst Enable */ 2318 mb[2] = BIT_1; /* Command DMA Channel Burst Enable */ 2319 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb); 2320 2321 mb[0] = MBC_SET_TAG_AGE_LIMIT; 2322 mb[1] = 8; 2323 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); 2324 2325 /* Selection timeout. */ 2326 mb[0] = MBC_SET_SELECTION_TIMEOUT; 2327 mb[1] = nv->bus[0].selection_timeout; 2328 mb[2] = nv->bus[1].selection_timeout; 2329 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb); 2330 2331 for (bus = 0; bus < ha->ports; bus++) 2332 status |= qla1280_config_bus(ha, bus); 2333 2334 if (status) 2335 dprintk(2, "qla1280_nvram_config: **** FAILED ****\n"); 2336 2337 LEAVE("qla1280_nvram_config"); 2338 return status; 2339 } 2340 2341 /* 2342 * Get NVRAM data word 2343 * Calculates word position in NVRAM and calls request routine to 2344 * get the word from NVRAM. 2345 * 2346 * Input: 2347 * ha = adapter block pointer. 2348 * address = NVRAM word address. 2349 * 2350 * Returns: 2351 * data word. 2352 */ 2353 static uint16_t 2354 qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address) 2355 { 2356 uint32_t nv_cmd; 2357 uint16_t data; 2358 2359 nv_cmd = address << 16; 2360 nv_cmd |= NV_READ_OP; 2361 2362 data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd)); 2363 2364 dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = " 2365 "0x%x", data); 2366 2367 return data; 2368 } 2369 2370 /* 2371 * NVRAM request 2372 * Sends read command to NVRAM and gets data from NVRAM. 2373 * 2374 * Input: 2375 * ha = adapter block pointer. 2376 * nv_cmd = Bit 26 = start bit 2377 * Bit 25, 24 = opcode 2378 * Bit 23-16 = address 2379 * Bit 15-0 = write data 2380 * 2381 * Returns: 2382 * data word. 2383 */ 2384 static uint16_t 2385 qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd) 2386 { 2387 struct device_reg __iomem *reg = ha->iobase; 2388 int cnt; 2389 uint16_t data = 0; 2390 uint16_t reg_data; 2391 2392 /* Send command to NVRAM. */ 2393 2394 nv_cmd <<= 5; 2395 for (cnt = 0; cnt < 11; cnt++) { 2396 if (nv_cmd & BIT_31) 2397 qla1280_nv_write(ha, NV_DATA_OUT); 2398 else 2399 qla1280_nv_write(ha, 0); 2400 nv_cmd <<= 1; 2401 } 2402 2403 /* Read data from NVRAM. */ 2404 2405 for (cnt = 0; cnt < 16; cnt++) { 2406 WRT_REG_WORD(®->nvram, (NV_SELECT | NV_CLOCK)); 2407 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2408 NVRAM_DELAY(); 2409 data <<= 1; 2410 reg_data = RD_REG_WORD(®->nvram); 2411 if (reg_data & NV_DATA_IN) 2412 data |= BIT_0; 2413 WRT_REG_WORD(®->nvram, NV_SELECT); 2414 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2415 NVRAM_DELAY(); 2416 } 2417 2418 /* Deselect chip. 
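 * The read cycle above bit-bangs the serial NVRAM: eleven command bits
 * are clocked out MSB first through qla1280_nv_write(), then sixteen
 * data bits are clocked in by toggling NV_CLOCK while NV_SELECT is held,
 * with every register write posted out by reading id_l.  Dropping
 * NV_SELECT below ends the cycle.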
*/ 2419 2420 WRT_REG_WORD(®->nvram, NV_DESELECT); 2421 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2422 NVRAM_DELAY(); 2423 2424 return data; 2425 } 2426 2427 static void 2428 qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data) 2429 { 2430 struct device_reg __iomem *reg = ha->iobase; 2431 2432 WRT_REG_WORD(®->nvram, data | NV_SELECT); 2433 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2434 NVRAM_DELAY(); 2435 WRT_REG_WORD(®->nvram, data | NV_SELECT | NV_CLOCK); 2436 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2437 NVRAM_DELAY(); 2438 WRT_REG_WORD(®->nvram, data | NV_SELECT); 2439 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2440 NVRAM_DELAY(); 2441 } 2442 2443 /* 2444 * Mailbox Command 2445 * Issue mailbox command and waits for completion. 2446 * 2447 * Input: 2448 * ha = adapter block pointer. 2449 * mr = mailbox registers to load. 2450 * mb = data pointer for mailbox registers. 2451 * 2452 * Output: 2453 * mb[MAILBOX_REGISTER_COUNT] = returned mailbox data. 2454 * 2455 * Returns: 2456 * 0 = success 2457 */ 2458 static int 2459 qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) 2460 { 2461 struct device_reg __iomem *reg = ha->iobase; 2462 int status = 0; 2463 int cnt; 2464 uint16_t *optr, *iptr; 2465 uint16_t __iomem *mptr; 2466 uint16_t data; 2467 DECLARE_COMPLETION_ONSTACK(wait); 2468 2469 ENTER("qla1280_mailbox_command"); 2470 2471 if (ha->mailbox_wait) { 2472 printk(KERN_ERR "Warning mailbox wait already in use!\n"); 2473 } 2474 ha->mailbox_wait = &wait; 2475 2476 /* 2477 * We really should start out by verifying that the mailbox is 2478 * available before starting sending the command data 2479 */ 2480 /* Load mailbox registers. */ 2481 mptr = (uint16_t __iomem *) ®->mailbox0; 2482 iptr = mb; 2483 for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) { 2484 if (mr & BIT_0) { 2485 WRT_REG_WORD(mptr, (*iptr)); 2486 } 2487 2488 mr >>= 1; 2489 mptr++; 2490 iptr++; 2491 } 2492 2493 /* Issue set host interrupt command. */ 2494 2495 /* set up a timer just in case we're really jammed */ 2496 timer_setup(&ha->mailbox_timer, qla1280_mailbox_timeout, 0); 2497 mod_timer(&ha->mailbox_timer, jiffies + 20 * HZ); 2498 2499 spin_unlock_irq(ha->host->host_lock); 2500 WRT_REG_WORD(®->host_cmd, HC_SET_HOST_INT); 2501 data = qla1280_debounce_register(®->istatus); 2502 2503 wait_for_completion(&wait); 2504 del_timer_sync(&ha->mailbox_timer); 2505 2506 spin_lock_irq(ha->host->host_lock); 2507 2508 ha->mailbox_wait = NULL; 2509 2510 /* Check for mailbox command timeout. */ 2511 if (ha->mailbox_out[0] != MBS_CMD_CMP) { 2512 printk(KERN_WARNING "qla1280_mailbox_command: Command failed, " 2513 "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = " 2514 "0x%04x\n", 2515 mb[0], ha->mailbox_out[0], RD_REG_WORD(®->istatus)); 2516 printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n", 2517 RD_REG_WORD(®->mailbox0), RD_REG_WORD(®->mailbox1), 2518 RD_REG_WORD(®->mailbox2), RD_REG_WORD(®->mailbox3)); 2519 printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n", 2520 RD_REG_WORD(®->mailbox4), RD_REG_WORD(®->mailbox5), 2521 RD_REG_WORD(®->mailbox6), RD_REG_WORD(®->mailbox7)); 2522 status = 1; 2523 } 2524 2525 /* Load return mailbox registers. 
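 * The interrupt handler has already copied the chip's mailbox registers
 * into ha->mailbox_out before completing ha->mailbox_wait; here all
 * MAILBOX_REGISTER_COUNT words are copied back into the caller's mb[]
 * array regardless of which registers were loaded on entry.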
*/ 2526 optr = mb; 2527 iptr = (uint16_t *) &ha->mailbox_out[0]; 2528 mr = MAILBOX_REGISTER_COUNT; 2529 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t)); 2530 2531 if (ha->flags.reset_marker) 2532 qla1280_rst_aen(ha); 2533 2534 if (status) 2535 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = " 2536 "0x%x ****\n", mb[0]); 2537 2538 LEAVE("qla1280_mailbox_command"); 2539 return status; 2540 } 2541 2542 /* 2543 * qla1280_poll 2544 * Polls ISP for interrupts. 2545 * 2546 * Input: 2547 * ha = adapter block pointer. 2548 */ 2549 static void 2550 qla1280_poll(struct scsi_qla_host *ha) 2551 { 2552 struct device_reg __iomem *reg = ha->iobase; 2553 uint16_t data; 2554 LIST_HEAD(done_q); 2555 2556 /* ENTER("qla1280_poll"); */ 2557 2558 /* Check for pending interrupts. */ 2559 data = RD_REG_WORD(®->istatus); 2560 if (data & RISC_INT) 2561 qla1280_isr(ha, &done_q); 2562 2563 if (!ha->mailbox_wait) { 2564 if (ha->flags.reset_marker) 2565 qla1280_rst_aen(ha); 2566 } 2567 2568 if (!list_empty(&done_q)) 2569 qla1280_done(ha); 2570 2571 /* LEAVE("qla1280_poll"); */ 2572 } 2573 2574 /* 2575 * qla1280_bus_reset 2576 * Issue SCSI bus reset. 2577 * 2578 * Input: 2579 * ha = adapter block pointer. 2580 * bus = SCSI bus number. 2581 * 2582 * Returns: 2583 * 0 = success 2584 */ 2585 static int 2586 qla1280_bus_reset(struct scsi_qla_host *ha, int bus) 2587 { 2588 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2589 uint16_t reset_delay; 2590 int status; 2591 2592 dprintk(3, "qla1280_bus_reset: entered\n"); 2593 2594 if (qla1280_verbose) 2595 printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n", 2596 ha->host_no, bus); 2597 2598 reset_delay = ha->bus_settings[bus].bus_reset_delay; 2599 mb[0] = MBC_BUS_RESET; 2600 mb[1] = reset_delay; 2601 mb[2] = (uint16_t) bus; 2602 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]); 2603 2604 if (status) { 2605 if (ha->bus_settings[bus].failed_reset_count > 2) 2606 ha->bus_settings[bus].scsi_bus_dead = 1; 2607 ha->bus_settings[bus].failed_reset_count++; 2608 } else { 2609 spin_unlock_irq(ha->host->host_lock); 2610 ssleep(reset_delay); 2611 spin_lock_irq(ha->host->host_lock); 2612 2613 ha->bus_settings[bus].scsi_bus_dead = 0; 2614 ha->bus_settings[bus].failed_reset_count = 0; 2615 ha->bus_settings[bus].reset_marker = 0; 2616 /* Issue marker command. */ 2617 qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL); 2618 } 2619 2620 /* 2621 * We should probably call qla1280_set_target_parameters() 2622 * here as well for all devices on the bus. 2623 */ 2624 2625 if (status) 2626 dprintk(2, "qla1280_bus_reset: **** FAILED ****\n"); 2627 else 2628 dprintk(3, "qla1280_bus_reset: exiting normally\n"); 2629 2630 return status; 2631 } 2632 2633 /* 2634 * qla1280_device_reset 2635 * Issue bus device reset message to the target. 2636 * 2637 * Input: 2638 * ha = adapter block pointer. 2639 * bus = SCSI BUS number. 2640 * target = SCSI ID. 2641 * 2642 * Returns: 2643 * 0 = success 2644 */ 2645 static int 2646 qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target) 2647 { 2648 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2649 int status; 2650 2651 ENTER("qla1280_device_reset"); 2652 2653 mb[0] = MBC_ABORT_TARGET; 2654 mb[1] = (bus ? (target | BIT_7) : target) << 8; 2655 mb[2] = 1; 2656 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]); 2657 2658 /* Issue marker command. 
*/ 2659 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID); 2660 2661 if (status) 2662 dprintk(2, "qla1280_device_reset: **** FAILED ****\n"); 2663 2664 LEAVE("qla1280_device_reset"); 2665 return status; 2666 } 2667 2668 /* 2669 * qla1280_abort_command 2670 * Abort command aborts a specified IOCB. 2671 * 2672 * Input: 2673 * ha = adapter block pointer. 2674 * sp = SB structure pointer. 2675 * 2676 * Returns: 2677 * 0 = success 2678 */ 2679 static int 2680 qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle) 2681 { 2682 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2683 unsigned int bus, target, lun; 2684 int status; 2685 2686 ENTER("qla1280_abort_command"); 2687 2688 bus = SCSI_BUS_32(sp->cmd); 2689 target = SCSI_TCN_32(sp->cmd); 2690 lun = SCSI_LUN_32(sp->cmd); 2691 2692 sp->flags |= SRB_ABORT_PENDING; 2693 2694 mb[0] = MBC_ABORT_COMMAND; 2695 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun; 2696 mb[2] = handle >> 16; 2697 mb[3] = handle & 0xffff; 2698 status = qla1280_mailbox_command(ha, 0x0f, &mb[0]); 2699 2700 if (status) { 2701 dprintk(2, "qla1280_abort_command: **** FAILED ****\n"); 2702 sp->flags &= ~SRB_ABORT_PENDING; 2703 } 2704 2705 2706 LEAVE("qla1280_abort_command"); 2707 return status; 2708 } 2709 2710 /* 2711 * qla1280_reset_adapter 2712 * Reset adapter. 2713 * 2714 * Input: 2715 * ha = adapter block pointer. 2716 */ 2717 static void 2718 qla1280_reset_adapter(struct scsi_qla_host *ha) 2719 { 2720 struct device_reg __iomem *reg = ha->iobase; 2721 2722 ENTER("qla1280_reset_adapter"); 2723 2724 /* Disable ISP chip */ 2725 ha->flags.online = 0; 2726 WRT_REG_WORD(®->ictrl, ISP_RESET); 2727 WRT_REG_WORD(®->host_cmd, 2728 HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS); 2729 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2730 2731 LEAVE("qla1280_reset_adapter"); 2732 } 2733 2734 /* 2735 * Issue marker command. 2736 * Function issues marker IOCB. 2737 * 2738 * Input: 2739 * ha = adapter block pointer. 2740 * bus = SCSI BUS number 2741 * id = SCSI ID 2742 * lun = SCSI LUN 2743 * type = marker modifier 2744 */ 2745 static void 2746 qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type) 2747 { 2748 struct mrk_entry *pkt; 2749 2750 ENTER("qla1280_marker"); 2751 2752 /* Get request packet. */ 2753 if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) { 2754 pkt->entry_type = MARKER_TYPE; 2755 pkt->lun = (uint8_t) lun; 2756 pkt->target = (uint8_t) (bus ? (id | BIT_7) : id); 2757 pkt->modifier = type; 2758 pkt->entry_status = 0; 2759 2760 /* Issue command to ISP */ 2761 qla1280_isp_cmd(ha); 2762 } 2763 2764 LEAVE("qla1280_marker"); 2765 } 2766 2767 2768 /* 2769 * qla1280_64bit_start_scsi 2770 * The start SCSI is responsible for building request packets on 2771 * request ring and modifying ISP input pointer. 2772 * 2773 * Input: 2774 * ha = adapter block pointer. 2775 * sp = SB structure pointer. 2776 * 2777 * Returns: 2778 * 0 = success, was able to issue command. 2779 */ 2780 #ifdef QLA_64BIT_PTR 2781 static int 2782 qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) 2783 { 2784 struct device_reg __iomem *reg = ha->iobase; 2785 struct scsi_cmnd *cmd = sp->cmd; 2786 cmd_a64_entry_t *pkt; 2787 __le32 *dword_ptr; 2788 dma_addr_t dma_handle; 2789 int status = 0; 2790 int cnt; 2791 int req_cnt; 2792 int seg_cnt; 2793 u8 dir; 2794 2795 ENTER("qla1280_64bit_start_scsi:"); 2796 2797 /* Calculate number of entries and segments required. 
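 * A COMMAND_A64_TYPE IOCB carries at most two data segments; each
 * CONTINUE_A64_TYPE entry carries five more, hence the (seg_cnt - 2) / 5
 * arithmetic below (rounded up).  For example, a 12-segment transfer
 * needs one command entry plus two continuation entries.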
*/ 2798 req_cnt = 1; 2799 seg_cnt = scsi_dma_map(cmd); 2800 if (seg_cnt > 0) { 2801 if (seg_cnt > 2) { 2802 req_cnt += (seg_cnt - 2) / 5; 2803 if ((seg_cnt - 2) % 5) 2804 req_cnt++; 2805 } 2806 } else if (seg_cnt < 0) { 2807 status = 1; 2808 goto out; 2809 } 2810 2811 if ((req_cnt + 2) >= ha->req_q_cnt) { 2812 /* Calculate number of free request entries. */ 2813 cnt = RD_REG_WORD(®->mailbox4); 2814 if (ha->req_ring_index < cnt) 2815 ha->req_q_cnt = cnt - ha->req_ring_index; 2816 else 2817 ha->req_q_cnt = 2818 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt); 2819 } 2820 2821 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n", 2822 ha->req_q_cnt, seg_cnt); 2823 2824 /* If room for request in request ring. */ 2825 if ((req_cnt + 2) >= ha->req_q_cnt) { 2826 status = SCSI_MLQUEUE_HOST_BUSY; 2827 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt=" 2828 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt, 2829 req_cnt); 2830 goto out; 2831 } 2832 2833 /* Check for room in outstanding command list. */ 2834 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS && 2835 ha->outstanding_cmds[cnt] != NULL; cnt++); 2836 2837 if (cnt >= MAX_OUTSTANDING_COMMANDS) { 2838 status = SCSI_MLQUEUE_HOST_BUSY; 2839 dprintk(2, "qla1280_start_scsi: NO ROOM IN " 2840 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt); 2841 goto out; 2842 } 2843 2844 ha->outstanding_cmds[cnt] = sp; 2845 ha->req_q_cnt -= req_cnt; 2846 CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1); 2847 2848 dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp, 2849 cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd)); 2850 dprintk(2, " bus %i, target %i, lun %i\n", 2851 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); 2852 qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE); 2853 2854 /* 2855 * Build command packet. 2856 */ 2857 pkt = (cmd_a64_entry_t *) ha->request_ring_ptr; 2858 2859 pkt->entry_type = COMMAND_A64_TYPE; 2860 pkt->entry_count = (uint8_t) req_cnt; 2861 pkt->sys_define = (uint8_t) ha->req_ring_index; 2862 pkt->entry_status = 0; 2863 pkt->handle = cpu_to_le32(cnt); 2864 2865 /* Zero out remaining portion of packet. */ 2866 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); 2867 2868 /* Set ISP command timeout. */ 2869 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ); 2870 2871 /* Set device target ID and LUN */ 2872 pkt->lun = SCSI_LUN_32(cmd); 2873 pkt->target = SCSI_BUS_32(cmd) ? 2874 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd); 2875 2876 /* Enable simple tag queuing if device supports it. */ 2877 if (cmd->device->simple_tags) 2878 pkt->control_flags |= cpu_to_le16(BIT_3); 2879 2880 /* Load SCSI command packet. */ 2881 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd)); 2882 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd)); 2883 /* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */ 2884 2885 /* Set transfer direction. */ 2886 dir = qla1280_data_direction(cmd); 2887 pkt->control_flags |= cpu_to_le16(dir); 2888 2889 /* Set total data segment count. */ 2890 pkt->dseg_count = cpu_to_le16(seg_cnt); 2891 2892 /* 2893 * Load data segments. 2894 */ 2895 if (seg_cnt) { /* If data transfer. */ 2896 struct scatterlist *sg, *s; 2897 int remseg = seg_cnt; 2898 2899 sg = scsi_sglist(cmd); 2900 2901 /* Setup packet address segment pointer. */ 2902 dword_ptr = (u32 *)&pkt->dseg_0_address; 2903 2904 /* Load command entry data segments. 
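 * Each 64-bit segment descriptor is three little-endian 32-bit words:
 * low address, high address, then length.  Only the first two
 * scatterlist elements fit in the command entry itself; the remainder
 * spill into the continuation entries built further down.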
*/ 2905 for_each_sg(sg, s, seg_cnt, cnt) { 2906 if (cnt == 2) 2907 break; 2908 2909 dma_handle = sg_dma_address(s); 2910 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 2911 if (ha->flags.use_pci_vchannel) 2912 sn_pci_set_vchan(ha->pdev, 2913 (unsigned long *)&dma_handle, 2914 SCSI_BUS_32(cmd)); 2915 #endif 2916 *dword_ptr++ = 2917 cpu_to_le32(pci_dma_lo32(dma_handle)); 2918 *dword_ptr++ = 2919 cpu_to_le32(pci_dma_hi32(dma_handle)); 2920 *dword_ptr++ = cpu_to_le32(sg_dma_len(s)); 2921 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n", 2922 cpu_to_le32(pci_dma_hi32(dma_handle)), 2923 cpu_to_le32(pci_dma_lo32(dma_handle)), 2924 cpu_to_le32(sg_dma_len(sg_next(s)))); 2925 remseg--; 2926 } 2927 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather " 2928 "command packet data - b %i, t %i, l %i \n", 2929 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), 2930 SCSI_LUN_32(cmd)); 2931 qla1280_dump_buffer(5, (char *)pkt, 2932 REQUEST_ENTRY_SIZE); 2933 2934 /* 2935 * Build continuation packets. 2936 */ 2937 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x " 2938 "remains\n", seg_cnt); 2939 2940 while (remseg > 0) { 2941 /* Update sg start */ 2942 sg = s; 2943 /* Adjust ring index. */ 2944 ha->req_ring_index++; 2945 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 2946 ha->req_ring_index = 0; 2947 ha->request_ring_ptr = 2948 ha->request_ring; 2949 } else 2950 ha->request_ring_ptr++; 2951 2952 pkt = (cmd_a64_entry_t *)ha->request_ring_ptr; 2953 2954 /* Zero out packet. */ 2955 memset(pkt, 0, REQUEST_ENTRY_SIZE); 2956 2957 /* Load packet defaults. */ 2958 ((struct cont_a64_entry *) pkt)->entry_type = 2959 CONTINUE_A64_TYPE; 2960 ((struct cont_a64_entry *) pkt)->entry_count = 1; 2961 ((struct cont_a64_entry *) pkt)->sys_define = 2962 (uint8_t)ha->req_ring_index; 2963 /* Setup packet address segment pointer. */ 2964 dword_ptr = 2965 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address; 2966 2967 /* Load continuation entry data segments. */ 2968 for_each_sg(sg, s, remseg, cnt) { 2969 if (cnt == 5) 2970 break; 2971 dma_handle = sg_dma_address(s); 2972 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 2973 if (ha->flags.use_pci_vchannel) 2974 sn_pci_set_vchan(ha->pdev, 2975 (unsigned long *)&dma_handle, 2976 SCSI_BUS_32(cmd)); 2977 #endif 2978 *dword_ptr++ = 2979 cpu_to_le32(pci_dma_lo32(dma_handle)); 2980 *dword_ptr++ = 2981 cpu_to_le32(pci_dma_hi32(dma_handle)); 2982 *dword_ptr++ = 2983 cpu_to_le32(sg_dma_len(s)); 2984 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n", 2985 cpu_to_le32(pci_dma_hi32(dma_handle)), 2986 cpu_to_le32(pci_dma_lo32(dma_handle)), 2987 cpu_to_le32(sg_dma_len(s))); 2988 } 2989 remseg -= cnt; 2990 dprintk(5, "qla1280_64bit_start_scsi: " 2991 "continuation packet data - b %i, t " 2992 "%i, l %i \n", SCSI_BUS_32(cmd), 2993 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); 2994 qla1280_dump_buffer(5, (char *)pkt, 2995 REQUEST_ENTRY_SIZE); 2996 } 2997 } else { /* No data transfer */ 2998 dprintk(5, "qla1280_64bit_start_scsi: No data, command " 2999 "packet data - b %i, t %i, l %i \n", 3000 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); 3001 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE); 3002 } 3003 /* Adjust ring index. */ 3004 ha->req_ring_index++; 3005 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 3006 ha->req_ring_index = 0; 3007 ha->request_ring_ptr = ha->request_ring; 3008 } else 3009 ha->request_ring_ptr++; 3010 3011 /* Set chip new ring index. 
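 * Writing the new index to mailbox4 (the Request Queue In pointer) is
 * what actually hands the IOCBs to the RISC; the ordering concerns are
 * spelled out in the comment in qla1280_isp_cmd().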
 */
	dprintk(2,
		"qla1280_64bit_start_scsi: Wakeup RISC for pending command\n");
	sp->flags |= SRB_SENT;
	ha->actthreads++;
	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
	/* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
	mmiowb();

 out:
	if (status)
		dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n");
	else
		dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n");

	return status;
}
#else /* !QLA_64BIT_PTR */

/*
 * qla1280_32bit_start_scsi
 *      The start SCSI is responsible for building request packets on
 *      request ring and modifying ISP input pointer.
 *
 *      The Qlogic firmware interface allows every queue slot to have a SCSI
 *      command and up to 4 scatter/gather (SG) entries.  If we need more
 *      than 4 SG entries, then continuation entries are used that can
 *      hold another 7 entries each.  The start routine determines whether
 *      there are enough empty slots and then builds the combination of
 *      requests needed to fulfill the OS request.
 *
 * Input:
 *      ha = adapter block pointer.
 *      sp = SCSI Request Block structure pointer.
 *
 * Returns:
 *      0 = success, was able to issue command.
 */
static int
qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
{
	struct device_reg __iomem *reg = ha->iobase;
	struct scsi_cmnd *cmd = sp->cmd;
	struct cmd_entry *pkt;
	__le32 *dword_ptr;
	int status = 0;
	int cnt;
	int req_cnt;
	int seg_cnt;
	u8 dir;

	ENTER("qla1280_32bit_start_scsi");

	dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp,
		cmd->cmnd[0]);

	/* Calculate number of entries and segments required. */
	req_cnt = 1;
	seg_cnt = scsi_dma_map(cmd);
	if (seg_cnt) {
		/*
		 * if greater than four sg entries then we need to allocate
		 * continuation entries
		 */
		if (seg_cnt > 4) {
			req_cnt += (seg_cnt - 4) / 7;
			if ((seg_cnt - 4) % 7)
				req_cnt++;
		}
		dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
			cmd, seg_cnt, req_cnt);
	} else if (seg_cnt < 0) {
		status = 1;
		goto out;
	}

	if ((req_cnt + 2) >= ha->req_q_cnt) {
		/* Calculate number of free request entries. */
		cnt = RD_REG_WORD(&reg->mailbox4);
		if (ha->req_ring_index < cnt)
			ha->req_q_cnt = cnt - ha->req_ring_index;
		else
			ha->req_q_cnt =
				REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
	}

	dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
		ha->req_q_cnt, seg_cnt);
	/* If room for request in request ring. */
	if ((req_cnt + 2) >= ha->req_q_cnt) {
		status = SCSI_MLQUEUE_HOST_BUSY;
		dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
			"req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
			ha->req_q_cnt, req_cnt);
		goto out;
	}

	/* Check for empty slot in outstanding command list. */
	for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
		     (ha->outstanding_cmds[cnt] != 0); cnt++) ;

	if (cnt >= MAX_OUTSTANDING_COMMANDS) {
		status = SCSI_MLQUEUE_HOST_BUSY;
		dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
			"ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
		goto out;
	}

	CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1);
	ha->outstanding_cmds[cnt] = sp;
	ha->req_q_cnt -= req_cnt;

	/*
	 * Build command packet.
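	 * Same layout as the 64-bit path above, except that a COMMAND_TYPE
	 * entry holds up to four (address, length) segment pairs and each
	 * CONTINUE_TYPE entry holds seven, with 32-bit addresses only.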
3125 */ 3126 pkt = (struct cmd_entry *) ha->request_ring_ptr; 3127 3128 pkt->entry_type = COMMAND_TYPE; 3129 pkt->entry_count = (uint8_t) req_cnt; 3130 pkt->sys_define = (uint8_t) ha->req_ring_index; 3131 pkt->entry_status = 0; 3132 pkt->handle = cpu_to_le32(cnt); 3133 3134 /* Zero out remaining portion of packet. */ 3135 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); 3136 3137 /* Set ISP command timeout. */ 3138 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ); 3139 3140 /* Set device target ID and LUN */ 3141 pkt->lun = SCSI_LUN_32(cmd); 3142 pkt->target = SCSI_BUS_32(cmd) ? 3143 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd); 3144 3145 /* Enable simple tag queuing if device supports it. */ 3146 if (cmd->device->simple_tags) 3147 pkt->control_flags |= cpu_to_le16(BIT_3); 3148 3149 /* Load SCSI command packet. */ 3150 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd)); 3151 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd)); 3152 3153 /*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */ 3154 /* Set transfer direction. */ 3155 dir = qla1280_data_direction(cmd); 3156 pkt->control_flags |= cpu_to_le16(dir); 3157 3158 /* Set total data segment count. */ 3159 pkt->dseg_count = cpu_to_le16(seg_cnt); 3160 3161 /* 3162 * Load data segments. 3163 */ 3164 if (seg_cnt) { 3165 struct scatterlist *sg, *s; 3166 int remseg = seg_cnt; 3167 3168 sg = scsi_sglist(cmd); 3169 3170 /* Setup packet address segment pointer. */ 3171 dword_ptr = &pkt->dseg_0_address; 3172 3173 dprintk(3, "Building S/G data segments..\n"); 3174 qla1280_dump_buffer(1, (char *)sg, 4 * 16); 3175 3176 /* Load command entry data segments. */ 3177 for_each_sg(sg, s, seg_cnt, cnt) { 3178 if (cnt == 4) 3179 break; 3180 *dword_ptr++ = 3181 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))); 3182 *dword_ptr++ = cpu_to_le32(sg_dma_len(s)); 3183 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n", 3184 (pci_dma_lo32(sg_dma_address(s))), 3185 (sg_dma_len(s))); 3186 remseg--; 3187 } 3188 /* 3189 * Build continuation packets. 3190 */ 3191 dprintk(3, "S/G Building Continuation" 3192 "...seg_cnt=0x%x remains\n", seg_cnt); 3193 while (remseg > 0) { 3194 /* Continue from end point */ 3195 sg = s; 3196 /* Adjust ring index. */ 3197 ha->req_ring_index++; 3198 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 3199 ha->req_ring_index = 0; 3200 ha->request_ring_ptr = 3201 ha->request_ring; 3202 } else 3203 ha->request_ring_ptr++; 3204 3205 pkt = (struct cmd_entry *)ha->request_ring_ptr; 3206 3207 /* Zero out packet. */ 3208 memset(pkt, 0, REQUEST_ENTRY_SIZE); 3209 3210 /* Load packet defaults. */ 3211 ((struct cont_entry *) pkt)-> 3212 entry_type = CONTINUE_TYPE; 3213 ((struct cont_entry *) pkt)->entry_count = 1; 3214 3215 ((struct cont_entry *) pkt)->sys_define = 3216 (uint8_t) ha->req_ring_index; 3217 3218 /* Setup packet address segment pointer. */ 3219 dword_ptr = 3220 &((struct cont_entry *) pkt)->dseg_0_address; 3221 3222 /* Load continuation entry data segments. */ 3223 for_each_sg(sg, s, remseg, cnt) { 3224 if (cnt == 7) 3225 break; 3226 *dword_ptr++ = 3227 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))); 3228 *dword_ptr++ = 3229 cpu_to_le32(sg_dma_len(s)); 3230 dprintk(1, 3231 "S/G Segment Cont. 
phys_addr=0x%x, " 3232 "len=0x%x\n", 3233 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))), 3234 cpu_to_le32(sg_dma_len(s))); 3235 } 3236 remseg -= cnt; 3237 dprintk(5, "qla1280_32bit_start_scsi: " 3238 "continuation packet data - " 3239 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd), 3240 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); 3241 qla1280_dump_buffer(5, (char *)pkt, 3242 REQUEST_ENTRY_SIZE); 3243 } 3244 } else { /* No data transfer at all */ 3245 dprintk(5, "qla1280_32bit_start_scsi: No data, command " 3246 "packet data - \n"); 3247 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE); 3248 } 3249 dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n"); 3250 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr, 3251 REQUEST_ENTRY_SIZE); 3252 3253 /* Adjust ring index. */ 3254 ha->req_ring_index++; 3255 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 3256 ha->req_ring_index = 0; 3257 ha->request_ring_ptr = ha->request_ring; 3258 } else 3259 ha->request_ring_ptr++; 3260 3261 /* Set chip new ring index. */ 3262 dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC " 3263 "for pending command\n"); 3264 sp->flags |= SRB_SENT; 3265 ha->actthreads++; 3266 WRT_REG_WORD(®->mailbox4, ha->req_ring_index); 3267 /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */ 3268 mmiowb(); 3269 3270 out: 3271 if (status) 3272 dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n"); 3273 3274 LEAVE("qla1280_32bit_start_scsi"); 3275 3276 return status; 3277 } 3278 #endif 3279 3280 /* 3281 * qla1280_req_pkt 3282 * Function is responsible for locking ring and 3283 * getting a zeroed out request packet. 3284 * 3285 * Input: 3286 * ha = adapter block pointer. 3287 * 3288 * Returns: 3289 * 0 = failed to get slot. 3290 */ 3291 static request_t * 3292 qla1280_req_pkt(struct scsi_qla_host *ha) 3293 { 3294 struct device_reg __iomem *reg = ha->iobase; 3295 request_t *pkt = NULL; 3296 int cnt; 3297 uint32_t timer; 3298 3299 ENTER("qla1280_req_pkt"); 3300 3301 /* 3302 * This can be called from interrupt context, damn it!!! 3303 */ 3304 /* Wait for 30 seconds for slot. */ 3305 for (timer = 15000000; timer; timer--) { 3306 if (ha->req_q_cnt > 0) { 3307 /* Calculate number of free request entries. */ 3308 cnt = RD_REG_WORD(®->mailbox4); 3309 if (ha->req_ring_index < cnt) 3310 ha->req_q_cnt = cnt - ha->req_ring_index; 3311 else 3312 ha->req_q_cnt = 3313 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt); 3314 } 3315 3316 /* Found empty request ring slot? */ 3317 if (ha->req_q_cnt > 0) { 3318 ha->req_q_cnt--; 3319 pkt = ha->request_ring_ptr; 3320 3321 /* Zero out packet. */ 3322 memset(pkt, 0, REQUEST_ENTRY_SIZE); 3323 3324 /* 3325 * How can this be right when we have a ring 3326 * size of 512??? 3327 */ 3328 /* Set system defined field. */ 3329 pkt->sys_define = (uint8_t) ha->req_ring_index; 3330 3331 /* Set entry count. */ 3332 pkt->entry_count = 1; 3333 3334 break; 3335 } 3336 3337 udelay(2); /* 10 */ 3338 3339 /* Check for pending interrupts. */ 3340 qla1280_poll(ha); 3341 } 3342 3343 if (!pkt) 3344 dprintk(2, "qla1280_req_pkt: **** FAILED ****\n"); 3345 else 3346 dprintk(3, "qla1280_req_pkt: exiting normally\n"); 3347 3348 return pkt; 3349 } 3350 3351 /* 3352 * qla1280_isp_cmd 3353 * Function is responsible for modifying ISP input pointer. 3354 * Releases ring lock. 3355 * 3356 * Input: 3357 * ha = adapter block pointer. 
3358 */ 3359 static void 3360 qla1280_isp_cmd(struct scsi_qla_host *ha) 3361 { 3362 struct device_reg __iomem *reg = ha->iobase; 3363 3364 ENTER("qla1280_isp_cmd"); 3365 3366 dprintk(5, "qla1280_isp_cmd: IOCB data:\n"); 3367 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr, 3368 REQUEST_ENTRY_SIZE); 3369 3370 /* Adjust ring index. */ 3371 ha->req_ring_index++; 3372 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 3373 ha->req_ring_index = 0; 3374 ha->request_ring_ptr = ha->request_ring; 3375 } else 3376 ha->request_ring_ptr++; 3377 3378 /* 3379 * Update request index to mailbox4 (Request Queue In). 3380 * The mmiowb() ensures that this write is ordered with writes by other 3381 * CPUs. Without the mmiowb(), it is possible for the following: 3382 * CPUA posts write of index 5 to mailbox4 3383 * CPUA releases host lock 3384 * CPUB acquires host lock 3385 * CPUB posts write of index 6 to mailbox4 3386 * On PCI bus, order reverses and write of 6 posts, then index 5, 3387 * causing chip to issue full queue of stale commands 3388 * The mmiowb() prevents future writes from crossing the barrier. 3389 * See Documentation/driver-api/device-io.rst for more information. 3390 */ 3391 WRT_REG_WORD(®->mailbox4, ha->req_ring_index); 3392 mmiowb(); 3393 3394 LEAVE("qla1280_isp_cmd"); 3395 } 3396 3397 /****************************************************************************/ 3398 /* Interrupt Service Routine. */ 3399 /****************************************************************************/ 3400 3401 /**************************************************************************** 3402 * qla1280_isr 3403 * Calls I/O done on command completion. 3404 * 3405 * Input: 3406 * ha = adapter block pointer. 3407 * done_q = done queue. 3408 ****************************************************************************/ 3409 static void 3410 qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q) 3411 { 3412 struct device_reg __iomem *reg = ha->iobase; 3413 struct response *pkt; 3414 struct srb *sp = NULL; 3415 uint16_t mailbox[MAILBOX_REGISTER_COUNT]; 3416 uint16_t *wptr; 3417 uint32_t index; 3418 u16 istatus; 3419 3420 ENTER("qla1280_isr"); 3421 3422 istatus = RD_REG_WORD(®->istatus); 3423 if (!(istatus & (RISC_INT | PCI_INT))) 3424 return; 3425 3426 /* Save mailbox register 5 */ 3427 mailbox[5] = RD_REG_WORD(®->mailbox5); 3428 3429 /* Check for mailbox interrupt. */ 3430 3431 mailbox[0] = RD_REG_WORD_dmasync(®->semaphore); 3432 3433 if (mailbox[0] & BIT_0) { 3434 /* Get mailbox data. */ 3435 /* dprintk(1, "qla1280_isr: In Get mailbox data \n"); */ 3436 3437 wptr = &mailbox[0]; 3438 *wptr++ = RD_REG_WORD(®->mailbox0); 3439 *wptr++ = RD_REG_WORD(®->mailbox1); 3440 *wptr = RD_REG_WORD(®->mailbox2); 3441 if (mailbox[0] != MBA_SCSI_COMPLETION) { 3442 wptr++; 3443 *wptr++ = RD_REG_WORD(®->mailbox3); 3444 *wptr++ = RD_REG_WORD(®->mailbox4); 3445 wptr++; 3446 *wptr++ = RD_REG_WORD(®->mailbox6); 3447 *wptr = RD_REG_WORD(®->mailbox7); 3448 } 3449 3450 /* Release mailbox registers. */ 3451 3452 WRT_REG_WORD(®->semaphore, 0); 3453 WRT_REG_WORD(®->host_cmd, HC_CLR_RISC_INT); 3454 3455 dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x", 3456 mailbox[0]); 3457 3458 /* Handle asynchronous event */ 3459 switch (mailbox[0]) { 3460 case MBA_SCSI_COMPLETION: /* Response completion */ 3461 dprintk(5, "qla1280_isr: mailbox SCSI response " 3462 "completion\n"); 3463 3464 if (ha->flags.online) { 3465 /* Get outstanding command index. */ 3466 index = mailbox[2] << 16 | mailbox[1]; 3467 3468 /* Validate handle. 
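 * The fast-post completion handle arrives in mailbox1 (low word) and
 * mailbox2 (high word) and indexes outstanding_cmds[] directly; anything
 * out of range means the firmware and driver disagree about outstanding
 * commands and is only reported, not acted upon.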
*/ 3469 if (index < MAX_OUTSTANDING_COMMANDS) 3470 sp = ha->outstanding_cmds[index]; 3471 else 3472 sp = NULL; 3473 3474 if (sp) { 3475 /* Free outstanding command slot. */ 3476 ha->outstanding_cmds[index] = NULL; 3477 3478 /* Save ISP completion status */ 3479 CMD_RESULT(sp->cmd) = 0; 3480 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE; 3481 3482 /* Place block on done queue */ 3483 list_add_tail(&sp->list, done_q); 3484 } else { 3485 /* 3486 * If we get here we have a real problem! 3487 */ 3488 printk(KERN_WARNING 3489 "qla1280: ISP invalid handle\n"); 3490 } 3491 } 3492 break; 3493 3494 case MBA_BUS_RESET: /* SCSI Bus Reset */ 3495 ha->flags.reset_marker = 1; 3496 index = mailbox[6] & BIT_0; 3497 ha->bus_settings[index].reset_marker = 1; 3498 3499 printk(KERN_DEBUG "qla1280_isr(): index %i " 3500 "asynchronous BUS_RESET\n", index); 3501 break; 3502 3503 case MBA_SYSTEM_ERR: /* System Error */ 3504 printk(KERN_WARNING 3505 "qla1280: ISP System Error - mbx1=%xh, mbx2=" 3506 "%xh, mbx3=%xh\n", mailbox[1], mailbox[2], 3507 mailbox[3]); 3508 break; 3509 3510 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 3511 printk(KERN_WARNING 3512 "qla1280: ISP Request Transfer Error\n"); 3513 break; 3514 3515 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 3516 printk(KERN_WARNING 3517 "qla1280: ISP Response Transfer Error\n"); 3518 break; 3519 3520 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 3521 dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n"); 3522 break; 3523 3524 case MBA_TIMEOUT_RESET: /* Execution Timeout Reset */ 3525 dprintk(2, 3526 "qla1280_isr: asynchronous TIMEOUT_RESET\n"); 3527 break; 3528 3529 case MBA_DEVICE_RESET: /* Bus Device Reset */ 3530 printk(KERN_INFO "qla1280_isr(): asynchronous " 3531 "BUS_DEVICE_RESET\n"); 3532 3533 ha->flags.reset_marker = 1; 3534 index = mailbox[6] & BIT_0; 3535 ha->bus_settings[index].reset_marker = 1; 3536 break; 3537 3538 case MBA_BUS_MODE_CHANGE: 3539 dprintk(2, 3540 "qla1280_isr: asynchronous BUS_MODE_CHANGE\n"); 3541 break; 3542 3543 default: 3544 /* dprintk(1, "qla1280_isr: default case of switch MB \n"); */ 3545 if (mailbox[0] < MBA_ASYNC_EVENT) { 3546 wptr = &mailbox[0]; 3547 memcpy((uint16_t *) ha->mailbox_out, wptr, 3548 MAILBOX_REGISTER_COUNT * 3549 sizeof(uint16_t)); 3550 3551 if(ha->mailbox_wait != NULL) 3552 complete(ha->mailbox_wait); 3553 } 3554 break; 3555 } 3556 } else { 3557 WRT_REG_WORD(®->host_cmd, HC_CLR_RISC_INT); 3558 } 3559 3560 /* 3561 * We will receive interrupts during mailbox testing prior to 3562 * the card being marked online, hence the double check. 
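 * Mailbox completions were already delivered through the switch above
 * (the default case copies the registers and completes mailbox_wait),
 * so only response-ring processing is skipped until the adapter is
 * online and no mailbox command is in flight.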
3563 */ 3564 if (!(ha->flags.online && !ha->mailbox_wait)) { 3565 dprintk(2, "qla1280_isr: Response pointer Error\n"); 3566 goto out; 3567 } 3568 3569 if (mailbox[5] >= RESPONSE_ENTRY_CNT) 3570 goto out; 3571 3572 while (ha->rsp_ring_index != mailbox[5]) { 3573 pkt = ha->response_ring_ptr; 3574 3575 dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]" 3576 " = 0x%x\n", ha->rsp_ring_index, mailbox[5]); 3577 dprintk(5,"qla1280_isr: response packet data\n"); 3578 qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE); 3579 3580 if (pkt->entry_type == STATUS_TYPE) { 3581 if ((le16_to_cpu(pkt->scsi_status) & 0xff) 3582 || pkt->comp_status || pkt->entry_status) { 3583 dprintk(2, "qla1280_isr: ha->rsp_ring_index = " 3584 "0x%x mailbox[5] = 0x%x, comp_status " 3585 "= 0x%x, scsi_status = 0x%x\n", 3586 ha->rsp_ring_index, mailbox[5], 3587 le16_to_cpu(pkt->comp_status), 3588 le16_to_cpu(pkt->scsi_status)); 3589 } 3590 } else { 3591 dprintk(2, "qla1280_isr: ha->rsp_ring_index = " 3592 "0x%x, mailbox[5] = 0x%x\n", 3593 ha->rsp_ring_index, mailbox[5]); 3594 dprintk(2, "qla1280_isr: response packet data\n"); 3595 qla1280_dump_buffer(2, (char *)pkt, 3596 RESPONSE_ENTRY_SIZE); 3597 } 3598 3599 if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) { 3600 dprintk(2, "status: Cmd %p, handle %i\n", 3601 ha->outstanding_cmds[pkt->handle]->cmd, 3602 pkt->handle); 3603 if (pkt->entry_type == STATUS_TYPE) 3604 qla1280_status_entry(ha, pkt, done_q); 3605 else 3606 qla1280_error_entry(ha, pkt, done_q); 3607 /* Adjust ring index. */ 3608 ha->rsp_ring_index++; 3609 if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) { 3610 ha->rsp_ring_index = 0; 3611 ha->response_ring_ptr = ha->response_ring; 3612 } else 3613 ha->response_ring_ptr++; 3614 WRT_REG_WORD(®->mailbox5, ha->rsp_ring_index); 3615 } 3616 } 3617 3618 out: 3619 LEAVE("qla1280_isr"); 3620 } 3621 3622 /* 3623 * qla1280_rst_aen 3624 * Processes asynchronous reset. 3625 * 3626 * Input: 3627 * ha = adapter block pointer. 3628 */ 3629 static void 3630 qla1280_rst_aen(struct scsi_qla_host *ha) 3631 { 3632 uint8_t bus; 3633 3634 ENTER("qla1280_rst_aen"); 3635 3636 if (ha->flags.online && !ha->flags.reset_active && 3637 !ha->flags.abort_isp_active) { 3638 ha->flags.reset_active = 1; 3639 while (ha->flags.reset_marker) { 3640 /* Issue marker command. */ 3641 ha->flags.reset_marker = 0; 3642 for (bus = 0; bus < ha->ports && 3643 !ha->flags.reset_marker; bus++) { 3644 if (ha->bus_settings[bus].reset_marker) { 3645 ha->bus_settings[bus].reset_marker = 0; 3646 qla1280_marker(ha, bus, 0, 0, 3647 MK_SYNC_ALL); 3648 } 3649 } 3650 } 3651 } 3652 3653 LEAVE("qla1280_rst_aen"); 3654 } 3655 3656 3657 /* 3658 * qla1280_status_entry 3659 * Processes received ISP status entry. 3660 * 3661 * Input: 3662 * ha = adapter block pointer. 3663 * pkt = entry pointer. 3664 * done_q = done queue. 3665 */ 3666 static void 3667 qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt, 3668 struct list_head *done_q) 3669 { 3670 unsigned int bus, target, lun; 3671 int sense_sz; 3672 struct srb *sp; 3673 struct scsi_cmnd *cmd; 3674 uint32_t handle = le32_to_cpu(pkt->handle); 3675 uint16_t scsi_status = le16_to_cpu(pkt->scsi_status); 3676 uint16_t comp_status = le16_to_cpu(pkt->comp_status); 3677 3678 ENTER("qla1280_status_entry"); 3679 3680 /* Validate handle. 
*/ 3681 if (handle < MAX_OUTSTANDING_COMMANDS) 3682 sp = ha->outstanding_cmds[handle]; 3683 else 3684 sp = NULL; 3685 3686 if (!sp) { 3687 printk(KERN_WARNING "qla1280: Status Entry invalid handle\n"); 3688 goto out; 3689 } 3690 3691 /* Free outstanding command slot. */ 3692 ha->outstanding_cmds[handle] = NULL; 3693 3694 cmd = sp->cmd; 3695 3696 /* Generate LU queue on cntrl, target, LUN */ 3697 bus = SCSI_BUS_32(cmd); 3698 target = SCSI_TCN_32(cmd); 3699 lun = SCSI_LUN_32(cmd); 3700 3701 if (comp_status || scsi_status) { 3702 dprintk(3, "scsi: comp_status = 0x%x, scsi_status = " 3703 "0x%x, handle = 0x%x\n", comp_status, 3704 scsi_status, handle); 3705 } 3706 3707 /* Target busy or queue full */ 3708 if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL || 3709 (scsi_status & 0xFF) == SAM_STAT_BUSY) { 3710 CMD_RESULT(cmd) = scsi_status & 0xff; 3711 } else { 3712 3713 /* Save ISP completion status */ 3714 CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd); 3715 3716 if (scsi_status & SAM_STAT_CHECK_CONDITION) { 3717 if (comp_status != CS_ARS_FAILED) { 3718 uint16_t req_sense_length = 3719 le16_to_cpu(pkt->req_sense_length); 3720 if (req_sense_length < CMD_SNSLEN(cmd)) 3721 sense_sz = req_sense_length; 3722 else 3723 /* 3724 * scsi_cmnd->sense_buffer is 3725 * 64 bytes, why only copy 63? 3726 * This looks wrong! /Jes 3727 */ 3728 sense_sz = CMD_SNSLEN(cmd) - 1; 3729 3730 memcpy(cmd->sense_buffer, 3731 &pkt->req_sense_data, sense_sz); 3732 } else 3733 sense_sz = 0; 3734 memset(cmd->sense_buffer + sense_sz, 0, 3735 SCSI_SENSE_BUFFERSIZE - sense_sz); 3736 3737 dprintk(2, "qla1280_status_entry: Check " 3738 "condition Sense data, b %i, t %i, " 3739 "l %i\n", bus, target, lun); 3740 if (sense_sz) 3741 qla1280_dump_buffer(2, 3742 (char *)cmd->sense_buffer, 3743 sense_sz); 3744 } 3745 } 3746 3747 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE; 3748 3749 /* Place command on done queue. */ 3750 list_add_tail(&sp->list, done_q); 3751 out: 3752 LEAVE("qla1280_status_entry"); 3753 } 3754 3755 /* 3756 * qla1280_error_entry 3757 * Processes error entry. 3758 * 3759 * Input: 3760 * ha = adapter block pointer. 3761 * pkt = entry pointer. 3762 * done_q = done queue. 3763 */ 3764 static void 3765 qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt, 3766 struct list_head *done_q) 3767 { 3768 struct srb *sp; 3769 uint32_t handle = le32_to_cpu(pkt->handle); 3770 3771 ENTER("qla1280_error_entry"); 3772 3773 if (pkt->entry_status & BIT_3) 3774 dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n"); 3775 else if (pkt->entry_status & BIT_2) 3776 dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n"); 3777 else if (pkt->entry_status & BIT_1) 3778 dprintk(2, "qla1280_error_entry: FULL flag error\n"); 3779 else 3780 dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n"); 3781 3782 /* Validate handle. */ 3783 if (handle < MAX_OUTSTANDING_COMMANDS) 3784 sp = ha->outstanding_cmds[handle]; 3785 else 3786 sp = NULL; 3787 3788 if (sp) { 3789 /* Free outstanding command slot. */ 3790 ha->outstanding_cmds[handle] = NULL; 3791 3792 /* Bad payload or header */ 3793 if (pkt->entry_status & (BIT_3 + BIT_2)) { 3794 /* Bad payload or header, set error status. */ 3795 /* CMD_RESULT(sp->cmd) = CS_BAD_PAYLOAD; */ 3796 CMD_RESULT(sp->cmd) = DID_ERROR << 16; 3797 } else if (pkt->entry_status & BIT_1) { /* FULL flag */ 3798 CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16; 3799 } else { 3800 /* Set error status. 
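 * (entry_status carried neither the bad payload/header bits nor the
 * FULL bit, so fall back to a generic DID_ERROR completion)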
*/
3801 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3802 }
3803
3804 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3805
3806 /* Place command on done queue. */
3807 list_add_tail(&sp->list, done_q);
3808 }
3809 #ifdef QLA_64BIT_PTR
3810 else if (pkt->entry_type == COMMAND_A64_TYPE) {
3811 printk(KERN_WARNING "qla1280: Error Entry invalid handle\n");
3812 }
3813 #endif
3814
3815 LEAVE("qla1280_error_entry");
3816 }
3817
3818 /*
3819 * qla1280_abort_isp
3820 * Resets ISP and aborts all outstanding commands.
3821 *
3822 * Input:
3823 * ha = adapter block pointer.
3824 *
3825 * Returns:
3826 * 0 = success
3827 */
3828 static int
3829 qla1280_abort_isp(struct scsi_qla_host *ha)
3830 {
3831 struct device_reg __iomem *reg = ha->iobase;
3832 struct srb *sp;
3833 int status = 0;
3834 int cnt;
3835 int bus;
3836
3837 ENTER("qla1280_abort_isp");
3838
3839 if (ha->flags.abort_isp_active || !ha->flags.online)
3840 goto out;
3841
3842 ha->flags.abort_isp_active = 1;
3843
3844 /* Disable ISP interrupts. */
3845 qla1280_disable_intrs(ha);
3846 WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3847 RD_REG_WORD(&reg->id_l);
3848
3849 printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
3850 ha->host_no);
3851 /* Dequeue all commands in outstanding command list. */
3852 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3853 struct scsi_cmnd *cmd;
3854 sp = ha->outstanding_cmds[cnt];
3855 if (sp) {
3856 cmd = sp->cmd;
3857 CMD_RESULT(cmd) = DID_RESET << 16;
3858 CMD_HANDLE(cmd) = COMPLETED_HANDLE;
3859 ha->outstanding_cmds[cnt] = NULL;
3860 list_add_tail(&sp->list, &ha->done_q);
3861 }
3862 }
3863
3864 qla1280_done(ha);
3865
3866 status = qla1280_load_firmware(ha);
3867 if (status)
3868 goto out;
3869
3870 /* Setup adapter based on NVRAM parameters. */
3871 qla1280_nvram_config (ha);
3872
3873 status = qla1280_init_rings(ha);
3874 if (status)
3875 goto out;
3876
3877 /* Issue SCSI reset. */
3878 for (bus = 0; bus < ha->ports; bus++)
3879 qla1280_bus_reset(ha, bus);
3880
3881 ha->flags.abort_isp_active = 0;
3882 out:
3883 if (status) {
3884 printk(KERN_WARNING
3885 "qla1280: ISP error recovery failed, board disabled\n");
3886 qla1280_reset_adapter(ha);
3887 dprintk(2, "qla1280_abort_isp: **** FAILED ****\n");
3888 }
3889
3890 LEAVE("qla1280_abort_isp");
3891 return status;
3892 }
3893
3894
3895 /*
3896 * qla1280_debounce_register
3897 * Debounce register.
3898 *
3899 * Input:
3900 * port = register address.
3901 *
3902 * Returns:
3903 * register value.
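 *
 * Illustrative sketch only (not driver code): the routine below simply
 * re-reads the register until two consecutive reads agree.  Expressed
 * against the generic readw() accessor instead of this driver's
 * RD_REG_WORD() wrapper, and using a hypothetical helper name, the same
 * idea looks like:
 *
 *	static u16 debounced_readw(const volatile u16 __iomem *addr)
 *	{
 *		u16 a, b;
 *
 *		do {
 *			a = readw(addr);
 *			b = readw(addr);
 *		} while (a != b);	// loop until two samples match
 *
 *		return a;
 *	}
 *
 * so a caller only ever acts on a value that read back identically twice.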
3904 */
3905 static u16
3906 qla1280_debounce_register(volatile u16 __iomem * addr)
3907 {
3908 volatile u16 ret;
3909 volatile u16 ret2;
3910
3911 ret = RD_REG_WORD(addr);
3912 ret2 = RD_REG_WORD(addr);
3913
3914 if (ret == ret2)
3915 return ret;
3916
3917 do {
3918 cpu_relax();
3919 ret = RD_REG_WORD(addr);
3920 ret2 = RD_REG_WORD(addr);
3921 } while (ret != ret2);
3922
3923 return ret;
3924 }
3925
3926
3927 /************************************************************************
3928 * qla1280_check_for_dead_scsi_bus *
3929 * *
3930 * This routine checks for a dead SCSI bus *
3931 ************************************************************************/
3932 #define SET_SXP_BANK 0x0100
3933 #define SCSI_PHASE_INVALID 0x87FF
3934 static int
3935 qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
3936 {
3937 uint16_t config_reg, scsi_control;
3938 struct device_reg __iomem *reg = ha->iobase;
3939
3940 if (ha->bus_settings[bus].scsi_bus_dead) {
3941 WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3942 config_reg = RD_REG_WORD(&reg->cfg_1);
3943 WRT_REG_WORD(&reg->cfg_1, SET_SXP_BANK);
3944 scsi_control = RD_REG_WORD(&reg->scsiControlPins);
3945 WRT_REG_WORD(&reg->cfg_1, config_reg);
3946 WRT_REG_WORD(&reg->host_cmd, HC_RELEASE_RISC);
3947
3948 if (scsi_control == SCSI_PHASE_INVALID) {
3949 ha->bus_settings[bus].scsi_bus_dead = 1;
3950 return 1; /* bus is dead */
3951 } else {
3952 ha->bus_settings[bus].scsi_bus_dead = 0;
3953 ha->bus_settings[bus].failed_reset_count = 0;
3954 }
3955 }
3956 return 0; /* bus is not dead */
3957 }
3958
3959 static void
3960 qla1280_get_target_parameters(struct scsi_qla_host *ha,
3961 struct scsi_device *device)
3962 {
3963 uint16_t mb[MAILBOX_REGISTER_COUNT];
3964 int bus, target, lun;
3965
3966 bus = device->channel;
3967 target = device->id;
3968 lun = device->lun;
3969
3970
3971 mb[0] = MBC_GET_TARGET_PARAMETERS;
3972 mb[1] = (uint16_t) (bus ?
target | BIT_7 : target); 3973 mb[1] <<= 8; 3974 qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0, 3975 &mb[0]); 3976 3977 printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun); 3978 3979 if (mb[3] != 0) { 3980 printk(" Sync: period %d, offset %d", 3981 (mb[3] & 0xff), (mb[3] >> 8)); 3982 if (mb[2] & BIT_13) 3983 printk(", Wide"); 3984 if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2) 3985 printk(", DT"); 3986 } else 3987 printk(" Async"); 3988 3989 if (device->simple_tags) 3990 printk(", Tagged queuing: depth %d", device->queue_depth); 3991 printk("\n"); 3992 } 3993 3994 3995 #if DEBUG_QLA1280 3996 static void 3997 __qla1280_dump_buffer(char *b, int size) 3998 { 3999 int cnt; 4000 u8 c; 4001 4002 printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah " 4003 "Bh Ch Dh Eh Fh\n"); 4004 printk(KERN_DEBUG "---------------------------------------------" 4005 "------------------\n"); 4006 4007 for (cnt = 0; cnt < size;) { 4008 c = *b++; 4009 4010 printk("0x%02x", c); 4011 cnt++; 4012 if (!(cnt % 16)) 4013 printk("\n"); 4014 else 4015 printk(" "); 4016 } 4017 if (cnt % 16) 4018 printk("\n"); 4019 } 4020 4021 /************************************************************************** 4022 * ql1280_print_scsi_cmd 4023 * 4024 **************************************************************************/ 4025 static void 4026 __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd) 4027 { 4028 struct scsi_qla_host *ha; 4029 struct Scsi_Host *host = CMD_HOST(cmd); 4030 struct srb *sp; 4031 /* struct scatterlist *sg; */ 4032 4033 int i; 4034 ha = (struct scsi_qla_host *)host->hostdata; 4035 4036 sp = (struct srb *)CMD_SP(cmd); 4037 printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd)); 4038 printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n", 4039 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd), 4040 CMD_CDBLEN(cmd)); 4041 printk(" CDB = "); 4042 for (i = 0; i < cmd->cmd_len; i++) { 4043 printk("0x%02x ", cmd->cmnd[i]); 4044 } 4045 printk(" seg_cnt =%d\n", scsi_sg_count(cmd)); 4046 printk(" request buffer=0x%p, request buffer len=0x%x\n", 4047 scsi_sglist(cmd), scsi_bufflen(cmd)); 4048 /* if (cmd->use_sg) 4049 { 4050 sg = (struct scatterlist *) cmd->request_buffer; 4051 printk(" SG buffer: \n"); 4052 qla1280_dump_buffer(1, (char *)sg, (cmd->use_sg*sizeof(struct scatterlist))); 4053 } */ 4054 printk(" tag=%d, transfersize=0x%x \n", 4055 cmd->tag, cmd->transfersize); 4056 printk(" SP=0x%p\n", CMD_SP(cmd)); 4057 printk(" underflow size = 0x%x, direction=0x%x\n", 4058 cmd->underflow, cmd->sc_data_direction); 4059 } 4060 4061 /************************************************************************** 4062 * ql1280_dump_device 4063 * 4064 **************************************************************************/ 4065 static void 4066 ql1280_dump_device(struct scsi_qla_host *ha) 4067 { 4068 4069 struct scsi_cmnd *cp; 4070 struct srb *sp; 4071 int i; 4072 4073 printk(KERN_DEBUG "Outstanding Commands on controller:\n"); 4074 4075 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) { 4076 if ((sp = ha->outstanding_cmds[i]) == NULL) 4077 continue; 4078 if ((cp = sp->cmd) == NULL) 4079 continue; 4080 qla1280_print_scsi_cmd(1, cp); 4081 } 4082 } 4083 #endif 4084 4085 4086 enum tokens { 4087 TOKEN_NVRAM, 4088 TOKEN_SYNC, 4089 TOKEN_WIDE, 4090 TOKEN_PPR, 4091 TOKEN_VERBOSE, 4092 TOKEN_DEBUG, 4093 }; 4094 4095 struct setup_tokens { 4096 char *token; 4097 int val; 4098 }; 4099 4100 static struct setup_tokens setup_token[] __initdata = 4101 { 4102 { "nvram", TOKEN_NVRAM }, 4103 
{ "sync", TOKEN_SYNC }, 4104 { "wide", TOKEN_WIDE }, 4105 { "ppr", TOKEN_PPR }, 4106 { "verbose", TOKEN_VERBOSE }, 4107 { "debug", TOKEN_DEBUG }, 4108 }; 4109 4110 4111 /************************************************************************** 4112 * qla1280_setup 4113 * 4114 * Handle boot parameters. This really needs to be changed so one 4115 * can specify per adapter parameters. 4116 **************************************************************************/ 4117 static int __init 4118 qla1280_setup(char *s) 4119 { 4120 char *cp, *ptr; 4121 unsigned long val; 4122 int toke; 4123 4124 cp = s; 4125 4126 while (cp && (ptr = strchr(cp, ':'))) { 4127 ptr++; 4128 if (!strcmp(ptr, "yes")) { 4129 val = 0x10000; 4130 ptr += 3; 4131 } else if (!strcmp(ptr, "no")) { 4132 val = 0; 4133 ptr += 2; 4134 } else 4135 val = simple_strtoul(ptr, &ptr, 0); 4136 4137 switch ((toke = qla1280_get_token(cp))) { 4138 case TOKEN_NVRAM: 4139 if (!val) 4140 driver_setup.no_nvram = 1; 4141 break; 4142 case TOKEN_SYNC: 4143 if (!val) 4144 driver_setup.no_sync = 1; 4145 else if (val != 0x10000) 4146 driver_setup.sync_mask = val; 4147 break; 4148 case TOKEN_WIDE: 4149 if (!val) 4150 driver_setup.no_wide = 1; 4151 else if (val != 0x10000) 4152 driver_setup.wide_mask = val; 4153 break; 4154 case TOKEN_PPR: 4155 if (!val) 4156 driver_setup.no_ppr = 1; 4157 else if (val != 0x10000) 4158 driver_setup.ppr_mask = val; 4159 break; 4160 case TOKEN_VERBOSE: 4161 qla1280_verbose = val; 4162 break; 4163 default: 4164 printk(KERN_INFO "qla1280: unknown boot option %s\n", 4165 cp); 4166 } 4167 4168 cp = strchr(ptr, ';'); 4169 if (cp) 4170 cp++; 4171 else { 4172 break; 4173 } 4174 } 4175 return 1; 4176 } 4177 4178 4179 static int __init 4180 qla1280_get_token(char *str) 4181 { 4182 char *sep; 4183 long ret = -1; 4184 int i; 4185 4186 sep = strchr(str, ':'); 4187 4188 if (sep) { 4189 for (i = 0; i < ARRAY_SIZE(setup_token); i++) { 4190 if (!strncmp(setup_token[i].token, str, (sep - str))) { 4191 ret = setup_token[i].val; 4192 break; 4193 } 4194 } 4195 } 4196 4197 return ret; 4198 } 4199 4200 4201 static struct scsi_host_template qla1280_driver_template = { 4202 .module = THIS_MODULE, 4203 .proc_name = "qla1280", 4204 .name = "Qlogic ISP 1280/12160", 4205 .info = qla1280_info, 4206 .slave_configure = qla1280_slave_configure, 4207 .queuecommand = qla1280_queuecommand, 4208 .eh_abort_handler = qla1280_eh_abort, 4209 .eh_device_reset_handler= qla1280_eh_device_reset, 4210 .eh_bus_reset_handler = qla1280_eh_bus_reset, 4211 .eh_host_reset_handler = qla1280_eh_adapter_reset, 4212 .bios_param = qla1280_biosparam, 4213 .can_queue = MAX_OUTSTANDING_COMMANDS, 4214 .this_id = -1, 4215 .sg_tablesize = SG_ALL, 4216 .use_clustering = ENABLE_CLUSTERING, 4217 }; 4218 4219 4220 static int 4221 qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 4222 { 4223 int devnum = id->driver_data; 4224 struct qla_boards *bdp = &ql1280_board_tbl[devnum]; 4225 struct Scsi_Host *host; 4226 struct scsi_qla_host *ha; 4227 int error = -ENODEV; 4228 4229 /* Bypass all AMI SUBSYS VENDOR IDs */ 4230 if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) { 4231 printk(KERN_INFO 4232 "qla1280: Skipping AMI SubSys Vendor ID Chip\n"); 4233 goto error; 4234 } 4235 4236 printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n", 4237 bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn)); 4238 4239 if (pci_enable_device(pdev)) { 4240 printk(KERN_WARNING 4241 "qla1280: Failed to enabled pci device, aborting.\n"); 4242 goto error; 4243 } 4244 4245 
pci_set_master(pdev); 4246 4247 error = -ENOMEM; 4248 host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha)); 4249 if (!host) { 4250 printk(KERN_WARNING 4251 "qla1280: Failed to register host, aborting.\n"); 4252 goto error_disable_device; 4253 } 4254 4255 ha = (struct scsi_qla_host *)host->hostdata; 4256 memset(ha, 0, sizeof(struct scsi_qla_host)); 4257 4258 ha->pdev = pdev; 4259 ha->devnum = devnum; /* specifies microcode load address */ 4260 4261 #ifdef QLA_64BIT_PTR 4262 if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) { 4263 if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) { 4264 printk(KERN_WARNING "scsi(%li): Unable to set a " 4265 "suitable DMA mask - aborting\n", ha->host_no); 4266 error = -ENODEV; 4267 goto error_put_host; 4268 } 4269 } else 4270 dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n", 4271 ha->host_no); 4272 #else 4273 if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) { 4274 printk(KERN_WARNING "scsi(%li): Unable to set a " 4275 "suitable DMA mask - aborting\n", ha->host_no); 4276 error = -ENODEV; 4277 goto error_put_host; 4278 } 4279 #endif 4280 4281 ha->request_ring = dma_alloc_coherent(&ha->pdev->dev, 4282 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)), 4283 &ha->request_dma, GFP_KERNEL); 4284 if (!ha->request_ring) { 4285 printk(KERN_INFO "qla1280: Failed to get request memory\n"); 4286 goto error_put_host; 4287 } 4288 4289 ha->response_ring = dma_alloc_coherent(&ha->pdev->dev, 4290 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)), 4291 &ha->response_dma, GFP_KERNEL); 4292 if (!ha->response_ring) { 4293 printk(KERN_INFO "qla1280: Failed to get response memory\n"); 4294 goto error_free_request_ring; 4295 } 4296 4297 ha->ports = bdp->numPorts; 4298 4299 ha->host = host; 4300 ha->host_no = host->host_no; 4301 4302 host->irq = pdev->irq; 4303 host->max_channel = bdp->numPorts - 1; 4304 host->max_lun = MAX_LUNS - 1; 4305 host->max_id = MAX_TARGETS; 4306 host->max_sectors = 1024; 4307 host->unique_id = host->host_no; 4308 4309 error = -ENODEV; 4310 4311 #if MEMORY_MAPPED_IO 4312 ha->mmpbase = pci_ioremap_bar(ha->pdev, 1); 4313 if (!ha->mmpbase) { 4314 printk(KERN_INFO "qla1280: Unable to map I/O memory\n"); 4315 goto error_free_response_ring; 4316 } 4317 4318 host->base = (unsigned long)ha->mmpbase; 4319 ha->iobase = (struct device_reg __iomem *)ha->mmpbase; 4320 #else 4321 host->io_port = pci_resource_start(ha->pdev, 0); 4322 if (!request_region(host->io_port, 0xff, "qla1280")) { 4323 printk(KERN_INFO "qla1280: Failed to reserve i/o region " 4324 "0x%04lx-0x%04lx - already in use\n", 4325 host->io_port, host->io_port + 0xff); 4326 goto error_free_response_ring; 4327 } 4328 4329 ha->iobase = (struct device_reg *)host->io_port; 4330 #endif 4331 4332 INIT_LIST_HEAD(&ha->done_q); 4333 4334 /* Disable ISP interrupts. 
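 * This keeps the board from raising an interrupt before request_irq()
 * below has installed the handler.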
*/
4335 qla1280_disable_intrs(ha);
4336
4337 if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
4338 "qla1280", ha)) {
4339 printk(KERN_WARNING "qla1280: Failed to reserve interrupt %d, already "
4340 "in use\n", pdev->irq);
4341 goto error_release_region;
4342 }
4343
4344 /* load the F/W, read parameters, and init the H/W */
4345 if (qla1280_initialize_adapter(ha)) {
4346 printk(KERN_INFO "qla1280: Failed to initialize adapter\n");
4347 goto error_free_irq;
4348 }
4349
4350 /* set our host ID (need to do something about our two IDs) */
4351 host->this_id = ha->bus_settings[0].id;
4352
4353 pci_set_drvdata(pdev, host);
4354
4355 error = scsi_add_host(host, &pdev->dev);
4356 if (error)
4357 goto error_disable_adapter;
4358 scsi_scan_host(host);
4359
4360 return 0;
4361
4362 error_disable_adapter:
4363 qla1280_disable_intrs(ha);
4364 error_free_irq:
4365 free_irq(pdev->irq, ha);
4366 error_release_region:
4367 #if MEMORY_MAPPED_IO
4368 iounmap(ha->mmpbase);
4369 #else
4370 release_region(host->io_port, 0xff);
4371 #endif
4372 error_free_response_ring:
4373 dma_free_coherent(&ha->pdev->dev,
4374 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4375 ha->response_ring, ha->response_dma);
4376 error_free_request_ring:
4377 dma_free_coherent(&ha->pdev->dev,
4378 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4379 ha->request_ring, ha->request_dma);
4380 error_put_host:
4381 scsi_host_put(host);
4382 error_disable_device:
4383 pci_disable_device(pdev);
4384 error:
4385 return error;
4386 }
4387
4388
4389 static void
4390 qla1280_remove_one(struct pci_dev *pdev)
4391 {
4392 struct Scsi_Host *host = pci_get_drvdata(pdev);
4393 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
4394
4395 scsi_remove_host(host);
4396
4397 qla1280_disable_intrs(ha);
4398
4399 free_irq(pdev->irq, ha);
4400
4401 #if MEMORY_MAPPED_IO
4402 iounmap(ha->mmpbase);
4403 #else
4404 release_region(host->io_port, 0xff);
4405 #endif
4406
4407 dma_free_coherent(&ha->pdev->dev,
4408 ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
4409 ha->request_ring, ha->request_dma);
4410 dma_free_coherent(&ha->pdev->dev,
4411 ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
4412 ha->response_ring, ha->response_dma);
4413
4414 pci_disable_device(pdev);
4415
4416 scsi_host_put(host);
4417 }
4418
4419 static struct pci_driver qla1280_pci_driver = {
4420 .name = "qla1280",
4421 .id_table = qla1280_pci_tbl,
4422 .probe = qla1280_probe_one,
4423 .remove = qla1280_remove_one,
4424 };
4425
4426 static int __init
4427 qla1280_init(void)
4428 {
4429 if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
4430 printk(KERN_WARNING
4431 "qla1280: struct srb too big, aborting\n");
4432 return -EINVAL;
4433 }
4434
4435 #ifdef MODULE
4436 /*
4437 * If we are called as a module, the qla1280 pointer may not be null
4438 * and it would point to our bootup string, just like on the lilo
4439 * command line. If not NULL, then process this config string with
4440 * qla1280_setup().
4441 *
4442 * Boot time Options
4443 * To add options at boot time add a line to your lilo.conf file like:
4444 * append="qla1280=verbose:1;wide:0"
4445 * which enables verbose logging and disables wide negotiation for all
4446 * controllers; see qla1280_setup() above for the supported options.
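 *
 * When the driver is built as a module, the same option string can be
 * passed through the "qla1280" module option mentioned above (a charp
 * parameter declared earlier in this file), for example:
 *	modprobe qla1280 qla1280=verbose:1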
4447 */ 4448 if (qla1280) 4449 qla1280_setup(qla1280); 4450 #endif 4451 4452 return pci_register_driver(&qla1280_pci_driver); 4453 } 4454 4455 static void __exit 4456 qla1280_exit(void) 4457 { 4458 int i; 4459 4460 pci_unregister_driver(&qla1280_pci_driver); 4461 /* release any allocated firmware images */ 4462 for (i = 0; i < QL_NUM_FW_IMAGES; i++) { 4463 release_firmware(qla1280_fw_tbl[i].fw); 4464 qla1280_fw_tbl[i].fw = NULL; 4465 } 4466 } 4467 4468 module_init(qla1280_init); 4469 module_exit(qla1280_exit); 4470 4471 MODULE_AUTHOR("Qlogic & Jes Sorensen"); 4472 MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver"); 4473 MODULE_LICENSE("GPL"); 4474 MODULE_FIRMWARE("qlogic/1040.bin"); 4475 MODULE_FIRMWARE("qlogic/1280.bin"); 4476 MODULE_FIRMWARE("qlogic/12160.bin"); 4477 MODULE_VERSION(QLA1280_VERSION); 4478 4479 /* 4480 * Overrides for Emacs so that we almost follow Linus's tabbing style. 4481 * Emacs will notice this stuff at the end of the file and automatically 4482 * adjust the settings for this buffer only. This must remain at the end 4483 * of the file. 4484 * --------------------------------------------------------------------------- 4485 * Local variables: 4486 * c-basic-offset: 8 4487 * tab-width: 8 4488 * End: 4489 */ 4490