/*
   3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.

   Written By: Adam Radford <aradford@gmail.com>
   Modifications By: Tom Couch

   Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
   Copyright (C) 2010 LSI Corporation.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   NO WARRANTY
   THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
   CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
   LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
   solely responsible for determining the appropriateness of using and
   distributing the Program and assumes all risks associated with its
   exercise of rights under this Agreement, including but not limited to
   the risks and costs of program errors, damage to or loss of data,
   programs or equipment, and unavailability or interruption of operations.

   DISCLAIMER OF LIABILITY
   NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
   ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
   TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
   USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
   HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

   Bugs/Comments/Suggestions should be mailed to:
   aradford@gmail.com

   Note: This version of the driver does not contain a bundled firmware
         image.

   History
   -------
   2.26.02.000 - Driver cleanup for kernel submission.
   2.26.02.001 - Replace schedule_timeout() calls with msleep().
   2.26.02.002 - Add support for PAE mode.
                 Add lun support.
                 Fix twa_remove() to free irq handler/unregister_chrdev()
                 before shutting down card.
                 Change to new 'change_queue_depth' api.
                 Fix 'handled=1' ISR usage, remove bogus IRQ check.
                 Remove un-needed eh_abort handler.
                 Add support for embedded firmware error strings.
   2.26.02.003 - Correctly handle single sgl's with use_sg=1.
   2.26.02.004 - Add support for 9550SX controllers.
   2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
   2.26.02.006 - Fix 9550SX pchip reset timeout.
                 Add big endian support.
   2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
   2.26.02.008 - Free irq handler in __twa_shutdown().
                 Serialize reset code.
                 Add support for 9650SE controllers.
   2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
   2.26.02.010 - Add support for 9690SA controllers.
   2.26.02.011 - Increase max AENs drained to 256.
                 Add MSI support and "use_msi" module parameter.
                 Fix bug in twa_get_param() on 4GB+.
                 Use pci_resource_len() for ioremap().
   2.26.02.012 - Add power management support.
   2.26.02.013 - Fix bug in twa_load_sgl().
   2.26.02.014 - Force 60 second timeout default.
*/

#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_cmnd.h>
#include "3w-9xxx.h"

/* Globals */
#define TW_DRIVER_VERSION "2.26.02.014"
static DEFINE_MUTEX(twa_chrdev_mutex);
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
extern struct timezone sys_tz;

/* Module parameters */
MODULE_AUTHOR ("LSI");
MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(TW_DRIVER_VERSION);

static int use_msi = 0;
module_param(use_msi, int, S_IRUGO);
MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");

/* Function prototypes */
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
static char *twa_aen_severity_lookup(unsigned char severity_code);
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static int twa_chrdev_open(struct inode *inode, struct file *file);
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
			      u32 set_features, unsigned short current_fw_srl,
			      unsigned short current_fw_arch_id,
			      unsigned short current_fw_branch,
			      unsigned short current_fw_build,
			      unsigned short *fw_on_ctlr_srl,
			      unsigned short *fw_on_ctlr_arch_id,
			      unsigned short *fw_on_ctlr_branch,
			      unsigned short *fw_on_ctlr_build,
			      u32 *init_connect_result);
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);

/* Functions */

/* Show some statistics about the card */
static ssize_t twa_show_stats(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
	unsigned long flags = 0;
	ssize_t len;

	spin_lock_irqsave(tw_dev->host->host_lock, flags);
	len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
		       "Current commands posted: %4d\n"
		       "Max commands posted: %4d\n"
		       "Current pending commands: %4d\n"
		       "Max pending commands: %4d\n"
		       "Last sgl length: %4d\n"
		       "Max sgl length: %4d\n"
		       "Last sector count: %4d\n"
		       "Max sector count: %4d\n"
		       "SCSI Host Resets: %4d\n"
		       "AEN's: %4d\n",
		       TW_DRIVER_VERSION,
		       tw_dev->posted_request_count,
		       tw_dev->max_posted_request_count,
		       tw_dev->pending_request_count,
		       tw_dev->max_pending_request_count,
		       tw_dev->sgl_entries,
		       tw_dev->max_sgl_entries,
		       tw_dev->sector_count,
		       tw_dev->max_sector_count,
		       tw_dev->num_resets,
		       tw_dev->aen_count);
	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
	return len;
} /* End twa_show_stats() */

/* Create sysfs 'stats' entry */
static struct device_attribute twa_host_stats_attr = {
	.attr = {
		.name = "stats",
		.mode = S_IRUGO,
	},
	.show = twa_show_stats
};

/* Host attributes initializer */
static struct device_attribute *twa_host_attrs[] = {
	&twa_host_stats_attr,
	NULL,
};

/* File operations struct for character device */
static const struct file_operations twa_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= twa_chrdev_ioctl,
	.open		= twa_chrdev_open,
	.release	= NULL,
	.llseek		= noop_llseek,
};

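/*
 * Character device interface note: the device backed by twa_fops is driven
 * through twa_chrdev_ioctl() below.  Judging from that function, callers
 * pass a TW_Ioctl_Buf_Apache whose leading TW_Ioctl_Driver_Command names
 * the data buffer length (at most TW_MAX_SECTORS * 2048 bytes); the header
 * is copied from userspace first, then the whole buffer, and the entire
 * response is copied back on completion.
 */
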
/*
 * The controllers use an inline buffer instead of a mapped SGL for small,
 * single entry buffers.  Note that we treat a zero-length transfer like
 * a mapped SGL.
 */
static bool twa_command_mapped(struct scsi_cmnd *cmd)
{
	return scsi_sg_count(cmd) != 1 ||
		scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
}

/* This function will complete an aen request from the isr */
static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
{
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Command_Apache_Header *header;
	unsigned short aen;
	int retval = 1;

	header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
	tw_dev->posted_request_count--;
	aen = le16_to_cpu(header->status_block.error);
	full_command_packet = tw_dev->command_packet_virt[request_id];
	command_packet = &full_command_packet->command.oldcommand;

	/* First check for internal completion of set param for time sync */
	if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
		/* Keep reading the queue in case there are more aen's */
		if (twa_aen_read_queue(tw_dev, request_id))
			goto out2;
		else {
			retval = 0;
			goto out;
		}
	}

	switch (aen) {
	case TW_AEN_QUEUE_EMPTY:
		/* Quit reading the queue if this is the last one */
		break;
	case TW_AEN_SYNC_TIME_WITH_HOST:
		twa_aen_sync_time(tw_dev, request_id);
		retval = 0;
		goto out;
	default:
		twa_aen_queue_event(tw_dev, header);

		/* If there are more aen's, keep reading the queue */
		if (twa_aen_read_queue(tw_dev, request_id))
			goto out2;
		else {
			retval = 0;
			goto out;
		}
	}
	retval = 0;
out2:
	tw_dev->state[request_id] = TW_S_COMPLETED;
	twa_free_request_id(tw_dev, request_id);
	clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
out:
	return retval;
} /* End twa_aen_complete() */

/* This function will drain aen queue */
static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
{
	int request_id = 0;
	char cdb[TW_MAX_CDB_LEN];
	TW_SG_Entry sglist[1];
	int finished = 0, count = 0;
	TW_Command_Full *full_command_packet;
	TW_Command_Apache_Header *header;
	unsigned short aen;
	int first_reset = 0, queue = 0, retval = 1;

	if (no_check_reset)
		first_reset = 0;
	else
		first_reset = 1;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));

	/* Initialize cdb */
	memset(&cdb, 0, TW_MAX_CDB_LEN);
	cdb[0] = REQUEST_SENSE; /* opcode */
	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */

	/* Initialize sglist */
	memset(&sglist, 0, sizeof(TW_SG_Entry));
	sglist[0].length = TW_SECTOR_SIZE;
	sglist[0].address = tw_dev->generic_buffer_phys[request_id];

	if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
		goto out;
	}

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	do {
		/* Send command to the board */
		if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
			goto out;
		}

		/* Now poll for completion */
		if (twa_poll_response(tw_dev, request_id, 30)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
			tw_dev->posted_request_count--;
			goto out;
		}

		tw_dev->posted_request_count--;
		header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
		aen = le16_to_cpu(header->status_block.error);
		queue = 0;
		count++;

		switch (aen) {
		case TW_AEN_QUEUE_EMPTY:
			if (first_reset != 1)
				goto out;
			else
				finished = 1;
			break;
		case TW_AEN_SOFT_RESET:
			if (first_reset == 0)
				first_reset = 1;
			else
				queue = 1;
			break;
		case TW_AEN_SYNC_TIME_WITH_HOST:
			break;
		default:
			queue = 1;
		}

		/* Now queue an event info */
		if (queue)
			twa_aen_queue_event(tw_dev, header);
	} while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));

	if (count == TW_MAX_AEN_DRAIN)
		goto out;

	retval = 0;
out:
	tw_dev->state[request_id] = TW_S_INITIAL;
	return retval;
} /* End twa_aen_drain_queue() */

/* This function will queue an event */
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
{
	u32 local_time;
	TW_Event *event;
	unsigned short aen;
	char host[16];
	char *error_str;

	tw_dev->aen_count++;

	/* Fill out event info */
	event = tw_dev->event_queue[tw_dev->error_index];

	/* Check for clobber */
	host[0] = '\0';
	if (tw_dev->host) {
		sprintf(host, " scsi%d:", tw_dev->host->host_no);
		if (event->retrieved == TW_AEN_NOT_RETRIEVED)
			tw_dev->aen_clobber = 1;
	}

	aen = le16_to_cpu(header->status_block.error);
	memset(event, 0, sizeof(TW_Event));

	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
	/* event->time_stamp_sec overflows in y2106 */
	local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
	event->time_stamp_sec = local_time;
	event->aen_code = aen;
	event->retrieved = TW_AEN_NOT_RETRIEVED;
	event->sequence_id = tw_dev->error_sequence_id;
	tw_dev->error_sequence_id++;

	/* Check for embedded error string */
	error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);

	header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
	event->parameter_len = strlen(header->err_specific_desc);
	memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
	if (event->severity != TW_AEN_SEVERITY_DEBUG)
		printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
		       host,
		       twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
		       TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
		       error_str[0] == '\0' ?
		       twa_string_lookup(twa_aen_table, aen) : error_str,
		       header->err_specific_desc);
	else
		tw_dev->aen_count--;

	if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
		tw_dev->event_queue_wrapped = 1;
	tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
} /* End twa_aen_queue_event() */

/* This function will read the aen queue from the isr */
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
{
	char cdb[TW_MAX_CDB_LEN];
	TW_SG_Entry sglist[1];
	TW_Command_Full *full_command_packet;
	int retval = 1;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));

	/* Initialize cdb */
	memset(&cdb, 0, TW_MAX_CDB_LEN);
	cdb[0] = REQUEST_SENSE; /* opcode */
	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */

	/* Initialize sglist */
	memset(&sglist, 0, sizeof(TW_SG_Entry));
	sglist[0].length = TW_SECTOR_SIZE;
	sglist[0].address = tw_dev->generic_buffer_phys[request_id];

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	/* Now post the command packet */
	if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
		goto out;
	}
	retval = 0;
out:
	return retval;
} /* End twa_aen_read_queue() */

/* This function will look up an AEN severity string */
static char *twa_aen_severity_lookup(unsigned char severity_code)
{
	char *retval = NULL;

	if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
	    (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
		goto out;

	retval = twa_aen_severity_table[severity_code];
out:
	return retval;
} /* End twa_aen_severity_lookup() */

/* This function will sync firmware time with the host time */
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
{
	u32 schedulertime;
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Param_Apache *param;
	time64_t local_time;

	/* Fill out the command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	command_packet = &full_command_packet->command.oldcommand;
	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
	command_packet->request_id = request_id;
	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
	command_packet->size = TW_COMMAND_SIZE;
	command_packet->byte6_offset.parameter_count = cpu_to_le16(1);

	/* Setup the param */
	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
	memset(param, 0, TW_SECTOR_SIZE);
	param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
	param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
	param->parameter_size_bytes = cpu_to_le16(4);

	/* Convert system time in UTC to local time seconds since last
	   Sunday 12:00AM */
	local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
	div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
	schedulertime = cpu_to_le32(schedulertime % 604800);

	memcpy(param->data, &schedulertime, sizeof(u32));

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	/* Now post the command */
	twa_post_command_packet(tw_dev, request_id, 1);
} /* End twa_aen_sync_time() */
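
/*
 * Note on the SchedulerTime conversion above: the Unix epoch fell on a
 * Thursday, so subtracting 3 days (3 * 86400 seconds) shifts the reference
 * to a Sunday 12:00AM, and the remainder modulo 604800 (seconds per week)
 * is then the number of seconds since the most recent Sunday midnight in
 * local time, which is the value written into the SchedulerTime parameter.
 * For example, a local time of exactly 3 days past the epoch maps to 0.
 */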

/* This function will allocate memory and check if it is correctly aligned */
static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
{
	int i;
	dma_addr_t dma_handle;
	unsigned long *cpu_addr;
	int retval = 1;

	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
				      size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
	if (!cpu_addr) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
		goto out;
	}

	if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
		dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
				  cpu_addr, dma_handle);
		goto out;
	}

	memset(cpu_addr, 0, size*TW_Q_LENGTH);

	for (i = 0; i < TW_Q_LENGTH; i++) {
		switch(which) {
		case 0:
			tw_dev->command_packet_phys[i] = dma_handle+(i*size);
			tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
			break;
		case 1:
			tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
			tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
			break;
		}
	}
	retval = 0;
out:
	return retval;
} /* End twa_allocate_memory() */

/* This function will check the status register for unexpected bits */
static int twa_check_bits(u32 status_reg_value)
{
	int retval = 1;

	if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
		goto out;
	if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
		goto out;

	retval = 0;
out:
	return retval;
} /* End twa_check_bits() */
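
/*
 * Compatibility negotiation, as implemented in twa_check_srl() below: an
 * extended init-connection is first attempted with the driver's own
 * SRL/branch/build.  If the firmware does not report TW_CTLR_FW_COMPATIBLE,
 * a second attempt is made with the TW_BASE_FW_* values, and only if that
 * also fails is the controller rejected with an "upgrade firmware" or
 * "upgrade driver" message.
 */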

/* This function will check the srl and decide if we are compatible */
static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
{
	int retval = 1;
	unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
	unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
	u32 init_connect_result = 0;

	if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
			       TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
			       TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
			       TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
			       &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
			       &fw_on_ctlr_build, &init_connect_result)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
		goto out;
	}

	tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
	tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
	tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;

	/* Try base mode compatibility */
	if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
		if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
				       TW_EXTENDED_INIT_CONNECT,
				       TW_BASE_FW_SRL, TW_9000_ARCH_ID,
				       TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
				       &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
				       &fw_on_ctlr_branch, &fw_on_ctlr_build,
				       &init_connect_result)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
			goto out;
		}
		if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
			if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
			} else {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
			}
			goto out;
		}
		tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
		tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
		tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
	}

	/* Load rest of compatibility struct */
	strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
		sizeof(tw_dev->tw_compat_info.driver_version));
	tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
	tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
	tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
	tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
	tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
	tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
	tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
	tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
	tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;

	retval = 0;
out:
	return retval;
} /* End twa_check_srl() */

/* This function handles ioctl for the character device */
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	long timeout;
	unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
	dma_addr_t dma_handle;
	int request_id = 0;
	unsigned int sequence_id = 0;
	unsigned char event_index, start_index;
	TW_Ioctl_Driver_Command driver_command;
	TW_Ioctl_Buf_Apache *tw_ioctl;
	TW_Lock *tw_lock;
	TW_Command_Full *full_command_packet;
	TW_Compatibility_Info *tw_compat_info;
	TW_Event *event;
	ktime_t current_time;
	TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
	int retval = TW_IOCTL_ERROR_OS_EFAULT;
	void __user *argp = (void __user *)arg;

	mutex_lock(&twa_chrdev_mutex);

	/* Only let one of these through at a time */
	if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
		retval = TW_IOCTL_ERROR_OS_EINTR;
		goto out;
	}

	/* First copy down the driver command */
	if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
		goto out2;

	/* Check data buffer size */
	if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
		retval = TW_IOCTL_ERROR_OS_EINVAL;
		goto out2;
	}

	/* Hardware can only do multiple of 512 byte transfers */
	data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;

	/* Now allocate ioctl buf memory */
	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
	if (!cpu_addr) {
		retval = TW_IOCTL_ERROR_OS_ENOMEM;
		goto out2;
	}

	tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;

	/* Now copy down the entire ioctl */
	if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
		goto out3;

	/* See which ioctl we are doing */
	switch (cmd) {
	case TW_IOCTL_FIRMWARE_PASS_THROUGH:
		spin_lock_irqsave(tw_dev->host->host_lock, flags);
		twa_get_request_id(tw_dev, &request_id);

		/* Flag internal command */
		tw_dev->srb[request_id] = NULL;

		/* Flag chrdev ioctl */
		tw_dev->chrdev_request_id = request_id;

		full_command_packet = &tw_ioctl->firmware_command;

		/* Load request id and sglist for both command types */
		twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);

		memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));

		/* Now post the command packet to the controller */
		twa_post_command_packet(tw_dev, request_id, 1);
		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);

		timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;

		/* Now wait for command to complete */
		timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);

		/* We timed out, and didn't get an interrupt */
		if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
			/* Now we need to reset the board */
			printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
			       tw_dev->host->host_no, TW_DRIVER, 0x37,
			       cmd);
			retval = TW_IOCTL_ERROR_OS_EIO;
			twa_reset_device_extension(tw_dev);
			goto out3;
		}

		/* Now copy in the command packet response */
		memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));

		/* Now complete the io */
		spin_lock_irqsave(tw_dev->host->host_lock, flags);
		tw_dev->posted_request_count--;
		tw_dev->state[request_id] = TW_S_COMPLETED;
		twa_free_request_id(tw_dev, request_id);
		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
		break;
	case TW_IOCTL_GET_COMPATIBILITY_INFO:
		tw_ioctl->driver_command.status = 0;
		/* Copy compatibility struct into ioctl data buffer */
		tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
		memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
		break;
	case TW_IOCTL_GET_LAST_EVENT:
		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			} else
				tw_ioctl->driver_command.status = 0;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			tw_ioctl->driver_command.status = 0;
		}
		event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_FIRST_EVENT:
		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			} else
				tw_ioctl->driver_command.status = 0;
			event_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			tw_ioctl->driver_command.status = 0;
			event_index = 0;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_NEXT_EVENT:
		event = (TW_Event *)tw_ioctl->data_buffer;
		sequence_id = event->sequence_id;
		tw_ioctl->driver_command.status = 0;

		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			}
			start_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			start_index = 0;
		}
		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;

		if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
				tw_dev->aen_clobber = 1;
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
			break;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_PREVIOUS_EVENT:
		event = (TW_Event *)tw_ioctl->data_buffer;
		sequence_id = event->sequence_id;
		tw_ioctl->driver_command.status = 0;

		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			}
			start_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			start_index = 0;
		}
		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;

		if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
				tw_dev->aen_clobber = 1;
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
			break;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_LOCK:
		tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
		current_time = ktime_get();

		if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) ||
		    ktime_after(current_time, tw_dev->ioctl_time)) {
			tw_dev->ioctl_sem_lock = 1;
			tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec);
			tw_ioctl->driver_command.status = 0;
			tw_lock->time_remaining_msec = tw_lock->timeout_msec;
		} else {
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
			tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time);
		}
		break;
	case TW_IOCTL_RELEASE_LOCK:
		if (tw_dev->ioctl_sem_lock == 1) {
			tw_dev->ioctl_sem_lock = 0;
			tw_ioctl->driver_command.status = 0;
		} else {
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
		}
		break;
	default:
		retval = TW_IOCTL_ERROR_OS_ENOTTY;
		goto out3;
	}

	/* Now copy the entire response to userspace */
	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
		retval = 0;
out3:
	/* Now free ioctl buf memory */
	dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
out2:
	mutex_unlock(&tw_dev->ioctl_lock);
out:
	mutex_unlock(&twa_chrdev_mutex);
	return retval;
} /* End twa_chrdev_ioctl() */
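
/*
 * Note on the GET_NEXT/GET_PREVIOUS index arithmetic above: events live in
 * a circular queue of TW_Q_LENGTH entries with consecutive sequence_ids, so
 * the entry holding sequence s sits (s - event_queue[start_index]->sequence_id)
 * slots past start_index.  For example, with start_index 0 and a base
 * sequence_id of 5, GET_NEXT for sequence_id 7 computes
 * (0 + 7 - 5 + 1) % TW_Q_LENGTH = 3, the slot holding sequence 8.
 */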

/* This function handles open for the character device */
/* NOTE that this function will race with remove. */
static int twa_chrdev_open(struct inode *inode, struct file *file)
{
	unsigned int minor_number;
	int retval = TW_IOCTL_ERROR_OS_ENODEV;

	if (!capable(CAP_SYS_ADMIN)) {
		retval = -EACCES;
		goto out;
	}

	minor_number = iminor(inode);
	if (minor_number >= twa_device_extension_count)
		goto out;
	retval = 0;
out:
	return retval;
} /* End twa_chrdev_open() */

/* This function will print readable messages from status register errors */
static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
{
	int retval = 1;

	/* Check for various error conditions and handle them appropriately */
	if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
		writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
	}

	if (status_reg_value & TW_STATUS_PCI_ABORT) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
		writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
		pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
	}

	if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
		if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
		     (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
		    (!test_bit(TW_IN_RESET, &tw_dev->flags)))
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
		writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
	}

	if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
		if (tw_dev->reset_print == 0) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
			tw_dev->reset_print = 1;
		}
		goto out;
	}
	retval = 0;
out:
	return retval;
} /* End twa_decode_bits() */

/* This function will empty the response queue */
static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
{
	u32 status_reg_value, response_que_value;
	int count = 0, retval = 1;

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

	while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
		response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
		count++;
	}
	if (count == TW_MAX_RESPONSE_DRAIN)
		goto out;

	retval = 0;
out:
	return retval;
} /* End twa_empty_response_queue() */

/* This function will clear the pchip/response queue on 9550SX */
static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
{
	u32 response_que_value = 0;
	unsigned long before;
	int retval = 1;

	if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
		before = jiffies;
		while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
			response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
			msleep(1);
			if (time_after(jiffies, before + HZ * 30))
				goto out;
		}
		/* P-chip settle time */
		msleep(500);
		retval = 0;
	} else
		retval = 0;
out:
	return retval;
} /* End twa_empty_response_queue_large() */

/* This function passes sense keys from firmware to scsi layer */
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
{
	TW_Command_Full *full_command_packet;
	unsigned short error;
	int retval = 1;
	char *error_str;

	full_command_packet = tw_dev->command_packet_virt[request_id];

	/* Check for embedded error string */
	error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);

	/* Don't print error for Logical unit not supported during rollcall */
	error = le16_to_cpu(full_command_packet->header.status_block.error);
	if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
		if (print_host)
			printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
			       tw_dev->host->host_no,
			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
			       full_command_packet->header.status_block.error,
			       error_str[0] == '\0' ?
			       twa_string_lookup(twa_error_table,
						 full_command_packet->header.status_block.error) : error_str,
			       full_command_packet->header.err_specific_desc);
		else
			printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
			       full_command_packet->header.status_block.error,
			       error_str[0] == '\0' ?
			       twa_string_lookup(twa_error_table,
						 full_command_packet->header.status_block.error) : error_str,
			       full_command_packet->header.err_specific_desc);
	}

	if (copy_sense) {
		memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
		tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
		retval = TW_ISR_DONT_RESULT;
		goto out;
	}
	retval = 0;
out:
	return retval;
} /* End twa_fill_sense() */

/* This function will free up device extension resources */
static void twa_free_device_extension(TW_Device_Extension *tw_dev)
{
	if (tw_dev->command_packet_virt[0])
		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
				  sizeof(TW_Command_Full) * TW_Q_LENGTH,
				  tw_dev->command_packet_virt[0],
				  tw_dev->command_packet_phys[0]);

	if (tw_dev->generic_buffer_virt[0])
		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
				  TW_SECTOR_SIZE * TW_Q_LENGTH,
				  tw_dev->generic_buffer_virt[0],
				  tw_dev->generic_buffer_phys[0]);

	kfree(tw_dev->event_queue[0]);
} /* End twa_free_device_extension() */

/* This function will free a request id */
static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
{
	tw_dev->free_queue[tw_dev->free_tail] = request_id;
	tw_dev->state[request_id] = TW_S_FINISHED;
	tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
} /* End twa_free_request_id() */

/* This function will get parameter table entries from the firmware */
static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
{
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Param_Apache *param;
	void *retval = NULL;

	/* Setup the command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	command_packet = &full_command_packet->command.oldcommand;

	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
	command_packet->size = TW_COMMAND_SIZE;
	command_packet->request_id = request_id;
	command_packet->byte6_offset.block_count = cpu_to_le16(1);

	/* Now setup the param */
	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
	memset(param, 0, TW_SECTOR_SIZE);
	param->table_id = cpu_to_le16(table_id | 0x8000);
	param->parameter_id = cpu_to_le16(parameter_id);
	param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);

	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);

	/* Post the command packet to the board */
	twa_post_command_packet(tw_dev, request_id, 1);

	/* Poll for completion */
	if (twa_poll_response(tw_dev, request_id, 30))
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
	else
		retval = (void *)&(param->data[0]);

	tw_dev->posted_request_count--;
	tw_dev->state[request_id] = TW_S_INITIAL;

	return retval;
} /* End twa_get_param() */

/* This function will assign an available request id */
static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
{
	*request_id = tw_dev->free_queue[tw_dev->free_head];
	tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
	tw_dev->state[*request_id] = TW_S_STARTED;
} /* End twa_get_request_id() */

/* This function will send an initconnection command to controller */
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
			      u32 set_features, unsigned short current_fw_srl,
			      unsigned short current_fw_arch_id,
			      unsigned short current_fw_branch,
			      unsigned short current_fw_build,
			      unsigned short *fw_on_ctlr_srl,
			      unsigned short *fw_on_ctlr_arch_id,
			      unsigned short *fw_on_ctlr_branch,
			      unsigned short *fw_on_ctlr_build,
			      u32 *init_connect_result)
{
	TW_Command_Full *full_command_packet;
	TW_Initconnect *tw_initconnect;
	int request_id = 0, retval = 1;

	/* Initialize InitConnection command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	full_command_packet->header.header_desc.size_header = 128;

	tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
	tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
	tw_initconnect->request_id = request_id;
	tw_initconnect->message_credits = cpu_to_le16(message_credits);
	tw_initconnect->features = set_features;

	/* Turn on 64-bit sgl support if we need to */
	tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;

	tw_initconnect->features = cpu_to_le32(tw_initconnect->features);

	if (set_features & TW_EXTENDED_INIT_CONNECT) {
		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
		tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
		tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
		tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
		tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
	} else
		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;

	/* Send command packet to the board */
	twa_post_command_packet(tw_dev, request_id, 1);

	/* Poll for completion */
	if (twa_poll_response(tw_dev, request_id, 30)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
	} else {
		if (set_features & TW_EXTENDED_INIT_CONNECT) {
			*fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
			*fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
			*fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
			*fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
			*init_connect_result = le32_to_cpu(tw_initconnect->result);
		}
		retval = 0;
	}

	tw_dev->posted_request_count--;
	tw_dev->state[request_id] = TW_S_INITIAL;

	return retval;
} /* End twa_initconnection() */

/* This function will initialize the fields of a device extension */
static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
{
	int i, retval = 1;

	/* Initialize command packet buffers */
	if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
		goto out;
	}

	/* Initialize generic buffer */
	if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
		goto out;
	}

	/* Allocate event info space */
	tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
	if (!tw_dev->event_queue[0]) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
		goto out;
	}


	for (i = 0; i < TW_Q_LENGTH; i++) {
		tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
		tw_dev->free_queue[i] = i;
		tw_dev->state[i] = TW_S_INITIAL;
	}

	tw_dev->pending_head = TW_Q_START;
	tw_dev->pending_tail = TW_Q_START;
	tw_dev->free_head = TW_Q_START;
	tw_dev->free_tail = TW_Q_START;
	tw_dev->error_sequence_id = 1;
	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;

	mutex_init(&tw_dev->ioctl_lock);
	init_waitqueue_head(&tw_dev->ioctl_wqueue);

	retval = 0;
out:
	return retval;
} /* End twa_initialize_device_extension() */
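
/*
 * Interrupt handling overview: after ruling out an in-progress reset, the
 * ISR below services up to four conditions in order: host interrupts
 * (simply cleared), attention interrupts (kick off an AEN read unless one
 * is already in flight), command interrupts (re-post commands that were
 * pended while the controller queue was full), and response interrupts
 * (drain completions and hand them back to the midlayer or the waiting
 * ioctl).
 */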

/* This function is the interrupt service routine */
static irqreturn_t twa_interrupt(int irq, void *dev_instance)
{
	int request_id, error = 0;
	u32 status_reg_value;
	TW_Response_Queue response_que;
	TW_Command_Full *full_command_packet;
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
	int handled = 0;

	/* Get the per adapter lock */
	spin_lock(tw_dev->host->host_lock);

	/* Read the registers */
	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

	/* Check if this is our interrupt, otherwise bail */
	if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
		goto twa_interrupt_bail;

	handled = 1;

	/* If we are resetting, bail */
	if (test_bit(TW_IN_RESET, &tw_dev->flags))
		goto twa_interrupt_bail;

	/* Check controller for errors */
	if (twa_check_bits(status_reg_value)) {
		if (twa_decode_bits(tw_dev, status_reg_value)) {
			TW_CLEAR_ALL_INTERRUPTS(tw_dev);
			goto twa_interrupt_bail;
		}
	}

	/* Handle host interrupt */
	if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
		TW_CLEAR_HOST_INTERRUPT(tw_dev);

	/* Handle attention interrupt */
	if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
		TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
		if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
			twa_get_request_id(tw_dev, &request_id);

			error = twa_aen_read_queue(tw_dev, request_id);
			if (error) {
				tw_dev->state[request_id] = TW_S_COMPLETED;
				twa_free_request_id(tw_dev, request_id);
				clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
			}
		}
	}

	/* Handle command interrupt */
	if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
		TW_MASK_COMMAND_INTERRUPT(tw_dev);
		/* Drain as many pending commands as we can */
		while (tw_dev->pending_request_count > 0) {
			request_id = tw_dev->pending_queue[tw_dev->pending_head];
			if (tw_dev->state[request_id] != TW_S_PENDING) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
				TW_CLEAR_ALL_INTERRUPTS(tw_dev);
				goto twa_interrupt_bail;
			}
			if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
				tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
				tw_dev->pending_request_count--;
			} else {
				/* If we get here, we will continue re-posting on the next command interrupt */
				break;
			}
		}
	}

	/* Handle response interrupt */
	if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {

		/* Drain the response queue from the board */
		while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
			/* Complete the response */
			response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
			request_id = TW_RESID_OUT(response_que.response_id);
			full_command_packet = tw_dev->command_packet_virt[request_id];
			error = 0;
			/* Check for command packet errors */
			if (full_command_packet->command.newcommand.status != 0) {
				if (tw_dev->srb[request_id] != NULL) {
					error = twa_fill_sense(tw_dev, request_id, 1, 1);
				} else {
					/* Skip ioctl error prints */
					if (request_id != tw_dev->chrdev_request_id) {
						error = twa_fill_sense(tw_dev, request_id, 0, 1);
					}
				}
			}

			/* Check for correct state */
			if (tw_dev->state[request_id] != TW_S_POSTED) {
				if (tw_dev->srb[request_id] != NULL) {
					TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
					goto twa_interrupt_bail;
				}
			}

			/* Check for internal command completion */
			if (tw_dev->srb[request_id] == NULL) {
				if (request_id != tw_dev->chrdev_request_id) {
					if (twa_aen_complete(tw_dev, request_id))
						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
				} else {
					tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
					wake_up(&tw_dev->ioctl_wqueue);
				}
			} else {
				struct scsi_cmnd *cmd;

				cmd = tw_dev->srb[request_id];

				twa_scsiop_execute_scsi_complete(tw_dev, request_id);
				/* If no error command was a success */
				if (error == 0) {
					cmd->result = (DID_OK << 16);
				}

				/* If error, command failed */
				if (error == 1) {
					/* Ask for a host reset */
					cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
				}

				/* Report residual bytes for single sgl */
				if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
					if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
						scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
				}

				/* Now complete the io */
				if (twa_command_mapped(cmd))
					scsi_dma_unmap(cmd);
				cmd->scsi_done(cmd);
				tw_dev->state[request_id] = TW_S_COMPLETED;
				twa_free_request_id(tw_dev, request_id);
				tw_dev->posted_request_count--;
			}

			/* Check for valid status after each drain */
			status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
			if (twa_check_bits(status_reg_value)) {
				if (twa_decode_bits(tw_dev, status_reg_value)) {
					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
					goto twa_interrupt_bail;
				}
			}
		}
	}

twa_interrupt_bail:
	spin_unlock(tw_dev->host->host_lock);
	return IRQ_RETVAL(handled);
} /* End twa_interrupt() */
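
/*
 * Note for twa_load_sgl() below: "pae" is set on 32-bit kernels built with
 * a 64-bit dma_addr_t, where each legacy TW_SG_Entry carries an extra
 * 32-bit word of address; the sgl offset and oldcommand->size are adjusted
 * by that amount so the firmware sees a consistent packet size.
 */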

/* This function will load the request id and various sgls for ioctls */
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
{
	TW_Command *oldcommand;
	TW_Command_Apache *newcommand;
	TW_SG_Entry *sgl;
	unsigned int pae = 0;

	if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
		pae = 1;

	if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
		newcommand = &full_command_packet->command.newcommand;
		newcommand->request_id__lunl =
			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
		if (length) {
			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
			newcommand->sg_list[0].length = cpu_to_le32(length);
		}
		newcommand->sgl_entries__lunh =
			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
	} else {
		oldcommand = &full_command_packet->command.oldcommand;
		oldcommand->request_id = request_id;

		if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
			/* Load the sg list */
			if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
				sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
			else
				sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
			sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
			sgl->length = cpu_to_le32(length);

			oldcommand->size += pae;
		}
	}
} /* End twa_load_sgl() */

/* This function will poll for a response interrupt of a request */
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
{
	int retval = 1, found = 0, response_request_id;
	TW_Response_Queue response_queue;
	TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];

	if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
		response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
		response_request_id = TW_RESID_OUT(response_queue.response_id);
		if (request_id != response_request_id) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
			goto out;
		}
		if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
			if (full_command_packet->command.newcommand.status != 0) {
				/* bad response */
				twa_fill_sense(tw_dev, request_id, 0, 0);
				goto out;
			}
			found = 1;
		} else {
			if (full_command_packet->command.oldcommand.status != 0) {
				/* bad response */
				twa_fill_sense(tw_dev, request_id, 0, 0);
				goto out;
			}
			found = 1;
		}
	}

	if (found)
		retval = 0;
out:
	return retval;
} /* End twa_poll_response() */

/* This function will poll the status register for a flag */
static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
{
	u32 status_reg_value;
	unsigned long before;
	int retval = 1;

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
	before = jiffies;

	if (twa_check_bits(status_reg_value))
		twa_decode_bits(tw_dev, status_reg_value);

	while ((status_reg_value & flag) != flag) {
		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

		if (twa_check_bits(status_reg_value))
			twa_decode_bits(tw_dev, status_reg_value);

		if (time_after(jiffies, before + HZ * seconds))
			goto out;

		msleep(50);
	}
	retval = 0;
out:
	return retval;
} /* End twa_poll_status() */
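
/*
 * twa_poll_status_gone() below is the mirror image of twa_poll_status()
 * above: it polls roughly every 50 ms, for up to 'seconds' seconds, waiting
 * for the given status flag to clear rather than to be set.  Both return 0
 * on success and 1 on timeout.
 */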

/* This function will poll the status register for disappearance of a flag */
static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
{
	u32 status_reg_value;
	unsigned long before;
	int retval = 1;

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
	before = jiffies;

	if (twa_check_bits(status_reg_value))
		twa_decode_bits(tw_dev, status_reg_value);

	while ((status_reg_value & flag) != 0) {
		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
		if (twa_check_bits(status_reg_value))
			twa_decode_bits(tw_dev, status_reg_value);

		if (time_after(jiffies, before + HZ * seconds))
			goto out;

		msleep(50);
	}
	retval = 0;
out:
	return retval;
} /* End twa_poll_status_gone() */

/* This function will attempt to post a command packet to the board */
static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
{
	u32 status_reg_value;
	dma_addr_t command_que_value;
	int retval = 1;

	command_que_value = tw_dev->command_packet_phys[request_id];

	/* For 9650SE write low 4 bytes first */
	if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
	    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
		command_que_value += TW_COMMAND_OFFSET;
		writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
	}

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

	if (twa_check_bits(status_reg_value))
		twa_decode_bits(tw_dev, status_reg_value);

	if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {

		/* Only pend internal driver commands */
		if (!internal) {
			retval = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}

		/* Couldn't post the command packet, so we do it later */
		if (tw_dev->state[request_id] != TW_S_PENDING) {
			tw_dev->state[request_id] = TW_S_PENDING;
			tw_dev->pending_request_count++;
			if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
				tw_dev->max_pending_request_count = tw_dev->pending_request_count;
			}
			tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
			tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
		}
		TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
		goto out;
	} else {
		if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
		    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
			/* Now write upper 4 bytes */
			writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
		} else {
			if (sizeof(dma_addr_t) > 4) {
				command_que_value += TW_COMMAND_OFFSET;
				writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
				writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
			} else {
				writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
			}
		}
		tw_dev->state[request_id] = TW_S_POSTED;
		tw_dev->posted_request_count++;
		if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
			tw_dev->max_posted_request_count = tw_dev->posted_request_count;
		}
	}
	retval = 0;
out:
	return retval;
} /* End twa_post_command_packet() */
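
/*
 * Reset path: twa_reset_device_extension() below fails all outstanding
 * commands back to the midlayer with DID_RESET, reinitializes the request
 * queues and counters, and then runs twa_reset_sequence(), which soft-resets
 * the controller and retries (up to TW_MAX_RESET_TRIES) until the
 * microcontroller is ready, the response and AEN queues are drained, and
 * the compatibility check passes.
 */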
					scsi_dma_unmap(cmd);
				cmd->scsi_done(cmd);
			}
		}
	}

	/* Reset queues and counts */
	for (i = 0; i < TW_Q_LENGTH; i++) {
		tw_dev->free_queue[i] = i;
		tw_dev->state[i] = TW_S_INITIAL;
	}
	tw_dev->free_head = TW_Q_START;
	tw_dev->free_tail = TW_Q_START;
	tw_dev->posted_request_count = 0;
	tw_dev->pending_request_count = 0;
	tw_dev->pending_head = TW_Q_START;
	tw_dev->pending_tail = TW_Q_START;
	tw_dev->reset_print = 0;

	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);

	if (twa_reset_sequence(tw_dev, 1))
		goto out;

	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
	clear_bit(TW_IN_RESET, &tw_dev->flags);
	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;

	retval = 0;
out:
	return retval;
} /* End twa_reset_device_extension() */

/* This function will reset a controller */
static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
{
	int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;

	while (tries < TW_MAX_RESET_TRIES) {
		if (do_soft_reset) {
			TW_SOFT_RESET(tw_dev);
			/* Clear pchip/response queue on 9550SX */
			if (twa_empty_response_queue_large(tw_dev)) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
				do_soft_reset = 1;
				tries++;
				continue;
			}
		}

		/* Make sure controller is in a good state */
		if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		/* Empty response queue */
		if (twa_empty_response_queue(tw_dev)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		flashed = 0;

		/* Check for compatibility/flash */
		if (twa_check_srl(tw_dev, &flashed)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		} else {
			if (flashed) {
				tries++;
				continue;
			}
		}

		/* Drain the AEN queue */
		if (twa_aen_drain_queue(tw_dev, soft_reset)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		/* If we got here, controller is in a good state */
		retval = 0;
		goto out;
	}
out:
	return retval;
} /* End twa_reset_sequence() */

/* This function returns unit geometry in cylinders/heads/sectors */
static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
{
	int heads, sectors, cylinders;
	TW_Device_Extension *tw_dev;

	tw_dev = (TW_Device_Extension *)sdev->host->hostdata;

	if (capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
		cylinders = sector_div(capacity, heads * sectors);
	} else {
		heads = 64;
		sectors = 32;
		cylinders = sector_div(capacity, heads * sectors);
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	return 0;
} /* End twa_scsi_biosparam() */

/* This is the new scsi eh reset function */
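/*
 * Note: invoked from the SCSI midlayer error handler when a command times
 * out. The ioctl mutex is taken first so a host reset cannot race with an
 * in-flight ioctl, then twa_reset_device_extension() aborts outstanding
 * requests and re-runs the controller reset sequence.
 */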
static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
{
	TW_Device_Extension *tw_dev = NULL;
	int retval = FAILED;

	tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;

	tw_dev->num_resets++;

	sdev_printk(KERN_WARNING, SCpnt->device,
		"WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
		TW_DRIVER, 0x2c, SCpnt->cmnd[0]);

	/* Make sure we are not issuing an ioctl or resetting from ioctl */
	mutex_lock(&tw_dev->ioctl_lock);

	/* Now reset the card and some of the device extension data */
	if (twa_reset_device_extension(tw_dev)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
		goto out;
	}

	retval = SUCCESS;
out:
	mutex_unlock(&tw_dev->ioctl_lock);
	return retval;
} /* End twa_scsi_eh_reset() */

/* This is the main scsi queue function to handle scsi opcodes */
static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
	int request_id, retval;
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;

	/* If we are resetting due to timed out ioctl, report as busy */
	if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/* Check if this FW supports luns */
	if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
		SCpnt->result = (DID_BAD_TARGET << 16);
		done(SCpnt);
		retval = 0;
		goto out;
	}

	/* Save done function into scsi_cmnd struct */
	SCpnt->scsi_done = done;

	/* Get a free request id */
	twa_get_request_id(tw_dev, &request_id);

	/* Save the scsi command for use by the ISR */
	tw_dev->srb[request_id] = SCpnt;

	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
	switch (retval) {
	case SCSI_MLQUEUE_HOST_BUSY:
		if (twa_command_mapped(SCpnt))
			scsi_dma_unmap(SCpnt);
		twa_free_request_id(tw_dev, request_id);
		break;
	case 1:
		SCpnt->result = (DID_ERROR << 16);
		if (twa_command_mapped(SCpnt))
			scsi_dma_unmap(SCpnt);
		done(SCpnt);
		tw_dev->state[request_id] = TW_S_COMPLETED;
		twa_free_request_id(tw_dev, request_id);
		retval = 0;
	}
out:
	return retval;
} /* End twa_scsi_queue() */

static DEF_SCSI_QCMD(twa_scsi_queue)

/* This function hands scsi cdb's to the firmware */
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
{
	TW_Command_Full *full_command_packet;
	TW_Command_Apache *command_packet;
	u32 num_sectors = 0x0;
	int i, sg_count;
	struct scsi_cmnd *srb = NULL;
	struct scatterlist *sglist = NULL, *sg;
	int retval = 1;

	if (tw_dev->srb[request_id]) {
		srb = tw_dev->srb[request_id];
		if (scsi_sglist(srb))
			sglist = scsi_sglist(srb);
	}

	/* Initialize command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	full_command_packet->header.header_desc.size_header = 128;
	full_command_packet->header.status_block.error = 0;
	full_command_packet->header.status_block.severity__reserved = 0;

	command_packet = &full_command_packet->command.newcommand;
	command_packet->status = 0;
	command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);

	/* We forced 16 byte cdb use earlier */
	if (!cdb)
		memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
	else
		memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);

	if (srb) {
		command_packet->unit = srb->device->id;
		command_packet->request_id__lunl =
			cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
	} else {
		command_packet->request_id__lunl =
			cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
		command_packet->unit = 0;
	}

	command_packet->sgl_offset = 16;

	if (!sglistarg) {
		/* Map sglist from scsi layer to cmd packet */

		if (scsi_sg_count(srb)) {
			if (!twa_command_mapped(srb)) {
				if (srb->sc_data_direction == DMA_TO_DEVICE ||
				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
					scsi_sg_copy_to_buffer(srb,
							       tw_dev->generic_buffer_virt[request_id],
							       TW_SECTOR_SIZE);
				command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
				command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
			} else {
				sg_count = scsi_dma_map(srb);
				if (sg_count < 0)
					goto out;

				scsi_for_each_sg(srb, sg, sg_count, i) {
					command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
					command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
					if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
						goto out;
					}
				}
			}
			command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
		}
	} else {
		/* Internal cdb post */
		for (i = 0; i < use_sg; i++) {
			command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
			command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
			if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
				goto out;
			}
		}
		command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
	}

	if (srb) {
		if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
			num_sectors = (u32)srb->cmnd[4];

		if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
			num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
	}

	/* Update sector statistic */
	tw_dev->sector_count = num_sectors;
	if (tw_dev->sector_count > tw_dev->max_sector_count)
		tw_dev->max_sector_count = tw_dev->sector_count;

	/* Update SG statistics */
	if (srb) {
		tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
		if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
			tw_dev->max_sgl_entries = tw_dev->sgl_entries;
	}

	/* Now post the command to the board */
	if (srb) {
		retval = twa_post_command_packet(tw_dev, request_id, 0);
	} else {
		twa_post_command_packet(tw_dev, request_id, 1);
		retval = 0;
	}
out:
	return retval;
} /* End twa_scsiop_execute_scsi() */

/* This function completes an execute scsi operation */
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
{
	struct scsi_cmnd *cmd = tw_dev->srb[request_id];

	if (!twa_command_mapped(cmd) &&
	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
		if (scsi_sg_count(cmd) == 1) {
			void *buf = tw_dev->generic_buffer_virt[request_id];

			scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
		}
	}
} /* End twa_scsiop_execute_scsi_complete() */

/* This function tells the controller to shut down */
static void __twa_shutdown(TW_Device_Extension *tw_dev)
{
	/* Disable interrupts */
	TW_DISABLE_INTERRUPTS(tw_dev);

	/* Free up the IRQ */
	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);

	printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);

	/* Tell the card we are shutting down */
	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
	} else {
		printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
	}

	/* Clear all interrupts just before exit */
	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
} /* End __twa_shutdown() */

/* Wrapper for __twa_shutdown */
static void twa_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	__twa_shutdown(tw_dev);
} /* End twa_shutdown() */

/* This function will look up a string */
static char *twa_string_lookup(twa_message_type *table, unsigned int code)
{
	int index;

	for (index = 0; ((code != table[index].code) &&
		(table[index].text != (char *)0)); index++);
	return(table[index].text);
} /* End twa_string_lookup() */

/* This function gets called when a disk is coming on-line */
static int twa_slave_configure(struct scsi_device *sdev)
{
	/* Force 60 second timeout */
	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);

	return 0;
} /* End twa_slave_configure() */

/* scsi_host_template initializer */
static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.name			= "3ware 9000 Storage Controller",
	.queuecommand		= twa_scsi_queue,
	.eh_host_reset_handler	= twa_scsi_eh_reset,
	.bios_param		= twa_scsi_biosparam,
	.change_queue_depth	= scsi_change_queue_depth,
	.can_queue		= TW_Q_LENGTH-2,
	.slave_configure	= twa_slave_configure,
	.this_id		= -1,
	.sg_tablesize		= TW_APACHE_MAX_SGL_LENGTH,
	.max_sectors		= TW_MAX_SECTORS,
	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= twa_host_attrs,
	.emulated		= 1,
	.no_write_same		= 1,
};

/* This function will probe and initialize a card */
static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host = NULL;
	TW_Device_Extension *tw_dev;
	unsigned long mem_addr, mem_len;
	int retval = -ENODEV;

	retval = pci_enable_device(pdev);
	if (retval) {
		TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
		retval = -ENODEV;
		goto out_disable_device;
	}

	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
	if (!host) {
		TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
		retval = -ENOMEM;
		goto out_disable_device;
	}
	tw_dev = (TW_Device_Extension *)host->hostdata;

	/* Save values to device extension */
	tw_dev->host = host;
	tw_dev->tw_pci_dev = pdev;

	if (twa_initialize_device_extension(tw_dev)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
		retval = -ENOMEM;
		goto out_free_device_extension;
	}

	/* Request IO regions */
	retval = pci_request_regions(pdev, "3w-9xxx");
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
		goto out_free_device_extension;
	}

	if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
		mem_addr = pci_resource_start(pdev, 1);
		mem_len = pci_resource_len(pdev, 1);
	} else {
		mem_addr = pci_resource_start(pdev, 2);
		mem_len = pci_resource_len(pdev, 2);
	}

	/* Save base address */
	tw_dev->base_addr = ioremap(mem_addr, mem_len);
	if (!tw_dev->base_addr) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
		retval = -ENOMEM;
		goto out_release_mem_region;
	}

	/* Disable interrupts on the card */
	TW_DISABLE_INTERRUPTS(tw_dev);

	/* Initialize the card */
	if (twa_reset_sequence(tw_dev, 0)) {
		retval = -ENOMEM;
		goto out_iounmap;
	}

	/* Set host specific parameters */
	if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
	    (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
		host->max_id = TW_MAX_UNITS_9650SE;
	else
		host->max_id = TW_MAX_UNITS;

	host->max_cmd_len = TW_MAX_CDB_LEN;

	/* Channels aren't supported by adapter */
	host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
	host->max_channel = 0;

	/* Register the card with the kernel SCSI layer */
	retval = scsi_add_host(host, &pdev->dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
		goto out_iounmap;
	}

	pci_set_drvdata(pdev, host);

	printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
	       host->host_no, mem_addr, pdev->irq);
	printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
	       host->host_no,
	       (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
				     TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
	       (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
				     TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
	       le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
				     TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));

	/* Try to enable MSI */
	if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
	    !pci_enable_msi(pdev))
		set_bit(TW_USING_MSI, &tw_dev->flags);

	/* Now setup the interrupt handler */
	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
		goto out_remove_host;
	}

	twa_device_extension_list[twa_device_extension_count] = tw_dev;
	twa_device_extension_count++;

	/* Re-enable interrupts on the card */
	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);

	/* Finally, scan the host */
	scsi_scan_host(host);

	if (twa_major == -1) {
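		/* Major number 0 asks the kernel for a dynamically assigned major for the "twa" control device */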
		if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
			TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
	}
	return 0;

out_remove_host:
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);
	scsi_remove_host(host);
out_iounmap:
	iounmap(tw_dev->base_addr);
out_release_mem_region:
	pci_release_regions(pdev);
out_free_device_extension:
	twa_free_device_extension(tw_dev);
	scsi_host_put(host);
out_disable_device:
	pci_disable_device(pdev);

	return retval;
} /* End twa_probe() */

/* This function is called to remove a device */
static void twa_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	scsi_remove_host(tw_dev->host);

	/* Unregister character device */
	if (twa_major >= 0) {
		unregister_chrdev(twa_major, "twa");
		twa_major = -1;
	}

	/* Shutdown the card */
	__twa_shutdown(tw_dev);

	/* Disable MSI if enabled */
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);

	/* Free IO remapping */
	iounmap(tw_dev->base_addr);

	/* Free up the mem region */
	pci_release_regions(pdev);

	/* Free up device extension resources */
	twa_free_device_extension(tw_dev);

	scsi_host_put(tw_dev->host);
	pci_disable_device(pdev);
	twa_device_extension_count--;
} /* End twa_remove() */

#ifdef CONFIG_PM
/* This function is called on PCI suspend */
static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);

	TW_DISABLE_INTERRUPTS(tw_dev);
	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);

	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);

	/* Tell the card we are shutting down */
	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
	} else {
		printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
	}
	TW_CLEAR_ALL_INTERRUPTS(tw_dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
} /* End twa_suspend() */

/* This function is called on PCI resume */
static int twa_resume(struct pci_dev *pdev)
{
	int retval = 0;
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	retval = pci_enable_device(pdev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
		return retval;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
resume"); 2243 retval = -ENODEV; 2244 goto out_disable_device; 2245 } 2246 2247 /* Initialize the card */ 2248 if (twa_reset_sequence(tw_dev, 0)) { 2249 retval = -ENODEV; 2250 goto out_disable_device; 2251 } 2252 2253 /* Now setup the interrupt handler */ 2254 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev); 2255 if (retval) { 2256 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume"); 2257 retval = -ENODEV; 2258 goto out_disable_device; 2259 } 2260 2261 /* Now enable MSI if enabled */ 2262 if (test_bit(TW_USING_MSI, &tw_dev->flags)) 2263 pci_enable_msi(pdev); 2264 2265 /* Re-enable interrupts on the card */ 2266 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev); 2267 2268 printk(KERN_WARNING "3w-9xxx: Resume complete.\n"); 2269 return 0; 2270 2271 out_disable_device: 2272 scsi_remove_host(host); 2273 pci_disable_device(pdev); 2274 2275 return retval; 2276 } /* End twa_resume() */ 2277 #endif 2278 2279 /* PCI Devices supported by this driver */ 2280 static struct pci_device_id twa_pci_tbl[] = { 2281 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000, 2282 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 2283 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX, 2284 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 2285 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE, 2286 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 2287 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA, 2288 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 2289 { } 2290 }; 2291 MODULE_DEVICE_TABLE(pci, twa_pci_tbl); 2292 2293 /* pci_driver initializer */ 2294 static struct pci_driver twa_driver = { 2295 .name = "3w-9xxx", 2296 .id_table = twa_pci_tbl, 2297 .probe = twa_probe, 2298 .remove = twa_remove, 2299 #ifdef CONFIG_PM 2300 .suspend = twa_suspend, 2301 .resume = twa_resume, 2302 #endif 2303 .shutdown = twa_shutdown 2304 }; 2305 2306 /* This function is called on driver initialization */ 2307 static int __init twa_init(void) 2308 { 2309 printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION); 2310 2311 return pci_register_driver(&twa_driver); 2312 } /* End twa_init() */ 2313 2314 /* This function is called on driver exit */ 2315 static void __exit twa_exit(void) 2316 { 2317 pci_unregister_driver(&twa_driver); 2318 } /* End twa_exit() */ 2319 2320 module_init(twa_init); 2321 module_exit(twa_exit); 2322 2323