/*
   3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.

   Written By: Adam Radford <aradford@gmail.com>
   Modifications By: Tom Couch

   Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
   Copyright (C) 2010 LSI Corporation.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   NO WARRANTY
   THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
   CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
   LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
   solely responsible for determining the appropriateness of using and
   distributing the Program and assumes all risks associated with its
   exercise of rights under this Agreement, including but not limited to
   the risks and costs of program errors, damage to or loss of data,
   programs or equipment, and unavailability or interruption of operations.

   DISCLAIMER OF LIABILITY
   NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
   ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
   TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
   USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
   HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

   Bugs/Comments/Suggestions should be mailed to:
   aradford@gmail.com

   Note: This version of the driver does not contain a bundled firmware
         image.

   History
   -------
   2.26.02.000 - Driver cleanup for kernel submission.
   2.26.02.001 - Replace schedule_timeout() calls with msleep().
   2.26.02.002 - Add support for PAE mode.
                 Add lun support.
                 Fix twa_remove() to free irq handler/unregister_chrdev()
                 before shutting down card.
                 Change to new 'change_queue_depth' api.
                 Fix 'handled=1' ISR usage, remove bogus IRQ check.
                 Remove un-needed eh_abort handler.
                 Add support for embedded firmware error strings.
   2.26.02.003 - Correctly handle single sgl's with use_sg=1.
   2.26.02.004 - Add support for 9550SX controllers.
   2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
   2.26.02.006 - Fix 9550SX pchip reset timeout.
                 Add big endian support.
   2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
   2.26.02.008 - Free irq handler in __twa_shutdown().
                 Serialize reset code.
                 Add support for 9650SE controllers.
   2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
   2.26.02.010 - Add support for 9690SA controllers.
   2.26.02.011 - Increase max AENs drained to 256.
                 Add MSI support and "use_msi" module parameter.
                 Fix bug in twa_get_param() on 4GB+.
                 Use pci_resource_len() for ioremap().
   2.26.02.012 - Add power management support.
   2.26.02.013 - Fix bug in twa_load_sgl().
   2.26.02.014 - Force 60 second timeout default.
*/

#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_cmnd.h>
#include "3w-9xxx.h"

/* Globals */
#define TW_DRIVER_VERSION "2.26.02.014"
static DEFINE_MUTEX(twa_chrdev_mutex);
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
extern struct timezone sys_tz;

/* Module parameters */
MODULE_AUTHOR ("LSI");
MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(TW_DRIVER_VERSION);

static int use_msi = 0;
module_param(use_msi, int, S_IRUGO);
MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");

/* Function prototypes */
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
static char *twa_aen_severity_lookup(unsigned char severity_code);
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static int twa_chrdev_open(struct inode *inode, struct file *file);
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id);
static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
			      u32 set_features, unsigned short current_fw_srl,
			      unsigned short current_fw_arch_id,
			      unsigned short current_fw_branch,
			      unsigned short current_fw_build,
			      unsigned short *fw_on_ctlr_srl,
			      unsigned short *fw_on_ctlr_arch_id,
			      unsigned short *fw_on_ctlr_branch,
			      unsigned short *fw_on_ctlr_build,
			      u32 *init_connect_result);
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
				   unsigned char *cdb, int use_sg,
				   TW_SG_Entry *sglistarg);
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);

/* Functions */

/* Show some
   statistics about the card */
static ssize_t twa_show_stats(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
	unsigned long flags = 0;
	ssize_t len;

	spin_lock_irqsave(tw_dev->host->host_lock, flags);
	len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
		       "Current commands posted: %4d\n"
		       "Max commands posted: %4d\n"
		       "Current pending commands: %4d\n"
		       "Max pending commands: %4d\n"
		       "Last sgl length: %4d\n"
		       "Max sgl length: %4d\n"
		       "Last sector count: %4d\n"
		       "Max sector count: %4d\n"
		       "SCSI Host Resets: %4d\n"
		       "AEN's: %4d\n",
		       TW_DRIVER_VERSION,
		       tw_dev->posted_request_count,
		       tw_dev->max_posted_request_count,
		       tw_dev->pending_request_count,
		       tw_dev->max_pending_request_count,
		       tw_dev->sgl_entries,
		       tw_dev->max_sgl_entries,
		       tw_dev->sector_count,
		       tw_dev->max_sector_count,
		       tw_dev->num_resets,
		       tw_dev->aen_count);
	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
	return len;
} /* End twa_show_stats() */

/* Create sysfs 'stats' entry */
static struct device_attribute twa_host_stats_attr = {
	.attr = {
		.name = "stats",
		.mode = S_IRUGO,
	},
	.show = twa_show_stats
};

/* Host attributes initializer */
static struct device_attribute *twa_host_attrs[] = {
	&twa_host_stats_attr,
	NULL,
};

/* File operations struct for character device */
static const struct file_operations twa_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= twa_chrdev_ioctl,
	.open		= twa_chrdev_open,
	.release	= NULL,
	.llseek		= noop_llseek,
};

/*
 * The controllers use an inline buffer instead of a mapped SGL for small,
 * single entry buffers.  Note that we treat a zero-length transfer like
 * a mapped SGL.
 */
static bool twa_command_mapped(struct scsi_cmnd *cmd)
{
	return scsi_sg_count(cmd) != 1 ||
		scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
}

/* This function will complete an aen request from the isr */
static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
{
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Command_Apache_Header *header;
	unsigned short aen;
	int retval = 1;

	header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
	tw_dev->posted_request_count--;
	aen = le16_to_cpu(header->status_block.error);
	full_command_packet = tw_dev->command_packet_virt[request_id];
	command_packet = &full_command_packet->command.oldcommand;

	/* First check for internal completion of set param for time sync */
	if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
		/* Keep reading the queue in case there are more aen's */
		if (twa_aen_read_queue(tw_dev, request_id))
			goto out2;
		else {
			retval = 0;
			goto out;
		}
	}

	switch (aen) {
	case TW_AEN_QUEUE_EMPTY:
		/* Quit reading the queue if this is the last one */
		break;
	case TW_AEN_SYNC_TIME_WITH_HOST:
		twa_aen_sync_time(tw_dev, request_id);
		retval = 0;
		goto out;
	default:
		twa_aen_queue_event(tw_dev, header);

		/* If there are more aen's, keep reading the queue */
		if (twa_aen_read_queue(tw_dev, request_id))
			goto out2;
		else {
			retval = 0;
			goto out;
		}
	}
	retval = 0;
out2:
	tw_dev->state[request_id] = TW_S_COMPLETED;
	twa_free_request_id(tw_dev, request_id);
	clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
out:
	return retval;
} /* End twa_aen_complete() */

/* This function will drain aen queue */
static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
{
	int request_id = 0;
	unsigned char cdb[TW_MAX_CDB_LEN];
	TW_SG_Entry sglist[1];
	int finished = 0, count = 0;
	TW_Command_Full *full_command_packet;
	TW_Command_Apache_Header *header;
	unsigned short aen;
	int first_reset = 0, queue = 0, retval = 1;

	if (no_check_reset)
		first_reset = 0;
	else
		first_reset = 1;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));

	/* Initialize cdb */
	memset(&cdb, 0, TW_MAX_CDB_LEN);
	cdb[0] = REQUEST_SENSE; /* opcode */
	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */

	/* Initialize sglist */
	memset(&sglist, 0, sizeof(TW_SG_Entry));
	sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE);
	sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);

	if (tw_dev->generic_buffer_phys[request_id] & TW_ALIGNMENT_9000_SGL) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
		goto out;
	}

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	do {
		/* Send command to the board */
		if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
			goto out;
		}

		/* Now poll for completion */
		if (twa_poll_response(tw_dev, request_id, 30)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
			tw_dev->posted_request_count--;
			goto out;
		}

		tw_dev->posted_request_count--;
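		/* The request-sense response lands in the generic buffer;
		   its status block carries the AEN code. */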
		header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
		aen = le16_to_cpu(header->status_block.error);
		queue = 0;
		count++;

		switch (aen) {
		case TW_AEN_QUEUE_EMPTY:
			if (first_reset != 1)
				goto out;
			else
				finished = 1;
			break;
		case TW_AEN_SOFT_RESET:
			if (first_reset == 0)
				first_reset = 1;
			else
				queue = 1;
			break;
		case TW_AEN_SYNC_TIME_WITH_HOST:
			break;
		default:
			queue = 1;
		}

		/* Now queue an event info */
		if (queue)
			twa_aen_queue_event(tw_dev, header);
	} while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));

	if (count == TW_MAX_AEN_DRAIN)
		goto out;

	retval = 0;
out:
	tw_dev->state[request_id] = TW_S_INITIAL;
	return retval;
} /* End twa_aen_drain_queue() */

/* This function will queue an event */
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
{
	u32 local_time;
	TW_Event *event;
	unsigned short aen;
	char host[16];
	char *error_str;

	tw_dev->aen_count++;

	/* Fill out event info */
	event = tw_dev->event_queue[tw_dev->error_index];

	/* Check for clobber */
	host[0] = '\0';
	if (tw_dev->host) {
		sprintf(host, " scsi%d:", tw_dev->host->host_no);
		if (event->retrieved == TW_AEN_NOT_RETRIEVED)
			tw_dev->aen_clobber = 1;
	}

	aen = le16_to_cpu(header->status_block.error);
	memset(event, 0, sizeof(TW_Event));

	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
	/* event->time_stamp_sec overflows in y2106 */
	local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
	event->time_stamp_sec = local_time;
	event->aen_code = aen;
	event->retrieved = TW_AEN_NOT_RETRIEVED;
	event->sequence_id = tw_dev->error_sequence_id;
	tw_dev->error_sequence_id++;

	/* Check for embedded error string */
	error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);

	header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
	event->parameter_len = strlen(header->err_specific_desc);
	memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
	if (event->severity != TW_AEN_SEVERITY_DEBUG)
		printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
		       host,
		       twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
		       TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
		       error_str[0] == '\0' ?
		       twa_string_lookup(twa_aen_table, aen) : error_str,
		       header->err_specific_desc);
	else
		tw_dev->aen_count--;

	if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
		tw_dev->event_queue_wrapped = 1;
	tw_dev->error_index = (tw_dev->error_index + 1) % TW_Q_LENGTH;
} /* End twa_aen_queue_event() */

/* This function will read the aen queue from the isr */
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
{
	unsigned char cdb[TW_MAX_CDB_LEN];
	TW_SG_Entry sglist[1];
	TW_Command_Full *full_command_packet;
	int retval = 1;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));

	/* Initialize cdb */
	memset(&cdb, 0, TW_MAX_CDB_LEN);
	cdb[0] = REQUEST_SENSE; /* opcode */
	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */

	/* Initialize sglist */
	memset(&sglist, 0, sizeof(TW_SG_Entry));
	sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE);
	sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	/* Now post the command packet */
	if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
		goto out;
	}
	retval = 0;
out:
	return retval;
} /* End twa_aen_read_queue() */

/* This function will look up an AEN severity string */
static char *twa_aen_severity_lookup(unsigned char severity_code)
{
	char *retval = NULL;

	if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
	    (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
		goto out;

	retval = twa_aen_severity_table[severity_code];
out:
	return retval;
} /* End twa_aen_severity_lookup() */

/* This function will sync firmware time with the host time */
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
{
	u32 schedulertime;
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Param_Apache *param;
	time64_t local_time;

	/* Fill out the command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	command_packet = &full_command_packet->command.oldcommand;
	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
	command_packet->request_id = request_id;
	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
	command_packet->size = TW_COMMAND_SIZE;
	command_packet->byte6_offset.parameter_count = cpu_to_le16(1);

	/* Setup the param */
	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
	memset(param, 0, TW_SECTOR_SIZE);
	param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
	param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
	param->parameter_size_bytes = cpu_to_le16(4);

	/* Convert system time in UTC to local time seconds since last
	   Sunday 12:00AM */
	local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
	div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);

	memcpy(param->data, &(__le32){cpu_to_le32(schedulertime)}, sizeof(__le32));

	/*
	 * Mark internal command
	 */
	tw_dev->srb[request_id] = NULL;

	/* Now post the command */
	twa_post_command_packet(tw_dev, request_id, 1);
} /* End twa_aen_sync_time() */

/* This function will allocate memory and check if it is correctly aligned */
static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
{
	int i;
	dma_addr_t dma_handle;
	unsigned long *cpu_addr;
	int retval = 1;

	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
				      size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
	if (!cpu_addr) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
		goto out;
	}

	if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
		dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
				  cpu_addr, dma_handle);
		goto out;
	}

	memset(cpu_addr, 0, size*TW_Q_LENGTH);

	for (i = 0; i < TW_Q_LENGTH; i++) {
		switch(which) {
		case 0:
			tw_dev->command_packet_phys[i] = dma_handle+(i*size);
			tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
			break;
		case 1:
			tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
			tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
			break;
		}
	}
	retval = 0;
out:
	return retval;
} /* End twa_allocate_memory() */

/* This function will check the status register for unexpected bits */
static int twa_check_bits(u32 status_reg_value)
{
	int retval = 1;

	if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
		goto out;
	if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
		goto out;

	retval = 0;
out:
	return retval;
} /* End twa_check_bits() */

/* This function will check the srl and decide if we are compatible */
static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
{
	int retval = 1;
	unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
	unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
	u32 init_connect_result = 0;

	if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
			       TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
			       TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
			       TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
			       &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
			       &fw_on_ctlr_build, &init_connect_result)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
		goto out;
	}

	tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
	tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
	tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;

	/* Try base mode compatibility */
	if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
		if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
				       TW_EXTENDED_INIT_CONNECT,
				       TW_BASE_FW_SRL, TW_9000_ARCH_ID,
				       TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
				       &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
				       &fw_on_ctlr_branch, &fw_on_ctlr_build,
				       &init_connect_result)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
			goto out;
		}
		if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
			if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please "
					  "upgrade firmware");
			} else {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
			}
			goto out;
		}
		tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
		tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
		tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
	}

	/* Load rest of compatibility struct */
	strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
		sizeof(tw_dev->tw_compat_info.driver_version));
	tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
	tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
	tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
	tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
	tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
	tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
	tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
	tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
	tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;

	retval = 0;
out:
	return retval;
} /* End twa_check_srl() */

/* This function handles ioctl for the character device */
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	long timeout;
	unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
	dma_addr_t dma_handle;
	int request_id = 0;
	unsigned int sequence_id = 0;
	unsigned char event_index, start_index;
	TW_Ioctl_Driver_Command driver_command;
	TW_Ioctl_Buf_Apache *tw_ioctl;
	TW_Lock *tw_lock;
	TW_Command_Full *full_command_packet;
	TW_Compatibility_Info *tw_compat_info;
	TW_Event *event;
	ktime_t current_time;
	TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
	int retval = TW_IOCTL_ERROR_OS_EFAULT;
	void __user *argp = (void __user *)arg;

	mutex_lock(&twa_chrdev_mutex);

	/* Only let one of these through at a time */
	if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
		retval = TW_IOCTL_ERROR_OS_EINTR;
		goto out;
	}

	/* First copy down the driver command */
	if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
		goto out2;

	/* Check data buffer size */
	if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
		retval = TW_IOCTL_ERROR_OS_EINVAL;
		goto out2;
	}

	/* Hardware can only do multiple of 512 byte transfers */
	data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;

	/* Now allocate ioctl buf memory */
	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
				      sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted,
				      &dma_handle, GFP_KERNEL);
	if (!cpu_addr) {
		retval = TW_IOCTL_ERROR_OS_ENOMEM;
		goto out2;
	}

	tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;

	/* Now copy down the entire ioctl */
	if (copy_from_user(tw_ioctl, argp, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length))
		goto out3;

	/* See which ioctl we are doing */
	switch (cmd) {
	case TW_IOCTL_FIRMWARE_PASS_THROUGH:
		spin_lock_irqsave(tw_dev->host->host_lock, flags);
		twa_get_request_id(tw_dev, &request_id);

		/* Flag internal command */
		tw_dev->srb[request_id] = NULL;

		/* Flag chrdev ioctl */
		tw_dev->chrdev_request_id = request_id;

		full_command_packet =
			&tw_ioctl->firmware_command;

		/* Load request id and sglist for both command types */
		twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);

		memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));

		/* Now post the command packet to the controller */
		twa_post_command_packet(tw_dev, request_id, 1);
		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);

		timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;

		/* Now wait for command to complete */
		timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);

		/* We timed out, and didn't get an interrupt */
		if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
			/* Now we need to reset the board */
			printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
			       tw_dev->host->host_no, TW_DRIVER, 0x37,
			       cmd);
			retval = TW_IOCTL_ERROR_OS_EIO;
			twa_reset_device_extension(tw_dev);
			goto out3;
		}

		/* Now copy in the command packet response */
		memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));

		/* Now complete the io */
		spin_lock_irqsave(tw_dev->host->host_lock, flags);
		tw_dev->posted_request_count--;
		tw_dev->state[request_id] = TW_S_COMPLETED;
		twa_free_request_id(tw_dev, request_id);
		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
		break;
	case TW_IOCTL_GET_COMPATIBILITY_INFO:
		tw_ioctl->driver_command.status = 0;
		/* Copy compatibility struct into ioctl data buffer */
		tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
		memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
		break;
	case TW_IOCTL_GET_LAST_EVENT:
		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			} else
				tw_ioctl->driver_command.status = 0;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			tw_ioctl->driver_command.status = 0;
		}
		event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_FIRST_EVENT:
		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			} else
				tw_ioctl->driver_command.status = 0;
			event_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			tw_ioctl->driver_command.status = 0;
			event_index = 0;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_NEXT_EVENT:
		event = (TW_Event *)tw_ioctl->data_buffer;
		sequence_id = event->sequence_id;
		tw_ioctl->driver_command.status = 0;

		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			}
			start_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			start_index = 0;
		}
		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;

		if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
				tw_dev->aen_clobber = 1;
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
			break;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_PREVIOUS_EVENT:
		event = (TW_Event *)tw_ioctl->data_buffer;
		sequence_id = event->sequence_id;
		tw_ioctl->driver_command.status = 0;

		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			}
			start_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			start_index = 0;
		}
		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;

		if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
				tw_dev->aen_clobber = 1;
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
			break;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_LOCK:
		tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
		current_time = ktime_get();

		if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) ||
		    ktime_after(current_time, tw_dev->ioctl_time)) {
			tw_dev->ioctl_sem_lock = 1;
			tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec);
			tw_ioctl->driver_command.status = 0;
			tw_lock->time_remaining_msec = tw_lock->timeout_msec;
		} else {
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
			tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time);
		}
		break;
	case TW_IOCTL_RELEASE_LOCK:
		if (tw_dev->ioctl_sem_lock == 1) {
			tw_dev->ioctl_sem_lock = 0;
			tw_ioctl->driver_command.status = 0;
		} else {
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
		}
		break;
	default:
		retval = TW_IOCTL_ERROR_OS_ENOTTY;
		goto out3;
	}

	/* Now copy the entire response to userspace */
	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length) == 0)
		retval = 0;
out3:
	/* Now free ioctl buf memory */
	dma_free_coherent(&tw_dev->tw_pci_dev->dev,
			  sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted,
			  cpu_addr, dma_handle);
out2:
	mutex_unlock(&tw_dev->ioctl_lock);
out:
	mutex_unlock(&twa_chrdev_mutex);
	return retval;
} /* End twa_chrdev_ioctl() */

/* This function handles open for the character device */
/* NOTE that this function will race with remove.
 */
static int twa_chrdev_open(struct inode *inode, struct file *file)
{
	unsigned int minor_number;
	int retval = TW_IOCTL_ERROR_OS_ENODEV;

	if (!capable(CAP_SYS_ADMIN)) {
		retval = -EACCES;
		goto out;
	}

	minor_number = iminor(inode);
	if (minor_number >= twa_device_extension_count)
		goto out;
	retval = 0;
out:
	return retval;
} /* End twa_chrdev_open() */

/* This function will print readable messages from status register errors */
static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
{
	int retval = 1;

	/* Check for various error conditions and handle them appropriately */
	if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
		writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
	}

	if (status_reg_value & TW_STATUS_PCI_ABORT) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
		writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
		pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
	}

	if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
		if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
		     (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
		    (!test_bit(TW_IN_RESET, &tw_dev->flags)))
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
		writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
	}

	if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
		if (tw_dev->reset_print == 0) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
			tw_dev->reset_print = 1;
		}
		goto out;
	}
	retval = 0;
out:
	return retval;
} /* End twa_decode_bits() */

/* This function will empty the response queue */
static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
{
	u32 status_reg_value;
	int count = 0, retval = 1;

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

	while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
		readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
		count++;
	}
	if (count == TW_MAX_RESPONSE_DRAIN)
		goto out;

	retval = 0;
out:
	return retval;
} /* End twa_empty_response_queue() */

/* This function will clear the pchip/response queue on 9550SX */
static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
{
	u32 response_que_value = 0;
	unsigned long before;
	int retval = 1;

	if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
		before = jiffies;
		while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
			response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
			msleep(1);
			if (time_after(jiffies, before + HZ * 30))
				goto out;
		}
		/* P-chip settle time */
		msleep(500);
		retval = 0;
	} else
		retval = 0;
out:
	return retval;
} /* End twa_empty_response_queue_large() */

/* This function passes sense keys from firmware to scsi layer */
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
{
	TW_Command_Full *full_command_packet;
	unsigned short error;
	int retval = 1;
	char *error_str;

	full_command_packet = tw_dev->command_packet_virt[request_id];

	/* Check for embedded error string */
	error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);

	/* Don't print error for Logical unit not supported during rollcall */
	error = le16_to_cpu(full_command_packet->header.status_block.error);
	if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
		if (print_host)
			printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
			       tw_dev->host->host_no,
			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error,
			       error_str[0] ? error_str : twa_string_lookup(twa_error_table, error),
			       full_command_packet->header.err_specific_desc);
		else
			printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error,
			       error_str[0] ? error_str : twa_string_lookup(twa_error_table, error),
			       full_command_packet->header.err_specific_desc);
	}

	if (copy_sense) {
		memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
		tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
		retval = TW_ISR_DONT_RESULT;
		goto out;
	}
	retval = 0;
out:
	return retval;
} /* End twa_fill_sense() */

/* This function will free up device extension resources */
static void twa_free_device_extension(TW_Device_Extension *tw_dev)
{
	if (tw_dev->command_packet_virt[0])
		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
				  sizeof(TW_Command_Full) * TW_Q_LENGTH,
				  tw_dev->command_packet_virt[0],
				  tw_dev->command_packet_phys[0]);

	if (tw_dev->generic_buffer_virt[0])
		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
				  TW_SECTOR_SIZE * TW_Q_LENGTH,
				  tw_dev->generic_buffer_virt[0],
				  tw_dev->generic_buffer_phys[0]);

	kfree(tw_dev->event_queue[0]);
} /* End twa_free_device_extension() */

/* This function will free a request id */
static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
{
	tw_dev->free_queue[tw_dev->free_tail] = request_id;
	tw_dev->state[request_id] = TW_S_FINISHED;
	tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
} /* End twa_free_request_id() */

/* This function will get parameter table entries from the firmware */
static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
{
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Param_Apache *param;
	void *retval = NULL;

	/* Setup the command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	command_packet = &full_command_packet->command.oldcommand;

	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
	command_packet->size = TW_COMMAND_SIZE;
	command_packet->request_id = request_id;
	command_packet->byte6_offset.block_count = cpu_to_le16(1);

	/* Now setup the param */
	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
	memset(param, 0, TW_SECTOR_SIZE);
	param->table_id = cpu_to_le16(table_id | 0x8000);
	param->parameter_id = cpu_to_le16(parameter_id);
	param->parameter_size_bytes =
		cpu_to_le16(parameter_size_bytes);

	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);

	/* Post the command packet to the board */
	twa_post_command_packet(tw_dev, request_id, 1);

	/* Poll for completion */
	if (twa_poll_response(tw_dev, request_id, 30))
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
	else
		retval = (void *)&(param->data[0]);

	tw_dev->posted_request_count--;
	tw_dev->state[request_id] = TW_S_INITIAL;

	return retval;
} /* End twa_get_param() */

/* This function will assign an available request id */
static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
{
	*request_id = tw_dev->free_queue[tw_dev->free_head];
	tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
	tw_dev->state[*request_id] = TW_S_STARTED;
} /* End twa_get_request_id() */

/* This function will send an initconnection command to controller */
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
			      u32 set_features, unsigned short current_fw_srl,
			      unsigned short current_fw_arch_id,
			      unsigned short current_fw_branch,
			      unsigned short current_fw_build,
			      unsigned short *fw_on_ctlr_srl,
			      unsigned short *fw_on_ctlr_arch_id,
			      unsigned short *fw_on_ctlr_branch,
			      unsigned short *fw_on_ctlr_build,
			      u32 *init_connect_result)
{
	TW_Command_Full *full_command_packet;
	TW_Initconnect *tw_initconnect;
	int request_id = 0, retval = 1;

	/* Initialize InitConnection command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	full_command_packet->header.header_desc.size_header = 128;

	tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
	tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
	tw_initconnect->request_id = request_id;
	tw_initconnect->message_credits = cpu_to_le16(message_credits);

	/* Turn on 64-bit sgl support if we need to */
	set_features |= sizeof(dma_addr_t) > 4 ?
		1 : 0;

	tw_initconnect->features = cpu_to_le32(set_features);

	if (set_features & TW_EXTENDED_INIT_CONNECT) {
		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
		tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
		tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
		tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
		tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
	} else
		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;

	/* Send command packet to the board */
	twa_post_command_packet(tw_dev, request_id, 1);

	/* Poll for completion */
	if (twa_poll_response(tw_dev, request_id, 30)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
	} else {
		if (set_features & TW_EXTENDED_INIT_CONNECT) {
			*fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
			*fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
			*fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
			*fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
			*init_connect_result = le32_to_cpu(tw_initconnect->result);
		}
		retval = 0;
	}

	tw_dev->posted_request_count--;
	tw_dev->state[request_id] = TW_S_INITIAL;

	return retval;
} /* End twa_initconnection() */

/* This function will initialize the fields of a device extension */
static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
{
	int i, retval = 1;

	/* Initialize command packet buffers */
	if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
		goto out;
	}

	/* Initialize generic buffer */
	if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
		goto out;
	}

	/* Allocate event info space */
	tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
	if (!tw_dev->event_queue[0]) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
		goto out;
	}

	for (i = 0; i < TW_Q_LENGTH; i++) {
		tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
		tw_dev->free_queue[i] = i;
		tw_dev->state[i] = TW_S_INITIAL;
	}

	tw_dev->pending_head = TW_Q_START;
	tw_dev->pending_tail = TW_Q_START;
	tw_dev->free_head = TW_Q_START;
	tw_dev->free_tail = TW_Q_START;
	tw_dev->error_sequence_id = 1;
	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;

	mutex_init(&tw_dev->ioctl_lock);
	init_waitqueue_head(&tw_dev->ioctl_wqueue);

	retval = 0;
out:
	return retval;
} /* End twa_initialize_device_extension() */

/* This function is the interrupt service routine */
static irqreturn_t twa_interrupt(int irq, void *dev_instance)
{
	int request_id, error = 0;
	u32 status_reg_value;
	TW_Response_Queue response_que;
	TW_Command_Full *full_command_packet;
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
	int handled = 0;

	/* Get the per adapter lock */
	spin_lock(tw_dev->host->host_lock);

	/* Read the registers */
	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

	/* Check if this is our interrupt, otherwise bail */
	if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
		goto twa_interrupt_bail;

	handled = 1;

	/* If we are resetting, bail */
	if (test_bit(TW_IN_RESET, &tw_dev->flags))
		goto twa_interrupt_bail;

	/* Check controller for errors */
	if (twa_check_bits(status_reg_value)) {
		if (twa_decode_bits(tw_dev, status_reg_value)) {
			TW_CLEAR_ALL_INTERRUPTS(tw_dev);
			goto twa_interrupt_bail;
		}
	}

	/* Handle host interrupt */
	if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
		TW_CLEAR_HOST_INTERRUPT(tw_dev);

	/* Handle attention interrupt */
	if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
		TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
		if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
			twa_get_request_id(tw_dev, &request_id);

			error = twa_aen_read_queue(tw_dev, request_id);
			if (error) {
				tw_dev->state[request_id] = TW_S_COMPLETED;
				twa_free_request_id(tw_dev, request_id);
				clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
			}
		}
	}

	/* Handle command interrupt */
	if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
		TW_MASK_COMMAND_INTERRUPT(tw_dev);
		/* Drain as many pending commands as we can */
		while (tw_dev->pending_request_count > 0) {
			request_id = tw_dev->pending_queue[tw_dev->pending_head];
			if (tw_dev->state[request_id] != TW_S_PENDING) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
				TW_CLEAR_ALL_INTERRUPTS(tw_dev);
				goto twa_interrupt_bail;
			}
			if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
				tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
				tw_dev->pending_request_count--;
			} else {
				/* If we get here, we will continue re-posting on the next command interrupt */
				break;
			}
		}
	}

	/* Handle response interrupt */
	if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {

		/* Drain the response queue from the board */
		while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
			/* Complete the response */
			response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
			request_id = TW_RESID_OUT(response_que.response_id);
			full_command_packet = tw_dev->command_packet_virt[request_id];
			error = 0;
			/* Check for command packet errors */
			if (full_command_packet->command.newcommand.status != 0) {
				if (tw_dev->srb[request_id] != NULL) {
					error = twa_fill_sense(tw_dev, request_id, 1, 1);
				} else {
					/* Skip ioctl error prints */
					if (request_id != tw_dev->chrdev_request_id) {
						error = twa_fill_sense(tw_dev, request_id, 0, 1);
					}
				}
			}

			/* Check for correct state */
			if (tw_dev->state[request_id] != TW_S_POSTED) {
				if (tw_dev->srb[request_id] != NULL) {
					TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
					goto twa_interrupt_bail;
				}
			}

			/* Check for internal command completion */
			if (tw_dev->srb[request_id] == NULL) {
				if (request_id != tw_dev->chrdev_request_id) {
					if (twa_aen_complete(tw_dev, request_id))
						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
				} else {
					tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
					wake_up(&tw_dev->ioctl_wqueue);
				}
			} else {
				struct scsi_cmnd *cmd;

				cmd =
					tw_dev->srb[request_id];

				twa_scsiop_execute_scsi_complete(tw_dev, request_id);
				/* If no error, command was a success */
				if (error == 0) {
					cmd->result = (DID_OK << 16);
				}

				/* If error, command failed */
				if (error == 1) {
					/* Ask for a host reset */
					cmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
				}

				/* Report residual bytes for single sgl */
				if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
					u32 length = le32_to_cpu(full_command_packet->command.newcommand.sg_list[0].length);

					if (length < scsi_bufflen(cmd))
						scsi_set_resid(cmd, scsi_bufflen(cmd) - length);
				}

				/* Now complete the io */
				if (twa_command_mapped(cmd))
					scsi_dma_unmap(cmd);
				scsi_done(cmd);
				tw_dev->state[request_id] = TW_S_COMPLETED;
				twa_free_request_id(tw_dev, request_id);
				tw_dev->posted_request_count--;
			}

			/* Check for valid status after each drain */
			status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
			if (twa_check_bits(status_reg_value)) {
				if (twa_decode_bits(tw_dev, status_reg_value)) {
					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
					goto twa_interrupt_bail;
				}
			}
		}
	}

twa_interrupt_bail:
	spin_unlock(tw_dev->host->host_lock);
	return IRQ_RETVAL(handled);
} /* End twa_interrupt() */

/* This function will load the request id and various sgls for ioctls */
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
{
	TW_Command *oldcommand;
	TW_Command_Apache *newcommand;
	TW_SG_Entry *sgl;
	unsigned int pae = 0;

	if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
		pae = 1;

	if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
		newcommand = &full_command_packet->command.newcommand;
		newcommand->request_id__lunl =
			TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id);
		if (length) {
			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
			newcommand->sg_list[0].length = cpu_to_le32(length);
		}
		newcommand->sgl_entries__lunh =
			TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ?
				      1 : 0);
	} else {
		oldcommand = &full_command_packet->command.oldcommand;
		oldcommand->request_id = request_id;

		if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
			/* Load the sg list */
			if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
				sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
			else
				sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
			sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
			sgl->length = cpu_to_le32(length);

			oldcommand->size += pae;
		}
	}
} /* End twa_load_sgl() */

/* This function will poll for a response interrupt of a request */
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
{
	int retval = 1, found = 0, response_request_id;
	TW_Response_Queue response_queue;
	TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];

	if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
		response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
		response_request_id = TW_RESID_OUT(response_queue.response_id);
		if (request_id != response_request_id) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
			goto out;
		}
		if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
			if (full_command_packet->command.newcommand.status != 0) {
				/* bad response */
				twa_fill_sense(tw_dev, request_id, 0, 0);
				goto out;
			}
			found = 1;
		} else {
			if (full_command_packet->command.oldcommand.status != 0) {
				/* bad response */
				twa_fill_sense(tw_dev, request_id, 0, 0);
				goto out;
			}
			found = 1;
		}
	}

	if (found)
		retval = 0;
out:
	return retval;
} /* End twa_poll_response() */

/* This function will poll the status register for a flag */
static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
{
	u32 status_reg_value;
	unsigned long before;
	int retval = 1;

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
	before = jiffies;

	if (twa_check_bits(status_reg_value))
		twa_decode_bits(tw_dev, status_reg_value);

	while ((status_reg_value & flag) != flag) {
		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

		if (twa_check_bits(status_reg_value))
			twa_decode_bits(tw_dev, status_reg_value);

		if (time_after(jiffies, before + HZ * seconds))
			goto out;

		msleep(50);
	}
	retval = 0;
out:
	return retval;
} /* End twa_poll_status() */

/* This function will poll the status register for disappearance of a flag */
static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
{
	u32 status_reg_value;
	unsigned long before;
	int retval = 1;

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
	before = jiffies;

	if (twa_check_bits(status_reg_value))
		twa_decode_bits(tw_dev, status_reg_value);

	while ((status_reg_value & flag) != 0) {
		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
		if (twa_check_bits(status_reg_value))
			twa_decode_bits(tw_dev, status_reg_value);

		if (time_after(jiffies, before + HZ * seconds))
			goto out;

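		/* Re-check roughly every 50 ms until the flag clears or the timeout expires */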
		msleep(50);
	}
	retval = 0;
out:
	return retval;
} /* End twa_poll_status_gone() */

/* This function will attempt to post a command packet to the board */
static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
{
	u32 status_reg_value;
	dma_addr_t command_que_value;
	int retval = 1;

	command_que_value = tw_dev->command_packet_phys[request_id];

	/* For 9650SE write low 4 bytes first */
	if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
	    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
		command_que_value += TW_COMMAND_OFFSET;
		writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
	}

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

	if (twa_check_bits(status_reg_value))
		twa_decode_bits(tw_dev, status_reg_value);

	if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {

		/* Only pend internal driver commands */
		if (!internal) {
			retval = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}

		/* Couldn't post the command packet, so we do it later */
		if (tw_dev->state[request_id] != TW_S_PENDING) {
			tw_dev->state[request_id] = TW_S_PENDING;
			tw_dev->pending_request_count++;
			if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
				tw_dev->max_pending_request_count = tw_dev->pending_request_count;
			}
			tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
			tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
		}
		TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
		goto out;
	} else {
		if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
		    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
			/* Now write upper 4 bytes */
			writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
		} else {
			if (sizeof(dma_addr_t) > 4) {
				command_que_value += TW_COMMAND_OFFSET;
				writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
				writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
			} else {
				writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
			}
		}
		tw_dev->state[request_id] = TW_S_POSTED;
		tw_dev->posted_request_count++;
		if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
			tw_dev->max_posted_request_count = tw_dev->posted_request_count;
		}
	}
	retval = 0;
out:
	return retval;
} /* End twa_post_command_packet() */

/* This function will reset a device extension */
static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
{
	int i = 0;
	int retval = 1;
	unsigned long flags = 0;

	set_bit(TW_IN_RESET, &tw_dev->flags);
	TW_DISABLE_INTERRUPTS(tw_dev);
	TW_MASK_COMMAND_INTERRUPT(tw_dev);
	spin_lock_irqsave(tw_dev->host->host_lock, flags);

	/* Abort all requests that are in progress */
	for (i = 0; i < TW_Q_LENGTH; i++) {
		if ((tw_dev->state[i] != TW_S_FINISHED) &&
		    (tw_dev->state[i] != TW_S_INITIAL) &&
		    (tw_dev->state[i] != TW_S_COMPLETED)) {
			if (tw_dev->srb[i]) {
				struct scsi_cmnd *cmd = tw_dev->srb[i];

				cmd->result = (DID_RESET << 16);
				if (twa_command_mapped(cmd))

/* This function will reset a device extension */
static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
{
	int i = 0;
	int retval = 1;
	unsigned long flags = 0;

	set_bit(TW_IN_RESET, &tw_dev->flags);
	TW_DISABLE_INTERRUPTS(tw_dev);
	TW_MASK_COMMAND_INTERRUPT(tw_dev);
	spin_lock_irqsave(tw_dev->host->host_lock, flags);

	/* Abort all requests that are in progress */
	for (i = 0; i < TW_Q_LENGTH; i++) {
		if ((tw_dev->state[i] != TW_S_FINISHED) &&
		    (tw_dev->state[i] != TW_S_INITIAL) &&
		    (tw_dev->state[i] != TW_S_COMPLETED)) {
			if (tw_dev->srb[i]) {
				struct scsi_cmnd *cmd = tw_dev->srb[i];

				cmd->result = (DID_RESET << 16);
				if (twa_command_mapped(cmd))
					scsi_dma_unmap(cmd);
				scsi_done(cmd);
			}
		}
	}

	/* Reset queues and counts */
	for (i = 0; i < TW_Q_LENGTH; i++) {
		tw_dev->free_queue[i] = i;
		tw_dev->state[i] = TW_S_INITIAL;
	}
	tw_dev->free_head = TW_Q_START;
	tw_dev->free_tail = TW_Q_START;
	tw_dev->posted_request_count = 0;
	tw_dev->pending_request_count = 0;
	tw_dev->pending_head = TW_Q_START;
	tw_dev->pending_tail = TW_Q_START;
	tw_dev->reset_print = 0;

	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);

	if (twa_reset_sequence(tw_dev, 1))
		goto out;

	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
	clear_bit(TW_IN_RESET, &tw_dev->flags);
	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;

	retval = 0;
out:
	return retval;
} /* End twa_reset_device_extension() */

/* This function will reset a controller */
static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
{
	int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;

	while (tries < TW_MAX_RESET_TRIES) {
		if (do_soft_reset) {
			TW_SOFT_RESET(tw_dev);
			/* Clear pchip/response queue on 9550SX */
			if (twa_empty_response_queue_large(tw_dev)) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
				do_soft_reset = 1;
				tries++;
				continue;
			}
		}

		/* Make sure controller is in a good state */
		if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		/* Empty response queue */
		if (twa_empty_response_queue(tw_dev)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		flashed = 0;

		/* Check for compatibility/flash */
		if (twa_check_srl(tw_dev, &flashed)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		} else {
			if (flashed) {
				tries++;
				continue;
			}
		}

		/* Drain the AEN queue */
		if (twa_aen_drain_queue(tw_dev, soft_reset)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		/* If we got here, controller is in a good state */
		retval = 0;
		goto out;
	}
out:
	return retval;
} /* End twa_reset_sequence() */

/* This function returns unit geometry in cylinders/heads/sectors */
static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
{
	int heads, sectors, cylinders;

	if (capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
		cylinders = sector_div(capacity, heads * sectors);
	} else {
		heads = 64;
		sectors = 32;
		cylinders = sector_div(capacity, heads * sectors);
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	return 0;
} /* End twa_scsi_biosparam() */
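
/*
 * BIOS geometry above: units of 0x200000 sectors (1 GiB at 512 bytes per
 * sector) or larger report the conventional large-disk translation of
 * 255 heads and 63 sectors per track; smaller units report 64 heads and
 * 32 sectors.  The cylinder value is derived from the capacity with
 * sector_div().
 */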

/* This is the new scsi eh reset function */
static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
{
	TW_Device_Extension *tw_dev = NULL;
	int retval = FAILED;

	tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;

	tw_dev->num_resets++;

	sdev_printk(KERN_WARNING, SCpnt->device,
		"WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
		TW_DRIVER, 0x2c, SCpnt->cmnd[0]);

	/* Make sure we are not issuing an ioctl or resetting from ioctl */
	mutex_lock(&tw_dev->ioctl_lock);

	/* Now reset the card and some of the device extension data */
	if (twa_reset_device_extension(tw_dev)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
		goto out;
	}

	retval = SUCCESS;
out:
	mutex_unlock(&tw_dev->ioctl_lock);
	return retval;
} /* End twa_scsi_eh_reset() */

/* This is the main scsi queue function to handle scsi opcodes */
static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
	int request_id, retval;
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;

	/* If we are resetting due to timed out ioctl, report as busy */
	if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/* Check if this FW supports luns */
	if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
		SCpnt->result = (DID_BAD_TARGET << 16);
		done(SCpnt);
		retval = 0;
		goto out;
	}

	/* Get a free request id */
	twa_get_request_id(tw_dev, &request_id);

	/* Save the scsi command for use by the ISR */
	tw_dev->srb[request_id] = SCpnt;

	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
	switch (retval) {
	case SCSI_MLQUEUE_HOST_BUSY:
		if (twa_command_mapped(SCpnt))
			scsi_dma_unmap(SCpnt);
		twa_free_request_id(tw_dev, request_id);
		break;
	case 1:
		SCpnt->result = (DID_ERROR << 16);
		if (twa_command_mapped(SCpnt))
			scsi_dma_unmap(SCpnt);
		done(SCpnt);
		tw_dev->state[request_id] = TW_S_COMPLETED;
		twa_free_request_id(tw_dev, request_id);
		retval = 0;
	}
out:
	return retval;
} /* End twa_scsi_queue() */

static DEF_SCSI_QCMD(twa_scsi_queue)
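
/*
 * DEF_SCSI_QCMD() above generates the twa_scsi_queue() wrapper named in the
 * host template: it takes the host lock and hands the midlayer's completion
 * routine in as 'done' before calling twa_scsi_queue_lck().  The _lck
 * variant returns 0 once a command has been accepted (or finished
 * immediately through done()), and SCSI_MLQUEUE_HOST_BUSY to make the
 * midlayer requeue the command later.
 */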

/* This function hands scsi cdb's to the firmware */
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
				   unsigned char *cdb, int use_sg,
				   TW_SG_Entry *sglistarg)
{
	TW_Command_Full *full_command_packet;
	TW_Command_Apache *command_packet;
	u32 num_sectors = 0x0;
	int i, sg_count;
	struct scsi_cmnd *srb = NULL;
	struct scatterlist *sg;
	int retval = 1;

	if (tw_dev->srb[request_id])
		srb = tw_dev->srb[request_id];

	/* Initialize command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	full_command_packet->header.header_desc.size_header = 128;
	full_command_packet->header.status_block.error = 0;
	full_command_packet->header.status_block.severity__reserved = 0;

	command_packet = &full_command_packet->command.newcommand;
	command_packet->status = 0;
	command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);

	/* We forced 16 byte cdb use earlier */
	if (!cdb)
		memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
	else
		memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);

	if (srb) {
		command_packet->unit = srb->device->id;
		command_packet->request_id__lunl =
			TW_REQ_LUN_IN(srb->device->lun, request_id);
	} else {
		command_packet->request_id__lunl =
			TW_REQ_LUN_IN(0, request_id);
		command_packet->unit = 0;
	}

	command_packet->sgl_offset = 16;

	if (!sglistarg) {
		/* Map sglist from scsi layer to cmd packet */

		if (scsi_sg_count(srb)) {
			if (!twa_command_mapped(srb)) {
				if (srb->sc_data_direction == DMA_TO_DEVICE ||
				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
					scsi_sg_copy_to_buffer(srb,
							       tw_dev->generic_buffer_virt[request_id],
							       TW_SECTOR_SIZE);
				command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
				command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
			} else {
				sg_count = scsi_dma_map(srb);
				if (sg_count < 0)
					goto out;

				scsi_for_each_sg(srb, sg, sg_count, i) {
					command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
					command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
					if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
						goto out;
					}
				}
			}
			command_packet->sgl_entries__lunh = TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id]));
		}
	} else {
		/* Internal cdb post */
		for (i = 0; i < use_sg; i++) {
			command_packet->sg_list[i].address = sglistarg[i].address;
			command_packet->sg_list[i].length = sglistarg[i].length;
			if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
				goto out;
			}
		}
		command_packet->sgl_entries__lunh = TW_REQ_LUN_IN(0, use_sg);
	}

	if (srb) {
		if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
			num_sectors = (u32)srb->cmnd[4];

		if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
			num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
	}

	/* Update sector statistic */
	tw_dev->sector_count = num_sectors;
	if (tw_dev->sector_count > tw_dev->max_sector_count)
		tw_dev->max_sector_count = tw_dev->sector_count;

	/* Update SG statistics */
	if (srb) {
		tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
		if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
			tw_dev->max_sgl_entries = tw_dev->sgl_entries;
	}

	/* Now post the command to the board */
	if (srb) {
		retval = twa_post_command_packet(tw_dev, request_id, 0);
	} else {
		twa_post_command_packet(tw_dev, request_id, 1);
		retval = 0;
	}
out:
	return retval;
} /* End twa_scsiop_execute_scsi() */

/* This function completes an execute scsi operation */
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
{
	struct scsi_cmnd *cmd = tw_dev->srb[request_id];

	if (!twa_command_mapped(cmd) &&
	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
		if (scsi_sg_count(cmd) == 1) {
			void *buf = tw_dev->generic_buffer_virt[request_id];

			scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
		}
	}
} /* End twa_scsiop_execute_scsi_complete() */
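
/*
 * Commands that twa_command_mapped() reports as not DMA-mapped are staged
 * through the per-request generic_buffer rather than a scatter-gather list:
 * twa_scsiop_execute_scsi() copies write data into that buffer before
 * posting, and twa_scsiop_execute_scsi_complete() above copies read data
 * back out once the firmware has finished.  Since only one TW_SECTOR_SIZE
 * buffer is used, this path is limited to small single-segment transfers.
 */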

/* This function tells the controller to shut down */
static void __twa_shutdown(TW_Device_Extension *tw_dev)
{
	/* Disable interrupts */
	TW_DISABLE_INTERRUPTS(tw_dev);

	/* Free up the IRQ */
	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);

	printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);

	/* Tell the card we are shutting down */
	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
	} else {
		printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
	}

	/* Clear all interrupts just before exit */
	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
} /* End __twa_shutdown() */

/* Wrapper for __twa_shutdown */
static void twa_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	__twa_shutdown(tw_dev);
} /* End twa_shutdown() */

/* This function will look up a string */
static char *twa_string_lookup(twa_message_type *table, unsigned int code)
{
	int index;

	for (index = 0; ((code != table[index].code) &&
			 (table[index].text != (char *)0)); index++);
	return(table[index].text);
} /* End twa_string_lookup() */

/* This function gets called when a disk is coming on-line */
static int twa_slave_configure(struct scsi_device *sdev)
{
	/* Force 60 second timeout */
	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);

	return 0;
} /* End twa_slave_configure() */

/* scsi_host_template initializer */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "3ware 9000 Storage Controller",
	.queuecommand = twa_scsi_queue,
	.eh_host_reset_handler = twa_scsi_eh_reset,
	.bios_param = twa_scsi_biosparam,
	.change_queue_depth = scsi_change_queue_depth,
	.can_queue = TW_Q_LENGTH-2,
	.slave_configure = twa_slave_configure,
	.this_id = -1,
	.sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
	.max_sectors = TW_MAX_SECTORS,
	.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
	.shost_attrs = twa_host_attrs,
	.emulated = 1,
	.no_write_same = 1,
};
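
/*
 * Notes on the host template: .can_queue is TW_Q_LENGTH-2, which keeps a
 * couple of request slots out of the midlayer's hands (presumably headroom
 * for internal driver requests such as AEN and ioctl commands), and
 * .slave_configure enforces a 60 second command timeout on every attached
 * disk.
 */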

/* This function will probe and initialize a card */
static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host = NULL;
	TW_Device_Extension *tw_dev;
	unsigned long mem_addr, mem_len;
	int retval;

	retval = pci_enable_device(pdev);
	if (retval) {
		TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (retval)
		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (retval) {
		TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
		retval = -ENODEV;
		goto out_disable_device;
	}

	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
	if (!host) {
		TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
		retval = -ENOMEM;
		goto out_disable_device;
	}
	tw_dev = (TW_Device_Extension *)host->hostdata;

	/* Save values to device extension */
	tw_dev->host = host;
	tw_dev->tw_pci_dev = pdev;

	if (twa_initialize_device_extension(tw_dev)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
		retval = -ENOMEM;
		goto out_free_device_extension;
	}

	/* Request IO regions */
	retval = pci_request_regions(pdev, "3w-9xxx");
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
		goto out_free_device_extension;
	}

	if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
		mem_addr = pci_resource_start(pdev, 1);
		mem_len = pci_resource_len(pdev, 1);
	} else {
		mem_addr = pci_resource_start(pdev, 2);
		mem_len = pci_resource_len(pdev, 2);
	}

	/* Save base address */
	tw_dev->base_addr = ioremap(mem_addr, mem_len);
	if (!tw_dev->base_addr) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
		retval = -ENOMEM;
		goto out_release_mem_region;
	}

	/* Disable interrupts on the card */
	TW_DISABLE_INTERRUPTS(tw_dev);

	/* Initialize the card */
	if (twa_reset_sequence(tw_dev, 0)) {
		retval = -ENOMEM;
		goto out_iounmap;
	}

	/* Set host specific parameters */
	if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
	    (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
		host->max_id = TW_MAX_UNITS_9650SE;
	else
		host->max_id = TW_MAX_UNITS;

	host->max_cmd_len = TW_MAX_CDB_LEN;

	/* Channels aren't supported by adapter */
	host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
	host->max_channel = 0;

	/* Register the card with the kernel SCSI layer */
	retval = scsi_add_host(host, &pdev->dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
		goto out_iounmap;
	}

	pci_set_drvdata(pdev, host);

	printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
	       host->host_no, mem_addr, pdev->irq);
	printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
	       host->host_no,
	       (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
				     TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
	       (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
				     TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
	       le32_to_cpu(*(__le32 *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
						    TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));

	/* Try to enable MSI */
	if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
	    !pci_enable_msi(pdev))
		set_bit(TW_USING_MSI, &tw_dev->flags);

	/* Now setup the interrupt handler */
	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
		goto out_remove_host;
	}

	twa_device_extension_list[twa_device_extension_count] = tw_dev;
	twa_device_extension_count++;

	/* Re-enable interrupts on the card */
	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);

	/* Finally, scan the host */
	scsi_scan_host(host);

	if (twa_major == -1) {
		if ((twa_major = register_chrdev(0, "twa", &twa_fops)) < 0)
			TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
	}
	return 0;

out_remove_host:
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);
	scsi_remove_host(host);
out_iounmap:
	iounmap(tw_dev->base_addr);
out_release_mem_region:
	pci_release_regions(pdev);
out_free_device_extension:
	twa_free_device_extension(tw_dev);
	scsi_host_put(host);
out_disable_device:
	pci_disable_device(pdev);

	return retval;
} /* End twa_probe() */
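
/*
 * The error path of twa_probe() unwinds in reverse order of acquisition:
 * each failure jumps to the label that releases everything set up so far
 * (MSI and the registered host, the ioremap mapping, the PCI regions, the
 * device extension, and finally the PCI device itself), so a partially
 * initialized controller is never left registered with the SCSI midlayer.
 */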

/* This function is called to remove a device */
static void twa_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	scsi_remove_host(tw_dev->host);

	/* Unregister character device */
	if (twa_major >= 0) {
		unregister_chrdev(twa_major, "twa");
		twa_major = -1;
	}

	/* Shutdown the card */
	__twa_shutdown(tw_dev);

	/* Disable MSI if enabled */
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);

	/* Free IO remapping */
	iounmap(tw_dev->base_addr);

	/* Free up the mem region */
	pci_release_regions(pdev);

	/* Free up device extension resources */
	twa_free_device_extension(tw_dev);

	scsi_host_put(tw_dev->host);
	pci_disable_device(pdev);
	twa_device_extension_count--;
} /* End twa_remove() */

/* This function is called on PCI suspend */
static int __maybe_unused twa_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);

	TW_DISABLE_INTERRUPTS(tw_dev);
	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);

	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);

	/* Tell the card we are shutting down */
	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
	} else {
		printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
	}
	TW_CLEAR_ALL_INTERRUPTS(tw_dev);

	return 0;
} /* End twa_suspend() */
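
/*
 * Suspend mirrors __twa_shutdown(): interrupts are disabled, the IRQ (and
 * MSI vector, if one was enabled) is released, and the firmware is told the
 * connection is going away.  twa_resume() below undoes this by re-running
 * the full reset sequence and re-requesting the interrupt, so the controller
 * comes back in essentially the state a fresh probe would leave it in.
 */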

/* This function is called on PCI resume */
static int __maybe_unused twa_resume(struct device *dev)
{
	int retval = 0;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);

	pci_try_set_mwi(pdev);

	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (retval)
		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (retval) {
		TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
		retval = -ENODEV;
		goto out_disable_device;
	}

	/* Initialize the card */
	if (twa_reset_sequence(tw_dev, 0)) {
		retval = -ENODEV;
		goto out_disable_device;
	}

	/* Now setup the interrupt handler */
	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
		retval = -ENODEV;
		goto out_disable_device;
	}

	/* Now enable MSI if enabled */
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_enable_msi(pdev);

	/* Re-enable interrupts on the card */
	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);

	printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
	return 0;

out_disable_device:
	scsi_remove_host(host);

	return retval;
} /* End twa_resume() */

/* PCI Devices supported by this driver */
static struct pci_device_id twa_pci_tbl[] = {
	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ }
};
MODULE_DEVICE_TABLE(pci, twa_pci_tbl);

static SIMPLE_DEV_PM_OPS(twa_pm_ops, twa_suspend, twa_resume);

/* pci_driver initializer */
static struct pci_driver twa_driver = {
	.name = "3w-9xxx",
	.id_table = twa_pci_tbl,
	.probe = twa_probe,
	.remove = twa_remove,
	.driver.pm = &twa_pm_ops,
	.shutdown = twa_shutdown
};

/* This function is called on driver initialization */
static int __init twa_init(void)
{
	printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);

	return pci_register_driver(&twa_driver);
} /* End twa_init() */

/* This function is called on driver exit */
static void __exit twa_exit(void)
{
	pci_unregister_driver(&twa_driver);
} /* End twa_exit() */

module_init(twa_init);
module_exit(twa_exit);
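
/*
 * MODULE_DEVICE_TABLE(pci, twa_pci_tbl) exports the IDs above as module
 * aliases, so udev/modprobe can load 3w-9xxx automatically when a supported
 * 9000/9550SX/9650SE/9690SA controller is present.  Loading by hand works
 * the same way, e.g. "modprobe 3w-9xxx" (optionally with use_msi=1 on boards
 * other than the original 9000 series, as checked in twa_probe()).
 */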