1 /*
2    3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
3 
4    Written By: Adam Radford <linuxraid@lsi.com>
5    Modifications By: Tom Couch <linuxraid@lsi.com>
6 
7    Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
8    Copyright (C) 2010 LSI Corporation.
9 
10    This program is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation; version 2 of the License.
13 
14    This program is distributed in the hope that it will be useful,
15    but WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17    GNU General Public License for more details.
18 
19    NO WARRANTY
20    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24    solely responsible for determining the appropriateness of using and
25    distributing the Program and assumes all risks associated with its
26    exercise of rights under this Agreement, including but not limited to
27    the risks and costs of program errors, damage to or loss of data,
28    programs or equipment, and unavailability or interruption of operations.
29 
30    DISCLAIMER OF LIABILITY
31    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 
39    You should have received a copy of the GNU General Public License
40    along with this program; if not, write to the Free Software
41    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
42 
43    Bugs/Comments/Suggestions should be mailed to:
44    linuxraid@lsi.com
45 
46    For more information, go to:
47    http://www.lsi.com
48 
49    Note: This version of the driver does not contain a bundled firmware
50          image.
51 
52    History
53    -------
54    2.26.02.000 - Driver cleanup for kernel submission.
55    2.26.02.001 - Replace schedule_timeout() calls with msleep().
56    2.26.02.002 - Add support for PAE mode.
57                  Add lun support.
58                  Fix twa_remove() to free irq handler/unregister_chrdev()
59                  before shutting down card.
60                  Change to new 'change_queue_depth' api.
61                  Fix 'handled=1' ISR usage, remove bogus IRQ check.
62                  Remove un-needed eh_abort handler.
63                  Add support for embedded firmware error strings.
64    2.26.02.003 - Correctly handle single sgl's with use_sg=1.
65    2.26.02.004 - Add support for 9550SX controllers.
66    2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
67    2.26.02.006 - Fix 9550SX pchip reset timeout.
68                  Add big endian support.
69    2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
70    2.26.02.008 - Free irq handler in __twa_shutdown().
71                  Serialize reset code.
72                  Add support for 9650SE controllers.
73    2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
74    2.26.02.010 - Add support for 9690SA controllers.
75    2.26.02.011 - Increase max AENs drained to 256.
76                  Add MSI support and "use_msi" module parameter.
77                  Fix bug in twa_get_param() on 4GB+.
78                  Use pci_resource_len() for ioremap().
79    2.26.02.012 - Add power management support.
80    2.26.02.013 - Fix bug in twa_load_sgl().
81    2.26.02.014 - Force 60 second timeout default.
82 */
83 
84 #include <linux/module.h>
85 #include <linux/reboot.h>
86 #include <linux/spinlock.h>
87 #include <linux/interrupt.h>
88 #include <linux/moduleparam.h>
89 #include <linux/errno.h>
90 #include <linux/types.h>
91 #include <linux/delay.h>
92 #include <linux/pci.h>
93 #include <linux/time.h>
94 #include <linux/mutex.h>
95 #include <linux/slab.h>
96 #include <asm/io.h>
97 #include <asm/irq.h>
98 #include <asm/uaccess.h>
99 #include <scsi/scsi.h>
100 #include <scsi/scsi_host.h>
101 #include <scsi/scsi_tcq.h>
102 #include <scsi/scsi_cmnd.h>
103 #include "3w-9xxx.h"
104 
105 /* Globals */
106 #define TW_DRIVER_VERSION "2.26.02.014"
107 static DEFINE_MUTEX(twa_chrdev_mutex);
108 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
109 static unsigned int twa_device_extension_count;
110 static int twa_major = -1;
111 extern struct timezone sys_tz;
112 
113 /* Module parameters */
114 MODULE_AUTHOR ("LSI");
115 MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
116 MODULE_LICENSE("GPL");
117 MODULE_VERSION(TW_DRIVER_VERSION);
118 
119 static int use_msi = 0;
120 module_param(use_msi, int, S_IRUGO);
121 MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts.  Default: 0");
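
/* Usage example: use_msi is read-only once loaded (S_IRUGO), so MSI has to
   be requested at module load time, e.g.:

     modprobe 3w-9xxx use_msi=1
*/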
122 
123 /* Function prototypes */
124 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
125 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
126 static char *twa_aen_severity_lookup(unsigned char severity_code);
127 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
128 static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
129 static int twa_chrdev_open(struct inode *inode, struct file *file);
130 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
131 static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
132 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
133 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
134 			      u32 set_features, unsigned short current_fw_srl,
135 			      unsigned short current_fw_arch_id,
136 			      unsigned short current_fw_branch,
137 			      unsigned short current_fw_build,
138 			      unsigned short *fw_on_ctlr_srl,
139 			      unsigned short *fw_on_ctlr_arch_id,
140 			      unsigned short *fw_on_ctlr_branch,
141 			      unsigned short *fw_on_ctlr_build,
142 			      u32 *init_connect_result);
143 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
144 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
145 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
146 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
147 static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
148 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
149 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
150 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
151 static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
152 static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);
153 
154 /* Functions */
155 
156 /* Show some statistics about the card */
157 static ssize_t twa_show_stats(struct device *dev,
158 			      struct device_attribute *attr, char *buf)
159 {
160 	struct Scsi_Host *host = class_to_shost(dev);
161 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
162 	unsigned long flags = 0;
163 	ssize_t len;
164 
165 	spin_lock_irqsave(tw_dev->host->host_lock, flags);
166 	len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
167 		       "Current commands posted:   %4d\n"
168 		       "Max commands posted:       %4d\n"
169 		       "Current pending commands:  %4d\n"
170 		       "Max pending commands:      %4d\n"
171 		       "Last sgl length:           %4d\n"
172 		       "Max sgl length:            %4d\n"
173 		       "Last sector count:         %4d\n"
174 		       "Max sector count:          %4d\n"
175 		       "SCSI Host Resets:          %4d\n"
176 		       "AEN's:                     %4d\n",
177 		       TW_DRIVER_VERSION,
178 		       tw_dev->posted_request_count,
179 		       tw_dev->max_posted_request_count,
180 		       tw_dev->pending_request_count,
181 		       tw_dev->max_pending_request_count,
182 		       tw_dev->sgl_entries,
183 		       tw_dev->max_sgl_entries,
184 		       tw_dev->sector_count,
185 		       tw_dev->max_sector_count,
186 		       tw_dev->num_resets,
187 		       tw_dev->aen_count);
188 	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
189 	return len;
190 } /* End twa_show_stats() */
191 
192 /* Create sysfs 'stats' entry */
193 static struct device_attribute twa_host_stats_attr = {
194 	.attr = {
195 		.name = 	"stats",
196 		.mode =		S_IRUGO,
197 	},
198 	.show = twa_show_stats
199 };
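
/* The attribute is exported through the SCSI host's sysfs directory, so the
   counters above can typically be read with something like:

     cat /sys/class/scsi_host/host<N>/stats

   where <N> is the host number assigned when the controller was probed */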
200 
201 /* Host attributes initializer */
202 static struct device_attribute *twa_host_attrs[] = {
203 	&twa_host_stats_attr,
204 	NULL,
205 };
206 
207 /* File operations struct for character device */
208 static const struct file_operations twa_fops = {
209 	.owner		= THIS_MODULE,
210 	.unlocked_ioctl	= twa_chrdev_ioctl,
211 	.open		= twa_chrdev_open,
212 	.release	= NULL,
213 	.llseek		= noop_llseek,
214 };
215 
216 /* This function will complete an AEN request from the ISR */
217 static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
218 {
219 	TW_Command_Full *full_command_packet;
220 	TW_Command *command_packet;
221 	TW_Command_Apache_Header *header;
222 	unsigned short aen;
223 	int retval = 1;
224 
225 	header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
226 	tw_dev->posted_request_count--;
227 	aen = le16_to_cpu(header->status_block.error);
228 	full_command_packet = tw_dev->command_packet_virt[request_id];
229 	command_packet = &full_command_packet->command.oldcommand;
230 
231 	/* First check for internal completion of set param for time sync */
232 	if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
233 		/* Keep reading the queue in case there are more AENs */
234 		if (twa_aen_read_queue(tw_dev, request_id))
235 			goto out2;
236 		else {
237 			retval = 0;
238 			goto out;
239 		}
240 	}
241 
242 	switch (aen) {
243 	case TW_AEN_QUEUE_EMPTY:
244 		/* Quit reading the queue if this is the last one */
245 		break;
246 	case TW_AEN_SYNC_TIME_WITH_HOST:
247 		twa_aen_sync_time(tw_dev, request_id);
248 		retval = 0;
249 		goto out;
250 	default:
251 		twa_aen_queue_event(tw_dev, header);
252 
253 		/* If there are more AENs, keep reading the queue */
254 		if (twa_aen_read_queue(tw_dev, request_id))
255 			goto out2;
256 		else {
257 			retval = 0;
258 			goto out;
259 		}
260 	}
261 	retval = 0;
262 out2:
263 	tw_dev->state[request_id] = TW_S_COMPLETED;
264 	twa_free_request_id(tw_dev, request_id);
265 	clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
266 out:
267 	return retval;
268 } /* End twa_aen_complete() */
269 
270 /* This function will drain the AEN queue */
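/* The queue is drained by repeatedly posting an internal REQUEST_SENSE
   command and polling for its completion; each response carries one AEN in
   the returned status block, most of which are recorded with
   twa_aen_queue_event().  Draining stops once TW_AEN_QUEUE_EMPTY is seen,
   and fails if TW_MAX_AEN_DRAIN responses are consumed without seeing it */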
271 static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
272 {
273 	int request_id = 0;
274 	char cdb[TW_MAX_CDB_LEN];
275 	TW_SG_Entry sglist[1];
276 	int finished = 0, count = 0;
277 	TW_Command_Full *full_command_packet;
278 	TW_Command_Apache_Header *header;
279 	unsigned short aen;
280 	int first_reset = 0, queue = 0, retval = 1;
281 
282 	if (no_check_reset)
283 		first_reset = 0;
284 	else
285 		first_reset = 1;
286 
287 	full_command_packet = tw_dev->command_packet_virt[request_id];
288 	memset(full_command_packet, 0, sizeof(TW_Command_Full));
289 
290 	/* Initialize cdb */
291 	memset(&cdb, 0, TW_MAX_CDB_LEN);
292 	cdb[0] = REQUEST_SENSE; /* opcode */
293 	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
294 
295 	/* Initialize sglist */
296 	memset(&sglist, 0, sizeof(TW_SG_Entry));
297 	sglist[0].length = TW_SECTOR_SIZE;
298 	sglist[0].address = tw_dev->generic_buffer_phys[request_id];
299 
300 	if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
301 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
302 		goto out;
303 	}
304 
305 	/* Mark internal command */
306 	tw_dev->srb[request_id] = NULL;
307 
308 	do {
309 		/* Send command to the board */
310 		if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
311 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
312 			goto out;
313 		}
314 
315 		/* Now poll for completion */
316 		if (twa_poll_response(tw_dev, request_id, 30)) {
317 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
318 			tw_dev->posted_request_count--;
319 			goto out;
320 		}
321 
322 		tw_dev->posted_request_count--;
323 		header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
324 		aen = le16_to_cpu(header->status_block.error);
325 		queue = 0;
326 		count++;
327 
328 		switch (aen) {
329 		case TW_AEN_QUEUE_EMPTY:
330 			if (first_reset != 1)
331 				goto out;
332 			else
333 				finished = 1;
334 			break;
335 		case TW_AEN_SOFT_RESET:
336 			if (first_reset == 0)
337 				first_reset = 1;
338 			else
339 				queue = 1;
340 			break;
341 		case TW_AEN_SYNC_TIME_WITH_HOST:
342 			break;
343 		default:
344 			queue = 1;
345 		}
346 
347 		/* Now queue the event info */
348 		if (queue)
349 			twa_aen_queue_event(tw_dev, header);
350 	} while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
351 
352 	if (count == TW_MAX_AEN_DRAIN)
353 		goto out;
354 
355 	retval = 0;
356 out:
357 	tw_dev->state[request_id] = TW_S_INITIAL;
358 	return retval;
359 } /* End twa_aen_drain_queue() */
360 
361 /* This function will queue an event */
362 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
363 {
364 	u32 local_time;
365 	struct timeval time;
366 	TW_Event *event;
367 	unsigned short aen;
368 	char host[16];
369 	char *error_str;
370 
371 	tw_dev->aen_count++;
372 
373 	/* Fill out event info */
374 	event = tw_dev->event_queue[tw_dev->error_index];
375 
376 	/* Check for clobber */
377 	host[0] = '\0';
378 	if (tw_dev->host) {
379 		sprintf(host, " scsi%d:", tw_dev->host->host_no);
380 		if (event->retrieved == TW_AEN_NOT_RETRIEVED)
381 			tw_dev->aen_clobber = 1;
382 	}
383 
384 	aen = le16_to_cpu(header->status_block.error);
385 	memset(event, 0, sizeof(TW_Event));
386 
387 	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
388 	do_gettimeofday(&time);
389 	local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
390 	event->time_stamp_sec = local_time;
391 	event->aen_code = aen;
392 	event->retrieved = TW_AEN_NOT_RETRIEVED;
393 	event->sequence_id = tw_dev->error_sequence_id;
394 	tw_dev->error_sequence_id++;
395 
396 	/* Check for embedded error string */
397 	error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
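	/* err_specific_desc can hold two consecutive NUL-terminated strings;
	   error_str points just past the first terminator, and an empty
	   string there means no embedded error text was supplied */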
398 
399 	header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
400 	event->parameter_len = strlen(header->err_specific_desc);
401 	memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
402 	if (event->severity != TW_AEN_SEVERITY_DEBUG)
403 		printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
404 		       host,
405 		       twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
406 		       TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
407 		       error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
408 		       header->err_specific_desc);
409 	else
410 		tw_dev->aen_count--;
411 
412 	if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
413 		tw_dev->event_queue_wrapped = 1;
414 	tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
415 } /* End twa_aen_queue_event() */
416 
417 /* This function will read the AEN queue from the ISR */
418 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
419 {
420 	char cdb[TW_MAX_CDB_LEN];
421 	TW_SG_Entry sglist[1];
422 	TW_Command_Full *full_command_packet;
423 	int retval = 1;
424 
425 	full_command_packet = tw_dev->command_packet_virt[request_id];
426 	memset(full_command_packet, 0, sizeof(TW_Command_Full));
427 
428 	/* Initialize cdb */
429 	memset(&cdb, 0, TW_MAX_CDB_LEN);
430 	cdb[0] = REQUEST_SENSE; /* opcode */
431 	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
432 
433 	/* Initialize sglist */
434 	memset(&sglist, 0, sizeof(TW_SG_Entry));
435 	sglist[0].length = TW_SECTOR_SIZE;
436 	sglist[0].address = tw_dev->generic_buffer_phys[request_id];
437 
438 	/* Mark internal command */
439 	tw_dev->srb[request_id] = NULL;
440 
441 	/* Now post the command packet */
442 	if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
443 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
444 		goto out;
445 	}
446 	retval = 0;
447 out:
448 	return retval;
449 } /* End twa_aen_read_queue() */
450 
451 /* This function will look up an AEN severity string */
452 static char *twa_aen_severity_lookup(unsigned char severity_code)
453 {
454 	char *retval = NULL;
455 
456 	if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
457 	    (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
458 		goto out;
459 
460 	retval = twa_aen_severity_table[severity_code];
461 out:
462 	return retval;
463 } /* End twa_aen_severity_lookup() */
464 
465 /* This function will sync firmware time with the host time */
466 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
467 {
468 	u32 schedulertime;
469 	struct timeval utc;
470 	TW_Command_Full *full_command_packet;
471 	TW_Command *command_packet;
472 	TW_Param_Apache *param;
473 	u32 local_time;
474 
475 	/* Fill out the command packet */
476 	full_command_packet = tw_dev->command_packet_virt[request_id];
477 	memset(full_command_packet, 0, sizeof(TW_Command_Full));
478 	command_packet = &full_command_packet->command.oldcommand;
479 	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
480 	command_packet->request_id = request_id;
481 	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
482 	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
483 	command_packet->size = TW_COMMAND_SIZE;
484 	command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
485 
486 	/* Setup the param */
487 	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
488 	memset(param, 0, TW_SECTOR_SIZE);
489 	param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
490 	param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
491 	param->parameter_size_bytes = cpu_to_le16(4);
492 
493 	/* Convert system time in UTC to local time seconds since last
494            Sunday 12:00AM */
495 	do_gettimeofday(&utc);
496 	local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
497 	schedulertime = local_time - (3 * 86400);
498 	schedulertime = cpu_to_le32(schedulertime % 604800);
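	/* Worked example: the Unix epoch (Jan 1 1970) fell on a Thursday, so
	   the first Sunday 00:00 after the epoch is 3 days (3 * 86400
	   seconds) later; subtracting that offset and reducing modulo 604800
	   (seconds per week) gives the seconds elapsed since the most recent
	   Sunday midnight, local time */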
499 
500 	memcpy(param->data, &schedulertime, sizeof(u32));
501 
502 	/* Mark internal command */
503 	tw_dev->srb[request_id] = NULL;
504 
505 	/* Now post the command */
506 	twa_post_command_packet(tw_dev, request_id, 1);
507 } /* End twa_aen_sync_time() */
508 
509 /* This function will allocate memory and check if it is correctly aligned */
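/* A single coherent DMA region of size * TW_Q_LENGTH bytes is carved into
   TW_Q_LENGTH equal slots, one per request id: which == 0 fills the command
   packet tables, which == 1 the generic (sector-sized) buffers */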
510 static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
511 {
512 	int i;
513 	dma_addr_t dma_handle;
514 	unsigned long *cpu_addr;
515 	int retval = 1;
516 
517 	cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
518 	if (!cpu_addr) {
519 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
520 		goto out;
521 	}
522 
523 	if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
524 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
525 		pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
526 		goto out;
527 	}
528 
529 	memset(cpu_addr, 0, size*TW_Q_LENGTH);
530 
531 	for (i = 0; i < TW_Q_LENGTH; i++) {
532 		switch(which) {
533 		case 0:
534 			tw_dev->command_packet_phys[i] = dma_handle+(i*size);
535 			tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
536 			break;
537 		case 1:
538 			tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
539 			tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
540 			break;
541 		}
542 	}
543 	retval = 0;
544 out:
545 	return retval;
546 } /* End twa_allocate_memory() */
547 
548 /* This function will check the status register for unexpected bits */
549 static int twa_check_bits(u32 status_reg_value)
550 {
551 	int retval = 1;
552 
553 	if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
554 		goto out;
555 	if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
556 		goto out;
557 
558 	retval = 0;
559 out:
560 	return retval;
561 } /* End twa_check_bits() */
562 
563 /* This function will check the SRL and decide if we are compatible */
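/* Negotiation is two-step: an extended init-connection is first attempted
   with the driver's current SRL/branch/build; if the firmware does not
   report TW_CTLR_FW_COMPATIBLE, a second attempt is made with the older
   base-mode values, and only if that also fails is the controller
   rejected */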
564 static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
565 {
566 	int retval = 1;
567 	unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
568 	unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
569 	u32 init_connect_result = 0;
570 
571 	if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
572 			       TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
573 			       TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
574 			       TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
575 			       &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
576 			       &fw_on_ctlr_build, &init_connect_result)) {
577 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
578 		goto out;
579 	}
580 
581 	tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
582 	tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
583 	tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
584 
585 	/* Try base mode compatibility */
586 	if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
587 		if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
588 				       TW_EXTENDED_INIT_CONNECT,
589 				       TW_BASE_FW_SRL, TW_9000_ARCH_ID,
590 				       TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
591 				       &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
592 				       &fw_on_ctlr_branch, &fw_on_ctlr_build,
593 				       &init_connect_result)) {
594 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
595 			goto out;
596 		}
597 		if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
598 			if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
599 				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
600 			} else {
601 				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
602 			}
603 			goto out;
604 		}
605 		tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
606 		tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
607 		tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
608 	}
609 
610 	/* Load rest of compatibility struct */
611 	strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
612 		sizeof(tw_dev->tw_compat_info.driver_version));
613 	tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
614 	tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
615 	tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
616 	tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
617 	tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
618 	tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
619 	tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
620 	tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
621 	tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
622 
623 	retval = 0;
624 out:
625 	return retval;
626 } /* End twa_check_srl() */
627 
628 /* This function handles ioctl for the character device */
629 static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
630 {
631 	struct inode *inode = file_inode(file);
632 	long timeout;
633 	unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
634 	dma_addr_t dma_handle;
635 	int request_id = 0;
636 	unsigned int sequence_id = 0;
637 	unsigned char event_index, start_index;
638 	TW_Ioctl_Driver_Command driver_command;
639 	TW_Ioctl_Buf_Apache *tw_ioctl;
640 	TW_Lock *tw_lock;
641 	TW_Command_Full *full_command_packet;
642 	TW_Compatibility_Info *tw_compat_info;
643 	TW_Event *event;
644 	struct timeval current_time;
645 	u32 current_time_ms;
646 	TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
647 	int retval = TW_IOCTL_ERROR_OS_EFAULT;
648 	void __user *argp = (void __user *)arg;
649 
650 	mutex_lock(&twa_chrdev_mutex);
651 
652 	/* Only let one of these through at a time */
653 	if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
654 		retval = TW_IOCTL_ERROR_OS_EINTR;
655 		goto out;
656 	}
657 
658 	/* First copy down the driver command */
659 	if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
660 		goto out2;
661 
662 	/* Check data buffer size */
663 	if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
664 		retval = TW_IOCTL_ERROR_OS_EINVAL;
665 		goto out2;
666 	}
667 
668 	/* Hardware can only do transfers in multiples of 512 bytes */
669 	data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
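	/* (x + 511) & ~511 rounds up to the next multiple of 512: for
	   example, a 513-byte request becomes a 1024-byte transfer, while
	   512 stays 512 */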
670 
671 	/* Now allocate ioctl buf memory */
672 	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
673 	if (!cpu_addr) {
674 		retval = TW_IOCTL_ERROR_OS_ENOMEM;
675 		goto out2;
676 	}
677 
678 	tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
679 
680 	/* Now copy down the entire ioctl */
681 	if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
682 		goto out3;
683 
684 	/* See which ioctl we are doing */
685 	switch (cmd) {
686 	case TW_IOCTL_FIRMWARE_PASS_THROUGH:
687 		spin_lock_irqsave(tw_dev->host->host_lock, flags);
688 		twa_get_request_id(tw_dev, &request_id);
689 
690 		/* Flag internal command */
691 		tw_dev->srb[request_id] = NULL;
692 
693 		/* Flag chrdev ioctl */
694 		tw_dev->chrdev_request_id = request_id;
695 
696 		full_command_packet = &tw_ioctl->firmware_command;
697 
698 		/* Load request id and sglist for both command types */
699 		twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
700 
701 		memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
702 
703 		/* Now post the command packet to the controller */
704 		twa_post_command_packet(tw_dev, request_id, 1);
705 		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
706 
707 		timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
708 
709 		/* Now wait for command to complete */
710 		timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
711 
712 		/* We timed out, and didn't get an interrupt */
713 		if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
714 			/* Now we need to reset the board */
715 			printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
716 			       tw_dev->host->host_no, TW_DRIVER, 0x37,
717 			       cmd);
718 			retval = TW_IOCTL_ERROR_OS_EIO;
719 			twa_reset_device_extension(tw_dev);
720 			goto out3;
721 		}
722 
723 		/* Now copy in the command packet response */
724 		memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
725 
726 		/* Now complete the io */
727 		spin_lock_irqsave(tw_dev->host->host_lock, flags);
728 		tw_dev->posted_request_count--;
729 		tw_dev->state[request_id] = TW_S_COMPLETED;
730 		twa_free_request_id(tw_dev, request_id);
731 		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
732 		break;
733 	case TW_IOCTL_GET_COMPATIBILITY_INFO:
734 		tw_ioctl->driver_command.status = 0;
735 		/* Copy compatibility struct into ioctl data buffer */
736 		tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
737 		memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
738 		break;
739 	case TW_IOCTL_GET_LAST_EVENT:
740 		if (tw_dev->event_queue_wrapped) {
741 			if (tw_dev->aen_clobber) {
742 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
743 				tw_dev->aen_clobber = 0;
744 			} else
745 				tw_ioctl->driver_command.status = 0;
746 		} else {
747 			if (!tw_dev->error_index) {
748 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
749 				break;
750 			}
751 			tw_ioctl->driver_command.status = 0;
752 		}
753 		event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
754 		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
755 		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
756 		break;
757 	case TW_IOCTL_GET_FIRST_EVENT:
758 		if (tw_dev->event_queue_wrapped) {
759 			if (tw_dev->aen_clobber) {
760 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
761 				tw_dev->aen_clobber = 0;
762 			} else
763 				tw_ioctl->driver_command.status = 0;
764 			event_index = tw_dev->error_index;
765 		} else {
766 			if (!tw_dev->error_index) {
767 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
768 				break;
769 			}
770 			tw_ioctl->driver_command.status = 0;
771 			event_index = 0;
772 		}
773 		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
774 		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
775 		break;
776 	case TW_IOCTL_GET_NEXT_EVENT:
777 		event = (TW_Event *)tw_ioctl->data_buffer;
778 		sequence_id = event->sequence_id;
779 		tw_ioctl->driver_command.status = 0;
780 
781 		if (tw_dev->event_queue_wrapped) {
782 			if (tw_dev->aen_clobber) {
783 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
784 				tw_dev->aen_clobber = 0;
785 			}
786 			start_index = tw_dev->error_index;
787 		} else {
788 			if (!tw_dev->error_index) {
789 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
790 				break;
791 			}
792 			start_index = 0;
793 		}
794 		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
795 
796 		if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
797 			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
798 				tw_dev->aen_clobber = 1;
799 			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
800 			break;
801 		}
802 		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
803 		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
804 		break;
805 	case TW_IOCTL_GET_PREVIOUS_EVENT:
806 		event = (TW_Event *)tw_ioctl->data_buffer;
807 		sequence_id = event->sequence_id;
808 		tw_ioctl->driver_command.status = 0;
809 
810 		if (tw_dev->event_queue_wrapped) {
811 			if (tw_dev->aen_clobber) {
812 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
813 				tw_dev->aen_clobber = 0;
814 			}
815 			start_index = tw_dev->error_index;
816 		} else {
817 			if (!tw_dev->error_index) {
818 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
819 				break;
820 			}
821 			start_index = 0;
822 		}
823 		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
824 
825 		if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
826 			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
827 				tw_dev->aen_clobber = 1;
828 			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
829 			break;
830 		}
831 		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
832 		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
833 		break;
834 	case TW_IOCTL_GET_LOCK:
835 		tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
836 		do_gettimeofday(&current_time);
837 		current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
838 
839 		if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
840 			tw_dev->ioctl_sem_lock = 1;
841 			tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
842 			tw_ioctl->driver_command.status = 0;
843 			tw_lock->time_remaining_msec = tw_lock->timeout_msec;
844 		} else {
845 			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
846 			tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
847 		}
848 		break;
849 	case TW_IOCTL_RELEASE_LOCK:
850 		if (tw_dev->ioctl_sem_lock == 1) {
851 			tw_dev->ioctl_sem_lock = 0;
852 			tw_ioctl->driver_command.status = 0;
853 		} else {
854 			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
855 		}
856 		break;
857 	default:
858 		retval = TW_IOCTL_ERROR_OS_ENOTTY;
859 		goto out3;
860 	}
861 
862 	/* Now copy the entire response to userspace */
863 	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
864 		retval = 0;
865 out3:
866 	/* Now free ioctl buf memory */
867 	dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
868 out2:
869 	mutex_unlock(&tw_dev->ioctl_lock);
870 out:
871 	mutex_unlock(&twa_chrdev_mutex);
872 	return retval;
873 } /* End twa_chrdev_ioctl() */
874 
875 /* This function handles open for the character device */
876 /* NOTE that this function will race with remove. */
877 static int twa_chrdev_open(struct inode *inode, struct file *file)
878 {
879 	unsigned int minor_number;
880 	int retval = TW_IOCTL_ERROR_OS_ENODEV;
881 
882 	minor_number = iminor(inode);
883 	if (minor_number >= twa_device_extension_count)
884 		goto out;
885 	retval = 0;
886 out:
887 	return retval;
888 } /* End twa_chrdev_open() */
889 
890 /* This function will print readable messages from status register errors */
891 static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
892 {
893 	int retval = 1;
894 
895 	/* Check for various error conditions and handle them appropriately */
896 	if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
897 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
898 		writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
899 	}
900 
901 	if (status_reg_value & TW_STATUS_PCI_ABORT) {
902 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
903 		writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
904 		pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
905 	}
906 
907 	if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
908 		if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
909 		     (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
910 		    (!test_bit(TW_IN_RESET, &tw_dev->flags)))
911 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
912 		writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
913 	}
914 
915 	if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
916 		if (tw_dev->reset_print == 0) {
917 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
918 			tw_dev->reset_print = 1;
919 		}
920 		goto out;
921 	}
922 	retval = 0;
923 out:
924 	return retval;
925 } /* End twa_decode_bits() */
926 
927 /* This function will empty the response queue */
928 static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
929 {
930 	u32 status_reg_value, response_que_value;
931 	int count = 0, retval = 1;
932 
933 	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
934 
935 	while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
936 		response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
937 		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
938 		count++;
939 	}
940 	if (count == TW_MAX_RESPONSE_DRAIN)
941 		goto out;
942 
943 	retval = 0;
944 out:
945 	return retval;
946 } /* End twa_empty_response_queue() */
947 
948 /* This function will clear the pchip/response queue on 9550SX */
949 static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
950 {
951 	u32 response_que_value = 0;
952 	unsigned long before;
953 	int retval = 1;
954 
955 	if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
956 		before = jiffies;
957 		while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
958 			response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
959 			msleep(1);
960 			if (time_after(jiffies, before + HZ * 30))
961 				goto out;
962 		}
963 		/* P-chip settle time */
964 		msleep(500);
965 		retval = 0;
966 	} else
967 		retval = 0;
968 out:
969 	return retval;
970 } /* End twa_empty_response_queue_large() */
971 
972 /* This function passes sense keys from firmware to scsi layer */
973 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
974 {
975 	TW_Command_Full *full_command_packet;
976 	unsigned short error;
977 	int retval = 1;
978 	char *error_str;
979 
980 	full_command_packet = tw_dev->command_packet_virt[request_id];
981 
982 	/* Check for embedded error string */
983 	error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
984 
985 	/* Don't print error for Logical unit not supported during rollcall */
986 	error = le16_to_cpu(full_command_packet->header.status_block.error);
987 	if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
988 		if (print_host)
989 			printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
990 			       tw_dev->host->host_no,
991 			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
992 			       full_command_packet->header.status_block.error,
993 			       error_str[0] == '\0' ?
994 			       twa_string_lookup(twa_error_table,
995 						 full_command_packet->header.status_block.error) : error_str,
996 			       full_command_packet->header.err_specific_desc);
997 		else
998 			printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
999 			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1000 			       full_command_packet->header.status_block.error,
1001 			       error_str[0] == '\0' ?
1002 			       twa_string_lookup(twa_error_table,
1003 						 full_command_packet->header.status_block.error) : error_str,
1004 			       full_command_packet->header.err_specific_desc);
1005 	}
1006 
1007 	if (copy_sense) {
1008 		memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
1009 		tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
1010 		retval = TW_ISR_DONT_RESULT;
1011 		goto out;
1012 	}
1013 	retval = 0;
1014 out:
1015 	return retval;
1016 } /* End twa_fill_sense() */
1017 
1018 /* This function will free up device extension resources */
1019 static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1020 {
1021 	if (tw_dev->command_packet_virt[0])
1022 		pci_free_consistent(tw_dev->tw_pci_dev,
1023 				    sizeof(TW_Command_Full)*TW_Q_LENGTH,
1024 				    tw_dev->command_packet_virt[0],
1025 				    tw_dev->command_packet_phys[0]);
1026 
1027 	if (tw_dev->generic_buffer_virt[0])
1028 		pci_free_consistent(tw_dev->tw_pci_dev,
1029 				    TW_SECTOR_SIZE*TW_Q_LENGTH,
1030 				    tw_dev->generic_buffer_virt[0],
1031 				    tw_dev->generic_buffer_phys[0]);
1032 
1033 	kfree(tw_dev->event_queue[0]);
1034 } /* End twa_free_device_extension() */
1035 
1036 /* This function will free a request id */
1037 static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1038 {
1039 	tw_dev->free_queue[tw_dev->free_tail] = request_id;
1040 	tw_dev->state[request_id] = TW_S_FINISHED;
1041 	tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1042 } /* End twa_free_request_id() */
1043 
1044 /* This function will get parameter table entries from the firmware */
1045 static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1046 {
1047 	TW_Command_Full *full_command_packet;
1048 	TW_Command *command_packet;
1049 	TW_Param_Apache *param;
1050 	void *retval = NULL;
1051 
1052 	/* Setup the command packet */
1053 	full_command_packet = tw_dev->command_packet_virt[request_id];
1054 	memset(full_command_packet, 0, sizeof(TW_Command_Full));
1055 	command_packet = &full_command_packet->command.oldcommand;
1056 
1057 	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1058 	command_packet->size              = TW_COMMAND_SIZE;
1059 	command_packet->request_id        = request_id;
1060 	command_packet->byte6_offset.block_count = cpu_to_le16(1);
1061 
1062 	/* Now setup the param */
1063 	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1064 	memset(param, 0, TW_SECTOR_SIZE);
1065 	param->table_id = cpu_to_le16(table_id | 0x8000);
1066 	param->parameter_id = cpu_to_le16(parameter_id);
1067 	param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1068 
1069 	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1070 	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1071 
1072 	/* Post the command packet to the board */
1073 	twa_post_command_packet(tw_dev, request_id, 1);
1074 
1075 	/* Poll for completion */
1076 	if (twa_poll_response(tw_dev, request_id, 30))
1077 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1078 	else
1079 		retval = (void *)&(param->data[0]);
1080 
1081 	tw_dev->posted_request_count--;
1082 	tw_dev->state[request_id] = TW_S_INITIAL;
1083 
1084 	return retval;
1085 } /* End twa_get_param() */
1086 
1087 /* This function will assign an available request id */
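/* Request ids come from a circular FIFO (free_queue) shared with
   twa_free_request_id(): ids are taken from free_head here and returned at
   free_tail on completion */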
1088 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1089 {
1090 	*request_id = tw_dev->free_queue[tw_dev->free_head];
1091 	tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1092 	tw_dev->state[*request_id] = TW_S_STARTED;
1093 } /* End twa_get_request_id() */
1094 
1095 /* This function will send an initconnection command to controller */
1096 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1097 			      u32 set_features, unsigned short current_fw_srl,
1098 			      unsigned short current_fw_arch_id,
1099 			      unsigned short current_fw_branch,
1100 			      unsigned short current_fw_build,
1101 			      unsigned short *fw_on_ctlr_srl,
1102 			      unsigned short *fw_on_ctlr_arch_id,
1103 			      unsigned short *fw_on_ctlr_branch,
1104 			      unsigned short *fw_on_ctlr_build,
1105 			      u32 *init_connect_result)
1106 {
1107 	TW_Command_Full *full_command_packet;
1108 	TW_Initconnect *tw_initconnect;
1109 	int request_id = 0, retval = 1;
1110 
1111 	/* Initialize InitConnection command packet */
1112 	full_command_packet = tw_dev->command_packet_virt[request_id];
1113 	memset(full_command_packet, 0, sizeof(TW_Command_Full));
1114 	full_command_packet->header.header_desc.size_header = 128;
1115 
1116 	tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1117 	tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1118 	tw_initconnect->request_id = request_id;
1119 	tw_initconnect->message_credits = cpu_to_le16(message_credits);
1120 	tw_initconnect->features = set_features;
1121 
1122 	/* Turn on 64-bit sgl support if we need to */
1123 	tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
1124 
1125 	tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
1126 
1127 	if (set_features & TW_EXTENDED_INIT_CONNECT) {
1128 		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1129 		tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1130 		tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1131 		tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1132 		tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1133 	} else
1134 		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1135 
1136 	/* Send command packet to the board */
1137 	twa_post_command_packet(tw_dev, request_id, 1);
1138 
1139 	/* Poll for completion */
1140 	if (twa_poll_response(tw_dev, request_id, 30)) {
1141 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1142 	} else {
1143 		if (set_features & TW_EXTENDED_INIT_CONNECT) {
1144 			*fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1145 			*fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1146 			*fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1147 			*fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1148 			*init_connect_result = le32_to_cpu(tw_initconnect->result);
1149 		}
1150 		retval = 0;
1151 	}
1152 
1153 	tw_dev->posted_request_count--;
1154 	tw_dev->state[request_id] = TW_S_INITIAL;
1155 
1156 	return retval;
1157 } /* End twa_initconnection() */
1158 
1159 /* This function will initialize the fields of a device extension */
1160 static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1161 {
1162 	int i, retval = 1;
1163 
1164 	/* Initialize command packet buffers */
1165 	if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1166 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1167 		goto out;
1168 	}
1169 
1170 	/* Initialize generic buffer */
1171 	if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1172 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1173 		goto out;
1174 	}
1175 
1176 	/* Allocate event info space */
1177 	tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1178 	if (!tw_dev->event_queue[0]) {
1179 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1180 		goto out;
1181 	}
1182 
1183 
1184 	for (i = 0; i < TW_Q_LENGTH; i++) {
1185 		tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1186 		tw_dev->free_queue[i] = i;
1187 		tw_dev->state[i] = TW_S_INITIAL;
1188 	}
1189 
1190 	tw_dev->pending_head = TW_Q_START;
1191 	tw_dev->pending_tail = TW_Q_START;
1192 	tw_dev->free_head = TW_Q_START;
1193 	tw_dev->free_tail = TW_Q_START;
1194 	tw_dev->error_sequence_id = 1;
1195 	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1196 
1197 	mutex_init(&tw_dev->ioctl_lock);
1198 	init_waitqueue_head(&tw_dev->ioctl_wqueue);
1199 
1200 	retval = 0;
1201 out:
1202 	return retval;
1203 } /* End twa_initialize_device_extension() */
1204 
1205 /* This function is the interrupt service routine */
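/* Handling order: bail out if the interrupt is not ours or a reset is in
   flight, decode any status register error bits, then service host,
   attention (AEN), command (re-post pended requests) and response
   interrupts.  Response handling distinguishes internal commands (AEN and
   chrdev ioctl completions) from normal SCSI commands, which are completed
   back to the midlayer here */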
1206 static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1207 {
1208 	int request_id, error = 0;
1209 	u32 status_reg_value;
1210 	TW_Response_Queue response_que;
1211 	TW_Command_Full *full_command_packet;
1212 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1213 	int handled = 0;
1214 
1215 	/* Get the per adapter lock */
1216 	spin_lock(tw_dev->host->host_lock);
1217 
1218 	/* Read the registers */
1219 	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1220 
1221 	/* Check if this is our interrupt, otherwise bail */
1222 	if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1223 		goto twa_interrupt_bail;
1224 
1225 	handled = 1;
1226 
1227 	/* If we are resetting, bail */
1228 	if (test_bit(TW_IN_RESET, &tw_dev->flags))
1229 		goto twa_interrupt_bail;
1230 
1231 	/* Check controller for errors */
1232 	if (twa_check_bits(status_reg_value)) {
1233 		if (twa_decode_bits(tw_dev, status_reg_value)) {
1234 			TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1235 			goto twa_interrupt_bail;
1236 		}
1237 	}
1238 
1239 	/* Handle host interrupt */
1240 	if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1241 		TW_CLEAR_HOST_INTERRUPT(tw_dev);
1242 
1243 	/* Handle attention interrupt */
1244 	if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1245 		TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1246 		if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1247 			twa_get_request_id(tw_dev, &request_id);
1248 
1249 			error = twa_aen_read_queue(tw_dev, request_id);
1250 			if (error) {
1251 				tw_dev->state[request_id] = TW_S_COMPLETED;
1252 				twa_free_request_id(tw_dev, request_id);
1253 				clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1254 			}
1255 		}
1256 	}
1257 
1258 	/* Handle command interrupt */
1259 	if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1260 		TW_MASK_COMMAND_INTERRUPT(tw_dev);
1261 		/* Drain as many pending commands as we can */
1262 		while (tw_dev->pending_request_count > 0) {
1263 			request_id = tw_dev->pending_queue[tw_dev->pending_head];
1264 			if (tw_dev->state[request_id] != TW_S_PENDING) {
1265 				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1266 				TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1267 				goto twa_interrupt_bail;
1268 			}
1269 			if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
1270 				tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1271 				tw_dev->pending_request_count--;
1272 			} else {
1273 				/* If we get here, we will continue re-posting on the next command interrupt */
1274 				break;
1275 			}
1276 		}
1277 	}
1278 
1279 	/* Handle response interrupt */
1280 	if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1281 
1282 		/* Drain the response queue from the board */
1283 		while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1284 			/* Complete the response */
1285 			response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1286 			request_id = TW_RESID_OUT(response_que.response_id);
1287 			full_command_packet = tw_dev->command_packet_virt[request_id];
1288 			error = 0;
1289 			/* Check for command packet errors */
1290 			if (full_command_packet->command.newcommand.status != 0) {
1291 				if (tw_dev->srb[request_id] != NULL) {
1292 					error = twa_fill_sense(tw_dev, request_id, 1, 1);
1293 				} else {
1294 					/* Skip ioctl error prints */
1295 					if (request_id != tw_dev->chrdev_request_id) {
1296 						error = twa_fill_sense(tw_dev, request_id, 0, 1);
1297 					}
1298 				}
1299 			}
1300 
1301 			/* Check for correct state */
1302 			if (tw_dev->state[request_id] != TW_S_POSTED) {
1303 				if (tw_dev->srb[request_id] != NULL) {
1304 					TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1305 					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1306 					goto twa_interrupt_bail;
1307 				}
1308 			}
1309 
1310 			/* Check for internal command completion */
1311 			if (tw_dev->srb[request_id] == NULL) {
1312 				if (request_id != tw_dev->chrdev_request_id) {
1313 					if (twa_aen_complete(tw_dev, request_id))
1314 						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1315 				} else {
1316 					tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1317 					wake_up(&tw_dev->ioctl_wqueue);
1318 				}
1319 			} else {
1320 				struct scsi_cmnd *cmd;
1321 
1322 				cmd = tw_dev->srb[request_id];
1323 
1324 				twa_scsiop_execute_scsi_complete(tw_dev, request_id);
1325 				/* If no error, the command was a success */
1326 				if (error == 0) {
1327 					cmd->result = (DID_OK << 16);
1328 				}
1329 
1330 				/* If error, command failed */
1331 				if (error == 1) {
1332 					/* Ask for a host reset */
1333 					cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1334 				}
1335 
1336 				/* Report residual bytes for single sgl */
1337 				if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1338 					if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
1339 						scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
1340 				}
1341 
1342 				/* Now complete the io */
1343 				tw_dev->state[request_id] = TW_S_COMPLETED;
1344 				twa_free_request_id(tw_dev, request_id);
1345 				tw_dev->posted_request_count--;
1346 				tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
1347 				twa_unmap_scsi_data(tw_dev, request_id);
1348 			}
1349 
1350 			/* Check for valid status after each drain */
1351 			status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1352 			if (twa_check_bits(status_reg_value)) {
1353 				if (twa_decode_bits(tw_dev, status_reg_value)) {
1354 					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1355 					goto twa_interrupt_bail;
1356 				}
1357 			}
1358 		}
1359 	}
1360 
1361 twa_interrupt_bail:
1362 	spin_unlock(tw_dev->host->host_lock);
1363 	return IRQ_RETVAL(handled);
1364 } /* End twa_interrupt() */
1365 
1366 /* This function will load the request id and various sgls for ioctls */
1367 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1368 {
1369 	TW_Command *oldcommand;
1370 	TW_Command_Apache *newcommand;
1371 	TW_SG_Entry *sgl;
1372 	unsigned int pae = 0;
1373 
1374 	if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1375 		pae = 1;
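	/* pae is set when a 32-bit kernel uses 64-bit DMA addressing (e.g.
	   x86 PAE); an SG entry then occupies one extra 32-bit word, which
	   is presumably why pae is folded into the command size (and the
	   9690SA SGL placement) below */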
1376 
1377 	if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1378 		newcommand = &full_command_packet->command.newcommand;
1379 		newcommand->request_id__lunl =
1380 			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1381 		if (length) {
1382 			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1383 			newcommand->sg_list[0].length = cpu_to_le32(length);
1384 		}
1385 		newcommand->sgl_entries__lunh =
1386 			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
1387 	} else {
1388 		oldcommand = &full_command_packet->command.oldcommand;
1389 		oldcommand->request_id = request_id;
1390 
1391 		if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1392 			/* Load the sg list */
1393 			if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1394 				sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1395 			else
1396 				sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1397 			sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1398 			sgl->length = cpu_to_le32(length);
1399 
1400 			oldcommand->size += pae;
1401 		}
1402 	}
1403 } /* End twa_load_sgl() */
1404 
1405 /* This function will perform a pci-dma mapping for a scatter gather list */
1406 static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
1407 {
1408 	int use_sg;
1409 	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1410 
1411 	use_sg = scsi_dma_map(cmd);
1412 	if (!use_sg)
1413 		return 0;
1414 	else if (use_sg < 0) {
1415 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
1416 		return 0;
1417 	}
1418 
1419 	cmd->SCp.phase = TW_PHASE_SGLIST;
1420 	cmd->SCp.have_data_in = use_sg;
1421 
1422 	return use_sg;
1423 } /* End twa_map_scsi_sg_data() */
1424 
1425 /* This function will poll for a response interrupt of a request */
1426 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1427 {
1428 	int retval = 1, found = 0, response_request_id;
1429 	TW_Response_Queue response_queue;
1430 	TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1431 
1432 	if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1433 		response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1434 		response_request_id = TW_RESID_OUT(response_queue.response_id);
1435 		if (request_id != response_request_id) {
1436 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1437 			goto out;
1438 		}
1439 		if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1440 			if (full_command_packet->command.newcommand.status != 0) {
1441 				/* bad response */
1442 				twa_fill_sense(tw_dev, request_id, 0, 0);
1443 				goto out;
1444 			}
1445 			found = 1;
1446 		} else {
1447 			if (full_command_packet->command.oldcommand.status != 0) {
1448 				/* bad response */
1449 				twa_fill_sense(tw_dev, request_id, 0, 0);
1450 				goto out;
1451 			}
1452 			found = 1;
1453 		}
1454 	}
1455 
1456 	if (found)
1457 		retval = 0;
1458 out:
1459 	return retval;
1460 } /* End twa_poll_response() */
1461 
1462 /* This function will poll the status register for a flag */
1463 static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1464 {
1465 	u32 status_reg_value;
1466 	unsigned long before;
1467 	int retval = 1;
1468 
1469 	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1470 	before = jiffies;
1471 
1472 	if (twa_check_bits(status_reg_value))
1473 		twa_decode_bits(tw_dev, status_reg_value);
1474 
1475 	while ((status_reg_value & flag) != flag) {
1476 		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1477 
1478 		if (twa_check_bits(status_reg_value))
1479 			twa_decode_bits(tw_dev, status_reg_value);
1480 
1481 		if (time_after(jiffies, before + HZ * seconds))
1482 			goto out;
1483 
1484 		msleep(50);
1485 	}
1486 	retval = 0;
1487 out:
1488 	return retval;
1489 } /* End twa_poll_status() */
1490 
1491 /* This function will poll the status register for disappearance of a flag */
1492 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1493 {
1494 	u32 status_reg_value;
1495 	unsigned long before;
1496 	int retval = 1;
1497 
1498 	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1499 	before = jiffies;
1500 
1501 	if (twa_check_bits(status_reg_value))
1502 		twa_decode_bits(tw_dev, status_reg_value);
1503 
1504 	while ((status_reg_value & flag) != 0) {
1505 		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1506 		if (twa_check_bits(status_reg_value))
1507 			twa_decode_bits(tw_dev, status_reg_value);
1508 
1509 		if (time_after(jiffies, before + HZ * seconds))
1510 			goto out;
1511 
1512 		msleep(50);
1513 	}
1514 	retval = 0;
1515 out:
1516 	return retval;
1517 } /* End twa_poll_status_gone() */
1518 
1519 /* This function will attempt to post a command packet to the board */
1520 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1521 {
1522 	u32 status_reg_value;
1523 	dma_addr_t command_que_value;
1524 	int retval = 1;
1525 
1526 	command_que_value = tw_dev->command_packet_phys[request_id];
1527 
1528 	/* For 9650SE and 9690SA, write the low 4 bytes first */
1529 	if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1530 	    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1531 		command_que_value += TW_COMMAND_OFFSET;
1532 		writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
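		/* Only the low 4 bytes are written here; the matching high dword is
		   written further down, once the queue-full/pending checks pass */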
1533 	}
1534 
1535 	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1536 
1537 	if (twa_check_bits(status_reg_value))
1538 		twa_decode_bits(tw_dev, status_reg_value);
1539 
1540 	if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1541 
1542 		/* Only pend internal driver commands */
1543 		if (!internal) {
1544 			retval = SCSI_MLQUEUE_HOST_BUSY;
1545 			goto out;
1546 		}
1547 
1548 		/* Couldn't post the command packet, so we do it later */
1549 		if (tw_dev->state[request_id] != TW_S_PENDING) {
1550 			tw_dev->state[request_id] = TW_S_PENDING;
1551 			tw_dev->pending_request_count++;
1552 			if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1553 				tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1554 			}
1555 			tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1556 			tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1557 		}
1558 		TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1559 		goto out;
1560 	} else {
1561 		if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1562 		    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1563 			/* Now write upper 4 bytes */
1564 			writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1565 		} else {
1566 			if (sizeof(dma_addr_t) > 4) {
1567 				command_que_value += TW_COMMAND_OFFSET;
1568 				writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1569 				writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1570 			} else {
1571 				writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1572 			}
1573 		}
1574 		tw_dev->state[request_id] = TW_S_POSTED;
1575 		tw_dev->posted_request_count++;
1576 		if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1577 			tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1578 		}
1579 	}
1580 	retval = 0;
1581 out:
1582 	return retval;
1583 } /* End twa_post_command_packet() */
1584 
1585 /* This function will reset a device extension */
1586 static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1587 {
1588 	int i = 0;
1589 	int retval = 1;
1590 	unsigned long flags = 0;
1591 
1592 	set_bit(TW_IN_RESET, &tw_dev->flags);
1593 	TW_DISABLE_INTERRUPTS(tw_dev);
1594 	TW_MASK_COMMAND_INTERRUPT(tw_dev);
1595 	spin_lock_irqsave(tw_dev->host->host_lock, flags);
1596 
1597 	/* Abort all requests that are in progress */
1598 	for (i = 0; i < TW_Q_LENGTH; i++) {
1599 		if ((tw_dev->state[i] != TW_S_FINISHED) &&
1600 		    (tw_dev->state[i] != TW_S_INITIAL) &&
1601 		    (tw_dev->state[i] != TW_S_COMPLETED)) {
1602 			if (tw_dev->srb[i]) {
1603 				tw_dev->srb[i]->result = (DID_RESET << 16);
1604 				tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
1605 				twa_unmap_scsi_data(tw_dev, i);
1606 			}
1607 		}
1608 	}
1609 
1610 	/* Reset queues and counts */
1611 	for (i = 0; i < TW_Q_LENGTH; i++) {
1612 		tw_dev->free_queue[i] = i;
1613 		tw_dev->state[i] = TW_S_INITIAL;
1614 	}
1615 	tw_dev->free_head = TW_Q_START;
1616 	tw_dev->free_tail = TW_Q_START;
1617 	tw_dev->posted_request_count = 0;
1618 	tw_dev->pending_request_count = 0;
1619 	tw_dev->pending_head = TW_Q_START;
1620 	tw_dev->pending_tail = TW_Q_START;
1621 	tw_dev->reset_print = 0;
1622 
1623 	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1624 
1625 	if (twa_reset_sequence(tw_dev, 1))
1626 		goto out;
1627 
1628 	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1629 	clear_bit(TW_IN_RESET, &tw_dev->flags);
1630 	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1631 
1632 	retval = 0;
1633 out:
1634 	return retval;
1635 } /* End twa_reset_device_extension() */
1636 
1637 /* This function will reset a controller */
1638 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1639 {
1640 	int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1641 
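	/* Each failed step below forces a soft reset and another pass, up to
	   TW_MAX_RESET_TRIES attempts */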
1642 	while (tries < TW_MAX_RESET_TRIES) {
1643 		if (do_soft_reset) {
1644 			TW_SOFT_RESET(tw_dev);
1645 			/* Clear pchip/response queue on 9550SX */
1646 			if (twa_empty_response_queue_large(tw_dev)) {
1647 				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1648 				do_soft_reset = 1;
1649 				tries++;
1650 				continue;
1651 			}
1652 		}
1653 
1654 		/* Make sure controller is in a good state */
1655 		if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1656 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1657 			do_soft_reset = 1;
1658 			tries++;
1659 			continue;
1660 		}
1661 
1662 		/* Empty response queue */
1663 		if (twa_empty_response_queue(tw_dev)) {
1664 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1665 			do_soft_reset = 1;
1666 			tries++;
1667 			continue;
1668 		}
1669 
1670 		flashed = 0;
1671 
1672 		/* Check for compatibility/flash */
1673 		if (twa_check_srl(tw_dev, &flashed)) {
1674 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1675 			do_soft_reset = 1;
1676 			tries++;
1677 			continue;
1678 		} else {
1679 			if (flashed) {
1680 				tries++;
1681 				continue;
1682 			}
1683 		}
1684 
1685 		/* Drain the AEN queue */
1686 		if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1687 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1688 			do_soft_reset = 1;
1689 			tries++;
1690 			continue;
1691 		}
1692 
1693 		/* If we got here, the controller is in a good state */
1694 		retval = 0;
1695 		goto out;
1696 	}
1697 out:
1698 	return retval;
1699 } /* End twa_reset_sequence() */
1700 
1701 /* This function returns unit geometry in cylinders/heads/sectors */
1702 static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1703 {
1704 	int heads, sectors, cylinders;
1705 	TW_Device_Extension *tw_dev;
1706 
1707 	tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
1708 
1709 	if (capacity >= 0x200000) {
1710 		heads = 255;
1711 		sectors = 63;
1712 		cylinders = sector_div(capacity, heads * sectors);
1713 	} else {
1714 		heads = 64;
1715 		sectors = 32;
1716 		cylinders = sector_div(capacity, heads * sectors);
1717 	}
1718 
1719 	geom[0] = heads;
1720 	geom[1] = sectors;
1721 	geom[2] = cylinders;
1722 
1723 	return 0;
1724 } /* End twa_scsi_biosparam() */
1725 
1726 /* This is the new scsi eh reset function */
1727 static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1728 {
1729 	TW_Device_Extension *tw_dev = NULL;
1730 	int retval = FAILED;
1731 
1732 	tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1733 
1734 	tw_dev->num_resets++;
1735 
1736 	sdev_printk(KERN_WARNING, SCpnt->device,
1737 		"WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1738 		TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1739 
1740 	/* Make sure we are not issuing an ioctl or resetting from an ioctl */
1741 	mutex_lock(&tw_dev->ioctl_lock);
1742 
1743 	/* Now reset the card and some of the device extension data */
1744 	if (twa_reset_device_extension(tw_dev)) {
1745 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1746 		goto out;
1747 	}
1748 
1749 	retval = SUCCESS;
1750 out:
1751 	mutex_unlock(&tw_dev->ioctl_lock);
1752 	return retval;
1753 } /* End twa_scsi_eh_reset() */
1754 
1755 /* This is the main scsi queue function to handle scsi opcodes */
1756 static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1757 {
1758 	int request_id, retval;
1759 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1760 
1761 	/* If we are resetting due to a timed-out ioctl, report as busy */
1762 	if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1763 		retval = SCSI_MLQUEUE_HOST_BUSY;
1764 		goto out;
1765 	}
1766 
1767 	/* Check if this FW supports luns */
1768 	if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1769 		SCpnt->result = (DID_BAD_TARGET << 16);
1770 		done(SCpnt);
1771 		retval = 0;
1772 		goto out;
1773 	}
1774 
1775 	/* Save done function into scsi_cmnd struct */
1776 	SCpnt->scsi_done = done;
1777 
1778 	/* Get a free request id */
1779 	twa_get_request_id(tw_dev, &request_id);
1780 
1781 	/* Save the scsi command for use by the ISR */
1782 	tw_dev->srb[request_id] = SCpnt;
1783 
1784 	/* Initialize phase to zero */
1785 	SCpnt->SCp.phase = TW_PHASE_INITIAL;
1786 
1787 	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1788 	switch (retval) {
1789 	case SCSI_MLQUEUE_HOST_BUSY:
1790 		twa_free_request_id(tw_dev, request_id);
1791 		twa_unmap_scsi_data(tw_dev, request_id);
1792 		break;
1793 	case 1:
1794 		tw_dev->state[request_id] = TW_S_COMPLETED;
1795 		twa_free_request_id(tw_dev, request_id);
1796 		twa_unmap_scsi_data(tw_dev, request_id);
1797 		SCpnt->result = (DID_ERROR << 16);
1798 		done(SCpnt);
1799 		retval = 0;
1800 	}
1801 out:
1802 	return retval;
1803 } /* End twa_scsi_queue() */
1804 
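/* DEF_SCSI_QCMD() generates twa_scsi_queue(), a wrapper that takes the host
   lock around twa_scsi_queue_lck() */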
1805 static DEF_SCSI_QCMD(twa_scsi_queue)
1806 
1807 /* This function hands SCSI CDBs to the firmware */
1808 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
1809 {
1810 	TW_Command_Full *full_command_packet;
1811 	TW_Command_Apache *command_packet;
1812 	u32 num_sectors = 0x0;
1813 	int i, sg_count;
1814 	struct scsi_cmnd *srb = NULL;
1815 	struct scatterlist *sglist = NULL, *sg;
1816 	int retval = 1;
1817 
1818 	if (tw_dev->srb[request_id]) {
1819 		srb = tw_dev->srb[request_id];
1820 		if (scsi_sglist(srb))
1821 			sglist = scsi_sglist(srb);
1822 	}
1823 
1824 	/* Initialize command packet */
1825 	full_command_packet = tw_dev->command_packet_virt[request_id];
1826 	full_command_packet->header.header_desc.size_header = 128;
1827 	full_command_packet->header.status_block.error = 0;
1828 	full_command_packet->header.status_block.severity__reserved = 0;
1829 
1830 	command_packet = &full_command_packet->command.newcommand;
1831 	command_packet->status = 0;
1832 	command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1833 
1834 	/* We forced 16-byte CDB use earlier */
1835 	if (!cdb)
1836 		memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1837 	else
1838 		memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
1839 
1840 	if (srb) {
1841 		command_packet->unit = srb->device->id;
1842 		command_packet->request_id__lunl =
1843 			cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
1844 	} else {
1845 		command_packet->request_id__lunl =
1846 			cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
1847 		command_packet->unit = 0;
1848 	}
1849 
1850 	command_packet->sgl_offset = 16;
1851 
1852 	if (!sglistarg) {
1853 		/* Map sglist from scsi layer to cmd packet */
1854 
1855 		if (scsi_sg_count(srb)) {
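			/* Single-entry transfers smaller than TW_MIN_SGL_LENGTH are bounced
			   through the per-request generic buffer so the firmware always sees
			   a full-length SG entry; reads are copied back on completion */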
1856 			if ((scsi_sg_count(srb) == 1) &&
1857 			    (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
1858 				if (srb->sc_data_direction == DMA_TO_DEVICE ||
1859 				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
1860 					scsi_sg_copy_to_buffer(srb,
1861 							       tw_dev->generic_buffer_virt[request_id],
1862 							       TW_SECTOR_SIZE);
1863 				command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1864 				command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1865 			} else {
1866 				sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
1867 				if (sg_count == 0)
1868 					goto out;
1869 
1870 				scsi_for_each_sg(srb, sg, sg_count, i) {
1871 					command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1872 					command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
1873 					if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1874 						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1875 						goto out;
1876 					}
1877 				}
1878 			}
1879 			command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
1880 		}
1881 	} else {
1882 		/* Internal cdb post */
1883 		for (i = 0; i < use_sg; i++) {
1884 			command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
1885 			command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
1886 			if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1887 				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1888 				goto out;
1889 			}
1890 		}
1891 		command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
1892 	}
1893 
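	/* Pull the transfer length from the CDB purely for statistics: byte 4
	   for 6-byte CDBs, bytes 7-8 (big-endian) for 10-byte CDBs */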
1894 	if (srb) {
1895 		if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1896 			num_sectors = (u32)srb->cmnd[4];
1897 
1898 		if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1899 			num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1900 	}
1901 
1902 	/* Update sector statistics */
1903 	tw_dev->sector_count = num_sectors;
1904 	if (tw_dev->sector_count > tw_dev->max_sector_count)
1905 		tw_dev->max_sector_count = tw_dev->sector_count;
1906 
1907 	/* Update SG statistics */
1908 	if (srb) {
1909 		tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1910 		if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1911 			tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1912 	}
1913 
1914 	/* Now post the command to the board */
1915 	if (srb) {
1916 		retval = twa_post_command_packet(tw_dev, request_id, 0);
1917 	} else {
1918 		twa_post_command_packet(tw_dev, request_id, 1);
1919 		retval = 0;
1920 	}
1921 out:
1922 	return retval;
1923 } /* End twa_scsiop_execute_scsi() */
1924 
1925 /* This function completes an execute scsi operation */
1926 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1927 {
1928 	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1929 
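	/* Small single-entry reads were bounced through the generic buffer in
	   twa_scsiop_execute_scsi(); copy the data back to the caller's SG list */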
1930 	if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
1931 	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1932 	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1933 		if (scsi_sg_count(cmd) == 1) {
1934 			void *buf = tw_dev->generic_buffer_virt[request_id];
1935 
1936 			scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1937 		}
1938 	}
1939 } /* End twa_scsiop_execute_scsi_complete() */
1940 
1941 /* This function tells the controller to shut down */
1942 static void __twa_shutdown(TW_Device_Extension *tw_dev)
1943 {
1944 	/* Disable interrupts */
1945 	TW_DISABLE_INTERRUPTS(tw_dev);
1946 
1947 	/* Free up the IRQ */
1948 	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1949 
1950 	printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1951 
1952 	/* Tell the card we are shutting down */
1953 	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1954 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1955 	} else {
1956 		printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1957 	}
1958 
1959 	/* Clear all interrupts just before exit */
1960 	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1961 } /* End __twa_shutdown() */
1962 
1963 /* Wrapper for __twa_shutdown */
1964 static void twa_shutdown(struct pci_dev *pdev)
1965 {
1966 	struct Scsi_Host *host = pci_get_drvdata(pdev);
1967 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1968 
1969 	__twa_shutdown(tw_dev);
1970 } /* End twa_shutdown() */
1971 
1972 /* This function will look up a message string by code */
1973 static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1974 {
1975 	int index;
1976 
1977 	for (index = 0; ((code != table[index].code) &&
1978 		      (table[index].text != (char *)0)); index++);
1979 	return(table[index].text);
1980 } /* End twa_string_lookup() */
1981 
1982 /* This function will perform a pci-dma unmap */
1983 static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
1984 {
1985 	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1986 
1987 	if (cmd->SCp.phase == TW_PHASE_SGLIST)
1988 		scsi_dma_unmap(cmd);
1989 } /* End twa_unmap_scsi_data() */
1990 
1991 /* This function gets called when a disk is coming on-line */
1992 static int twa_slave_configure(struct scsi_device *sdev)
1993 {
1994 	/* Force 60 second timeout */
1995 	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
1996 
1997 	return 0;
1998 } /* End twa_slave_configure() */
1999 
2000 /* scsi_host_template initializer */
2001 static struct scsi_host_template driver_template = {
2002 	.module			= THIS_MODULE,
2003 	.name			= "3ware 9000 Storage Controller",
2004 	.queuecommand		= twa_scsi_queue,
2005 	.eh_host_reset_handler	= twa_scsi_eh_reset,
2006 	.bios_param		= twa_scsi_biosparam,
2007 	.change_queue_depth	= scsi_change_queue_depth,
2008 	.can_queue		= TW_Q_LENGTH-2,
2009 	.slave_configure	= twa_slave_configure,
2010 	.this_id		= -1,
2011 	.sg_tablesize		= TW_APACHE_MAX_SGL_LENGTH,
2012 	.max_sectors		= TW_MAX_SECTORS,
2013 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
2014 	.use_clustering		= ENABLE_CLUSTERING,
2015 	.shost_attrs		= twa_host_attrs,
2016 	.emulated		= 1,
2017 	.no_write_same		= 1,
2018 };
2019 
2020 /* This function will probe and initialize a card */
2021 static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2022 {
2023 	struct Scsi_Host *host = NULL;
2024 	TW_Device_Extension *tw_dev;
2025 	unsigned long mem_addr, mem_len;
2026 	int retval = -ENODEV;
2027 
2028 	retval = pci_enable_device(pdev);
2029 	if (retval) {
2030 		TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2031 		goto out_disable_device;
2032 	}
2033 
2034 	pci_set_master(pdev);
2035 	pci_try_set_mwi(pdev);
2036 
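	/* Prefer a 64-bit DMA mask, falling back to 32-bit if the platform
	   cannot provide one */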
2037 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2038 	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2039 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2040 		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2041 			TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2042 			retval = -ENODEV;
2043 			goto out_disable_device;
2044 		}
2045 
2046 	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2047 	if (!host) {
2048 		TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2049 		retval = -ENOMEM;
2050 		goto out_disable_device;
2051 	}
2052 	tw_dev = (TW_Device_Extension *)host->hostdata;
2053 
2054 	/* Save values to device extension */
2055 	tw_dev->host = host;
2056 	tw_dev->tw_pci_dev = pdev;
2057 
2058 	if (twa_initialize_device_extension(tw_dev)) {
2059 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2060 		goto out_free_device_extension;
2061 	}
2062 
2063 	/* Request IO regions */
2064 	retval = pci_request_regions(pdev, "3w-9xxx");
2065 	if (retval) {
2066 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2067 		goto out_free_device_extension;
2068 	}
2069 
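	/* The original 9000 exposes its registers in BAR 1; the newer controllers
	   in the PCI table use BAR 2 */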
2070 	if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2071 		mem_addr = pci_resource_start(pdev, 1);
2072 		mem_len = pci_resource_len(pdev, 1);
2073 	} else {
2074 		mem_addr = pci_resource_start(pdev, 2);
2075 		mem_len = pci_resource_len(pdev, 2);
2076 	}
2077 
2078 	/* Save base address */
2079 	tw_dev->base_addr = ioremap(mem_addr, mem_len);
2080 	if (!tw_dev->base_addr) {
2081 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2082 		goto out_release_mem_region;
2083 	}
2084 
2085 	/* Disable interrupts on the card */
2086 	TW_DISABLE_INTERRUPTS(tw_dev);
2087 
2088 	/* Initialize the card */
2089 	if (twa_reset_sequence(tw_dev, 0))
2090 		goto out_iounmap;
2091 
2092 	/* Set host specific parameters */
2093 	if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2094 	    (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2095 		host->max_id = TW_MAX_UNITS_9650SE;
2096 	else
2097 		host->max_id = TW_MAX_UNITS;
2098 
2099 	host->max_cmd_len = TW_MAX_CDB_LEN;
2100 
2101 	/* Channels aren't supported by the adapter */
2102 	host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2103 	host->max_channel = 0;
2104 
2105 	/* Register the card with the kernel SCSI layer */
2106 	retval = scsi_add_host(host, &pdev->dev);
2107 	if (retval) {
2108 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2109 		goto out_iounmap;
2110 	}
2111 
2112 	pci_set_drvdata(pdev, host);
2113 
2114 	printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2115 	       host->host_no, mem_addr, pdev->irq);
2116 	printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2117 	       host->host_no,
2118 	       (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2119 				     TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2120 	       (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2121 				     TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2122 	       le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2123 				     TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2124 
2125 	/* Try to enable MSI */
2126 	if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2127 	    !pci_enable_msi(pdev))
2128 		set_bit(TW_USING_MSI, &tw_dev->flags);
2129 
2130 	/* Now setup the interrupt handler */
2131 	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2132 	if (retval) {
2133 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2134 		goto out_remove_host;
2135 	}
2136 
2137 	twa_device_extension_list[twa_device_extension_count] = tw_dev;
2138 	twa_device_extension_count++;
2139 
2140 	/* Re-enable interrupts on the card */
2141 	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2142 
2143 	/* Finally, scan the host */
2144 	scsi_scan_host(host);
2145 
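	/* Register the "twa" control character device with a dynamically
	   allocated major the first time a controller is probed */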
2146 	if (twa_major == -1) {
2147 		if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
2148 			TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2149 	}
2150 	return 0;
2151 
2152 out_remove_host:
2153 	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2154 		pci_disable_msi(pdev);
2155 	scsi_remove_host(host);
2156 out_iounmap:
2157 	iounmap(tw_dev->base_addr);
2158 out_release_mem_region:
2159 	pci_release_regions(pdev);
2160 out_free_device_extension:
2161 	twa_free_device_extension(tw_dev);
2162 	scsi_host_put(host);
2163 out_disable_device:
2164 	pci_disable_device(pdev);
2165 
2166 	return retval;
2167 } /* End twa_probe() */
2168 
2169 /* This function is called to remove a device */
2170 static void twa_remove(struct pci_dev *pdev)
2171 {
2172 	struct Scsi_Host *host = pci_get_drvdata(pdev);
2173 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2174 
2175 	scsi_remove_host(tw_dev->host);
2176 
2177 	/* Unregister character device */
2178 	if (twa_major >= 0) {
2179 		unregister_chrdev(twa_major, "twa");
2180 		twa_major = -1;
2181 	}
2182 
2183 	/* Shutdown the card */
2184 	__twa_shutdown(tw_dev);
2185 
2186 	/* Disable MSI if enabled */
2187 	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2188 		pci_disable_msi(pdev);
2189 
2190 	/* Free IO remapping */
2191 	iounmap(tw_dev->base_addr);
2192 
2193 	/* Free up the mem region */
2194 	pci_release_regions(pdev);
2195 
2196 	/* Free up device extension resources */
2197 	twa_free_device_extension(tw_dev);
2198 
2199 	scsi_host_put(tw_dev->host);
2200 	pci_disable_device(pdev);
2201 	twa_device_extension_count--;
2202 } /* End twa_remove() */
2203 
2204 #ifdef CONFIG_PM
2205 /* This function is called on PCI suspend */
2206 static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
2207 {
2208 	struct Scsi_Host *host = pci_get_drvdata(pdev);
2209 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2210 
2211 	printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
2212 
2213 	TW_DISABLE_INTERRUPTS(tw_dev);
2214 	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
2215 
2216 	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2217 		pci_disable_msi(pdev);
2218 
2219 	/* Tell the card we are shutting down */
2220 	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
2221 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
2222 	} else {
2223 		printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
2224 	}
2225 	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
2226 
2227 	pci_save_state(pdev);
2228 	pci_disable_device(pdev);
2229 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
2230 
2231 	return 0;
2232 } /* End twa_suspend() */
2233 
2234 /* This function is called on PCI resume */
2235 static int twa_resume(struct pci_dev *pdev)
2236 {
2237 	int retval = 0;
2238 	struct Scsi_Host *host = pci_get_drvdata(pdev);
2239 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2240 
2241 	printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
2242 	pci_set_power_state(pdev, PCI_D0);
2243 	pci_enable_wake(pdev, PCI_D0, 0);
2244 	pci_restore_state(pdev);
2245 
2246 	retval = pci_enable_device(pdev);
2247 	if (retval) {
2248 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
2249 		return retval;
2250 	}
2251 
2252 	pci_set_master(pdev);
2253 	pci_try_set_mwi(pdev);
2254 
2255 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2256 	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2257 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2258 		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2259 			TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
2260 			retval = -ENODEV;
2261 			goto out_disable_device;
2262 		}
2263 
2264 	/* Initialize the card */
2265 	if (twa_reset_sequence(tw_dev, 0)) {
2266 		retval = -ENODEV;
2267 		goto out_disable_device;
2268 	}
2269 
2270 	/* Now setup the interrupt handler */
2271 	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2272 	if (retval) {
2273 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
2274 		retval = -ENODEV;
2275 		goto out_disable_device;
2276 	}
2277 
2278 	/* Re-enable MSI if it was in use before suspend */
2279 	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2280 		pci_enable_msi(pdev);
2281 
2282 	/* Re-enable interrupts on the card */
2283 	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2284 
2285 	printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
2286 	return 0;
2287 
2288 out_disable_device:
2289 	scsi_remove_host(host);
2290 	pci_disable_device(pdev);
2291 
2292 	return retval;
2293 } /* End twa_resume() */
2294 #endif
2295 
2296 /* PCI Devices supported by this driver */
2297 static struct pci_device_id twa_pci_tbl[] = {
2298 	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2299 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2300 	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2301 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2302 	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2303 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2304 	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2305 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2306 	{ }
2307 };
2308 MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2309 
2310 /* pci_driver initializer */
2311 static struct pci_driver twa_driver = {
2312 	.name		= "3w-9xxx",
2313 	.id_table	= twa_pci_tbl,
2314 	.probe		= twa_probe,
2315 	.remove		= twa_remove,
2316 #ifdef CONFIG_PM
2317 	.suspend	= twa_suspend,
2318 	.resume		= twa_resume,
2319 #endif
2320 	.shutdown	= twa_shutdown
2321 };
2322 
2323 /* This function is called on driver initialization */
2324 static int __init twa_init(void)
2325 {
2326 	printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2327 
2328 	return pci_register_driver(&twa_driver);
2329 } /* End twa_init() */
2330 
2331 /* This function is called on driver exit */
2332 static void __exit twa_exit(void)
2333 {
2334 	pci_unregister_driver(&twa_driver);
2335 } /* End twa_exit() */
2336 
2337 module_init(twa_init);
2338 module_exit(twa_exit);
2339 
2340