1 /*
2    3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
3 
4    Written By: Adam Radford <aradford@gmail.com>
5    Modifications By: Tom Couch
6 
7    Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
8    Copyright (C) 2010 LSI Corporation.
9 
10    This program is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation; version 2 of the License.
13 
14    This program is distributed in the hope that it will be useful,
15    but WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17    GNU General Public License for more details.
18 
19    NO WARRANTY
20    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24    solely responsible for determining the appropriateness of using and
25    distributing the Program and assumes all risks associated with its
26    exercise of rights under this Agreement, including but not limited to
27    the risks and costs of program errors, damage to or loss of data,
28    programs or equipment, and unavailability or interruption of operations.
29 
30    DISCLAIMER OF LIABILITY
31    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 
39    You should have received a copy of the GNU General Public License
40    along with this program; if not, write to the Free Software
41    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
42 
43    Bugs/Comments/Suggestions should be mailed to:
44    aradford@gmail.com
45 
46    Note: This version of the driver does not contain a bundled firmware
47          image.
48 
49    History
50    -------
51    2.26.02.000 - Driver cleanup for kernel submission.
52    2.26.02.001 - Replace schedule_timeout() calls with msleep().
53    2.26.02.002 - Add support for PAE mode.
54                  Add lun support.
55                  Fix twa_remove() to free irq handler/unregister_chrdev()
56                  before shutting down card.
57                  Change to new 'change_queue_depth' api.
58                  Fix 'handled=1' ISR usage, remove bogus IRQ check.
59                  Remove un-needed eh_abort handler.
60                  Add support for embedded firmware error strings.
61    2.26.02.003 - Correctly handle single sgl's with use_sg=1.
62    2.26.02.004 - Add support for 9550SX controllers.
63    2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
64    2.26.02.006 - Fix 9550SX pchip reset timeout.
65                  Add big endian support.
66    2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
67    2.26.02.008 - Free irq handler in __twa_shutdown().
68                  Serialize reset code.
69                  Add support for 9650SE controllers.
70    2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
71    2.26.02.010 - Add support for 9690SA controllers.
72    2.26.02.011 - Increase max AENs drained to 256.
73                  Add MSI support and "use_msi" module parameter.
74                  Fix bug in twa_get_param() on 4GB+.
75                  Use pci_resource_len() for ioremap().
76    2.26.02.012 - Add power management support.
77    2.26.02.013 - Fix bug in twa_load_sgl().
78    2.26.02.014 - Force 60 second timeout default.
79 */
80 
81 #include <linux/module.h>
82 #include <linux/reboot.h>
83 #include <linux/spinlock.h>
84 #include <linux/interrupt.h>
85 #include <linux/moduleparam.h>
86 #include <linux/errno.h>
87 #include <linux/types.h>
88 #include <linux/delay.h>
89 #include <linux/pci.h>
90 #include <linux/time.h>
91 #include <linux/mutex.h>
92 #include <linux/slab.h>
93 #include <asm/io.h>
94 #include <asm/irq.h>
95 #include <linux/uaccess.h>
96 #include <scsi/scsi.h>
97 #include <scsi/scsi_host.h>
98 #include <scsi/scsi_tcq.h>
99 #include <scsi/scsi_cmnd.h>
100 #include "3w-9xxx.h"
101 
102 /* Globals */
103 #define TW_DRIVER_VERSION "2.26.02.014"
104 static DEFINE_MUTEX(twa_chrdev_mutex);
105 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
106 static unsigned int twa_device_extension_count;
107 static int twa_major = -1;
108 extern struct timezone sys_tz;
109 
110 /* Module parameters */
111 MODULE_AUTHOR ("LSI");
112 MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
113 MODULE_LICENSE("GPL");
114 MODULE_VERSION(TW_DRIVER_VERSION);
115 
116 static int use_msi = 0;
117 module_param(use_msi, int, S_IRUGO);
118 MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts.  Default: 0");
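/* Typical usage (illustrative): load the module with MSI enabled via
   "modprobe 3w-9xxx use_msi=1".  The parameter is read-only at runtime
   (S_IRUGO), so it can only be chosen at module load time. */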
119 
120 /* Function prototypes */
121 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
122 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
123 static char *twa_aen_severity_lookup(unsigned char severity_code);
124 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
125 static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
126 static int twa_chrdev_open(struct inode *inode, struct file *file);
127 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
128 static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
129 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
130 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
131  			      u32 set_features, unsigned short current_fw_srl,
132 			      unsigned short current_fw_arch_id,
133 			      unsigned short current_fw_branch,
134 			      unsigned short current_fw_build,
135 			      unsigned short *fw_on_ctlr_srl,
136 			      unsigned short *fw_on_ctlr_arch_id,
137 			      unsigned short *fw_on_ctlr_branch,
138 			      unsigned short *fw_on_ctlr_build,
139 			      u32 *init_connect_result);
140 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
141 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
142 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
143 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
144 static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
145 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
146 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
147 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
148 static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
149 
150 /* Functions */
151 
152 /* Show some statistics about the card */
153 static ssize_t twa_show_stats(struct device *dev,
154 			      struct device_attribute *attr, char *buf)
155 {
156 	struct Scsi_Host *host = class_to_shost(dev);
157 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
158 	unsigned long flags = 0;
159 	ssize_t len;
160 
161 	spin_lock_irqsave(tw_dev->host->host_lock, flags);
162 	len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
163 		       "Current commands posted:   %4d\n"
164 		       "Max commands posted:       %4d\n"
165 		       "Current pending commands:  %4d\n"
166 		       "Max pending commands:      %4d\n"
167 		       "Last sgl length:           %4d\n"
168 		       "Max sgl length:            %4d\n"
169 		       "Last sector count:         %4d\n"
170 		       "Max sector count:          %4d\n"
171 		       "SCSI Host Resets:          %4d\n"
172 		       "AEN's:                     %4d\n",
173 		       TW_DRIVER_VERSION,
174 		       tw_dev->posted_request_count,
175 		       tw_dev->max_posted_request_count,
176 		       tw_dev->pending_request_count,
177 		       tw_dev->max_pending_request_count,
178 		       tw_dev->sgl_entries,
179 		       tw_dev->max_sgl_entries,
180 		       tw_dev->sector_count,
181 		       tw_dev->max_sector_count,
182 		       tw_dev->num_resets,
183 		       tw_dev->aen_count);
184 	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
185 	return len;
186 } /* End twa_show_stats() */
187 
188 /* Create sysfs 'stats' entry */
189 static struct device_attribute twa_host_stats_attr = {
190 	.attr = {
191 		.name = 	"stats",
192 		.mode =		S_IRUGO,
193 	},
194 	.show = twa_show_stats
195 };
196 
197 /* Host attributes initializer */
198 static struct device_attribute *twa_host_attrs[] = {
199 	&twa_host_stats_attr,
200 	NULL,
201 };
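/* These host attributes are exported through the SCSI midlayer, so the
   statistics above are typically readable from userspace at
   /sys/class/scsi_host/host<N>/stats (path shown for illustration). */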
202 
203 /* File operations struct for character device */
204 static const struct file_operations twa_fops = {
205 	.owner		= THIS_MODULE,
206 	.unlocked_ioctl	= twa_chrdev_ioctl,
207 	.open		= twa_chrdev_open,
208 	.release	= NULL,
209 	.llseek		= noop_llseek,
210 };
211 
212 /*
213  * The controllers use an inline buffer instead of a mapped SGL for small,
214  * single entry buffers.  Note that we treat a zero-length transfer like
215  * a mapped SGL.
216  */
217 static bool twa_command_mapped(struct scsi_cmnd *cmd)
218 {
219 	return scsi_sg_count(cmd) != 1 ||
220 		scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
221 }
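/* Example (assuming TW_MIN_SGL_LENGTH is 512 bytes): a 36-byte INQUIRY with a
   single scatter-gather entry returns false and is served from the inline
   buffer, while a single-entry 4 KB READ returns true and is DMA-mapped. */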
222 
223 /* This function will complete an aen request from the isr */
224 static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
225 {
226 	TW_Command_Full *full_command_packet;
227 	TW_Command *command_packet;
228 	TW_Command_Apache_Header *header;
229 	unsigned short aen;
230 	int retval = 1;
231 
232 	header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
233 	tw_dev->posted_request_count--;
234 	aen = le16_to_cpu(header->status_block.error);
235 	full_command_packet = tw_dev->command_packet_virt[request_id];
236 	command_packet = &full_command_packet->command.oldcommand;
237 
238 	/* First check for internal completion of set param for time sync */
239 	if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
240 		/* Keep reading the queue in case there are more aen's */
241 		if (twa_aen_read_queue(tw_dev, request_id))
242 			goto out2;
243 		else {
244 			retval = 0;
245 			goto out;
246 		}
247 	}
248 
249 	switch (aen) {
250 	case TW_AEN_QUEUE_EMPTY:
251 		/* Quit reading the queue if this is the last one */
252 		break;
253 	case TW_AEN_SYNC_TIME_WITH_HOST:
254 		twa_aen_sync_time(tw_dev, request_id);
255 		retval = 0;
256 		goto out;
257 	default:
258 		twa_aen_queue_event(tw_dev, header);
259 
260 		/* If there are more aen's, keep reading the queue */
261 		if (twa_aen_read_queue(tw_dev, request_id))
262 			goto out2;
263 		else {
264 			retval = 0;
265 			goto out;
266 		}
267 	}
268 	retval = 0;
269 out2:
270 	tw_dev->state[request_id] = TW_S_COMPLETED;
271 	twa_free_request_id(tw_dev, request_id);
272 	clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
273 out:
274 	return retval;
275 } /* End twa_aen_complete() */
276 
277 /* This function will drain aen queue */
278 static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
279 {
280 	int request_id = 0;
281 	char cdb[TW_MAX_CDB_LEN];
282 	TW_SG_Entry sglist[1];
283 	int finished = 0, count = 0;
284 	TW_Command_Full *full_command_packet;
285 	TW_Command_Apache_Header *header;
286 	unsigned short aen;
287 	int first_reset = 0, queue = 0, retval = 1;
288 
289 	if (no_check_reset)
290 		first_reset = 0;
291 	else
292 		first_reset = 1;
293 
294 	full_command_packet = tw_dev->command_packet_virt[request_id];
295 	memset(full_command_packet, 0, sizeof(TW_Command_Full));
296 
297 	/* Initialize cdb */
298 	memset(&cdb, 0, TW_MAX_CDB_LEN);
299 	cdb[0] = REQUEST_SENSE; /* opcode */
300 	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
301 
302 	/* Initialize sglist */
303 	memset(&sglist, 0, sizeof(TW_SG_Entry));
304 	sglist[0].length = TW_SECTOR_SIZE;
305 	sglist[0].address = tw_dev->generic_buffer_phys[request_id];
306 
307 	if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
308 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
309 		goto out;
310 	}
311 
312 	/* Mark internal command */
313 	tw_dev->srb[request_id] = NULL;
314 
315 	do {
316 		/* Send command to the board */
317 		if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
318 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
319 			goto out;
320 		}
321 
322 		/* Now poll for completion */
323 		if (twa_poll_response(tw_dev, request_id, 30)) {
324 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
325 			tw_dev->posted_request_count--;
326 			goto out;
327 		}
328 
329 		tw_dev->posted_request_count--;
330 		header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
331 		aen = le16_to_cpu(header->status_block.error);
332 		queue = 0;
333 		count++;
334 
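		/* React to the AEN code: TW_AEN_QUEUE_EMPTY finishes the drain
		   (or bails out if the expected reset AEN was never seen), the
		   first TW_AEN_SOFT_RESET only flips the first_reset flag, time
		   sync requests are ignored here, and any other code is queued
		   as an event below. */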
335 		switch (aen) {
336 		case TW_AEN_QUEUE_EMPTY:
337 			if (first_reset != 1)
338 				goto out;
339 			else
340 				finished = 1;
341 			break;
342 		case TW_AEN_SOFT_RESET:
343 			if (first_reset == 0)
344 				first_reset = 1;
345 			else
346 				queue = 1;
347 			break;
348 		case TW_AEN_SYNC_TIME_WITH_HOST:
349 			break;
350 		default:
351 			queue = 1;
352 		}
353 
354 		/* Now queue an event info */
355 		if (queue)
356 			twa_aen_queue_event(tw_dev, header);
357 	} while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
358 
359 	if (count == TW_MAX_AEN_DRAIN)
360 		goto out;
361 
362 	retval = 0;
363 out:
364 	tw_dev->state[request_id] = TW_S_INITIAL;
365 	return retval;
366 } /* End twa_aen_drain_queue() */
367 
368 /* This function will queue an event */
369 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
370 {
371 	u32 local_time;
372 	TW_Event *event;
373 	unsigned short aen;
374 	char host[16];
375 	char *error_str;
376 
377 	tw_dev->aen_count++;
378 
379 	/* Fill out event info */
380 	event = tw_dev->event_queue[tw_dev->error_index];
381 
382 	/* Check for clobber */
383 	host[0] = '\0';
384 	if (tw_dev->host) {
385 		sprintf(host, " scsi%d:", tw_dev->host->host_no);
386 		if (event->retrieved == TW_AEN_NOT_RETRIEVED)
387 			tw_dev->aen_clobber = 1;
388 	}
389 
390 	aen = le16_to_cpu(header->status_block.error);
391 	memset(event, 0, sizeof(TW_Event));
392 
393 	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
394 	/* event->time_stamp_sec overflows in y2106 */
395 	local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
396 	event->time_stamp_sec = local_time;
397 	event->aen_code = aen;
398 	event->retrieved = TW_AEN_NOT_RETRIEVED;
399 	event->sequence_id = tw_dev->error_sequence_id;
400 	tw_dev->error_sequence_id++;
401 
402 	/* Check for embedded error string */
403 	error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
404 
405 	header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
406 	event->parameter_len = strlen(header->err_specific_desc);
407 	memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
408 	if (event->severity != TW_AEN_SEVERITY_DEBUG)
409 		printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
410 		       host,
411 		       twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
412 		       TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
413 		       error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
414 		       header->err_specific_desc);
415 	else
416 		tw_dev->aen_count--;
417 
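	/* The event queue is a fixed ring of TW_Q_LENGTH entries; once the
	   index wraps, older events can be overwritten (and are flagged as
	   clobbered above if they were never retrieved). */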
418 	if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
419 		tw_dev->event_queue_wrapped = 1;
420 	tw_dev->error_index = (tw_dev->error_index + 1) % TW_Q_LENGTH;
421 } /* End twa_aen_queue_event() */
422 
423 /* This function will read the aen queue from the isr */
424 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
425 {
426 	char cdb[TW_MAX_CDB_LEN];
427 	TW_SG_Entry sglist[1];
428 	TW_Command_Full *full_command_packet;
429 	int retval = 1;
430 
431 	full_command_packet = tw_dev->command_packet_virt[request_id];
432 	memset(full_command_packet, 0, sizeof(TW_Command_Full));
433 
434 	/* Initialize cdb */
435 	memset(&cdb, 0, TW_MAX_CDB_LEN);
436 	cdb[0] = REQUEST_SENSE; /* opcode */
437 	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
438 
439 	/* Initialize sglist */
440 	memset(&sglist, 0, sizeof(TW_SG_Entry));
441 	sglist[0].length = TW_SECTOR_SIZE;
442 	sglist[0].address = tw_dev->generic_buffer_phys[request_id];
443 
444 	/* Mark internal command */
445 	tw_dev->srb[request_id] = NULL;
446 
447 	/* Now post the command packet */
448 	if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
449 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
450 		goto out;
451 	}
452 	retval = 0;
453 out:
454 	return retval;
455 } /* End twa_aen_read_queue() */
456 
457 /* This function will look up an AEN severity string */
458 static char *twa_aen_severity_lookup(unsigned char severity_code)
459 {
460 	char *retval = NULL;
461 
462 	if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
463 	    (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
464 		goto out;
465 
466 	retval = twa_aen_severity_table[severity_code];
467 out:
468 	return retval;
469 } /* End twa_aen_severity_lookup() */
470 
471 /* This function will sync firmware time with the host time */
472 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
473 {
474 	u32 schedulertime;
475 	TW_Command_Full *full_command_packet;
476 	TW_Command *command_packet;
477 	TW_Param_Apache *param;
478 	time64_t local_time;
479 
480 	/* Fill out the command packet */
481 	full_command_packet = tw_dev->command_packet_virt[request_id];
482 	memset(full_command_packet, 0, sizeof(TW_Command_Full));
483 	command_packet = &full_command_packet->command.oldcommand;
484 	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
485 	command_packet->request_id = request_id;
486 	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
487 	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
488 	command_packet->size = TW_COMMAND_SIZE;
489 	command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
490 
491 	/* Setup the param */
492 	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
493 	memset(param, 0, TW_SECTOR_SIZE);
494 	param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
495 	param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
496 	param->parameter_size_bytes = cpu_to_le16(4);
497 
498 	/* Convert system time in UTC to local time seconds since last
499            Sunday 12:00AM */
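	/* The 3-day offset below aligns the week to Sunday: the Unix epoch fell
	   on a Thursday, so subtracting 3 days before taking the remainder
	   modulo 604800 (seconds per week) gives seconds since Sunday 00:00. */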
500 	local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
501 	div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
502 	schedulertime = cpu_to_le32(schedulertime % 604800);
503 
504 	memcpy(param->data, &schedulertime, sizeof(u32));
505 
506 	/* Mark internal command */
507 	tw_dev->srb[request_id] = NULL;
508 
509 	/* Now post the command */
510 	twa_post_command_packet(tw_dev, request_id, 1);
511 } /* End twa_aen_sync_time() */
512 
513 /* This function will allocate memory and check if it is correctly aligned */
514 static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
515 {
516 	int i;
517 	dma_addr_t dma_handle;
518 	unsigned long *cpu_addr;
519 	int retval = 1;
520 
521 	cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
522 	if (!cpu_addr) {
523 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
524 		goto out;
525 	}
526 
527 	if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
528 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
529 		pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
530 		goto out;
531 	}
532 
533 	memset(cpu_addr, 0, size*TW_Q_LENGTH);
534 
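	/* Carve the single coherent allocation into TW_Q_LENGTH equal slots so
	   that each request id gets its own command packet (which == 0) or
	   generic buffer (which == 1), with matching bus and virtual addresses. */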
535 	for (i = 0; i < TW_Q_LENGTH; i++) {
536 		switch(which) {
537 		case 0:
538 			tw_dev->command_packet_phys[i] = dma_handle+(i*size);
539 			tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
540 			break;
541 		case 1:
542 			tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
543 			tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
544 			break;
545 		}
546 	}
547 	retval = 0;
548 out:
549 	return retval;
550 } /* End twa_allocate_memory() */
551 
552 /* This function will check the status register for unexpected bits */
553 static int twa_check_bits(u32 status_reg_value)
554 {
555 	int retval = 1;
556 
557 	if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
558 		goto out;
559 	if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
560 		goto out;
561 
562 	retval = 0;
563 out:
564 	return retval;
565 } /* End twa_check_bits() */
566 
567 /* This function will check the srl and decide if we are compatible */
568 static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
569 {
570 	int retval = 1;
571 	unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
572 	unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
573 	u32 init_connect_result = 0;
574 
575 	if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
576 			       TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
577 			       TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
578 			       TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
579 			       &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
580 			       &fw_on_ctlr_build, &init_connect_result)) {
581 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
582 		goto out;
583 	}
584 
585 	tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
586 	tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
587 	tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
588 
589 	/* Try base mode compatibility */
590 	if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
591 		if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
592 				       TW_EXTENDED_INIT_CONNECT,
593 				       TW_BASE_FW_SRL, TW_9000_ARCH_ID,
594 				       TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
595 				       &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
596 				       &fw_on_ctlr_branch, &fw_on_ctlr_build,
597 				       &init_connect_result)) {
598 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
599 			goto out;
600 		}
601 		if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
602 			if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
603 				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
604 			} else {
605 				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
606 			}
607 			goto out;
608 		}
609 		tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
610 		tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
611 		tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
612 	}
613 
614 	/* Load rest of compatibility struct */
615 	strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
616 		sizeof(tw_dev->tw_compat_info.driver_version));
617 	tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
618 	tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
619 	tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
620 	tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
621 	tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
622 	tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
623 	tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
624 	tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
625 	tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
626 
627 	retval = 0;
628 out:
629 	return retval;
630 } /* End twa_check_srl() */
631 
632 /* This function handles ioctl for the character device */
633 static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
634 {
635 	struct inode *inode = file_inode(file);
636 	long timeout;
637 	unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
638 	dma_addr_t dma_handle;
639 	int request_id = 0;
640 	unsigned int sequence_id = 0;
641 	unsigned char event_index, start_index;
642 	TW_Ioctl_Driver_Command driver_command;
643 	TW_Ioctl_Buf_Apache *tw_ioctl;
644 	TW_Lock *tw_lock;
645 	TW_Command_Full *full_command_packet;
646 	TW_Compatibility_Info *tw_compat_info;
647 	TW_Event *event;
648 	ktime_t current_time;
649 	TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
650 	int retval = TW_IOCTL_ERROR_OS_EFAULT;
651 	void __user *argp = (void __user *)arg;
652 
653 	mutex_lock(&twa_chrdev_mutex);
654 
655 	/* Only let one of these through at a time */
656 	if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
657 		retval = TW_IOCTL_ERROR_OS_EINTR;
658 		goto out;
659 	}
660 
661 	/* First copy down the driver command */
662 	if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
663 		goto out2;
664 
665 	/* Check data buffer size */
666 	if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
667 		retval = TW_IOCTL_ERROR_OS_EINVAL;
668 		goto out2;
669 	}
670 
671 	/* Hardware can only do transfers in multiples of 512 bytes */
672 	data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
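	/* Rounds up to the next multiple of 512: e.g. a 100-byte request is
	   padded to 512 bytes and a 513-byte request to 1024 bytes. */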
673 
674 	/* Now allocate ioctl buf memory */
675 	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
676 	if (!cpu_addr) {
677 		retval = TW_IOCTL_ERROR_OS_ENOMEM;
678 		goto out2;
679 	}
680 
681 	tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
682 
683 	/* Now copy down the entire ioctl */
684 	if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
685 		goto out3;
686 
687 	/* See which ioctl we are doing */
688 	switch (cmd) {
689 	case TW_IOCTL_FIRMWARE_PASS_THROUGH:
690 		spin_lock_irqsave(tw_dev->host->host_lock, flags);
691 		twa_get_request_id(tw_dev, &request_id);
692 
693 		/* Flag internal command */
694 		tw_dev->srb[request_id] = NULL;
695 
696 		/* Flag chrdev ioctl */
697 		tw_dev->chrdev_request_id = request_id;
698 
699 		full_command_packet = &tw_ioctl->firmware_command;
700 
701 		/* Load request id and sglist for both command types */
702 		twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
703 
704 		memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
705 
706 		/* Now post the command packet to the controller */
707 		twa_post_command_packet(tw_dev, request_id, 1);
708 		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
709 
710 		timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
711 
712 		/* Now wait for command to complete */
713 		timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
714 
715 		/* We timed out, and didn't get an interrupt */
716 		if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
717 			/* Now we need to reset the board */
718 			printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
719 			       tw_dev->host->host_no, TW_DRIVER, 0x37,
720 			       cmd);
721 			retval = TW_IOCTL_ERROR_OS_EIO;
722 			twa_reset_device_extension(tw_dev);
723 			goto out3;
724 		}
725 
726 		/* Now copy in the command packet response */
727 		memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
728 
729 		/* Now complete the io */
730 		spin_lock_irqsave(tw_dev->host->host_lock, flags);
731 		tw_dev->posted_request_count--;
732 		tw_dev->state[request_id] = TW_S_COMPLETED;
733 		twa_free_request_id(tw_dev, request_id);
734 		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
735 		break;
736 	case TW_IOCTL_GET_COMPATIBILITY_INFO:
737 		tw_ioctl->driver_command.status = 0;
738 		/* Copy compatibility struct into ioctl data buffer */
739 		tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
740 		memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
741 		break;
742 	case TW_IOCTL_GET_LAST_EVENT:
743 		if (tw_dev->event_queue_wrapped) {
744 			if (tw_dev->aen_clobber) {
745 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
746 				tw_dev->aen_clobber = 0;
747 			} else
748 				tw_ioctl->driver_command.status = 0;
749 		} else {
750 			if (!tw_dev->error_index) {
751 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
752 				break;
753 			}
754 			tw_ioctl->driver_command.status = 0;
755 		}
756 		event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
757 		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
758 		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
759 		break;
760 	case TW_IOCTL_GET_FIRST_EVENT:
761 		if (tw_dev->event_queue_wrapped) {
762 			if (tw_dev->aen_clobber) {
763 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
764 				tw_dev->aen_clobber = 0;
765 			} else
766 				tw_ioctl->driver_command.status = 0;
767 			event_index = tw_dev->error_index;
768 		} else {
769 			if (!tw_dev->error_index) {
770 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
771 				break;
772 			}
773 			tw_ioctl->driver_command.status = 0;
774 			event_index = 0;
775 		}
776 		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
777 		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
778 		break;
779 	case TW_IOCTL_GET_NEXT_EVENT:
780 		event = (TW_Event *)tw_ioctl->data_buffer;
781 		sequence_id = event->sequence_id;
782 		tw_ioctl->driver_command.status = 0;
783 
784 		if (tw_dev->event_queue_wrapped) {
785 			if (tw_dev->aen_clobber) {
786 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
787 				tw_dev->aen_clobber = 0;
788 			}
789 			start_index = tw_dev->error_index;
790 		} else {
791 			if (!tw_dev->error_index) {
792 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
793 				break;
794 			}
795 			start_index = 0;
796 		}
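		/* Locate the slot expected to hold the event that follows the
		   caller-supplied sequence_id, using the known sequence_id of
		   the event at start_index as the reference point. */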
797 		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
798 
799 		if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
800 			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
801 				tw_dev->aen_clobber = 1;
802 			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
803 			break;
804 		}
805 		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
806 		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
807 		break;
808 	case TW_IOCTL_GET_PREVIOUS_EVENT:
809 		event = (TW_Event *)tw_ioctl->data_buffer;
810 		sequence_id = event->sequence_id;
811 		tw_ioctl->driver_command.status = 0;
812 
813 		if (tw_dev->event_queue_wrapped) {
814 			if (tw_dev->aen_clobber) {
815 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
816 				tw_dev->aen_clobber = 0;
817 			}
818 			start_index = tw_dev->error_index;
819 		} else {
820 			if (!tw_dev->error_index) {
821 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
822 				break;
823 			}
824 			start_index = 0;
825 		}
826 		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
827 
828 		if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
829 			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
830 				tw_dev->aen_clobber = 1;
831 			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
832 			break;
833 		}
834 		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
835 		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
836 		break;
837 	case TW_IOCTL_GET_LOCK:
838 		tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
839 		current_time = ktime_get();
840 
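		/* This is a cooperative userspace lock: grant it if the caller
		   forces it, nobody holds it, or the previous holder's timeout
		   has expired; otherwise report how long the lock remains held. */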
841 		if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) ||
842 		    ktime_after(current_time, tw_dev->ioctl_time)) {
843 			tw_dev->ioctl_sem_lock = 1;
844 			tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec);
845 			tw_ioctl->driver_command.status = 0;
846 			tw_lock->time_remaining_msec = tw_lock->timeout_msec;
847 		} else {
848 			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
849 			tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time);
850 		}
851 		break;
852 	case TW_IOCTL_RELEASE_LOCK:
853 		if (tw_dev->ioctl_sem_lock == 1) {
854 			tw_dev->ioctl_sem_lock = 0;
855 			tw_ioctl->driver_command.status = 0;
856 		} else {
857 			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
858 		}
859 		break;
860 	default:
861 		retval = TW_IOCTL_ERROR_OS_ENOTTY;
862 		goto out3;
863 	}
864 
865 	/* Now copy the entire response to userspace */
866 	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
867 		retval = 0;
868 out3:
869 	/* Now free ioctl buf memory */
870 	dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
871 out2:
872 	mutex_unlock(&tw_dev->ioctl_lock);
873 out:
874 	mutex_unlock(&twa_chrdev_mutex);
875 	return retval;
876 } /* End twa_chrdev_ioctl() */
877 
878 /* This function handles open for the character device */
879 /* NOTE that this function will race with remove. */
880 static int twa_chrdev_open(struct inode *inode, struct file *file)
881 {
882 	unsigned int minor_number;
883 	int retval = TW_IOCTL_ERROR_OS_ENODEV;
884 
885 	if (!capable(CAP_SYS_ADMIN)) {
886 		retval = -EACCES;
887 		goto out;
888 	}
889 
890 	minor_number = iminor(inode);
891 	if (minor_number >= twa_device_extension_count)
892 		goto out;
893 	retval = 0;
894 out:
895 	return retval;
896 } /* End twa_chrdev_open() */
897 
898 /* This function will print readable messages from status register errors */
899 static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
900 {
901 	int retval = 1;
902 
903 	/* Check for various error conditions and handle them appropriately */
904 	if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
905 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
906 		writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
907 	}
908 
909 	if (status_reg_value & TW_STATUS_PCI_ABORT) {
910 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
911 		writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
912 		pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
913 	}
914 
915 	if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
916 		if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
917 		     (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
918 		    (!test_bit(TW_IN_RESET, &tw_dev->flags)))
919 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
920 		writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
921 	}
922 
923 	if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
924 		if (tw_dev->reset_print == 0) {
925 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
926 			tw_dev->reset_print = 1;
927 		}
928 		goto out;
929 	}
930 	retval = 0;
931 out:
932 	return retval;
933 } /* End twa_decode_bits() */
934 
935 /* This function will empty the response queue */
936 static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
937 {
938 	u32 status_reg_value, response_que_value;
939 	int count = 0, retval = 1;
940 
941 	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
942 
943 	while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
944 		response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
945 		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
946 		count++;
947 	}
948 	if (count == TW_MAX_RESPONSE_DRAIN)
949 		goto out;
950 
951 	retval = 0;
952 out:
953 	return retval;
954 } /* End twa_empty_response_queue() */
955 
956 /* This function will clear the pchip/response queue on 9550SX */
957 static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
958 {
959 	u32 response_que_value = 0;
960 	unsigned long before;
961 	int retval = 1;
962 
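	/* The large response-queue drain only applies to the newer controllers
	   (9550SX and later); original 9000-series boards skip this step. */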
963 	if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
964 		before = jiffies;
965 		while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
966 			response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
967 			msleep(1);
968 			if (time_after(jiffies, before + HZ * 30))
969 				goto out;
970 		}
971 		/* P-chip settle time */
972 		msleep(500);
973 		retval = 0;
974 	} else
975 		retval = 0;
976 out:
977 	return retval;
978 } /* End twa_empty_response_queue_large() */
979 
980 /* This function passes sense keys from firmware to scsi layer */
981 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
982 {
983 	TW_Command_Full *full_command_packet;
984 	unsigned short error;
985 	int retval = 1;
986 	char *error_str;
987 
988 	full_command_packet = tw_dev->command_packet_virt[request_id];
989 
990 	/* Check for embedded error string */
991 	error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
992 
993 	/* Don't print error for Logical unit not supported during rollcall */
994 	error = le16_to_cpu(full_command_packet->header.status_block.error);
995 	if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
996 		if (print_host)
997 			printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
998 			       tw_dev->host->host_no,
999 			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1000 			       full_command_packet->header.status_block.error,
1001 			       error_str[0] == '\0' ?
1002 			       twa_string_lookup(twa_error_table,
1003 						 full_command_packet->header.status_block.error) : error_str,
1004 			       full_command_packet->header.err_specific_desc);
1005 		else
1006 			printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1007 			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1008 			       full_command_packet->header.status_block.error,
1009 			       error_str[0] == '\0' ?
1010 			       twa_string_lookup(twa_error_table,
1011 						 full_command_packet->header.status_block.error) : error_str,
1012 			       full_command_packet->header.err_specific_desc);
1013 	}
1014 
1015 	if (copy_sense) {
1016 		memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
1017 		tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
1018 		retval = TW_ISR_DONT_RESULT;
1019 		goto out;
1020 	}
1021 	retval = 0;
1022 out:
1023 	return retval;
1024 } /* End twa_fill_sense() */
1025 
1026 /* This function will free up device extension resources */
1027 static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1028 {
1029 	if (tw_dev->command_packet_virt[0])
1030 		pci_free_consistent(tw_dev->tw_pci_dev,
1031 				    sizeof(TW_Command_Full)*TW_Q_LENGTH,
1032 				    tw_dev->command_packet_virt[0],
1033 				    tw_dev->command_packet_phys[0]);
1034 
1035 	if (tw_dev->generic_buffer_virt[0])
1036 		pci_free_consistent(tw_dev->tw_pci_dev,
1037 				    TW_SECTOR_SIZE*TW_Q_LENGTH,
1038 				    tw_dev->generic_buffer_virt[0],
1039 				    tw_dev->generic_buffer_phys[0]);
1040 
1041 	kfree(tw_dev->event_queue[0]);
1042 } /* End twa_free_device_extension() */
1043 
1044 /* This function will free a request id */
1045 static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1046 {
1047 	tw_dev->free_queue[tw_dev->free_tail] = request_id;
1048 	tw_dev->state[request_id] = TW_S_FINISHED;
1049 	tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1050 } /* End twa_free_request_id() */
1051 
1052 /* This function will get parameter table entries from the firmware */
1053 static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1054 {
1055 	TW_Command_Full *full_command_packet;
1056 	TW_Command *command_packet;
1057 	TW_Param_Apache *param;
1058 	void *retval = NULL;
1059 
1060 	/* Setup the command packet */
1061 	full_command_packet = tw_dev->command_packet_virt[request_id];
1062 	memset(full_command_packet, 0, sizeof(TW_Command_Full));
1063 	command_packet = &full_command_packet->command.oldcommand;
1064 
1065 	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1066 	command_packet->size              = TW_COMMAND_SIZE;
1067 	command_packet->request_id        = request_id;
1068 	command_packet->byte6_offset.block_count = cpu_to_le16(1);
1069 
1070 	/* Now setup the param */
1071 	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1072 	memset(param, 0, TW_SECTOR_SIZE);
1073 	param->table_id = cpu_to_le16(table_id | 0x8000);
1074 	param->parameter_id = cpu_to_le16(parameter_id);
1075 	param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1076 
1077 	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1078 	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1079 
1080 	/* Post the command packet to the board */
1081 	twa_post_command_packet(tw_dev, request_id, 1);
1082 
1083 	/* Poll for completion */
1084 	if (twa_poll_response(tw_dev, request_id, 30))
1085 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1086 	else
1087 		retval = (void *)&(param->data[0]);
1088 
1089 	tw_dev->posted_request_count--;
1090 	tw_dev->state[request_id] = TW_S_INITIAL;
1091 
1092 	return retval;
1093 } /* End twa_get_param() */
1094 
1095 /* This function will assign an available request id */
1096 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1097 {
1098 	*request_id = tw_dev->free_queue[tw_dev->free_head];
1099 	tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1100 	tw_dev->state[*request_id] = TW_S_STARTED;
1101 } /* End twa_get_request_id() */
1102 
1103 /* This function will send an initconnection command to controller */
1104 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1105  			      u32 set_features, unsigned short current_fw_srl,
1106 			      unsigned short current_fw_arch_id,
1107 			      unsigned short current_fw_branch,
1108 			      unsigned short current_fw_build,
1109 			      unsigned short *fw_on_ctlr_srl,
1110 			      unsigned short *fw_on_ctlr_arch_id,
1111 			      unsigned short *fw_on_ctlr_branch,
1112 			      unsigned short *fw_on_ctlr_build,
1113 			      u32 *init_connect_result)
1114 {
1115 	TW_Command_Full *full_command_packet;
1116 	TW_Initconnect *tw_initconnect;
1117 	int request_id = 0, retval = 1;
1118 
1119 	/* Initialize InitConnection command packet */
1120 	full_command_packet = tw_dev->command_packet_virt[request_id];
1121 	memset(full_command_packet, 0, sizeof(TW_Command_Full));
1122 	full_command_packet->header.header_desc.size_header = 128;
1123 
1124 	tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1125 	tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1126 	tw_initconnect->request_id = request_id;
1127 	tw_initconnect->message_credits = cpu_to_le16(message_credits);
1128 	tw_initconnect->features = set_features;
1129 
1130 	/* Turn on 64-bit sgl support if we need to */
1131 	tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
1132 
1133 	tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
1134 
1135 	if (set_features & TW_EXTENDED_INIT_CONNECT) {
1136 		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1137 		tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1138 		tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1139 		tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1140 		tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1141 	} else
1142 		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1143 
1144 	/* Send command packet to the board */
1145 	twa_post_command_packet(tw_dev, request_id, 1);
1146 
1147 	/* Poll for completion */
1148 	if (twa_poll_response(tw_dev, request_id, 30)) {
1149 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1150 	} else {
1151 		if (set_features & TW_EXTENDED_INIT_CONNECT) {
1152 			*fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1153 			*fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1154 			*fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1155 			*fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1156 			*init_connect_result = le32_to_cpu(tw_initconnect->result);
1157 		}
1158 		retval = 0;
1159 	}
1160 
1161 	tw_dev->posted_request_count--;
1162 	tw_dev->state[request_id] = TW_S_INITIAL;
1163 
1164 	return retval;
1165 } /* End twa_initconnection() */
1166 
1167 /* This function will initialize the fields of a device extension */
1168 static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1169 {
1170 	int i, retval = 1;
1171 
1172 	/* Initialize command packet buffers */
1173 	if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1174 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1175 		goto out;
1176 	}
1177 
1178 	/* Initialize generic buffer */
1179 	if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1180 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1181 		goto out;
1182 	}
1183 
1184 	/* Allocate event info space */
1185 	tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1186 	if (!tw_dev->event_queue[0]) {
1187 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1188 		goto out;
1189 	}
1190 
1191 
1192 	for (i = 0; i < TW_Q_LENGTH; i++) {
1193 		tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1194 		tw_dev->free_queue[i] = i;
1195 		tw_dev->state[i] = TW_S_INITIAL;
1196 	}
1197 
1198 	tw_dev->pending_head = TW_Q_START;
1199 	tw_dev->pending_tail = TW_Q_START;
1200 	tw_dev->free_head = TW_Q_START;
1201 	tw_dev->free_tail = TW_Q_START;
1202 	tw_dev->error_sequence_id = 1;
1203 	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1204 
1205 	mutex_init(&tw_dev->ioctl_lock);
1206 	init_waitqueue_head(&tw_dev->ioctl_wqueue);
1207 
1208 	retval = 0;
1209 out:
1210 	return retval;
1211 } /* End twa_initialize_device_extension() */
1212 
1213 /* This function is the interrupt service routine */
1214 static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1215 {
1216 	int request_id, error = 0;
1217 	u32 status_reg_value;
1218 	TW_Response_Queue response_que;
1219 	TW_Command_Full *full_command_packet;
1220 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1221 	int handled = 0;
1222 
1223 	/* Get the per adapter lock */
1224 	spin_lock(tw_dev->host->host_lock);
1225 
1226 	/* Read the registers */
1227 	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1228 
1229 	/* Check if this is our interrupt, otherwise bail */
1230 	if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1231 		goto twa_interrupt_bail;
1232 
1233 	handled = 1;
1234 
1235 	/* If we are resetting, bail */
1236 	if (test_bit(TW_IN_RESET, &tw_dev->flags))
1237 		goto twa_interrupt_bail;
1238 
1239 	/* Check controller for errors */
1240 	if (twa_check_bits(status_reg_value)) {
1241 		if (twa_decode_bits(tw_dev, status_reg_value)) {
1242 			TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1243 			goto twa_interrupt_bail;
1244 		}
1245 	}
1246 
1247 	/* Handle host interrupt */
1248 	if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1249 		TW_CLEAR_HOST_INTERRUPT(tw_dev);
1250 
1251 	/* Handle attention interrupt */
1252 	if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1253 		TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1254 		if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1255 			twa_get_request_id(tw_dev, &request_id);
1256 
1257 			error = twa_aen_read_queue(tw_dev, request_id);
1258 			if (error) {
1259 				tw_dev->state[request_id] = TW_S_COMPLETED;
1260 				twa_free_request_id(tw_dev, request_id);
1261 				clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1262 			}
1263 		}
1264 	}
1265 
1266 	/* Handle command interrupt */
1267 	if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1268 		TW_MASK_COMMAND_INTERRUPT(tw_dev);
1269 		/* Drain as many pending commands as we can */
1270 		while (tw_dev->pending_request_count > 0) {
1271 			request_id = tw_dev->pending_queue[tw_dev->pending_head];
1272 			if (tw_dev->state[request_id] != TW_S_PENDING) {
1273 				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1274 				TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1275 				goto twa_interrupt_bail;
1276 			}
1277 			if (twa_post_command_packet(tw_dev, request_id, 1) == 0) {
1278 				tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1279 				tw_dev->pending_request_count--;
1280 			} else {
1281 				/* If we get here, we will continue re-posting on the next command interrupt */
1282 				break;
1283 			}
1284 		}
1285 	}
1286 
1287 	/* Handle response interrupt */
1288 	if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1289 
1290 		/* Drain the response queue from the board */
1291 		while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1292 			/* Complete the response */
1293 			response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1294 			request_id = TW_RESID_OUT(response_que.response_id);
1295 			full_command_packet = tw_dev->command_packet_virt[request_id];
1296 			error = 0;
1297 			/* Check for command packet errors */
1298 			if (full_command_packet->command.newcommand.status != 0) {
1299 				if (tw_dev->srb[request_id] != NULL) {
1300 					error = twa_fill_sense(tw_dev, request_id, 1, 1);
1301 				} else {
1302 					/* Skip ioctl error prints */
1303 					if (request_id != tw_dev->chrdev_request_id) {
1304 						error = twa_fill_sense(tw_dev, request_id, 0, 1);
1305 					}
1306 				}
1307 			}
1308 
1309 			/* Check for correct state */
1310 			if (tw_dev->state[request_id] != TW_S_POSTED) {
1311 				if (tw_dev->srb[request_id] != NULL) {
1312 					TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1313 					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1314 					goto twa_interrupt_bail;
1315 				}
1316 			}
1317 
1318 			/* Check for internal command completion */
1319 			if (tw_dev->srb[request_id] == NULL) {
1320 				if (request_id != tw_dev->chrdev_request_id) {
1321 					if (twa_aen_complete(tw_dev, request_id))
1322 						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1323 				} else {
1324 					tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1325 					wake_up(&tw_dev->ioctl_wqueue);
1326 				}
1327 			} else {
1328 				struct scsi_cmnd *cmd;
1329 
1330 				cmd = tw_dev->srb[request_id];
1331 
1332 				twa_scsiop_execute_scsi_complete(tw_dev, request_id);
1333 				/* If no error, the command was a success */
1334 				if (error == 0) {
1335 					cmd->result = (DID_OK << 16);
1336 				}
1337 
1338 				/* If error, command failed */
1339 				if (error == 1) {
1340 					/* Ask for a host reset */
1341 					cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1342 				}
1343 
1344 				/* Report residual bytes for single sgl */
1345 				if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1346 					if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
1347 						scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
1348 				}
1349 
1350 				/* Now complete the io */
1351 				if (twa_command_mapped(cmd))
1352 					scsi_dma_unmap(cmd);
1353 				cmd->scsi_done(cmd);
1354 				tw_dev->state[request_id] = TW_S_COMPLETED;
1355 				twa_free_request_id(tw_dev, request_id);
1356 				tw_dev->posted_request_count--;
1357 			}
1358 
1359 			/* Check for valid status after each drain */
1360 			status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1361 			if (twa_check_bits(status_reg_value)) {
1362 				if (twa_decode_bits(tw_dev, status_reg_value)) {
1363 					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1364 					goto twa_interrupt_bail;
1365 				}
1366 			}
1367 		}
1368 	}
1369 
1370 twa_interrupt_bail:
1371 	spin_unlock(tw_dev->host->host_lock);
1372 	return IRQ_RETVAL(handled);
1373 } /* End twa_interrupt() */
1374 
1375 /* This function will load the request id and various sgls for ioctls */
1376 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1377 {
1378 	TW_Command *oldcommand;
1379 	TW_Command_Apache *newcommand;
1380 	TW_SG_Entry *sgl;
1381 	unsigned int pae = 0;
1382 
1383 	if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1384 		pae = 1;
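	/* On a 32-bit kernel with a 64-bit dma_addr_t (PAE), each SG address
	   takes an extra 32-bit word, so the ioctl SGL location and the command
	   size are adjusted by one dword below. */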
1385 
1386 	if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1387 		newcommand = &full_command_packet->command.newcommand;
1388 		newcommand->request_id__lunl =
1389 			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1390 		if (length) {
1391 			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1392 			newcommand->sg_list[0].length = cpu_to_le32(length);
1393 		}
1394 		newcommand->sgl_entries__lunh =
1395 			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
1396 	} else {
1397 		oldcommand = &full_command_packet->command.oldcommand;
1398 		oldcommand->request_id = request_id;
1399 
1400 		if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1401 			/* Load the sg list */
1402 			if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1403 				sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1404 			else
1405 				sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1406 			sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1407 			sgl->length = cpu_to_le32(length);
1408 
1409 			oldcommand->size += pae;
1410 		}
1411 	}
1412 } /* End twa_load_sgl() */
1413 
1414 /* This function will poll for a response interrupt of a request */
1415 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1416 {
1417 	int retval = 1, found = 0, response_request_id;
1418 	TW_Response_Queue response_queue;
1419 	TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1420 
1421 	if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1422 		response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1423 		response_request_id = TW_RESID_OUT(response_queue.response_id);
1424 		if (request_id != response_request_id) {
1425 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1426 			goto out;
1427 		}
1428 		if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1429 			if (full_command_packet->command.newcommand.status != 0) {
1430 				/* bad response */
1431 				twa_fill_sense(tw_dev, request_id, 0, 0);
1432 				goto out;
1433 			}
1434 			found = 1;
1435 		} else {
1436 			if (full_command_packet->command.oldcommand.status != 0) {
1437 				/* bad response */
1438 				twa_fill_sense(tw_dev, request_id, 0, 0);
1439 				goto out;
1440 			}
1441 			found = 1;
1442 		}
1443 	}
1444 
1445 	if (found)
1446 		retval = 0;
1447 out:
1448 	return retval;
1449 } /* End twa_poll_response() */
1450 
1451 /* This function will poll the status register for a flag */
1452 static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1453 {
1454 	u32 status_reg_value;
1455 	unsigned long before;
1456 	int retval = 1;
1457 
1458 	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1459 	before = jiffies;
1460 
1461 	if (twa_check_bits(status_reg_value))
1462 		twa_decode_bits(tw_dev, status_reg_value);
1463 
1464 	while ((status_reg_value & flag) != flag) {
1465 		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1466 
1467 		if (twa_check_bits(status_reg_value))
1468 			twa_decode_bits(tw_dev, status_reg_value);
1469 
1470 		if (time_after(jiffies, before + HZ * seconds))
1471 			goto out;
1472 
1473 		msleep(50);
1474 	}
1475 	retval = 0;
1476 out:
1477 	return retval;
1478 } /* End twa_poll_status() */
1479 
1480 /* This function will poll the status register for disappearance of a flag */
1481 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1482 {
1483 	u32 status_reg_value;
1484 	unsigned long before;
1485 	int retval = 1;
1486 
1487 	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1488 	before = jiffies;
1489 
1490 	if (twa_check_bits(status_reg_value))
1491 		twa_decode_bits(tw_dev, status_reg_value);
1492 
1493 	while ((status_reg_value & flag) != 0) {
1494 		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1495 		if (twa_check_bits(status_reg_value))
1496 			twa_decode_bits(tw_dev, status_reg_value);
1497 
1498 		if (time_after(jiffies, before + HZ * seconds))
1499 			goto out;
1500 
1501 		msleep(50);
1502 	}
1503 	retval = 0;
1504 out:
1505 	return retval;
1506 } /* End twa_poll_status_gone() */
1507 
1508 /* This function will attempt to post a command packet to the board */
1509 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1510 {
1511 	u32 status_reg_value;
1512 	dma_addr_t command_que_value;
1513 	int retval = 1;
1514 
1515 	command_que_value = tw_dev->command_packet_phys[request_id];
1516 
1517 	/* For 9650SE/9690SA write low 4 bytes first */
1518 	if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1519 	    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1520 		command_que_value += TW_COMMAND_OFFSET;
1521 		writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
1522 	}
1523 
1524 	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1525 
1526 	if (twa_check_bits(status_reg_value))
1527 		twa_decode_bits(tw_dev, status_reg_value);
1528 
1529 	if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
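		/* The board can't accept this packet right now: the command queue
		   is full, or earlier requests are already pending ahead of it. */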
1530 
1531 		/* Only pend internal driver commands */
1532 		if (!internal) {
1533 			retval = SCSI_MLQUEUE_HOST_BUSY;
1534 			goto out;
1535 		}
1536 
1537 		/* Couldn't post the command packet, so we do it later */
1538 		if (tw_dev->state[request_id] != TW_S_PENDING) {
1539 			tw_dev->state[request_id] = TW_S_PENDING;
1540 			tw_dev->pending_request_count++;
1541 			if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1542 				tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1543 			}
1544 			tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1545 			tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1546 		}
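		/* Unmask the command interrupt so the interrupt handler can post
		   the pended request when the controller signals it has room. */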
1547 		TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1548 		goto out;
1549 	} else {
1550 		if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1551 		    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1552 			/* Now write upper 4 bytes */
1553 			writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1554 		} else {
1555 			if (sizeof(dma_addr_t) > 4) {
1556 				command_que_value += TW_COMMAND_OFFSET;
1557 				writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1558 				writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1559 			} else {
1560 				writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1561 			}
1562 		}
1563 		tw_dev->state[request_id] = TW_S_POSTED;
1564 		tw_dev->posted_request_count++;
1565 		if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1566 			tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1567 		}
1568 	}
1569 	retval = 0;
1570 out:
1571 	return retval;
1572 } /* End twa_post_command_packet() */
1573 
1574 /* This function will reset a device extension */
1575 static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1576 {
1577 	int i = 0;
1578 	int retval = 1;
1579 	unsigned long flags = 0;
1580 
1581 	set_bit(TW_IN_RESET, &tw_dev->flags);
1582 	TW_DISABLE_INTERRUPTS(tw_dev);
1583 	TW_MASK_COMMAND_INTERRUPT(tw_dev);
1584 	spin_lock_irqsave(tw_dev->host->host_lock, flags);
1585 
1586 	/* Abort all requests that are in progress */
1587 	for (i = 0; i < TW_Q_LENGTH; i++) {
1588 		if ((tw_dev->state[i] != TW_S_FINISHED) &&
1589 		    (tw_dev->state[i] != TW_S_INITIAL) &&
1590 		    (tw_dev->state[i] != TW_S_COMPLETED)) {
1591 			if (tw_dev->srb[i]) {
1592 				struct scsi_cmnd *cmd = tw_dev->srb[i];
1593 
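				/* Complete the command with DID_RESET so the SCSI
				   midlayer will retry it once the reset finishes. */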
1594 				cmd->result = (DID_RESET << 16);
1595 				if (twa_command_mapped(cmd))
1596 					scsi_dma_unmap(cmd);
1597 				cmd->scsi_done(cmd);
1598 			}
1599 		}
1600 	}
1601 
1602 	/* Reset queues and counts */
1603 	for (i = 0; i < TW_Q_LENGTH; i++) {
1604 		tw_dev->free_queue[i] = i;
1605 		tw_dev->state[i] = TW_S_INITIAL;
1606 	}
1607 	tw_dev->free_head = TW_Q_START;
1608 	tw_dev->free_tail = TW_Q_START;
1609 	tw_dev->posted_request_count = 0;
1610 	tw_dev->pending_request_count = 0;
1611 	tw_dev->pending_head = TW_Q_START;
1612 	tw_dev->pending_tail = TW_Q_START;
1613 	tw_dev->reset_print = 0;
1614 
1615 	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1616 
1617 	if (twa_reset_sequence(tw_dev, 1))
1618 		goto out;
1619 
1620 	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1621 	clear_bit(TW_IN_RESET, &tw_dev->flags);
1622 	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1623 
1624 	retval = 0;
1625 out:
1626 	return retval;
1627 } /* End twa_reset_device_extension() */
1628 
1629 /* This function will reset a controller */
1630 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1631 {
1632 	int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1633 
1634 	while (tries < TW_MAX_RESET_TRIES) {
1635 		if (do_soft_reset) {
1636 			TW_SOFT_RESET(tw_dev);
1637 			/* Clear pchip/response queue on 9550SX */
1638 			if (twa_empty_response_queue_large(tw_dev)) {
1639 				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1640 				do_soft_reset = 1;
1641 				tries++;
1642 				continue;
1643 			}
1644 		}
1645 
1646 		/* Make sure controller is in a good state */
1647 		if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1648 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1649 			do_soft_reset = 1;
1650 			tries++;
1651 			continue;
1652 		}
1653 
1654 		/* Empty response queue */
1655 		if (twa_empty_response_queue(tw_dev)) {
1656 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1657 			do_soft_reset = 1;
1658 			tries++;
1659 			continue;
1660 		}
1661 
1662 		flashed = 0;
1663 
1664 		/* Check for compatibility/flash */
1665 		if (twa_check_srl(tw_dev, &flashed)) {
1666 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1667 			do_soft_reset = 1;
1668 			tries++;
1669 			continue;
1670 		} else {
1671 			if (flashed) {
1672 				tries++;
1673 				continue;
1674 			}
1675 		}
1676 
1677 		/* Drain the AEN queue */
1678 		if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1679 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1680 			do_soft_reset = 1;
1681 			tries++;
1682 			continue;
1683 		}
1684 
1685 		/* If we got here, controller is in a good state */
1686 		retval = 0;
1687 		goto out;
1688 	}
1689 out:
1690 	return retval;
1691 } /* End twa_reset_sequence() */
1692 
1693 /* This function returns unit geometry in cylinders/heads/sectors */
1694 static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1695 {
1696 	int heads, sectors, cylinders;
1697 	TW_Device_Extension *tw_dev;
1698 
1699 	tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
1700 
1701 	if (capacity >= 0x200000) {
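	/* Units of 1 GiB or more (0x200000 512-byte sectors) get the extended
	   255-head/63-sector translation; smaller units report 64/32. */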
1702 		heads = 255;
1703 		sectors = 63;
1704 		cylinders = sector_div(capacity, heads * sectors);
1705 	} else {
1706 		heads = 64;
1707 		sectors = 32;
1708 		cylinders = sector_div(capacity, heads * sectors);
1709 	}
1710 
1711 	geom[0] = heads;
1712 	geom[1] = sectors;
1713 	geom[2] = cylinders;
1714 
1715 	return 0;
1716 } /* End twa_scsi_biosparam() */
1717 
1718 /* This is the new scsi eh reset function */
1719 static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1720 {
1721 	TW_Device_Extension *tw_dev = NULL;
1722 	int retval = FAILED;
1723 
1724 	tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1725 
1726 	tw_dev->num_resets++;
1727 
1728 	sdev_printk(KERN_WARNING, SCpnt->device,
1729 		"WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1730 		TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1731 
1732 	/* Make sure we are not issuing an ioctl or resetting from ioctl */
1733 	mutex_lock(&tw_dev->ioctl_lock);
1734 
1735 	/* Now reset the card and some of the device extension data */
1736 	if (twa_reset_device_extension(tw_dev)) {
1737 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1738 		goto out;
1739 	}
1740 
1741 	retval = SUCCESS;
1742 out:
1743 	mutex_unlock(&tw_dev->ioctl_lock);
1744 	return retval;
1745 } /* End twa_scsi_eh_reset() */
1746 
1747 /* This is the main scsi queue function to handle scsi opcodes */
1748 static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1749 {
1750 	int request_id, retval;
1751 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1752 
1753 	/* If we are resetting due to timed out ioctl, report as busy */
1754 	if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1755 		retval = SCSI_MLQUEUE_HOST_BUSY;
1756 		goto out;
1757 	}
1758 
1759 	/* Check if this FW supports luns */
1760 	if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1761 		SCpnt->result = (DID_BAD_TARGET << 16);
1762 		done(SCpnt);
1763 		retval = 0;
1764 		goto out;
1765 	}
1766 
1767 	/* Save done function into scsi_cmnd struct */
1768 	SCpnt->scsi_done = done;
1769 
1770 	/* Get a free request id */
1771 	twa_get_request_id(tw_dev, &request_id);
1772 
1773 	/* Save the scsi command for use by the ISR */
1774 	tw_dev->srb[request_id] = SCpnt;
1775 
1776 	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1777 	switch (retval) {
1778 	case SCSI_MLQUEUE_HOST_BUSY:
1779 		if (twa_command_mapped(SCpnt))
1780 			scsi_dma_unmap(SCpnt);
1781 		twa_free_request_id(tw_dev, request_id);
1782 		break;
1783 	case 1:
1784 		SCpnt->result = (DID_ERROR << 16);
1785 		if (twa_command_mapped(SCpnt))
1786 			scsi_dma_unmap(SCpnt);
1787 		done(SCpnt);
1788 		tw_dev->state[request_id] = TW_S_COMPLETED;
1789 		twa_free_request_id(tw_dev, request_id);
1790 		retval = 0;
1791 	}
1792 out:
1793 	return retval;
1794 } /* End twa_scsi_queue() */
1795 
1796 static DEF_SCSI_QCMD(twa_scsi_queue)
1797 
1798 /* This function hands scsi cdb's to the firmware */
1799 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
1800 {
1801 	TW_Command_Full *full_command_packet;
1802 	TW_Command_Apache *command_packet;
1803 	u32 num_sectors = 0x0;
1804 	int i, sg_count;
1805 	struct scsi_cmnd *srb = NULL;
1806 	struct scatterlist *sglist = NULL, *sg;
1807 	int retval = 1;
1808 
1809 	if (tw_dev->srb[request_id]) {
1810 		srb = tw_dev->srb[request_id];
1811 		if (scsi_sglist(srb))
1812 			sglist = scsi_sglist(srb);
1813 	}
1814 
1815 	/* Initialize command packet */
1816 	full_command_packet = tw_dev->command_packet_virt[request_id];
1817 	full_command_packet->header.header_desc.size_header = 128;
1818 	full_command_packet->header.status_block.error = 0;
1819 	full_command_packet->header.status_block.severity__reserved = 0;
1820 
1821 	command_packet = &full_command_packet->command.newcommand;
1822 	command_packet->status = 0;
1823 	command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1824 
1825 	/* We forced 16 byte cdb use earlier */
1826 	if (!cdb)
1827 		memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1828 	else
1829 		memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
1830 
1831 	if (srb) {
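		/* The low 4 bits of the LUN are packed alongside the request id
		   (lunl); the high 4 bits travel with the SGL entry count (lunh). */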
1832 		command_packet->unit = srb->device->id;
1833 		command_packet->request_id__lunl =
1834 			cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
1835 	} else {
1836 		command_packet->request_id__lunl =
1837 			cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
1838 		command_packet->unit = 0;
1839 	}
1840 
1841 	command_packet->sgl_offset = 16;
1842 
1843 	if (!sglistarg) {
1844 		/* Map sglist from scsi layer to cmd packet */
1845 
1846 		if (scsi_sg_count(srb)) {
1847 			if (!twa_command_mapped(srb)) {
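				/* Request isn't DMA-mapped: bounce it through the
				   per-request generic buffer. Copy outbound data in
				   now; inbound data is copied back on completion. */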
1848 				if (srb->sc_data_direction == DMA_TO_DEVICE ||
1849 				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
1850 					scsi_sg_copy_to_buffer(srb,
1851 							       tw_dev->generic_buffer_virt[request_id],
1852 							       TW_SECTOR_SIZE);
1853 				command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1854 				command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1855 			} else {
1856 				sg_count = scsi_dma_map(srb);
1857 				if (sg_count < 0)
1858 					goto out;
1859 
1860 				scsi_for_each_sg(srb, sg, sg_count, i) {
1861 					command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1862 					command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
1863 					if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1864 						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1865 						goto out;
1866 					}
1867 				}
1868 			}
1869 			command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
1870 		}
1871 	} else {
1872 		/* Internal cdb post */
1873 		for (i = 0; i < use_sg; i++) {
1874 			command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
1875 			command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
1876 			if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1877 				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1878 				goto out;
1879 			}
1880 		}
1881 		command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
1882 	}
1883 
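	/* Pull the transfer length from the CDB (byte 4 for 6-byte READ/WRITE,
	   bytes 7-8 for 10-byte) for the sector statistics below. */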
1884 	if (srb) {
1885 		if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1886 			num_sectors = (u32)srb->cmnd[4];
1887 
1888 		if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1889 			num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1890 	}
1891 
1892 	/* Update sector statistic */
1893 	tw_dev->sector_count = num_sectors;
1894 	if (tw_dev->sector_count > tw_dev->max_sector_count)
1895 		tw_dev->max_sector_count = tw_dev->sector_count;
1896 
1897 	/* Update SG statistics */
1898 	if (srb) {
1899 		tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1900 		if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1901 			tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1902 	}
1903 
1904 	/* Now post the command to the board */
1905 	if (srb) {
1906 		retval = twa_post_command_packet(tw_dev, request_id, 0);
1907 	} else {
1908 		twa_post_command_packet(tw_dev, request_id, 1);
1909 		retval = 0;
1910 	}
1911 out:
1912 	return retval;
1913 } /* End twa_scsiop_execute_scsi() */
1914 
1915 /* This function completes an execute scsi operation */
1916 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1917 {
1918 	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1919 
1920 	if (!twa_command_mapped(cmd) &&
1921 	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1922 	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1923 		if (scsi_sg_count(cmd) == 1) {
1924 			void *buf = tw_dev->generic_buffer_virt[request_id];
1925 
1926 			scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1927 		}
1928 	}
1929 } /* End twa_scsiop_execute_scsi_complete() */
1930 
1931 /* This function tells the controller to shut down */
1932 static void __twa_shutdown(TW_Device_Extension *tw_dev)
1933 {
1934 	/* Disable interrupts */
1935 	TW_DISABLE_INTERRUPTS(tw_dev);
1936 
1937 	/* Free up the IRQ */
1938 	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1939 
1940 	printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1941 
1942 	/* Tell the card we are shutting down */
1943 	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1944 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1945 	} else {
1946 		printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1947 	}
1948 
1949 	/* Clear all interrupts just before exit */
1950 	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1951 } /* End __twa_shutdown() */
1952 
1953 /* Wrapper for __twa_shutdown */
1954 static void twa_shutdown(struct pci_dev *pdev)
1955 {
1956 	struct Scsi_Host *host = pci_get_drvdata(pdev);
1957 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1958 
1959 	__twa_shutdown(tw_dev);
1960 } /* End twa_shutdown() */
1961 
1962 /* This function will look up a string */
1963 static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1964 {
1965 	int index;
1966 
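	/* Walk the table until the code matches or the terminating entry
	   (NULL text) is reached, then return that entry's text. */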
1967 	for (index = 0; ((code != table[index].code) &&
1968 		      (table[index].text != (char *)0)); index++);
1969 	return(table[index].text);
1970 } /* End twa_string_lookup() */
1971 
1972 /* This function gets called when a disk is coming on-line */
1973 static int twa_slave_configure(struct scsi_device *sdev)
1974 {
1975 	/* Force 60 second timeout */
1976 	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
1977 
1978 	return 0;
1979 } /* End twa_slave_configure() */
1980 
1981 /* scsi_host_template initializer */
1982 static struct scsi_host_template driver_template = {
1983 	.module			= THIS_MODULE,
1984 	.name			= "3ware 9000 Storage Controller",
1985 	.queuecommand		= twa_scsi_queue,
1986 	.eh_host_reset_handler	= twa_scsi_eh_reset,
1987 	.bios_param		= twa_scsi_biosparam,
1988 	.change_queue_depth	= scsi_change_queue_depth,
1989 	.can_queue		= TW_Q_LENGTH-2,
1990 	.slave_configure	= twa_slave_configure,
1991 	.this_id		= -1,
1992 	.sg_tablesize		= TW_APACHE_MAX_SGL_LENGTH,
1993 	.max_sectors		= TW_MAX_SECTORS,
1994 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
1995 	.use_clustering		= ENABLE_CLUSTERING,
1996 	.shost_attrs		= twa_host_attrs,
1997 	.emulated		= 1,
1998 	.no_write_same		= 1,
1999 };
2000 
2001 /* This function will probe and initialize a card */
2002 static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2003 {
2004 	struct Scsi_Host *host = NULL;
2005 	TW_Device_Extension *tw_dev;
2006 	unsigned long mem_addr, mem_len;
2007 	int retval = -ENODEV;
2008 
2009 	retval = pci_enable_device(pdev);
2010 	if (retval) {
2011 		TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2012 		goto out_disable_device;
2013 	}
2014 
2015 	pci_set_master(pdev);
2016 	pci_try_set_mwi(pdev);
2017 
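	/* Prefer 64-bit DMA masks; fall back to 32-bit if they can't be set */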
2018 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2019 	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2020 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2021 		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2022 			TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2023 			retval = -ENODEV;
2024 			goto out_disable_device;
2025 		}
2026 
2027 	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2028 	if (!host) {
2029 		TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2030 		retval = -ENOMEM;
2031 		goto out_disable_device;
2032 	}
2033 	tw_dev = (TW_Device_Extension *)host->hostdata;
2034 
2035 	/* Save values to device extension */
2036 	tw_dev->host = host;
2037 	tw_dev->tw_pci_dev = pdev;
2038 
2039 	if (twa_initialize_device_extension(tw_dev)) {
2040 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2041 		goto out_free_device_extension;
2042 	}
2043 
2044 	/* Request IO regions */
2045 	retval = pci_request_regions(pdev, "3w-9xxx");
2046 	if (retval) {
2047 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2048 		goto out_free_device_extension;
2049 	}
2050 
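	/* The original 9000 controller exposes its registers in BAR 1;
	   later controllers use BAR 2. */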
2051 	if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2052 		mem_addr = pci_resource_start(pdev, 1);
2053 		mem_len = pci_resource_len(pdev, 1);
2054 	} else {
2055 		mem_addr = pci_resource_start(pdev, 2);
2056 		mem_len = pci_resource_len(pdev, 2);
2057 	}
2058 
2059 	/* Save base address */
2060 	tw_dev->base_addr = ioremap(mem_addr, mem_len);
2061 	if (!tw_dev->base_addr) {
2062 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2063 		goto out_release_mem_region;
2064 	}
2065 
2066 	/* Disable interrupts on the card */
2067 	TW_DISABLE_INTERRUPTS(tw_dev);
2068 
2069 	/* Initialize the card */
2070 	if (twa_reset_sequence(tw_dev, 0))
2071 		goto out_iounmap;
2072 
2073 	/* Set host specific parameters */
2074 	if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2075 	    (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2076 		host->max_id = TW_MAX_UNITS_9650SE;
2077 	else
2078 		host->max_id = TW_MAX_UNITS;
2079 
2080 	host->max_cmd_len = TW_MAX_CDB_LEN;
2081 
2082 	/* Channels aren't supported by adapter */
2083 	host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2084 	host->max_channel = 0;
2085 
2086 	/* Register the card with the kernel SCSI layer */
2087 	retval = scsi_add_host(host, &pdev->dev);
2088 	if (retval) {
2089 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2090 		goto out_iounmap;
2091 	}
2092 
2093 	pci_set_drvdata(pdev, host);
2094 
2095 	printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2096 	       host->host_no, mem_addr, pdev->irq);
2097 	printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2098 	       host->host_no,
2099 	       (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2100 				     TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2101 	       (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2102 				     TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2103 	       le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2104 				     TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2105 
2106 	/* Try to enable MSI */
2107 	if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2108 	    !pci_enable_msi(pdev))
2109 		set_bit(TW_USING_MSI, &tw_dev->flags);
2110 
2111 	/* Now setup the interrupt handler */
2112 	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2113 	if (retval) {
2114 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2115 		goto out_remove_host;
2116 	}
2117 
2118 	twa_device_extension_list[twa_device_extension_count] = tw_dev;
2119 	twa_device_extension_count++;
2120 
2121 	/* Re-enable interrupts on the card */
2122 	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2123 
2124 	/* Finally, scan the host */
2125 	scsi_scan_host(host);
2126 
2127 	if (twa_major == -1) {
2128 		if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
2129 			TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2130 	}
2131 	return 0;
2132 
2133 out_remove_host:
2134 	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2135 		pci_disable_msi(pdev);
2136 	scsi_remove_host(host);
2137 out_iounmap:
2138 	iounmap(tw_dev->base_addr);
2139 out_release_mem_region:
2140 	pci_release_regions(pdev);
2141 out_free_device_extension:
2142 	twa_free_device_extension(tw_dev);
2143 	scsi_host_put(host);
2144 out_disable_device:
2145 	pci_disable_device(pdev);
2146 
2147 	return retval;
2148 } /* End twa_probe() */
2149 
2150 /* This function is called to remove a device */
2151 static void twa_remove(struct pci_dev *pdev)
2152 {
2153 	struct Scsi_Host *host = pci_get_drvdata(pdev);
2154 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2155 
2156 	scsi_remove_host(tw_dev->host);
2157 
2158 	/* Unregister character device */
2159 	if (twa_major >= 0) {
2160 		unregister_chrdev(twa_major, "twa");
2161 		twa_major = -1;
2162 	}
2163 
2164 	/* Shutdown the card */
2165 	__twa_shutdown(tw_dev);
2166 
2167 	/* Disable MSI if enabled */
2168 	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2169 		pci_disable_msi(pdev);
2170 
2171 	/* Free IO remapping */
2172 	iounmap(tw_dev->base_addr);
2173 
2174 	/* Free up the mem region */
2175 	pci_release_regions(pdev);
2176 
2177 	/* Free up device extension resources */
2178 	twa_free_device_extension(tw_dev);
2179 
2180 	scsi_host_put(tw_dev->host);
2181 	pci_disable_device(pdev);
2182 	twa_device_extension_count--;
2183 } /* End twa_remove() */
2184 
2185 #ifdef CONFIG_PM
2186 /* This function is called on PCI suspend */
2187 static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
2188 {
2189 	struct Scsi_Host *host = pci_get_drvdata(pdev);
2190 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2191 
2192 	printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
2193 
2194 	TW_DISABLE_INTERRUPTS(tw_dev);
2195 	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
2196 
2197 	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2198 		pci_disable_msi(pdev);
2199 
2200 	/* Tell the card we are shutting down */
2201 	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
2202 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
2203 	} else {
2204 		printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
2205 	}
2206 	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
2207 
2208 	pci_save_state(pdev);
2209 	pci_disable_device(pdev);
2210 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
2211 
2212 	return 0;
2213 } /* End twa_suspend() */
2214 
2215 /* This function is called on PCI resume */
2216 static int twa_resume(struct pci_dev *pdev)
2217 {
2218 	int retval = 0;
2219 	struct Scsi_Host *host = pci_get_drvdata(pdev);
2220 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2221 
2222 	printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
2223 	pci_set_power_state(pdev, PCI_D0);
2224 	pci_enable_wake(pdev, PCI_D0, 0);
2225 	pci_restore_state(pdev);
2226 
2227 	retval = pci_enable_device(pdev);
2228 	if (retval) {
2229 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
2230 		return retval;
2231 	}
2232 
2233 	pci_set_master(pdev);
2234 	pci_try_set_mwi(pdev);
2235 
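	/* As at probe time, prefer 64-bit DMA masks and fall back to 32-bit */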
2236 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2237 	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2238 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2239 		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2240 			TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
2241 			retval = -ENODEV;
2242 			goto out_disable_device;
2243 		}
2244 
2245 	/* Initialize the card */
2246 	if (twa_reset_sequence(tw_dev, 0)) {
2247 		retval = -ENODEV;
2248 		goto out_disable_device;
2249 	}
2250 
2251 	/* Now setup the interrupt handler */
2252 	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2253 	if (retval) {
2254 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
2255 		retval = -ENODEV;
2256 		goto out_disable_device;
2257 	}
2258 
2259 	/* Re-enable MSI if it was in use before suspend */
2260 	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2261 		pci_enable_msi(pdev);
2262 
2263 	/* Re-enable interrupts on the card */
2264 	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2265 
2266 	printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
2267 	return 0;
2268 
2269 out_disable_device:
2270 	scsi_remove_host(host);
2271 	pci_disable_device(pdev);
2272 
2273 	return retval;
2274 } /* End twa_resume() */
2275 #endif
2276 
2277 /* PCI Devices supported by this driver */
2278 static struct pci_device_id twa_pci_tbl[] = {
2279 	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2280 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2281 	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2282 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2283 	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2284 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2285 	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2286 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2287 	{ }
2288 };
2289 MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2290 
2291 /* pci_driver initializer */
2292 static struct pci_driver twa_driver = {
2293 	.name		= "3w-9xxx",
2294 	.id_table	= twa_pci_tbl,
2295 	.probe		= twa_probe,
2296 	.remove		= twa_remove,
2297 #ifdef CONFIG_PM
2298 	.suspend	= twa_suspend,
2299 	.resume		= twa_resume,
2300 #endif
2301 	.shutdown	= twa_shutdown
2302 };
2303 
2304 /* This function is called on driver initialization */
2305 static int __init twa_init(void)
2306 {
2307 	printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2308 
2309 	return pci_register_driver(&twa_driver);
2310 } /* End twa_init() */
2311 
2312 /* This function is called on driver exit */
2313 static void __exit twa_exit(void)
2314 {
2315 	pci_unregister_driver(&twa_driver);
2316 } /* End twa_exit() */
2317 
2318 module_init(twa_init);
2319 module_exit(twa_exit);
2320 
2321