xref: /openbmc/linux/drivers/scsi/3w-9xxx.c (revision 160b8e75)
1 /*
2    3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
3 
4    Written By: Adam Radford <aradford@gmail.com>
5    Modifications By: Tom Couch
6 
7    Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
8    Copyright (C) 2010 LSI Corporation.
9 
10    This program is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation; version 2 of the License.
13 
14    This program is distributed in the hope that it will be useful,
15    but WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17    GNU General Public License for more details.
18 
19    NO WARRANTY
20    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24    solely responsible for determining the appropriateness of using and
25    distributing the Program and assumes all risks associated with its
26    exercise of rights under this Agreement, including but not limited to
27    the risks and costs of program errors, damage to or loss of data,
28    programs or equipment, and unavailability or interruption of operations.
29 
30    DISCLAIMER OF LIABILITY
31    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 
39    You should have received a copy of the GNU General Public License
40    along with this program; if not, write to the Free Software
41    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
42 
43    Bugs/Comments/Suggestions should be mailed to:
44    aradford@gmail.com
45 
46    Note: This version of the driver does not contain a bundled firmware
47          image.
48 
49    History
50    -------
51    2.26.02.000 - Driver cleanup for kernel submission.
52    2.26.02.001 - Replace schedule_timeout() calls with msleep().
53    2.26.02.002 - Add support for PAE mode.
54                  Add lun support.
55                  Fix twa_remove() to free irq handler/unregister_chrdev()
56                  before shutting down card.
57                  Change to new 'change_queue_depth' api.
58                  Fix 'handled=1' ISR usage, remove bogus IRQ check.
59                  Remove un-needed eh_abort handler.
60                  Add support for embedded firmware error strings.
61    2.26.02.003 - Correctly handle single sgl's with use_sg=1.
62    2.26.02.004 - Add support for 9550SX controllers.
63    2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
64    2.26.02.006 - Fix 9550SX pchip reset timeout.
65                  Add big endian support.
66    2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
67    2.26.02.008 - Free irq handler in __twa_shutdown().
68                  Serialize reset code.
69                  Add support for 9650SE controllers.
70    2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
71    2.26.02.010 - Add support for 9690SA controllers.
72    2.26.02.011 - Increase max AENs drained to 256.
73                  Add MSI support and "use_msi" module parameter.
74                  Fix bug in twa_get_param() on 4GB+.
75                  Use pci_resource_len() for ioremap().
76    2.26.02.012 - Add power management support.
77    2.26.02.013 - Fix bug in twa_load_sgl().
78    2.26.02.014 - Force 60 second timeout default.
79 */
80 
81 #include <linux/module.h>
82 #include <linux/reboot.h>
83 #include <linux/spinlock.h>
84 #include <linux/interrupt.h>
85 #include <linux/moduleparam.h>
86 #include <linux/errno.h>
87 #include <linux/types.h>
88 #include <linux/delay.h>
89 #include <linux/pci.h>
90 #include <linux/time.h>
91 #include <linux/mutex.h>
92 #include <linux/slab.h>
93 #include <asm/io.h>
94 #include <asm/irq.h>
95 #include <linux/uaccess.h>
96 #include <scsi/scsi.h>
97 #include <scsi/scsi_host.h>
98 #include <scsi/scsi_tcq.h>
99 #include <scsi/scsi_cmnd.h>
100 #include "3w-9xxx.h"
101 
102 /* Globals */
103 #define TW_DRIVER_VERSION "2.26.02.014"
104 static DEFINE_MUTEX(twa_chrdev_mutex);
105 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
106 static unsigned int twa_device_extension_count;
107 static int twa_major = -1;
108 extern struct timezone sys_tz;
109 
110 /* Module parameters */
111 MODULE_AUTHOR ("LSI");
112 MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
113 MODULE_LICENSE("GPL");
114 MODULE_VERSION(TW_DRIVER_VERSION);
115 
116 static int use_msi = 0;
117 module_param(use_msi, int, S_IRUGO);
118 MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts.  Default: 0");
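/*
 * Usage sketch (module name assumed from the source file name):
 *   modprobe 3w-9xxx use_msi=1
 */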
119 
120 /* Function prototypes */
121 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
122 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
123 static char *twa_aen_severity_lookup(unsigned char severity_code);
124 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
125 static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
126 static int twa_chrdev_open(struct inode *inode, struct file *file);
127 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
128 static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
129 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
130 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
131  			      u32 set_features, unsigned short current_fw_srl,
132 			      unsigned short current_fw_arch_id,
133 			      unsigned short current_fw_branch,
134 			      unsigned short current_fw_build,
135 			      unsigned short *fw_on_ctlr_srl,
136 			      unsigned short *fw_on_ctlr_arch_id,
137 			      unsigned short *fw_on_ctlr_branch,
138 			      unsigned short *fw_on_ctlr_build,
139 			      u32 *init_connect_result);
140 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
141 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
142 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
143 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
144 static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
145 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
146 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
147 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
148 static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
149 
150 /* Functions */
151 
152 /* Show some statistics about the card */
153 static ssize_t twa_show_stats(struct device *dev,
154 			      struct device_attribute *attr, char *buf)
155 {
156 	struct Scsi_Host *host = class_to_shost(dev);
157 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
158 	unsigned long flags = 0;
159 	ssize_t len;
160 
161 	spin_lock_irqsave(tw_dev->host->host_lock, flags);
162 	len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
163 		       "Current commands posted:   %4d\n"
164 		       "Max commands posted:       %4d\n"
165 		       "Current pending commands:  %4d\n"
166 		       "Max pending commands:      %4d\n"
167 		       "Last sgl length:           %4d\n"
168 		       "Max sgl length:            %4d\n"
169 		       "Last sector count:         %4d\n"
170 		       "Max sector count:          %4d\n"
171 		       "SCSI Host Resets:          %4d\n"
172 		       "AEN's:                     %4d\n",
173 		       TW_DRIVER_VERSION,
174 		       tw_dev->posted_request_count,
175 		       tw_dev->max_posted_request_count,
176 		       tw_dev->pending_request_count,
177 		       tw_dev->max_pending_request_count,
178 		       tw_dev->sgl_entries,
179 		       tw_dev->max_sgl_entries,
180 		       tw_dev->sector_count,
181 		       tw_dev->max_sector_count,
182 		       tw_dev->num_resets,
183 		       tw_dev->aen_count);
184 	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
185 	return len;
186 } /* End twa_show_stats() */
187 
188 /* Create sysfs 'stats' entry */
189 static struct device_attribute twa_host_stats_attr = {
190 	.attr = {
191 		.name = 	"stats",
192 		.mode =		S_IRUGO,
193 	},
194 	.show = twa_show_stats
195 };
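/*
 * The attribute is exported through the SCSI host, so the statistics can be
 * read from sysfs, e.g. (host number illustrative):
 *   cat /sys/class/scsi_host/host0/stats
 */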
196 
197 /* Host attributes initializer */
198 static struct device_attribute *twa_host_attrs[] = {
199 	&twa_host_stats_attr,
200 	NULL,
201 };
202 
203 /* File operations struct for character device */
204 static const struct file_operations twa_fops = {
205 	.owner		= THIS_MODULE,
206 	.unlocked_ioctl	= twa_chrdev_ioctl,
207 	.open		= twa_chrdev_open,
208 	.release	= NULL,
209 	.llseek		= noop_llseek,
210 };
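/*
 * The character device exposes one minor per controller; twa_chrdev_ioctl()
 * maps the minor back to its TW_Device_Extension through
 * twa_device_extension_list[].
 */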
211 
212 /*
213  * The controllers use an inline buffer instead of a mapped SGL for small,
214  * single entry buffers.  Note that we treat a zero-length transfer like
215  * a mapped SGL.
216  */
217 static bool twa_command_mapped(struct scsi_cmnd *cmd)
218 {
219 	return scsi_sg_count(cmd) != 1 ||
220 		scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
221 }
222 
223 /* This function will complete an aen request from the isr */
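/*
 * Returns 0 on success, 1 on failure.  Paths that reach out2 complete and
 * free the request id and clear TW_IN_ATTENTION_LOOP; the other paths leave
 * the same request id in flight carrying the follow-up queue read or
 * time-sync command.
 */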
224 static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
225 {
226 	TW_Command_Full *full_command_packet;
227 	TW_Command *command_packet;
228 	TW_Command_Apache_Header *header;
229 	unsigned short aen;
230 	int retval = 1;
231 
232 	header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
233 	tw_dev->posted_request_count--;
234 	aen = le16_to_cpu(header->status_block.error);
235 	full_command_packet = tw_dev->command_packet_virt[request_id];
236 	command_packet = &full_command_packet->command.oldcommand;
237 
238 	/* First check for internal completion of set param for time sync */
239 	if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
240 		/* Keep reading the queue in case there are more AENs */
241 		if (twa_aen_read_queue(tw_dev, request_id))
242 			goto out2;
243 		else {
244 			retval = 0;
245 			goto out;
246 		}
247 	}
248 
249 	switch (aen) {
250 	case TW_AEN_QUEUE_EMPTY:
251 		/* Quit reading the queue if this is the last one */
252 		break;
253 	case TW_AEN_SYNC_TIME_WITH_HOST:
254 		twa_aen_sync_time(tw_dev, request_id);
255 		retval = 0;
256 		goto out;
257 	default:
258 		twa_aen_queue_event(tw_dev, header);
259 
260 		/* If there are more AENs, keep reading the queue */
261 		if (twa_aen_read_queue(tw_dev, request_id))
262 			goto out2;
263 		else {
264 			retval = 0;
265 			goto out;
266 		}
267 	}
268 	retval = 0;
269 out2:
270 	tw_dev->state[request_id] = TW_S_COMPLETED;
271 	twa_free_request_id(tw_dev, request_id);
272 	clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
273 out:
274 	return retval;
275 } /* End twa_aen_complete() */
276 
277 /* This function will drain the aen queue */
278 static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
279 {
280 	int request_id = 0;
281 	char cdb[TW_MAX_CDB_LEN];
282 	TW_SG_Entry sglist[1];
283 	int finished = 0, count = 0;
284 	TW_Command_Full *full_command_packet;
285 	TW_Command_Apache_Header *header;
286 	unsigned short aen;
287 	int first_reset = 0, queue = 0, retval = 1;
288 
289 	if (no_check_reset)
290 		first_reset = 0;
291 	else
292 		first_reset = 1;
293 
294 	full_command_packet = tw_dev->command_packet_virt[request_id];
295 	memset(full_command_packet, 0, sizeof(TW_Command_Full));
296 
297 	/* Initialize cdb */
298 	memset(&cdb, 0, TW_MAX_CDB_LEN);
299 	cdb[0] = REQUEST_SENSE; /* opcode */
300 	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
301 
302 	/* Initialize sglist */
303 	memset(&sglist, 0, sizeof(TW_SG_Entry));
304 	sglist[0].length = TW_SECTOR_SIZE;
305 	sglist[0].address = tw_dev->generic_buffer_phys[request_id];
306 
307 	if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
308 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
309 		goto out;
310 	}
311 
312 	/* Mark internal command */
313 	tw_dev->srb[request_id] = NULL;
314 
315 	do {
316 		/* Send command to the board */
317 		if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
318 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
319 			goto out;
320 		}
321 
322 		/* Now poll for completion */
323 		if (twa_poll_response(tw_dev, request_id, 30)) {
324 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
325 			tw_dev->posted_request_count--;
326 			goto out;
327 		}
328 
329 		tw_dev->posted_request_count--;
330 		header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
331 		aen = le16_to_cpu(header->status_block.error);
332 		queue = 0;
333 		count++;
334 
335 		switch (aen) {
336 		case TW_AEN_QUEUE_EMPTY:
337 			if (first_reset != 1)
338 				goto out;
339 			else
340 				finished = 1;
341 			break;
342 		case TW_AEN_SOFT_RESET:
343 			if (first_reset == 0)
344 				first_reset = 1;
345 			else
346 				queue = 1;
347 			break;
348 		case TW_AEN_SYNC_TIME_WITH_HOST:
349 			break;
350 		default:
351 			queue = 1;
352 		}
353 
354 		/* Now queue the event info */
355 		if (queue)
356 			twa_aen_queue_event(tw_dev, header);
357 	} while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
358 
359 	if (count == TW_MAX_AEN_DRAIN)
360 		goto out;
361 
362 	retval = 0;
363 out:
364 	tw_dev->state[request_id] = TW_S_INITIAL;
365 	return retval;
366 } /* End twa_aen_drain_queue() */
367 
368 /* This function will queue an event */
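/*
 * Events land in a TW_Q_LENGTH-entry ring indexed by error_index;
 * event_queue_wrapped is set once the ring wraps, and aen_clobber is flagged
 * when a slot holding a never-retrieved event is about to be overwritten.
 */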
369 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
370 {
371 	u32 local_time;
372 	TW_Event *event;
373 	unsigned short aen;
374 	char host[16];
375 	char *error_str;
376 
377 	tw_dev->aen_count++;
378 
379 	/* Fill out event info */
380 	event = tw_dev->event_queue[tw_dev->error_index];
381 
382 	/* Check for clobber */
383 	host[0] = '\0';
384 	if (tw_dev->host) {
385 		sprintf(host, " scsi%d:", tw_dev->host->host_no);
386 		if (event->retrieved == TW_AEN_NOT_RETRIEVED)
387 			tw_dev->aen_clobber = 1;
388 	}
389 
390 	aen = le16_to_cpu(header->status_block.error);
391 	memset(event, 0, sizeof(TW_Event));
392 
393 	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
394 	/* event->time_stamp_sec overflows in y2106 */
395 	local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
396 	event->time_stamp_sec = local_time;
397 	event->aen_code = aen;
398 	event->retrieved = TW_AEN_NOT_RETRIEVED;
399 	event->sequence_id = tw_dev->error_sequence_id;
400 	tw_dev->error_sequence_id++;
401 
402 	/* Check for embedded error string */
403 	error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
404 
405 	header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
406 	event->parameter_len = strlen(header->err_specific_desc);
407 	memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
408 	if (event->severity != TW_AEN_SEVERITY_DEBUG)
409 		printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
410 		       host,
411 		       twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
412 		       TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
413 		       error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
414 		       header->err_specific_desc);
415 	else
416 		tw_dev->aen_count--;
417 
418 	if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
419 		tw_dev->event_queue_wrapped = 1;
420 	tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
421 } /* End twa_aen_queue_event() */
422 
423 /* This function will read the aen queue from the isr */
424 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
425 {
426 	char cdb[TW_MAX_CDB_LEN];
427 	TW_SG_Entry sglist[1];
428 	TW_Command_Full *full_command_packet;
429 	int retval = 1;
430 
431 	full_command_packet = tw_dev->command_packet_virt[request_id];
432 	memset(full_command_packet, 0, sizeof(TW_Command_Full));
433 
434 	/* Initialize cdb */
435 	memset(&cdb, 0, TW_MAX_CDB_LEN);
436 	cdb[0] = REQUEST_SENSE; /* opcode */
437 	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
438 
439 	/* Initialize sglist */
440 	memset(&sglist, 0, sizeof(TW_SG_Entry));
441 	sglist[0].length = TW_SECTOR_SIZE;
442 	sglist[0].address = tw_dev->generic_buffer_phys[request_id];
443 
444 	/* Mark internal command */
445 	tw_dev->srb[request_id] = NULL;
446 
447 	/* Now post the command packet */
448 	if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
449 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
450 		goto out;
451 	}
452 	retval = 0;
453 out:
454 	return retval;
455 } /* End twa_aen_read_queue() */
456 
457 /* This function will look up an AEN severity string */
458 static char *twa_aen_severity_lookup(unsigned char severity_code)
459 {
460 	char *retval = NULL;
461 
462 	if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
463 	    (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
464 		goto out;
465 
466 	retval = twa_aen_severity_table[severity_code];
467 out:
468 	return retval;
469 } /* End twa_aen_severity_lookup() */
470 
471 /* This function will sync firmware time with the host time */
472 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
473 {
474 	u32 schedulertime;
475 	TW_Command_Full *full_command_packet;
476 	TW_Command *command_packet;
477 	TW_Param_Apache *param;
478 	time64_t local_time;
479 
480 	/* Fill out the command packet */
481 	full_command_packet = tw_dev->command_packet_virt[request_id];
482 	memset(full_command_packet, 0, sizeof(TW_Command_Full));
483 	command_packet = &full_command_packet->command.oldcommand;
484 	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
485 	command_packet->request_id = request_id;
486 	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
487 	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
488 	command_packet->size = TW_COMMAND_SIZE;
489 	command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
490 
491 	/* Setup the param */
492 	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
493 	memset(param, 0, TW_SECTOR_SIZE);
494 	param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
495 	param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
496 	param->parameter_size_bytes = cpu_to_le16(4);
497 
498 	/* Convert system time in UTC to local time seconds since last
499 	   Sunday 12:00AM */
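	/* The Unix epoch (1970-01-01) fell on a Thursday, so subtracting
	   3 days (3 * 86400 s) shifts the origin to a Sunday; 604800 is the
	   number of seconds in a week. */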
500 	local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
501 	div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
502 	schedulertime = cpu_to_le32(schedulertime % 604800);
503 
504 	memcpy(param->data, &schedulertime, sizeof(u32));
505 
506 	/* Mark internal command */
507 	tw_dev->srb[request_id] = NULL;
508 
509 	/* Now post the command */
510 	twa_post_command_packet(tw_dev, request_id, 1);
511 } /* End twa_aen_sync_time() */
512 
513 /* This function will allocate memory and check if it is correctly aligned */
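/*
 * One coherent DMA allocation of size * TW_Q_LENGTH bytes is carved into
 * per-request-id slices: which == 0 fills the command packet arrays,
 * which == 1 the generic buffer arrays.
 */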
514 static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
515 {
516 	int i;
517 	dma_addr_t dma_handle;
518 	unsigned long *cpu_addr;
519 	int retval = 1;
520 
521 	cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
522 	if (!cpu_addr) {
523 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
524 		goto out;
525 	}
526 
527 	if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
528 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
529 		pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
530 		goto out;
531 	}
532 
533 	memset(cpu_addr, 0, size*TW_Q_LENGTH);
534 
535 	for (i = 0; i < TW_Q_LENGTH; i++) {
536 		switch(which) {
537 		case 0:
538 			tw_dev->command_packet_phys[i] = dma_handle+(i*size);
539 			tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
540 			break;
541 		case 1:
542 			tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
543 			tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
544 			break;
545 		}
546 	}
547 	retval = 0;
548 out:
549 	return retval;
550 } /* End twa_allocate_memory() */
551 
552 /* This function will check the status register for unexpected bits */
553 static int twa_check_bits(u32 status_reg_value)
554 {
555 	int retval = 1;
556 
557 	if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
558 		goto out;
559 	if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
560 		goto out;
561 
562 	retval = 0;
563 out:
564 	return retval;
565 } /* End twa_check_bits() */
566 
567 /* This function will check the SRL and decide if we are compatible */
568 static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
569 {
570 	int retval = 1;
571 	unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
572 	unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
573 	u32 init_connect_result = 0;
574 
575 	if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
576 			       TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
577 			       TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
578 			       TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
579 			       &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
580 			       &fw_on_ctlr_build, &init_connect_result)) {
581 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
582 		goto out;
583 	}
584 
585 	tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
586 	tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
587 	tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
588 
589 	/* Try base mode compatibility */
590 	if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
591 		if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
592 				       TW_EXTENDED_INIT_CONNECT,
593 				       TW_BASE_FW_SRL, TW_9000_ARCH_ID,
594 				       TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
595 				       &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
596 				       &fw_on_ctlr_branch, &fw_on_ctlr_build,
597 				       &init_connect_result)) {
598 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
599 			goto out;
600 		}
601 		if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
602 			if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
603 				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
604 			} else {
605 				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
606 			}
607 			goto out;
608 		}
609 		tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
610 		tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
611 		tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
612 	}
613 
614 	/* Load rest of compatibility struct */
615 	strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
616 		sizeof(tw_dev->tw_compat_info.driver_version));
617 	tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
618 	tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
619 	tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
620 	tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
621 	tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
622 	tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
623 	tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
624 	tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
625 	tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
626 
627 	retval = 0;
628 out:
629 	return retval;
630 } /* End twa_check_srl() */
631 
632 /* This function handles ioctl for the character device */
633 static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
634 {
635 	struct inode *inode = file_inode(file);
636 	long timeout;
637 	unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
638 	dma_addr_t dma_handle;
639 	int request_id = 0;
640 	unsigned int sequence_id = 0;
641 	unsigned char event_index, start_index;
642 	TW_Ioctl_Driver_Command driver_command;
643 	TW_Ioctl_Buf_Apache *tw_ioctl;
644 	TW_Lock *tw_lock;
645 	TW_Command_Full *full_command_packet;
646 	TW_Compatibility_Info *tw_compat_info;
647 	TW_Event *event;
648 	ktime_t current_time;
649 	TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
650 	int retval = TW_IOCTL_ERROR_OS_EFAULT;
651 	void __user *argp = (void __user *)arg;
652 
653 	mutex_lock(&twa_chrdev_mutex);
654 
655 	/* Only let one of these through at a time */
656 	if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
657 		retval = TW_IOCTL_ERROR_OS_EINTR;
658 		goto out;
659 	}
660 
661 	/* First copy down the driver command */
662 	if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
663 		goto out2;
664 
665 	/* Check data buffer size */
666 	if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
667 		retval = TW_IOCTL_ERROR_OS_EINVAL;
668 		goto out2;
669 	}
670 
671 	/* Hardware can only do transfers in multiples of 512 bytes */
672 	data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
673 
674 	/* Now allocate ioctl buf memory */
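	/* A single coherent buffer holds the TW_Ioctl_Buf_Apache header plus
	   the 512-byte-aligned data area; the "- 1" presumably accounts for a
	   one-byte data_buffer placeholder at the end of the structure. */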
675 	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
676 	if (!cpu_addr) {
677 		retval = TW_IOCTL_ERROR_OS_ENOMEM;
678 		goto out2;
679 	}
680 
681 	tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
682 
683 	/* Now copy down the entire ioctl */
684 	if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
685 		goto out3;
686 
687 	/* See which ioctl we are doing */
688 	switch (cmd) {
689 	case TW_IOCTL_FIRMWARE_PASS_THROUGH:
690 		spin_lock_irqsave(tw_dev->host->host_lock, flags);
691 		twa_get_request_id(tw_dev, &request_id);
692 
693 		/* Flag internal command */
694 		tw_dev->srb[request_id] = NULL;
695 
696 		/* Flag chrdev ioctl */
697 		tw_dev->chrdev_request_id = request_id;
698 
699 		full_command_packet = &tw_ioctl->firmware_command;
700 
701 		/* Load request id and sglist for both command types */
702 		twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
703 
704 		memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
705 
706 		/* Now post the command packet to the controller */
707 		twa_post_command_packet(tw_dev, request_id, 1);
708 		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
709 
710 		timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
711 
712 		/* Now wait for command to complete */
713 		timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
714 
715 		/* We timed out, and didn't get an interrupt */
716 		if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
717 			/* Now we need to reset the board */
718 			printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
719 			       tw_dev->host->host_no, TW_DRIVER, 0x37,
720 			       cmd);
721 			retval = TW_IOCTL_ERROR_OS_EIO;
722 			twa_reset_device_extension(tw_dev);
723 			goto out3;
724 		}
725 
726 		/* Now copy in the command packet response */
727 		memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
728 
729 		/* Now complete the io */
730 		spin_lock_irqsave(tw_dev->host->host_lock, flags);
731 		tw_dev->posted_request_count--;
732 		tw_dev->state[request_id] = TW_S_COMPLETED;
733 		twa_free_request_id(tw_dev, request_id);
734 		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
735 		break;
736 	case TW_IOCTL_GET_COMPATIBILITY_INFO:
737 		tw_ioctl->driver_command.status = 0;
738 		/* Copy compatibility struct into ioctl data buffer */
739 		tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
740 		memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
741 		break;
742 	case TW_IOCTL_GET_LAST_EVENT:
743 		if (tw_dev->event_queue_wrapped) {
744 			if (tw_dev->aen_clobber) {
745 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
746 				tw_dev->aen_clobber = 0;
747 			} else
748 				tw_ioctl->driver_command.status = 0;
749 		} else {
750 			if (!tw_dev->error_index) {
751 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
752 				break;
753 			}
754 			tw_ioctl->driver_command.status = 0;
755 		}
756 		event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
757 		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
758 		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
759 		break;
760 	case TW_IOCTL_GET_FIRST_EVENT:
761 		if (tw_dev->event_queue_wrapped) {
762 			if (tw_dev->aen_clobber) {
763 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
764 				tw_dev->aen_clobber = 0;
765 			} else
766 				tw_ioctl->driver_command.status = 0;
767 			event_index = tw_dev->error_index;
768 		} else {
769 			if (!tw_dev->error_index) {
770 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
771 				break;
772 			}
773 			tw_ioctl->driver_command.status = 0;
774 			event_index = 0;
775 		}
776 		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
777 		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
778 		break;
779 	case TW_IOCTL_GET_NEXT_EVENT:
780 		event = (TW_Event *)tw_ioctl->data_buffer;
781 		sequence_id = event->sequence_id;
782 		tw_ioctl->driver_command.status = 0;
783 
784 		if (tw_dev->event_queue_wrapped) {
785 			if (tw_dev->aen_clobber) {
786 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
787 				tw_dev->aen_clobber = 0;
788 			}
789 			start_index = tw_dev->error_index;
790 		} else {
791 			if (!tw_dev->error_index) {
792 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
793 				break;
794 			}
795 			start_index = 0;
796 		}
797 		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
798 
799 		if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
800 			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
801 				tw_dev->aen_clobber = 1;
802 			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
803 			break;
804 		}
805 		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
806 		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
807 		break;
808 	case TW_IOCTL_GET_PREVIOUS_EVENT:
809 		event = (TW_Event *)tw_ioctl->data_buffer;
810 		sequence_id = event->sequence_id;
811 		tw_ioctl->driver_command.status = 0;
812 
813 		if (tw_dev->event_queue_wrapped) {
814 			if (tw_dev->aen_clobber) {
815 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
816 				tw_dev->aen_clobber = 0;
817 			}
818 			start_index = tw_dev->error_index;
819 		} else {
820 			if (!tw_dev->error_index) {
821 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
822 				break;
823 			}
824 			start_index = 0;
825 		}
826 		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
827 
828 		if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
829 			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
830 				tw_dev->aen_clobber = 1;
831 			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
832 			break;
833 		}
834 		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
835 		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
836 		break;
837 	case TW_IOCTL_GET_LOCK:
838 		tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
839 		current_time = ktime_get();
840 
841 		if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) ||
842 		    ktime_after(current_time, tw_dev->ioctl_time)) {
843 			tw_dev->ioctl_sem_lock = 1;
844 			tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec);
845 			tw_ioctl->driver_command.status = 0;
846 			tw_lock->time_remaining_msec = tw_lock->timeout_msec;
847 		} else {
848 			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
849 			tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time);
850 		}
851 		break;
852 	case TW_IOCTL_RELEASE_LOCK:
853 		if (tw_dev->ioctl_sem_lock == 1) {
854 			tw_dev->ioctl_sem_lock = 0;
855 			tw_ioctl->driver_command.status = 0;
856 		} else {
857 			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
858 		}
859 		break;
860 	default:
861 		retval = TW_IOCTL_ERROR_OS_ENOTTY;
862 		goto out3;
863 	}
864 
865 	/* Now copy the entire response to userspace */
866 	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
867 		retval = 0;
868 out3:
869 	/* Now free ioctl buf memory */
870 	dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
871 out2:
872 	mutex_unlock(&tw_dev->ioctl_lock);
873 out:
874 	mutex_unlock(&twa_chrdev_mutex);
875 	return retval;
876 } /* End twa_chrdev_ioctl() */
877 
878 /* This function handles open for the character device */
879 /* NOTE that this function will race with remove. */
880 static int twa_chrdev_open(struct inode *inode, struct file *file)
881 {
882 	unsigned int minor_number;
883 	int retval = TW_IOCTL_ERROR_OS_ENODEV;
884 
885 	minor_number = iminor(inode);
886 	if (minor_number >= twa_device_extension_count)
887 		goto out;
888 	retval = 0;
889 out:
890 	return retval;
891 } /* End twa_chrdev_open() */
892 
893 /* This function will print readable messages from status register errors */
894 static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
895 {
896 	int retval = 1;
897 
898 	/* Check for various error conditions and handle them appropriately */
899 	if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
900 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
901 		writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
902 	}
903 
904 	if (status_reg_value & TW_STATUS_PCI_ABORT) {
905 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
906 		writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
907 		pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
908 	}
909 
910 	if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
911 		if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
912 		     (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
913 		    (!test_bit(TW_IN_RESET, &tw_dev->flags)))
914 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
915 		writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
916 	}
917 
918 	if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
919 		if (tw_dev->reset_print == 0) {
920 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
921 			tw_dev->reset_print = 1;
922 		}
923 		goto out;
924 	}
925 	retval = 0;
926 out:
927 	return retval;
928 } /* End twa_decode_bits() */
929 
930 /* This function will empty the response queue */
931 static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
932 {
933 	u32 status_reg_value, response_que_value;
934 	int count = 0, retval = 1;
935 
936 	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
937 
938 	while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
939 		response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
940 		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
941 		count++;
942 	}
943 	if (count == TW_MAX_RESPONSE_DRAIN)
944 		goto out;
945 
946 	retval = 0;
947 out:
948 	return retval;
949 } /* End twa_empty_response_queue() */
950 
951 /* This function will clear the pchip/response queue on 9550SX */
952 static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
953 {
954 	u32 response_que_value = 0;
955 	unsigned long before;
956 	int retval = 1;
957 
958 	if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
959 		before = jiffies;
960 		while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
961 			response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
962 			msleep(1);
963 			if (time_after(jiffies, before + HZ * 30))
964 				goto out;
965 		}
966 		/* P-chip settle time */
967 		msleep(500);
968 		retval = 0;
969 	} else
970 		retval = 0;
971 out:
972 	return retval;
973 } /* End twa_empty_response_queue_large() */
974 
975 /* This function passes sense keys from firmware to scsi layer */
976 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
977 {
978 	TW_Command_Full *full_command_packet;
979 	unsigned short error;
980 	int retval = 1;
981 	char *error_str;
982 
983 	full_command_packet = tw_dev->command_packet_virt[request_id];
984 
985 	/* Check for embedded error string */
986 	error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
987 
988 	/* Don't print error for Logical unit not supported during rollcall */
989 	error = le16_to_cpu(full_command_packet->header.status_block.error);
990 	if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
991 		if (print_host)
992 			printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
993 			       tw_dev->host->host_no,
994 			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
995 			       full_command_packet->header.status_block.error,
996 			       error_str[0] == '\0' ?
997 			       twa_string_lookup(twa_error_table,
998 						 full_command_packet->header.status_block.error) : error_str,
999 			       full_command_packet->header.err_specific_desc);
1000 		else
1001 			printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1002 			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1003 			       full_command_packet->header.status_block.error,
1004 			       error_str[0] == '\0' ?
1005 			       twa_string_lookup(twa_error_table,
1006 						 full_command_packet->header.status_block.error) : error_str,
1007 			       full_command_packet->header.err_specific_desc);
1008 	}
1009 
1010 	if (copy_sense) {
1011 		memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
1012 		tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
1013 		retval = TW_ISR_DONT_RESULT;
1014 		goto out;
1015 	}
1016 	retval = 0;
1017 out:
1018 	return retval;
1019 } /* End twa_fill_sense() */
1020 
1021 /* This function will free up device extension resources */
1022 static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1023 {
1024 	if (tw_dev->command_packet_virt[0])
1025 		pci_free_consistent(tw_dev->tw_pci_dev,
1026 				    sizeof(TW_Command_Full)*TW_Q_LENGTH,
1027 				    tw_dev->command_packet_virt[0],
1028 				    tw_dev->command_packet_phys[0]);
1029 
1030 	if (tw_dev->generic_buffer_virt[0])
1031 		pci_free_consistent(tw_dev->tw_pci_dev,
1032 				    TW_SECTOR_SIZE*TW_Q_LENGTH,
1033 				    tw_dev->generic_buffer_virt[0],
1034 				    tw_dev->generic_buffer_phys[0]);
1035 
1036 	kfree(tw_dev->event_queue[0]);
1037 } /* End twa_free_device_extension() */
1038 
1039 /* This function will free a request id */
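/*
 * Request ids circulate through the free_queue ring: twa_get_request_id()
 * pops entries at free_head, this function pushes them back at free_tail,
 * both modulo TW_Q_LENGTH.
 */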
1040 static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1041 {
1042 	tw_dev->free_queue[tw_dev->free_tail] = request_id;
1043 	tw_dev->state[request_id] = TW_S_FINISHED;
1044 	tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1045 } /* End twa_free_request_id() */
1046 
1047 /* This function will get parameter table entries from the firmware */
1048 static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1049 {
1050 	TW_Command_Full *full_command_packet;
1051 	TW_Command *command_packet;
1052 	TW_Param_Apache *param;
1053 	void *retval = NULL;
1054 
1055 	/* Setup the command packet */
1056 	full_command_packet = tw_dev->command_packet_virt[request_id];
1057 	memset(full_command_packet, 0, sizeof(TW_Command_Full));
1058 	command_packet = &full_command_packet->command.oldcommand;
1059 
1060 	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1061 	command_packet->size              = TW_COMMAND_SIZE;
1062 	command_packet->request_id        = request_id;
1063 	command_packet->byte6_offset.block_count = cpu_to_le16(1);
1064 
1065 	/* Now setup the param */
1066 	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1067 	memset(param, 0, TW_SECTOR_SIZE);
1068 	param->table_id = cpu_to_le16(table_id | 0x8000);
1069 	param->parameter_id = cpu_to_le16(parameter_id);
1070 	param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1071 
1072 	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1073 	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1074 
1075 	/* Post the command packet to the board */
1076 	twa_post_command_packet(tw_dev, request_id, 1);
1077 
1078 	/* Poll for completion */
1079 	if (twa_poll_response(tw_dev, request_id, 30))
1080 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1081 	else
1082 		retval = (void *)&(param->data[0]);
1083 
1084 	tw_dev->posted_request_count--;
1085 	tw_dev->state[request_id] = TW_S_INITIAL;
1086 
1087 	return retval;
1088 } /* End twa_get_param() */
1089 
1090 /* This function will assign an available request id */
1091 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1092 {
1093 	*request_id = tw_dev->free_queue[tw_dev->free_head];
1094 	tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1095 	tw_dev->state[*request_id] = TW_S_STARTED;
1096 } /* End twa_get_request_id() */
1097 
1098 /* This function will send an initconnection command to controller */
1099 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1100  			      u32 set_features, unsigned short current_fw_srl,
1101 			      unsigned short current_fw_arch_id,
1102 			      unsigned short current_fw_branch,
1103 			      unsigned short current_fw_build,
1104 			      unsigned short *fw_on_ctlr_srl,
1105 			      unsigned short *fw_on_ctlr_arch_id,
1106 			      unsigned short *fw_on_ctlr_branch,
1107 			      unsigned short *fw_on_ctlr_build,
1108 			      u32 *init_connect_result)
1109 {
1110 	TW_Command_Full *full_command_packet;
1111 	TW_Initconnect *tw_initconnect;
1112 	int request_id = 0, retval = 1;
1113 
1114 	/* Initialize InitConnection command packet */
1115 	full_command_packet = tw_dev->command_packet_virt[request_id];
1116 	memset(full_command_packet, 0, sizeof(TW_Command_Full));
1117 	full_command_packet->header.header_desc.size_header = 128;
1118 
1119 	tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1120 	tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1121 	tw_initconnect->request_id = request_id;
1122 	tw_initconnect->message_credits = cpu_to_le16(message_credits);
1123 	tw_initconnect->features = set_features;
1124 
1125 	/* Turn on 64-bit sgl support if we need to */
1126 	tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
1127 
1128 	tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
1129 
1130 	if (set_features & TW_EXTENDED_INIT_CONNECT) {
1131 		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1132 		tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1133 		tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1134 		tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1135 		tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1136 	} else
1137 		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1138 
1139 	/* Send command packet to the board */
1140 	twa_post_command_packet(tw_dev, request_id, 1);
1141 
1142 	/* Poll for completion */
1143 	if (twa_poll_response(tw_dev, request_id, 30)) {
1144 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1145 	} else {
1146 		if (set_features & TW_EXTENDED_INIT_CONNECT) {
1147 			*fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1148 			*fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1149 			*fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1150 			*fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1151 			*init_connect_result = le32_to_cpu(tw_initconnect->result);
1152 		}
1153 		retval = 0;
1154 	}
1155 
1156 	tw_dev->posted_request_count--;
1157 	tw_dev->state[request_id] = TW_S_INITIAL;
1158 
1159 	return retval;
1160 } /* End twa_initconnection() */
1161 
1162 /* This function will initialize the fields of a device extension */
1163 static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1164 {
1165 	int i, retval = 1;
1166 
1167 	/* Initialize command packet buffers */
1168 	if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1169 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1170 		goto out;
1171 	}
1172 
1173 	/* Initialize generic buffer */
1174 	if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1175 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1176 		goto out;
1177 	}
1178 
1179 	/* Allocate event info space */
1180 	tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1181 	if (!tw_dev->event_queue[0]) {
1182 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1183 		goto out;
1184 	}
1185 
1186 
1187 	for (i = 0; i < TW_Q_LENGTH; i++) {
1188 		tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1189 		tw_dev->free_queue[i] = i;
1190 		tw_dev->state[i] = TW_S_INITIAL;
1191 	}
1192 
1193 	tw_dev->pending_head = TW_Q_START;
1194 	tw_dev->pending_tail = TW_Q_START;
1195 	tw_dev->free_head = TW_Q_START;
1196 	tw_dev->free_tail = TW_Q_START;
1197 	tw_dev->error_sequence_id = 1;
1198 	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1199 
1200 	mutex_init(&tw_dev->ioctl_lock);
1201 	init_waitqueue_head(&tw_dev->ioctl_wqueue);
1202 
1203 	retval = 0;
1204 out:
1205 	return retval;
1206 } /* End twa_initialize_device_extension() */
1207 
1208 /* This function is the interrupt service routine */
1209 static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1210 {
1211 	int request_id, error = 0;
1212 	u32 status_reg_value;
1213 	TW_Response_Queue response_que;
1214 	TW_Command_Full *full_command_packet;
1215 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1216 	int handled = 0;
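	/* handled stays 0, so IRQ_RETVAL() reports IRQ_NONE whenever the
	   status register shows none of our interrupt bits (the IRQ line may
	   be shared). */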
1217 
1218 	/* Get the per adapter lock */
1219 	spin_lock(tw_dev->host->host_lock);
1220 
1221 	/* Read the registers */
1222 	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1223 
1224 	/* Check if this is our interrupt, otherwise bail */
1225 	if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1226 		goto twa_interrupt_bail;
1227 
1228 	handled = 1;
1229 
1230 	/* If we are resetting, bail */
1231 	if (test_bit(TW_IN_RESET, &tw_dev->flags))
1232 		goto twa_interrupt_bail;
1233 
1234 	/* Check controller for errors */
1235 	if (twa_check_bits(status_reg_value)) {
1236 		if (twa_decode_bits(tw_dev, status_reg_value)) {
1237 			TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1238 			goto twa_interrupt_bail;
1239 		}
1240 	}
1241 
1242 	/* Handle host interrupt */
1243 	if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1244 		TW_CLEAR_HOST_INTERRUPT(tw_dev);
1245 
1246 	/* Handle attention interrupt */
1247 	if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1248 		TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1249 		if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1250 			twa_get_request_id(tw_dev, &request_id);
1251 
1252 			error = twa_aen_read_queue(tw_dev, request_id);
1253 			if (error) {
1254 				tw_dev->state[request_id] = TW_S_COMPLETED;
1255 				twa_free_request_id(tw_dev, request_id);
1256 				clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1257 			}
1258 		}
1259 	}
1260 
1261 	/* Handle command interrupt */
1262 	if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1263 		TW_MASK_COMMAND_INTERRUPT(tw_dev);
1264 		/* Drain as many pending commands as we can */
1265 		while (tw_dev->pending_request_count > 0) {
1266 			request_id = tw_dev->pending_queue[tw_dev->pending_head];
1267 			if (tw_dev->state[request_id] != TW_S_PENDING) {
1268 				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1269 				TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1270 				goto twa_interrupt_bail;
1271 			}
1272 			if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
1273 				tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1274 				tw_dev->pending_request_count--;
1275 			} else {
1276 				/* If we get here, we will continue re-posting on the next command interrupt */
1277 				break;
1278 			}
1279 		}
1280 	}
1281 
1282 	/* Handle response interrupt */
1283 	if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1284 
1285 		/* Drain the response queue from the board */
1286 		while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1287 			/* Complete the response */
1288 			response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1289 			request_id = TW_RESID_OUT(response_que.response_id);
1290 			full_command_packet = tw_dev->command_packet_virt[request_id];
1291 			error = 0;
1292 			/* Check for command packet errors */
1293 			if (full_command_packet->command.newcommand.status != 0) {
1294 				if (tw_dev->srb[request_id] != NULL) {
1295 					error = twa_fill_sense(tw_dev, request_id, 1, 1);
1296 				} else {
1297 					/* Skip ioctl error prints */
1298 					if (request_id != tw_dev->chrdev_request_id) {
1299 						error = twa_fill_sense(tw_dev, request_id, 0, 1);
1300 					}
1301 				}
1302 			}
1303 
1304 			/* Check for correct state */
1305 			if (tw_dev->state[request_id] != TW_S_POSTED) {
1306 				if (tw_dev->srb[request_id] != NULL) {
1307 					TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1308 					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1309 					goto twa_interrupt_bail;
1310 				}
1311 			}
1312 
1313 			/* Check for internal command completion */
1314 			if (tw_dev->srb[request_id] == NULL) {
1315 				if (request_id != tw_dev->chrdev_request_id) {
1316 					if (twa_aen_complete(tw_dev, request_id))
1317 						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1318 				} else {
1319 					tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1320 					wake_up(&tw_dev->ioctl_wqueue);
1321 				}
1322 			} else {
1323 				struct scsi_cmnd *cmd;
1324 
1325 				cmd = tw_dev->srb[request_id];
1326 
1327 				twa_scsiop_execute_scsi_complete(tw_dev, request_id);
1328 				/* If no error, the command was a success */
1329 				if (error == 0) {
1330 					cmd->result = (DID_OK << 16);
1331 				}
1332 
1333 				/* If error, command failed */
1334 				if (error == 1) {
1335 					/* Ask for a host reset */
1336 					cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1337 				}
1338 
1339 				/* Report residual bytes for single sgl */
1340 				if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1341 					if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
1342 						scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
1343 				}
1344 
1345 				/* Now complete the io */
1346 				if (twa_command_mapped(cmd))
1347 					scsi_dma_unmap(cmd);
1348 				cmd->scsi_done(cmd);
1349 				tw_dev->state[request_id] = TW_S_COMPLETED;
1350 				twa_free_request_id(tw_dev, request_id);
1351 				tw_dev->posted_request_count--;
1352 			}
1353 
1354 			/* Check for valid status after each drain */
1355 			status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1356 			if (twa_check_bits(status_reg_value)) {
1357 				if (twa_decode_bits(tw_dev, status_reg_value)) {
1358 					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1359 					goto twa_interrupt_bail;
1360 				}
1361 			}
1362 		}
1363 	}
1364 
1365 twa_interrupt_bail:
1366 	spin_unlock(tw_dev->host->host_lock);
1367 	return IRQ_RETVAL(handled);
1368 } /* End twa_interrupt() */
1369 
1370 /* This function will load the request id and various sgls for ioctls */
1371 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1372 {
1373 	TW_Command *oldcommand;
1374 	TW_Command_Apache *newcommand;
1375 	TW_SG_Entry *sgl;
1376 	unsigned int pae = 0;
1377 
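	/* pae is set on 32-bit kernels built with a 64-bit dma_addr_t (PAE);
	   below it widens the legacy command's size and, on the 9690SA, the
	   computed SG-list offset by one 32-bit word. */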
1378 	if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1379 		pae = 1;
1380 
1381 	if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1382 		newcommand = &full_command_packet->command.newcommand;
1383 		newcommand->request_id__lunl =
1384 			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1385 		if (length) {
1386 			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1387 			newcommand->sg_list[0].length = cpu_to_le32(length);
1388 		}
1389 		newcommand->sgl_entries__lunh =
1390 			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
1391 	} else {
1392 		oldcommand = &full_command_packet->command.oldcommand;
1393 		oldcommand->request_id = request_id;
1394 
1395 		if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1396 			/* Load the sg list */
1397 			if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1398 				sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1399 			else
1400 				sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1401 			sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1402 			sgl->length = cpu_to_le32(length);
1403 
1404 			oldcommand->size += pae;
1405 		}
1406 	}
1407 } /* End twa_load_sgl() */
1408 
1409 /* This function will poll for a response interrupt of a request */
1410 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1411 {
1412 	int retval = 1, found = 0, response_request_id;
1413 	TW_Response_Queue response_queue;
1414 	TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1415 
1416 	if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1417 		response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1418 		response_request_id = TW_RESID_OUT(response_queue.response_id);
1419 		if (request_id != response_request_id) {
1420 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1421 			goto out;
1422 		}
1423 		if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1424 			if (full_command_packet->command.newcommand.status != 0) {
1425 				/* bad response */
1426 				twa_fill_sense(tw_dev, request_id, 0, 0);
1427 				goto out;
1428 			}
1429 			found = 1;
1430 		} else {
1431 			if (full_command_packet->command.oldcommand.status != 0) {
1432 				/* bad response */
1433 				twa_fill_sense(tw_dev, request_id, 0, 0);
1434 				goto out;
1435 			}
1436 			found = 1;
1437 		}
1438 	}
1439 
1440 	if (found)
1441 		retval = 0;
1442 out:
1443 	return retval;
1444 } /* End twa_poll_response() */
1445 
1446 /* This function will poll the status register for a flag */
1447 static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1448 {
1449 	u32 status_reg_value;
1450 	unsigned long before;
1451 	int retval = 1;
1452 
1453 	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1454 	before = jiffies;
1455 
1456 	if (twa_check_bits(status_reg_value))
1457 		twa_decode_bits(tw_dev, status_reg_value);
1458 
1459 	while ((status_reg_value & flag) != flag) {
1460 		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1461 
1462 		if (twa_check_bits(status_reg_value))
1463 			twa_decode_bits(tw_dev, status_reg_value);
1464 
1465 		if (time_after(jiffies, before + HZ * seconds))
1466 			goto out;
1467 
1468 		msleep(50);
1469 	}
1470 	retval = 0;
1471 out:
1472 	return retval;
1473 } /* End twa_poll_status() */
1474 
1475 /* This function will poll the status register for disappearance of a flag */
1476 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1477 {
1478 	u32 status_reg_value;
1479 	unsigned long before;
1480 	int retval = 1;
1481 
1482 	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1483 	before = jiffies;
1484 
1485 	if (twa_check_bits(status_reg_value))
1486 		twa_decode_bits(tw_dev, status_reg_value);
1487 
1488 	while ((status_reg_value & flag) != 0) {
1489 		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1490 		if (twa_check_bits(status_reg_value))
1491 			twa_decode_bits(tw_dev, status_reg_value);
1492 
1493 		if (time_after(jiffies, before + HZ * seconds))
1494 			goto out;
1495 
1496 		msleep(50);
1497 	}
1498 	retval = 0;
1499 out:
1500 	return retval;
1501 } /* End twa_poll_status_gone() */
1502 
1503 /* This function will attempt to post a command packet to the board */
1504 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1505 {
1506 	u32 status_reg_value;
1507 	dma_addr_t command_que_value;
1508 	int retval = 1;
1509 
1510 	command_que_value = tw_dev->command_packet_phys[request_id];
1511 
1512 	/* For 9650SE/9690SA write the low 4 bytes first */
1513 	if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1514 	    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1515 		command_que_value += TW_COMMAND_OFFSET;
1516 		writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
1517 	}
1518 
1519 	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1520 
1521 	if (twa_check_bits(status_reg_value))
1522 		twa_decode_bits(tw_dev, status_reg_value);
1523 
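	/* Don't post if other requests are pending (and this isn't the pended
	   request) or the controller's command queue is full */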
1524 	if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1525 
1526 		/* Only pend internal driver commands */
1527 		if (!internal) {
1528 			retval = SCSI_MLQUEUE_HOST_BUSY;
1529 			goto out;
1530 		}
1531 
1532 		/* Couldn't post the command packet, so pend it and post it later */
1533 		if (tw_dev->state[request_id] != TW_S_PENDING) {
1534 			tw_dev->state[request_id] = TW_S_PENDING;
1535 			tw_dev->pending_request_count++;
1536 			if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1537 				tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1538 			}
1539 			tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1540 			tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1541 		}
1542 		TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1543 		goto out;
1544 	} else {
1545 		if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1546 		    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1547 			/* Now write upper 4 bytes */
1548 			writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1549 		} else {
1550 			if (sizeof(dma_addr_t) > 4) {
1551 				command_que_value += TW_COMMAND_OFFSET;
1552 				writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1553 				writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1554 			} else {
1555 				writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1556 			}
1557 		}
1558 		tw_dev->state[request_id] = TW_S_POSTED;
1559 		tw_dev->posted_request_count++;
1560 		if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1561 			tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1562 		}
1563 	}
1564 	retval = 0;
1565 out:
1566 	return retval;
1567 } /* End twa_post_command_packet() */
1568 
1569 /* This function will reset a device extension */
1570 static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1571 {
1572 	int i = 0;
1573 	int retval = 1;
1574 	unsigned long flags = 0;
1575 
1576 	set_bit(TW_IN_RESET, &tw_dev->flags);
1577 	TW_DISABLE_INTERRUPTS(tw_dev);
1578 	TW_MASK_COMMAND_INTERRUPT(tw_dev);
1579 	spin_lock_irqsave(tw_dev->host->host_lock, flags);
1580 
1581 	/* Abort all requests that are in progress */
1582 	for (i = 0; i < TW_Q_LENGTH; i++) {
1583 		if ((tw_dev->state[i] != TW_S_FINISHED) &&
1584 		    (tw_dev->state[i] != TW_S_INITIAL) &&
1585 		    (tw_dev->state[i] != TW_S_COMPLETED)) {
1586 			if (tw_dev->srb[i]) {
1587 				struct scsi_cmnd *cmd = tw_dev->srb[i];
1588 
1589 				cmd->result = (DID_RESET << 16);
1590 				if (twa_command_mapped(cmd))
1591 					scsi_dma_unmap(cmd);
1592 				cmd->scsi_done(cmd);
1593 			}
1594 		}
1595 	}
1596 
1597 	/* Reset queues and counts */
1598 	for (i = 0; i < TW_Q_LENGTH; i++) {
1599 		tw_dev->free_queue[i] = i;
1600 		tw_dev->state[i] = TW_S_INITIAL;
1601 	}
1602 	tw_dev->free_head = TW_Q_START;
1603 	tw_dev->free_tail = TW_Q_START;
1604 	tw_dev->posted_request_count = 0;
1605 	tw_dev->pending_request_count = 0;
1606 	tw_dev->pending_head = TW_Q_START;
1607 	tw_dev->pending_tail = TW_Q_START;
1608 	tw_dev->reset_print = 0;
1609 
1610 	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1611 
1612 	if (twa_reset_sequence(tw_dev, 1))
1613 		goto out;
1614 
1615 	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1616 	clear_bit(TW_IN_RESET, &tw_dev->flags);
1617 	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1618 
1619 	retval = 0;
1620 out:
1621 	return retval;
1622 } /* End twa_reset_device_extension() */
1623 
1624 /* This function will reset a controller */
1625 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1626 {
1627 	int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1628 
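	/* Retry the full sequence up to TW_MAX_RESET_TRIES times; any failure
	   below forces a soft reset on the next pass */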
1629 	while (tries < TW_MAX_RESET_TRIES) {
1630 		if (do_soft_reset) {
1631 			TW_SOFT_RESET(tw_dev);
1632 			/* Clear pchip/response queue on 9550SX */
1633 			if (twa_empty_response_queue_large(tw_dev)) {
1634 				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1635 				do_soft_reset = 1;
1636 				tries++;
1637 				continue;
1638 			}
1639 		}
1640 
1641 		/* Make sure controller is in a good state */
1642 		if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1643 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1644 			do_soft_reset = 1;
1645 			tries++;
1646 			continue;
1647 		}
1648 
1649 		/* Empty response queue */
1650 		if (twa_empty_response_queue(tw_dev)) {
1651 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1652 			do_soft_reset = 1;
1653 			tries++;
1654 			continue;
1655 		}
1656 
1657 		flashed = 0;
1658 
1659 		/* Check for compatibility/flash */
1660 		if (twa_check_srl(tw_dev, &flashed)) {
1661 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1662 			do_soft_reset = 1;
1663 			tries++;
1664 			continue;
1665 		} else {
1666 			if (flashed) {
1667 				tries++;
1668 				continue;
1669 			}
1670 		}
1671 
1672 		/* Drain the AEN queue */
1673 		if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1674 			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1675 			do_soft_reset = 1;
1676 			tries++;
1677 			continue;
1678 		}
1679 
1680 		/* If we got here, controller is in a good state */
1681 		retval = 0;
1682 		goto out;
1683 	}
1684 out:
1685 	return retval;
1686 } /* End twa_reset_sequence() */
1687 
1688 /* This function returns unit geometry in cylinders/heads/sectors */
1689 static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1690 {
1691 	int heads, sectors, cylinders;
1692 	TW_Device_Extension *tw_dev;
1693 
1694 	tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
1695 
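	/* Use a 255 heads/63 sectors geometry for units of 1GB (0x200000
	   sectors) or more, 64/32 otherwise */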
1696 	if (capacity >= 0x200000) {
1697 		heads = 255;
1698 		sectors = 63;
1699 		cylinders = sector_div(capacity, heads * sectors);
1700 	} else {
1701 		heads = 64;
1702 		sectors = 32;
1703 		cylinders = sector_div(capacity, heads * sectors);
1704 	}
1705 
1706 	geom[0] = heads;
1707 	geom[1] = sectors;
1708 	geom[2] = cylinders;
1709 
1710 	return 0;
1711 } /* End twa_scsi_biosparam() */
1712 
1713 /* This is the new scsi eh reset function */
1714 static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1715 {
1716 	TW_Device_Extension *tw_dev = NULL;
1717 	int retval = FAILED;
1718 
1719 	tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1720 
1721 	tw_dev->num_resets++;
1722 
1723 	sdev_printk(KERN_WARNING, SCpnt->device,
1724 		"WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1725 		TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1726 
1727 	/* Make sure we are not issuing an ioctl or resetting from ioctl */
1728 	mutex_lock(&tw_dev->ioctl_lock);
1729 
1730 	/* Now reset the card and some of the device extension data */
1731 	if (twa_reset_device_extension(tw_dev)) {
1732 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1733 		goto out;
1734 	}
1735 
1736 	retval = SUCCESS;
1737 out:
1738 	mutex_unlock(&tw_dev->ioctl_lock);
1739 	return retval;
1740 } /* End twa_scsi_eh_reset() */
1741 
1742 /* This is the main scsi queue function to handle scsi opcodes */
1743 static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1744 {
1745 	int request_id, retval;
1746 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1747 
1748 	/* If we are resetting due to a timed-out ioctl, report as busy */
1749 	if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1750 		retval = SCSI_MLQUEUE_HOST_BUSY;
1751 		goto out;
1752 	}
1753 
1754 	/* Check if this FW supports luns */
1755 	if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1756 		SCpnt->result = (DID_BAD_TARGET << 16);
1757 		done(SCpnt);
1758 		retval = 0;
1759 		goto out;
1760 	}
1761 
1762 	/* Save done function into scsi_cmnd struct */
1763 	SCpnt->scsi_done = done;
1764 
1765 	/* Get a free request id */
1766 	twa_get_request_id(tw_dev, &request_id);
1767 
1768 	/* Save the scsi command for use by the ISR */
1769 	tw_dev->srb[request_id] = SCpnt;
1770 
1771 	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1772 	switch (retval) {
1773 	case SCSI_MLQUEUE_HOST_BUSY:
1774 		if (twa_command_mapped(SCpnt))
1775 			scsi_dma_unmap(SCpnt);
1776 		twa_free_request_id(tw_dev, request_id);
1777 		break;
1778 	case 1:
1779 		SCpnt->result = (DID_ERROR << 16);
1780 		if (twa_command_mapped(SCpnt))
1781 			scsi_dma_unmap(SCpnt);
1782 		done(SCpnt);
1783 		tw_dev->state[request_id] = TW_S_COMPLETED;
1784 		twa_free_request_id(tw_dev, request_id);
1785 		retval = 0;
1786 	}
1787 out:
1788 	return retval;
1789 } /* End twa_scsi_queue_lck() */
1790 
1791 static DEF_SCSI_QCMD(twa_scsi_queue)
1792 
1793 /* This function hands SCSI CDBs to the firmware */
1794 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
1795 {
1796 	TW_Command_Full *full_command_packet;
1797 	TW_Command_Apache *command_packet;
1798 	u32 num_sectors = 0x0;
1799 	int i, sg_count;
1800 	struct scsi_cmnd *srb = NULL;
1801 	struct scatterlist *sglist = NULL, *sg;
1802 	int retval = 1;
1803 
1804 	if (tw_dev->srb[request_id]) {
1805 		srb = tw_dev->srb[request_id];
1806 		if (scsi_sglist(srb))
1807 			sglist = scsi_sglist(srb);
1808 	}
1809 
1810 	/* Initialize command packet */
1811 	full_command_packet = tw_dev->command_packet_virt[request_id];
1812 	full_command_packet->header.header_desc.size_header = 128;
1813 	full_command_packet->header.status_block.error = 0;
1814 	full_command_packet->header.status_block.severity__reserved = 0;
1815 
1816 	command_packet = &full_command_packet->command.newcommand;
1817 	command_packet->status = 0;
1818 	command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1819 
1820 	/* We forced 16-byte CDB use earlier */
1821 	if (!cdb)
1822 		memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1823 	else
1824 		memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
1825 
1826 	if (srb) {
1827 		command_packet->unit = srb->device->id;
1828 		command_packet->request_id__lunl =
1829 			cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
1830 	} else {
1831 		command_packet->request_id__lunl =
1832 			cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
1833 		command_packet->unit = 0;
1834 	}
1835 
1836 	command_packet->sgl_offset = 16;
1837 
1838 	if (!sglistarg) {
1839 		/* Map sglist from scsi layer to cmd packet */
1840 
1841 		if (scsi_sg_count(srb)) {
1842 			if (!twa_command_mapped(srb)) {
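				/* Not DMA-mapped: bounce the data through the
				   per-request generic buffer */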
1843 				if (srb->sc_data_direction == DMA_TO_DEVICE ||
1844 				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
1845 					scsi_sg_copy_to_buffer(srb,
1846 							       tw_dev->generic_buffer_virt[request_id],
1847 							       TW_SECTOR_SIZE);
1848 				command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1849 				command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1850 			} else {
1851 				sg_count = scsi_dma_map(srb);
1852 				if (sg_count < 0)
1853 					goto out;
1854 
1855 				scsi_for_each_sg(srb, sg, sg_count, i) {
1856 					command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1857 					command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
1858 					if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1859 						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1860 						goto out;
1861 					}
1862 				}
1863 			}
1864 			command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
1865 		}
1866 	} else {
1867 		/* Internal cdb post */
1868 		for (i = 0; i < use_sg; i++) {
1869 			command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
1870 			command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
1871 			if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1872 				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1873 				goto out;
1874 			}
1875 		}
1876 		command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
1877 	}
1878 
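	/* Pull the transfer length out of 6- and 10-byte read/write CDBs
	   for the sector statistics below */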
1879 	if (srb) {
1880 		if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1881 			num_sectors = (u32)srb->cmnd[4];
1882 
1883 		if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1884 			num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1885 	}
1886 
1887 	/* Update sector statistics */
1888 	tw_dev->sector_count = num_sectors;
1889 	if (tw_dev->sector_count > tw_dev->max_sector_count)
1890 		tw_dev->max_sector_count = tw_dev->sector_count;
1891 
1892 	/* Update SG statistics */
1893 	if (srb) {
1894 		tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1895 		if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1896 			tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1897 	}
1898 
1899 	/* Now post the command to the board */
1900 	if (srb) {
1901 		retval = twa_post_command_packet(tw_dev, request_id, 0);
1902 	} else {
1903 		twa_post_command_packet(tw_dev, request_id, 1);
1904 		retval = 0;
1905 	}
1906 out:
1907 	return retval;
1908 } /* End twa_scsiop_execute_scsi() */
1909 
1910 /* This function completes an execute scsi operation */
1911 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1912 {
1913 	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1914 
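	/* For unmapped commands that read data, copy it back out of the
	   generic bounce buffer */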
1915 	if (!twa_command_mapped(cmd) &&
1916 	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1917 	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1918 		if (scsi_sg_count(cmd) == 1) {
1919 			void *buf = tw_dev->generic_buffer_virt[request_id];
1920 
1921 			scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1922 		}
1923 	}
1924 } /* End twa_scsiop_execute_scsi_complete() */
1925 
1926 /* This function tells the controller to shut down */
1927 static void __twa_shutdown(TW_Device_Extension *tw_dev)
1928 {
1929 	/* Disable interrupts */
1930 	TW_DISABLE_INTERRUPTS(tw_dev);
1931 
1932 	/* Free up the IRQ */
1933 	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1934 
1935 	printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1936 
1937 	/* Tell the card we are shutting down */
1938 	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1939 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1940 	} else {
1941 		printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1942 	}
1943 
1944 	/* Clear all interrupts just before exit */
1945 	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1946 } /* End __twa_shutdown() */
1947 
1948 /* Wrapper for __twa_shutdown */
1949 static void twa_shutdown(struct pci_dev *pdev)
1950 {
1951 	struct Scsi_Host *host = pci_get_drvdata(pdev);
1952 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1953 
1954 	__twa_shutdown(tw_dev);
1955 } /* End twa_shutdown() */
1956 
1957 /* This function will look up a message string by code */
1958 static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1959 {
1960 	int index;
1961 
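	/* Scan until the code matches or the NULL text entry ends the table */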
1962 	for (index = 0; (code != table[index].code) &&
1963 		     (table[index].text != NULL); index++);
1964 	return table[index].text;
1965 } /* End twa_string_lookup() */
1966 
1967 /* This function gets called when a disk is coming on-line */
1968 static int twa_slave_configure(struct scsi_device *sdev)
1969 {
1970 	/* Force 60 second timeout */
1971 	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
1972 
1973 	return 0;
1974 } /* End twa_slave_configure() */
1975 
1976 /* scsi_host_template initializer */
1977 static struct scsi_host_template driver_template = {
1978 	.module			= THIS_MODULE,
1979 	.name			= "3ware 9000 Storage Controller",
1980 	.queuecommand		= twa_scsi_queue,
1981 	.eh_host_reset_handler	= twa_scsi_eh_reset,
1982 	.bios_param		= twa_scsi_biosparam,
1983 	.change_queue_depth	= scsi_change_queue_depth,
1984 	.can_queue		= TW_Q_LENGTH-2,
1985 	.slave_configure	= twa_slave_configure,
1986 	.this_id		= -1,
1987 	.sg_tablesize		= TW_APACHE_MAX_SGL_LENGTH,
1988 	.max_sectors		= TW_MAX_SECTORS,
1989 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
1990 	.use_clustering		= ENABLE_CLUSTERING,
1991 	.shost_attrs		= twa_host_attrs,
1992 	.emulated		= 1,
1993 	.no_write_same		= 1,
1994 };
1995 
1996 /* This function will probe and initialize a card */
1997 static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
1998 {
1999 	struct Scsi_Host *host = NULL;
2000 	TW_Device_Extension *tw_dev;
2001 	unsigned long mem_addr, mem_len;
2002 	int retval = -ENODEV;
2003 
2004 	retval = pci_enable_device(pdev);
2005 	if (retval) {
2006 		TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2007 		goto out_disable_device;
2008 	}
2009 
2010 	pci_set_master(pdev);
2011 	pci_try_set_mwi(pdev);
2012 
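	/* Prefer 64-bit DMA masks; fall back to 32-bit if they can't be set */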
2013 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2014 	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2015 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2016 		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2017 			TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2018 			retval = -ENODEV;
2019 			goto out_disable_device;
2020 		}
2021 
2022 	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2023 	if (!host) {
2024 		TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2025 		retval = -ENOMEM;
2026 		goto out_disable_device;
2027 	}
2028 	tw_dev = (TW_Device_Extension *)host->hostdata;
2029 
2030 	/* Save values to device extension */
2031 	tw_dev->host = host;
2032 	tw_dev->tw_pci_dev = pdev;
2033 
2034 	if (twa_initialize_device_extension(tw_dev)) {
2035 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2036 		goto out_free_device_extension;
2037 	}
2038 
2039 	/* Request IO regions */
2040 	retval = pci_request_regions(pdev, "3w-9xxx");
2041 	if (retval) {
2042 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2043 		goto out_free_device_extension;
2044 	}
2045 
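	/* The 9000 maps its registers through PCI resource 1; newer
	   controllers use resource 2 */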
2046 	if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2047 		mem_addr = pci_resource_start(pdev, 1);
2048 		mem_len = pci_resource_len(pdev, 1);
2049 	} else {
2050 		mem_addr = pci_resource_start(pdev, 2);
2051 		mem_len = pci_resource_len(pdev, 2);
2052 	}
2053 
2054 	/* Save base address */
2055 	tw_dev->base_addr = ioremap(mem_addr, mem_len);
2056 	if (!tw_dev->base_addr) {
2057 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2058 		goto out_release_mem_region;
2059 	}
2060 
2061 	/* Disable interrupts on the card */
2062 	TW_DISABLE_INTERRUPTS(tw_dev);
2063 
2064 	/* Initialize the card */
2065 	if (twa_reset_sequence(tw_dev, 0))
2066 		goto out_iounmap;
2067 
2068 	/* Set host specific parameters */
2069 	if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2070 	    (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2071 		host->max_id = TW_MAX_UNITS_9650SE;
2072 	else
2073 		host->max_id = TW_MAX_UNITS;
2074 
2075 	host->max_cmd_len = TW_MAX_CDB_LEN;
2076 
2077 	/* Channels aren't supported by the adapter */
2078 	host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2079 	host->max_channel = 0;
2080 
2081 	/* Register the card with the kernel SCSI layer */
2082 	retval = scsi_add_host(host, &pdev->dev);
2083 	if (retval) {
2084 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2085 		goto out_iounmap;
2086 	}
2087 
2088 	pci_set_drvdata(pdev, host);
2089 
2090 	printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2091 	       host->host_no, mem_addr, pdev->irq);
2092 	printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2093 	       host->host_no,
2094 	       (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2095 				     TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2096 	       (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2097 				     TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2098 	       le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2099 				     TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2100 
2101 	/* Try to enable MSI */
2102 	if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2103 	    !pci_enable_msi(pdev))
2104 		set_bit(TW_USING_MSI, &tw_dev->flags);
2105 
2106 	/* Now setup the interrupt handler */
2107 	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2108 	if (retval) {
2109 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2110 		goto out_remove_host;
2111 	}
2112 
2113 	twa_device_extension_list[twa_device_extension_count] = tw_dev;
2114 	twa_device_extension_count++;
2115 
2116 	/* Re-enable interrupts on the card */
2117 	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2118 
2119 	/* Finally, scan the host */
2120 	scsi_scan_host(host);
2121 
2122 	if (twa_major == -1) {
2123 		if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
2124 			TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2125 	}
2126 	return 0;
2127 
2128 out_remove_host:
2129 	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2130 		pci_disable_msi(pdev);
2131 	scsi_remove_host(host);
2132 out_iounmap:
2133 	iounmap(tw_dev->base_addr);
2134 out_release_mem_region:
2135 	pci_release_regions(pdev);
2136 out_free_device_extension:
2137 	twa_free_device_extension(tw_dev);
2138 	scsi_host_put(host);
2139 out_disable_device:
2140 	pci_disable_device(pdev);
2141 
2142 	return retval;
2143 } /* End twa_probe() */
2144 
2145 /* This function is called to remove a device */
2146 static void twa_remove(struct pci_dev *pdev)
2147 {
2148 	struct Scsi_Host *host = pci_get_drvdata(pdev);
2149 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2150 
2151 	scsi_remove_host(tw_dev->host);
2152 
2153 	/* Unregister character device */
2154 	if (twa_major >= 0) {
2155 		unregister_chrdev(twa_major, "twa");
2156 		twa_major = -1;
2157 	}
2158 
2159 	/* Shutdown the card */
2160 	__twa_shutdown(tw_dev);
2161 
2162 	/* Disable MSI if enabled */
2163 	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2164 		pci_disable_msi(pdev);
2165 
2166 	/* Free IO remapping */
2167 	iounmap(tw_dev->base_addr);
2168 
2169 	/* Free up the mem region */
2170 	pci_release_regions(pdev);
2171 
2172 	/* Free up device extension resources */
2173 	twa_free_device_extension(tw_dev);
2174 
2175 	scsi_host_put(tw_dev->host);
2176 	pci_disable_device(pdev);
2177 	twa_device_extension_count--;
2178 } /* End twa_remove() */
2179 
2180 #ifdef CONFIG_PM
2181 /* This function is called on PCI suspend */
2182 static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
2183 {
2184 	struct Scsi_Host *host = pci_get_drvdata(pdev);
2185 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2186 
2187 	printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
2188 
2189 	TW_DISABLE_INTERRUPTS(tw_dev);
2190 	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
2191 
2192 	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2193 		pci_disable_msi(pdev);
2194 
2195 	/* Tell the card we are shutting down */
2196 	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
2197 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
2198 	} else {
2199 		printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
2200 	}
2201 	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
2202 
2203 	pci_save_state(pdev);
2204 	pci_disable_device(pdev);
2205 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
2206 
2207 	return 0;
2208 } /* End twa_suspend() */
2209 
2210 /* This function is called on PCI resume */
2211 static int twa_resume(struct pci_dev *pdev)
2212 {
2213 	int retval = 0;
2214 	struct Scsi_Host *host = pci_get_drvdata(pdev);
2215 	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2216 
2217 	printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
2218 	pci_set_power_state(pdev, PCI_D0);
2219 	pci_enable_wake(pdev, PCI_D0, 0);
2220 	pci_restore_state(pdev);
2221 
2222 	retval = pci_enable_device(pdev);
2223 	if (retval) {
2224 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
2225 		return retval;
2226 	}
2227 
2228 	pci_set_master(pdev);
2229 	pci_try_set_mwi(pdev);
2230 
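	/* As at probe time, prefer 64-bit DMA and fall back to 32-bit */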
2231 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2232 	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2233 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2234 		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2235 			TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
2236 			retval = -ENODEV;
2237 			goto out_disable_device;
2238 		}
2239 
2240 	/* Initialize the card */
2241 	if (twa_reset_sequence(tw_dev, 0)) {
2242 		retval = -ENODEV;
2243 		goto out_disable_device;
2244 	}
2245 
2246 	/* Now setup the interrupt handler */
2247 	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2248 	if (retval) {
2249 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
2250 		retval = -ENODEV;
2251 		goto out_disable_device;
2252 	}
2253 
2254 	/* Re-enable MSI if it was in use before suspend */
2255 	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2256 		pci_enable_msi(pdev);
2257 
2258 	/* Re-enable interrupts on the card */
2259 	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2260 
2261 	printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
2262 	return 0;
2263 
2264 out_disable_device:
2265 	scsi_remove_host(host);
2266 	pci_disable_device(pdev);
2267 
2268 	return retval;
2269 } /* End twa_resume() */
2270 #endif
2271 
2272 /* PCI Devices supported by this driver */
2273 static struct pci_device_id twa_pci_tbl[] = {
2274 	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2275 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2276 	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2277 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2278 	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2279 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2280 	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2281 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2282 	{ }
2283 };
2284 MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2285 
2286 /* pci_driver initializer */
2287 static struct pci_driver twa_driver = {
2288 	.name		= "3w-9xxx",
2289 	.id_table	= twa_pci_tbl,
2290 	.probe		= twa_probe,
2291 	.remove		= twa_remove,
2292 #ifdef CONFIG_PM
2293 	.suspend	= twa_suspend,
2294 	.resume		= twa_resume,
2295 #endif
2296 	.shutdown	= twa_shutdown
2297 };
2298 
2299 /* This function is called on driver initialization */
2300 static int __init twa_init(void)
2301 {
2302 	printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2303 
2304 	return pci_register_driver(&twa_driver);
2305 } /* End twa_init() */
2306 
2307 /* This function is called on driver exit */
2308 static void __exit twa_exit(void)
2309 {
2310 	pci_unregister_driver(&twa_driver);
2311 } /* End twa_exit() */
2312 
2313 module_init(twa_init);
2314 module_exit(twa_exit);
2315 
2316