/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_request.h>
#include "ipr.h"

/*
 *   Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone and Citrine */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

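/*
 * Illustrative usage (editor's note, not part of the driver itself): the
 * module parameters declared above are set at load time, e.g.
 *
 *	modprobe ipr max_speed=2 log_level=2 transop_timeout=360
 *
 * which permits U320 negotiation, raises the driver's log verbosity, and
 * gives the adapter 360 seconds to come operational.
 */
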
static const char *ipr_gpdd_dev_end_states[] = {
	"Command complete",
	"Terminated by host",
	"Terminated by device reset",
	"Terminated by bus reset",
	"Unknown",
	"Command not started"
};

static const char *ipr_gpdd_dev_bus_phases[] = {
	"Bus free",
	"Arbitration",
	"Selection",
	"Message out",
	"Command",
	"Message in",
	"Data out",
	"Data in",
	"Status",
	"Reselection",
	"Unknown"
};

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, 1,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, 1,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01170600, 0, 1,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, 1,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, 1,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, 1,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, 1,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, 1,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, 1,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, 1,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, 1,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, 1,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, 1,
	"FFF6: Device bus error, message or command phase"},
	{0x015D0000, 0, 1,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, 1,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, 1,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, 1,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, 1,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, 1,
	"3100: Device bus error"},
	{0x04080100, 0, 1,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04118000, 0, 1,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, 1,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, 1,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, 1,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, 1,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, 1,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, 1,
	"3400: Logical unit failure"},
	{0x04408500, 0, 1,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, 1,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, 1,
	"FFF4: Disk device problem"},
	{0x04448200, 1, 1,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, 1,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, 1,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, 1,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, 1,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, 1,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, 1,
	"9081: IOA detected device error"},
	{0x0444A300, 0, 1,
	"9082: IOA detected device error"},
	{0x044A0000, 1, 1,
	"3110: Device bus error, message or command phase"},
	{0x04670400, 0, 1,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x046E0000, 0, 1,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x06040500, 0, 1,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, 1,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06290000, 0, 1,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, 1,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, 1,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, 1,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x06670100, 0, 1,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, 1,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, 1,
	"3150: SCSI bus configuration error"},
	{0x06690200, 0, 1,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, 1,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, 1,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8200, 0, 1,
	"9032: Array exposed but still protected"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, 1,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, 1,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, 1,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, 1,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, 1,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, 1,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, 1,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, 1,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, 1,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, 1,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, 1,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, 1,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, 1,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, 1,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, 1,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, 1,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, 1,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, 1,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, 1,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, 1,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:		trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->cmd_index = ipr_cmd->cmd_index;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif
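
/*
 * Editor's note -- a minimal sketch of how the trace hook is used by the
 * command paths below: a start entry is recorded when a command is handed
 * to the adapter, and a finish entry is recorded with the resulting IOASC,
 * e.g.
 *
 *	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
 *	...
 *	ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
 *
 * When CONFIG_SCSI_IPR_TRACE is not set, the do { } while(0) stub above
 * compiles these calls away entirely.
 */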

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}
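
/*
 * Editor's note: ipr_get_free_ipr_cmnd() does not check free_q for
 * emptiness. It assumes the command block pool is sized so that a free
 * entry is always available to its callers, which are also expected to
 * hold the host lock to serialize access to the queue.
 */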

/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
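
/*
 * Editor's note: the trailing readl() of the sense interrupt register in
 * ipr_mask_and_clear_interrupts() is not dead code. Reading back through
 * the same BAR flushes the posted MMIO writes, so the interrupt mask is
 * known to be in effect by the time the function returns.
 */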

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	} else {
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to setup PCI-X command register\n");
		return -EIO;
	}

	return 0;
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:			done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}
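
/*
 * Editor's note: the mb() in ipr_do_req() orders the IOARCB setup in host
 * memory ahead of the MMIO write to ioarrin_reg. The register write is
 * what tells the adapter to fetch the IOARCB, so the barrier prevents the
 * adapter from seeing a stale command block.
 */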

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
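
/*
 * Editor's note: ipr_send_blocking_cmd() must be called from process
 * context with the host lock held. It drops the lock while sleeping on
 * the completion and reacquires it before returning, so callers cannot
 * assume adapter state is unchanged across the call.
 */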

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:		HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 1;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
			      struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->sdev->hostdata = NULL;
			res->del_from_ml = 1;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpids:			vendor/product id struct
 * @serial_num:		serial number string
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_std_inq_vpids *vpids, u8 *serial_num)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpids->vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpids->product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, serial_num, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpids, error->ioa_sn);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpids, error->cfc_sn);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpids,
		    error->ioa_last_attached_to_cfc_sn);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpids,
		    error->cfc_last_attached_to_ioa_sn);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev_entry;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		if (dev_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
			ipr_err("Device %d: missing\n", i + 1);
		} else {
			ipr_err("Device %d: %d:%d:%d:%d\n", i + 1,
				ioa_cfg->host->host_no, dev_entry->dev_res_addr.bus,
				dev_entry->dev_res_addr.target, dev_entry->dev_res_addr.lun);
		}
		ipr_log_vpd(&dev_entry->dev_vpids, dev_entry->dev_sn);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_dev_vpids, dev_entry->new_dev_sn);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpids,
			    dev_entry->ioa_last_with_dev_sn);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpids,
			    dev_entry->cfc_last_with_dev_sn);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->serial_num, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i) {
			ipr_err("Exposed Array Member %d:\n", i);
		} else {
			ipr_err("Array Member %d:\n", i);
		}

		ipr_log_vpd(&array_entry->vpids, array_entry->serial_num);

		if (array_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
			ipr_err("Current Location: unknown\n");
		} else {
			ipr_err("Current Location: %d:%d:%d:%d\n",
				ioa_cfg->host->host_no,
				array_entry->dev_res_addr.bus,
				array_entry->dev_res_addr.target,
				array_entry->dev_res_addr.lun);
		}

		if (array_entry->expected_dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
			ipr_err("Expected Location: unknown\n");
		} else {
			ipr_err("Expected Location: %d:%d:%d:%d\n",
				ioa_cfg->host->host_no,
				array_entry->expected_dev_res_addr.bus,
				array_entry->expected_dev_res_addr.target,
				array_entry->expected_dev_res_addr.lun);
		}

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	int i;
	int ioa_data_len = be32_to_cpu(hostrcb->hcam.length);

	if (ioa_data_len == 0)
		return;

	ipr_err("IOA Error Data:\n");
	ipr_err("Offset    0 1 2 3  4 5 6 7  8 9 A B  C D E F\n");

	for (i = 0; i < ioa_data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(hostrcb->hcam.u.raw.data[i]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+1]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+2]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+3]));
	}
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == ioasc)
			return i;

	return 0;
}
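
/*
 * Editor's note -- the lookup pattern used by the error handling code
 * below is, roughly:
 *
 *	error_index = ipr_get_error(ioasc);
 *	if (ipr_error_table[error_index].log_hcam)
 *		...log the error...
 *
 * A linear scan is acceptable here since this only runs on the error
 * path, and entry 0 doubles as the fallback for unknown IOASCs.
 */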

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
		ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
			    "%s\n", ipr_error_table[error_index].error);
	} else {
		dev_err(&ioa_cfg->pdev->dev, "%s\n",
			ipr_error_table[error_index].error);
	}

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_1:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	default:
		dev_err(&ioa_cfg->pdev->dev,
			"Unknown error received. Overlay ID: %d\n",
			hostrcb->hcam.overlay_id);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout -  An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout -  Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If we got hit with a host reset while we were already resetting
	   the adapter for some reason, and that reset failed, the adapter
	   is dead and the reset must be reported as failed. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}
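
/*
 * Editor's note: like ipr_send_blocking_cmd(), ipr_reset_reload() sleeps
 * (it drops the host lock around wait_event()), so it may only be used
 * from process context, such as the SCSI error handler thread, with the
 * host lock held on entry.
 */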

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:		SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
			continue;

		if (bus != res->cfgte.res_addr.bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}

/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:		ioa config struct
 * @max_delay:		max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}
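
/*
 * Editor's note: the wait loop above uses exponential backoff -- the
 * delay doubles on every iteration (1, 2, 4, ... microseconds) until
 * max_delay is reached, switching from udelay() to mdelay() once the
 * interval grows beyond what udelay() can reasonably handle.
 */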
1426 
1427 /**
1428  * ipr_get_ldump_data_section - Dump IOA memory
1429  * @ioa_cfg:			ioa config struct
1430  * @start_addr:			adapter address to dump
1431  * @dest:				destination kernel buffer
1432  * @length_in_words:	length to dump in 4 byte words
1433  *
1434  * Return value:
1435  * 	0 on success / -EIO on failure
1436  **/
1437 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1438 				      u32 start_addr,
1439 				      __be32 *dest, u32 length_in_words)
1440 {
1441 	volatile u32 temp_pcii_reg;
1442 	int i, delay = 0;
1443 
1444 	/* Write IOA interrupt reg starting LDUMP state  */
1445 	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1446 	       ioa_cfg->regs.set_uproc_interrupt_reg);
1447 
1448 	/* Wait for IO debug acknowledge */
1449 	if (ipr_wait_iodbg_ack(ioa_cfg,
1450 			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1451 		dev_err(&ioa_cfg->pdev->dev,
1452 			"IOA dump long data transfer timeout\n");
1453 		return -EIO;
1454 	}
1455 
1456 	/* Signal LDUMP interlocked - clear IO debug ack */
1457 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1458 	       ioa_cfg->regs.clr_interrupt_reg);
1459 
1460 	/* Write Mailbox with starting address */
1461 	writel(start_addr, ioa_cfg->ioa_mailbox);
1462 
1463 	/* Signal address valid - clear IOA Reset alert */
1464 	writel(IPR_UPROCI_RESET_ALERT,
1465 	       ioa_cfg->regs.clr_uproc_interrupt_reg);
1466 
1467 	for (i = 0; i < length_in_words; i++) {
1468 		/* Wait for IO debug acknowledge */
1469 		if (ipr_wait_iodbg_ack(ioa_cfg,
1470 				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1471 			dev_err(&ioa_cfg->pdev->dev,
1472 				"IOA dump short data transfer timeout\n");
1473 			return -EIO;
1474 		}
1475 
1476 		/* Read data from mailbox and increment destination pointer */
1477 		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1478 		dest++;
1479 
1480 		/* For all but the last word of data, signal data received */
1481 		if (i < (length_in_words - 1)) {
1482 			/* Signal dump data received - Clear IO debug Ack */
1483 			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1484 			       ioa_cfg->regs.clr_interrupt_reg);
1485 		}
1486 	}
1487 
1488 	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
1489 	writel(IPR_UPROCI_RESET_ALERT,
1490 	       ioa_cfg->regs.set_uproc_interrupt_reg);
1491 
1492 	writel(IPR_UPROCI_IO_DEBUG_ALERT,
1493 	       ioa_cfg->regs.clr_uproc_interrupt_reg);
1494 
1495 	/* Signal dump data received - Clear IO debug Ack */
1496 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1497 	       ioa_cfg->regs.clr_interrupt_reg);
1498 
1499 	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1500 	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1501 		temp_pcii_reg =
1502 		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1503 
1504 		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1505 			return 0;
1506 
1507 		udelay(10);
1508 		delay += 10;
1509 	}
1510 
1511 	return 0;
1512 }
1513 
1514 #ifdef CONFIG_SCSI_IPR_DUMP
1515 /**
1516  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1517  * @ioa_cfg:		ioa config struct
1518  * @pci_address:	adapter address
1519  * @length:			length of data to copy
1520  *
1521  * Copy data from PCI adapter to kernel buffer.
1522  * Note: length MUST be a 4 byte multiple
1523  * Return value:
1524  * 	0 on success / other on failure
1525  **/
1526 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1527 			unsigned long pci_address, u32 length)
1528 {
1529 	int bytes_copied = 0;
1530 	int cur_len, rc, rem_len, rem_page_len;
1531 	__be32 *page;
1532 	unsigned long lock_flags = 0;
1533 	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
1534 
1535 	while (bytes_copied < length &&
1536 	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
1537 		if (ioa_dump->page_offset >= PAGE_SIZE ||
1538 		    ioa_dump->page_offset == 0) {
1539 			page = (__be32 *)__get_free_page(GFP_ATOMIC);
1540 
1541 			if (!page) {
1542 				ipr_trace;
1543 				return bytes_copied;
1544 			}
1545 
1546 			ioa_dump->page_offset = 0;
1547 			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
1548 			ioa_dump->next_page_index++;
1549 		} else
1550 			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
1551 
1552 		rem_len = length - bytes_copied;
1553 		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
1554 		cur_len = min(rem_len, rem_page_len);
1555 
1556 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1557 		if (ioa_cfg->sdt_state == ABORT_DUMP) {
1558 			rc = -EIO;
1559 		} else {
1560 			rc = ipr_get_ldump_data_section(ioa_cfg,
1561 							pci_address + bytes_copied,
1562 							&page[ioa_dump->page_offset / 4],
1563 							(cur_len / sizeof(u32)));
1564 		}
1565 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1566 
1567 		if (!rc) {
1568 			ioa_dump->page_offset += cur_len;
1569 			bytes_copied += cur_len;
1570 		} else {
1571 			ipr_trace;
1572 			break;
1573 		}
1574 		schedule();
1575 	}
1576 
1577 	return bytes_copied;
1578 }
1579 
1580 /**
1581  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1582  * @hdr:	dump entry header struct
1583  *
1584  * Return value:
1585  * 	nothing
1586  **/
1587 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
1588 {
1589 	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1590 	hdr->num_elems = 1;
1591 	hdr->offset = sizeof(*hdr);
1592 	hdr->status = IPR_DUMP_STATUS_SUCCESS;
1593 }
1594 
1595 /**
1596  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1597  * @ioa_cfg:	ioa config struct
1598  * @driver_dump:	driver dump struct
1599  *
1600  * Return value:
1601  * 	nothing
1602  **/
1603 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1604 				   struct ipr_driver_dump *driver_dump)
1605 {
1606 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1607 
1608 	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
1609 	driver_dump->ioa_type_entry.hdr.len =
1610 		sizeof(struct ipr_dump_ioa_type_entry) -
1611 		sizeof(struct ipr_dump_entry_header);
1612 	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1613 	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1614 	driver_dump->ioa_type_entry.type = ioa_cfg->type;
1615 	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1616 		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1617 		ucode_vpd->minor_release[1];
1618 	driver_dump->hdr.num_entries++;
1619 }
1620 
1621 /**
1622  * ipr_dump_version_data - Fill in the driver version in the dump.
1623  * @ioa_cfg:	ioa config struct
1624  * @driver_dump:	driver dump struct
1625  *
1626  * Return value:
1627  * 	nothing
1628  **/
1629 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1630 				  struct ipr_driver_dump *driver_dump)
1631 {
1632 	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1633 	driver_dump->version_entry.hdr.len =
1634 		sizeof(struct ipr_dump_version_entry) -
1635 		sizeof(struct ipr_dump_entry_header);
1636 	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1637 	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
1638 	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1639 	driver_dump->hdr.num_entries++;
1640 }
1641 
1642 /**
1643  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1644  * @ioa_cfg:	ioa config struct
1645  * @driver_dump:	driver dump struct
1646  *
1647  * Return value:
1648  * 	nothing
1649  **/
1650 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1651 				   struct ipr_driver_dump *driver_dump)
1652 {
1653 	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1654 	driver_dump->trace_entry.hdr.len =
1655 		sizeof(struct ipr_dump_trace_entry) -
1656 		sizeof(struct ipr_dump_entry_header);
1657 	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1658 	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
1659 	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1660 	driver_dump->hdr.num_entries++;
1661 }
1662 
1663 /**
1664  * ipr_dump_location_data - Fill in the IOA location in the dump.
1665  * @ioa_cfg:	ioa config struct
1666  * @driver_dump:	driver dump struct
1667  *
1668  * Return value:
1669  * 	nothing
1670  **/
1671 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1672 				   struct ipr_driver_dump *driver_dump)
1673 {
1674 	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1675 	driver_dump->location_entry.hdr.len =
1676 		sizeof(struct ipr_dump_location_entry) -
1677 		sizeof(struct ipr_dump_entry_header);
1678 	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1679 	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
1680 	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1681 	driver_dump->hdr.num_entries++;
1682 }
1683 
1684 /**
1685  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1686  * @ioa_cfg:	ioa config struct
1687  * @dump:		dump struct
1688  *
1689  * Return value:
1690  * 	nothing
1691  **/
1692 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1693 {
1694 	unsigned long start_addr, sdt_word;
1695 	unsigned long lock_flags = 0;
1696 	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1697 	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1698 	u32 num_entries, start_off, end_off;
1699 	u32 bytes_to_copy, bytes_copied, rc;
1700 	struct ipr_sdt *sdt;
1701 	int i;
1702 
1703 	ENTER;
1704 
1705 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1706 
1707 	if (ioa_cfg->sdt_state != GET_DUMP) {
1708 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1709 		return;
1710 	}
1711 
1712 	start_addr = readl(ioa_cfg->ioa_mailbox);
1713 
1714 	if (!ipr_sdt_is_fmt2(start_addr)) {
1715 		dev_err(&ioa_cfg->pdev->dev,
1716 			"Invalid dump table format: %lx\n", start_addr);
1717 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1718 		return;
1719 	}
1720 
1721 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1722 
1723 	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1724 
1725 	/* Initialize the overall dump header */
1726 	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1727 	driver_dump->hdr.num_entries = 1;
1728 	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1729 	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1730 	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1731 	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1732 
1733 	ipr_dump_version_data(ioa_cfg, driver_dump);
1734 	ipr_dump_location_data(ioa_cfg, driver_dump);
1735 	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1736 	ipr_dump_trace_data(ioa_cfg, driver_dump);
1737 
1738 	/* Update dump_header */
1739 	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1740 
1741 	/* IOA Dump entry */
1742 	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1743 	ioa_dump->format = IPR_SDT_FMT2;
1744 	ioa_dump->hdr.len = 0;
1745 	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1746 	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1747 
1748 	/* First entries in sdt are actually a list of dump addresses and
1749 	 * lengths to gather the real dump data.  sdt represents the pointer
1750 	 * to the ioa generated dump table.  Dump data will be extracted based
1751 	 * on entries in this table */
1752 	sdt = &ioa_dump->sdt;
1753 
1754 	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1755 					sizeof(struct ipr_sdt) / sizeof(__be32));
1756 
1757 	/* Smart Dump table is ready to use and the first entry is valid */
1758 	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1759 		dev_err(&ioa_cfg->pdev->dev,
1760 			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
1761 			rc, be32_to_cpu(sdt->hdr.state));
1762 		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1763 		ioa_cfg->sdt_state = DUMP_OBTAINED;
1764 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1765 		return;
1766 	}
1767 
1768 	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1769 
1770 	if (num_entries > IPR_NUM_SDT_ENTRIES)
1771 		num_entries = IPR_NUM_SDT_ENTRIES;
1772 
1773 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1774 
1775 	for (i = 0; i < num_entries; i++) {
1776 		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1777 			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1778 			break;
1779 		}
1780 
1781 		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1782 			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1783 			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1784 			end_off = be32_to_cpu(sdt->entry[i].end_offset);
1785 
1786 			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1787 				bytes_to_copy = end_off - start_off;
1788 				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1789 					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1790 					continue;
1791 				}
1792 
1793 				/* Copy data from adapter to driver buffers */
1794 				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
1795 							    bytes_to_copy);
1796 
1797 				ioa_dump->hdr.len += bytes_copied;
1798 
1799 				if (bytes_copied != bytes_to_copy) {
1800 					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1801 					break;
1802 				}
1803 			}
1804 		}
1805 	}
1806 
1807 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
1808 
1809 	/* Update dump_header */
1810 	driver_dump->hdr.len += ioa_dump->hdr.len;
1811 	wmb();
1812 	ioa_cfg->sdt_state = DUMP_OBTAINED;
1813 	LEAVE;
1814 }
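
/*
 * Worked example for the entry loop above (all values hypothetical): a
 * valid format 2 entry whose bar_str_offset masks down to start_off
 * 0x00F00000, with end_offset 0x00F02000, gives bytes_to_copy =
 * end_off - start_off = 0x2000, so ipr_sdt_copy() gathers 8K of dump
 * data for that entry into the driver's page list.
 */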
1815 
1816 #else
1817 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
1818 #endif
1819 
1820 /**
1821  * ipr_release_dump - Free adapter dump memory
1822  * @kref:	kref struct
1823  *
1824  * Return value:
1825  *	nothing
1826  **/
1827 static void ipr_release_dump(struct kref *kref)
1828 {
1829 	struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
1830 	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
1831 	unsigned long lock_flags = 0;
1832 	int i;
1833 
1834 	ENTER;
1835 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1836 	ioa_cfg->dump = NULL;
1837 	ioa_cfg->sdt_state = INACTIVE;
1838 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1839 
1840 	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
1841 		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
1842 
1843 	kfree(dump);
1844 	LEAVE;
1845 }
1846 
1847 /**
1848  * ipr_worker_thread - Worker thread
1849  * @data:		ioa config struct
1850  *
1851  * Called at task level from a work thread. This function takes care
1852  * of adding and removing devices from the mid-layer as configuration
1853  * changes are detected by the adapter.
1854  *
1855  * Return value:
1856  * 	nothing
1857  **/
1858 static void ipr_worker_thread(void *data)
1859 {
1860 	unsigned long lock_flags;
1861 	struct ipr_resource_entry *res;
1862 	struct scsi_device *sdev;
1863 	struct ipr_dump *dump;
1864 	struct ipr_ioa_cfg *ioa_cfg = data;
1865 	u8 bus, target, lun;
1866 	int did_work;
1867 
1868 	ENTER;
1869 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1870 
1871 	if (ioa_cfg->sdt_state == GET_DUMP) {
1872 		dump = ioa_cfg->dump;
1873 		if (!dump) {
1874 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1875 			return;
1876 		}
1877 		kref_get(&dump->kref);
1878 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1879 		ipr_get_ioa_dump(ioa_cfg, dump);
1880 		kref_put(&dump->kref, ipr_release_dump);
1881 
1882 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1883 		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
1884 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1885 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1886 		return;
1887 	}
1888 
1889 restart:
1890 	do {
1891 		did_work = 0;
1892 		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
1893 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1894 			return;
1895 		}
1896 
1897 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1898 			if (res->del_from_ml && res->sdev) {
1899 				did_work = 1;
1900 				sdev = res->sdev;
1901 				if (!scsi_device_get(sdev)) {
1902 					res->sdev = NULL;
1903 					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1904 					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1905 					scsi_remove_device(sdev);
1906 					scsi_device_put(sdev);
1907 					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1908 				}
1909 				break;
1910 			}
1911 		}
1912 	} while (did_work);
1913 
1914 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1915 		if (res->add_to_ml) {
1916 			bus = res->cfgte.res_addr.bus;
1917 			target = res->cfgte.res_addr.target;
1918 			lun = res->cfgte.res_addr.lun;
1919 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1920 			scsi_add_device(ioa_cfg->host, bus, target, lun);
1921 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1922 			goto restart;
1923 		}
1924 	}
1925 
1926 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1927 	kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE, NULL);
1928 	LEAVE;
1929 }
1930 
1931 #ifdef CONFIG_SCSI_IPR_TRACE
1932 /**
1933  * ipr_read_trace - Dump the adapter trace
1934  * @kobj:		kobject struct
1935  * @buf:		buffer
1936  * @off:		offset
1937  * @count:		buffer size
1938  *
1939  * Return value:
1940  *	number of bytes read from the trace buffer
1941  **/
1942 static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
1943 			      loff_t off, size_t count)
1944 {
1945 	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
1946 	struct Scsi_Host *shost = class_to_shost(cdev);
1947 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1948 	unsigned long lock_flags = 0;
1949 	int size = IPR_TRACE_SIZE;
1950 	char *src = (char *)ioa_cfg->trace;
1951 
1952 	if (off > size)
1953 		return 0;
1954 	if (off + count > size) {
1955 		size -= off;
1956 		count = size;
1957 	}
1958 
1959 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1960 	memcpy(buf, &src[off], count);
1961 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1962 	return count;
1963 }
1964 
1965 static struct bin_attribute ipr_trace_attr = {
1966 	.attr =	{
1967 		.name = "trace",
1968 		.mode = S_IRUGO,
1969 	},
1970 	.size = 0,
1971 	.read = ipr_read_trace,
1972 };
1973 #endif
1974 
1975 /**
1976  * ipr_show_fw_version - Show the firmware version
1977  * @class_dev:	class device struct
1978  * @buf:		buffer
1979  *
1980  * Return value:
1981  *	number of bytes printed to buffer
1982  **/
1983 static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
1984 {
1985 	struct Scsi_Host *shost = class_to_shost(class_dev);
1986 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1987 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1988 	unsigned long lock_flags = 0;
1989 	int len;
1990 
1991 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1992 	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
1993 		       ucode_vpd->major_release, ucode_vpd->card_type,
1994 		       ucode_vpd->minor_release[0],
1995 		       ucode_vpd->minor_release[1]);
1996 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1997 	return len;
1998 }
1999 
2000 static struct class_device_attribute ipr_fw_version_attr = {
2001 	.attr = {
2002 		.name =		"fw_version",
2003 		.mode =		S_IRUGO,
2004 	},
2005 	.show = ipr_show_fw_version,
2006 };
2007 
2008 /**
2009  * ipr_show_log_level - Show the adapter's error logging level
2010  * @class_dev:	class device struct
2011  * @buf:		buffer
2012  *
2013  * Return value:
2014  * 	number of bytes printed to buffer
2015  **/
2016 static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2017 {
2018 	struct Scsi_Host *shost = class_to_shost(class_dev);
2019 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2020 	unsigned long lock_flags = 0;
2021 	int len;
2022 
2023 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2024 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2025 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2026 	return len;
2027 }
2028 
2029 /**
2030  * ipr_store_log_level - Change the adapter's error logging level
2031  * @class_dev:	class device struct
2032  * @buf:		buffer
2033  *
2034  * Return value:
2035  * 	number of bytes consumed from the buffer
2036  **/
2037 static ssize_t ipr_store_log_level(struct class_device *class_dev,
2038 				   const char *buf, size_t count)
2039 {
2040 	struct Scsi_Host *shost = class_to_shost(class_dev);
2041 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2042 	unsigned long lock_flags = 0;
2043 
2044 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2045 	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2046 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2047 	return strlen(buf);
2048 }
2049 
2050 static struct class_device_attribute ipr_log_level_attr = {
2051 	.attr = {
2052 		.name =		"log_level",
2053 		.mode =		S_IRUGO | S_IWUSR,
2054 	},
2055 	.show = ipr_show_log_level,
2056 	.store = ipr_store_log_level
2057 };
2058 
2059 /**
2060  * ipr_store_diagnostics - IOA Diagnostics interface
2061  * @class_dev:	class_device struct
2062  * @buf:		buffer
2063  * @count:		buffer size
2064  *
2065  * This function will reset the adapter and wait a reasonable
2066  * amount of time for any errors that the adapter might log.
2067  *
2068  * Return value:
2069  * 	count on success / other on failure
2070  **/
2071 static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2072 				     const char *buf, size_t count)
2073 {
2074 	struct Scsi_Host *shost = class_to_shost(class_dev);
2075 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2076 	unsigned long lock_flags = 0;
2077 	int rc = count;
2078 
2079 	if (!capable(CAP_SYS_ADMIN))
2080 		return -EACCES;
2081 
2082 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2083 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2084 	ioa_cfg->errors_logged = 0;
2085 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2086 
2087 	if (ioa_cfg->in_reset_reload) {
2088 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2089 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2090 
2091 		/* Wait for a second for any errors to be logged */
2092 		msleep(1000);
2093 	} else {
2094 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2095 		return -EIO;
2096 	}
2097 
2098 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2099 	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2100 		rc = -EIO;
2101 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2102 
2103 	return rc;
2104 }
2105 
2106 static struct class_device_attribute ipr_diagnostics_attr = {
2107 	.attr = {
2108 		.name =		"run_diagnostics",
2109 		.mode =		S_IWUSR,
2110 	},
2111 	.store = ipr_store_diagnostics
2112 };
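
/*
 * Usage sketch (the host number is system dependent):
 *
 *	echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 *
 * The write blocks through the adapter reset and returns -EIO if the
 * adapter logged any errors while the test ran.
 */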
2113 
2114 /**
2115  * ipr_store_reset_adapter - Reset the adapter
2116  * @class_dev:	class_device struct
2117  * @buf:		buffer
2118  * @count:		buffer size
2119  *
2120  * This function will reset the adapter.
2121  *
2122  * Return value:
2123  * 	count on success / other on failure
2124  **/
2125 static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2126 				       const char *buf, size_t count)
2127 {
2128 	struct Scsi_Host *shost = class_to_shost(class_dev);
2129 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2130 	unsigned long lock_flags;
2131 	int result = count;
2132 
2133 	if (!capable(CAP_SYS_ADMIN))
2134 		return -EACCES;
2135 
2136 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2137 	if (!ioa_cfg->in_reset_reload)
2138 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2139 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2140 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2141 
2142 	return result;
2143 }
2144 
2145 static struct class_device_attribute ipr_ioa_reset_attr = {
2146 	.attr = {
2147 		.name =		"reset_host",
2148 		.mode =		S_IWUSR,
2149 	},
2150 	.store = ipr_store_reset_adapter
2151 };
2152 
2153 /**
2154  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2155  * @buf_len:		buffer length
2156  *
2157  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2158  * list to use for microcode download
2159  *
2160  * Return value:
2161  * 	pointer to sglist / NULL on failure
2162  **/
2163 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2164 {
2165 	int sg_size, order, bsize_elem, num_elem, i, j;
2166 	struct ipr_sglist *sglist;
2167 	struct scatterlist *scatterlist;
2168 	struct page *page;
2169 
2170 	/* Get the minimum size per scatter/gather element */
2171 	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2172 
2173 	/* Get the actual size per element */
2174 	order = get_order(sg_size);
2175 
2176 	/* Determine the actual number of bytes per element */
2177 	bsize_elem = PAGE_SIZE * (1 << order);
2178 
2179 	/* Determine the actual number of sg entries needed */
2180 	if (buf_len % bsize_elem)
2181 		num_elem = (buf_len / bsize_elem) + 1;
2182 	else
2183 		num_elem = buf_len / bsize_elem;
2184 
2185 	/* Allocate a scatter/gather list for the DMA */
2186 	sglist = kmalloc(sizeof(struct ipr_sglist) +
2187 			 (sizeof(struct scatterlist) * (num_elem - 1)),
2188 			 GFP_KERNEL);
2189 
2190 	if (sglist == NULL) {
2191 		ipr_trace;
2192 		return NULL;
2193 	}
2194 
2195 	memset(sglist, 0, sizeof(struct ipr_sglist) +
2196 	       (sizeof(struct scatterlist) * (num_elem - 1)));
2197 
2198 	scatterlist = sglist->scatterlist;
2199 
2200 	sglist->order = order;
2201 	sglist->num_sg = num_elem;
2202 
2203 	/* Allocate a bunch of sg elements */
2204 	for (i = 0; i < num_elem; i++) {
2205 		page = alloc_pages(GFP_KERNEL, order);
2206 		if (!page) {
2207 			ipr_trace;
2208 
2209 			/* Free up what we already allocated */
2210 			for (j = i - 1; j >= 0; j--)
2211 				__free_pages(scatterlist[j].page, order);
2212 			kfree(sglist);
2213 			return NULL;
2214 		}
2215 
2216 		scatterlist[i].page = page;
2217 	}
2218 
2219 	return sglist;
2220 }
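
/*
 * Illustrative sketch, not called by the driver: the element sizing
 * math from ipr_alloc_ucode_buffer() folded into one helper.  sg_size
 * picks the smallest per-element size that fits the image in
 * IPR_MAX_SGLIST - 1 elements, get_order() rounds that up to whole
 * pages, and the round-up division counts the partial tail element.
 */
static inline int ipr_example_num_ucode_elems(int buf_len)
{
	int sg_size = buf_len / (IPR_MAX_SGLIST - 1);
	int order = get_order(sg_size);
	int bsize_elem = PAGE_SIZE * (1 << order);

	/* Round up so a partial trailing chunk still gets an element */
	return (buf_len + bsize_elem - 1) / bsize_elem;
}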
2221 
2222 /**
2223  * ipr_free_ucode_buffer - Frees a microcode download buffer
2224  * @sglist:		scatter/gather list pointer
2225  *
2226  * Free a DMA'able ucode download buffer previously allocated with
2227  * ipr_alloc_ucode_buffer
2228  *
2229  * Return value:
2230  * 	nothing
2231  **/
2232 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2233 {
2234 	int i;
2235 
2236 	for (i = 0; i < sglist->num_sg; i++)
2237 		__free_pages(sglist->scatterlist[i].page, sglist->order);
2238 
2239 	kfree(sglist);
2240 }
2241 
2242 /**
2243  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2244  * @sglist:		scatter/gather list pointer
2245  * @buffer:		buffer pointer
2246  * @len:		buffer length
2247  *
2248  * Copy a microcode image from a user buffer into a buffer allocated by
2249  * ipr_alloc_ucode_buffer
2250  *
2251  * Return value:
2252  * 	0 on success / other on failure
2253  **/
2254 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2255 				 u8 *buffer, u32 len)
2256 {
2257 	int bsize_elem, i, result = 0;
2258 	struct scatterlist *scatterlist;
2259 	void *kaddr;
2260 
2261 	/* Determine the actual number of bytes per element */
2262 	bsize_elem = PAGE_SIZE * (1 << sglist->order);
2263 
2264 	scatterlist = sglist->scatterlist;
2265 
2266 	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2267 		kaddr = kmap(scatterlist[i].page);
2268 		memcpy(kaddr, buffer, bsize_elem);
2269 		kunmap(scatterlist[i].page);
2270 
2271 		scatterlist[i].length = bsize_elem;
2272 
2273 		if (result != 0) {
2274 			ipr_trace;
2275 			return result;
2276 		}
2277 	}
2278 
2279 	if (len % bsize_elem) {
2280 		kaddr = kmap(scatterlist[i].page);
2281 		memcpy(kaddr, buffer, len % bsize_elem);
2282 		kunmap(scatterlist[i].page);
2283 
2284 		scatterlist[i].length = len % bsize_elem;
2285 	}
2286 
2287 	sglist->buffer_len = len;
2288 	return result;
2289 }
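
/*
 * Worked example for the copy above (hypothetical numbers): with 4K
 * pages and order 2, bsize_elem is 16384, so len = 70000 fills four
 * full elements (65536 bytes) in the loop and leaves a 4464 byte tail
 * (70000 % 16384) for the final partial element.
 */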
2290 
2291 /**
2292  * ipr_map_ucode_buffer - Map a microcode download buffer
2293  * @ipr_cmd:	ipr command struct
2294  * @sglist:		scatter/gather list
2295  * @len:		total length of download buffer
2296  *
2297  * Maps a microcode download scatter/gather list for DMA and
2298  * builds the IOADL.
2299  *
2300  * Return value:
2301  * 	0 on success / -EIO on failure
2302  **/
2303 static int ipr_map_ucode_buffer(struct ipr_cmnd *ipr_cmd,
2304 				struct ipr_sglist *sglist, int len)
2305 {
2306 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2307 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2308 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2309 	struct scatterlist *scatterlist = sglist->scatterlist;
2310 	int i;
2311 
2312 	ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev, scatterlist,
2313 					 sglist->num_sg, DMA_TO_DEVICE);
2314 
2315 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2316 	ioarcb->write_data_transfer_length = cpu_to_be32(len);
2317 	ioarcb->write_ioadl_len =
2318 		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2319 
2320 	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2321 		ioadl[i].flags_and_data_len =
2322 			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2323 		ioadl[i].address =
2324 			cpu_to_be32(sg_dma_address(&scatterlist[i]));
2325 	}
2326 
2327 	if (likely(ipr_cmd->dma_use_sg)) {
2328 		ioadl[i-1].flags_and_data_len |=
2329 			cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2330 	}
2331 	else {
2332 		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
2333 		return -EIO;
2334 	}
2335 
2336 	return 0;
2337 }
2338 
2339 /**
2340  * ipr_store_update_fw - Update the firmware on the adapter
2341  * @class_dev:	class_device struct
2342  * @buf:		buffer
2343  * @count:		buffer size
2344  *
2345  * This function will update the firmware on the adapter.
2346  *
2347  * Return value:
2348  * 	count on success / other on failure
2349  **/
2350 static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2351 				       const char *buf, size_t count)
2352 {
2353 	struct Scsi_Host *shost = class_to_shost(class_dev);
2354 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2355 	struct ipr_ucode_image_header *image_hdr;
2356 	const struct firmware *fw_entry;
2357 	struct ipr_sglist *sglist;
2358 	unsigned long lock_flags;
2359 	char fname[100];
2360 	char *src;
2361 	int len, result, dnld_size;
2362 
2363 	if (!capable(CAP_SYS_ADMIN))
2364 		return -EACCES;
2365 
2366 	len = snprintf(fname, sizeof(fname), "%s", buf);
2367 	if (len > 0 && len < sizeof(fname)) fname[len - 1] = '\0'; /* strip '\n' */
2368 
2369 	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2370 		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2371 		return -EIO;
2372 	}
2373 
2374 	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2375 
2376 	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2377 	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
2378 	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2379 		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2380 		release_firmware(fw_entry);
2381 		return -EINVAL;
2382 	}
2383 
2384 	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2385 	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2386 	sglist = ipr_alloc_ucode_buffer(dnld_size);
2387 
2388 	if (!sglist) {
2389 		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2390 		release_firmware(fw_entry);
2391 		return -ENOMEM;
2392 	}
2393 
2394 	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2395 
2396 	if (result) {
2397 		dev_err(&ioa_cfg->pdev->dev,
2398 			"Microcode buffer copy to DMA buffer failed\n");
2399 		ipr_free_ucode_buffer(sglist);
2400 		release_firmware(fw_entry);
2401 		return result;
2402 	}
2403 
2404 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2405 
2406 	if (ioa_cfg->ucode_sglist) {
2407 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2408 		dev_err(&ioa_cfg->pdev->dev,
2409 			"Microcode download already in progress\n");
2410 		ipr_free_ucode_buffer(sglist);
2411 		release_firmware(fw_entry);
2412 		return -EIO;
2413 	}
2414 
2415 	ioa_cfg->ucode_sglist = sglist;
2416 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2417 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2418 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2419 
2420 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2421 	ioa_cfg->ucode_sglist = NULL;
2422 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2423 
2424 	ipr_free_ucode_buffer(sglist);
2425 	release_firmware(fw_entry);
2426 
2427 	return count;
2428 }
2429 
2430 static struct class_device_attribute ipr_update_fw_attr = {
2431 	.attr = {
2432 		.name =		"update_fw",
2433 		.mode =		S_IWUSR,
2434 	},
2435 	.store = ipr_store_update_fw
2436 };
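
/*
 * Usage sketch (image name and host number are hypothetical): write
 * the microcode file name to the attribute; request_firmware() then
 * fetches it via the userspace firmware loader, typically from
 * /lib/firmware:
 *
 *	echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
 *
 * The write blocks through the adapter reset that activates the image.
 */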
2437 
2438 static struct class_device_attribute *ipr_ioa_attrs[] = {
2439 	&ipr_fw_version_attr,
2440 	&ipr_log_level_attr,
2441 	&ipr_diagnostics_attr,
2442 	&ipr_ioa_reset_attr,
2443 	&ipr_update_fw_attr,
2444 	NULL,
2445 };
2446 
2447 #ifdef CONFIG_SCSI_IPR_DUMP
2448 /**
2449  * ipr_read_dump - Dump the adapter
2450  * @kobj:		kobject struct
2451  * @buf:		buffer
2452  * @off:		offset
2453  * @count:		buffer size
2454  *
2455  * Return value:
2456  *	number of bytes read from the dump
2457  **/
2458 static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2459 			      loff_t off, size_t count)
2460 {
2461 	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2462 	struct Scsi_Host *shost = class_to_shost(cdev);
2463 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2464 	struct ipr_dump *dump;
2465 	unsigned long lock_flags = 0;
2466 	char *src;
2467 	int len;
2468 	size_t rc = count;
2469 
2470 	if (!capable(CAP_SYS_ADMIN))
2471 		return -EACCES;
2472 
2473 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2474 	dump = ioa_cfg->dump;
2475 
2476 	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2477 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2478 		return 0;
2479 	}
2480 	kref_get(&dump->kref);
2481 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2482 
2483 	if (off > dump->driver_dump.hdr.len) {
2484 		kref_put(&dump->kref, ipr_release_dump);
2485 		return 0;
2486 	}
2487 
2488 	if (off + count > dump->driver_dump.hdr.len) {
2489 		count = dump->driver_dump.hdr.len - off;
2490 		rc = count;
2491 	}
2492 
2493 	if (count && off < sizeof(dump->driver_dump)) {
2494 		if (off + count > sizeof(dump->driver_dump))
2495 			len = sizeof(dump->driver_dump) - off;
2496 		else
2497 			len = count;
2498 		src = (u8 *)&dump->driver_dump + off;
2499 		memcpy(buf, src, len);
2500 		buf += len;
2501 		off += len;
2502 		count -= len;
2503 	}
2504 
2505 	off -= sizeof(dump->driver_dump);
2506 
2507 	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2508 		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2509 			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2510 		else
2511 			len = count;
2512 		src = (u8 *)&dump->ioa_dump + off;
2513 		memcpy(buf, src, len);
2514 		buf += len;
2515 		off += len;
2516 		count -= len;
2517 	}
2518 
2519 	off -= offsetof(struct ipr_ioa_dump, ioa_data);
2520 
2521 	while (count) {
2522 		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2523 			len = PAGE_ALIGN(off + 1) - off;	/* to end of this page */
2524 		else
2525 			len = count;
2526 		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2527 		src += off & ~PAGE_MASK;
2528 		memcpy(buf, src, len);
2529 		buf += len;
2530 		off += len;
2531 		count -= len;
2532 	}
2533 
2534 	kref_put(&dump->kref, ipr_release_dump);
2535 	return rc;
2536 }
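
/*
 * Sketch, not called by the driver: the page lookup performed by the
 * copy loop above, for an offset already adjusted past both headers.
 * For example, off = 10000 with 4K pages reads from ioa_data[2],
 * starting 1808 bytes in (10000 = 2 * 4096 + 1808).
 */
static inline void *ipr_example_dump_src(struct ipr_ioa_dump *ioa_dump,
					 unsigned long off)
{
	return (u8 *)ioa_dump->ioa_data[off >> PAGE_SHIFT] +
		(off & ~PAGE_MASK);
}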
2537 
2538 /**
2539  * ipr_alloc_dump - Prepare for adapter dump
2540  * @ioa_cfg:	ioa config struct
2541  *
2542  * Return value:
2543  *	0 on success / other on failure
2544  **/
2545 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2546 {
2547 	struct ipr_dump *dump;
2548 	unsigned long lock_flags = 0;
2549 
2550 	ENTER;
2551 	dump = kmalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2552 
2553 	if (!dump) {
2554 		ipr_err("Dump memory allocation failed\n");
2555 		return -ENOMEM;
2556 	}
2557 
2558 	memset(dump, 0, sizeof(struct ipr_dump));
2559 	kref_init(&dump->kref);
2560 	dump->ioa_cfg = ioa_cfg;
2561 
2562 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2563 
2564 	if (INACTIVE != ioa_cfg->sdt_state) {
2565 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2566 		kfree(dump);
2567 		return 0;
2568 	}
2569 
2570 	ioa_cfg->dump = dump;
2571 	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2572 	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2573 		ioa_cfg->dump_taken = 1;
2574 		schedule_work(&ioa_cfg->work_q);
2575 	}
2576 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2577 
2578 	LEAVE;
2579 	return 0;
2580 }
2581 
2582 /**
2583  * ipr_free_dump - Free adapter dump memory
2584  * @ioa_cfg:	ioa config struct
2585  *
2586  * Return value:
2587  *	0 on success / other on failure
2588  **/
2589 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2590 {
2591 	struct ipr_dump *dump;
2592 	unsigned long lock_flags = 0;
2593 
2594 	ENTER;
2595 
2596 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2597 	dump = ioa_cfg->dump;
2598 	if (!dump) {
2599 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2600 		return 0;
2601 	}
2602 
2603 	ioa_cfg->dump = NULL;
2604 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2605 
2606 	kref_put(&dump->kref, ipr_release_dump);
2607 
2608 	LEAVE;
2609 	return 0;
2610 }
2611 
2612 /**
2613  * ipr_write_dump - Setup dump state of adapter
2614  * @kobj:		kobject struct
2615  * @buf:		buffer
2616  * @off:		offset
2617  * @count:		buffer size
2618  *
2619  * Return value:
2620  *	count on success / other on failure
2621  **/
2622 static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2623 			      loff_t off, size_t count)
2624 {
2625 	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2626 	struct Scsi_Host *shost = class_to_shost(cdev);
2627 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2628 	int rc;
2629 
2630 	if (!capable(CAP_SYS_ADMIN))
2631 		return -EACCES;
2632 
2633 	if (buf[0] == '1')
2634 		rc = ipr_alloc_dump(ioa_cfg);
2635 	else if (buf[0] == '0')
2636 		rc = ipr_free_dump(ioa_cfg);
2637 	else
2638 		return -EINVAL;
2639 
2640 	if (rc)
2641 		return rc;
2642 	else
2643 		return count;
2644 }
2645 
2646 static struct bin_attribute ipr_dump_attr = {
2647 	.attr =	{
2648 		.name = "dump",
2649 		.mode = S_IRUSR | S_IWUSR,
2650 	},
2651 	.size = 0,
2652 	.read = ipr_read_dump,
2653 	.write = ipr_write_dump
2654 };
2655 #else
2656 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
2657 #endif
2658 
2659 /**
2660  * ipr_change_queue_depth - Change the device's queue depth
2661  * @sdev:	scsi device struct
2662  * @qdepth:	depth to set
2663  *
2664  * Return value:
2665  * 	actual depth set
2666  **/
2667 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
2668 {
2669 	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2670 	return sdev->queue_depth;
2671 }
2672 
2673 /**
2674  * ipr_change_queue_type - Change the device's queue type
2675  * @sdev:		scsi device struct
2676  * @tag_type:	type of tags to use
2677  *
2678  * Return value:
2679  * 	actual queue type set
2680  **/
2681 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
2682 {
2683 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2684 	struct ipr_resource_entry *res;
2685 	unsigned long lock_flags = 0;
2686 
2687 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2688 	res = (struct ipr_resource_entry *)sdev->hostdata;
2689 
2690 	if (res) {
2691 		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
2692 			/*
2693 			 * We don't bother quiescing the device here since the
2694 			 * adapter firmware does it for us.
2695 			 */
2696 			scsi_set_tag_type(sdev, tag_type);
2697 
2698 			if (tag_type)
2699 				scsi_activate_tcq(sdev, sdev->queue_depth);
2700 			else
2701 				scsi_deactivate_tcq(sdev, sdev->queue_depth);
2702 		} else
2703 			tag_type = 0;
2704 	} else
2705 		tag_type = 0;
2706 
2707 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2708 	return tag_type;
2709 }
2710 
2711 /**
2712  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
2713  * @dev:	device struct
2714  * @buf:	buffer
2715  *
2716  * Return value:
2717  * 	number of bytes printed to buffer
2718  **/
2719 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
2720 {
2721 	struct scsi_device *sdev = to_scsi_device(dev);
2722 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2723 	struct ipr_resource_entry *res;
2724 	unsigned long lock_flags = 0;
2725 	ssize_t len = -ENXIO;
2726 
2727 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2728 	res = (struct ipr_resource_entry *)sdev->hostdata;
2729 	if (res)
2730 		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
2731 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2732 	return len;
2733 }
2734 
2735 static struct device_attribute ipr_adapter_handle_attr = {
2736 	.attr = {
2737 		.name = 	"adapter_handle",
2738 		.mode =		S_IRUSR,
2739 	},
2740 	.show = ipr_show_adapter_handle
2741 };
2742 
2743 static struct device_attribute *ipr_dev_attrs[] = {
2744 	&ipr_adapter_handle_attr,
2745 	NULL,
2746 };
2747 
2748 /**
2749  * ipr_biosparam - Return the HSC mapping
2750  * @sdev:			scsi device struct
2751  * @block_device:	block device pointer
2752  * @capacity:		capacity of the device
2753  * @parm:			Array containing returned HSC values.
2754  *
2755  * This function generates the HSC parms that fdisk uses.
2756  * We want to make sure we return something that places partitions
2757  * on 4k boundaries for best performance with the IOA.
2758  *
2759  * Return value:
2760  * 	0 on success
2761  **/
2762 static int ipr_biosparam(struct scsi_device *sdev,
2763 			 struct block_device *block_device,
2764 			 sector_t capacity, int *parm)
2765 {
2766 	int heads, sectors;
2767 	sector_t cylinders;
2768 
2769 	heads = 128;
2770 	sectors = 32;
2771 
2772 	cylinders = capacity;
2773 	sector_div(cylinders, (128 * 32));
2774 
2775 	/* return result */
2776 	parm[0] = heads;
2777 	parm[1] = sectors;
2778 	parm[2] = cylinders;
2779 
2780 	return 0;
2781 }
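
/*
 * Worked example (hypothetical capacity): a disk of 71096640 sectors
 * reports 128 heads and 32 sectors per track, so fdisk sees
 * 71096640 / (128 * 32) = 17357 cylinders.  Each cylinder spans 4096
 * sectors, so partitions created on cylinder boundaries start on
 * 4k-aligned LBAs, as the comment above intends.
 */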
2782 
2783 /**
2784  * ipr_slave_destroy - Unconfigure a SCSI device
2785  * @sdev:	scsi device struct
2786  *
2787  * Return value:
2788  * 	nothing
2789  **/
2790 static void ipr_slave_destroy(struct scsi_device *sdev)
2791 {
2792 	struct ipr_resource_entry *res;
2793 	struct ipr_ioa_cfg *ioa_cfg;
2794 	unsigned long lock_flags = 0;
2795 
2796 	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2797 
2798 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2799 	res = (struct ipr_resource_entry *) sdev->hostdata;
2800 	if (res) {
2801 		sdev->hostdata = NULL;
2802 		res->sdev = NULL;
2803 	}
2804 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2805 }
2806 
2807 /**
2808  * ipr_slave_configure - Configure a SCSI device
2809  * @sdev:	scsi device struct
2810  *
2811  * This function configures the specified scsi device.
2812  *
2813  * Return value:
2814  * 	0 on success
2815  **/
2816 static int ipr_slave_configure(struct scsi_device *sdev)
2817 {
2818 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2819 	struct ipr_resource_entry *res;
2820 	unsigned long lock_flags = 0;
2821 
2822 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2823 	res = sdev->hostdata;
2824 	if (res) {
2825 		if (ipr_is_af_dasd_device(res))
2826 			sdev->type = TYPE_RAID;
2827 		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res))
2828 			sdev->scsi_level = 4;
2829 		if (ipr_is_vset_device(res)) {
2830 			sdev->timeout = IPR_VSET_RW_TIMEOUT;
2831 			blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
2832 		}
2833 		if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
2834 			sdev->allow_restart = 1;
2835 		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
2836 	}
2837 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2838 	return 0;
2839 }
2840 
2841 /**
2842  * ipr_slave_alloc - Prepare for commands to a device.
2843  * @sdev:	scsi device struct
2844  *
2845  * This function saves a pointer to the resource entry
2846  * in the scsi device struct if the device exists. We
2847  * can then use this pointer in ipr_queuecommand when
2848  * handling new commands.
2849  *
2850  * Return value:
2851  * 	0 on success
2852  **/
2853 static int ipr_slave_alloc(struct scsi_device *sdev)
2854 {
2855 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2856 	struct ipr_resource_entry *res;
2857 	unsigned long lock_flags;
2858 
2859 	sdev->hostdata = NULL;
2860 
2861 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2862 
2863 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2864 		if ((res->cfgte.res_addr.bus == sdev->channel) &&
2865 		    (res->cfgte.res_addr.target == sdev->id) &&
2866 		    (res->cfgte.res_addr.lun == sdev->lun)) {
2867 			res->sdev = sdev;
2868 			res->add_to_ml = 0;
2869 			res->in_erp = 0;
2870 			sdev->hostdata = res;
2871 			res->needs_sync_complete = 1;
2872 			break;
2873 		}
2874 	}
2875 
2876 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2877 
2878 	return 0;
2879 }
2880 
2881 /**
2882  * __ipr_eh_host_reset - Reset the host adapter
2883  * @scsi_cmd:	scsi command struct
2884  *
2885  * Return value:
2886  * 	SUCCESS / FAILED
2887  **/
2888 static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
2889 {
2890 	struct ipr_ioa_cfg *ioa_cfg;
2891 	int rc;
2892 
2893 	ENTER;
2894 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
2895 
2896 	dev_err(&ioa_cfg->pdev->dev,
2897 		"Adapter being reset as a result of error recovery.\n");
2898 
2899 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2900 		ioa_cfg->sdt_state = GET_DUMP;
2901 
2902 	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2903 
2904 	LEAVE;
2905 	return rc;
2906 }
2907 
2908 static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
2909 {
2910 	int rc;
2911 
2912 	spin_lock_irq(cmd->device->host->host_lock);
2913 	rc = __ipr_eh_host_reset(cmd);
2914 	spin_unlock_irq(cmd->device->host->host_lock);
2915 
2916 	return rc;
2917 }
2918 
2919 /**
2920  * __ipr_eh_dev_reset - Reset the device
2921  * @scsi_cmd:	scsi command struct
2922  *
2923  * This function issues a device reset to the affected device.
2924  * A LUN reset will be sent to the device first. If that does
2925  * not work, a target reset will be sent.
2926  *
2927  * Return value:
2928  *	SUCCESS / FAILED
2929  **/
2930 static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
2931 {
2932 	struct ipr_cmnd *ipr_cmd;
2933 	struct ipr_ioa_cfg *ioa_cfg;
2934 	struct ipr_resource_entry *res;
2935 	struct ipr_cmd_pkt *cmd_pkt;
2936 	u32 ioasc;
2937 
2938 	ENTER;
2939 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
2940 	res = scsi_cmd->device->hostdata;
2941 
2942 	if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
2943 		return FAILED;
2944 
2945 	/*
2946 	 * If we are currently going through reset/reload, return failed. This will force the
2947 	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
2948 	 * reset to complete
2949 	 */
2950 	if (ioa_cfg->in_reset_reload)
2951 		return FAILED;
2952 	if (ioa_cfg->ioa_is_dead)
2953 		return FAILED;
2954 
2955 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
2956 		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
2957 			if (ipr_cmd->scsi_cmd)
2958 				ipr_cmd->done = ipr_scsi_eh_done;
2959 		}
2960 	}
2961 
2962 	res->resetting_device = 1;
2963 
2964 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
2965 
2966 	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
2967 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
2968 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
2969 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
2970 
2971 	ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
2972 	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
2973 
2974 	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
2975 
2976 	res->resetting_device = 0;
2977 
2978 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2979 
2980 	LEAVE;
2981 	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
2982 }
2983 
2984 static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
2985 {
2986 	int rc;
2987 
2988 	spin_lock_irq(cmd->device->host->host_lock);
2989 	rc = __ipr_eh_dev_reset(cmd);
2990 	spin_unlock_irq(cmd->device->host->host_lock);
2991 
2992 	return rc;
2993 }
2994 
2995 /**
2996  * ipr_bus_reset_done - Op done function for bus reset.
2997  * @ipr_cmd:	ipr command struct
2998  *
2999  * This function is the op done function for a bus reset
3000  *
3001  * Return value:
3002  * 	none
3003  **/
3004 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3005 {
3006 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3007 	struct ipr_resource_entry *res;
3008 
3009 	ENTER;
3010 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3011 		if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3012 			    sizeof(res->cfgte.res_handle))) {
3013 			scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3014 			break;
3015 		}
3016 	}
3017 
3018 	/*
3019 	 * If abort has not completed, indicate the reset has, else call the
3020 	 * abort's done function to wake the sleeping eh thread
3021 	 */
3022 	if (ipr_cmd->sibling->sibling)
3023 		ipr_cmd->sibling->sibling = NULL;
3024 	else
3025 		ipr_cmd->sibling->done(ipr_cmd->sibling);
3026 
3027 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3028 	LEAVE;
3029 }
3030 
3031 /**
3032  * ipr_abort_timeout - An abort task has timed out
3033  * @ipr_cmd:	ipr command struct
3034  *
3035  * This function handles when an abort task times out. If this
3036  * happens we issue a bus reset since we have resources tied
3037  * up that must be freed before returning to the midlayer.
3038  *
3039  * Return value:
3040  *	none
3041  **/
3042 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3043 {
3044 	struct ipr_cmnd *reset_cmd;
3045 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3046 	struct ipr_cmd_pkt *cmd_pkt;
3047 	unsigned long lock_flags = 0;
3048 
3049 	ENTER;
3050 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3051 	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3052 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3053 		return;
3054 	}
3055 
3056 	ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
3057 	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3058 	ipr_cmd->sibling = reset_cmd;
3059 	reset_cmd->sibling = ipr_cmd;
3060 	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3061 	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3062 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3063 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3064 	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3065 
3066 	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3067 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3068 	LEAVE;
3069 }
3070 
3071 /**
3072  * ipr_cancel_op - Cancel specified op
3073  * @scsi_cmd:	scsi command struct
3074  *
3075  * This function cancels specified op.
3076  *
3077  * Return value:
3078  *	SUCCESS / FAILED
3079  **/
3080 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3081 {
3082 	struct ipr_cmnd *ipr_cmd;
3083 	struct ipr_ioa_cfg *ioa_cfg;
3084 	struct ipr_resource_entry *res;
3085 	struct ipr_cmd_pkt *cmd_pkt;
3086 	u32 ioasc;
3087 	int op_found = 0;
3088 
3089 	ENTER;
3090 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3091 	res = scsi_cmd->device->hostdata;
3092 
3093 	/* If we are currently going through reset/reload, return failed.
3094 	 * This will force the mid-layer to call ipr_eh_host_reset,
3095 	 * which will then go to sleep and wait for the reset to complete
3096 	 */
3097 	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3098 		return FAILED;
3099 	if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3100 		return FAILED;
3101 
3102 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3103 		if (ipr_cmd->scsi_cmd == scsi_cmd) {
3104 			ipr_cmd->done = ipr_scsi_eh_done;
3105 			op_found = 1;
3106 			break;
3107 		}
3108 	}
3109 
3110 	if (!op_found)
3111 		return SUCCESS;
3112 
3113 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3114 	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3115 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3116 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3117 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3118 	ipr_cmd->u.sdev = scsi_cmd->device;
3119 
3120 	ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3121 	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3122 	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3123 
3124 	/*
3125 	 * If the abort task timed out and we sent a bus reset, we will get
3126 	 * one of the following responses to the abort
3127 	 */
3128 	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3129 		ioasc = 0;
3130 		ipr_trace;
3131 	}
3132 
3133 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3134 	res->needs_sync_complete = 1;
3135 
3136 	LEAVE;
3137 	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3138 }
3139 
3140 /**
3141  * ipr_eh_abort - Abort a single op
3142  * @scsi_cmd:	scsi command struct
3143  *
3144  * Return value:
3145  * 	SUCCESS / FAILED
3146  **/
3147 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3148 {
3149 	unsigned long flags;
3150 	int rc;
3151 
3152 	ENTER;
3153 
3154 	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3155 	rc = ipr_cancel_op(scsi_cmd);
3156 	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3157 
3158 	LEAVE;
3159 	return rc;
3160 }
3161 
3162 /**
3163  * ipr_handle_other_interrupt - Handle "other" interrupts
3164  * @ioa_cfg:	ioa config struct
3165  * @int_reg:	interrupt register
3166  *
3167  * Return value:
3168  * 	IRQ_NONE / IRQ_HANDLED
3169  **/
3170 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3171 					      volatile u32 int_reg)
3172 {
3173 	irqreturn_t rc = IRQ_HANDLED;
3174 
3175 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3176 		/* Mask the interrupt */
3177 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3178 
3179 		/* Clear the interrupt */
3180 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3181 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3182 
3183 		list_del(&ioa_cfg->reset_cmd->queue);
3184 		del_timer(&ioa_cfg->reset_cmd->timer);
3185 		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3186 	} else {
3187 		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3188 			ioa_cfg->ioa_unit_checked = 1;
3189 		else
3190 			dev_err(&ioa_cfg->pdev->dev,
3191 				"Permanent IOA failure. 0x%08X\n", int_reg);
3192 
3193 		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3194 			ioa_cfg->sdt_state = GET_DUMP;
3195 
3196 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3197 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3198 	}
3199 
3200 	return rc;
3201 }
3202 
3203 /**
3204  * ipr_isr - Interrupt service routine
3205  * @irq:	irq number
3206  * @devp:	pointer to ioa config struct
3207  * @regs:	pt_regs struct
3208  *
3209  * Return value:
3210  * 	IRQ_NONE / IRQ_HANDLED
3211  **/
3212 static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3213 {
3214 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3215 	unsigned long lock_flags = 0;
3216 	volatile u32 int_reg, int_mask_reg;
3217 	u32 ioasc;
3218 	u16 cmd_index;
3219 	struct ipr_cmnd *ipr_cmd;
3220 	irqreturn_t rc = IRQ_NONE;
3221 
3222 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3223 
3224 	/* If interrupts are disabled, ignore the interrupt */
3225 	if (!ioa_cfg->allow_interrupts) {
3226 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3227 		return IRQ_NONE;
3228 	}
3229 
3230 	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3231 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3232 
3233 	/* If an interrupt on the adapter did not occur, ignore it */
3234 	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3235 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3236 		return IRQ_NONE;
3237 	}
3238 
3239 	while (1) {
3240 		ipr_cmd = NULL;
3241 
3242 		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3243 		       ioa_cfg->toggle_bit) {
3244 
3245 			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3246 				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3247 
3248 			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3249 				ioa_cfg->errors_logged++;
3250 				dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3251 
3252 				if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3253 					ioa_cfg->sdt_state = GET_DUMP;
3254 
3255 				ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3256 				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3257 				return IRQ_HANDLED;
3258 			}
3259 
3260 			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3261 
3262 			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3263 
3264 			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3265 
3266 			list_del(&ipr_cmd->queue);
3267 			del_timer(&ipr_cmd->timer);
3268 			ipr_cmd->done(ipr_cmd);
3269 
3270 			rc = IRQ_HANDLED;
3271 
3272 			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3273 				ioa_cfg->hrrq_curr++;
3274 			} else {
3275 				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3276 				ioa_cfg->toggle_bit ^= 1u;
3277 			}
3278 		}
3279 
3280 		if (ipr_cmd != NULL) {
3281 			/* Clear the PCI interrupt */
3282 			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3283 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3284 		} else
3285 			break;
3286 	}
3287 
3288 	if (unlikely(rc == IRQ_NONE))
3289 		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3290 
3291 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3292 	return rc;
3293 }
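
/*
 * Note on the HRRQ walk in ipr_isr() above: the adapter stamps each
 * response with the current toggle bit, so entries are consumed only
 * while IPR_HRRQ_TOGGLE_BIT matches ioa_cfg->toggle_bit.  Flipping
 * toggle_bit each time hrrq_curr wraps to hrrq_start keeps stale
 * entries from the previous lap of the circular queue from being
 * processed twice.
 */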
3294 
3295 /**
3296  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3297  * @ioa_cfg:	ioa config struct
3298  * @ipr_cmd:	ipr command struct
3299  *
3300  * Return value:
3301  * 	0 on success / -1 on failure
3302  **/
3303 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3304 			   struct ipr_cmnd *ipr_cmd)
3305 {
3306 	int i;
3307 	struct scatterlist *sglist;
3308 	u32 length;
3309 	u32 ioadl_flags = 0;
3310 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3311 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3312 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3313 
3314 	length = scsi_cmd->request_bufflen;
3315 
3316 	if (length == 0)
3317 		return 0;
3318 
3319 	if (scsi_cmd->use_sg) {
3320 		ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3321 						 scsi_cmd->request_buffer,
3322 						 scsi_cmd->use_sg,
3323 						 scsi_cmd->sc_data_direction);
3324 
3325 		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3326 			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3327 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3328 			ioarcb->write_data_transfer_length = cpu_to_be32(length);
3329 			ioarcb->write_ioadl_len =
3330 				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3331 		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3332 			ioadl_flags = IPR_IOADL_FLAGS_READ;
3333 			ioarcb->read_data_transfer_length = cpu_to_be32(length);
3334 			ioarcb->read_ioadl_len =
3335 				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3336 		}
3337 
3338 		sglist = scsi_cmd->request_buffer;
3339 
3340 		for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3341 			ioadl[i].flags_and_data_len =
3342 				cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3343 			ioadl[i].address =
3344 				cpu_to_be32(sg_dma_address(&sglist[i]));
3345 		}
3346 
3347 		if (likely(ipr_cmd->dma_use_sg)) {
3348 			ioadl[i-1].flags_and_data_len |=
3349 				cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3350 			return 0;
3351 		} else
3352 			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3353 	} else {
3354 		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3355 			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3356 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3357 			ioarcb->write_data_transfer_length = cpu_to_be32(length);
3358 			ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3359 		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3360 			ioadl_flags = IPR_IOADL_FLAGS_READ;
3361 			ioarcb->read_data_transfer_length = cpu_to_be32(length);
3362 			ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3363 		}
3364 
3365 		ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3366 						     scsi_cmd->request_buffer, length,
3367 						     scsi_cmd->sc_data_direction);
3368 
3369 		if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3370 			ipr_cmd->dma_use_sg = 1;
3371 			ioadl[0].flags_and_data_len =
3372 				cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3373 			ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3374 			return 0;
3375 		} else
3376 			dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3377 	}
3378 
3379 	return -1;
3380 }
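
/*
 * Example of the descriptor encoding above (element values are
 * hypothetical): a 512 byte read segment at bus address 0x1000 becomes
 *
 *	ioadl[i].flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_READ | 512);
 *	ioadl[i].address = cpu_to_be32(0x1000);
 *
 * with IPR_IOADL_FLAGS_LAST OR'ed into the final element's flags.
 */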
3381 
3382 /**
3383  * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3384  * @scsi_cmd:	scsi command struct
3385  *
3386  * Return value:
3387  * 	task attributes
3388  **/
3389 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3390 {
3391 	u8 tag[2];
3392 	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3393 
3394 	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3395 		switch (tag[0]) {
3396 		case MSG_SIMPLE_TAG:
3397 			rc = IPR_FLAGS_LO_SIMPLE_TASK;
3398 			break;
3399 		case MSG_HEAD_TAG:
3400 			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3401 			break;
3402 		case MSG_ORDERED_TAG:
3403 			rc = IPR_FLAGS_LO_ORDERED_TASK;
3404 			break;
3405 		}
3406 	}
3407 
3408 	return rc;
3409 }
3410 
3411 /**
3412  * ipr_erp_done - Process completion of ERP for a device
3413  * @ipr_cmd:		ipr command struct
3414  *
3415  * This function copies the sense buffer into the scsi_cmd
3416  * struct and pushes the scsi_done function.
3417  *
3418  * Return value:
3419  * 	nothing
3420  **/
3421 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3422 {
3423 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3424 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3425 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3426 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3427 
3428 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3429 		scsi_cmd->result |= (DID_ERROR << 16);
3430 		ipr_sdev_err(scsi_cmd->device,
3431 			     "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3432 	} else {
3433 		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3434 		       SCSI_SENSE_BUFFERSIZE);
3435 	}
3436 
3437 	if (res) {
3438 		res->needs_sync_complete = 1;
3439 		res->in_erp = 0;
3440 	}
3441 	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3442 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3443 	scsi_cmd->scsi_done(scsi_cmd);
3444 }
3445 
3446 /**
3447  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3448  * @ipr_cmd:	ipr command struct
3449  *
3450  * Return value:
3451  * 	none
3452  **/
3453 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3454 {
3455 	struct ipr_ioarcb *ioarcb;
3456 	struct ipr_ioasa *ioasa;
3457 
3458 	ioarcb = &ipr_cmd->ioarcb;
3459 	ioasa = &ipr_cmd->ioasa;
3460 
3461 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3462 	ioarcb->write_data_transfer_length = 0;
3463 	ioarcb->read_data_transfer_length = 0;
3464 	ioarcb->write_ioadl_len = 0;
3465 	ioarcb->read_ioadl_len = 0;
3466 	ioasa->ioasc = 0;
3467 	ioasa->residual_data_len = 0;
3468 }
3469 
3470 /**
3471  * ipr_erp_request_sense - Send request sense to a device
3472  * @ipr_cmd:	ipr command struct
3473  *
3474  * This function sends a request sense to a device as a result
3475  * of a check condition.
3476  *
3477  * Return value:
3478  * 	nothing
3479  **/
3480 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3481 {
3482 	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3483 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3484 
3485 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3486 		ipr_erp_done(ipr_cmd);
3487 		return;
3488 	}
3489 
3490 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3491 
3492 	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3493 	cmd_pkt->cdb[0] = REQUEST_SENSE;
3494 	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3495 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3496 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3497 	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3498 
3499 	ipr_cmd->ioadl[0].flags_and_data_len =
3500 		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3501 	ipr_cmd->ioadl[0].address =
3502 		cpu_to_be32(ipr_cmd->sense_buffer_dma);
3503 
3504 	ipr_cmd->ioarcb.read_ioadl_len =
3505 		cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3506 	ipr_cmd->ioarcb.read_data_transfer_length =
3507 		cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3508 
3509 	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3510 		   IPR_REQUEST_SENSE_TIMEOUT * 2);
3511 }
3512 
3513 /**
3514  * ipr_erp_cancel_all - Send cancel all to a device
3515  * @ipr_cmd:	ipr command struct
3516  *
3517  * This function sends a cancel all to a device to clear the
3518  * queue. If we are running TCQ on the device, QERR is set to 1,
3519  * which means all outstanding ops have been dropped on the floor.
3520  * Cancel all will return them to us.
3521  *
3522  * Return value:
3523  * 	nothing
3524  **/
3525 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3526 {
3527 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3528 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3529 	struct ipr_cmd_pkt *cmd_pkt;
3530 
3531 	res->in_erp = 1;
3532 
3533 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3534 
3535 	if (!scsi_get_tag_type(scsi_cmd->device)) {
3536 		ipr_erp_request_sense(ipr_cmd);
3537 		return;
3538 	}
3539 
3540 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3541 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3542 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3543 
3544 	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3545 		   IPR_CANCEL_ALL_TIMEOUT);
3546 }
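
/*
 * Editor's sketch (not driver code): for a GPDD check condition the
 * auto-sense ERP stages above chain like this:
 *
 *	ipr_erp_start
 *	  -> ipr_erp_cancel_all	   (only if the device runs TCQ; recovers
 *				    ops the device dropped due to QERR=1)
 *	  -> ipr_erp_request_sense (clocks the sense data into
 *				    ipr_cmd->sense_buffer)
 *	  -> ipr_erp_done	   (copies the sense data to the mid-layer
 *				    and completes the scsi_cmnd)
 *
 * Each stage re-uses the same ipr_cmnd via ipr_reinit_ipr_cmnd_for_erp()
 * instead of allocating a new command block.
 */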
3547 
3548 /**
3549  * ipr_dump_ioasa - Dump contents of IOASA
3550  * @ioa_cfg:	ioa config struct
3551  * @ipr_cmd:	ipr command struct
3552  *
3553  * This function is invoked by the interrupt handler when ops
3554  * fail. It will log the IOASA if appropriate. Only called
3555  * for GPDD ops.
3556  *
3557  * Return value:
3558  * 	none
3559  **/
3560 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3561 			   struct ipr_cmnd *ipr_cmd)
3562 {
3563 	int i;
3564 	u16 data_len;
3565 	u32 ioasc;
3566 	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3567 	__be32 *ioasa_data = (__be32 *)ioasa;
3568 	int error_index;
3569 
3570 	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3571 
3572 	if (0 == ioasc)
3573 		return;
3574 
3575 	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3576 		return;
3577 
3578 	error_index = ipr_get_error(ioasc);
3579 
3580 	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3581 		/* Don't log an error if the IOA already logged one */
3582 		if (ioasa->ilid != 0)
3583 			return;
3584 
3585 		if (ipr_error_table[error_index].log_ioasa == 0)
3586 			return;
3587 	}
3588 
3589 	ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
3590 		     ipr_error_table[error_index].error);
3591 
3592 	if ((ioasa->u.gpdd.end_state < ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
3593 	    (ioasa->u.gpdd.bus_phase < ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
3594 		ipr_sdev_err(ipr_cmd->scsi_cmd->device,
3595 			     "Device End state: %s Phase: %s\n",
3596 			     ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
3597 			     ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
3598 	}
3599 
3600 	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3601 		data_len = sizeof(struct ipr_ioasa);
3602 	else
3603 		data_len = be16_to_cpu(ioasa->ret_stat_len);
3604 
3605 	ipr_err("IOASA Dump:\n");
3606 
3607 	for (i = 0; i < data_len / 4; i += 4) {
3608 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3609 			be32_to_cpu(ioasa_data[i]),
3610 			be32_to_cpu(ioasa_data[i+1]),
3611 			be32_to_cpu(ioasa_data[i+2]),
3612 			be32_to_cpu(ioasa_data[i+3]));
3613 	}
3614 }
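
/*
 * Illustrative only: with the loop above, a 32-byte IOASA comes out as
 * two lines of four big-endian words each, the first column being a
 * byte offset (the values below are made up):
 *
 *	IOASA Dump:
 *	00000000: 04448500 00000020 00000000 00000000
 *	00000010: 00000000 00000000 00000000 00000000
 */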
3615 
3616 /**
3617  * ipr_gen_sense - Generate SCSI sense data from an IOASA
3618  * @ipr_cmd:	ipr command struct
3619  *		(sense data is generated into ipr_cmd->scsi_cmd->sense_buffer)
3620  *
3621  * Return value:
3622  * 	none
3623  **/
3624 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
3625 {
3626 	u32 failing_lba;
3627 	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
3628 	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
3629 	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3630 	u32 ioasc = be32_to_cpu(ioasa->ioasc);
3631 
3632 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
3633 
3634 	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
3635 		return;
3636 
3637 	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
3638 
3639 	if (ipr_is_vset_device(res) &&
3640 	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
3641 	    ioasa->u.vset.failing_lba_hi != 0) {
3642 		sense_buf[0] = 0x72;
3643 		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
3644 		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
3645 		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
3646 
3647 		sense_buf[7] = 12;
3648 		sense_buf[8] = 0;
3649 		sense_buf[9] = 0x0A;
3650 		sense_buf[10] = 0x80;
3651 
3652 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
3653 
3654 		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
3655 		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
3656 		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
3657 		sense_buf[15] = failing_lba & 0x000000ff;
3658 
3659 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3660 
3661 		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
3662 		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
3663 		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
3664 		sense_buf[19] = failing_lba & 0x000000ff;
3665 	} else {
3666 		sense_buf[0] = 0x70;
3667 		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
3668 		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
3669 		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
3670 
3671 		/* Illegal request */
3672 		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
3673 		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
3674 			sense_buf[7] = 10;	/* additional length */
3675 
3676 			/* IOARCB was in error */
3677 			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
3678 				sense_buf[15] = 0xC0;
3679 			else	/* Parameter data was invalid */
3680 				sense_buf[15] = 0x80;
3681 
3682 			sense_buf[16] =
3683 			    ((IPR_FIELD_POINTER_MASK &
3684 			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
3685 			sense_buf[17] =
3686 			    (IPR_FIELD_POINTER_MASK &
3687 			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
3688 		} else {
3689 			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
3690 				if (ipr_is_vset_device(res))
3691 					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3692 				else
3693 					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
3694 
3695 				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
3696 				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
3697 				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
3698 				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
3699 				sense_buf[6] = failing_lba & 0x000000ff;
3700 			}
3701 
3702 			sense_buf[7] = 6;	/* additional length */
3703 		}
3704 	}
3705 }
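
/*
 * Editor's note (illustrative, not driver code): in the descriptor
 * format sense (0x72) built above for vsets, the 64-bit failing LBA
 * lands big-endian in bytes 12-19 of the information descriptor. A
 * consumer could reassemble it with plain shifts:
 *
 *	u64 lba = ((u64)sense_buf[12] << 56) | ((u64)sense_buf[13] << 48) |
 *		  ((u64)sense_buf[14] << 40) | ((u64)sense_buf[15] << 32) |
 *		  ((u64)sense_buf[16] << 24) | ((u64)sense_buf[17] << 16) |
 *		  ((u64)sense_buf[18] <<  8) |  (u64)sense_buf[19];
 */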
3706 
3707 /**
3708  * ipr_erp_start - Process an error response for a SCSI op
3709  * @ioa_cfg:	ioa config struct
3710  * @ipr_cmd:	ipr command struct
3711  *
3712  * This function determines whether or not to initiate ERP
3713  * on the affected device.
3714  *
3715  * Return value:
3716  * 	nothing
3717  **/
3718 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
3719 			      struct ipr_cmnd *ipr_cmd)
3720 {
3721 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3722 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3723 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3724 
3725 	if (!res) {
3726 		ipr_scsi_eh_done(ipr_cmd);
3727 		return;
3728 	}
3729 
3730 	if (ipr_is_gscsi(res))
3731 		ipr_dump_ioasa(ioa_cfg, ipr_cmd);
3732 	else
3733 		ipr_gen_sense(ipr_cmd);
3734 
3735 	switch (ioasc & IPR_IOASC_IOASC_MASK) {
3736 	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
3737 		scsi_cmd->result |= (DID_IMM_RETRY << 16);
3738 		break;
3739 	case IPR_IOASC_IR_RESOURCE_HANDLE:
3740 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
3741 		break;
3742 	case IPR_IOASC_HW_SEL_TIMEOUT:
3743 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
3744 		res->needs_sync_complete = 1;
3745 		break;
3746 	case IPR_IOASC_SYNC_REQUIRED:
3747 		if (!res->in_erp)
3748 			res->needs_sync_complete = 1;
3749 		scsi_cmd->result |= (DID_IMM_RETRY << 16);
3750 		break;
3751 	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
3752 		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
3753 		break;
3754 	case IPR_IOASC_BUS_WAS_RESET:
3755 	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
3756 		/*
3757 		 * Report the bus reset and ask for a retry. The device
3758 		 * will return a CC/UA on the next command.
3759 		 */
3760 		if (!res->resetting_device)
3761 			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
3762 		scsi_cmd->result |= (DID_ERROR << 16);
3763 		res->needs_sync_complete = 1;
3764 		break;
3765 	case IPR_IOASC_HW_DEV_BUS_STATUS:
3766 		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
3767 		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
3768 			ipr_erp_cancel_all(ipr_cmd);
3769 			return;
3770 		}
3771 		res->needs_sync_complete = 1;
3772 		break;
3773 	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
3774 		break;
3775 	default:
3776 		scsi_cmd->result |= (DID_ERROR << 16);
3777 		if (!ipr_is_vset_device(res))
3778 			res->needs_sync_complete = 1;
3779 		break;
3780 	}
3781 
3782 	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3783 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3784 	scsi_cmd->scsi_done(scsi_cmd);
3785 }
3786 
3787 /**
3788  * ipr_scsi_done - mid-layer done function
3789  * @ipr_cmd:	ipr command struct
3790  *
3791  * This function is invoked by the interrupt handler for
3792  * ops generated by the SCSI mid-layer.
3793  *
3794  * Return value:
3795  * 	none
3796  **/
3797 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
3798 {
3799 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3800 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3801 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3802 
3803 	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
3804 
3805 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
3806 		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3807 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3808 		scsi_cmd->scsi_done(scsi_cmd);
3809 	} else
3810 		ipr_erp_start(ioa_cfg, ipr_cmd);
3811 }
3812 
3813 /**
3814  * ipr_save_ioafp_mode_select - Save adapter's mode select data
3815  * @ioa_cfg:	ioa config struct
3816  * @scsi_cmd:	scsi command struct
3817  *
3818  * This function saves mode select data for the adapter to
3819  * use following an adapter reset.
3820  *
3821  * Return value:
3822  *	0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
3823  **/
3824 static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
3825 				       struct scsi_cmnd *scsi_cmd)
3826 {
3827 	if (!ioa_cfg->saved_mode_pages) {
3828 		ioa_cfg->saved_mode_pages  = kmalloc(sizeof(struct ipr_mode_pages),
3829 						     GFP_ATOMIC);
3830 		if (!ioa_cfg->saved_mode_pages) {
3831 			dev_err(&ioa_cfg->pdev->dev,
3832 				"IOA mode select buffer allocation failed\n");
3833 			return SCSI_MLQUEUE_HOST_BUSY;
3834 		}
3835 	}
3836 
3837 	memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
3838 	ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
3839 	return 0;
3840 }
3841 
3842 /**
3843  * ipr_queuecommand - Queue a mid-layer request
3844  * @scsi_cmd:	scsi command struct
3845  * @done:		done function
3846  *
3847  * This function queues a request generated by the mid-layer.
3848  *
3849  * Return value:
3850  *	0 on success
3851  *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
3852  *	SCSI_MLQUEUE_HOST_BUSY if host is busy
3853  **/
3854 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
3855 			    void (*done) (struct scsi_cmnd *))
3856 {
3857 	struct ipr_ioa_cfg *ioa_cfg;
3858 	struct ipr_resource_entry *res;
3859 	struct ipr_ioarcb *ioarcb;
3860 	struct ipr_cmnd *ipr_cmd;
3861 	int rc = 0;
3862 
3863 	scsi_cmd->scsi_done = done;
3864 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3865 	res = scsi_cmd->device->hostdata;
3866 	scsi_cmd->result = (DID_OK << 16);
3867 
3868 	/*
3869 	 * We are currently blocking all devices due to a host reset.
3870 	 * We have told the host to stop giving us new requests, but
3871 	 * ERP ops don't count. FIXME
3872 	 */
3873 	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
3874 		return SCSI_MLQUEUE_HOST_BUSY;
3875 
3876 	/*
3877 	 * FIXME - Create scsi_set_host_offline interface
3878 	 *  and the ioa_is_dead check can be removed
3879 	 */
3880 	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
3881 		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3882 		scsi_cmd->result = (DID_NO_CONNECT << 16);
3883 		scsi_cmd->scsi_done(scsi_cmd);
3884 		return 0;
3885 	}
3886 
3887 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3888 	ioarcb = &ipr_cmd->ioarcb;
3889 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
3890 
3891 	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
3892 	ipr_cmd->scsi_cmd = scsi_cmd;
3893 	ioarcb->res_handle = res->cfgte.res_handle;
3894 	ipr_cmd->done = ipr_scsi_done;
3895 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
3896 
3897 	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
3898 		if (scsi_cmd->underflow == 0)
3899 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3900 
3901 		if (res->needs_sync_complete) {
3902 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
3903 			res->needs_sync_complete = 0;
3904 		}
3905 
3906 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
3907 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
3908 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
3909 		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
3910 	}
3911 
3912 	if (scsi_cmd->cmnd[0] >= 0xC0 &&
3913 	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
3914 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
3915 
3916 	if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
3917 		rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
3918 
3919 	if (likely(rc == 0))
3920 		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
3921 
3922 	if (likely(rc == 0)) {
3923 		mb();
3924 		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
3925 		       ioa_cfg->regs.ioarrin_reg);
3926 	} else {
3927 		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3928 		return SCSI_MLQUEUE_HOST_BUSY;
3929 	}
3930 
3931 	return 0;
3932 }
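
/*
 * Editor's note on the submit path above: the mb() before the IOARRIN
 * write is what guarantees the IOARCB and IOADL stores are visible in
 * host memory before the adapter is told where to fetch them:
 *
 *	(build IOARCB + IOADL in DMA-able memory)
 *	mb();					 <-- barrier first ...
 *	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
 *	       ioa_cfg->regs.ioarrin_reg);	 <-- ... then the doorbell
 *
 * Without the barrier the doorbell write could be reordered ahead of
 * the descriptor stores and the adapter could fetch a stale IOARCB.
 */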
3933 
3934 /**
3935  * ipr_ioa_info - Get information about the card/driver
3936  * @host:	scsi host struct
3937  *
3938  * Return value:
3939  * 	pointer to buffer with description string
3940  **/
3941 static const char *ipr_ioa_info(struct Scsi_Host *host)
3942 {
3943 	static char buffer[512];
3944 	struct ipr_ioa_cfg *ioa_cfg;
3945 	unsigned long lock_flags = 0;
3946 
3947 	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
3948 
3949 	spin_lock_irqsave(host->host_lock, lock_flags);
3950 	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
3951 	spin_unlock_irqrestore(host->host_lock, lock_flags);
3952 
3953 	return buffer;
3954 }
3955 
3956 static struct scsi_host_template driver_template = {
3957 	.module = THIS_MODULE,
3958 	.name = "IPR",
3959 	.info = ipr_ioa_info,
3960 	.queuecommand = ipr_queuecommand,
3961 	.eh_abort_handler = ipr_eh_abort,
3962 	.eh_device_reset_handler = ipr_eh_dev_reset,
3963 	.eh_host_reset_handler = ipr_eh_host_reset,
3964 	.slave_alloc = ipr_slave_alloc,
3965 	.slave_configure = ipr_slave_configure,
3966 	.slave_destroy = ipr_slave_destroy,
3967 	.change_queue_depth = ipr_change_queue_depth,
3968 	.change_queue_type = ipr_change_queue_type,
3969 	.bios_param = ipr_biosparam,
3970 	.can_queue = IPR_MAX_COMMANDS,
3971 	.this_id = -1,
3972 	.sg_tablesize = IPR_MAX_SGLIST,
3973 	.max_sectors = IPR_IOA_MAX_SECTORS,
3974 	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
3975 	.use_clustering = ENABLE_CLUSTERING,
3976 	.shost_attrs = ipr_ioa_attrs,
3977 	.sdev_attrs = ipr_dev_attrs,
3978 	.proc_name = IPR_NAME
3979 };
3980 
3981 #ifdef CONFIG_PPC_PSERIES
3982 static const u16 ipr_blocked_processors[] = {
3983 	PV_NORTHSTAR,
3984 	PV_PULSAR,
3985 	PV_POWER4,
3986 	PV_ICESTAR,
3987 	PV_SSTAR,
3988 	PV_POWER4p,
3989 	PV_630,
3990 	PV_630p
3991 };
3992 
3993 /**
3994  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
3995  * @ioa_cfg:	ioa cfg struct
3996  *
3997  * Adapters that use Gemstone revision < 3.1 do not work reliably on
3998  * certain pSeries hardware. This function determines if the given
3999  * adapter is in one of these configurations or not.
4000  *
4001  * Return value:
4002  * 	1 if adapter is not supported / 0 if adapter is supported
4003  **/
4004 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4005 {
4006 	u8 rev_id;
4007 	int i;
4008 
4009 	if (ioa_cfg->type == 0x5702) {
4010 		if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4011 					 &rev_id) == PCIBIOS_SUCCESSFUL) {
4012 			if (rev_id < 4) {
4013 				for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
4014 					if (__is_processor(ipr_blocked_processors[i]))
4015 						return 1;
4016 				}
4017 			}
4018 		}
4019 	}
4020 	return 0;
4021 }
4022 #else
4023 #define ipr_invalid_adapter(ioa_cfg) 0
4024 #endif
4025 
4026 /**
4027  * ipr_ioa_bringdown_done - IOA bring down completion.
4028  * @ipr_cmd:	ipr command struct
4029  *
4030  * This function processes the completion of an adapter bring down.
4031  * It wakes any reset sleepers.
4032  *
4033  * Return value:
4034  * 	IPR_RC_JOB_RETURN
4035  **/
4036 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4037 {
4038 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4039 
4040 	ENTER;
4041 	ioa_cfg->in_reset_reload = 0;
4042 	ioa_cfg->reset_retries = 0;
4043 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4044 	wake_up_all(&ioa_cfg->reset_wait_q);
4045 
4046 	spin_unlock_irq(ioa_cfg->host->host_lock);
4047 	scsi_unblock_requests(ioa_cfg->host);
4048 	spin_lock_irq(ioa_cfg->host->host_lock);
4049 	LEAVE;
4050 
4051 	return IPR_RC_JOB_RETURN;
4052 }
4053 
4054 /**
4055  * ipr_ioa_reset_done - IOA reset completion.
4056  * @ipr_cmd:	ipr command struct
4057  *
4058  * This function processes the completion of an adapter reset.
4059  * It schedules any necessary mid-layer add/removes and
4060  * wakes any reset sleepers.
4061  *
4062  * Return value:
4063  * 	IPR_RC_JOB_RETURN
4064  **/
4065 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4066 {
4067 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4068 	struct ipr_resource_entry *res;
4069 	struct ipr_hostrcb *hostrcb, *temp;
4070 	int i = 0;
4071 
4072 	ENTER;
4073 	ioa_cfg->in_reset_reload = 0;
4074 	ioa_cfg->allow_cmds = 1;
4075 	ioa_cfg->reset_cmd = NULL;
4076 
4077 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4078 		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4079 			ipr_trace;
4080 			break;
4081 		}
4082 	}
4083 	schedule_work(&ioa_cfg->work_q);
4084 
4085 	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4086 		list_del(&hostrcb->queue);
4087 		if (i++ < IPR_NUM_LOG_HCAMS)
4088 			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4089 		else
4090 			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4091 	}
4092 
4093 	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4094 
4095 	ioa_cfg->reset_retries = 0;
4096 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4097 	wake_up_all(&ioa_cfg->reset_wait_q);
4098 
4099 	spin_unlock_irq(ioa_cfg->host->host_lock);
4100 	scsi_unblock_requests(ioa_cfg->host);
4101 	spin_lock_irq(ioa_cfg->host->host_lock);
4102 
4103 	if (!ioa_cfg->allow_cmds)
4104 		scsi_block_requests(ioa_cfg->host);
4105 
4106 	LEAVE;
4107 	return IPR_RC_JOB_RETURN;
4108 }
4109 
4110 /**
4111  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4112  * @supported_dev:	supported device struct
4113  * @vpids:			vendor product id struct
4114  *
4115  * Return value:
4116  * 	none
4117  **/
4118 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4119 				 struct ipr_std_inq_vpids *vpids)
4120 {
4121 	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4122 	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4123 	supported_dev->num_records = 1;
4124 	supported_dev->data_length =
4125 		cpu_to_be16(sizeof(struct ipr_supported_device));
4126 	supported_dev->reserved = 0;
4127 }
4128 
4129 /**
4130  * ipr_set_supported_devs - Send Set Supported Devices for a device
4131  * @ipr_cmd:	ipr command struct
4132  *
4133  * This function sends a Set Supported Devices to the adapter
4134  *
4135  * Return value:
4136  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4137  **/
4138 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4139 {
4140 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4141 	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4142 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4143 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4144 	struct ipr_resource_entry *res = ipr_cmd->u.res;
4145 
4146 	ipr_cmd->job_step = ipr_ioa_reset_done;
4147 
4148 	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
4149 		if (!ipr_is_af_dasd_device(res))
4150 			continue;
4151 
4152 		ipr_cmd->u.res = res;
4153 		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4154 
4155 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4156 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4157 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4158 
4159 		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4160 		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4161 		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4162 
4163 		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4164 							sizeof(struct ipr_supported_device));
4165 		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4166 					     offsetof(struct ipr_misc_cbs, supp_dev));
4167 		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4168 		ioarcb->write_data_transfer_length =
4169 			cpu_to_be32(sizeof(struct ipr_supported_device));
4170 
4171 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4172 			   IPR_SET_SUP_DEVICE_TIMEOUT);
4173 
4174 		ipr_cmd->job_step = ipr_set_supported_devs;
4175 		return IPR_RC_JOB_RETURN;
4176 	}
4177 
4178 	return IPR_RC_JOB_CONTINUE;
4179 }
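
/*
 * Editor's note: ipr_set_supported_devs() above is a self-resuming job
 * step. Each invocation issues at most one Set Supported Devices, then
 * points job_step back at itself, so when the request completes
 * ipr_reset_ioa_job() re-enters the function and
 * list_for_each_entry_continue() picks up at the next AF DASD resource.
 * Only when the list is exhausted does it return IPR_RC_JOB_CONTINUE
 * and fall through to ipr_ioa_reset_done.
 */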
4180 
4181 /**
4182  * ipr_get_mode_page - Locate specified mode page
4183  * @mode_pages:	mode page buffer
4184  * @page_code:	page code to find
4185  * @len:		minimum required length for mode page
4186  *
4187  * Return value:
4188  * 	pointer to mode page / NULL on failure
4189  **/
4190 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4191 			       u32 page_code, u32 len)
4192 {
4193 	struct ipr_mode_page_hdr *mode_hdr;
4194 	u32 page_length;
4195 	u32 length;
4196 
4197 	if (!mode_pages || (mode_pages->hdr.length == 0))
4198 		return NULL;
4199 
4200 	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4201 	mode_hdr = (struct ipr_mode_page_hdr *)
4202 		(mode_pages->data + mode_pages->hdr.block_desc_len);
4203 
4204 	while (length) {
4205 		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4206 			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4207 				return mode_hdr;
4208 			break;
4209 		} else {
4210 			page_length = (sizeof(struct ipr_mode_page_hdr) +
4211 				       mode_hdr->page_length);
4212 			length -= page_length;
4213 			mode_hdr = (struct ipr_mode_page_hdr *)
4214 				((unsigned long)mode_hdr + page_length);
4215 		}
4216 	}
4217 	return NULL;
4218 }
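
/*
 * Illustrative usage (not driver code): a caller that cannot assume the
 * page is present should check the result before dereferencing:
 *
 *	struct ipr_mode_page28 *page;
 *
 *	page = ipr_get_mode_page(mode_pages, 0x28,
 *				 sizeof(struct ipr_mode_page28));
 *	if (!page)
 *		return;		(page absent or shorter than required)
 *
 * The page 28 users below skip this check on the assumption that the
 * IOAFP always reports mode page 28.
 */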
4219 
4220 /**
4221  * ipr_check_term_power - Check for term power errors
4222  * @ioa_cfg:	ioa config struct
4223  * @mode_pages:	IOAFP mode pages buffer
4224  *
4225  * Check the IOAFP's mode page 28 for term power errors
4226  *
4227  * Return value:
4228  * 	nothing
4229  **/
4230 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4231 				 struct ipr_mode_pages *mode_pages)
4232 {
4233 	int i;
4234 	int entry_length;
4235 	struct ipr_dev_bus_entry *bus;
4236 	struct ipr_mode_page28 *mode_page;
4237 
4238 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
4239 				      sizeof(struct ipr_mode_page28));
4240 
4241 	entry_length = mode_page->entry_length;
4242 
4243 	bus = mode_page->bus;
4244 
4245 	for (i = 0; i < mode_page->num_entries; i++) {
4246 		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4247 			dev_err(&ioa_cfg->pdev->dev,
4248 				"Term power is absent on scsi bus %d\n",
4249 				bus->res_addr.bus);
4250 		}
4251 
4252 		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4253 	}
4254 }
4255 
4256 /**
4257  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4258  * @ioa_cfg:	ioa config struct
4259  *
4260  * Looks through the config table checking for SES devices. If
4261  * an SES device is found in the SES table with a maximum SCSI
4262  * bus speed, that bus is limited to the indicated speed.
4263  *
4264  * Return value:
4265  * 	none
4266  **/
4267 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4268 {
4269 	u32 max_xfer_rate;
4270 	int i;
4271 
4272 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4273 		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4274 						       ioa_cfg->bus_attr[i].bus_width);
4275 
4276 		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4277 			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4278 	}
4279 }
4280 
4281 /**
4282  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4283  * @ioa_cfg:	ioa config struct
4284  * @mode_pages:	mode page 28 buffer
4285  *
4286  * Updates mode page 28 based on driver configuration
4287  *
4288  * Return value:
4289  * 	none
4290  **/
4291 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4292 					  struct ipr_mode_pages *mode_pages)
4293 {
4294 	int i, entry_length;
4295 	struct ipr_dev_bus_entry *bus;
4296 	struct ipr_bus_attributes *bus_attr;
4297 	struct ipr_mode_page28 *mode_page;
4298 
4299 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
4300 				      sizeof(struct ipr_mode_page28));
4301 
4302 	entry_length = mode_page->entry_length;
4303 
4304 	/* Loop for each device bus entry */
4305 	for (i = 0, bus = mode_page->bus;
4306 	     i < mode_page->num_entries;
4307 	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4308 		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4309 			dev_err(&ioa_cfg->pdev->dev,
4310 				"Invalid resource address reported: 0x%08X\n",
4311 				IPR_GET_PHYS_LOC(bus->res_addr));
4312 			continue;
4313 		}
4314 
4315 		bus_attr = &ioa_cfg->bus_attr[i];
4316 		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4317 		bus->bus_width = bus_attr->bus_width;
4318 		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4319 		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4320 		if (bus_attr->qas_enabled)
4321 			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4322 		else
4323 			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4324 	}
4325 }
4326 
4327 /**
4328  * ipr_build_mode_select - Build a mode select command
4329  * @ipr_cmd:	ipr command struct
4330  * @res_handle:	resource handle to send command to
4331  * @parm:		Byte 1 of Mode Select command
4332  * @dma_addr:	DMA buffer address
4333  * @xfer_len:	data transfer length
4334  *
4335  * Return value:
4336  * 	none
4337  **/
4338 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4339 				  __be32 res_handle, u8 parm, u32 dma_addr,
4340 				  u8 xfer_len)
4341 {
4342 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4343 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4344 
4345 	ioarcb->res_handle = res_handle;
4346 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4347 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4348 	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4349 	ioarcb->cmd_pkt.cdb[1] = parm;
4350 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
4351 
4352 	ioadl->flags_and_data_len =
4353 		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4354 	ioadl->address = cpu_to_be32(dma_addr);
4355 	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4356 	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4357 }
4358 
4359 /**
4360  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4361  * @ipr_cmd:	ipr command struct
4362  *
4363  * This function sets up the SCSI bus attributes and sends
4364  * a Mode Select for Page 28 to activate them.
4365  *
4366  * Return value:
4367  * 	IPR_RC_JOB_RETURN
4368  **/
4369 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4370 {
4371 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4372 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4373 	int length;
4374 
4375 	ENTER;
4376 	if (ioa_cfg->saved_mode_pages) {
4377 		memcpy(mode_pages, ioa_cfg->saved_mode_pages,
4378 		       ioa_cfg->saved_mode_page_len);
4379 		length = ioa_cfg->saved_mode_page_len;
4380 	} else {
4381 		ipr_scsi_bus_speed_limit(ioa_cfg);
4382 		ipr_check_term_power(ioa_cfg, mode_pages);
4383 		ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4384 		length = mode_pages->hdr.length + 1;
4385 		mode_pages->hdr.length = 0;
4386 	}
4387 
4388 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4389 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4390 			      length);
4391 
4392 	ipr_cmd->job_step = ipr_set_supported_devs;
4393 	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4394 				    struct ipr_resource_entry, queue);
4395 
4396 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4397 
4398 	LEAVE;
4399 	return IPR_RC_JOB_RETURN;
4400 }
4401 
4402 /**
4403  * ipr_build_mode_sense - Builds a mode sense command
4404  * @ipr_cmd:	ipr command struct
4405  * @res_handle:	resource handle to send command to
4406  * @parm:		Byte 2 of mode sense command
4407  * @dma_addr:	DMA address of mode sense buffer
4408  * @xfer_len:	Size of DMA buffer
4409  *
4410  * Return value:
4411  * 	none
4412  **/
4413 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4414 				 __be32 res_handle,
4415 				 u8 parm, u32 dma_addr, u8 xfer_len)
4416 {
4417 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4418 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4419 
4420 	ioarcb->res_handle = res_handle;
4421 	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4422 	ioarcb->cmd_pkt.cdb[2] = parm;
4423 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
4424 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4425 
4426 	ioadl->flags_and_data_len =
4427 		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4428 	ioadl->address = cpu_to_be32(dma_addr);
4429 	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4430 	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4431 }
4432 
4433 /**
4434  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4435  * @ipr_cmd:	ipr command struct
4436  *
4437  * This function sends a Page 28 mode sense to the IOA to
4438  * retrieve SCSI bus attributes.
4439  *
4440  * Return value:
4441  * 	IPR_RC_JOB_RETURN
4442  **/
4443 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4444 {
4445 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4446 
4447 	ENTER;
4448 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4449 			     0x28, ioa_cfg->vpd_cbs_dma +
4450 			     offsetof(struct ipr_misc_cbs, mode_pages),
4451 			     sizeof(struct ipr_mode_pages));
4452 
4453 	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4454 
4455 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4456 
4457 	LEAVE;
4458 	return IPR_RC_JOB_RETURN;
4459 }
4460 
4461 /**
4462  * ipr_init_res_table - Initialize the resource table
4463  * @ipr_cmd:	ipr command struct
4464  *
4465  * This function looks through the existing resource table, comparing
4466  * it with the config table. It reconciles old and new
4467  * devices and schedules adding/removing them from the mid-layer
4468  * as appropriate.
4469  *
4470  * Return value:
4471  * 	IPR_RC_JOB_CONTINUE
4472  **/
4473 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4474 {
4475 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4476 	struct ipr_resource_entry *res, *temp;
4477 	struct ipr_config_table_entry *cfgte;
4478 	int found, i;
4479 	LIST_HEAD(old_res);
4480 
4481 	ENTER;
4482 	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4483 		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4484 
4485 	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4486 		list_move_tail(&res->queue, &old_res);
4487 
4488 	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4489 		cfgte = &ioa_cfg->cfg_table->dev[i];
4490 		found = 0;
4491 
4492 		list_for_each_entry_safe(res, temp, &old_res, queue) {
4493 			if (!memcmp(&res->cfgte.res_addr,
4494 				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4495 				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4496 				found = 1;
4497 				break;
4498 			}
4499 		}
4500 
4501 		if (!found) {
4502 			if (list_empty(&ioa_cfg->free_res_q)) {
4503 				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4504 				break;
4505 			}
4506 
4507 			found = 1;
4508 			res = list_entry(ioa_cfg->free_res_q.next,
4509 					 struct ipr_resource_entry, queue);
4510 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4511 			ipr_init_res_entry(res);
4512 			res->add_to_ml = 1;
4513 		}
4514 
4515 		if (found)
4516 			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4517 	}
4518 
4519 	list_for_each_entry_safe(res, temp, &old_res, queue) {
4520 		if (res->sdev) {
4521 			res->del_from_ml = 1;
4522 			res->sdev->hostdata = NULL;
4523 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4524 		} else {
4525 			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4526 		}
4527 	}
4528 
4529 	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4530 
4531 	LEAVE;
4532 	return IPR_RC_JOB_CONTINUE;
4533 }
4534 
4535 /**
4536  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4537  * @ipr_cmd:	ipr command struct
4538  *
4539  * This function sends a Query IOA Configuration command
4540  * to the adapter to retrieve the IOA configuration table.
4541  *
4542  * Return value:
4543  * 	IPR_RC_JOB_RETURN
4544  **/
4545 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
4546 {
4547 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4548 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4549 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4550 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
4551 
4552 	ENTER;
4553 	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
4554 		 ucode_vpd->major_release, ucode_vpd->card_type,
4555 		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
4556 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4557 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4558 
4559 	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
4560 	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
4561 	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
4562 
4563 	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4564 	ioarcb->read_data_transfer_length =
4565 		cpu_to_be32(sizeof(struct ipr_config_table));
4566 
4567 	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
4568 	ioadl->flags_and_data_len =
4569 		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
4570 
4571 	ipr_cmd->job_step = ipr_init_res_table;
4572 
4573 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4574 
4575 	LEAVE;
4576 	return IPR_RC_JOB_RETURN;
4577 }
4578 
4579 /**
4580  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
4581  * @ipr_cmd:	ipr command struct
4582  *
4583  * This utility function sends an inquiry to the adapter.
4584  *
4585  * Return value:
4586  * 	none
4587  **/
4588 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
4589 			      u32 dma_addr, u8 xfer_len)
4590 {
4591 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4592 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4593 
4594 	ENTER;
4595 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4596 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4597 
4598 	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
4599 	ioarcb->cmd_pkt.cdb[1] = flags;
4600 	ioarcb->cmd_pkt.cdb[2] = page;
4601 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
4602 
4603 	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4604 	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4605 
4606 	ioadl->address = cpu_to_be32(dma_addr);
4607 	ioadl->flags_and_data_len =
4608 		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4609 
4610 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4611 	LEAVE;
4612 }
4613 
4614 /**
4615  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
4616  * @ipr_cmd:	ipr command struct
4617  *
4618  * This function sends a Page 3 inquiry to the adapter
4619  * to retrieve software VPD information.
4620  *
4621  * Return value:
4622  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4623  **/
4624 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
4625 {
4626 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4627 	char type[5];
4628 
4629 	ENTER;
4630 
4631 	/* Grab the type out of the VPD and store it away */
4632 	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
4633 	type[4] = '\0';
4634 	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
4635 
4636 	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
4637 
4638 	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
4639 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
4640 			  sizeof(struct ipr_inquiry_page3));
4641 
4642 	LEAVE;
4643 	return IPR_RC_JOB_RETURN;
4644 }
4645 
4646 /**
4647  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
4648  * @ipr_cmd:	ipr command struct
4649  *
4650  * This function sends a standard inquiry to the adapter.
4651  *
4652  * Return value:
4653  * 	IPR_RC_JOB_RETURN
4654  **/
4655 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
4656 {
4657 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4658 
4659 	ENTER;
4660 	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
4661 
4662 	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
4663 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
4664 			  sizeof(struct ipr_ioa_vpd));
4665 
4666 	LEAVE;
4667 	return IPR_RC_JOB_RETURN;
4668 }
4669 
4670 /**
4671  * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
4672  * @ipr_cmd:	ipr command struct
4673  *
4674  * This function sends an Identify Host Request Response Queue
4675  * command to establish the HRRQ with the adapter.
4676  *
4677  * Return value:
4678  * 	IPR_RC_JOB_RETURN
4679  **/
4680 static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
4681 {
4682 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4683 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4684 
4685 	ENTER;
4686 	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
4687 
4688 	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
4689 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4690 
4691 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4692 	ioarcb->cmd_pkt.cdb[2] =
4693 		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
4694 	ioarcb->cmd_pkt.cdb[3] =
4695 		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
4696 	ioarcb->cmd_pkt.cdb[4] =
4697 		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
4698 	ioarcb->cmd_pkt.cdb[5] =
4699 		((u32) ioa_cfg->host_rrq_dma) & 0xff;
4700 	ioarcb->cmd_pkt.cdb[7] =
4701 		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
4702 	ioarcb->cmd_pkt.cdb[8] =
4703 		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
4704 
4705 	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
4706 
4707 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4708 
4709 	LEAVE;
4710 	return IPR_RC_JOB_RETURN;
4711 }
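
/*
 * Editor's note: the Identify Host RRQ CDB built above describes the
 * host RRQ inline, big-endian:
 *
 *	cdb[0]		IPR_ID_HOST_RR_Q opcode
 *	cdb[2]-cdb[5]	32-bit bus address of host_rrq, MSB first
 *	cdb[7]-cdb[8]	queue length in bytes
 *			(sizeof(u32) * IPR_NUM_CMD_BLKS)
 */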
4712 
4713 /**
4714  * ipr_reset_timer_done - Adapter reset timer function
4715  * @ipr_cmd:	ipr command struct
4716  *
4717  * Description: This function is used in adapter reset processing
4718  * for timing events. If the reset_cmd pointer in the IOA
4719  * config struct no longer points at this command, we are doing nested
4720  * resets and fail_all_ops will take care of freeing the
4721  * command block.
4722  *
4723  * Return value:
4724  * 	none
4725  **/
4726 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
4727 {
4728 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4729 	unsigned long lock_flags = 0;
4730 
4731 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4732 
4733 	if (ioa_cfg->reset_cmd == ipr_cmd) {
4734 		list_del(&ipr_cmd->queue);
4735 		ipr_cmd->done(ipr_cmd);
4736 	}
4737 
4738 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4739 }
4740 
4741 /**
4742  * ipr_reset_start_timer - Start a timer for adapter reset job
4743  * @ipr_cmd:	ipr command struct
4744  * @timeout:	timeout value
4745  *
4746  * Description: This function is used in adapter reset processing
4747  * for timing events. If the reset_cmd pointer in the IOA
4748  * config struct no longer points at this command, we are doing nested
4749  * resets and fail_all_ops will take care of freeing the
4750  * command block.
4751  *
4752  * Return value:
4753  * 	none
4754  **/
4755 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
4756 				  unsigned long timeout)
4757 {
4758 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
4759 	ipr_cmd->done = ipr_reset_ioa_job;
4760 
4761 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
4762 	ipr_cmd->timer.expires = jiffies + timeout;
4763 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
4764 	add_timer(&ipr_cmd->timer);
4765 }
4766 
4767 /**
4768  * ipr_init_ioa_mem - Initialize ioa_cfg control block
4769  * @ioa_cfg:	ioa cfg struct
4770  *
4771  * Return value:
4772  * 	nothing
4773  **/
4774 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
4775 {
4776 	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
4777 
4778 	/* Initialize Host RRQ pointers */
4779 	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
4780 	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
4781 	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4782 	ioa_cfg->toggle_bit = 1;
4783 
4784 	/* Zero out config table */
4785 	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
4786 }
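
/*
 * Editor's sketch of the toggle-bit protocol initialized above (the
 * consuming side lives in the interrupt handler elsewhere in this
 * file; this is a paraphrase, not a copy): the adapter stamps each
 * HRRQ entry with the current toggle bit, so the host can spot new
 * entries without a separate producer index:
 *
 *	while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT)
 *	       == ioa_cfg->toggle_bit) {
 *		(process the completed command this entry indexes)
 *		if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
 *			ioa_cfg->hrrq_curr++;
 *		} else {
 *			ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
 *			ioa_cfg->toggle_bit ^= 1u;
 *		}
 *	}
 */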
4787 
4788 /**
4789  * ipr_reset_enable_ioa - Enable the IOA following a reset.
4790  * @ipr_cmd:	ipr command struct
4791  *
4792  * This function reinitializes some control blocks and
4793  * enables destructive diagnostics on the adapter.
4794  *
4795  * Return value:
4796  * 	IPR_RC_JOB_RETURN
4797  **/
4798 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
4799 {
4800 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4801 	volatile u32 int_reg;
4802 
4803 	ENTER;
4804 	ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
4805 	ipr_init_ioa_mem(ioa_cfg);
4806 
4807 	ioa_cfg->allow_interrupts = 1;
4808 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4809 
4810 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4811 		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
4812 		       ioa_cfg->regs.clr_interrupt_mask_reg);
4813 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4814 		return IPR_RC_JOB_CONTINUE;
4815 	}
4816 
4817 	/* Enable destructive diagnostics on IOA */
4818 	writel(IPR_DOORBELL, ioa_cfg->regs.set_uproc_interrupt_reg);
4819 
4820 	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
4821 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4822 
4823 	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
4824 
4825 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
4826 	ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
4827 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
4828 	ipr_cmd->done = ipr_reset_ioa_job;
4829 	add_timer(&ipr_cmd->timer);
4830 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4831 
4832 	LEAVE;
4833 	return IPR_RC_JOB_RETURN;
4834 }
4835 
4836 /**
4837  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
4838  * @ipr_cmd:	ipr command struct
4839  *
4840  * This function is invoked when an adapter dump has run out
4841  * of processing time.
4842  *
4843  * Return value:
4844  * 	IPR_RC_JOB_CONTINUE
4845  **/
4846 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
4847 {
4848 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4849 
4850 	if (ioa_cfg->sdt_state == GET_DUMP)
4851 		ioa_cfg->sdt_state = ABORT_DUMP;
4852 
4853 	ipr_cmd->job_step = ipr_reset_alert;
4854 
4855 	return IPR_RC_JOB_CONTINUE;
4856 }
4857 
4858 /**
4859  * ipr_unit_check_no_data - Log a unit check/no data error log
4860  * @ioa_cfg:		ioa config struct
4861  *
4862  * Logs an error indicating the adapter unit checked, but for some
4863  * reason, we were unable to fetch the unit check buffer.
4864  *
4865  * Return value:
4866  * 	nothing
4867  **/
4868 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
4869 {
4870 	ioa_cfg->errors_logged++;
4871 	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
4872 }
4873 
4874 /**
4875  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
4876  * @ioa_cfg:		ioa config struct
4877  *
4878  * Fetches the unit check buffer from the adapter by clocking the data
4879  * through the mailbox register.
4880  *
4881  * Return value:
4882  * 	nothing
4883  **/
4884 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
4885 {
4886 	unsigned long mailbox;
4887 	struct ipr_hostrcb *hostrcb;
4888 	struct ipr_uc_sdt sdt;
4889 	int rc, length;
4890 
4891 	mailbox = readl(ioa_cfg->ioa_mailbox);
4892 
4893 	if (!ipr_sdt_is_fmt2(mailbox)) {
4894 		ipr_unit_check_no_data(ioa_cfg);
4895 		return;
4896 	}
4897 
4898 	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
4899 	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
4900 					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
4901 
4902 	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
4903 	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
4904 		ipr_unit_check_no_data(ioa_cfg);
4905 		return;
4906 	}
4907 
4908 	/* Find length of the first sdt entry (UC buffer) */
4909 	length = (be32_to_cpu(sdt.entry[0].end_offset) -
4910 		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
4911 
4912 	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
4913 			     struct ipr_hostrcb, queue);
4914 	list_del(&hostrcb->queue);
4915 	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
4916 
4917 	rc = ipr_get_ldump_data_section(ioa_cfg,
4918 					be32_to_cpu(sdt.entry[0].bar_str_offset),
4919 					(__be32 *)&hostrcb->hcam,
4920 					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
4921 
4922 	if (!rc)
4923 		ipr_handle_log_data(ioa_cfg, hostrcb);
4924 	else
4925 		ipr_unit_check_no_data(ioa_cfg);
4926 
4927 	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4928 }
4929 
4930 /**
4931  * ipr_reset_restore_cfg_space - Restore PCI config space.
4932  * @ipr_cmd:	ipr command struct
4933  *
4934  * Description: This function restores the saved PCI config space of
4935  * the adapter, fails all outstanding ops back to the callers, and
4936  * fetches the dump/unit check if applicable to this reset.
4937  *
4938  * Return value:
4939  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4940  **/
4941 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
4942 {
4943 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4944 	int rc;
4945 
4946 	ENTER;
4947 	pci_unblock_user_cfg_access(ioa_cfg->pdev);
4948 	rc = pci_restore_state(ioa_cfg->pdev);
4949 
4950 	if (rc != PCIBIOS_SUCCESSFUL) {
4951 		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
4952 		return IPR_RC_JOB_CONTINUE;
4953 	}
4954 
4955 	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
4956 		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
4957 		return IPR_RC_JOB_CONTINUE;
4958 	}
4959 
4960 	ipr_fail_all_ops(ioa_cfg);
4961 
4962 	if (ioa_cfg->ioa_unit_checked) {
4963 		ioa_cfg->ioa_unit_checked = 0;
4964 		ipr_get_unit_check_buffer(ioa_cfg);
4965 		ipr_cmd->job_step = ipr_reset_alert;
4966 		ipr_reset_start_timer(ipr_cmd, 0);
4967 		return IPR_RC_JOB_RETURN;
4968 	}
4969 
4970 	if (ioa_cfg->in_ioa_bringdown) {
4971 		ipr_cmd->job_step = ipr_ioa_bringdown_done;
4972 	} else {
4973 		ipr_cmd->job_step = ipr_reset_enable_ioa;
4974 
4975 		if (GET_DUMP == ioa_cfg->sdt_state) {
4976 			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
4977 			ipr_cmd->job_step = ipr_reset_wait_for_dump;
4978 			schedule_work(&ioa_cfg->work_q);
4979 			return IPR_RC_JOB_RETURN;
4980 		}
4981 	}
4982 
4983 	LEAVE;
4984 	return IPR_RC_JOB_CONTINUE;
4985 }
4986 
4987 /**
4988  * ipr_reset_start_bist - Run BIST on the adapter.
4989  * @ipr_cmd:	ipr command struct
4990  *
4991  * Description: This function runs BIST on the adapter, then delays 2 seconds.
4992  *
4993  * Return value:
4994  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4995  **/
4996 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
4997 {
4998 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4999 	int rc;
5000 
5001 	ENTER;
5002 	pci_block_user_cfg_access(ioa_cfg->pdev);
5003 	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
5004 
5005 	if (rc != PCIBIOS_SUCCESSFUL) {
5006 		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5007 		rc = IPR_RC_JOB_CONTINUE;
5008 	} else {
5009 		ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5010 		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5011 		rc = IPR_RC_JOB_RETURN;
5012 	}
5013 
5014 	LEAVE;
5015 	return rc;
5016 }
5017 
5018 /**
5019  * ipr_reset_allowed - Query whether or not IOA can be reset
5020  * @ioa_cfg:	ioa config struct
5021  *
5022  * Return value:
5023  * 	0 if reset not allowed / non-zero if reset is allowed
5024  **/
5025 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5026 {
5027 	volatile u32 temp_reg;
5028 
5029 	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5030 	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5031 }
5032 
5033 /**
5034  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5035  * @ipr_cmd:	ipr command struct
5036  *
5037  * Description: This function waits for adapter permission to run BIST,
5038  * then runs BIST. If the adapter does not give permission after a
5039  * reasonable time, we will reset the adapter anyway. The impact of
5040  * reasonable time, we will reset it anyway. The risk in resetting
5041  * the adapter without warning is losing the persistent error log
5042  * on the adapter. If the adapter is reset while it is writing to
5043  * its flash, the flash segment being written will have bad ECC
5044  * and be zeroed.
5045  * Return value:
5046  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5047  **/
5048 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5049 {
5050 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5051 	int rc = IPR_RC_JOB_RETURN;
5052 
5053 	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5054 		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5055 		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5056 	} else {
5057 		ipr_cmd->job_step = ipr_reset_start_bist;
5058 		rc = IPR_RC_JOB_CONTINUE;
5059 	}
5060 
5061 	return rc;
5062 }
5063 
5064 /**
5065  * ipr_reset_alert_part2 - Alert the adapter of a pending reset
5066  * @ipr_cmd:	ipr command struct
5067  *
5068  * Description: This function alerts the adapter that it will be reset.
5069  * If memory space is not currently enabled, proceed directly
5070  * to running BIST on the adapter. The timer must always be started
5071  * so we guarantee we do not run BIST from ipr_isr.
5072  *
5073  * Return value:
5074  * 	IPR_RC_JOB_RETURN
5075  **/
5076 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5077 {
5078 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5079 	u16 cmd_reg;
5080 	int rc;
5081 
5082 	ENTER;
5083 	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5084 
5085 	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5086 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5087 		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5088 		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5089 	} else {
5090 		ipr_cmd->job_step = ipr_reset_start_bist;
5091 	}
5092 
5093 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5094 	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5095 
5096 	LEAVE;
5097 	return IPR_RC_JOB_RETURN;
5098 }
5099 
5100 /**
5101  * ipr_reset_ucode_download_done - Microcode download completion
5102  * @ipr_cmd:	ipr command struct
5103  *
5104  * Description: This function unmaps the microcode download buffer.
5105  *
5106  * Return value:
5107  * 	IPR_RC_JOB_CONTINUE
5108  **/
5109 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5110 {
5111 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5112 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5113 
5114 	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5115 		     sglist->num_sg, DMA_TO_DEVICE);
5116 
5117 	ipr_cmd->job_step = ipr_reset_alert;
5118 	return IPR_RC_JOB_CONTINUE;
5119 }
5120 
5121 /**
5122  * ipr_reset_ucode_download - Download microcode to the adapter
5123  * @ipr_cmd:	ipr command struct
5124  *
5125  * Description: This function checks to see whether there is microcode
5126  * to download to the adapter. If there is, a download is performed.
5127  *
5128  * Return value:
5129  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5130  **/
5131 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5132 {
5133 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5134 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5135 
5136 	ENTER;
5137 	ipr_cmd->job_step = ipr_reset_alert;
5138 
5139 	if (!sglist)
5140 		return IPR_RC_JOB_CONTINUE;
5141 
5142 	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5143 	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5144 	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5145 	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5146 	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5147 	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5148 	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5149 
5150 	if (ipr_map_ucode_buffer(ipr_cmd, sglist, sglist->buffer_len)) {
5151 		dev_err(&ioa_cfg->pdev->dev,
5152 			"Failed to map microcode download buffer\n");
5153 		return IPR_RC_JOB_CONTINUE;
5154 	}
5155 
5156 	ipr_cmd->job_step = ipr_reset_ucode_download_done;
5157 
5158 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5159 		   IPR_WRITE_BUFFER_TIMEOUT);
5160 
5161 	LEAVE;
5162 	return IPR_RC_JOB_RETURN;
5163 }
5164 
5165 /**
5166  * ipr_reset_shutdown_ioa - Shutdown the adapter
5167  * @ipr_cmd:	ipr command struct
5168  *
5169  * Description: This function issues an adapter shutdown of the
5170  * specified type to the specified adapter as part of the
5171  * adapter reset job.
5172  *
5173  * Return value:
5174  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5175  **/
5176 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5177 {
5178 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5179 	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5180 	unsigned long timeout;
5181 	int rc = IPR_RC_JOB_CONTINUE;
5182 
5183 	ENTER;
5184 	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5185 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5186 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5187 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5188 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
5189 
5190 		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5191 			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5192 		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5193 			timeout = IPR_INTERNAL_TIMEOUT;
5194 		else
5195 			timeout = IPR_SHUTDOWN_TIMEOUT;
5196 
5197 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5198 
5199 		rc = IPR_RC_JOB_RETURN;
5200 		ipr_cmd->job_step = ipr_reset_ucode_download;
5201 	} else
5202 		ipr_cmd->job_step = ipr_reset_alert;
5203 
5204 	LEAVE;
5205 	return rc;
5206 }
5207 
5208 /**
5209  * ipr_reset_ioa_job - Adapter reset job
5210  * @ipr_cmd:	ipr command struct
5211  *
5212  * Description: This function is the job router for the adapter reset job.
5213  *
5214  * Return value:
5215  * 	none
5216  **/
5217 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5218 {
5219 	u32 rc, ioasc;
5220 	unsigned long scratch = ipr_cmd->u.scratch;
5221 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5222 
5223 	do {
5224 		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5225 
5226 		if (ioa_cfg->reset_cmd != ipr_cmd) {
5227 			/*
5228 			 * We are doing nested adapter resets and this is
5229 			 * not the current reset job.
5230 			 */
5231 			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5232 			return;
5233 		}
5234 
5235 		if (IPR_IOASC_SENSE_KEY(ioasc)) {
5236 			dev_err(&ioa_cfg->pdev->dev,
5237 				"0x%02X failed with IOASC: 0x%08X\n",
5238 				ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5239 
5240 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5241 			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5242 			return;
5243 		}
5244 
5245 		ipr_reinit_ipr_cmnd(ipr_cmd);
5246 		ipr_cmd->u.scratch = scratch;
5247 		rc = ipr_cmd->job_step(ipr_cmd);
5248 	} while (rc == IPR_RC_JOB_CONTINUE);
5249 }
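
/*
 * Editor's roadmap of the job steps routed above for a full adapter
 * reset (each step either returns IPR_RC_JOB_CONTINUE to run the next
 * step inline, or IPR_RC_JOB_RETURN to resume later from a command
 * completion or timer):
 *
 *	ipr_reset_shutdown_ioa -> ipr_reset_ucode_download
 *	  -> ipr_reset_alert -> ipr_reset_wait_to_start_bist
 *	  -> ipr_reset_start_bist -> ipr_reset_restore_cfg_space
 *	  -> ipr_reset_enable_ioa -> ipr_ioafp_indentify_hrrq
 *	  -> ipr_ioafp_std_inquiry -> ipr_ioafp_page3_inquiry
 *	  -> ipr_ioafp_query_ioa_cfg -> ipr_init_res_table
 *	  -> ipr_ioafp_mode_sense_page28 -> ipr_ioafp_mode_select_page28
 *	  -> ipr_set_supported_devs -> ipr_ioa_reset_done
 *
 * Bringdown resets divert from ipr_reset_restore_cfg_space to
 * ipr_ioa_bringdown_done instead of re-enabling the IOA.
 */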
5250 
5251 /**
5252  * _ipr_initiate_ioa_reset - Initiate an adapter reset
5253  * @ioa_cfg:		ioa config struct
5254  * @job_step:		first job step of reset job
5255  * @shutdown_type:	shutdown type
5256  *
5257  * Description: This function will initiate the reset of the given adapter
5258  * starting at the selected job step.
5259  * If the caller needs to wait on the completion of the reset,
5260  * the caller must sleep on the reset_wait_q.
5261  *
5262  * Return value:
5263  * 	none
5264  **/
5265 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5266 				    int (*job_step) (struct ipr_cmnd *),
5267 				    enum ipr_shutdown_type shutdown_type)
5268 {
5269 	struct ipr_cmnd *ipr_cmd;
5270 
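	/*
	 * Quiesce the host: mark the reset in progress, stop accepting
	 * new commands, and block the SCSI midlayer until the reset job
	 * completes.
	 */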
5271 	ioa_cfg->in_reset_reload = 1;
5272 	ioa_cfg->allow_cmds = 0;
5273 	scsi_block_requests(ioa_cfg->host);
5274 
5275 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5276 	ioa_cfg->reset_cmd = ipr_cmd;
5277 	ipr_cmd->job_step = job_step;
5278 	ipr_cmd->u.shutdown_type = shutdown_type;
5279 
5280 	ipr_reset_ioa_job(ipr_cmd);
5281 }
5282 
5283 /**
5284  * ipr_initiate_ioa_reset - Initiate an adapter reset
5285  * @ioa_cfg:		ioa config struct
5286  * @shutdown_type:	shutdown type
5287  *
5288  * Description: This function will initiate the reset of the given adapter.
5289  * If the caller needs to wait on the completion of the reset,
5290  * the caller must sleep on the reset_wait_q.
5291  *
5292  * Return value:
5293  * 	none
5294  **/
5295 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5296 				   enum ipr_shutdown_type shutdown_type)
5297 {
5298 	if (ioa_cfg->ioa_is_dead)
5299 		return;
5300 
5301 	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5302 		ioa_cfg->sdt_state = ABORT_DUMP;
5303 
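	/*
	 * After IPR_NUM_RESET_RELOAD_RETRIES consecutive failed reset
	 * attempts, give up and take the adapter offline.
	 */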
5304 	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
5305 		dev_err(&ioa_cfg->pdev->dev,
5306 			"IOA taken offline - error recovery failed\n");
5307 
5308 		ioa_cfg->reset_retries = 0;
5309 		ioa_cfg->ioa_is_dead = 1;
5310 
5311 		if (ioa_cfg->in_ioa_bringdown) {
5312 			ioa_cfg->reset_cmd = NULL;
5313 			ioa_cfg->in_reset_reload = 0;
5314 			ipr_fail_all_ops(ioa_cfg);
5315 			wake_up_all(&ioa_cfg->reset_wait_q);
5316 
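			/*
			 * Drop the host lock around scsi_unblock_requests(),
			 * which may re-enter the request path and take the
			 * lock itself.
			 */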
5317 			spin_unlock_irq(ioa_cfg->host->host_lock);
5318 			scsi_unblock_requests(ioa_cfg->host);
5319 			spin_lock_irq(ioa_cfg->host->host_lock);
5320 			return;
5321 		} else {
5322 			ioa_cfg->in_ioa_bringdown = 1;
5323 			shutdown_type = IPR_SHUTDOWN_NONE;
5324 		}
5325 	}
5326 
5327 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5328 				shutdown_type);
5329 }
5330 
5331 /**
5332  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5333  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa()
5334  *
5335  * Description: This is the second phase of adapter initialization.
5336  * This function takes care of initializing the adapter to the point
5337  * where it can accept new commands.
5338  *
5339  * Return value:
5340  * 	0 on success / -EIO on failure
5341  **/
5342 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5343 {
5344 	int rc = 0;
5345 	unsigned long host_lock_flags = 0;
5346 
5347 	ENTER;
5348 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5349 	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
5350 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);
5351 
5352 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
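	/* Wait, without the host lock held, for the reset job to finish. */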
5353 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5354 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5355 
5356 	if (ioa_cfg->ioa_is_dead) {
5357 		rc = -EIO;
5358 	} else if (ipr_invalid_adapter(ioa_cfg)) {
5359 		if (!ipr_testmode)
5360 			rc = -EIO;
5361 
5362 		dev_err(&ioa_cfg->pdev->dev,
5363 			"Adapter not supported in this hardware configuration.\n");
5364 	}
5365 
5366 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5367 
5368 	LEAVE;
5369 	return rc;
5370 }
5371 
5372 /**
5373  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
5374  * @ioa_cfg:	ioa config struct
5375  *
5376  * Return value:
5377  * 	none
5378  **/
5379 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5380 {
5381 	int i;
5382 
5383 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5384 		if (ioa_cfg->ipr_cmnd_list[i])
5385 			pci_pool_free(ioa_cfg->ipr_cmd_pool,
5386 				      ioa_cfg->ipr_cmnd_list[i],
5387 				      ioa_cfg->ipr_cmnd_list_dma[i]);
5388 
5389 		ioa_cfg->ipr_cmnd_list[i] = NULL;
5390 	}
5391 
5392 	if (ioa_cfg->ipr_cmd_pool)
5393 		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
5394 
5395 	ioa_cfg->ipr_cmd_pool = NULL;
5396 }
5397 
5398 /**
5399  * ipr_free_mem - Frees memory allocated for an adapter
5400  * @ioa_cfg:	ioa cfg struct
5401  *
5402  * Return value:
5403  * 	none
5404  **/
5405 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
5406 {
5407 	int i;
5408 
5409 	kfree(ioa_cfg->res_entries);
5410 	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
5411 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5412 	ipr_free_cmd_blks(ioa_cfg);
5413 	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5414 			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5415 	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
5416 			    ioa_cfg->cfg_table,
5417 			    ioa_cfg->cfg_table_dma);
5418 
5419 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
5420 		pci_free_consistent(ioa_cfg->pdev,
5421 				    sizeof(struct ipr_hostrcb),
5422 				    ioa_cfg->hostrcb[i],
5423 				    ioa_cfg->hostrcb_dma[i]);
5424 	}
5425 
5426 	ipr_free_dump(ioa_cfg);
5427 	kfree(ioa_cfg->saved_mode_pages);
5428 	kfree(ioa_cfg->trace);
5429 }
5430 
5431 /**
5432  * ipr_free_all_resources - Free all allocated resources for an adapter.
5433  * @ioa_cfg:	ioa config struct
5434  *
5435  * This function frees all allocated resources for the
5436  * specified adapter.
5437  *
5438  * Return value:
5439  * 	none
5440  **/
5441 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
5442 {
5443 	struct pci_dev *pdev = ioa_cfg->pdev;
5444 
5445 	ENTER;
5446 	free_irq(pdev->irq, ioa_cfg);
5447 	iounmap(ioa_cfg->hdw_dma_regs);
5448 	pci_release_regions(pdev);
5449 	ipr_free_mem(ioa_cfg);
5450 	scsi_host_put(ioa_cfg->host);
5451 	pci_disable_device(pdev);
5452 	LEAVE;
5453 }
5454 
5455 /**
5456  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
5457  * @ioa_cfg:	ioa config struct
5458  *
5459  * Return value:
5460  * 	0 on success / -ENOMEM on allocation failure
5461  **/
5462 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5463 {
5464 	struct ipr_cmnd *ipr_cmd;
5465 	struct ipr_ioarcb *ioarcb;
5466 	dma_addr_t dma_addr;
5467 	int i;
5468 
5469 	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
5470 						 sizeof(struct ipr_cmnd), 8, 0);
5471 
5472 	if (!ioa_cfg->ipr_cmd_pool)
5473 		return -ENOMEM;
5474 
5475 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5476 		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
5477 
5478 		if (!ipr_cmd) {
5479 			ipr_free_cmd_blks(ioa_cfg);
5480 			return -ENOMEM;
5481 		}
5482 
5483 		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
5484 		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
5485 		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
5486 
5487 		ioarcb = &ipr_cmd->ioarcb;
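		/*
		 * Point the IOARCB at the bus addresses of its own
		 * scatter/gather list (ioadl) and status area (ioasa)
		 * within this command block; the adapter expects these
		 * fields in big-endian byte order.
		 */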
5488 		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
5489 		ioarcb->host_response_handle = cpu_to_be32(i << 2);
5490 		ioarcb->write_ioadl_addr =
5491 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
5492 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5493 		ioarcb->ioasa_host_pci_addr =
5494 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
5495 		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
5496 		ipr_cmd->cmd_index = i;
5497 		ipr_cmd->ioa_cfg = ioa_cfg;
5498 		ipr_cmd->sense_buffer_dma = dma_addr +
5499 			offsetof(struct ipr_cmnd, sense_buffer);
5500 
5501 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5502 	}
5503 
5504 	return 0;
5505 }
5506 
5507 /**
5508  * ipr_alloc_mem - Allocate memory for an adapter
5509  * @ioa_cfg:	ioa config struct
5510  *
5511  * Return value:
5512  * 	0 on success / non-zero for error
5513  **/
5514 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
5515 {
5516 	struct pci_dev *pdev = ioa_cfg->pdev;
5517 	int i, rc = -ENOMEM;
5518 
5519 	ENTER;
5520 	ioa_cfg->res_entries = kmalloc(sizeof(struct ipr_resource_entry) *
5521 				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
5522 
5523 	if (!ioa_cfg->res_entries)
5524 		goto out;
5525 
5526 	memset(ioa_cfg->res_entries, 0,
5527 	       sizeof(struct ipr_resource_entry) * IPR_MAX_PHYSICAL_DEVS);
5528 
5529 	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
5530 		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
5531 
5532 	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
5533 						sizeof(struct ipr_misc_cbs),
5534 						&ioa_cfg->vpd_cbs_dma);
5535 
5536 	if (!ioa_cfg->vpd_cbs)
5537 		goto out_free_res_entries;
5538 
5539 	if (ipr_alloc_cmd_blks(ioa_cfg))
5540 		goto out_free_vpd_cbs;
5541 
5542 	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
5543 						 sizeof(u32) * IPR_NUM_CMD_BLKS,
5544 						 &ioa_cfg->host_rrq_dma);
5545 
5546 	if (!ioa_cfg->host_rrq)
5547 		goto out_ipr_free_cmd_blocks;
5548 
5549 	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
5550 						  sizeof(struct ipr_config_table),
5551 						  &ioa_cfg->cfg_table_dma);
5552 
5553 	if (!ioa_cfg->cfg_table)
5554 		goto out_free_host_rrq;
5555 
5556 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
5557 		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
5558 							   sizeof(struct ipr_hostrcb),
5559 							   &ioa_cfg->hostrcb_dma[i]);
5560 
5561 		if (!ioa_cfg->hostrcb[i])
5562 			goto out_free_hostrcb_dma;
5563 
5564 		ioa_cfg->hostrcb[i]->hostrcb_dma =
5565 			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
5566 		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
5567 	}
5568 
5569 	ioa_cfg->trace = kmalloc(sizeof(struct ipr_trace_entry) *
5570 				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
5571 
5572 	if (!ioa_cfg->trace)
5573 		goto out_free_hostrcb_dma;
5574 
5575 	memset(ioa_cfg->trace, 0,
5576 	       sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES);
5577 
5578 	rc = 0;
5579 out:
5580 	LEAVE;
5581 	return rc;
5582 
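/*
 * Error unwind: the labels below release resources in reverse allocation
 * order. On a hostrcb failure, i indexes the first allocation that failed,
 * so while (i-- > 0) frees only the buffers actually obtained.
 */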
5583 out_free_hostrcb_dma:
5584 	while (i-- > 0) {
5585 		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
5586 				    ioa_cfg->hostrcb[i],
5587 				    ioa_cfg->hostrcb_dma[i]);
5588 	}
5589 	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
5590 			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
5591 out_free_host_rrq:
5592 	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5593 			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5594 out_ipr_free_cmd_blocks:
5595 	ipr_free_cmd_blks(ioa_cfg);
5596 out_free_vpd_cbs:
5597 	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
5598 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5599 out_free_res_entries:
5600 	kfree(ioa_cfg->res_entries);
5601 	goto out;
5602 }
5603 
5604 /**
5605  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
5606  * @ioa_cfg:	ioa config struct
5607  *
5608  * Return value:
5609  * 	none
5610  **/
5611 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
5612 {
5613 	int i;
5614 
5615 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5616 		ioa_cfg->bus_attr[i].bus = i;
5617 		ioa_cfg->bus_attr[i].qas_enabled = 0;
5618 		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
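		/*
		 * Clamp the ipr_max_speed module parameter to the known
		 * bus speed table; out-of-range values fall back to the
		 * Ultra 160 rate.
		 */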
5619 		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
5620 			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
5621 		else
5622 			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
5623 	}
5624 }
5625 
5626 /**
5627  * ipr_init_ioa_cfg - Initialize IOA config struct
5628  * @ioa_cfg:	ioa config struct
5629  * @host:		scsi host struct
5630  * @pdev:		PCI dev struct
5631  *
5632  * Return value:
5633  * 	none
5634  **/
5635 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
5636 				       struct Scsi_Host *host, struct pci_dev *pdev)
5637 {
5638 	const struct ipr_interrupt_offsets *p;
5639 	struct ipr_interrupts *t;
5640 	void __iomem *base;
5641 
5642 	ioa_cfg->host = host;
5643 	ioa_cfg->pdev = pdev;
5644 	ioa_cfg->log_level = ipr_log_level;
5645 	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
5646 	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
5647 	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
5648 	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
5649 	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
5650 	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
5651 	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
5652 	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
5653 
5654 	INIT_LIST_HEAD(&ioa_cfg->free_q);
5655 	INIT_LIST_HEAD(&ioa_cfg->pending_q);
5656 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
5657 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
5658 	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
5659 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
5660 	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
5661 	init_waitqueue_head(&ioa_cfg->reset_wait_q);
5662 	ioa_cfg->sdt_state = INACTIVE;
5663 
5664 	ipr_initialize_bus_attr(ioa_cfg);
5665 
5666 	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
5667 	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
5668 	host->max_channel = IPR_MAX_BUS_TO_SCAN;
5669 	host->unique_id = host->host_no;
5670 	host->max_cmd_len = IPR_MAX_CDB_LEN;
5671 	pci_set_drvdata(pdev, ioa_cfg);
5672 
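	/*
	 * Resolve the chip-specific register offsets against the mapped
	 * register space to get directly usable MMIO addresses.
	 */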
5673 	p = &ioa_cfg->chip_cfg->regs;
5674 	t = &ioa_cfg->regs;
5675 	base = ioa_cfg->hdw_dma_regs;
5676 
5677 	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
5678 	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
5679 	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
5680 	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
5681 	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
5682 	t->ioarrin_reg = base + p->ioarrin_reg;
5683 	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
5684 	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
5685 	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
5686 }
5687 
5688 /**
5689  * ipr_get_chip_cfg - Find adapter chip configuration
5690  * @dev_id:		PCI device id struct
5691  *
5692  * Return value:
5693  * 	ptr to chip config on success / NULL on failure
5694  **/
5695 static const struct ipr_chip_cfg_t * __devinit
5696 ipr_get_chip_cfg(const struct pci_device_id *dev_id)
5697 {
5698 	int i;
5699 
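	/*
	 * Entries in the PCI ID table can carry a pointer to their chip
	 * config in driver_data; otherwise fall back to matching the
	 * vendor/device pair against the ipr_chip[] table.
	 */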
5700 	if (dev_id->driver_data)
5701 		return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
5702 
5703 	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
5704 		if (ipr_chip[i].vendor == dev_id->vendor &&
5705 		    ipr_chip[i].device == dev_id->device)
5706 			return ipr_chip[i].cfg;
5707 	return NULL;
5708 }
5709 
5710 /**
5711  * ipr_probe_ioa - Allocates memory and does first stage of initialization
5712  * @pdev:		PCI device struct
5713  * @dev_id:		PCI device id struct
5714  *
5715  * Return value:
5716  * 	0 on success / non-zero on failure
5717  **/
5718 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
5719 				   const struct pci_device_id *dev_id)
5720 {
5721 	struct ipr_ioa_cfg *ioa_cfg;
5722 	struct Scsi_Host *host;
5723 	unsigned long ipr_regs_pci;
5724 	void __iomem *ipr_regs;
5725 	u32 rc = PCIBIOS_SUCCESSFUL;
5726 
5727 	ENTER;
5728 
5729 	if ((rc = pci_enable_device(pdev))) {
5730 		dev_err(&pdev->dev, "Cannot enable adapter\n");
5731 		goto out;
5732 	}
5733 
5734 	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
5735 
5736 	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
5737 
5738 	if (!host) {
5739 		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
5740 		rc = -ENOMEM;
5741 		goto out_disable;
5742 	}
5743 
5744 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
5745 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
5746 
5747 	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
5748 
5749 	if (!ioa_cfg->chip_cfg) {
5750 		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
5751 			dev_id->vendor, dev_id->device);
5752 		goto out_scsi_host_put;
5753 	}
5754 
5755 	ipr_regs_pci = pci_resource_start(pdev, 0);
5756 
5757 	rc = pci_request_regions(pdev, IPR_NAME);
5758 	if (rc < 0) {
5759 		dev_err(&pdev->dev,
5760 			"Couldn't register memory range of registers\n");
5761 		goto out_scsi_host_put;
5762 	}
5763 
5764 	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
5765 
5766 	if (!ipr_regs) {
5767 		dev_err(&pdev->dev,
5768 			"Couldn't map memory range of registers\n");
5769 		rc = -ENOMEM;
5770 		goto out_release_regions;
5771 	}
5772 
5773 	ioa_cfg->hdw_dma_regs = ipr_regs;
5774 	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
5775 	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
5776 
5777 	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
5778 
5779 	pci_set_master(pdev);
5780 
5781 	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
5782 	if (rc < 0) {
5783 		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
5784 		goto cleanup_nomem;
5785 	}
5786 
5787 	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5788 				   ioa_cfg->chip_cfg->cache_line_size);
5789 
5790 	if (rc != PCIBIOS_SUCCESSFUL) {
5791 		dev_err(&pdev->dev, "Write of cache line size failed\n");
5792 		rc = -EIO;
5793 		goto cleanup_nomem;
5794 	}
5795 
5796 	/* Save away PCI config space for use following IOA reset */
5797 	rc = pci_save_state(pdev);
5798 
5799 	if (rc != PCIBIOS_SUCCESSFUL) {
5800 		dev_err(&pdev->dev, "Failed to save PCI config space\n");
5801 		rc = -EIO;
5802 		goto cleanup_nomem;
5803 	}
5804 
5805 	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
5806 		goto cleanup_nomem;
5807 
5808 	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
5809 		goto cleanup_nomem;
5810 
5811 	rc = ipr_alloc_mem(ioa_cfg);
5812 	if (rc < 0) {
5813 		dev_err(&pdev->dev,
5814 			"Couldn't allocate enough memory for device driver!\n");
5815 		goto cleanup_nomem;
5816 	}
5817 
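	/*
	 * Mask and clear all interrupts, leaving only the
	 * transition-to-operational interrupt enabled, before hooking
	 * up the shared IRQ handler.
	 */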
5818 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
5819 	rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
5820 
5821 	if (rc) {
5822 		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
5823 			pdev->irq, rc);
5824 		goto cleanup_nolog;
5825 	}
5826 
5827 	spin_lock(&ipr_driver_lock);
5828 	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
5829 	spin_unlock(&ipr_driver_lock);
5830 
5831 	LEAVE;
5832 out:
5833 	return rc;
5834 
5835 cleanup_nolog:
5836 	ipr_free_mem(ioa_cfg);
5837 cleanup_nomem:
5838 	iounmap(ipr_regs);
5839 out_release_regions:
5840 	pci_release_regions(pdev);
5841 out_scsi_host_put:
5842 	scsi_host_put(host);
5843 out_disable:
5844 	pci_disable_device(pdev);
5845 	goto out;
5846 }
5847 
5848 /**
5849  * ipr_scan_vsets - Scans for VSET devices
5850  * @ioa_cfg:	ioa config struct
5851  *
5852  * Description: VSET resources do not follow SAM: LUNs may be sparse and
5853  * LUN 0 may not exist, so we have to scan for these devices ourselves.
5854  *
5855  * Return value:
5856  * 	none
5857  **/
5858 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
5859 {
5860 	int target, lun;
5861 
5862 	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
5863 		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
5864 			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
5865 }
5866 
5867 /**
5868  * ipr_initiate_ioa_bringdown - Bring down an adapter
5869  * @ioa_cfg:		ioa config struct
5870  * @shutdown_type:	shutdown type
5871  *
5872  * Description: This function will initiate bringing down the adapter.
5873  * This consists of issuing an IOA shutdown to the adapter
5874  * to flush the cache and running BIST.
5875  * If the caller needs to wait on the completion of the reset,
5876  * the caller must sleep on the reset_wait_q.
5877  *
5878  * Return value:
5879  * 	none
5880  **/
5881 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
5882 				       enum ipr_shutdown_type shutdown_type)
5883 {
5884 	ENTER;
5885 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
5886 		ioa_cfg->sdt_state = ABORT_DUMP;
5887 	ioa_cfg->reset_retries = 0;
5888 	ioa_cfg->in_ioa_bringdown = 1;
5889 	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
5890 	LEAVE;
5891 }
5892 
5893 /**
5894  * __ipr_remove - Remove a single adapter
5895  * @pdev:	pci device struct
5896  *
5897  * Adapter hot plug remove entry point.
5898  *
5899  * Return value:
5900  * 	none
5901  **/
5902 static void __ipr_remove(struct pci_dev *pdev)
5903 {
5904 	unsigned long host_lock_flags = 0;
5905 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5906 	ENTER;
5907 
5908 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5909 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
5910 
5911 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5912 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5913 	flush_scheduled_work();
5914 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5915 
5916 	spin_lock(&ipr_driver_lock);
5917 	list_del(&ioa_cfg->queue);
5918 	spin_unlock(&ipr_driver_lock);
5919 
5920 	if (ioa_cfg->sdt_state == ABORT_DUMP)
5921 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
5922 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5923 
5924 	ipr_free_all_resources(ioa_cfg);
5925 
5926 	LEAVE;
5927 }
5928 
5929 /**
5930  * ipr_remove - IOA hot plug remove entry point
5931  * @pdev:	pci device struct
5932  *
5933  * Adapter hot plug remove entry point.
5934  *
5935  * Return value:
5936  * 	none
5937  **/
5938 static void ipr_remove(struct pci_dev *pdev)
5939 {
5940 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5941 
5942 	ENTER;
5943 
5944 	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
5945 			      &ipr_trace_attr);
5946 	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
5947 			     &ipr_dump_attr);
5948 	scsi_remove_host(ioa_cfg->host);
5949 
5950 	__ipr_remove(pdev);
5951 
5952 	LEAVE;
5953 }
5954 
5955 /**
5956  * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
5957  *
5958  * Return value:
5959  * 	0 on success / non-zero on failure
5960  **/
5961 static int __devinit ipr_probe(struct pci_dev *pdev,
5962 			       const struct pci_device_id *dev_id)
5963 {
5964 	struct ipr_ioa_cfg *ioa_cfg;
5965 	int rc;
5966 
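	/*
	 * Probing is two-stage: ipr_probe_ioa() allocates and maps the
	 * adapter, then ipr_probe_ioa_part2() brings it operational.
	 * Any failure from here on unwinds through __ipr_remove().
	 */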
5967 	rc = ipr_probe_ioa(pdev, dev_id);
5968 
5969 	if (rc)
5970 		return rc;
5971 
5972 	ioa_cfg = pci_get_drvdata(pdev);
5973 	rc = ipr_probe_ioa_part2(ioa_cfg);
5974 
5975 	if (rc) {
5976 		__ipr_remove(pdev);
5977 		return rc;
5978 	}
5979 
5980 	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
5981 
5982 	if (rc) {
5983 		__ipr_remove(pdev);
5984 		return rc;
5985 	}
5986 
5987 	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
5988 				   &ipr_trace_attr);
5989 
5990 	if (rc) {
5991 		scsi_remove_host(ioa_cfg->host);
5992 		__ipr_remove(pdev);
5993 		return rc;
5994 	}
5995 
5996 	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
5997 				   &ipr_dump_attr);
5998 
5999 	if (rc) {
6000 		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6001 				      &ipr_trace_attr);
6002 		scsi_remove_host(ioa_cfg->host);
6003 		__ipr_remove(pdev);
6004 		return rc;
6005 	}
6006 
6007 	scsi_scan_host(ioa_cfg->host);
6008 	ipr_scan_vsets(ioa_cfg);
6009 	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
6010 	ioa_cfg->allow_ml_add_del = 1;
6011 	schedule_work(&ioa_cfg->work_q);
6012 	return 0;
6013 }
6014 
6015 /**
6016  * ipr_shutdown - Shutdown handler.
6017  * @pdev:	pci device struct
6018  *
6019  * This function is invoked upon system shutdown/reboot. It will issue
6020  * an adapter shutdown to the adapter to flush the write cache.
6021  *
6022  * Return value:
6023  * 	none
6024  **/
6025 static void ipr_shutdown(struct pci_dev *pdev)
6026 {
6027 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6028 	unsigned long lock_flags = 0;
6029 
6030 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6031 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6032 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6033 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6034 }
6035 
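/*
 * PCI IDs claimed by this driver. driver_data points at the matching
 * entry in ipr_chip_cfg[]: index 0 for the Gemstone/Citrine class of
 * chips, index 1 for the Snipe and SCAMP adapters.
 */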
6036 static struct pci_device_id ipr_pci_table[] __devinitdata = {
6037 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6038 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
6039 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6040 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6041 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
6042 	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6043 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6044 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
6045 	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6046 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6047 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
6048 	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6049 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6050 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
6051 	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6052 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6053 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
6054 	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6055 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6056 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
6057 	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6058 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6059 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
6060 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6061 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6062 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
6063 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6064 	{ }
6065 };
6066 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
6067 
6068 static struct pci_driver ipr_driver = {
6069 	.name = IPR_NAME,
6070 	.id_table = ipr_pci_table,
6071 	.probe = ipr_probe,
6072 	.remove = ipr_remove,
6073 	.shutdown = ipr_shutdown,
6074 };
6075 
6076 /**
6077  * ipr_init - Module entry point
6078  *
6079  * Return value:
6080  * 	0 on success / negative value on failure
6081  **/
6082 static int __init ipr_init(void)
6083 {
6084 	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6085 		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6086 
6087 	return pci_module_init(&ipr_driver);
6088 }
6089 
6090 /**
6091  * ipr_exit - Module unload
6092  *
6093  * Module unload entry point.
6094  *
6095  * Return value:
6096  * 	none
6097  **/
6098 static void __exit ipr_exit(void)
6099 {
6100 	pci_unregister_driver(&ipr_driver);
6101 }
6102 
6103 module_init(ipr_init);
6104 module_exit(ipr_exit);
6105