xref: /openbmc/linux/drivers/scsi/ipr.c (revision afb46f79)
1 /*
2  * ipr.c -- driver for IBM Power Linux RAID adapters
3  *
4  * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5  *
6  * Copyright (C) 2003, 2004 IBM Corporation
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23 
24 /*
25  * Notes:
26  *
27  * This driver is used to control the following SCSI adapters:
28  *
29  * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30  *
31  * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32  *              PCI-X Dual Channel Ultra 320 SCSI Adapter
33  *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34  *              Embedded SCSI adapter on p615 and p655 systems
35  *
36  * Supported Hardware Features:
37  *	- Ultra 320 SCSI controller
38  *	- PCI-X host interface
39  *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40  *	- Non-Volatile Write Cache
41  *	- Supports attachment of non-RAID disks, tape, and optical devices
42  *	- RAID Levels 0, 5, 10
43  *	- Hot spare
44  *	- Background Parity Checking
45  *	- Background Data Scrubbing
46  *	- Ability to increase the capacity of an existing RAID 5 disk array
47  *		by adding disks
48  *
49  * Driver Features:
50  *	- Tagged command queuing
51  *	- Adapter microcode download
52  *	- PCI hot plug
53  *	- SCSI device hot plug
54  *
55  */
56 
57 #include <linux/fs.h>
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
79 #include <asm/io.h>
80 #include <asm/irq.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
87 #include "ipr.h"
88 
89 /*
90  *   Global Data
91  */
92 static LIST_HEAD(ipr_ioa_head);
93 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
94 static unsigned int ipr_max_speed = 1;
95 static int ipr_testmode = 0;
96 static unsigned int ipr_fastfail = 0;
97 static unsigned int ipr_transop_timeout = 0;
98 static unsigned int ipr_debug = 0;
99 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
100 static unsigned int ipr_dual_ioa_raid = 1;
101 static unsigned int ipr_number_of_msix = 2;
102 static DEFINE_SPINLOCK(ipr_driver_lock);
103 
104 /* This table describes the differences between DMA controller chips */
105 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
106 	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
107 		.mailbox = 0x0042C,
108 		.max_cmds = 100,
109 		.cache_line_size = 0x20,
110 		.clear_isr = 1,
111 		.iopoll_weight = 0,
112 		{
113 			.set_interrupt_mask_reg = 0x0022C,
114 			.clr_interrupt_mask_reg = 0x00230,
115 			.clr_interrupt_mask_reg32 = 0x00230,
116 			.sense_interrupt_mask_reg = 0x0022C,
117 			.sense_interrupt_mask_reg32 = 0x0022C,
118 			.clr_interrupt_reg = 0x00228,
119 			.clr_interrupt_reg32 = 0x00228,
120 			.sense_interrupt_reg = 0x00224,
121 			.sense_interrupt_reg32 = 0x00224,
122 			.ioarrin_reg = 0x00404,
123 			.sense_uproc_interrupt_reg = 0x00214,
124 			.sense_uproc_interrupt_reg32 = 0x00214,
125 			.set_uproc_interrupt_reg = 0x00214,
126 			.set_uproc_interrupt_reg32 = 0x00214,
127 			.clr_uproc_interrupt_reg = 0x00218,
128 			.clr_uproc_interrupt_reg32 = 0x00218
129 		}
130 	},
131 	{ /* Snipe and Scamp */
132 		.mailbox = 0x0052C,
133 		.max_cmds = 100,
134 		.cache_line_size = 0x20,
135 		.clear_isr = 1,
136 		.iopoll_weight = 0,
137 		{
138 			.set_interrupt_mask_reg = 0x00288,
139 			.clr_interrupt_mask_reg = 0x0028C,
140 			.clr_interrupt_mask_reg32 = 0x0028C,
141 			.sense_interrupt_mask_reg = 0x00288,
142 			.sense_interrupt_mask_reg32 = 0x00288,
143 			.clr_interrupt_reg = 0x00284,
144 			.clr_interrupt_reg32 = 0x00284,
145 			.sense_interrupt_reg = 0x00280,
146 			.sense_interrupt_reg32 = 0x00280,
147 			.ioarrin_reg = 0x00504,
148 			.sense_uproc_interrupt_reg = 0x00290,
149 			.sense_uproc_interrupt_reg32 = 0x00290,
150 			.set_uproc_interrupt_reg = 0x00290,
151 			.set_uproc_interrupt_reg32 = 0x00290,
152 			.clr_uproc_interrupt_reg = 0x00294,
153 			.clr_uproc_interrupt_reg32 = 0x00294
154 		}
155 	},
156 	{ /* CRoC */
157 		.mailbox = 0x00044,
158 		.max_cmds = 1000,
159 		.cache_line_size = 0x20,
160 		.clear_isr = 0,
161 		.iopoll_weight = 64,
162 		{
163 			.set_interrupt_mask_reg = 0x00010,
164 			.clr_interrupt_mask_reg = 0x00018,
165 			.clr_interrupt_mask_reg32 = 0x0001C,
166 			.sense_interrupt_mask_reg = 0x00010,
167 			.sense_interrupt_mask_reg32 = 0x00014,
168 			.clr_interrupt_reg = 0x00008,
169 			.clr_interrupt_reg32 = 0x0000C,
170 			.sense_interrupt_reg = 0x00000,
171 			.sense_interrupt_reg32 = 0x00004,
172 			.ioarrin_reg = 0x00070,
173 			.sense_uproc_interrupt_reg = 0x00020,
174 			.sense_uproc_interrupt_reg32 = 0x00024,
175 			.set_uproc_interrupt_reg = 0x00020,
176 			.set_uproc_interrupt_reg32 = 0x00024,
177 			.clr_uproc_interrupt_reg = 0x00028,
178 			.clr_uproc_interrupt_reg32 = 0x0002C,
179 			.init_feedback_reg = 0x0005C,
180 			.dump_addr_reg = 0x00064,
181 			.dump_data_reg = 0x00068,
182 			.endian_swap_reg = 0x00084
183 		}
184 	},
185 };
186 
187 static const struct ipr_chip_t ipr_chip[] = {
188 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
189 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
190 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
191 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
192 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
193 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
194 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
195 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
196 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
197 };
198 
199 static int ipr_max_bus_speeds[] = {
200 	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
201 };
202 
203 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
204 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
205 module_param_named(max_speed, ipr_max_speed, uint, 0);
206 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
207 module_param_named(log_level, ipr_log_level, uint, 0);
208 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
209 module_param_named(testmode, ipr_testmode, int, 0);
210 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
211 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
212 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
213 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
214 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
215 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
216 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
217 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
218 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
219 module_param_named(max_devs, ipr_max_devs, int, 0);
220 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
221 		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
222 module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
223 MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16).  (default:2)");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(IPR_DRIVER_VERSION);
226 
227 /*  A constant array of IOASCs/URCs/Error Messages */
228 static const
229 struct ipr_error_table_t ipr_error_table[] = {
230 	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
231 	"8155: An unknown error was received"},
232 	{0x00330000, 0, 0,
233 	"Soft underlength error"},
234 	{0x005A0000, 0, 0,
235 	"Command to be cancelled not found"},
236 	{0x00808000, 0, 0,
237 	"Qualified success"},
238 	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
239 	"FFFE: Soft device bus error recovered by the IOA"},
240 	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
241 	"4101: Soft device bus fabric error"},
242 	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
243 	"FFFC: Logical block guard error recovered by the device"},
244 	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
245 	"FFFC: Logical block reference tag error recovered by the device"},
246 	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
247 	"4171: Recovered scatter list tag / sequence number error"},
248 	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
249 	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
250 	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
251 	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
252 	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
253 	"FFFD: Recovered logical block reference tag error detected by the IOA"},
254 	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
255 	"FFFD: Logical block guard error recovered by the IOA"},
256 	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
257 	"FFF9: Device sector reassign successful"},
258 	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
259 	"FFF7: Media error recovered by device rewrite procedures"},
260 	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
261 	"7001: IOA sector reassignment successful"},
262 	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
263 	"FFF9: Soft media error. Sector reassignment recommended"},
264 	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
265 	"FFF7: Media error recovered by IOA rewrite procedures"},
266 	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
267 	"FF3D: Soft PCI bus error recovered by the IOA"},
268 	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
269 	"FFF6: Device hardware error recovered by the IOA"},
270 	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
271 	"FFF6: Device hardware error recovered by the device"},
272 	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
273 	"FF3D: Soft IOA error recovered by the IOA"},
274 	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
275 	"FFFA: Undefined device response recovered by the IOA"},
276 	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
277 	"FFF6: Device bus error, message or command phase"},
278 	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
279 	"FFFE: Task Management Function failed"},
280 	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
281 	"FFF6: Failure prediction threshold exceeded"},
282 	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
283 	"8009: Impending cache battery pack failure"},
284 	{0x02040100, 0, 0,
285 	"Logical Unit in process of becoming ready"},
286 	{0x02040200, 0, 0,
287 	"Initializing command required"},
288 	{0x02040400, 0, 0,
289 	"34FF: Disk device format in progress"},
290 	{0x02040C00, 0, 0,
291 	"Logical unit not accessible, target port in unavailable state"},
292 	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
293 	"9070: IOA requested reset"},
294 	{0x023F0000, 0, 0,
295 	"Synchronization required"},
296 	{0x02408500, 0, 0,
297 	"IOA microcode download required"},
298 	{0x02408600, 0, 0,
299 	"Device bus connection is prohibited by host"},
300 	{0x024E0000, 0, 0,
301 	"No ready, IOA shutdown"},
302 	{0x025A0000, 0, 0,
303 	"Not ready, IOA has been shutdown"},
304 	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
305 	"3020: Storage subsystem configuration error"},
306 	{0x03110B00, 0, 0,
307 	"FFF5: Medium error, data unreadable, recommend reassign"},
308 	{0x03110C00, 0, 0,
309 	"7000: Medium error, data unreadable, do not reassign"},
310 	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
311 	"FFF3: Disk media format bad"},
312 	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
313 	"3002: Addressed device failed to respond to selection"},
314 	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
315 	"3100: Device bus error"},
316 	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
317 	"3109: IOA timed out a device command"},
318 	{0x04088000, 0, 0,
319 	"3120: SCSI bus is not operational"},
320 	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
321 	"4100: Hard device bus fabric error"},
322 	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
323 	"310C: Logical block guard error detected by the device"},
324 	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
325 	"310C: Logical block reference tag error detected by the device"},
326 	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
327 	"4170: Scatter list tag / sequence number error"},
328 	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
329 	"8150: Logical block CRC error on IOA to Host transfer"},
330 	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
331 	"4170: Logical block sequence number error on IOA to Host transfer"},
332 	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
333 	"310D: Logical block reference tag error detected by the IOA"},
334 	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
335 	"310D: Logical block guard error detected by the IOA"},
336 	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
337 	"9000: IOA reserved area data check"},
338 	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
339 	"9001: IOA reserved area invalid data pattern"},
340 	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
341 	"9002: IOA reserved area LRC error"},
342 	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
343 	"Hardware Error, IOA metadata access error"},
344 	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
345 	"102E: Out of alternate sectors for disk storage"},
346 	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
347 	"FFF4: Data transfer underlength error"},
348 	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
349 	"FFF4: Data transfer overlength error"},
350 	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
351 	"3400: Logical unit failure"},
352 	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
353 	"FFF4: Device microcode is corrupt"},
354 	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
355 	"8150: PCI bus error"},
356 	{0x04430000, 1, 0,
357 	"Unsupported device bus message received"},
358 	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
359 	"FFF4: Disk device problem"},
360 	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
361 	"8150: Permanent IOA failure"},
362 	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
363 	"3010: Disk device returned wrong response to IOA"},
364 	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
365 	"8151: IOA microcode error"},
366 	{0x04448500, 0, 0,
367 	"Device bus status error"},
368 	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
369 	"8157: IOA error requiring IOA reset to recover"},
370 	{0x04448700, 0, 0,
371 	"ATA device status error"},
372 	{0x04490000, 0, 0,
373 	"Message reject received from the device"},
374 	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
375 	"8008: A permanent cache battery pack failure occurred"},
376 	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
377 	"9090: Disk unit has been modified after the last known status"},
378 	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
379 	"9081: IOA detected device error"},
380 	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
381 	"9082: IOA detected device error"},
382 	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
383 	"3110: Device bus error, message or command phase"},
384 	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
385 	"3110: SAS Command / Task Management Function failed"},
386 	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
387 	"9091: Incorrect hardware configuration change has been detected"},
388 	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
389 	"9073: Invalid multi-adapter configuration"},
390 	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
391 	"4010: Incorrect connection between cascaded expanders"},
392 	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
393 	"4020: Connections exceed IOA design limits"},
394 	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
395 	"4030: Incorrect multipath connection"},
396 	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
397 	"4110: Unsupported enclosure function"},
398 	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
399 	"4120: SAS cable VPD cannot be read"},
400 	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
401 	"FFF4: Command to logical unit failed"},
402 	{0x05240000, 1, 0,
403 	"Illegal request, invalid request type or request packet"},
404 	{0x05250000, 0, 0,
405 	"Illegal request, invalid resource handle"},
406 	{0x05258000, 0, 0,
407 	"Illegal request, commands not allowed to this device"},
408 	{0x05258100, 0, 0,
409 	"Illegal request, command not allowed to a secondary adapter"},
410 	{0x05258200, 0, 0,
411 	"Illegal request, command not allowed to a non-optimized resource"},
412 	{0x05260000, 0, 0,
413 	"Illegal request, invalid field in parameter list"},
414 	{0x05260100, 0, 0,
415 	"Illegal request, parameter not supported"},
416 	{0x05260200, 0, 0,
417 	"Illegal request, parameter value invalid"},
418 	{0x052C0000, 0, 0,
419 	"Illegal request, command sequence error"},
420 	{0x052C8000, 1, 0,
421 	"Illegal request, dual adapter support not enabled"},
422 	{0x052C8100, 1, 0,
423 	"Illegal request, another cable connector was physically disabled"},
424 	{0x054E8000, 1, 0,
425 	"Illegal request, inconsistent group id/group count"},
426 	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
427 	"9031: Array protection temporarily suspended, protection resuming"},
428 	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
429 	"9040: Array protection temporarily suspended, protection resuming"},
430 	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
431 	"4080: IOA exceeded maximum operating temperature"},
432 	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
433 	"4085: Service required"},
434 	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
435 	"3140: Device bus not ready to ready transition"},
436 	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
437 	"FFFB: SCSI bus was reset"},
438 	{0x06290500, 0, 0,
439 	"FFFE: SCSI bus transition to single ended"},
440 	{0x06290600, 0, 0,
441 	"FFFE: SCSI bus transition to LVD"},
442 	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
443 	"FFFB: SCSI bus was reset by another initiator"},
444 	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
445 	"3029: A device replacement has occurred"},
446 	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
447 	"4102: Device bus fabric performance degradation"},
448 	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
449 	"9051: IOA cache data exists for a missing or failed device"},
450 	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
451 	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
452 	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
453 	"9025: Disk unit is not supported at its physical location"},
454 	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
455 	"3020: IOA detected a SCSI bus configuration error"},
456 	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
457 	"3150: SCSI bus configuration error"},
458 	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
459 	"9074: Asymmetric advanced function disk configuration"},
460 	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
461 	"4040: Incomplete multipath connection between IOA and enclosure"},
462 	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
463 	"4041: Incomplete multipath connection between enclosure and device"},
464 	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
465 	"9075: Incomplete multipath connection between IOA and remote IOA"},
466 	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
467 	"9076: Configuration error, missing remote IOA"},
468 	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
469 	"4050: Enclosure does not support a required multipath function"},
470 	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
471 	"4121: Configuration error, required cable is missing"},
472 	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
473 	"4122: Cable is not plugged into the correct location on remote IOA"},
474 	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
475 	"4123: Configuration error, invalid cable vital product data"},
476 	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
477 	"4124: Configuration error, both cable ends are plugged into the same IOA"},
478 	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
479 	"4070: Logically bad block written on device"},
480 	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
481 	"9041: Array protection temporarily suspended"},
482 	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
483 	"9042: Corrupt array parity detected on specified device"},
484 	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
485 	"9030: Array no longer protected due to missing or failed disk unit"},
486 	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
487 	"9071: Link operational transition"},
488 	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
489 	"9072: Link not operational transition"},
490 	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
491 	"9032: Array exposed but still protected"},
492 	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
493 	"70DD: Device forced failed by disrupt device command"},
494 	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
495 	"4061: Multipath redundancy level got better"},
496 	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
497 	"4060: Multipath redundancy level got worse"},
498 	{0x07270000, 0, 0,
499 	"Failure due to other device"},
500 	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
501 	"9008: IOA does not support functions expected by devices"},
502 	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
503 	"9010: Cache data associated with attached devices cannot be found"},
504 	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
505 	"9011: Cache data belongs to devices other than those attached"},
506 	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
507 	"9020: Array missing 2 or more devices with only 1 device present"},
508 	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
509 	"9021: Array missing 2 or more devices with 2 or more devices present"},
510 	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
511 	"9022: Exposed array is missing a required device"},
512 	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
513 	"9023: Array member(s) not at required physical locations"},
514 	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
515 	"9024: Array not functional due to present hardware configuration"},
516 	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
517 	"9026: Array not functional due to present hardware configuration"},
518 	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
519 	"9027: Array is missing a device and parity is out of sync"},
520 	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
521 	"9028: Maximum number of arrays already exist"},
522 	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
523 	"9050: Required cache data cannot be located for a disk unit"},
524 	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
525 	"9052: Cache data exists for a device that has been modified"},
526 	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
527 	"9054: IOA resources not available due to previous problems"},
528 	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
529 	"9092: Disk unit requires initialization before use"},
530 	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
531 	"9029: Incorrect hardware configuration change has been detected"},
532 	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
533 	"9060: One or more disk pairs are missing from an array"},
534 	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
535 	"9061: One or more disks are missing from an array"},
536 	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
537 	"9062: One or more disks are missing from an array"},
538 	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
539 	"9063: Maximum number of functional arrays has been exceeded"},
540 	{0x07279A00, 0, 0,
541 	"Data protect, other volume set problem"},
542 	{0x0B260000, 0, 0,
543 	"Aborted command, invalid descriptor"},
544 	{0x0B3F9000, 0, 0,
545 	"Target operating conditions have changed, dual adapter takeover"},
546 	{0x0B530200, 0, 0,
547 	"Aborted command, medium removal prevented"},
548 	{0x0B5A0000, 0, 0,
549 	"Command terminated by host"},
550 	{0x0B5B8000, 0, 0,
551 	"Aborted command, command terminated by host"}
552 };
553 
554 static const struct ipr_ses_table_entry ipr_ses_table[] = {
555 	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
556 	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
557 	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
558 	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
559 	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
560 	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
561 	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
562 	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
563 	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
564 	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
565 	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
566 	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
567 	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
568 };
569 
570 /*
571  *  Function Prototypes
572  */
573 static int ipr_reset_alert(struct ipr_cmnd *);
574 static void ipr_process_ccn(struct ipr_cmnd *);
575 static void ipr_process_error(struct ipr_cmnd *);
576 static void ipr_reset_ioa_job(struct ipr_cmnd *);
577 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
578 				   enum ipr_shutdown_type);
579 
580 #ifdef CONFIG_SCSI_IPR_TRACE
581 /**
582  * ipr_trc_hook - Add a trace entry to the driver trace
583  * @ipr_cmd:	ipr command struct
584  * @type:		trace type
585  * @add_data:	additional data
586  *
587  * Return value:
588  * 	none
589  **/
590 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
591 			 u8 type, u32 add_data)
592 {
593 	struct ipr_trace_entry *trace_entry;
594 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
595 
596 	trace_entry = &ioa_cfg->trace[atomic_add_return
597 			(1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
598 	trace_entry->time = jiffies;
599 	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
600 	trace_entry->type = type;
601 	if (ipr_cmd->ioa_cfg->sis64)
602 		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
603 	else
604 		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
605 	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
606 	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
607 	trace_entry->u.add_data = add_data;
608 	wmb();
609 }
610 #else
611 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
612 #endif
613 
614 /**
615  * ipr_lock_and_done - Acquire lock and complete command
616  * @ipr_cmd:	ipr command struct
617  *
618  * Return value:
619  *	none
620  **/
621 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
622 {
623 	unsigned long lock_flags;
624 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
625 
626 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
627 	ipr_cmd->done(ipr_cmd);
628 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
629 }
630 
631 /**
632  * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
633  * @ipr_cmd:	ipr command struct
634  *
635  * Return value:
636  * 	none
637  **/
638 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
639 {
640 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
641 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
642 	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
643 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
644 	int hrrq_id;
645 
646 	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
647 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
648 	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
649 	ioarcb->data_transfer_length = 0;
650 	ioarcb->read_data_transfer_length = 0;
651 	ioarcb->ioadl_len = 0;
652 	ioarcb->read_ioadl_len = 0;
653 
654 	if (ipr_cmd->ioa_cfg->sis64) {
655 		ioarcb->u.sis64_addr_data.data_ioadl_addr =
656 			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
657 		ioasa64->u.gata.status = 0;
658 	} else {
659 		ioarcb->write_ioadl_addr =
660 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
661 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
662 		ioasa->u.gata.status = 0;
663 	}
664 
665 	ioasa->hdr.ioasc = 0;
666 	ioasa->hdr.residual_data_len = 0;
667 	ipr_cmd->scsi_cmd = NULL;
668 	ipr_cmd->qc = NULL;
669 	ipr_cmd->sense_buffer[0] = 0;
670 	ipr_cmd->dma_use_sg = 0;
671 }
672 
673 /**
674  * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
675  * @ipr_cmd:	ipr command struct
676  *
677  * Return value:
678  * 	none
679  **/
680 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
681 			      void (*fast_done) (struct ipr_cmnd *))
682 {
683 	ipr_reinit_ipr_cmnd(ipr_cmd);
684 	ipr_cmd->u.scratch = 0;
685 	ipr_cmd->sibling = NULL;
686 	ipr_cmd->fast_done = fast_done;
687 	init_timer(&ipr_cmd->timer);
688 }
689 
690 /**
691  * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
692  * @ioa_cfg:	ioa config struct
693  *
694  * Return value:
695  * 	pointer to ipr command struct
696  **/
697 static
698 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
699 {
700 	struct ipr_cmnd *ipr_cmd = NULL;
701 
702 	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
703 		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
704 			struct ipr_cmnd, queue);
705 		list_del(&ipr_cmd->queue);
706 	}
707 
708 
709 	return ipr_cmd;
710 }
711 
712 /**
713  * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
714  * @ioa_cfg:	ioa config struct
715  *
716  * Return value:
717  *	pointer to ipr command struct
718  **/
719 static
720 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
721 {
722 	struct ipr_cmnd *ipr_cmd =
723 		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
724 	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
725 	return ipr_cmd;
726 }
727 
728 /**
729  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
730  * @ioa_cfg:	ioa config struct
731  * @clr_ints:     interrupts to clear
732  *
733  * This function masks all interrupts on the adapter, then clears the
734  * interrupts specified in the mask
735  *
736  * Return value:
737  * 	none
738  **/
739 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
740 					  u32 clr_ints)
741 {
742 	volatile u32 int_reg;
743 	int i;
744 
745 	/* Stop new interrupts */
746 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
747 		spin_lock(&ioa_cfg->hrrq[i]._lock);
748 		ioa_cfg->hrrq[i].allow_interrupts = 0;
749 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
750 	}
751 	wmb();
752 
753 	/* Set interrupt mask to stop all new interrupts */
754 	if (ioa_cfg->sis64)
755 		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
756 	else
757 		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
758 
759 	/* Clear any pending interrupts */
760 	if (ioa_cfg->sis64)
761 		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
762 	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
763 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
764 }
765 
766 /**
767  * ipr_save_pcix_cmd_reg - Save PCI-X command register
768  * @ioa_cfg:	ioa config struct
769  *
770  * Return value:
771  * 	0 on success / -EIO on failure
772  **/
773 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
774 {
775 	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
776 
777 	if (pcix_cmd_reg == 0)
778 		return 0;
779 
780 	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
781 				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
782 		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
783 		return -EIO;
784 	}
785 
786 	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
787 	return 0;
788 }
789 
790 /**
791  * ipr_set_pcix_cmd_reg - Setup PCI-X command register
792  * @ioa_cfg:	ioa config struct
793  *
794  * Return value:
795  * 	0 on success / -EIO on failure
796  **/
797 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
798 {
799 	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
800 
801 	if (pcix_cmd_reg) {
802 		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
803 					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
804 			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
805 			return -EIO;
806 		}
807 	}
808 
809 	return 0;
810 }
811 
812 /**
813  * ipr_sata_eh_done - done function for aborted SATA commands
814  * @ipr_cmd:	ipr command struct
815  *
816  * This function is invoked for ops generated to SATA
817  * devices which are being aborted.
818  *
819  * Return value:
820  * 	none
821  **/
822 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
823 {
824 	struct ata_queued_cmd *qc = ipr_cmd->qc;
825 	struct ipr_sata_port *sata_port = qc->ap->private_data;
826 
827 	qc->err_mask |= AC_ERR_OTHER;
828 	sata_port->ioasa.status |= ATA_BUSY;
829 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
830 	ata_qc_complete(qc);
831 }
832 
833 /**
834  * ipr_scsi_eh_done - mid-layer done function for aborted ops
835  * @ipr_cmd:	ipr command struct
836  *
837  * This function is invoked by the interrupt handler for
838  * ops generated by the SCSI mid-layer which are being aborted.
839  *
840  * Return value:
841  * 	none
842  **/
843 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
844 {
845 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
846 
847 	scsi_cmd->result |= (DID_ERROR << 16);
848 
849 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
850 	scsi_cmd->scsi_done(scsi_cmd);
851 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
852 }
853 
854 /**
855  * ipr_fail_all_ops - Fails all outstanding ops.
856  * @ioa_cfg:	ioa config struct
857  *
858  * This function fails all outstanding ops.
859  *
860  * Return value:
861  * 	none
862  **/
863 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
864 {
865 	struct ipr_cmnd *ipr_cmd, *temp;
866 	struct ipr_hrr_queue *hrrq;
867 
868 	ENTER;
869 	for_each_hrrq(hrrq, ioa_cfg) {
870 		spin_lock(&hrrq->_lock);
871 		list_for_each_entry_safe(ipr_cmd,
872 					temp, &hrrq->hrrq_pending_q, queue) {
873 			list_del(&ipr_cmd->queue);
874 
875 			ipr_cmd->s.ioasa.hdr.ioasc =
876 				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
877 			ipr_cmd->s.ioasa.hdr.ilid =
878 				cpu_to_be32(IPR_DRIVER_ILID);
879 
880 			if (ipr_cmd->scsi_cmd)
881 				ipr_cmd->done = ipr_scsi_eh_done;
882 			else if (ipr_cmd->qc)
883 				ipr_cmd->done = ipr_sata_eh_done;
884 
885 			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
886 				     IPR_IOASC_IOA_WAS_RESET);
887 			del_timer(&ipr_cmd->timer);
888 			ipr_cmd->done(ipr_cmd);
889 		}
890 		spin_unlock(&hrrq->_lock);
891 	}
892 	LEAVE;
893 }
894 
895 /**
896  * ipr_send_command -  Send driver initiated requests.
897  * @ipr_cmd:		ipr command struct
898  *
899  * This function sends a command to the adapter using the correct write call.
900  * In the case of sis64, calculate the ioarcb size required. Then or in the
901  * appropriate bits.
902  *
903  * Return value:
904  * 	none
905  **/
906 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
907 {
908 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
909 	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
910 
911 	if (ioa_cfg->sis64) {
912 		/* The default size is 256 bytes */
913 		send_dma_addr |= 0x1;
914 
915 		/* If the number of ioadls * size of ioadl > 128 bytes,
916 		   then use a 512 byte ioarcb */
917 		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
918 			send_dma_addr |= 0x4;
919 		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
920 	} else
921 		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
922 }
923 
924 /**
925  * ipr_do_req -  Send driver initiated requests.
926  * @ipr_cmd:		ipr command struct
927  * @done:			done function
928  * @timeout_func:	timeout function
929  * @timeout:		timeout value
930  *
931  * This function sends the specified command to the adapter with the
932  * timeout given. The done function is invoked on command completion.
933  *
934  * Return value:
935  * 	none
936  **/
937 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
938 		       void (*done) (struct ipr_cmnd *),
939 		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
940 {
941 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
942 
943 	ipr_cmd->done = done;
944 
945 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
946 	ipr_cmd->timer.expires = jiffies + timeout;
947 	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
948 
949 	add_timer(&ipr_cmd->timer);
950 
951 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
952 
953 	ipr_send_command(ipr_cmd);
954 }
955 
956 /**
957  * ipr_internal_cmd_done - Op done function for an internally generated op.
958  * @ipr_cmd:	ipr command struct
959  *
960  * This function is the op done function for an internally generated,
961  * blocking op. It simply wakes the sleeping thread.
962  *
963  * Return value:
964  * 	none
965  **/
966 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
967 {
968 	if (ipr_cmd->sibling)
969 		ipr_cmd->sibling = NULL;
970 	else
971 		complete(&ipr_cmd->completion);
972 }
973 
974 /**
975  * ipr_init_ioadl - initialize the ioadl for the correct SIS type
976  * @ipr_cmd:	ipr command struct
977  * @dma_addr:	dma address
978  * @len:	transfer length
979  * @flags:	ioadl flag value
980  *
981  * This function initializes an ioadl in the case where there is only a single
982  * descriptor.
983  *
984  * Return value:
985  * 	nothing
986  **/
987 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
988 			   u32 len, int flags)
989 {
990 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
991 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
992 
993 	ipr_cmd->dma_use_sg = 1;
994 
995 	if (ipr_cmd->ioa_cfg->sis64) {
996 		ioadl64->flags = cpu_to_be32(flags);
997 		ioadl64->data_len = cpu_to_be32(len);
998 		ioadl64->address = cpu_to_be64(dma_addr);
999 
1000 		ipr_cmd->ioarcb.ioadl_len =
1001 		       	cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1002 		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1003 	} else {
1004 		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1005 		ioadl->address = cpu_to_be32(dma_addr);
1006 
1007 		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1008 			ipr_cmd->ioarcb.read_ioadl_len =
1009 				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1010 			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1011 		} else {
1012 			ipr_cmd->ioarcb.ioadl_len =
1013 			       	cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1014 			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1015 		}
1016 	}
1017 }
1018 
1019 /**
1020  * ipr_send_blocking_cmd - Send command and sleep on its completion.
1021  * @ipr_cmd:	ipr command struct
1022  * @timeout_func:	function to invoke if command times out
1023  * @timeout:	timeout
1024  *
1025  * Return value:
1026  * 	none
1027  **/
1028 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1029 				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
1030 				  u32 timeout)
1031 {
1032 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1033 
1034 	init_completion(&ipr_cmd->completion);
1035 	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1036 
1037 	spin_unlock_irq(ioa_cfg->host->host_lock);
1038 	wait_for_completion(&ipr_cmd->completion);
1039 	spin_lock_irq(ioa_cfg->host->host_lock);
1040 }
1041 
1042 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1043 {
1044 	if (ioa_cfg->hrrq_num == 1)
1045 		return 0;
1046 	else
1047 		return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
1048 }
1049 
1050 /**
1051  * ipr_send_hcam - Send an HCAM to the adapter.
1052  * @ioa_cfg:	ioa config struct
1053  * @type:		HCAM type
1054  * @hostrcb:	hostrcb struct
1055  *
1056  * This function will send a Host Controlled Async command to the adapter.
1057  * If HCAMs are currently not allowed to be issued to the adapter, it will
1058  * place the hostrcb on the free queue.
1059  *
1060  * Return value:
1061  * 	none
1062  **/
1063 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1064 			  struct ipr_hostrcb *hostrcb)
1065 {
1066 	struct ipr_cmnd *ipr_cmd;
1067 	struct ipr_ioarcb *ioarcb;
1068 
1069 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1070 		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1071 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1072 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1073 
1074 		ipr_cmd->u.hostrcb = hostrcb;
1075 		ioarcb = &ipr_cmd->ioarcb;
1076 
1077 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1078 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1079 		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1080 		ioarcb->cmd_pkt.cdb[1] = type;
1081 		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1082 		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1083 
1084 		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1085 			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1086 
1087 		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1088 			ipr_cmd->done = ipr_process_ccn;
1089 		else
1090 			ipr_cmd->done = ipr_process_error;
1091 
1092 		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1093 
1094 		ipr_send_command(ipr_cmd);
1095 	} else {
1096 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1097 	}
1098 }
1099 
1100 /**
1101  * ipr_update_ata_class - Update the ata class in the resource entry
1102  * @res:	resource entry struct
1103  * @proto:	cfgte device bus protocol value
1104  *
1105  * Return value:
1106  * 	none
1107  **/
1108 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1109 {
1110 	switch (proto) {
1111 	case IPR_PROTO_SATA:
1112 	case IPR_PROTO_SAS_STP:
1113 		res->ata_class = ATA_DEV_ATA;
1114 		break;
1115 	case IPR_PROTO_SATA_ATAPI:
1116 	case IPR_PROTO_SAS_STP_ATAPI:
1117 		res->ata_class = ATA_DEV_ATAPI;
1118 		break;
1119 	default:
1120 		res->ata_class = ATA_DEV_UNKNOWN;
1121 		break;
1122 	};
1123 }
1124 
1125 /**
1126  * ipr_init_res_entry - Initialize a resource entry struct.
1127  * @res:	resource entry struct
1128  * @cfgtew:	config table entry wrapper struct
1129  *
1130  * Return value:
1131  * 	none
1132  **/
1133 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1134 			       struct ipr_config_table_entry_wrapper *cfgtew)
1135 {
1136 	int found = 0;
1137 	unsigned int proto;
1138 	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1139 	struct ipr_resource_entry *gscsi_res = NULL;
1140 
1141 	res->needs_sync_complete = 0;
1142 	res->in_erp = 0;
1143 	res->add_to_ml = 0;
1144 	res->del_from_ml = 0;
1145 	res->resetting_device = 0;
1146 	res->reset_occurred = 0;
1147 	res->sdev = NULL;
1148 	res->sata_port = NULL;
1149 
1150 	if (ioa_cfg->sis64) {
1151 		proto = cfgtew->u.cfgte64->proto;
1152 		res->res_flags = cfgtew->u.cfgte64->res_flags;
1153 		res->qmodel = IPR_QUEUEING_MODEL64(res);
1154 		res->type = cfgtew->u.cfgte64->res_type;
1155 
1156 		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1157 			sizeof(res->res_path));
1158 
1159 		res->bus = 0;
1160 		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1161 			sizeof(res->dev_lun.scsi_lun));
1162 		res->lun = scsilun_to_int(&res->dev_lun);
1163 
1164 		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1165 			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1166 				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1167 					found = 1;
1168 					res->target = gscsi_res->target;
1169 					break;
1170 				}
1171 			}
1172 			if (!found) {
1173 				res->target = find_first_zero_bit(ioa_cfg->target_ids,
1174 								  ioa_cfg->max_devs_supported);
1175 				set_bit(res->target, ioa_cfg->target_ids);
1176 			}
1177 		} else if (res->type == IPR_RES_TYPE_IOAFP) {
1178 			res->bus = IPR_IOAFP_VIRTUAL_BUS;
1179 			res->target = 0;
1180 		} else if (res->type == IPR_RES_TYPE_ARRAY) {
1181 			res->bus = IPR_ARRAY_VIRTUAL_BUS;
1182 			res->target = find_first_zero_bit(ioa_cfg->array_ids,
1183 							  ioa_cfg->max_devs_supported);
1184 			set_bit(res->target, ioa_cfg->array_ids);
1185 		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1186 			res->bus = IPR_VSET_VIRTUAL_BUS;
1187 			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1188 							  ioa_cfg->max_devs_supported);
1189 			set_bit(res->target, ioa_cfg->vset_ids);
1190 		} else {
1191 			res->target = find_first_zero_bit(ioa_cfg->target_ids,
1192 							  ioa_cfg->max_devs_supported);
1193 			set_bit(res->target, ioa_cfg->target_ids);
1194 		}
1195 	} else {
1196 		proto = cfgtew->u.cfgte->proto;
1197 		res->qmodel = IPR_QUEUEING_MODEL(res);
1198 		res->flags = cfgtew->u.cfgte->flags;
1199 		if (res->flags & IPR_IS_IOA_RESOURCE)
1200 			res->type = IPR_RES_TYPE_IOAFP;
1201 		else
1202 			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1203 
1204 		res->bus = cfgtew->u.cfgte->res_addr.bus;
1205 		res->target = cfgtew->u.cfgte->res_addr.target;
1206 		res->lun = cfgtew->u.cfgte->res_addr.lun;
1207 		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1208 	}
1209 
1210 	ipr_update_ata_class(res, proto);
1211 }
1212 
1213 /**
1214  * ipr_is_same_device - Determine if two devices are the same.
1215  * @res:	resource entry struct
1216  * @cfgtew:	config table entry wrapper struct
1217  *
1218  * Return value:
1219  * 	1 if the devices are the same / 0 otherwise
1220  **/
1221 static int ipr_is_same_device(struct ipr_resource_entry *res,
1222 			      struct ipr_config_table_entry_wrapper *cfgtew)
1223 {
1224 	if (res->ioa_cfg->sis64) {
1225 		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1226 					sizeof(cfgtew->u.cfgte64->dev_id)) &&
1227 			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1228 					sizeof(cfgtew->u.cfgte64->lun))) {
1229 			return 1;
1230 		}
1231 	} else {
1232 		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1233 		    res->target == cfgtew->u.cfgte->res_addr.target &&
1234 		    res->lun == cfgtew->u.cfgte->res_addr.lun)
1235 			return 1;
1236 	}
1237 
1238 	return 0;
1239 }
1240 
1241 /**
1242  * __ipr_format_res_path - Format the resource path for printing.
1243  * @res_path:	resource path
1244  * @buf:	buffer
1245  * @len:	length of buffer provided
1246  *
1247  * Return value:
1248  * 	pointer to buffer
1249  **/
1250 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1251 {
1252 	int i;
1253 	char *p = buffer;
1254 
1255 	*p = '\0';
1256 	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1257 	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1258 		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1259 
1260 	return buffer;
1261 }
1262 
1263 /**
1264  * ipr_format_res_path - Format the resource path for printing.
1265  * @ioa_cfg:	ioa config struct
1266  * @res_path:	resource path
1267  * @buf:	buffer
1268  * @len:	length of buffer provided
1269  *
1270  * Return value:
1271  *	pointer to buffer
1272  **/
1273 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1274 				 u8 *res_path, char *buffer, int len)
1275 {
1276 	char *p = buffer;
1277 
1278 	*p = '\0';
1279 	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1280 	__ipr_format_res_path(res_path, p, len - (buffer - p));
1281 	return buffer;
1282 }
1283 
1284 /**
1285  * ipr_update_res_entry - Update the resource entry.
1286  * @res:	resource entry struct
1287  * @cfgtew:	config table entry wrapper struct
1288  *
1289  * Return value:
1290  *      none
1291  **/
1292 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1293 				 struct ipr_config_table_entry_wrapper *cfgtew)
1294 {
1295 	char buffer[IPR_MAX_RES_PATH_LENGTH];
1296 	unsigned int proto;
1297 	int new_path = 0;
1298 
1299 	if (res->ioa_cfg->sis64) {
1300 		res->flags = cfgtew->u.cfgte64->flags;
1301 		res->res_flags = cfgtew->u.cfgte64->res_flags;
1302 		res->type = cfgtew->u.cfgte64->res_type;
1303 
1304 		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1305 			sizeof(struct ipr_std_inq_data));
1306 
1307 		res->qmodel = IPR_QUEUEING_MODEL64(res);
1308 		proto = cfgtew->u.cfgte64->proto;
1309 		res->res_handle = cfgtew->u.cfgte64->res_handle;
1310 		res->dev_id = cfgtew->u.cfgte64->dev_id;
1311 
1312 		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1313 			sizeof(res->dev_lun.scsi_lun));
1314 
1315 		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1316 					sizeof(res->res_path))) {
1317 			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1318 				sizeof(res->res_path));
1319 			new_path = 1;
1320 		}
1321 
1322 		if (res->sdev && new_path)
1323 			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1324 				    ipr_format_res_path(res->ioa_cfg,
1325 					res->res_path, buffer, sizeof(buffer)));
1326 	} else {
1327 		res->flags = cfgtew->u.cfgte->flags;
1328 		if (res->flags & IPR_IS_IOA_RESOURCE)
1329 			res->type = IPR_RES_TYPE_IOAFP;
1330 		else
1331 			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1332 
1333 		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1334 			sizeof(struct ipr_std_inq_data));
1335 
1336 		res->qmodel = IPR_QUEUEING_MODEL(res);
1337 		proto = cfgtew->u.cfgte->proto;
1338 		res->res_handle = cfgtew->u.cfgte->res_handle;
1339 	}
1340 
1341 	ipr_update_ata_class(res, proto);
1342 }
1343 
1344 /**
1345  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1346  * 			  for the resource.
1347  * @res:	resource entry struct
1348  * @cfgtew:	config table entry wrapper struct
1349  *
1350  * Return value:
1351  *      none
1352  **/
1353 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1354 {
1355 	struct ipr_resource_entry *gscsi_res = NULL;
1356 	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1357 
1358 	if (!ioa_cfg->sis64)
1359 		return;
1360 
1361 	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1362 		clear_bit(res->target, ioa_cfg->array_ids);
1363 	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1364 		clear_bit(res->target, ioa_cfg->vset_ids);
1365 	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1366 		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1367 			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1368 				return;
1369 		clear_bit(res->target, ioa_cfg->target_ids);
1370 
1371 	} else if (res->bus == 0)
1372 		clear_bit(res->target, ioa_cfg->target_ids);
1373 }
1374 
1375 /**
1376  * ipr_handle_config_change - Handle a config change from the adapter
1377  * @ioa_cfg:	ioa config struct
1378  * @hostrcb:	hostrcb
1379  *
1380  * Return value:
1381  * 	none
1382  **/
1383 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1384 				     struct ipr_hostrcb *hostrcb)
1385 {
1386 	struct ipr_resource_entry *res = NULL;
1387 	struct ipr_config_table_entry_wrapper cfgtew;
1388 	__be32 cc_res_handle;
1389 
1390 	u32 is_ndn = 1;
1391 
1392 	if (ioa_cfg->sis64) {
1393 		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1394 		cc_res_handle = cfgtew.u.cfgte64->res_handle;
1395 	} else {
1396 		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1397 		cc_res_handle = cfgtew.u.cfgte->res_handle;
1398 	}
1399 
1400 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1401 		if (res->res_handle == cc_res_handle) {
1402 			is_ndn = 0;
1403 			break;
1404 		}
1405 	}
1406 
1407 	if (is_ndn) {
1408 		if (list_empty(&ioa_cfg->free_res_q)) {
1409 			ipr_send_hcam(ioa_cfg,
1410 				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1411 				      hostrcb);
1412 			return;
1413 		}
1414 
1415 		res = list_entry(ioa_cfg->free_res_q.next,
1416 				 struct ipr_resource_entry, queue);
1417 
1418 		list_del(&res->queue);
1419 		ipr_init_res_entry(res, &cfgtew);
1420 		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1421 	}
1422 
1423 	ipr_update_res_entry(res, &cfgtew);
1424 
1425 	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1426 		if (res->sdev) {
1427 			res->del_from_ml = 1;
1428 			res->res_handle = IPR_INVALID_RES_HANDLE;
1429 			if (ioa_cfg->allow_ml_add_del)
1430 				schedule_work(&ioa_cfg->work_q);
1431 		} else {
1432 			ipr_clear_res_target(res);
1433 			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1434 		}
1435 	} else if (!res->sdev || res->del_from_ml) {
1436 		res->add_to_ml = 1;
1437 		if (ioa_cfg->allow_ml_add_del)
1438 			schedule_work(&ioa_cfg->work_q);
1439 	}
1440 
1441 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1442 }
1443 
1444 /**
1445  * ipr_process_ccn - Op done function for a CCN.
1446  * @ipr_cmd:	ipr command struct
1447  *
1448  * This function is the op done function for a configuration
1449  * change notification host controlled async from the adapter.
1450  *
1451  * Return value:
1452  * 	none
1453  **/
1454 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1455 {
1456 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1457 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1458 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1459 
1460 	list_del(&hostrcb->queue);
1461 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1462 
1463 	if (ioasc) {
1464 		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1465 			dev_err(&ioa_cfg->pdev->dev,
1466 				"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1467 
1468 		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1469 	} else {
1470 		ipr_handle_config_change(ioa_cfg, hostrcb);
1471 	}
1472 }
1473 
1474 /**
1475  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1476  * @i:		index into buffer
1477  * @buf:		string to modify
1478  *
1479  * This function will strip all trailing whitespace, pad the end
1480  * of the string with a single space, and NULL terminate the string.
1481  *
1482  * Return value:
1483  * 	new length of string
1484  **/
1485 static int strip_and_pad_whitespace(int i, char *buf)
1486 {
1487 	while (i && buf[i] == ' ')
1488 		i--;
1489 	buf[i+1] = ' ';
1490 	buf[i+2] = '\0';
1491 	return i + 2;
1492 }
1493 
1494 /**
1495  * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1496  * @prefix:		string to print at start of printk
1497  * @hostrcb:	hostrcb pointer
1498  * @vpd:		vendor/product id/sn struct
1499  *
1500  * Return value:
1501  * 	none
1502  **/
1503 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1504 				struct ipr_vpd *vpd)
1505 {
1506 	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1507 	int i = 0;
1508 
1509 	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1510 	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1511 
1512 	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1513 	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1514 
1515 	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1516 	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1517 
1518 	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1519 }
1520 
1521 /**
1522  * ipr_log_vpd - Log the passed VPD to the error log.
1523  * @vpd:		vendor/product id/sn struct
1524  *
1525  * Return value:
1526  * 	none
1527  **/
1528 static void ipr_log_vpd(struct ipr_vpd *vpd)
1529 {
1530 	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1531 		    + IPR_SERIAL_NUM_LEN];
1532 
1533 	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1534 	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1535 	       IPR_PROD_ID_LEN);
1536 	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1537 	ipr_err("Vendor/Product ID: %s\n", buffer);
1538 
1539 	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1540 	buffer[IPR_SERIAL_NUM_LEN] = '\0';
1541 	ipr_err("    Serial Number: %s\n", buffer);
1542 }
1543 
1544 /**
1545  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1546  * @prefix:		string to print at start of printk
1547  * @hostrcb:	hostrcb pointer
1548  * @vpd:		vendor/product id/sn/wwn struct
1549  *
1550  * Return value:
1551  * 	none
1552  **/
1553 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1554 				    struct ipr_ext_vpd *vpd)
1555 {
1556 	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1557 	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1558 		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1559 }
1560 
1561 /**
1562  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1563  * @vpd:		vendor/product id/sn/wwn struct
1564  *
1565  * Return value:
1566  * 	none
1567  **/
1568 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1569 {
1570 	ipr_log_vpd(&vpd->vpd);
1571 	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1572 		be32_to_cpu(vpd->wwid[1]));
1573 }
1574 
1575 /**
1576  * ipr_log_enhanced_cache_error - Log a cache error.
1577  * @ioa_cfg:	ioa config struct
1578  * @hostrcb:	hostrcb struct
1579  *
1580  * Return value:
1581  * 	none
1582  **/
1583 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1584 					 struct ipr_hostrcb *hostrcb)
1585 {
1586 	struct ipr_hostrcb_type_12_error *error;
1587 
1588 	if (ioa_cfg->sis64)
1589 		error = &hostrcb->hcam.u.error64.u.type_12_error;
1590 	else
1591 		error = &hostrcb->hcam.u.error.u.type_12_error;
1592 
1593 	ipr_err("-----Current Configuration-----\n");
1594 	ipr_err("Cache Directory Card Information:\n");
1595 	ipr_log_ext_vpd(&error->ioa_vpd);
1596 	ipr_err("Adapter Card Information:\n");
1597 	ipr_log_ext_vpd(&error->cfc_vpd);
1598 
1599 	ipr_err("-----Expected Configuration-----\n");
1600 	ipr_err("Cache Directory Card Information:\n");
1601 	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1602 	ipr_err("Adapter Card Information:\n");
1603 	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1604 
1605 	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1606 		     be32_to_cpu(error->ioa_data[0]),
1607 		     be32_to_cpu(error->ioa_data[1]),
1608 		     be32_to_cpu(error->ioa_data[2]));
1609 }
1610 
1611 /**
1612  * ipr_log_cache_error - Log a cache error.
1613  * @ioa_cfg:	ioa config struct
1614  * @hostrcb:	hostrcb struct
1615  *
1616  * Return value:
1617  * 	none
1618  **/
1619 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1620 				struct ipr_hostrcb *hostrcb)
1621 {
1622 	struct ipr_hostrcb_type_02_error *error =
1623 		&hostrcb->hcam.u.error.u.type_02_error;
1624 
1625 	ipr_err("-----Current Configuration-----\n");
1626 	ipr_err("Cache Directory Card Information:\n");
1627 	ipr_log_vpd(&error->ioa_vpd);
1628 	ipr_err("Adapter Card Information:\n");
1629 	ipr_log_vpd(&error->cfc_vpd);
1630 
1631 	ipr_err("-----Expected Configuration-----\n");
1632 	ipr_err("Cache Directory Card Information:\n");
1633 	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1634 	ipr_err("Adapter Card Information:\n");
1635 	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1636 
1637 	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1638 		     be32_to_cpu(error->ioa_data[0]),
1639 		     be32_to_cpu(error->ioa_data[1]),
1640 		     be32_to_cpu(error->ioa_data[2]));
1641 }
1642 
1643 /**
1644  * ipr_log_enhanced_config_error - Log a configuration error.
1645  * @ioa_cfg:	ioa config struct
1646  * @hostrcb:	hostrcb struct
1647  *
1648  * Return value:
1649  * 	none
1650  **/
1651 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1652 					  struct ipr_hostrcb *hostrcb)
1653 {
1654 	int errors_logged, i;
1655 	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1656 	struct ipr_hostrcb_type_13_error *error;
1657 
1658 	error = &hostrcb->hcam.u.error.u.type_13_error;
1659 	errors_logged = be32_to_cpu(error->errors_logged);
1660 
1661 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1662 		be32_to_cpu(error->errors_detected), errors_logged);
1663 
1664 	dev_entry = error->dev;
1665 
1666 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1667 		ipr_err_separator;
1668 
1669 		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1670 		ipr_log_ext_vpd(&dev_entry->vpd);
1671 
1672 		ipr_err("-----New Device Information-----\n");
1673 		ipr_log_ext_vpd(&dev_entry->new_vpd);
1674 
1675 		ipr_err("Cache Directory Card Information:\n");
1676 		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1677 
1678 		ipr_err("Adapter Card Information:\n");
1679 		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1680 	}
1681 }
1682 
1683 /**
1684  * ipr_log_sis64_config_error - Log a device error.
1685  * @ioa_cfg:	ioa config struct
1686  * @hostrcb:	hostrcb struct
1687  *
1688  * Return value:
1689  * 	none
1690  **/
1691 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1692 				       struct ipr_hostrcb *hostrcb)
1693 {
1694 	int errors_logged, i;
1695 	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1696 	struct ipr_hostrcb_type_23_error *error;
1697 	char buffer[IPR_MAX_RES_PATH_LENGTH];
1698 
1699 	error = &hostrcb->hcam.u.error64.u.type_23_error;
1700 	errors_logged = be32_to_cpu(error->errors_logged);
1701 
1702 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1703 		be32_to_cpu(error->errors_detected), errors_logged);
1704 
1705 	dev_entry = error->dev;
1706 
1707 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1708 		ipr_err_separator;
1709 
1710 		ipr_err("Device %d : %s", i + 1,
1711 			__ipr_format_res_path(dev_entry->res_path,
1712 					      buffer, sizeof(buffer)));
1713 		ipr_log_ext_vpd(&dev_entry->vpd);
1714 
1715 		ipr_err("-----New Device Information-----\n");
1716 		ipr_log_ext_vpd(&dev_entry->new_vpd);
1717 
1718 		ipr_err("Cache Directory Card Information:\n");
1719 		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1720 
1721 		ipr_err("Adapter Card Information:\n");
1722 		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1723 	}
1724 }
1725 
1726 /**
1727  * ipr_log_config_error - Log a configuration error.
1728  * @ioa_cfg:	ioa config struct
1729  * @hostrcb:	hostrcb struct
1730  *
1731  * Return value:
1732  * 	none
1733  **/
1734 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1735 				 struct ipr_hostrcb *hostrcb)
1736 {
1737 	int errors_logged, i;
1738 	struct ipr_hostrcb_device_data_entry *dev_entry;
1739 	struct ipr_hostrcb_type_03_error *error;
1740 
1741 	error = &hostrcb->hcam.u.error.u.type_03_error;
1742 	errors_logged = be32_to_cpu(error->errors_logged);
1743 
1744 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1745 		be32_to_cpu(error->errors_detected), errors_logged);
1746 
1747 	dev_entry = error->dev;
1748 
1749 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1750 		ipr_err_separator;
1751 
1752 		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1753 		ipr_log_vpd(&dev_entry->vpd);
1754 
1755 		ipr_err("-----New Device Information-----\n");
1756 		ipr_log_vpd(&dev_entry->new_vpd);
1757 
1758 		ipr_err("Cache Directory Card Information:\n");
1759 		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1760 
1761 		ipr_err("Adapter Card Information:\n");
1762 		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1763 
1764 		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1765 			be32_to_cpu(dev_entry->ioa_data[0]),
1766 			be32_to_cpu(dev_entry->ioa_data[1]),
1767 			be32_to_cpu(dev_entry->ioa_data[2]),
1768 			be32_to_cpu(dev_entry->ioa_data[3]),
1769 			be32_to_cpu(dev_entry->ioa_data[4]));
1770 	}
1771 }
1772 
1773 /**
1774  * ipr_log_enhanced_array_error - Log an array configuration error.
1775  * @ioa_cfg:	ioa config struct
1776  * @hostrcb:	hostrcb struct
1777  *
1778  * Return value:
1779  * 	none
1780  **/
1781 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1782 					 struct ipr_hostrcb *hostrcb)
1783 {
1784 	int i, num_entries;
1785 	struct ipr_hostrcb_type_14_error *error;
1786 	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1787 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1788 
1789 	error = &hostrcb->hcam.u.error.u.type_14_error;
1790 
1791 	ipr_err_separator;
1792 
1793 	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1794 		error->protection_level,
1795 		ioa_cfg->host->host_no,
1796 		error->last_func_vset_res_addr.bus,
1797 		error->last_func_vset_res_addr.target,
1798 		error->last_func_vset_res_addr.lun);
1799 
1800 	ipr_err_separator;
1801 
1802 	array_entry = error->array_member;
1803 	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1804 			    ARRAY_SIZE(error->array_member));
1805 
1806 	for (i = 0; i < num_entries; i++, array_entry++) {
1807 		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1808 			continue;
1809 
1810 		if (be32_to_cpu(error->exposed_mode_adn) == i)
1811 			ipr_err("Exposed Array Member %d:\n", i);
1812 		else
1813 			ipr_err("Array Member %d:\n", i);
1814 
1815 		ipr_log_ext_vpd(&array_entry->vpd);
1816 		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1817 		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1818 				 "Expected Location");
1819 
1820 		ipr_err_separator;
1821 	}
1822 }
1823 
1824 /**
1825  * ipr_log_array_error - Log an array configuration error.
1826  * @ioa_cfg:	ioa config struct
1827  * @hostrcb:	hostrcb struct
1828  *
1829  * Return value:
1830  * 	none
1831  **/
1832 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1833 				struct ipr_hostrcb *hostrcb)
1834 {
1835 	int i;
1836 	struct ipr_hostrcb_type_04_error *error;
1837 	struct ipr_hostrcb_array_data_entry *array_entry;
1838 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1839 
1840 	error = &hostrcb->hcam.u.error.u.type_04_error;
1841 
1842 	ipr_err_separator;
1843 
1844 	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1845 		error->protection_level,
1846 		ioa_cfg->host->host_no,
1847 		error->last_func_vset_res_addr.bus,
1848 		error->last_func_vset_res_addr.target,
1849 		error->last_func_vset_res_addr.lun);
1850 
1851 	ipr_err_separator;
1852 
1853 	array_entry = error->array_member;
1854 
1855 	for (i = 0; i < 18; i++) {
1856 		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1857 			continue;
1858 
1859 		if (be32_to_cpu(error->exposed_mode_adn) == i)
1860 			ipr_err("Exposed Array Member %d:\n", i);
1861 		else
1862 			ipr_err("Array Member %d:\n", i);
1863 
1864 		ipr_log_vpd(&array_entry->vpd);
1865 
1866 		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1867 		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1868 				 "Expected Location");
1869 
1870 		ipr_err_separator;
1871 
1872 		if (i == 9)
1873 			array_entry = error->array_member2;
1874 		else
1875 			array_entry++;
1876 	}
1877 }
1878 
1879 /**
1880  * ipr_log_hex_data - Log additional hex IOA error data.
1881  * @ioa_cfg:	ioa config struct
1882  * @data:		IOA error data
1883  * @len:		data length
1884  *
1885  * Return value:
1886  * 	none
1887  **/
1888 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1889 {
1890 	int i;
1891 
1892 	if (len == 0)
1893 		return;
1894 
1895 	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1896 		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1897 
1898 	for (i = 0; i < len / 4; i += 4) {
1899 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1900 			be32_to_cpu(data[i]),
1901 			be32_to_cpu(data[i+1]),
1902 			be32_to_cpu(data[i+2]),
1903 			be32_to_cpu(data[i+3]));
1904 	}
1905 }
1906 
1907 /**
1908  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1909  * @ioa_cfg:	ioa config struct
1910  * @hostrcb:	hostrcb struct
1911  *
1912  * Return value:
1913  * 	none
1914  **/
1915 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1916 					    struct ipr_hostrcb *hostrcb)
1917 {
1918 	struct ipr_hostrcb_type_17_error *error;
1919 
1920 	if (ioa_cfg->sis64)
1921 		error = &hostrcb->hcam.u.error64.u.type_17_error;
1922 	else
1923 		error = &hostrcb->hcam.u.error.u.type_17_error;
1924 
1925 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1926 	strim(error->failure_reason);
1927 
1928 	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1929 		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1930 	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1931 	ipr_log_hex_data(ioa_cfg, error->data,
1932 			 be32_to_cpu(hostrcb->hcam.length) -
1933 			 (offsetof(struct ipr_hostrcb_error, u) +
1934 			  offsetof(struct ipr_hostrcb_type_17_error, data)));
1935 }
1936 
1937 /**
1938  * ipr_log_dual_ioa_error - Log a dual adapter error.
1939  * @ioa_cfg:	ioa config struct
1940  * @hostrcb:	hostrcb struct
1941  *
1942  * Return value:
1943  * 	none
1944  **/
1945 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1946 				   struct ipr_hostrcb *hostrcb)
1947 {
1948 	struct ipr_hostrcb_type_07_error *error;
1949 
1950 	error = &hostrcb->hcam.u.error.u.type_07_error;
1951 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1952 	strim(error->failure_reason);
1953 
1954 	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1955 		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1956 	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1957 	ipr_log_hex_data(ioa_cfg, error->data,
1958 			 be32_to_cpu(hostrcb->hcam.length) -
1959 			 (offsetof(struct ipr_hostrcb_error, u) +
1960 			  offsetof(struct ipr_hostrcb_type_07_error, data)));
1961 }
1962 
1963 static const struct {
1964 	u8 active;
1965 	char *desc;
1966 } path_active_desc[] = {
1967 	{ IPR_PATH_NO_INFO, "Path" },
1968 	{ IPR_PATH_ACTIVE, "Active path" },
1969 	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
1970 };
1971 
1972 static const struct {
1973 	u8 state;
1974 	char *desc;
1975 } path_state_desc[] = {
1976 	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1977 	{ IPR_PATH_HEALTHY, "is healthy" },
1978 	{ IPR_PATH_DEGRADED, "is degraded" },
1979 	{ IPR_PATH_FAILED, "is failed" }
1980 };
1981 
1982 /**
1983  * ipr_log_fabric_path - Log a fabric path error
1984  * @hostrcb:	hostrcb struct
1985  * @fabric:		fabric descriptor
1986  *
1987  * Return value:
1988  * 	none
1989  **/
1990 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1991 				struct ipr_hostrcb_fabric_desc *fabric)
1992 {
1993 	int i, j;
1994 	u8 path_state = fabric->path_state;
1995 	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1996 	u8 state = path_state & IPR_PATH_STATE_MASK;
1997 
1998 	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1999 		if (path_active_desc[i].active != active)
2000 			continue;
2001 
2002 		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2003 			if (path_state_desc[j].state != state)
2004 				continue;
2005 
2006 			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2007 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2008 					     path_active_desc[i].desc, path_state_desc[j].desc,
2009 					     fabric->ioa_port);
2010 			} else if (fabric->cascaded_expander == 0xff) {
2011 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2012 					     path_active_desc[i].desc, path_state_desc[j].desc,
2013 					     fabric->ioa_port, fabric->phy);
2014 			} else if (fabric->phy == 0xff) {
2015 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2016 					     path_active_desc[i].desc, path_state_desc[j].desc,
2017 					     fabric->ioa_port, fabric->cascaded_expander);
2018 			} else {
2019 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2020 					     path_active_desc[i].desc, path_state_desc[j].desc,
2021 					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2022 			}
2023 			return;
2024 		}
2025 	}
2026 
2027 	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2028 		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2029 }
2030 
2031 /**
2032  * ipr_log64_fabric_path - Log a fabric path error
2033  * @hostrcb:	hostrcb struct
2034  * @fabric:		fabric descriptor
2035  *
2036  * Return value:
2037  * 	none
2038  **/
2039 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2040 				  struct ipr_hostrcb64_fabric_desc *fabric)
2041 {
2042 	int i, j;
2043 	u8 path_state = fabric->path_state;
2044 	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2045 	u8 state = path_state & IPR_PATH_STATE_MASK;
2046 	char buffer[IPR_MAX_RES_PATH_LENGTH];
2047 
2048 	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2049 		if (path_active_desc[i].active != active)
2050 			continue;
2051 
2052 		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2053 			if (path_state_desc[j].state != state)
2054 				continue;
2055 
2056 			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2057 				     path_active_desc[i].desc, path_state_desc[j].desc,
2058 				     ipr_format_res_path(hostrcb->ioa_cfg,
2059 						fabric->res_path,
2060 						buffer, sizeof(buffer)));
2061 			return;
2062 		}
2063 	}
2064 
2065 	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2066 		ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2067 				    buffer, sizeof(buffer)));
2068 }
2069 
2070 static const struct {
2071 	u8 type;
2072 	char *desc;
2073 } path_type_desc[] = {
2074 	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
2075 	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
2076 	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2077 	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2078 };
2079 
2080 static const struct {
2081 	u8 status;
2082 	char *desc;
2083 } path_status_desc[] = {
2084 	{ IPR_PATH_CFG_NO_PROB, "Functional" },
2085 	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
2086 	{ IPR_PATH_CFG_FAILED, "Failed" },
2087 	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
2088 	{ IPR_PATH_NOT_DETECTED, "Missing" },
2089 	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2090 };
2091 
2092 static const char *link_rate[] = {
2093 	"unknown",
2094 	"disabled",
2095 	"phy reset problem",
2096 	"spinup hold",
2097 	"port selector",
2098 	"unknown",
2099 	"unknown",
2100 	"unknown",
2101 	"1.5Gbps",
2102 	"3.0Gbps",
2103 	"unknown",
2104 	"unknown",
2105 	"unknown",
2106 	"unknown",
2107 	"unknown",
2108 	"unknown"
2109 };
2110 
2111 /**
2112  * ipr_log_path_elem - Log a fabric path element.
2113  * @hostrcb:	hostrcb struct
2114  * @cfg:		fabric path element struct
2115  *
2116  * Return value:
2117  * 	none
2118  **/
2119 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2120 			      struct ipr_hostrcb_config_element *cfg)
2121 {
2122 	int i, j;
2123 	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2124 	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2125 
2126 	if (type == IPR_PATH_CFG_NOT_EXIST)
2127 		return;
2128 
2129 	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2130 		if (path_type_desc[i].type != type)
2131 			continue;
2132 
2133 		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2134 			if (path_status_desc[j].status != status)
2135 				continue;
2136 
2137 			if (type == IPR_PATH_CFG_IOA_PORT) {
2138 				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2139 					     path_status_desc[j].desc, path_type_desc[i].desc,
2140 					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2141 					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2142 			} else {
2143 				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2144 					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2145 						     path_status_desc[j].desc, path_type_desc[i].desc,
2146 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2147 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2148 				} else if (cfg->cascaded_expander == 0xff) {
2149 					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2150 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2151 						     path_type_desc[i].desc, cfg->phy,
2152 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2153 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2154 				} else if (cfg->phy == 0xff) {
2155 					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2156 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2157 						     path_type_desc[i].desc, cfg->cascaded_expander,
2158 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2159 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2160 				} else {
2161 					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2162 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2163 						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2164 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2165 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2166 				}
2167 			}
2168 			return;
2169 		}
2170 	}
2171 
2172 	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2173 		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2174 		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2175 		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2176 }
2177 
2178 /**
2179  * ipr_log64_path_elem - Log a fabric path element.
2180  * @hostrcb:	hostrcb struct
2181  * @cfg:		fabric path element struct
2182  *
2183  * Return value:
2184  * 	none
2185  **/
2186 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2187 				struct ipr_hostrcb64_config_element *cfg)
2188 {
2189 	int i, j;
2190 	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2191 	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2192 	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2193 	char buffer[IPR_MAX_RES_PATH_LENGTH];
2194 
2195 	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2196 		return;
2197 
2198 	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2199 		if (path_type_desc[i].type != type)
2200 			continue;
2201 
2202 		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2203 			if (path_status_desc[j].status != status)
2204 				continue;
2205 
2206 			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2207 				     path_status_desc[j].desc, path_type_desc[i].desc,
2208 				     ipr_format_res_path(hostrcb->ioa_cfg,
2209 					cfg->res_path, buffer, sizeof(buffer)),
2210 					link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2211 					be32_to_cpu(cfg->wwid[0]),
2212 					be32_to_cpu(cfg->wwid[1]));
2213 			return;
2214 		}
2215 	}
2216 	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2217 		     "WWN=%08X%08X\n", cfg->type_status,
2218 		     ipr_format_res_path(hostrcb->ioa_cfg,
2219 			cfg->res_path, buffer, sizeof(buffer)),
2220 			link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2221 			be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2222 }
2223 
2224 /**
2225  * ipr_log_fabric_error - Log a fabric error.
2226  * @ioa_cfg:	ioa config struct
2227  * @hostrcb:	hostrcb struct
2228  *
2229  * Return value:
2230  * 	none
2231  **/
2232 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2233 				 struct ipr_hostrcb *hostrcb)
2234 {
2235 	struct ipr_hostrcb_type_20_error *error;
2236 	struct ipr_hostrcb_fabric_desc *fabric;
2237 	struct ipr_hostrcb_config_element *cfg;
2238 	int i, add_len;
2239 
2240 	error = &hostrcb->hcam.u.error.u.type_20_error;
2241 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2242 	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2243 
2244 	add_len = be32_to_cpu(hostrcb->hcam.length) -
2245 		(offsetof(struct ipr_hostrcb_error, u) +
2246 		 offsetof(struct ipr_hostrcb_type_20_error, desc));
2247 
2248 	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2249 		ipr_log_fabric_path(hostrcb, fabric);
2250 		for_each_fabric_cfg(fabric, cfg)
2251 			ipr_log_path_elem(hostrcb, cfg);
2252 
2253 		add_len -= be16_to_cpu(fabric->length);
2254 		fabric = (struct ipr_hostrcb_fabric_desc *)
2255 			((unsigned long)fabric + be16_to_cpu(fabric->length));
2256 	}
2257 
2258 	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2259 }
2260 
2261 /**
2262  * ipr_log_sis64_array_error - Log a sis64 array error.
2263  * @ioa_cfg:	ioa config struct
2264  * @hostrcb:	hostrcb struct
2265  *
2266  * Return value:
2267  * 	none
2268  **/
2269 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2270 				      struct ipr_hostrcb *hostrcb)
2271 {
2272 	int i, num_entries;
2273 	struct ipr_hostrcb_type_24_error *error;
2274 	struct ipr_hostrcb64_array_data_entry *array_entry;
2275 	char buffer[IPR_MAX_RES_PATH_LENGTH];
2276 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2277 
2278 	error = &hostrcb->hcam.u.error64.u.type_24_error;
2279 
2280 	ipr_err_separator;
2281 
2282 	ipr_err("RAID %s Array Configuration: %s\n",
2283 		error->protection_level,
2284 		ipr_format_res_path(ioa_cfg, error->last_res_path,
2285 			buffer, sizeof(buffer)));
2286 
2287 	ipr_err_separator;
2288 
2289 	array_entry = error->array_member;
2290 	num_entries = min_t(u32, error->num_entries,
2291 			    ARRAY_SIZE(error->array_member));
2292 
2293 	for (i = 0; i < num_entries; i++, array_entry++) {
2294 
2295 		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2296 			continue;
2297 
2298 		if (error->exposed_mode_adn == i)
2299 			ipr_err("Exposed Array Member %d:\n", i);
2300 		else
2301 			ipr_err("Array Member %d:\n", i);
2302 
2303 		ipr_err("Array Member %d:\n", i);
2304 		ipr_log_ext_vpd(&array_entry->vpd);
2305 		ipr_err("Current Location: %s\n",
2306 			 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2307 				buffer, sizeof(buffer)));
2308 		ipr_err("Expected Location: %s\n",
2309 			 ipr_format_res_path(ioa_cfg,
2310 				array_entry->expected_res_path,
2311 				buffer, sizeof(buffer)));
2312 
2313 		ipr_err_separator;
2314 	}
2315 }
2316 
2317 /**
2318  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2319  * @ioa_cfg:	ioa config struct
2320  * @hostrcb:	hostrcb struct
2321  *
2322  * Return value:
2323  * 	none
2324  **/
2325 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2326 				       struct ipr_hostrcb *hostrcb)
2327 {
2328 	struct ipr_hostrcb_type_30_error *error;
2329 	struct ipr_hostrcb64_fabric_desc *fabric;
2330 	struct ipr_hostrcb64_config_element *cfg;
2331 	int i, add_len;
2332 
2333 	error = &hostrcb->hcam.u.error64.u.type_30_error;
2334 
2335 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2336 	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2337 
2338 	add_len = be32_to_cpu(hostrcb->hcam.length) -
2339 		(offsetof(struct ipr_hostrcb64_error, u) +
2340 		 offsetof(struct ipr_hostrcb_type_30_error, desc));
2341 
2342 	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2343 		ipr_log64_fabric_path(hostrcb, fabric);
2344 		for_each_fabric_cfg(fabric, cfg)
2345 			ipr_log64_path_elem(hostrcb, cfg);
2346 
2347 		add_len -= be16_to_cpu(fabric->length);
2348 		fabric = (struct ipr_hostrcb64_fabric_desc *)
2349 			((unsigned long)fabric + be16_to_cpu(fabric->length));
2350 	}
2351 
2352 	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2353 }
2354 
2355 /**
2356  * ipr_log_generic_error - Log an adapter error.
2357  * @ioa_cfg:	ioa config struct
2358  * @hostrcb:	hostrcb struct
2359  *
2360  * Return value:
2361  * 	none
2362  **/
2363 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2364 				  struct ipr_hostrcb *hostrcb)
2365 {
2366 	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2367 			 be32_to_cpu(hostrcb->hcam.length));
2368 }
2369 
2370 /**
2371  * ipr_log_sis64_device_error - Log a cache error.
2372  * @ioa_cfg:	ioa config struct
2373  * @hostrcb:	hostrcb struct
2374  *
2375  * Return value:
2376  * 	none
2377  **/
2378 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2379 					 struct ipr_hostrcb *hostrcb)
2380 {
2381 	struct ipr_hostrcb_type_21_error *error;
2382 	char buffer[IPR_MAX_RES_PATH_LENGTH];
2383 
2384 	error = &hostrcb->hcam.u.error64.u.type_21_error;
2385 
2386 	ipr_err("-----Failing Device Information-----\n");
2387 	ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2388 		be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2389 		 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2390 	ipr_err("Device Resource Path: %s\n",
2391 		__ipr_format_res_path(error->res_path,
2392 				      buffer, sizeof(buffer)));
2393 	error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2394 	error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2395 	ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2396 	ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2397 	ipr_err("SCSI Sense Data:\n");
2398 	ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2399 	ipr_err("SCSI Command Descriptor Block: \n");
2400 	ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2401 
2402 	ipr_err("Additional IOA Data:\n");
2403 	ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2404 }
2405 
2406 /**
2407  * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2408  * @ioasc:	IOASC
2409  *
2410  * This function will return the index of into the ipr_error_table
2411  * for the specified IOASC. If the IOASC is not in the table,
2412  * 0 will be returned, which points to the entry used for unknown errors.
2413  *
2414  * Return value:
2415  * 	index into the ipr_error_table
2416  **/
2417 static u32 ipr_get_error(u32 ioasc)
2418 {
2419 	int i;
2420 
2421 	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2422 		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2423 			return i;
2424 
2425 	return 0;
2426 }
2427 
2428 /**
2429  * ipr_handle_log_data - Log an adapter error.
2430  * @ioa_cfg:	ioa config struct
2431  * @hostrcb:	hostrcb struct
2432  *
2433  * This function logs an adapter error to the system.
2434  *
2435  * Return value:
2436  * 	none
2437  **/
2438 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2439 				struct ipr_hostrcb *hostrcb)
2440 {
2441 	u32 ioasc;
2442 	int error_index;
2443 
2444 	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2445 		return;
2446 
2447 	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2448 		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2449 
2450 	if (ioa_cfg->sis64)
2451 		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2452 	else
2453 		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2454 
2455 	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2456 	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2457 		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
2458 		scsi_report_bus_reset(ioa_cfg->host,
2459 				      hostrcb->hcam.u.error.fd_res_addr.bus);
2460 	}
2461 
2462 	error_index = ipr_get_error(ioasc);
2463 
2464 	if (!ipr_error_table[error_index].log_hcam)
2465 		return;
2466 
2467 	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2468 
2469 	/* Set indication we have logged an error */
2470 	ioa_cfg->errors_logged++;
2471 
2472 	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2473 		return;
2474 	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2475 		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2476 
2477 	switch (hostrcb->hcam.overlay_id) {
2478 	case IPR_HOST_RCB_OVERLAY_ID_2:
2479 		ipr_log_cache_error(ioa_cfg, hostrcb);
2480 		break;
2481 	case IPR_HOST_RCB_OVERLAY_ID_3:
2482 		ipr_log_config_error(ioa_cfg, hostrcb);
2483 		break;
2484 	case IPR_HOST_RCB_OVERLAY_ID_4:
2485 	case IPR_HOST_RCB_OVERLAY_ID_6:
2486 		ipr_log_array_error(ioa_cfg, hostrcb);
2487 		break;
2488 	case IPR_HOST_RCB_OVERLAY_ID_7:
2489 		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2490 		break;
2491 	case IPR_HOST_RCB_OVERLAY_ID_12:
2492 		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2493 		break;
2494 	case IPR_HOST_RCB_OVERLAY_ID_13:
2495 		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2496 		break;
2497 	case IPR_HOST_RCB_OVERLAY_ID_14:
2498 	case IPR_HOST_RCB_OVERLAY_ID_16:
2499 		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2500 		break;
2501 	case IPR_HOST_RCB_OVERLAY_ID_17:
2502 		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2503 		break;
2504 	case IPR_HOST_RCB_OVERLAY_ID_20:
2505 		ipr_log_fabric_error(ioa_cfg, hostrcb);
2506 		break;
2507 	case IPR_HOST_RCB_OVERLAY_ID_21:
2508 		ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2509 		break;
2510 	case IPR_HOST_RCB_OVERLAY_ID_23:
2511 		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2512 		break;
2513 	case IPR_HOST_RCB_OVERLAY_ID_24:
2514 	case IPR_HOST_RCB_OVERLAY_ID_26:
2515 		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2516 		break;
2517 	case IPR_HOST_RCB_OVERLAY_ID_30:
2518 		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2519 		break;
2520 	case IPR_HOST_RCB_OVERLAY_ID_1:
2521 	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2522 	default:
2523 		ipr_log_generic_error(ioa_cfg, hostrcb);
2524 		break;
2525 	}
2526 }
2527 
2528 /**
2529  * ipr_process_error - Op done function for an adapter error log.
2530  * @ipr_cmd:	ipr command struct
2531  *
2532  * This function is the op done function for an error log host
2533  * controlled async from the adapter. It will log the error and
2534  * send the HCAM back to the adapter.
2535  *
2536  * Return value:
2537  * 	none
2538  **/
2539 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2540 {
2541 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2542 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2543 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2544 	u32 fd_ioasc;
2545 
2546 	if (ioa_cfg->sis64)
2547 		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2548 	else
2549 		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2550 
2551 	list_del(&hostrcb->queue);
2552 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2553 
2554 	if (!ioasc) {
2555 		ipr_handle_log_data(ioa_cfg, hostrcb);
2556 		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2557 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2558 	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2559 		dev_err(&ioa_cfg->pdev->dev,
2560 			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
2561 	}
2562 
2563 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2564 }
2565 
2566 /**
2567  * ipr_timeout -  An internally generated op has timed out.
2568  * @ipr_cmd:	ipr command struct
2569  *
2570  * This function blocks host requests and initiates an
2571  * adapter reset.
2572  *
2573  * Return value:
2574  * 	none
2575  **/
2576 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2577 {
2578 	unsigned long lock_flags = 0;
2579 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2580 
2581 	ENTER;
2582 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2583 
2584 	ioa_cfg->errors_logged++;
2585 	dev_err(&ioa_cfg->pdev->dev,
2586 		"Adapter being reset due to command timeout.\n");
2587 
2588 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2589 		ioa_cfg->sdt_state = GET_DUMP;
2590 
2591 	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2592 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2593 
2594 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2595 	LEAVE;
2596 }
2597 
2598 /**
2599  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2600  * @ipr_cmd:	ipr command struct
2601  *
2602  * This function blocks host requests and initiates an
2603  * adapter reset.
2604  *
2605  * Return value:
2606  * 	none
2607  **/
2608 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2609 {
2610 	unsigned long lock_flags = 0;
2611 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2612 
2613 	ENTER;
2614 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2615 
2616 	ioa_cfg->errors_logged++;
2617 	dev_err(&ioa_cfg->pdev->dev,
2618 		"Adapter timed out transitioning to operational.\n");
2619 
2620 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2621 		ioa_cfg->sdt_state = GET_DUMP;
2622 
2623 	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2624 		if (ipr_fastfail)
2625 			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2626 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2627 	}
2628 
2629 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2630 	LEAVE;
2631 }
2632 
2633 /**
2634  * ipr_find_ses_entry - Find matching SES in SES table
2635  * @res:	resource entry struct of SES
2636  *
2637  * Return value:
2638  * 	pointer to SES table entry / NULL on failure
2639  **/
2640 static const struct ipr_ses_table_entry *
2641 ipr_find_ses_entry(struct ipr_resource_entry *res)
2642 {
2643 	int i, j, matches;
2644 	struct ipr_std_inq_vpids *vpids;
2645 	const struct ipr_ses_table_entry *ste = ipr_ses_table;
2646 
2647 	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2648 		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2649 			if (ste->compare_product_id_byte[j] == 'X') {
2650 				vpids = &res->std_inq_data.vpids;
2651 				if (vpids->product_id[j] == ste->product_id[j])
2652 					matches++;
2653 				else
2654 					break;
2655 			} else
2656 				matches++;
2657 		}
2658 
2659 		if (matches == IPR_PROD_ID_LEN)
2660 			return ste;
2661 	}
2662 
2663 	return NULL;
2664 }
2665 
2666 /**
2667  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2668  * @ioa_cfg:	ioa config struct
2669  * @bus:		SCSI bus
2670  * @bus_width:	bus width
2671  *
2672  * Return value:
2673  *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2674  *	For a 2-byte wide SCSI bus, the maximum transfer speed is
2675  *	twice the maximum transfer rate (e.g. for a wide enabled bus,
2676  *	max 160MHz = max 320MB/sec).
2677  **/
2678 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2679 {
2680 	struct ipr_resource_entry *res;
2681 	const struct ipr_ses_table_entry *ste;
2682 	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2683 
2684 	/* Loop through each config table entry in the config table buffer */
2685 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2686 		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2687 			continue;
2688 
2689 		if (bus != res->bus)
2690 			continue;
2691 
2692 		if (!(ste = ipr_find_ses_entry(res)))
2693 			continue;
2694 
2695 		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2696 	}
2697 
2698 	return max_xfer_rate;
2699 }
2700 
2701 /**
2702  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2703  * @ioa_cfg:		ioa config struct
2704  * @max_delay:		max delay in micro-seconds to wait
2705  *
2706  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2707  *
2708  * Return value:
2709  * 	0 on success / other on failure
2710  **/
2711 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2712 {
2713 	volatile u32 pcii_reg;
2714 	int delay = 1;
2715 
2716 	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
2717 	while (delay < max_delay) {
2718 		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2719 
2720 		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2721 			return 0;
2722 
2723 		/* udelay cannot be used if delay is more than a few milliseconds */
2724 		if ((delay / 1000) > MAX_UDELAY_MS)
2725 			mdelay(delay / 1000);
2726 		else
2727 			udelay(delay);
2728 
2729 		delay += delay;
2730 	}
2731 	return -EIO;
2732 }
2733 
2734 /**
2735  * ipr_get_sis64_dump_data_section - Dump IOA memory
2736  * @ioa_cfg:			ioa config struct
2737  * @start_addr:			adapter address to dump
2738  * @dest:			destination kernel buffer
2739  * @length_in_words:		length to dump in 4 byte words
2740  *
2741  * Return value:
2742  * 	0 on success
2743  **/
2744 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2745 					   u32 start_addr,
2746 					   __be32 *dest, u32 length_in_words)
2747 {
2748 	int i;
2749 
2750 	for (i = 0; i < length_in_words; i++) {
2751 		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2752 		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2753 		dest++;
2754 	}
2755 
2756 	return 0;
2757 }
2758 
2759 /**
2760  * ipr_get_ldump_data_section - Dump IOA memory
2761  * @ioa_cfg:			ioa config struct
2762  * @start_addr:			adapter address to dump
2763  * @dest:				destination kernel buffer
2764  * @length_in_words:	length to dump in 4 byte words
2765  *
2766  * Return value:
2767  * 	0 on success / -EIO on failure
2768  **/
2769 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2770 				      u32 start_addr,
2771 				      __be32 *dest, u32 length_in_words)
2772 {
2773 	volatile u32 temp_pcii_reg;
2774 	int i, delay = 0;
2775 
2776 	if (ioa_cfg->sis64)
2777 		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2778 						       dest, length_in_words);
2779 
2780 	/* Write IOA interrupt reg starting LDUMP state  */
2781 	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2782 	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2783 
2784 	/* Wait for IO debug acknowledge */
2785 	if (ipr_wait_iodbg_ack(ioa_cfg,
2786 			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2787 		dev_err(&ioa_cfg->pdev->dev,
2788 			"IOA dump long data transfer timeout\n");
2789 		return -EIO;
2790 	}
2791 
2792 	/* Signal LDUMP interlocked - clear IO debug ack */
2793 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2794 	       ioa_cfg->regs.clr_interrupt_reg);
2795 
2796 	/* Write Mailbox with starting address */
2797 	writel(start_addr, ioa_cfg->ioa_mailbox);
2798 
2799 	/* Signal address valid - clear IOA Reset alert */
2800 	writel(IPR_UPROCI_RESET_ALERT,
2801 	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2802 
2803 	for (i = 0; i < length_in_words; i++) {
2804 		/* Wait for IO debug acknowledge */
2805 		if (ipr_wait_iodbg_ack(ioa_cfg,
2806 				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2807 			dev_err(&ioa_cfg->pdev->dev,
2808 				"IOA dump short data transfer timeout\n");
2809 			return -EIO;
2810 		}
2811 
2812 		/* Read data from mailbox and increment destination pointer */
2813 		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2814 		dest++;
2815 
2816 		/* For all but the last word of data, signal data received */
2817 		if (i < (length_in_words - 1)) {
2818 			/* Signal dump data received - Clear IO debug Ack */
2819 			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2820 			       ioa_cfg->regs.clr_interrupt_reg);
2821 		}
2822 	}
2823 
2824 	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
2825 	writel(IPR_UPROCI_RESET_ALERT,
2826 	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2827 
2828 	writel(IPR_UPROCI_IO_DEBUG_ALERT,
2829 	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2830 
2831 	/* Signal dump data received - Clear IO debug Ack */
2832 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2833 	       ioa_cfg->regs.clr_interrupt_reg);
2834 
2835 	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2836 	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2837 		temp_pcii_reg =
2838 		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2839 
2840 		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2841 			return 0;
2842 
2843 		udelay(10);
2844 		delay += 10;
2845 	}
2846 
2847 	return 0;
2848 }
2849 
2850 #ifdef CONFIG_SCSI_IPR_DUMP
2851 /**
2852  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2853  * @ioa_cfg:		ioa config struct
2854  * @pci_address:	adapter address
2855  * @length:			length of data to copy
2856  *
2857  * Copy data from PCI adapter to kernel buffer.
2858  * Note: length MUST be a 4 byte multiple
2859  * Return value:
2860  * 	0 on success / other on failure
2861  **/
2862 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2863 			unsigned long pci_address, u32 length)
2864 {
2865 	int bytes_copied = 0;
2866 	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2867 	__be32 *page;
2868 	unsigned long lock_flags = 0;
2869 	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2870 
2871 	if (ioa_cfg->sis64)
2872 		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2873 	else
2874 		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2875 
2876 	while (bytes_copied < length &&
2877 	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2878 		if (ioa_dump->page_offset >= PAGE_SIZE ||
2879 		    ioa_dump->page_offset == 0) {
2880 			page = (__be32 *)__get_free_page(GFP_ATOMIC);
2881 
2882 			if (!page) {
2883 				ipr_trace;
2884 				return bytes_copied;
2885 			}
2886 
2887 			ioa_dump->page_offset = 0;
2888 			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2889 			ioa_dump->next_page_index++;
2890 		} else
2891 			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2892 
2893 		rem_len = length - bytes_copied;
2894 		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2895 		cur_len = min(rem_len, rem_page_len);
2896 
2897 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2898 		if (ioa_cfg->sdt_state == ABORT_DUMP) {
2899 			rc = -EIO;
2900 		} else {
2901 			rc = ipr_get_ldump_data_section(ioa_cfg,
2902 							pci_address + bytes_copied,
2903 							&page[ioa_dump->page_offset / 4],
2904 							(cur_len / sizeof(u32)));
2905 		}
2906 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2907 
2908 		if (!rc) {
2909 			ioa_dump->page_offset += cur_len;
2910 			bytes_copied += cur_len;
2911 		} else {
2912 			ipr_trace;
2913 			break;
2914 		}
2915 		schedule();
2916 	}
2917 
2918 	return bytes_copied;
2919 }
2920 
2921 /**
2922  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2923  * @hdr:	dump entry header struct
2924  *
2925  * Return value:
2926  * 	nothing
2927  **/
2928 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2929 {
2930 	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2931 	hdr->num_elems = 1;
2932 	hdr->offset = sizeof(*hdr);
2933 	hdr->status = IPR_DUMP_STATUS_SUCCESS;
2934 }
2935 
2936 /**
2937  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2938  * @ioa_cfg:	ioa config struct
2939  * @driver_dump:	driver dump struct
2940  *
2941  * Return value:
2942  * 	nothing
2943  **/
2944 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2945 				   struct ipr_driver_dump *driver_dump)
2946 {
2947 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2948 
2949 	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2950 	driver_dump->ioa_type_entry.hdr.len =
2951 		sizeof(struct ipr_dump_ioa_type_entry) -
2952 		sizeof(struct ipr_dump_entry_header);
2953 	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2954 	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2955 	driver_dump->ioa_type_entry.type = ioa_cfg->type;
2956 	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2957 		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2958 		ucode_vpd->minor_release[1];
2959 	driver_dump->hdr.num_entries++;
2960 }
2961 
2962 /**
2963  * ipr_dump_version_data - Fill in the driver version in the dump.
2964  * @ioa_cfg:	ioa config struct
2965  * @driver_dump:	driver dump struct
2966  *
2967  * Return value:
2968  * 	nothing
2969  **/
2970 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2971 				  struct ipr_driver_dump *driver_dump)
2972 {
2973 	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2974 	driver_dump->version_entry.hdr.len =
2975 		sizeof(struct ipr_dump_version_entry) -
2976 		sizeof(struct ipr_dump_entry_header);
2977 	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2978 	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2979 	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2980 	driver_dump->hdr.num_entries++;
2981 }
2982 
2983 /**
2984  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2985  * @ioa_cfg:	ioa config struct
2986  * @driver_dump:	driver dump struct
2987  *
2988  * Return value:
2989  * 	nothing
2990  **/
2991 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2992 				   struct ipr_driver_dump *driver_dump)
2993 {
2994 	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2995 	driver_dump->trace_entry.hdr.len =
2996 		sizeof(struct ipr_dump_trace_entry) -
2997 		sizeof(struct ipr_dump_entry_header);
2998 	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2999 	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3000 	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3001 	driver_dump->hdr.num_entries++;
3002 }
3003 
3004 /**
3005  * ipr_dump_location_data - Fill in the IOA location in the dump.
3006  * @ioa_cfg:	ioa config struct
3007  * @driver_dump:	driver dump struct
3008  *
3009  * Return value:
3010  * 	nothing
3011  **/
3012 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3013 				   struct ipr_driver_dump *driver_dump)
3014 {
3015 	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3016 	driver_dump->location_entry.hdr.len =
3017 		sizeof(struct ipr_dump_location_entry) -
3018 		sizeof(struct ipr_dump_entry_header);
3019 	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3020 	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3021 	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3022 	driver_dump->hdr.num_entries++;
3023 }
3024 
3025 /**
3026  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3027  * @ioa_cfg:	ioa config struct
3028  * @dump:		dump struct
3029  *
3030  * Return value:
3031  * 	nothing
3032  **/
3033 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3034 {
3035 	unsigned long start_addr, sdt_word;
3036 	unsigned long lock_flags = 0;
3037 	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3038 	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3039 	u32 num_entries, max_num_entries, start_off, end_off;
3040 	u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3041 	struct ipr_sdt *sdt;
3042 	int valid = 1;
3043 	int i;
3044 
3045 	ENTER;
3046 
3047 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3048 
3049 	if (ioa_cfg->sdt_state != READ_DUMP) {
3050 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3051 		return;
3052 	}
3053 
3054 	if (ioa_cfg->sis64) {
3055 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3056 		ssleep(IPR_DUMP_DELAY_SECONDS);
3057 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3058 	}
3059 
3060 	start_addr = readl(ioa_cfg->ioa_mailbox);
3061 
3062 	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3063 		dev_err(&ioa_cfg->pdev->dev,
3064 			"Invalid dump table format: %lx\n", start_addr);
3065 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3066 		return;
3067 	}
3068 
3069 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3070 
3071 	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3072 
3073 	/* Initialize the overall dump header */
3074 	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3075 	driver_dump->hdr.num_entries = 1;
3076 	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3077 	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3078 	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3079 	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3080 
3081 	ipr_dump_version_data(ioa_cfg, driver_dump);
3082 	ipr_dump_location_data(ioa_cfg, driver_dump);
3083 	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3084 	ipr_dump_trace_data(ioa_cfg, driver_dump);
3085 
3086 	/* Update dump_header */
3087 	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3088 
3089 	/* IOA Dump entry */
3090 	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3091 	ioa_dump->hdr.len = 0;
3092 	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3093 	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3094 
3095 	/* First entries in sdt are actually a list of dump addresses and
3096 	 lengths to gather the real dump data.  sdt represents the pointer
3097 	 to the ioa generated dump table.  Dump data will be extracted based
3098 	 on entries in this table */
3099 	sdt = &ioa_dump->sdt;
3100 
3101 	if (ioa_cfg->sis64) {
3102 		max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3103 		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3104 	} else {
3105 		max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3106 		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3107 	}
3108 
3109 	bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3110 			(max_num_entries * sizeof(struct ipr_sdt_entry));
3111 	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3112 					bytes_to_copy / sizeof(__be32));
3113 
3114 	/* Smart Dump table is ready to use and the first entry is valid */
3115 	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3116 	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3117 		dev_err(&ioa_cfg->pdev->dev,
3118 			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
3119 			rc, be32_to_cpu(sdt->hdr.state));
3120 		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3121 		ioa_cfg->sdt_state = DUMP_OBTAINED;
3122 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3123 		return;
3124 	}
3125 
3126 	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3127 
3128 	if (num_entries > max_num_entries)
3129 		num_entries = max_num_entries;
3130 
3131 	/* Update dump length to the actual data to be copied */
3132 	dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3133 	if (ioa_cfg->sis64)
3134 		dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3135 	else
3136 		dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3137 
3138 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3139 
3140 	for (i = 0; i < num_entries; i++) {
3141 		if (ioa_dump->hdr.len > max_dump_size) {
3142 			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3143 			break;
3144 		}
3145 
3146 		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3147 			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3148 			if (ioa_cfg->sis64)
3149 				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3150 			else {
3151 				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3152 				end_off = be32_to_cpu(sdt->entry[i].end_token);
3153 
3154 				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3155 					bytes_to_copy = end_off - start_off;
3156 				else
3157 					valid = 0;
3158 			}
3159 			if (valid) {
3160 				if (bytes_to_copy > max_dump_size) {
3161 					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3162 					continue;
3163 				}
3164 
3165 				/* Copy data from adapter to driver buffers */
3166 				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3167 							    bytes_to_copy);
3168 
3169 				ioa_dump->hdr.len += bytes_copied;
3170 
3171 				if (bytes_copied != bytes_to_copy) {
3172 					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3173 					break;
3174 				}
3175 			}
3176 		}
3177 	}
3178 
3179 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3180 
3181 	/* Update dump_header */
3182 	driver_dump->hdr.len += ioa_dump->hdr.len;
3183 	wmb();
3184 	ioa_cfg->sdt_state = DUMP_OBTAINED;
3185 	LEAVE;
3186 }
3187 
3188 #else
3189 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3190 #endif
3191 
3192 /**
3193  * ipr_release_dump - Free adapter dump memory
3194  * @kref:	kref struct
3195  *
3196  * Return value:
3197  *	nothing
3198  **/
3199 static void ipr_release_dump(struct kref *kref)
3200 {
3201 	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3202 	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3203 	unsigned long lock_flags = 0;
3204 	int i;
3205 
3206 	ENTER;
3207 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3208 	ioa_cfg->dump = NULL;
3209 	ioa_cfg->sdt_state = INACTIVE;
3210 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3211 
3212 	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3213 		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3214 
3215 	vfree(dump->ioa_dump.ioa_data);
3216 	kfree(dump);
3217 	LEAVE;
3218 }
3219 
3220 /**
3221  * ipr_worker_thread - Worker thread
3222  * @work:		ioa config struct
3223  *
3224  * Called at task level from a work thread. This function takes care
3225  * of adding and removing device from the mid-layer as configuration
3226  * changes are detected by the adapter.
3227  *
3228  * Return value:
3229  * 	nothing
3230  **/
3231 static void ipr_worker_thread(struct work_struct *work)
3232 {
3233 	unsigned long lock_flags;
3234 	struct ipr_resource_entry *res;
3235 	struct scsi_device *sdev;
3236 	struct ipr_dump *dump;
3237 	struct ipr_ioa_cfg *ioa_cfg =
3238 		container_of(work, struct ipr_ioa_cfg, work_q);
3239 	u8 bus, target, lun;
3240 	int did_work;
3241 
3242 	ENTER;
3243 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3244 
3245 	if (ioa_cfg->sdt_state == READ_DUMP) {
3246 		dump = ioa_cfg->dump;
3247 		if (!dump) {
3248 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3249 			return;
3250 		}
3251 		kref_get(&dump->kref);
3252 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3253 		ipr_get_ioa_dump(ioa_cfg, dump);
3254 		kref_put(&dump->kref, ipr_release_dump);
3255 
3256 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3257 		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3258 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3259 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3260 		return;
3261 	}
3262 
3263 restart:
3264 	do {
3265 		did_work = 0;
3266 		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3267 		    !ioa_cfg->allow_ml_add_del) {
3268 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3269 			return;
3270 		}
3271 
3272 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3273 			if (res->del_from_ml && res->sdev) {
3274 				did_work = 1;
3275 				sdev = res->sdev;
3276 				if (!scsi_device_get(sdev)) {
3277 					if (!res->add_to_ml)
3278 						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3279 					else
3280 						res->del_from_ml = 0;
3281 					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3282 					scsi_remove_device(sdev);
3283 					scsi_device_put(sdev);
3284 					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3285 				}
3286 				break;
3287 			}
3288 		}
3289 	} while (did_work);
3290 
3291 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3292 		if (res->add_to_ml) {
3293 			bus = res->bus;
3294 			target = res->target;
3295 			lun = res->lun;
3296 			res->add_to_ml = 0;
3297 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3298 			scsi_add_device(ioa_cfg->host, bus, target, lun);
3299 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3300 			goto restart;
3301 		}
3302 	}
3303 
3304 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3305 	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3306 	LEAVE;
3307 }
3308 
3309 #ifdef CONFIG_SCSI_IPR_TRACE
3310 /**
3311  * ipr_read_trace - Dump the adapter trace
3312  * @filp:		open sysfs file
3313  * @kobj:		kobject struct
3314  * @bin_attr:		bin_attribute struct
3315  * @buf:		buffer
3316  * @off:		offset
3317  * @count:		buffer size
3318  *
3319  * Return value:
3320  *	number of bytes printed to buffer
3321  **/
3322 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3323 			      struct bin_attribute *bin_attr,
3324 			      char *buf, loff_t off, size_t count)
3325 {
3326 	struct device *dev = container_of(kobj, struct device, kobj);
3327 	struct Scsi_Host *shost = class_to_shost(dev);
3328 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3329 	unsigned long lock_flags = 0;
3330 	ssize_t ret;
3331 
3332 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3333 	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3334 				IPR_TRACE_SIZE);
3335 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3336 
3337 	return ret;
3338 }
3339 
3340 static struct bin_attribute ipr_trace_attr = {
3341 	.attr =	{
3342 		.name = "trace",
3343 		.mode = S_IRUGO,
3344 	},
3345 	.size = 0,
3346 	.read = ipr_read_trace,
3347 };
3348 #endif
3349 
3350 /**
3351  * ipr_show_fw_version - Show the firmware version
3352  * @dev:	class device struct
3353  * @buf:	buffer
3354  *
3355  * Return value:
3356  *	number of bytes printed to buffer
3357  **/
3358 static ssize_t ipr_show_fw_version(struct device *dev,
3359 				   struct device_attribute *attr, char *buf)
3360 {
3361 	struct Scsi_Host *shost = class_to_shost(dev);
3362 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3363 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3364 	unsigned long lock_flags = 0;
3365 	int len;
3366 
3367 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3368 	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3369 		       ucode_vpd->major_release, ucode_vpd->card_type,
3370 		       ucode_vpd->minor_release[0],
3371 		       ucode_vpd->minor_release[1]);
3372 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3373 	return len;
3374 }
3375 
3376 static struct device_attribute ipr_fw_version_attr = {
3377 	.attr = {
3378 		.name =		"fw_version",
3379 		.mode =		S_IRUGO,
3380 	},
3381 	.show = ipr_show_fw_version,
3382 };
3383 
3384 /**
3385  * ipr_show_log_level - Show the adapter's error logging level
3386  * @dev:	class device struct
3387  * @buf:	buffer
3388  *
3389  * Return value:
3390  * 	number of bytes printed to buffer
3391  **/
3392 static ssize_t ipr_show_log_level(struct device *dev,
3393 				   struct device_attribute *attr, char *buf)
3394 {
3395 	struct Scsi_Host *shost = class_to_shost(dev);
3396 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3397 	unsigned long lock_flags = 0;
3398 	int len;
3399 
3400 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3401 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3402 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3403 	return len;
3404 }
3405 
3406 /**
3407  * ipr_store_log_level - Change the adapter's error logging level
3408  * @dev:	class device struct
3409  * @buf:	buffer
3410  *
3411  * Return value:
3412  * 	number of bytes printed to buffer
3413  **/
3414 static ssize_t ipr_store_log_level(struct device *dev,
3415 				   struct device_attribute *attr,
3416 				   const char *buf, size_t count)
3417 {
3418 	struct Scsi_Host *shost = class_to_shost(dev);
3419 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3420 	unsigned long lock_flags = 0;
3421 
3422 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3423 	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3424 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3425 	return strlen(buf);
3426 }
3427 
3428 static struct device_attribute ipr_log_level_attr = {
3429 	.attr = {
3430 		.name =		"log_level",
3431 		.mode =		S_IRUGO | S_IWUSR,
3432 	},
3433 	.show = ipr_show_log_level,
3434 	.store = ipr_store_log_level
3435 };
3436 
3437 /**
3438  * ipr_store_diagnostics - IOA Diagnostics interface
3439  * @dev:	device struct
3440  * @buf:	buffer
3441  * @count:	buffer size
3442  *
3443  * This function will reset the adapter and wait a reasonable
3444  * amount of time for any errors that the adapter might log.
3445  *
3446  * Return value:
3447  * 	count on success / other on failure
3448  **/
3449 static ssize_t ipr_store_diagnostics(struct device *dev,
3450 				     struct device_attribute *attr,
3451 				     const char *buf, size_t count)
3452 {
3453 	struct Scsi_Host *shost = class_to_shost(dev);
3454 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3455 	unsigned long lock_flags = 0;
3456 	int rc = count;
3457 
3458 	if (!capable(CAP_SYS_ADMIN))
3459 		return -EACCES;
3460 
3461 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3462 	while (ioa_cfg->in_reset_reload) {
3463 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3464 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3465 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3466 	}
3467 
3468 	ioa_cfg->errors_logged = 0;
3469 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3470 
3471 	if (ioa_cfg->in_reset_reload) {
3472 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3473 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3474 
3475 		/* Wait for a second for any errors to be logged */
3476 		msleep(1000);
3477 	} else {
3478 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3479 		return -EIO;
3480 	}
3481 
3482 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3483 	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3484 		rc = -EIO;
3485 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3486 
3487 	return rc;
3488 }
3489 
3490 static struct device_attribute ipr_diagnostics_attr = {
3491 	.attr = {
3492 		.name =		"run_diagnostics",
3493 		.mode =		S_IWUSR,
3494 	},
3495 	.store = ipr_store_diagnostics
3496 };
3497 
3498 /**
3499  * ipr_show_adapter_state - Show the adapter's state
3500  * @class_dev:	device struct
3501  * @buf:	buffer
3502  *
3503  * Return value:
3504  * 	number of bytes printed to buffer
3505  **/
3506 static ssize_t ipr_show_adapter_state(struct device *dev,
3507 				      struct device_attribute *attr, char *buf)
3508 {
3509 	struct Scsi_Host *shost = class_to_shost(dev);
3510 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3511 	unsigned long lock_flags = 0;
3512 	int len;
3513 
3514 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3515 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3516 		len = snprintf(buf, PAGE_SIZE, "offline\n");
3517 	else
3518 		len = snprintf(buf, PAGE_SIZE, "online\n");
3519 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3520 	return len;
3521 }
3522 
3523 /**
3524  * ipr_store_adapter_state - Change adapter state
3525  * @dev:	device struct
3526  * @buf:	buffer
3527  * @count:	buffer size
3528  *
3529  * This function will change the adapter's state.
3530  *
3531  * Return value:
3532  * 	count on success / other on failure
3533  **/
3534 static ssize_t ipr_store_adapter_state(struct device *dev,
3535 				       struct device_attribute *attr,
3536 				       const char *buf, size_t count)
3537 {
3538 	struct Scsi_Host *shost = class_to_shost(dev);
3539 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3540 	unsigned long lock_flags;
3541 	int result = count, i;
3542 
3543 	if (!capable(CAP_SYS_ADMIN))
3544 		return -EACCES;
3545 
3546 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3547 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3548 	    !strncmp(buf, "online", 6)) {
3549 		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3550 			spin_lock(&ioa_cfg->hrrq[i]._lock);
3551 			ioa_cfg->hrrq[i].ioa_is_dead = 0;
3552 			spin_unlock(&ioa_cfg->hrrq[i]._lock);
3553 		}
3554 		wmb();
3555 		ioa_cfg->reset_retries = 0;
3556 		ioa_cfg->in_ioa_bringdown = 0;
3557 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3558 	}
3559 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3560 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3561 
3562 	return result;
3563 }
3564 
3565 static struct device_attribute ipr_ioa_state_attr = {
3566 	.attr = {
3567 		.name =		"online_state",
3568 		.mode =		S_IRUGO | S_IWUSR,
3569 	},
3570 	.show = ipr_show_adapter_state,
3571 	.store = ipr_store_adapter_state
3572 };
3573 
3574 /**
3575  * ipr_store_reset_adapter - Reset the adapter
3576  * @dev:	device struct
3577  * @buf:	buffer
3578  * @count:	buffer size
3579  *
3580  * This function will reset the adapter.
3581  *
3582  * Return value:
3583  * 	count on success / other on failure
3584  **/
3585 static ssize_t ipr_store_reset_adapter(struct device *dev,
3586 				       struct device_attribute *attr,
3587 				       const char *buf, size_t count)
3588 {
3589 	struct Scsi_Host *shost = class_to_shost(dev);
3590 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3591 	unsigned long lock_flags;
3592 	int result = count;
3593 
3594 	if (!capable(CAP_SYS_ADMIN))
3595 		return -EACCES;
3596 
3597 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3598 	if (!ioa_cfg->in_reset_reload)
3599 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3600 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3601 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3602 
3603 	return result;
3604 }
3605 
3606 static struct device_attribute ipr_ioa_reset_attr = {
3607 	.attr = {
3608 		.name =		"reset_host",
3609 		.mode =		S_IWUSR,
3610 	},
3611 	.store = ipr_store_reset_adapter
3612 };
3613 
3614 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3615  /**
3616  * ipr_show_iopoll_weight - Show ipr polling mode
3617  * @dev:	class device struct
3618  * @buf:	buffer
3619  *
3620  * Return value:
3621  *	number of bytes printed to buffer
3622  **/
3623 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3624 				   struct device_attribute *attr, char *buf)
3625 {
3626 	struct Scsi_Host *shost = class_to_shost(dev);
3627 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3628 	unsigned long lock_flags = 0;
3629 	int len;
3630 
3631 	spin_lock_irqsave(shost->host_lock, lock_flags);
3632 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3633 	spin_unlock_irqrestore(shost->host_lock, lock_flags);
3634 
3635 	return len;
3636 }
3637 
3638 /**
3639  * ipr_store_iopoll_weight - Change the adapter's polling mode
3640  * @dev:	class device struct
3641  * @buf:	buffer
3642  *
3643  * Return value:
3644  *	number of bytes printed to buffer
3645  **/
3646 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3647 					struct device_attribute *attr,
3648 					const char *buf, size_t count)
3649 {
3650 	struct Scsi_Host *shost = class_to_shost(dev);
3651 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3652 	unsigned long user_iopoll_weight;
3653 	unsigned long lock_flags = 0;
3654 	int i;
3655 
3656 	if (!ioa_cfg->sis64) {
3657 		dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3658 		return -EINVAL;
3659 	}
3660 	if (kstrtoul(buf, 10, &user_iopoll_weight))
3661 		return -EINVAL;
3662 
3663 	if (user_iopoll_weight > 256) {
3664 		dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n");
3665 		return -EINVAL;
3666 	}
3667 
3668 	if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3669 		dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n");
3670 		return strlen(buf);
3671 	}
3672 
3673 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3674 		for (i = 1; i < ioa_cfg->hrrq_num; i++)
3675 			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3676 	}
3677 
3678 	spin_lock_irqsave(shost->host_lock, lock_flags);
3679 	ioa_cfg->iopoll_weight = user_iopoll_weight;
3680 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3681 		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3682 			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3683 					ioa_cfg->iopoll_weight, ipr_iopoll);
3684 			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3685 		}
3686 	}
3687 	spin_unlock_irqrestore(shost->host_lock, lock_flags);
3688 
3689 	return strlen(buf);
3690 }
3691 
3692 static struct device_attribute ipr_iopoll_weight_attr = {
3693 	.attr = {
3694 		.name =		"iopoll_weight",
3695 		.mode =		S_IRUGO | S_IWUSR,
3696 	},
3697 	.show = ipr_show_iopoll_weight,
3698 	.store = ipr_store_iopoll_weight
3699 };
3700 
3701 /**
3702  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3703  * @buf_len:		buffer length
3704  *
3705  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3706  * list to use for microcode download
3707  *
3708  * Return value:
3709  * 	pointer to sglist / NULL on failure
3710  **/
3711 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3712 {
3713 	int sg_size, order, bsize_elem, num_elem, i, j;
3714 	struct ipr_sglist *sglist;
3715 	struct scatterlist *scatterlist;
3716 	struct page *page;
3717 
3718 	/* Get the minimum size per scatter/gather element */
3719 	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3720 
3721 	/* Get the actual size per element */
3722 	order = get_order(sg_size);
3723 
3724 	/* Determine the actual number of bytes per element */
3725 	bsize_elem = PAGE_SIZE * (1 << order);
3726 
3727 	/* Determine the actual number of sg entries needed */
3728 	if (buf_len % bsize_elem)
3729 		num_elem = (buf_len / bsize_elem) + 1;
3730 	else
3731 		num_elem = buf_len / bsize_elem;
3732 
3733 	/* Allocate a scatter/gather list for the DMA */
3734 	sglist = kzalloc(sizeof(struct ipr_sglist) +
3735 			 (sizeof(struct scatterlist) * (num_elem - 1)),
3736 			 GFP_KERNEL);
3737 
3738 	if (sglist == NULL) {
3739 		ipr_trace;
3740 		return NULL;
3741 	}
3742 
3743 	scatterlist = sglist->scatterlist;
3744 	sg_init_table(scatterlist, num_elem);
3745 
3746 	sglist->order = order;
3747 	sglist->num_sg = num_elem;
3748 
3749 	/* Allocate a bunch of sg elements */
3750 	for (i = 0; i < num_elem; i++) {
3751 		page = alloc_pages(GFP_KERNEL, order);
3752 		if (!page) {
3753 			ipr_trace;
3754 
3755 			/* Free up what we already allocated */
3756 			for (j = i - 1; j >= 0; j--)
3757 				__free_pages(sg_page(&scatterlist[j]), order);
3758 			kfree(sglist);
3759 			return NULL;
3760 		}
3761 
3762 		sg_set_page(&scatterlist[i], page, 0, 0);
3763 	}
3764 
3765 	return sglist;
3766 }
3767 
3768 /**
3769  * ipr_free_ucode_buffer - Frees a microcode download buffer
3770  * @p_dnld:		scatter/gather list pointer
3771  *
3772  * Free a DMA'able ucode download buffer previously allocated with
3773  * ipr_alloc_ucode_buffer
3774  *
3775  * Return value:
3776  * 	nothing
3777  **/
3778 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3779 {
3780 	int i;
3781 
3782 	for (i = 0; i < sglist->num_sg; i++)
3783 		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3784 
3785 	kfree(sglist);
3786 }
3787 
3788 /**
3789  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3790  * @sglist:		scatter/gather list pointer
3791  * @buffer:		buffer pointer
3792  * @len:		buffer length
3793  *
3794  * Copy a microcode image from a user buffer into a buffer allocated by
3795  * ipr_alloc_ucode_buffer
3796  *
3797  * Return value:
3798  * 	0 on success / other on failure
3799  **/
3800 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3801 				 u8 *buffer, u32 len)
3802 {
3803 	int bsize_elem, i, result = 0;
3804 	struct scatterlist *scatterlist;
3805 	void *kaddr;
3806 
3807 	/* Determine the actual number of bytes per element */
3808 	bsize_elem = PAGE_SIZE * (1 << sglist->order);
3809 
3810 	scatterlist = sglist->scatterlist;
3811 
3812 	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3813 		struct page *page = sg_page(&scatterlist[i]);
3814 
3815 		kaddr = kmap(page);
3816 		memcpy(kaddr, buffer, bsize_elem);
3817 		kunmap(page);
3818 
3819 		scatterlist[i].length = bsize_elem;
3820 
3821 		if (result != 0) {
3822 			ipr_trace;
3823 			return result;
3824 		}
3825 	}
3826 
3827 	if (len % bsize_elem) {
3828 		struct page *page = sg_page(&scatterlist[i]);
3829 
3830 		kaddr = kmap(page);
3831 		memcpy(kaddr, buffer, len % bsize_elem);
3832 		kunmap(page);
3833 
3834 		scatterlist[i].length = len % bsize_elem;
3835 	}
3836 
3837 	sglist->buffer_len = len;
3838 	return result;
3839 }
3840 
3841 /**
3842  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3843  * @ipr_cmd:		ipr command struct
3844  * @sglist:		scatter/gather list
3845  *
3846  * Builds a microcode download IOA data list (IOADL).
3847  *
3848  **/
3849 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3850 				    struct ipr_sglist *sglist)
3851 {
3852 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3853 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3854 	struct scatterlist *scatterlist = sglist->scatterlist;
3855 	int i;
3856 
3857 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3858 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3859 	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3860 
3861 	ioarcb->ioadl_len =
3862 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3863 	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3864 		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3865 		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3866 		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3867 	}
3868 
3869 	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3870 }
3871 
3872 /**
3873  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3874  * @ipr_cmd:	ipr command struct
3875  * @sglist:		scatter/gather list
3876  *
3877  * Builds a microcode download IOA data list (IOADL).
3878  *
3879  **/
3880 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3881 				  struct ipr_sglist *sglist)
3882 {
3883 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3884 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3885 	struct scatterlist *scatterlist = sglist->scatterlist;
3886 	int i;
3887 
3888 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3889 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3890 	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3891 
3892 	ioarcb->ioadl_len =
3893 		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3894 
3895 	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3896 		ioadl[i].flags_and_data_len =
3897 			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3898 		ioadl[i].address =
3899 			cpu_to_be32(sg_dma_address(&scatterlist[i]));
3900 	}
3901 
3902 	ioadl[i-1].flags_and_data_len |=
3903 		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3904 }
3905 
3906 /**
3907  * ipr_update_ioa_ucode - Update IOA's microcode
3908  * @ioa_cfg:	ioa config struct
3909  * @sglist:		scatter/gather list
3910  *
3911  * Initiate an adapter reset to update the IOA's microcode
3912  *
3913  * Return value:
3914  * 	0 on success / -EIO on failure
3915  **/
3916 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3917 				struct ipr_sglist *sglist)
3918 {
3919 	unsigned long lock_flags;
3920 
3921 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3922 	while (ioa_cfg->in_reset_reload) {
3923 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3924 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3925 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3926 	}
3927 
3928 	if (ioa_cfg->ucode_sglist) {
3929 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3930 		dev_err(&ioa_cfg->pdev->dev,
3931 			"Microcode download already in progress\n");
3932 		return -EIO;
3933 	}
3934 
3935 	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3936 					sglist->num_sg, DMA_TO_DEVICE);
3937 
3938 	if (!sglist->num_dma_sg) {
3939 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3940 		dev_err(&ioa_cfg->pdev->dev,
3941 			"Failed to map microcode download buffer!\n");
3942 		return -EIO;
3943 	}
3944 
3945 	ioa_cfg->ucode_sglist = sglist;
3946 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3947 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3948 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3949 
3950 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3951 	ioa_cfg->ucode_sglist = NULL;
3952 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3953 	return 0;
3954 }
3955 
3956 /**
3957  * ipr_store_update_fw - Update the firmware on the adapter
3958  * @class_dev:	device struct
3959  * @buf:	buffer
3960  * @count:	buffer size
3961  *
3962  * This function will update the firmware on the adapter.
3963  *
3964  * Return value:
3965  * 	count on success / other on failure
3966  **/
3967 static ssize_t ipr_store_update_fw(struct device *dev,
3968 				   struct device_attribute *attr,
3969 				   const char *buf, size_t count)
3970 {
3971 	struct Scsi_Host *shost = class_to_shost(dev);
3972 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3973 	struct ipr_ucode_image_header *image_hdr;
3974 	const struct firmware *fw_entry;
3975 	struct ipr_sglist *sglist;
3976 	char fname[100];
3977 	char *src;
3978 	int len, result, dnld_size;
3979 
3980 	if (!capable(CAP_SYS_ADMIN))
3981 		return -EACCES;
3982 
3983 	len = snprintf(fname, 99, "%s", buf);
3984 	fname[len-1] = '\0';
3985 
3986 	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3987 		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3988 		return -EIO;
3989 	}
3990 
3991 	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3992 
3993 	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3994 	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3995 	sglist = ipr_alloc_ucode_buffer(dnld_size);
3996 
3997 	if (!sglist) {
3998 		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3999 		release_firmware(fw_entry);
4000 		return -ENOMEM;
4001 	}
4002 
4003 	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4004 
4005 	if (result) {
4006 		dev_err(&ioa_cfg->pdev->dev,
4007 			"Microcode buffer copy to DMA buffer failed\n");
4008 		goto out;
4009 	}
4010 
4011 	ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4012 
4013 	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4014 
4015 	if (!result)
4016 		result = count;
4017 out:
4018 	ipr_free_ucode_buffer(sglist);
4019 	release_firmware(fw_entry);
4020 	return result;
4021 }
4022 
4023 static struct device_attribute ipr_update_fw_attr = {
4024 	.attr = {
4025 		.name =		"update_fw",
4026 		.mode =		S_IWUSR,
4027 	},
4028 	.store = ipr_store_update_fw
4029 };
4030 
4031 /**
4032  * ipr_show_fw_type - Show the adapter's firmware type.
4033  * @dev:	class device struct
4034  * @buf:	buffer
4035  *
4036  * Return value:
4037  *	number of bytes printed to buffer
4038  **/
4039 static ssize_t ipr_show_fw_type(struct device *dev,
4040 				struct device_attribute *attr, char *buf)
4041 {
4042 	struct Scsi_Host *shost = class_to_shost(dev);
4043 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4044 	unsigned long lock_flags = 0;
4045 	int len;
4046 
4047 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4048 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4049 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4050 	return len;
4051 }
4052 
4053 static struct device_attribute ipr_ioa_fw_type_attr = {
4054 	.attr = {
4055 		.name =		"fw_type",
4056 		.mode =		S_IRUGO,
4057 	},
4058 	.show = ipr_show_fw_type
4059 };
4060 
4061 static struct device_attribute *ipr_ioa_attrs[] = {
4062 	&ipr_fw_version_attr,
4063 	&ipr_log_level_attr,
4064 	&ipr_diagnostics_attr,
4065 	&ipr_ioa_state_attr,
4066 	&ipr_ioa_reset_attr,
4067 	&ipr_update_fw_attr,
4068 	&ipr_ioa_fw_type_attr,
4069 	&ipr_iopoll_weight_attr,
4070 	NULL,
4071 };
4072 
4073 #ifdef CONFIG_SCSI_IPR_DUMP
4074 /**
4075  * ipr_read_dump - Dump the adapter
4076  * @filp:		open sysfs file
4077  * @kobj:		kobject struct
4078  * @bin_attr:		bin_attribute struct
4079  * @buf:		buffer
4080  * @off:		offset
4081  * @count:		buffer size
4082  *
4083  * Return value:
4084  *	number of bytes printed to buffer
4085  **/
4086 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4087 			     struct bin_attribute *bin_attr,
4088 			     char *buf, loff_t off, size_t count)
4089 {
4090 	struct device *cdev = container_of(kobj, struct device, kobj);
4091 	struct Scsi_Host *shost = class_to_shost(cdev);
4092 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4093 	struct ipr_dump *dump;
4094 	unsigned long lock_flags = 0;
4095 	char *src;
4096 	int len, sdt_end;
4097 	size_t rc = count;
4098 
4099 	if (!capable(CAP_SYS_ADMIN))
4100 		return -EACCES;
4101 
4102 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4103 	dump = ioa_cfg->dump;
4104 
4105 	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4106 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4107 		return 0;
4108 	}
4109 	kref_get(&dump->kref);
4110 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4111 
4112 	if (off > dump->driver_dump.hdr.len) {
4113 		kref_put(&dump->kref, ipr_release_dump);
4114 		return 0;
4115 	}
4116 
4117 	if (off + count > dump->driver_dump.hdr.len) {
4118 		count = dump->driver_dump.hdr.len - off;
4119 		rc = count;
4120 	}
4121 
4122 	if (count && off < sizeof(dump->driver_dump)) {
4123 		if (off + count > sizeof(dump->driver_dump))
4124 			len = sizeof(dump->driver_dump) - off;
4125 		else
4126 			len = count;
4127 		src = (u8 *)&dump->driver_dump + off;
4128 		memcpy(buf, src, len);
4129 		buf += len;
4130 		off += len;
4131 		count -= len;
4132 	}
4133 
4134 	off -= sizeof(dump->driver_dump);
4135 
4136 	if (ioa_cfg->sis64)
4137 		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4138 			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4139 			   sizeof(struct ipr_sdt_entry));
4140 	else
4141 		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4142 			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4143 
4144 	if (count && off < sdt_end) {
4145 		if (off + count > sdt_end)
4146 			len = sdt_end - off;
4147 		else
4148 			len = count;
4149 		src = (u8 *)&dump->ioa_dump + off;
4150 		memcpy(buf, src, len);
4151 		buf += len;
4152 		off += len;
4153 		count -= len;
4154 	}
4155 
4156 	off -= sdt_end;
4157 
4158 	while (count) {
4159 		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4160 			len = PAGE_ALIGN(off) - off;
4161 		else
4162 			len = count;
4163 		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4164 		src += off & ~PAGE_MASK;
4165 		memcpy(buf, src, len);
4166 		buf += len;
4167 		off += len;
4168 		count -= len;
4169 	}
4170 
4171 	kref_put(&dump->kref, ipr_release_dump);
4172 	return rc;
4173 }
4174 
4175 /**
4176  * ipr_alloc_dump - Prepare for adapter dump
4177  * @ioa_cfg:	ioa config struct
4178  *
4179  * Return value:
4180  *	0 on success / other on failure
4181  **/
4182 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4183 {
4184 	struct ipr_dump *dump;
4185 	__be32 **ioa_data;
4186 	unsigned long lock_flags = 0;
4187 
4188 	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4189 
4190 	if (!dump) {
4191 		ipr_err("Dump memory allocation failed\n");
4192 		return -ENOMEM;
4193 	}
4194 
4195 	if (ioa_cfg->sis64)
4196 		ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4197 	else
4198 		ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4199 
4200 	if (!ioa_data) {
4201 		ipr_err("Dump memory allocation failed\n");
4202 		kfree(dump);
4203 		return -ENOMEM;
4204 	}
4205 
4206 	dump->ioa_dump.ioa_data = ioa_data;
4207 
4208 	kref_init(&dump->kref);
4209 	dump->ioa_cfg = ioa_cfg;
4210 
4211 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4212 
4213 	if (INACTIVE != ioa_cfg->sdt_state) {
4214 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4215 		vfree(dump->ioa_dump.ioa_data);
4216 		kfree(dump);
4217 		return 0;
4218 	}
4219 
4220 	ioa_cfg->dump = dump;
4221 	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4222 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4223 		ioa_cfg->dump_taken = 1;
4224 		schedule_work(&ioa_cfg->work_q);
4225 	}
4226 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4227 
4228 	return 0;
4229 }
4230 
4231 /**
4232  * ipr_free_dump - Free adapter dump memory
4233  * @ioa_cfg:	ioa config struct
4234  *
4235  * Return value:
4236  *	0 on success / other on failure
4237  **/
4238 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4239 {
4240 	struct ipr_dump *dump;
4241 	unsigned long lock_flags = 0;
4242 
4243 	ENTER;
4244 
4245 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4246 	dump = ioa_cfg->dump;
4247 	if (!dump) {
4248 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4249 		return 0;
4250 	}
4251 
4252 	ioa_cfg->dump = NULL;
4253 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4254 
4255 	kref_put(&dump->kref, ipr_release_dump);
4256 
4257 	LEAVE;
4258 	return 0;
4259 }
4260 
4261 /**
4262  * ipr_write_dump - Setup dump state of adapter
4263  * @filp:		open sysfs file
4264  * @kobj:		kobject struct
4265  * @bin_attr:		bin_attribute struct
4266  * @buf:		buffer
4267  * @off:		offset
4268  * @count:		buffer size
4269  *
4270  * Return value:
4271  *	number of bytes printed to buffer
4272  **/
4273 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4274 			      struct bin_attribute *bin_attr,
4275 			      char *buf, loff_t off, size_t count)
4276 {
4277 	struct device *cdev = container_of(kobj, struct device, kobj);
4278 	struct Scsi_Host *shost = class_to_shost(cdev);
4279 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4280 	int rc;
4281 
4282 	if (!capable(CAP_SYS_ADMIN))
4283 		return -EACCES;
4284 
4285 	if (buf[0] == '1')
4286 		rc = ipr_alloc_dump(ioa_cfg);
4287 	else if (buf[0] == '0')
4288 		rc = ipr_free_dump(ioa_cfg);
4289 	else
4290 		return -EINVAL;
4291 
4292 	if (rc)
4293 		return rc;
4294 	else
4295 		return count;
4296 }
4297 
4298 static struct bin_attribute ipr_dump_attr = {
4299 	.attr =	{
4300 		.name = "dump",
4301 		.mode = S_IRUSR | S_IWUSR,
4302 	},
4303 	.size = 0,
4304 	.read = ipr_read_dump,
4305 	.write = ipr_write_dump
4306 };
4307 #else
4308 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4309 #endif
4310 
4311 /**
4312  * ipr_change_queue_depth - Change the device's queue depth
4313  * @sdev:	scsi device struct
4314  * @qdepth:	depth to set
4315  * @reason:	calling context
4316  *
4317  * Return value:
4318  * 	actual depth set
4319  **/
4320 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4321 				  int reason)
4322 {
4323 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4324 	struct ipr_resource_entry *res;
4325 	unsigned long lock_flags = 0;
4326 
4327 	if (reason != SCSI_QDEPTH_DEFAULT)
4328 		return -EOPNOTSUPP;
4329 
4330 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4331 	res = (struct ipr_resource_entry *)sdev->hostdata;
4332 
4333 	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4334 		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4335 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4336 
4337 	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4338 	return sdev->queue_depth;
4339 }
4340 
4341 /**
4342  * ipr_change_queue_type - Change the device's queue type
4343  * @dsev:		scsi device struct
4344  * @tag_type:	type of tags to use
4345  *
4346  * Return value:
4347  * 	actual queue type set
4348  **/
4349 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4350 {
4351 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4352 	struct ipr_resource_entry *res;
4353 	unsigned long lock_flags = 0;
4354 
4355 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4356 	res = (struct ipr_resource_entry *)sdev->hostdata;
4357 
4358 	if (res) {
4359 		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4360 			/*
4361 			 * We don't bother quiescing the device here since the
4362 			 * adapter firmware does it for us.
4363 			 */
4364 			scsi_set_tag_type(sdev, tag_type);
4365 
4366 			if (tag_type)
4367 				scsi_activate_tcq(sdev, sdev->queue_depth);
4368 			else
4369 				scsi_deactivate_tcq(sdev, sdev->queue_depth);
4370 		} else
4371 			tag_type = 0;
4372 	} else
4373 		tag_type = 0;
4374 
4375 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4376 	return tag_type;
4377 }
4378 
4379 /**
4380  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4381  * @dev:	device struct
4382  * @attr:	device attribute structure
4383  * @buf:	buffer
4384  *
4385  * Return value:
4386  * 	number of bytes printed to buffer
4387  **/
4388 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4389 {
4390 	struct scsi_device *sdev = to_scsi_device(dev);
4391 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4392 	struct ipr_resource_entry *res;
4393 	unsigned long lock_flags = 0;
4394 	ssize_t len = -ENXIO;
4395 
4396 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4397 	res = (struct ipr_resource_entry *)sdev->hostdata;
4398 	if (res)
4399 		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4400 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4401 	return len;
4402 }
4403 
4404 static struct device_attribute ipr_adapter_handle_attr = {
4405 	.attr = {
4406 		.name = 	"adapter_handle",
4407 		.mode =		S_IRUSR,
4408 	},
4409 	.show = ipr_show_adapter_handle
4410 };
4411 
4412 /**
4413  * ipr_show_resource_path - Show the resource path or the resource address for
4414  *			    this device.
4415  * @dev:	device struct
4416  * @attr:	device attribute structure
4417  * @buf:	buffer
4418  *
4419  * Return value:
4420  * 	number of bytes printed to buffer
4421  **/
4422 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4423 {
4424 	struct scsi_device *sdev = to_scsi_device(dev);
4425 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4426 	struct ipr_resource_entry *res;
4427 	unsigned long lock_flags = 0;
4428 	ssize_t len = -ENXIO;
4429 	char buffer[IPR_MAX_RES_PATH_LENGTH];
4430 
4431 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4432 	res = (struct ipr_resource_entry *)sdev->hostdata;
4433 	if (res && ioa_cfg->sis64)
4434 		len = snprintf(buf, PAGE_SIZE, "%s\n",
4435 			       __ipr_format_res_path(res->res_path, buffer,
4436 						     sizeof(buffer)));
4437 	else if (res)
4438 		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4439 			       res->bus, res->target, res->lun);
4440 
4441 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4442 	return len;
4443 }
4444 
4445 static struct device_attribute ipr_resource_path_attr = {
4446 	.attr = {
4447 		.name = 	"resource_path",
4448 		.mode =		S_IRUGO,
4449 	},
4450 	.show = ipr_show_resource_path
4451 };
4452 
4453 /**
4454  * ipr_show_device_id - Show the device_id for this device.
4455  * @dev:	device struct
4456  * @attr:	device attribute structure
4457  * @buf:	buffer
4458  *
4459  * Return value:
4460  *	number of bytes printed to buffer
4461  **/
4462 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4463 {
4464 	struct scsi_device *sdev = to_scsi_device(dev);
4465 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4466 	struct ipr_resource_entry *res;
4467 	unsigned long lock_flags = 0;
4468 	ssize_t len = -ENXIO;
4469 
4470 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4471 	res = (struct ipr_resource_entry *)sdev->hostdata;
4472 	if (res && ioa_cfg->sis64)
4473 		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4474 	else if (res)
4475 		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4476 
4477 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4478 	return len;
4479 }
4480 
4481 static struct device_attribute ipr_device_id_attr = {
4482 	.attr = {
4483 		.name =		"device_id",
4484 		.mode =		S_IRUGO,
4485 	},
4486 	.show = ipr_show_device_id
4487 };
4488 
4489 /**
4490  * ipr_show_resource_type - Show the resource type for this device.
4491  * @dev:	device struct
4492  * @attr:	device attribute structure
4493  * @buf:	buffer
4494  *
4495  * Return value:
4496  *	number of bytes printed to buffer
4497  **/
4498 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4499 {
4500 	struct scsi_device *sdev = to_scsi_device(dev);
4501 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4502 	struct ipr_resource_entry *res;
4503 	unsigned long lock_flags = 0;
4504 	ssize_t len = -ENXIO;
4505 
4506 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4507 	res = (struct ipr_resource_entry *)sdev->hostdata;
4508 
4509 	if (res)
4510 		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4511 
4512 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4513 	return len;
4514 }
4515 
4516 static struct device_attribute ipr_resource_type_attr = {
4517 	.attr = {
4518 		.name =		"resource_type",
4519 		.mode =		S_IRUGO,
4520 	},
4521 	.show = ipr_show_resource_type
4522 };
4523 
4524 static struct device_attribute *ipr_dev_attrs[] = {
4525 	&ipr_adapter_handle_attr,
4526 	&ipr_resource_path_attr,
4527 	&ipr_device_id_attr,
4528 	&ipr_resource_type_attr,
4529 	NULL,
4530 };
4531 
4532 /**
4533  * ipr_biosparam - Return the HSC mapping
4534  * @sdev:			scsi device struct
4535  * @block_device:	block device pointer
4536  * @capacity:		capacity of the device
4537  * @parm:			Array containing returned HSC values.
4538  *
4539  * This function generates the HSC parms that fdisk uses.
4540  * We want to make sure we return something that places partitions
4541  * on 4k boundaries for best performance with the IOA.
4542  *
4543  * Return value:
4544  * 	0 on success
4545  **/
4546 static int ipr_biosparam(struct scsi_device *sdev,
4547 			 struct block_device *block_device,
4548 			 sector_t capacity, int *parm)
4549 {
4550 	int heads, sectors;
4551 	sector_t cylinders;
4552 
4553 	heads = 128;
4554 	sectors = 32;
4555 
4556 	cylinders = capacity;
4557 	sector_div(cylinders, (128 * 32));
4558 
4559 	/* return result */
4560 	parm[0] = heads;
4561 	parm[1] = sectors;
4562 	parm[2] = cylinders;
4563 
4564 	return 0;
4565 }
4566 
4567 /**
4568  * ipr_find_starget - Find target based on bus/target.
4569  * @starget:	scsi target struct
4570  *
4571  * Return value:
4572  * 	resource entry pointer if found / NULL if not found
4573  **/
4574 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4575 {
4576 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4577 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4578 	struct ipr_resource_entry *res;
4579 
4580 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4581 		if ((res->bus == starget->channel) &&
4582 		    (res->target == starget->id)) {
4583 			return res;
4584 		}
4585 	}
4586 
4587 	return NULL;
4588 }
4589 
4590 static struct ata_port_info sata_port_info;
4591 
4592 /**
4593  * ipr_target_alloc - Prepare for commands to a SCSI target
4594  * @starget:	scsi target struct
4595  *
4596  * If the device is a SATA device, this function allocates an
4597  * ATA port with libata, else it does nothing.
4598  *
4599  * Return value:
4600  * 	0 on success / non-0 on failure
4601  **/
4602 static int ipr_target_alloc(struct scsi_target *starget)
4603 {
4604 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4605 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4606 	struct ipr_sata_port *sata_port;
4607 	struct ata_port *ap;
4608 	struct ipr_resource_entry *res;
4609 	unsigned long lock_flags;
4610 
4611 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4612 	res = ipr_find_starget(starget);
4613 	starget->hostdata = NULL;
4614 
4615 	if (res && ipr_is_gata(res)) {
4616 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4617 		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4618 		if (!sata_port)
4619 			return -ENOMEM;
4620 
4621 		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4622 		if (ap) {
4623 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4624 			sata_port->ioa_cfg = ioa_cfg;
4625 			sata_port->ap = ap;
4626 			sata_port->res = res;
4627 
4628 			res->sata_port = sata_port;
4629 			ap->private_data = sata_port;
4630 			starget->hostdata = sata_port;
4631 		} else {
4632 			kfree(sata_port);
4633 			return -ENOMEM;
4634 		}
4635 	}
4636 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4637 
4638 	return 0;
4639 }
4640 
4641 /**
4642  * ipr_target_destroy - Destroy a SCSI target
4643  * @starget:	scsi target struct
4644  *
4645  * If the device was a SATA device, this function frees the libata
4646  * ATA port, else it does nothing.
4647  *
4648  **/
4649 static void ipr_target_destroy(struct scsi_target *starget)
4650 {
4651 	struct ipr_sata_port *sata_port = starget->hostdata;
4652 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4653 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4654 
4655 	if (ioa_cfg->sis64) {
4656 		if (!ipr_find_starget(starget)) {
4657 			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4658 				clear_bit(starget->id, ioa_cfg->array_ids);
4659 			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4660 				clear_bit(starget->id, ioa_cfg->vset_ids);
4661 			else if (starget->channel == 0)
4662 				clear_bit(starget->id, ioa_cfg->target_ids);
4663 		}
4664 	}
4665 
4666 	if (sata_port) {
4667 		starget->hostdata = NULL;
4668 		ata_sas_port_destroy(sata_port->ap);
4669 		kfree(sata_port);
4670 	}
4671 }
4672 
4673 /**
4674  * ipr_find_sdev - Find device based on bus/target/lun.
4675  * @sdev:	scsi device struct
4676  *
4677  * Return value:
4678  * 	resource entry pointer if found / NULL if not found
4679  **/
4680 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4681 {
4682 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4683 	struct ipr_resource_entry *res;
4684 
4685 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4686 		if ((res->bus == sdev->channel) &&
4687 		    (res->target == sdev->id) &&
4688 		    (res->lun == sdev->lun))
4689 			return res;
4690 	}
4691 
4692 	return NULL;
4693 }
4694 
4695 /**
4696  * ipr_slave_destroy - Unconfigure a SCSI device
4697  * @sdev:	scsi device struct
4698  *
4699  * Return value:
4700  * 	nothing
4701  **/
4702 static void ipr_slave_destroy(struct scsi_device *sdev)
4703 {
4704 	struct ipr_resource_entry *res;
4705 	struct ipr_ioa_cfg *ioa_cfg;
4706 	unsigned long lock_flags = 0;
4707 
4708 	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4709 
4710 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4711 	res = (struct ipr_resource_entry *) sdev->hostdata;
4712 	if (res) {
4713 		if (res->sata_port)
4714 			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4715 		sdev->hostdata = NULL;
4716 		res->sdev = NULL;
4717 		res->sata_port = NULL;
4718 	}
4719 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4720 }
4721 
4722 /**
4723  * ipr_slave_configure - Configure a SCSI device
4724  * @sdev:	scsi device struct
4725  *
4726  * This function configures the specified scsi device.
4727  *
4728  * Return value:
4729  * 	0 on success
4730  **/
4731 static int ipr_slave_configure(struct scsi_device *sdev)
4732 {
4733 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4734 	struct ipr_resource_entry *res;
4735 	struct ata_port *ap = NULL;
4736 	unsigned long lock_flags = 0;
4737 	char buffer[IPR_MAX_RES_PATH_LENGTH];
4738 
4739 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4740 	res = sdev->hostdata;
4741 	if (res) {
4742 		if (ipr_is_af_dasd_device(res))
4743 			sdev->type = TYPE_RAID;
4744 		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4745 			sdev->scsi_level = 4;
4746 			sdev->no_uld_attach = 1;
4747 		}
4748 		if (ipr_is_vset_device(res)) {
4749 			blk_queue_rq_timeout(sdev->request_queue,
4750 					     IPR_VSET_RW_TIMEOUT);
4751 			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4752 		}
4753 		if (ipr_is_gata(res) && res->sata_port)
4754 			ap = res->sata_port->ap;
4755 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4756 
4757 		if (ap) {
4758 			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4759 			ata_sas_slave_configure(sdev, ap);
4760 		} else
4761 			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4762 		if (ioa_cfg->sis64)
4763 			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4764 				    ipr_format_res_path(ioa_cfg,
4765 				res->res_path, buffer, sizeof(buffer)));
4766 		return 0;
4767 	}
4768 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4769 	return 0;
4770 }
4771 
4772 /**
4773  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4774  * @sdev:	scsi device struct
4775  *
4776  * This function initializes an ATA port so that future commands
4777  * sent through queuecommand will work.
4778  *
4779  * Return value:
4780  * 	0 on success
4781  **/
4782 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4783 {
4784 	struct ipr_sata_port *sata_port = NULL;
4785 	int rc = -ENXIO;
4786 
4787 	ENTER;
4788 	if (sdev->sdev_target)
4789 		sata_port = sdev->sdev_target->hostdata;
4790 	if (sata_port) {
4791 		rc = ata_sas_port_init(sata_port->ap);
4792 		if (rc == 0)
4793 			rc = ata_sas_sync_probe(sata_port->ap);
4794 	}
4795 
4796 	if (rc)
4797 		ipr_slave_destroy(sdev);
4798 
4799 	LEAVE;
4800 	return rc;
4801 }
4802 
4803 /**
4804  * ipr_slave_alloc - Prepare for commands to a device.
4805  * @sdev:	scsi device struct
4806  *
4807  * This function saves a pointer to the resource entry
4808  * in the scsi device struct if the device exists. We
4809  * can then use this pointer in ipr_queuecommand when
4810  * handling new commands.
4811  *
4812  * Return value:
4813  * 	0 on success / -ENXIO if device does not exist
4814  **/
4815 static int ipr_slave_alloc(struct scsi_device *sdev)
4816 {
4817 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4818 	struct ipr_resource_entry *res;
4819 	unsigned long lock_flags;
4820 	int rc = -ENXIO;
4821 
4822 	sdev->hostdata = NULL;
4823 
4824 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4825 
4826 	res = ipr_find_sdev(sdev);
4827 	if (res) {
4828 		res->sdev = sdev;
4829 		res->add_to_ml = 0;
4830 		res->in_erp = 0;
4831 		sdev->hostdata = res;
4832 		if (!ipr_is_naca_model(res))
4833 			res->needs_sync_complete = 1;
4834 		rc = 0;
4835 		if (ipr_is_gata(res)) {
4836 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4837 			return ipr_ata_slave_alloc(sdev);
4838 		}
4839 	}
4840 
4841 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4842 
4843 	return rc;
4844 }
4845 
4846 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4847 {
4848 	struct ipr_ioa_cfg *ioa_cfg;
4849 	unsigned long lock_flags = 0;
4850 	int rc = SUCCESS;
4851 
4852 	ENTER;
4853 	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4854 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4855 
4856 	if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4857 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4858 		dev_err(&ioa_cfg->pdev->dev,
4859 			"Adapter being reset as a result of error recovery.\n");
4860 
4861 		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4862 			ioa_cfg->sdt_state = GET_DUMP;
4863 	}
4864 
4865 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4866 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4867 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4868 
4869 	/* If we got hit with a host reset while we were already resetting
4870 	 the adapter for some reason, and the reset failed. */
4871 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4872 		ipr_trace;
4873 		rc = FAILED;
4874 	}
4875 
4876 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4877 	LEAVE;
4878 	return rc;
4879 }
4880 
4881 /**
4882  * ipr_device_reset - Reset the device
4883  * @ioa_cfg:	ioa config struct
4884  * @res:		resource entry struct
4885  *
4886  * This function issues a device reset to the affected device.
4887  * If the device is a SCSI device, a LUN reset will be sent
4888  * to the device first. If that does not work, a target reset
4889  * will be sent. If the device is a SATA device, a PHY reset will
4890  * be sent.
4891  *
4892  * Return value:
4893  *	0 on success / non-zero on failure
4894  **/
4895 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4896 			    struct ipr_resource_entry *res)
4897 {
4898 	struct ipr_cmnd *ipr_cmd;
4899 	struct ipr_ioarcb *ioarcb;
4900 	struct ipr_cmd_pkt *cmd_pkt;
4901 	struct ipr_ioarcb_ata_regs *regs;
4902 	u32 ioasc;
4903 
4904 	ENTER;
4905 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4906 	ioarcb = &ipr_cmd->ioarcb;
4907 	cmd_pkt = &ioarcb->cmd_pkt;
4908 
4909 	if (ipr_cmd->ioa_cfg->sis64) {
4910 		regs = &ipr_cmd->i.ata_ioadl.regs;
4911 		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4912 	} else
4913 		regs = &ioarcb->u.add_data.u.regs;
4914 
4915 	ioarcb->res_handle = res->res_handle;
4916 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4917 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4918 	if (ipr_is_gata(res)) {
4919 		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4920 		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4921 		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4922 	}
4923 
4924 	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4925 	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4926 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
4927 	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4928 		if (ipr_cmd->ioa_cfg->sis64)
4929 			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4930 			       sizeof(struct ipr_ioasa_gata));
4931 		else
4932 			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4933 			       sizeof(struct ipr_ioasa_gata));
4934 	}
4935 
4936 	LEAVE;
4937 	return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
4938 }
4939 
4940 /**
4941  * ipr_sata_reset - Reset the SATA port
4942  * @link:	SATA link to reset
4943  * @classes:	class of the attached device
4944  *
4945  * This function issues a SATA phy reset to the affected ATA link.
4946  *
4947  * Return value:
4948  *	0 on success / non-zero on failure
4949  **/
4950 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4951 				unsigned long deadline)
4952 {
4953 	struct ipr_sata_port *sata_port = link->ap->private_data;
4954 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4955 	struct ipr_resource_entry *res;
4956 	unsigned long lock_flags = 0;
4957 	int rc = -ENXIO;
4958 
4959 	ENTER;
4960 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4961 	while (ioa_cfg->in_reset_reload) {
4962 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4963 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4964 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4965 	}
4966 
4967 	res = sata_port->res;
4968 	if (res) {
4969 		rc = ipr_device_reset(ioa_cfg, res);
4970 		*classes = res->ata_class;
4971 	}
4972 
4973 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4974 	LEAVE;
4975 	return rc;
4976 }
4977 
4978 /**
4979  * ipr_eh_dev_reset - Reset the device
4980  * @scsi_cmd:	scsi command struct
4981  *
4982  * This function issues a device reset to the affected device.
4983  * A LUN reset will be sent to the device first. If that does
4984  * not work, a target reset will be sent.
4985  *
4986  * Return value:
4987  *	SUCCESS / FAILED
4988  **/
4989 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
4990 {
4991 	struct ipr_cmnd *ipr_cmd;
4992 	struct ipr_ioa_cfg *ioa_cfg;
4993 	struct ipr_resource_entry *res;
4994 	struct ata_port *ap;
4995 	int rc = 0;
4996 	struct ipr_hrr_queue *hrrq;
4997 
4998 	ENTER;
4999 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5000 	res = scsi_cmd->device->hostdata;
5001 
5002 	if (!res)
5003 		return FAILED;
5004 
5005 	/*
5006 	 * If we are currently going through reset/reload, return failed. This will force the
5007 	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5008 	 * reset to complete
5009 	 */
5010 	if (ioa_cfg->in_reset_reload)
5011 		return FAILED;
5012 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5013 		return FAILED;
5014 
5015 	for_each_hrrq(hrrq, ioa_cfg) {
5016 		spin_lock(&hrrq->_lock);
5017 		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5018 			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5019 				if (ipr_cmd->scsi_cmd)
5020 					ipr_cmd->done = ipr_scsi_eh_done;
5021 				if (ipr_cmd->qc)
5022 					ipr_cmd->done = ipr_sata_eh_done;
5023 				if (ipr_cmd->qc &&
5024 				    !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5025 					ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5026 					ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5027 				}
5028 			}
5029 		}
5030 		spin_unlock(&hrrq->_lock);
5031 	}
5032 	res->resetting_device = 1;
5033 	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5034 
5035 	if (ipr_is_gata(res) && res->sata_port) {
5036 		ap = res->sata_port->ap;
5037 		spin_unlock_irq(scsi_cmd->device->host->host_lock);
5038 		ata_std_error_handler(ap);
5039 		spin_lock_irq(scsi_cmd->device->host->host_lock);
5040 
5041 		for_each_hrrq(hrrq, ioa_cfg) {
5042 			spin_lock(&hrrq->_lock);
5043 			list_for_each_entry(ipr_cmd,
5044 					    &hrrq->hrrq_pending_q, queue) {
5045 				if (ipr_cmd->ioarcb.res_handle ==
5046 				    res->res_handle) {
5047 					rc = -EIO;
5048 					break;
5049 				}
5050 			}
5051 			spin_unlock(&hrrq->_lock);
5052 		}
5053 	} else
5054 		rc = ipr_device_reset(ioa_cfg, res);
5055 	res->resetting_device = 0;
5056 	res->reset_occurred = 1;
5057 
5058 	LEAVE;
5059 	return rc ? FAILED : SUCCESS;
5060 }
5061 
5062 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5063 {
5064 	int rc;
5065 
5066 	spin_lock_irq(cmd->device->host->host_lock);
5067 	rc = __ipr_eh_dev_reset(cmd);
5068 	spin_unlock_irq(cmd->device->host->host_lock);
5069 
5070 	return rc;
5071 }
5072 
5073 /**
5074  * ipr_bus_reset_done - Op done function for bus reset.
5075  * @ipr_cmd:	ipr command struct
5076  *
5077  * This function is the op done function for a bus reset
5078  *
5079  * Return value:
5080  * 	none
5081  **/
5082 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5083 {
5084 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5085 	struct ipr_resource_entry *res;
5086 
5087 	ENTER;
5088 	if (!ioa_cfg->sis64)
5089 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5090 			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5091 				scsi_report_bus_reset(ioa_cfg->host, res->bus);
5092 				break;
5093 			}
5094 		}
5095 
5096 	/*
5097 	 * If abort has not completed, indicate the reset has, else call the
5098 	 * abort's done function to wake the sleeping eh thread
5099 	 */
5100 	if (ipr_cmd->sibling->sibling)
5101 		ipr_cmd->sibling->sibling = NULL;
5102 	else
5103 		ipr_cmd->sibling->done(ipr_cmd->sibling);
5104 
5105 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5106 	LEAVE;
5107 }
5108 
5109 /**
5110  * ipr_abort_timeout - An abort task has timed out
5111  * @ipr_cmd:	ipr command struct
5112  *
5113  * This function handles when an abort task times out. If this
5114  * happens we issue a bus reset since we have resources tied
5115  * up that must be freed before returning to the midlayer.
5116  *
5117  * Return value:
5118  *	none
5119  **/
5120 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5121 {
5122 	struct ipr_cmnd *reset_cmd;
5123 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5124 	struct ipr_cmd_pkt *cmd_pkt;
5125 	unsigned long lock_flags = 0;
5126 
5127 	ENTER;
5128 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5129 	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5130 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5131 		return;
5132 	}
5133 
5134 	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5135 	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5136 	ipr_cmd->sibling = reset_cmd;
5137 	reset_cmd->sibling = ipr_cmd;
5138 	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5139 	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5140 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5141 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5142 	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5143 
5144 	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5145 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5146 	LEAVE;
5147 }
5148 
5149 /**
5150  * ipr_cancel_op - Cancel specified op
5151  * @scsi_cmd:	scsi command struct
5152  *
5153  * This function cancels specified op.
5154  *
5155  * Return value:
5156  *	SUCCESS / FAILED
5157  **/
5158 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5159 {
5160 	struct ipr_cmnd *ipr_cmd;
5161 	struct ipr_ioa_cfg *ioa_cfg;
5162 	struct ipr_resource_entry *res;
5163 	struct ipr_cmd_pkt *cmd_pkt;
5164 	u32 ioasc, int_reg;
5165 	int op_found = 0;
5166 	struct ipr_hrr_queue *hrrq;
5167 
5168 	ENTER;
5169 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5170 	res = scsi_cmd->device->hostdata;
5171 
5172 	/* If we are currently going through reset/reload, return failed.
5173 	 * This will force the mid-layer to call ipr_eh_host_reset,
5174 	 * which will then go to sleep and wait for the reset to complete
5175 	 */
5176 	if (ioa_cfg->in_reset_reload ||
5177 	    ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5178 		return FAILED;
5179 	if (!res)
5180 		return FAILED;
5181 
5182 	/*
5183 	 * If we are aborting a timed out op, chances are that the timeout was caused
5184 	 * by a still not detected EEH error. In such cases, reading a register will
5185 	 * trigger the EEH recovery infrastructure.
5186 	 */
5187 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5188 
5189 	if (!ipr_is_gscsi(res))
5190 		return FAILED;
5191 
5192 	for_each_hrrq(hrrq, ioa_cfg) {
5193 		spin_lock(&hrrq->_lock);
5194 		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5195 			if (ipr_cmd->scsi_cmd == scsi_cmd) {
5196 				ipr_cmd->done = ipr_scsi_eh_done;
5197 				op_found = 1;
5198 				break;
5199 			}
5200 		}
5201 		spin_unlock(&hrrq->_lock);
5202 	}
5203 
5204 	if (!op_found)
5205 		return SUCCESS;
5206 
5207 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5208 	ipr_cmd->ioarcb.res_handle = res->res_handle;
5209 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5210 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5211 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5212 	ipr_cmd->u.sdev = scsi_cmd->device;
5213 
5214 	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5215 		    scsi_cmd->cmnd[0]);
5216 	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5217 	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5218 
5219 	/*
5220 	 * If the abort task timed out and we sent a bus reset, we will get
5221 	 * one the following responses to the abort
5222 	 */
5223 	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5224 		ioasc = 0;
5225 		ipr_trace;
5226 	}
5227 
5228 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5229 	if (!ipr_is_naca_model(res))
5230 		res->needs_sync_complete = 1;
5231 
5232 	LEAVE;
5233 	return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5234 }
5235 
5236 /**
5237  * ipr_eh_abort - Abort a single op
5238  * @scsi_cmd:	scsi command struct
5239  *
5240  * Return value:
5241  * 	SUCCESS / FAILED
5242  **/
5243 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5244 {
5245 	unsigned long flags;
5246 	int rc;
5247 
5248 	ENTER;
5249 
5250 	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5251 	rc = ipr_cancel_op(scsi_cmd);
5252 	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5253 
5254 	LEAVE;
5255 	return rc;
5256 }
5257 
5258 /**
5259  * ipr_handle_other_interrupt - Handle "other" interrupts
5260  * @ioa_cfg:	ioa config struct
5261  * @int_reg:	interrupt register
5262  *
5263  * Return value:
5264  * 	IRQ_NONE / IRQ_HANDLED
5265  **/
5266 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5267 					      u32 int_reg)
5268 {
5269 	irqreturn_t rc = IRQ_HANDLED;
5270 	u32 int_mask_reg;
5271 
5272 	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5273 	int_reg &= ~int_mask_reg;
5274 
5275 	/* If an interrupt on the adapter did not occur, ignore it.
5276 	 * Or in the case of SIS 64, check for a stage change interrupt.
5277 	 */
5278 	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5279 		if (ioa_cfg->sis64) {
5280 			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5281 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5282 			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5283 
5284 				/* clear stage change */
5285 				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5286 				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5287 				list_del(&ioa_cfg->reset_cmd->queue);
5288 				del_timer(&ioa_cfg->reset_cmd->timer);
5289 				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5290 				return IRQ_HANDLED;
5291 			}
5292 		}
5293 
5294 		return IRQ_NONE;
5295 	}
5296 
5297 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5298 		/* Mask the interrupt */
5299 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5300 
5301 		/* Clear the interrupt */
5302 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5303 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5304 
5305 		list_del(&ioa_cfg->reset_cmd->queue);
5306 		del_timer(&ioa_cfg->reset_cmd->timer);
5307 		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5308 	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5309 		if (ioa_cfg->clear_isr) {
5310 			if (ipr_debug && printk_ratelimit())
5311 				dev_err(&ioa_cfg->pdev->dev,
5312 					"Spurious interrupt detected. 0x%08X\n", int_reg);
5313 			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5314 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5315 			return IRQ_NONE;
5316 		}
5317 	} else {
5318 		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5319 			ioa_cfg->ioa_unit_checked = 1;
5320 		else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5321 			dev_err(&ioa_cfg->pdev->dev,
5322 				"No Host RRQ. 0x%08X\n", int_reg);
5323 		else
5324 			dev_err(&ioa_cfg->pdev->dev,
5325 				"Permanent IOA failure. 0x%08X\n", int_reg);
5326 
5327 		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5328 			ioa_cfg->sdt_state = GET_DUMP;
5329 
5330 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5331 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5332 	}
5333 
5334 	return rc;
5335 }
5336 
5337 /**
5338  * ipr_isr_eh - Interrupt service routine error handler
5339  * @ioa_cfg:	ioa config struct
5340  * @msg:	message to log
5341  *
5342  * Return value:
5343  * 	none
5344  **/
5345 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5346 {
5347 	ioa_cfg->errors_logged++;
5348 	dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5349 
5350 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5351 		ioa_cfg->sdt_state = GET_DUMP;
5352 
5353 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5354 }
5355 
5356 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5357 						struct list_head *doneq)
5358 {
5359 	u32 ioasc;
5360 	u16 cmd_index;
5361 	struct ipr_cmnd *ipr_cmd;
5362 	struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5363 	int num_hrrq = 0;
5364 
5365 	/* If interrupts are disabled, ignore the interrupt */
5366 	if (!hrr_queue->allow_interrupts)
5367 		return 0;
5368 
5369 	while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5370 	       hrr_queue->toggle_bit) {
5371 
5372 		cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5373 			     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5374 			     IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5375 
5376 		if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5377 			     cmd_index < hrr_queue->min_cmd_id)) {
5378 			ipr_isr_eh(ioa_cfg,
5379 				"Invalid response handle from IOA: ",
5380 				cmd_index);
5381 			break;
5382 		}
5383 
5384 		ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5385 		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5386 
5387 		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5388 
5389 		list_move_tail(&ipr_cmd->queue, doneq);
5390 
5391 		if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5392 			hrr_queue->hrrq_curr++;
5393 		} else {
5394 			hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5395 			hrr_queue->toggle_bit ^= 1u;
5396 		}
5397 		num_hrrq++;
5398 		if (budget > 0 && num_hrrq >= budget)
5399 			break;
5400 	}
5401 
5402 	return num_hrrq;
5403 }
5404 
5405 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5406 {
5407 	struct ipr_ioa_cfg *ioa_cfg;
5408 	struct ipr_hrr_queue *hrrq;
5409 	struct ipr_cmnd *ipr_cmd, *temp;
5410 	unsigned long hrrq_flags;
5411 	int completed_ops;
5412 	LIST_HEAD(doneq);
5413 
5414 	hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5415 	ioa_cfg = hrrq->ioa_cfg;
5416 
5417 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
5418 	completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5419 
5420 	if (completed_ops < budget)
5421 		blk_iopoll_complete(iop);
5422 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5423 
5424 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5425 		list_del(&ipr_cmd->queue);
5426 		del_timer(&ipr_cmd->timer);
5427 		ipr_cmd->fast_done(ipr_cmd);
5428 	}
5429 
5430 	return completed_ops;
5431 }
5432 
5433 /**
5434  * ipr_isr - Interrupt service routine
5435  * @irq:	irq number
5436  * @devp:	pointer to ioa config struct
5437  *
5438  * Return value:
5439  * 	IRQ_NONE / IRQ_HANDLED
5440  **/
5441 static irqreturn_t ipr_isr(int irq, void *devp)
5442 {
5443 	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5444 	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5445 	unsigned long hrrq_flags = 0;
5446 	u32 int_reg = 0;
5447 	int num_hrrq = 0;
5448 	int irq_none = 0;
5449 	struct ipr_cmnd *ipr_cmd, *temp;
5450 	irqreturn_t rc = IRQ_NONE;
5451 	LIST_HEAD(doneq);
5452 
5453 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
5454 	/* If interrupts are disabled, ignore the interrupt */
5455 	if (!hrrq->allow_interrupts) {
5456 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5457 		return IRQ_NONE;
5458 	}
5459 
5460 	while (1) {
5461 		if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5462 			rc =  IRQ_HANDLED;
5463 
5464 			if (!ioa_cfg->clear_isr)
5465 				break;
5466 
5467 			/* Clear the PCI interrupt */
5468 			num_hrrq = 0;
5469 			do {
5470 				writel(IPR_PCII_HRRQ_UPDATED,
5471 				     ioa_cfg->regs.clr_interrupt_reg32);
5472 				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5473 			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5474 				num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5475 
5476 		} else if (rc == IRQ_NONE && irq_none == 0) {
5477 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5478 			irq_none++;
5479 		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5480 			   int_reg & IPR_PCII_HRRQ_UPDATED) {
5481 			ipr_isr_eh(ioa_cfg,
5482 				"Error clearing HRRQ: ", num_hrrq);
5483 			rc = IRQ_HANDLED;
5484 			break;
5485 		} else
5486 			break;
5487 	}
5488 
5489 	if (unlikely(rc == IRQ_NONE))
5490 		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5491 
5492 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5493 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5494 		list_del(&ipr_cmd->queue);
5495 		del_timer(&ipr_cmd->timer);
5496 		ipr_cmd->fast_done(ipr_cmd);
5497 	}
5498 	return rc;
5499 }
5500 
5501 /**
5502  * ipr_isr_mhrrq - Interrupt service routine
5503  * @irq:	irq number
5504  * @devp:	pointer to ioa config struct
5505  *
5506  * Return value:
5507  *	IRQ_NONE / IRQ_HANDLED
5508  **/
5509 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5510 {
5511 	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5512 	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5513 	unsigned long hrrq_flags = 0;
5514 	struct ipr_cmnd *ipr_cmd, *temp;
5515 	irqreturn_t rc = IRQ_NONE;
5516 	LIST_HEAD(doneq);
5517 
5518 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
5519 
5520 	/* If interrupts are disabled, ignore the interrupt */
5521 	if (!hrrq->allow_interrupts) {
5522 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5523 		return IRQ_NONE;
5524 	}
5525 
5526 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5527 		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5528 		       hrrq->toggle_bit) {
5529 			if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5530 				blk_iopoll_sched(&hrrq->iopoll);
5531 			spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5532 			return IRQ_HANDLED;
5533 		}
5534 	} else {
5535 		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5536 			hrrq->toggle_bit)
5537 
5538 			if (ipr_process_hrrq(hrrq, -1, &doneq))
5539 				rc =  IRQ_HANDLED;
5540 	}
5541 
5542 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5543 
5544 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5545 		list_del(&ipr_cmd->queue);
5546 		del_timer(&ipr_cmd->timer);
5547 		ipr_cmd->fast_done(ipr_cmd);
5548 	}
5549 	return rc;
5550 }
5551 
5552 /**
5553  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5554  * @ioa_cfg:	ioa config struct
5555  * @ipr_cmd:	ipr command struct
5556  *
5557  * Return value:
5558  * 	0 on success / -1 on failure
5559  **/
5560 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5561 			     struct ipr_cmnd *ipr_cmd)
5562 {
5563 	int i, nseg;
5564 	struct scatterlist *sg;
5565 	u32 length;
5566 	u32 ioadl_flags = 0;
5567 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5568 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5569 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5570 
5571 	length = scsi_bufflen(scsi_cmd);
5572 	if (!length)
5573 		return 0;
5574 
5575 	nseg = scsi_dma_map(scsi_cmd);
5576 	if (nseg < 0) {
5577 		if (printk_ratelimit())
5578 			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5579 		return -1;
5580 	}
5581 
5582 	ipr_cmd->dma_use_sg = nseg;
5583 
5584 	ioarcb->data_transfer_length = cpu_to_be32(length);
5585 	ioarcb->ioadl_len =
5586 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5587 
5588 	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5589 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5590 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5591 	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5592 		ioadl_flags = IPR_IOADL_FLAGS_READ;
5593 
5594 	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5595 		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5596 		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5597 		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5598 	}
5599 
5600 	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5601 	return 0;
5602 }
5603 
5604 /**
5605  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5606  * @ioa_cfg:	ioa config struct
5607  * @ipr_cmd:	ipr command struct
5608  *
5609  * Return value:
5610  * 	0 on success / -1 on failure
5611  **/
5612 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5613 			   struct ipr_cmnd *ipr_cmd)
5614 {
5615 	int i, nseg;
5616 	struct scatterlist *sg;
5617 	u32 length;
5618 	u32 ioadl_flags = 0;
5619 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5620 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5621 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5622 
5623 	length = scsi_bufflen(scsi_cmd);
5624 	if (!length)
5625 		return 0;
5626 
5627 	nseg = scsi_dma_map(scsi_cmd);
5628 	if (nseg < 0) {
5629 		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5630 		return -1;
5631 	}
5632 
5633 	ipr_cmd->dma_use_sg = nseg;
5634 
5635 	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5636 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5637 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5638 		ioarcb->data_transfer_length = cpu_to_be32(length);
5639 		ioarcb->ioadl_len =
5640 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5641 	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5642 		ioadl_flags = IPR_IOADL_FLAGS_READ;
5643 		ioarcb->read_data_transfer_length = cpu_to_be32(length);
5644 		ioarcb->read_ioadl_len =
5645 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5646 	}
5647 
5648 	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5649 		ioadl = ioarcb->u.add_data.u.ioadl;
5650 		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5651 				    offsetof(struct ipr_ioarcb, u.add_data));
5652 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5653 	}
5654 
5655 	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5656 		ioadl[i].flags_and_data_len =
5657 			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5658 		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5659 	}
5660 
5661 	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5662 	return 0;
5663 }
5664 
5665 /**
5666  * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5667  * @scsi_cmd:	scsi command struct
5668  *
5669  * Return value:
5670  * 	task attributes
5671  **/
5672 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5673 {
5674 	u8 tag[2];
5675 	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5676 
5677 	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5678 		switch (tag[0]) {
5679 		case MSG_SIMPLE_TAG:
5680 			rc = IPR_FLAGS_LO_SIMPLE_TASK;
5681 			break;
5682 		case MSG_HEAD_TAG:
5683 			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5684 			break;
5685 		case MSG_ORDERED_TAG:
5686 			rc = IPR_FLAGS_LO_ORDERED_TASK;
5687 			break;
5688 		};
5689 	}
5690 
5691 	return rc;
5692 }
5693 
5694 /**
5695  * ipr_erp_done - Process completion of ERP for a device
5696  * @ipr_cmd:		ipr command struct
5697  *
5698  * This function copies the sense buffer into the scsi_cmd
5699  * struct and pushes the scsi_done function.
5700  *
5701  * Return value:
5702  * 	nothing
5703  **/
5704 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5705 {
5706 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5707 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5708 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5709 
5710 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5711 		scsi_cmd->result |= (DID_ERROR << 16);
5712 		scmd_printk(KERN_ERR, scsi_cmd,
5713 			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5714 	} else {
5715 		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5716 		       SCSI_SENSE_BUFFERSIZE);
5717 	}
5718 
5719 	if (res) {
5720 		if (!ipr_is_naca_model(res))
5721 			res->needs_sync_complete = 1;
5722 		res->in_erp = 0;
5723 	}
5724 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
5725 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5726 	scsi_cmd->scsi_done(scsi_cmd);
5727 }
5728 
5729 /**
5730  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5731  * @ipr_cmd:	ipr command struct
5732  *
5733  * Return value:
5734  * 	none
5735  **/
5736 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5737 {
5738 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5739 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5740 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
5741 
5742 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5743 	ioarcb->data_transfer_length = 0;
5744 	ioarcb->read_data_transfer_length = 0;
5745 	ioarcb->ioadl_len = 0;
5746 	ioarcb->read_ioadl_len = 0;
5747 	ioasa->hdr.ioasc = 0;
5748 	ioasa->hdr.residual_data_len = 0;
5749 
5750 	if (ipr_cmd->ioa_cfg->sis64)
5751 		ioarcb->u.sis64_addr_data.data_ioadl_addr =
5752 			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5753 	else {
5754 		ioarcb->write_ioadl_addr =
5755 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5756 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5757 	}
5758 }
5759 
5760 /**
5761  * ipr_erp_request_sense - Send request sense to a device
5762  * @ipr_cmd:	ipr command struct
5763  *
5764  * This function sends a request sense to a device as a result
5765  * of a check condition.
5766  *
5767  * Return value:
5768  * 	nothing
5769  **/
5770 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5771 {
5772 	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5773 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5774 
5775 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5776 		ipr_erp_done(ipr_cmd);
5777 		return;
5778 	}
5779 
5780 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5781 
5782 	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5783 	cmd_pkt->cdb[0] = REQUEST_SENSE;
5784 	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5785 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5786 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5787 	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5788 
5789 	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5790 		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5791 
5792 	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5793 		   IPR_REQUEST_SENSE_TIMEOUT * 2);
5794 }
5795 
5796 /**
5797  * ipr_erp_cancel_all - Send cancel all to a device
5798  * @ipr_cmd:	ipr command struct
5799  *
5800  * This function sends a cancel all to a device to clear the
5801  * queue. If we are running TCQ on the device, QERR is set to 1,
5802  * which means all outstanding ops have been dropped on the floor.
5803  * Cancel all will return them to us.
5804  *
5805  * Return value:
5806  * 	nothing
5807  **/
5808 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5809 {
5810 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5811 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5812 	struct ipr_cmd_pkt *cmd_pkt;
5813 
5814 	res->in_erp = 1;
5815 
5816 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5817 
5818 	if (!scsi_get_tag_type(scsi_cmd->device)) {
5819 		ipr_erp_request_sense(ipr_cmd);
5820 		return;
5821 	}
5822 
5823 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5824 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5825 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5826 
5827 	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5828 		   IPR_CANCEL_ALL_TIMEOUT);
5829 }
5830 
5831 /**
5832  * ipr_dump_ioasa - Dump contents of IOASA
5833  * @ioa_cfg:	ioa config struct
5834  * @ipr_cmd:	ipr command struct
5835  * @res:		resource entry struct
5836  *
5837  * This function is invoked by the interrupt handler when ops
5838  * fail. It will log the IOASA if appropriate. Only called
5839  * for GPDD ops.
5840  *
5841  * Return value:
5842  * 	none
5843  **/
5844 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5845 			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5846 {
5847 	int i;
5848 	u16 data_len;
5849 	u32 ioasc, fd_ioasc;
5850 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5851 	__be32 *ioasa_data = (__be32 *)ioasa;
5852 	int error_index;
5853 
5854 	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5855 	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5856 
5857 	if (0 == ioasc)
5858 		return;
5859 
5860 	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5861 		return;
5862 
5863 	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5864 		error_index = ipr_get_error(fd_ioasc);
5865 	else
5866 		error_index = ipr_get_error(ioasc);
5867 
5868 	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5869 		/* Don't log an error if the IOA already logged one */
5870 		if (ioasa->hdr.ilid != 0)
5871 			return;
5872 
5873 		if (!ipr_is_gscsi(res))
5874 			return;
5875 
5876 		if (ipr_error_table[error_index].log_ioasa == 0)
5877 			return;
5878 	}
5879 
5880 	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5881 
5882 	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5883 	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5884 		data_len = sizeof(struct ipr_ioasa64);
5885 	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5886 		data_len = sizeof(struct ipr_ioasa);
5887 
5888 	ipr_err("IOASA Dump:\n");
5889 
5890 	for (i = 0; i < data_len / 4; i += 4) {
5891 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5892 			be32_to_cpu(ioasa_data[i]),
5893 			be32_to_cpu(ioasa_data[i+1]),
5894 			be32_to_cpu(ioasa_data[i+2]),
5895 			be32_to_cpu(ioasa_data[i+3]));
5896 	}
5897 }
5898 
5899 /**
5900  * ipr_gen_sense - Generate SCSI sense data from an IOASA
5901  * @ioasa:		IOASA
5902  * @sense_buf:	sense data buffer
5903  *
5904  * Return value:
5905  * 	none
5906  **/
5907 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5908 {
5909 	u32 failing_lba;
5910 	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5911 	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5912 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5913 	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5914 
5915 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5916 
5917 	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5918 		return;
5919 
5920 	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5921 
5922 	if (ipr_is_vset_device(res) &&
5923 	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5924 	    ioasa->u.vset.failing_lba_hi != 0) {
5925 		sense_buf[0] = 0x72;
5926 		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5927 		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5928 		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5929 
5930 		sense_buf[7] = 12;
5931 		sense_buf[8] = 0;
5932 		sense_buf[9] = 0x0A;
5933 		sense_buf[10] = 0x80;
5934 
5935 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5936 
5937 		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5938 		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5939 		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5940 		sense_buf[15] = failing_lba & 0x000000ff;
5941 
5942 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5943 
5944 		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5945 		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5946 		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5947 		sense_buf[19] = failing_lba & 0x000000ff;
5948 	} else {
5949 		sense_buf[0] = 0x70;
5950 		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5951 		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5952 		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5953 
5954 		/* Illegal request */
5955 		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5956 		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5957 			sense_buf[7] = 10;	/* additional length */
5958 
5959 			/* IOARCB was in error */
5960 			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5961 				sense_buf[15] = 0xC0;
5962 			else	/* Parameter data was invalid */
5963 				sense_buf[15] = 0x80;
5964 
5965 			sense_buf[16] =
5966 			    ((IPR_FIELD_POINTER_MASK &
5967 			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5968 			sense_buf[17] =
5969 			    (IPR_FIELD_POINTER_MASK &
5970 			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5971 		} else {
5972 			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5973 				if (ipr_is_vset_device(res))
5974 					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5975 				else
5976 					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5977 
5978 				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
5979 				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5980 				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5981 				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5982 				sense_buf[6] = failing_lba & 0x000000ff;
5983 			}
5984 
5985 			sense_buf[7] = 6;	/* additional length */
5986 		}
5987 	}
5988 }
5989 
5990 /**
5991  * ipr_get_autosense - Copy autosense data to sense buffer
5992  * @ipr_cmd:	ipr command struct
5993  *
5994  * This function copies the autosense buffer to the buffer
5995  * in the scsi_cmd, if there is autosense available.
5996  *
5997  * Return value:
5998  *	1 if autosense was available / 0 if not
5999  **/
6000 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6001 {
6002 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6003 	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6004 
6005 	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6006 		return 0;
6007 
6008 	if (ipr_cmd->ioa_cfg->sis64)
6009 		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6010 		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6011 			   SCSI_SENSE_BUFFERSIZE));
6012 	else
6013 		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6014 		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6015 			   SCSI_SENSE_BUFFERSIZE));
6016 	return 1;
6017 }
6018 
6019 /**
6020  * ipr_erp_start - Process an error response for a SCSI op
6021  * @ioa_cfg:	ioa config struct
6022  * @ipr_cmd:	ipr command struct
6023  *
6024  * This function determines whether or not to initiate ERP
6025  * on the affected device.
6026  *
6027  * Return value:
6028  * 	nothing
6029  **/
6030 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6031 			      struct ipr_cmnd *ipr_cmd)
6032 {
6033 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6034 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6035 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6036 	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6037 
6038 	if (!res) {
6039 		ipr_scsi_eh_done(ipr_cmd);
6040 		return;
6041 	}
6042 
6043 	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6044 		ipr_gen_sense(ipr_cmd);
6045 
6046 	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6047 
6048 	switch (masked_ioasc) {
6049 	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6050 		if (ipr_is_naca_model(res))
6051 			scsi_cmd->result |= (DID_ABORT << 16);
6052 		else
6053 			scsi_cmd->result |= (DID_IMM_RETRY << 16);
6054 		break;
6055 	case IPR_IOASC_IR_RESOURCE_HANDLE:
6056 	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6057 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
6058 		break;
6059 	case IPR_IOASC_HW_SEL_TIMEOUT:
6060 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
6061 		if (!ipr_is_naca_model(res))
6062 			res->needs_sync_complete = 1;
6063 		break;
6064 	case IPR_IOASC_SYNC_REQUIRED:
6065 		if (!res->in_erp)
6066 			res->needs_sync_complete = 1;
6067 		scsi_cmd->result |= (DID_IMM_RETRY << 16);
6068 		break;
6069 	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6070 	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6071 		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6072 		break;
6073 	case IPR_IOASC_BUS_WAS_RESET:
6074 	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6075 		/*
6076 		 * Report the bus reset and ask for a retry. The device
6077 		 * will give CC/UA the next command.
6078 		 */
6079 		if (!res->resetting_device)
6080 			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6081 		scsi_cmd->result |= (DID_ERROR << 16);
6082 		if (!ipr_is_naca_model(res))
6083 			res->needs_sync_complete = 1;
6084 		break;
6085 	case IPR_IOASC_HW_DEV_BUS_STATUS:
6086 		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6087 		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6088 			if (!ipr_get_autosense(ipr_cmd)) {
6089 				if (!ipr_is_naca_model(res)) {
6090 					ipr_erp_cancel_all(ipr_cmd);
6091 					return;
6092 				}
6093 			}
6094 		}
6095 		if (!ipr_is_naca_model(res))
6096 			res->needs_sync_complete = 1;
6097 		break;
6098 	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6099 		break;
6100 	default:
6101 		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6102 			scsi_cmd->result |= (DID_ERROR << 16);
6103 		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6104 			res->needs_sync_complete = 1;
6105 		break;
6106 	}
6107 
6108 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
6109 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6110 	scsi_cmd->scsi_done(scsi_cmd);
6111 }
6112 
6113 /**
6114  * ipr_scsi_done - mid-layer done function
6115  * @ipr_cmd:	ipr command struct
6116  *
6117  * This function is invoked by the interrupt handler for
6118  * ops generated by the SCSI mid-layer
6119  *
6120  * Return value:
6121  * 	none
6122  **/
6123 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6124 {
6125 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6126 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6127 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6128 	unsigned long hrrq_flags;
6129 
6130 	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6131 
6132 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6133 		scsi_dma_unmap(scsi_cmd);
6134 
6135 		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6136 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6137 		scsi_cmd->scsi_done(scsi_cmd);
6138 		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6139 	} else {
6140 		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6141 		ipr_erp_start(ioa_cfg, ipr_cmd);
6142 		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6143 	}
6144 }
6145 
6146 /**
6147  * ipr_queuecommand - Queue a mid-layer request
6148  * @shost:		scsi host struct
6149  * @scsi_cmd:	scsi command struct
6150  *
6151  * This function queues a request generated by the mid-layer.
6152  *
6153  * Return value:
6154  *	0 on success
6155  *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6156  *	SCSI_MLQUEUE_HOST_BUSY if host is busy
6157  **/
6158 static int ipr_queuecommand(struct Scsi_Host *shost,
6159 			    struct scsi_cmnd *scsi_cmd)
6160 {
6161 	struct ipr_ioa_cfg *ioa_cfg;
6162 	struct ipr_resource_entry *res;
6163 	struct ipr_ioarcb *ioarcb;
6164 	struct ipr_cmnd *ipr_cmd;
6165 	unsigned long hrrq_flags, lock_flags;
6166 	int rc;
6167 	struct ipr_hrr_queue *hrrq;
6168 	int hrrq_id;
6169 
6170 	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6171 
6172 	scsi_cmd->result = (DID_OK << 16);
6173 	res = scsi_cmd->device->hostdata;
6174 
6175 	if (ipr_is_gata(res) && res->sata_port) {
6176 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6177 		rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6178 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6179 		return rc;
6180 	}
6181 
6182 	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6183 	hrrq = &ioa_cfg->hrrq[hrrq_id];
6184 
6185 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
6186 	/*
6187 	 * We are currently blocking all devices due to a host reset
6188 	 * We have told the host to stop giving us new requests, but
6189 	 * ERP ops don't count. FIXME
6190 	 */
6191 	if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6192 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6193 		return SCSI_MLQUEUE_HOST_BUSY;
6194 	}
6195 
6196 	/*
6197 	 * FIXME - Create scsi_set_host_offline interface
6198 	 *  and the ioa_is_dead check can be removed
6199 	 */
6200 	if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6201 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6202 		goto err_nodev;
6203 	}
6204 
6205 	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6206 	if (ipr_cmd == NULL) {
6207 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6208 		return SCSI_MLQUEUE_HOST_BUSY;
6209 	}
6210 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6211 
6212 	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6213 	ioarcb = &ipr_cmd->ioarcb;
6214 
6215 	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6216 	ipr_cmd->scsi_cmd = scsi_cmd;
6217 	ipr_cmd->done = ipr_scsi_eh_done;
6218 
6219 	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6220 		if (scsi_cmd->underflow == 0)
6221 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6222 
6223 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6224 		if (ipr_is_gscsi(res) && res->reset_occurred) {
6225 			res->reset_occurred = 0;
6226 			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6227 		}
6228 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6229 		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
6230 	}
6231 
6232 	if (scsi_cmd->cmnd[0] >= 0xC0 &&
6233 	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6234 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6235 	}
6236 
6237 	if (ioa_cfg->sis64)
6238 		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6239 	else
6240 		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6241 
6242 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
6243 	if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6244 		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6245 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6246 		if (!rc)
6247 			scsi_dma_unmap(scsi_cmd);
6248 		return SCSI_MLQUEUE_HOST_BUSY;
6249 	}
6250 
6251 	if (unlikely(hrrq->ioa_is_dead)) {
6252 		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6253 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6254 		scsi_dma_unmap(scsi_cmd);
6255 		goto err_nodev;
6256 	}
6257 
6258 	ioarcb->res_handle = res->res_handle;
6259 	if (res->needs_sync_complete) {
6260 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6261 		res->needs_sync_complete = 0;
6262 	}
6263 	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6264 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6265 	ipr_send_command(ipr_cmd);
6266 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6267 	return 0;
6268 
6269 err_nodev:
6270 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
6271 	memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6272 	scsi_cmd->result = (DID_NO_CONNECT << 16);
6273 	scsi_cmd->scsi_done(scsi_cmd);
6274 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6275 	return 0;
6276 }
6277 
6278 /**
6279  * ipr_ioctl - IOCTL handler
6280  * @sdev:	scsi device struct
6281  * @cmd:	IOCTL cmd
6282  * @arg:	IOCTL arg
6283  *
6284  * Return value:
6285  * 	0 on success / other on failure
6286  **/
6287 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6288 {
6289 	struct ipr_resource_entry *res;
6290 
6291 	res = (struct ipr_resource_entry *)sdev->hostdata;
6292 	if (res && ipr_is_gata(res)) {
6293 		if (cmd == HDIO_GET_IDENTITY)
6294 			return -ENOTTY;
6295 		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6296 	}
6297 
6298 	return -EINVAL;
6299 }
6300 
6301 /**
6302  * ipr_info - Get information about the card/driver
6303  * @scsi_host:	scsi host struct
6304  *
6305  * Return value:
6306  * 	pointer to buffer with description string
6307  **/
6308 static const char *ipr_ioa_info(struct Scsi_Host *host)
6309 {
6310 	static char buffer[512];
6311 	struct ipr_ioa_cfg *ioa_cfg;
6312 	unsigned long lock_flags = 0;
6313 
6314 	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6315 
6316 	spin_lock_irqsave(host->host_lock, lock_flags);
6317 	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6318 	spin_unlock_irqrestore(host->host_lock, lock_flags);
6319 
6320 	return buffer;
6321 }
6322 
6323 static struct scsi_host_template driver_template = {
6324 	.module = THIS_MODULE,
6325 	.name = "IPR",
6326 	.info = ipr_ioa_info,
6327 	.ioctl = ipr_ioctl,
6328 	.queuecommand = ipr_queuecommand,
6329 	.eh_abort_handler = ipr_eh_abort,
6330 	.eh_device_reset_handler = ipr_eh_dev_reset,
6331 	.eh_host_reset_handler = ipr_eh_host_reset,
6332 	.slave_alloc = ipr_slave_alloc,
6333 	.slave_configure = ipr_slave_configure,
6334 	.slave_destroy = ipr_slave_destroy,
6335 	.target_alloc = ipr_target_alloc,
6336 	.target_destroy = ipr_target_destroy,
6337 	.change_queue_depth = ipr_change_queue_depth,
6338 	.change_queue_type = ipr_change_queue_type,
6339 	.bios_param = ipr_biosparam,
6340 	.can_queue = IPR_MAX_COMMANDS,
6341 	.this_id = -1,
6342 	.sg_tablesize = IPR_MAX_SGLIST,
6343 	.max_sectors = IPR_IOA_MAX_SECTORS,
6344 	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6345 	.use_clustering = ENABLE_CLUSTERING,
6346 	.shost_attrs = ipr_ioa_attrs,
6347 	.sdev_attrs = ipr_dev_attrs,
6348 	.proc_name = IPR_NAME,
6349 	.no_write_same = 1,
6350 };
6351 
6352 /**
6353  * ipr_ata_phy_reset - libata phy_reset handler
6354  * @ap:		ata port to reset
6355  *
6356  **/
6357 static void ipr_ata_phy_reset(struct ata_port *ap)
6358 {
6359 	unsigned long flags;
6360 	struct ipr_sata_port *sata_port = ap->private_data;
6361 	struct ipr_resource_entry *res = sata_port->res;
6362 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6363 	int rc;
6364 
6365 	ENTER;
6366 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6367 	while (ioa_cfg->in_reset_reload) {
6368 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6369 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6370 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6371 	}
6372 
6373 	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6374 		goto out_unlock;
6375 
6376 	rc = ipr_device_reset(ioa_cfg, res);
6377 
6378 	if (rc) {
6379 		ap->link.device[0].class = ATA_DEV_NONE;
6380 		goto out_unlock;
6381 	}
6382 
6383 	ap->link.device[0].class = res->ata_class;
6384 	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6385 		ap->link.device[0].class = ATA_DEV_NONE;
6386 
6387 out_unlock:
6388 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6389 	LEAVE;
6390 }
6391 
6392 /**
6393  * ipr_ata_post_internal - Cleanup after an internal command
6394  * @qc:	ATA queued command
6395  *
6396  * Return value:
6397  * 	none
6398  **/
6399 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6400 {
6401 	struct ipr_sata_port *sata_port = qc->ap->private_data;
6402 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6403 	struct ipr_cmnd *ipr_cmd;
6404 	struct ipr_hrr_queue *hrrq;
6405 	unsigned long flags;
6406 
6407 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6408 	while (ioa_cfg->in_reset_reload) {
6409 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6410 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6411 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6412 	}
6413 
6414 	for_each_hrrq(hrrq, ioa_cfg) {
6415 		spin_lock(&hrrq->_lock);
6416 		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6417 			if (ipr_cmd->qc == qc) {
6418 				ipr_device_reset(ioa_cfg, sata_port->res);
6419 				break;
6420 			}
6421 		}
6422 		spin_unlock(&hrrq->_lock);
6423 	}
6424 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6425 }
6426 
6427 /**
6428  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6429  * @regs:	destination
6430  * @tf:	source ATA taskfile
6431  *
6432  * Return value:
6433  * 	none
6434  **/
6435 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6436 			     struct ata_taskfile *tf)
6437 {
6438 	regs->feature = tf->feature;
6439 	regs->nsect = tf->nsect;
6440 	regs->lbal = tf->lbal;
6441 	regs->lbam = tf->lbam;
6442 	regs->lbah = tf->lbah;
6443 	regs->device = tf->device;
6444 	regs->command = tf->command;
6445 	regs->hob_feature = tf->hob_feature;
6446 	regs->hob_nsect = tf->hob_nsect;
6447 	regs->hob_lbal = tf->hob_lbal;
6448 	regs->hob_lbam = tf->hob_lbam;
6449 	regs->hob_lbah = tf->hob_lbah;
6450 	regs->ctl = tf->ctl;
6451 }
6452 
6453 /**
6454  * ipr_sata_done - done function for SATA commands
6455  * @ipr_cmd:	ipr command struct
6456  *
6457  * This function is invoked by the interrupt handler for
6458  * ops generated by the SCSI mid-layer to SATA devices
6459  *
6460  * Return value:
6461  * 	none
6462  **/
6463 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6464 {
6465 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6466 	struct ata_queued_cmd *qc = ipr_cmd->qc;
6467 	struct ipr_sata_port *sata_port = qc->ap->private_data;
6468 	struct ipr_resource_entry *res = sata_port->res;
6469 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6470 
6471 	spin_lock(&ipr_cmd->hrrq->_lock);
6472 	if (ipr_cmd->ioa_cfg->sis64)
6473 		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6474 		       sizeof(struct ipr_ioasa_gata));
6475 	else
6476 		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6477 		       sizeof(struct ipr_ioasa_gata));
6478 	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6479 
6480 	if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6481 		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6482 
6483 	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6484 		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6485 	else
6486 		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6487 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6488 	spin_unlock(&ipr_cmd->hrrq->_lock);
6489 	ata_qc_complete(qc);
6490 }
6491 
6492 /**
6493  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6494  * @ipr_cmd:	ipr command struct
6495  * @qc:		ATA queued command
6496  *
6497  **/
6498 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6499 				  struct ata_queued_cmd *qc)
6500 {
6501 	u32 ioadl_flags = 0;
6502 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6503 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6504 	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6505 	int len = qc->nbytes;
6506 	struct scatterlist *sg;
6507 	unsigned int si;
6508 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
6509 
6510 	if (len == 0)
6511 		return;
6512 
6513 	if (qc->dma_dir == DMA_TO_DEVICE) {
6514 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6515 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6516 	} else if (qc->dma_dir == DMA_FROM_DEVICE)
6517 		ioadl_flags = IPR_IOADL_FLAGS_READ;
6518 
6519 	ioarcb->data_transfer_length = cpu_to_be32(len);
6520 	ioarcb->ioadl_len =
6521 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6522 	ioarcb->u.sis64_addr_data.data_ioadl_addr =
6523 		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6524 
6525 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
6526 		ioadl64->flags = cpu_to_be32(ioadl_flags);
6527 		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6528 		ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6529 
6530 		last_ioadl64 = ioadl64;
6531 		ioadl64++;
6532 	}
6533 
6534 	if (likely(last_ioadl64))
6535 		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6536 }
6537 
6538 /**
6539  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6540  * @ipr_cmd:	ipr command struct
6541  * @qc:		ATA queued command
6542  *
6543  **/
6544 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6545 				struct ata_queued_cmd *qc)
6546 {
6547 	u32 ioadl_flags = 0;
6548 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6549 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6550 	struct ipr_ioadl_desc *last_ioadl = NULL;
6551 	int len = qc->nbytes;
6552 	struct scatterlist *sg;
6553 	unsigned int si;
6554 
6555 	if (len == 0)
6556 		return;
6557 
6558 	if (qc->dma_dir == DMA_TO_DEVICE) {
6559 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6560 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6561 		ioarcb->data_transfer_length = cpu_to_be32(len);
6562 		ioarcb->ioadl_len =
6563 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6564 	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
6565 		ioadl_flags = IPR_IOADL_FLAGS_READ;
6566 		ioarcb->read_data_transfer_length = cpu_to_be32(len);
6567 		ioarcb->read_ioadl_len =
6568 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6569 	}
6570 
6571 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
6572 		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6573 		ioadl->address = cpu_to_be32(sg_dma_address(sg));
6574 
6575 		last_ioadl = ioadl;
6576 		ioadl++;
6577 	}
6578 
6579 	if (likely(last_ioadl))
6580 		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6581 }
6582 
6583 /**
6584  * ipr_qc_defer - Get a free ipr_cmd
6585  * @qc:	queued command
6586  *
6587  * Return value:
6588  *	0 if success
6589  **/
6590 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6591 {
6592 	struct ata_port *ap = qc->ap;
6593 	struct ipr_sata_port *sata_port = ap->private_data;
6594 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6595 	struct ipr_cmnd *ipr_cmd;
6596 	struct ipr_hrr_queue *hrrq;
6597 	int hrrq_id;
6598 
6599 	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6600 	hrrq = &ioa_cfg->hrrq[hrrq_id];
6601 
6602 	qc->lldd_task = NULL;
6603 	spin_lock(&hrrq->_lock);
6604 	if (unlikely(hrrq->ioa_is_dead)) {
6605 		spin_unlock(&hrrq->_lock);
6606 		return 0;
6607 	}
6608 
6609 	if (unlikely(!hrrq->allow_cmds)) {
6610 		spin_unlock(&hrrq->_lock);
6611 		return ATA_DEFER_LINK;
6612 	}
6613 
6614 	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6615 	if (ipr_cmd == NULL) {
6616 		spin_unlock(&hrrq->_lock);
6617 		return ATA_DEFER_LINK;
6618 	}
6619 
6620 	qc->lldd_task = ipr_cmd;
6621 	spin_unlock(&hrrq->_lock);
6622 	return 0;
6623 }
6624 
6625 /**
6626  * ipr_qc_issue - Issue a SATA qc to a device
6627  * @qc:	queued command
6628  *
6629  * Return value:
6630  * 	0 if success
6631  **/
6632 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6633 {
6634 	struct ata_port *ap = qc->ap;
6635 	struct ipr_sata_port *sata_port = ap->private_data;
6636 	struct ipr_resource_entry *res = sata_port->res;
6637 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6638 	struct ipr_cmnd *ipr_cmd;
6639 	struct ipr_ioarcb *ioarcb;
6640 	struct ipr_ioarcb_ata_regs *regs;
6641 
6642 	if (qc->lldd_task == NULL)
6643 		ipr_qc_defer(qc);
6644 
6645 	ipr_cmd = qc->lldd_task;
6646 	if (ipr_cmd == NULL)
6647 		return AC_ERR_SYSTEM;
6648 
6649 	qc->lldd_task = NULL;
6650 	spin_lock(&ipr_cmd->hrrq->_lock);
6651 	if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6652 			ipr_cmd->hrrq->ioa_is_dead)) {
6653 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6654 		spin_unlock(&ipr_cmd->hrrq->_lock);
6655 		return AC_ERR_SYSTEM;
6656 	}
6657 
6658 	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6659 	ioarcb = &ipr_cmd->ioarcb;
6660 
6661 	if (ioa_cfg->sis64) {
6662 		regs = &ipr_cmd->i.ata_ioadl.regs;
6663 		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6664 	} else
6665 		regs = &ioarcb->u.add_data.u.regs;
6666 
6667 	memset(regs, 0, sizeof(*regs));
6668 	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6669 
6670 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6671 	ipr_cmd->qc = qc;
6672 	ipr_cmd->done = ipr_sata_done;
6673 	ipr_cmd->ioarcb.res_handle = res->res_handle;
6674 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6675 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6676 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6677 	ipr_cmd->dma_use_sg = qc->n_elem;
6678 
6679 	if (ioa_cfg->sis64)
6680 		ipr_build_ata_ioadl64(ipr_cmd, qc);
6681 	else
6682 		ipr_build_ata_ioadl(ipr_cmd, qc);
6683 
6684 	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6685 	ipr_copy_sata_tf(regs, &qc->tf);
6686 	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6687 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6688 
6689 	switch (qc->tf.protocol) {
6690 	case ATA_PROT_NODATA:
6691 	case ATA_PROT_PIO:
6692 		break;
6693 
6694 	case ATA_PROT_DMA:
6695 		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6696 		break;
6697 
6698 	case ATAPI_PROT_PIO:
6699 	case ATAPI_PROT_NODATA:
6700 		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6701 		break;
6702 
6703 	case ATAPI_PROT_DMA:
6704 		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6705 		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6706 		break;
6707 
6708 	default:
6709 		WARN_ON(1);
6710 		spin_unlock(&ipr_cmd->hrrq->_lock);
6711 		return AC_ERR_INVALID;
6712 	}
6713 
6714 	ipr_send_command(ipr_cmd);
6715 	spin_unlock(&ipr_cmd->hrrq->_lock);
6716 
6717 	return 0;
6718 }
6719 
6720 /**
6721  * ipr_qc_fill_rtf - Read result TF
6722  * @qc: ATA queued command
6723  *
6724  * Return value:
6725  * 	true
6726  **/
6727 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6728 {
6729 	struct ipr_sata_port *sata_port = qc->ap->private_data;
6730 	struct ipr_ioasa_gata *g = &sata_port->ioasa;
6731 	struct ata_taskfile *tf = &qc->result_tf;
6732 
6733 	tf->feature = g->error;
6734 	tf->nsect = g->nsect;
6735 	tf->lbal = g->lbal;
6736 	tf->lbam = g->lbam;
6737 	tf->lbah = g->lbah;
6738 	tf->device = g->device;
6739 	tf->command = g->status;
6740 	tf->hob_nsect = g->hob_nsect;
6741 	tf->hob_lbal = g->hob_lbal;
6742 	tf->hob_lbam = g->hob_lbam;
6743 	tf->hob_lbah = g->hob_lbah;
6744 
6745 	return true;
6746 }
6747 
6748 static struct ata_port_operations ipr_sata_ops = {
6749 	.phy_reset = ipr_ata_phy_reset,
6750 	.hardreset = ipr_sata_reset,
6751 	.post_internal_cmd = ipr_ata_post_internal,
6752 	.qc_prep = ata_noop_qc_prep,
6753 	.qc_defer = ipr_qc_defer,
6754 	.qc_issue = ipr_qc_issue,
6755 	.qc_fill_rtf = ipr_qc_fill_rtf,
6756 	.port_start = ata_sas_port_start,
6757 	.port_stop = ata_sas_port_stop
6758 };
6759 
6760 static struct ata_port_info sata_port_info = {
6761 	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6762 	.pio_mask	= ATA_PIO4_ONLY,
6763 	.mwdma_mask	= ATA_MWDMA2,
6764 	.udma_mask	= ATA_UDMA6,
6765 	.port_ops	= &ipr_sata_ops
6766 };
6767 
6768 #ifdef CONFIG_PPC_PSERIES
6769 static const u16 ipr_blocked_processors[] = {
6770 	PVR_NORTHSTAR,
6771 	PVR_PULSAR,
6772 	PVR_POWER4,
6773 	PVR_ICESTAR,
6774 	PVR_SSTAR,
6775 	PVR_POWER4p,
6776 	PVR_630,
6777 	PVR_630p
6778 };
6779 
6780 /**
6781  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6782  * @ioa_cfg:	ioa cfg struct
6783  *
6784  * Adapters that use Gemstone revision < 3.1 do not work reliably on
6785  * certain pSeries hardware. This function determines if the given
6786  * adapter is in one of these confgurations or not.
6787  *
6788  * Return value:
6789  * 	1 if adapter is not supported / 0 if adapter is supported
6790  **/
6791 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6792 {
6793 	int i;
6794 
6795 	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6796 		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6797 			if (pvr_version_is(ipr_blocked_processors[i]))
6798 				return 1;
6799 		}
6800 	}
6801 	return 0;
6802 }
6803 #else
6804 #define ipr_invalid_adapter(ioa_cfg) 0
6805 #endif
6806 
6807 /**
6808  * ipr_ioa_bringdown_done - IOA bring down completion.
6809  * @ipr_cmd:	ipr command struct
6810  *
6811  * This function processes the completion of an adapter bring down.
6812  * It wakes any reset sleepers.
6813  *
6814  * Return value:
6815  * 	IPR_RC_JOB_RETURN
6816  **/
6817 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6818 {
6819 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6820 	int i;
6821 
6822 	ENTER;
6823 	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6824 		ipr_trace;
6825 		spin_unlock_irq(ioa_cfg->host->host_lock);
6826 		scsi_unblock_requests(ioa_cfg->host);
6827 		spin_lock_irq(ioa_cfg->host->host_lock);
6828 	}
6829 
6830 	ioa_cfg->in_reset_reload = 0;
6831 	ioa_cfg->reset_retries = 0;
6832 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6833 		spin_lock(&ioa_cfg->hrrq[i]._lock);
6834 		ioa_cfg->hrrq[i].ioa_is_dead = 1;
6835 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
6836 	}
6837 	wmb();
6838 
6839 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6840 	wake_up_all(&ioa_cfg->reset_wait_q);
6841 	LEAVE;
6842 
6843 	return IPR_RC_JOB_RETURN;
6844 }
6845 
6846 /**
6847  * ipr_ioa_reset_done - IOA reset completion.
6848  * @ipr_cmd:	ipr command struct
6849  *
6850  * This function processes the completion of an adapter reset.
6851  * It schedules any necessary mid-layer add/removes and
6852  * wakes any reset sleepers.
6853  *
6854  * Return value:
6855  * 	IPR_RC_JOB_RETURN
6856  **/
6857 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6858 {
6859 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6860 	struct ipr_resource_entry *res;
6861 	struct ipr_hostrcb *hostrcb, *temp;
6862 	int i = 0, j;
6863 
6864 	ENTER;
6865 	ioa_cfg->in_reset_reload = 0;
6866 	for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6867 		spin_lock(&ioa_cfg->hrrq[j]._lock);
6868 		ioa_cfg->hrrq[j].allow_cmds = 1;
6869 		spin_unlock(&ioa_cfg->hrrq[j]._lock);
6870 	}
6871 	wmb();
6872 	ioa_cfg->reset_cmd = NULL;
6873 	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6874 
6875 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6876 		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6877 			ipr_trace;
6878 			break;
6879 		}
6880 	}
6881 	schedule_work(&ioa_cfg->work_q);
6882 
6883 	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6884 		list_del(&hostrcb->queue);
6885 		if (i++ < IPR_NUM_LOG_HCAMS)
6886 			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6887 		else
6888 			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6889 	}
6890 
6891 	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6892 	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6893 
6894 	ioa_cfg->reset_retries = 0;
6895 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6896 	wake_up_all(&ioa_cfg->reset_wait_q);
6897 
6898 	spin_unlock(ioa_cfg->host->host_lock);
6899 	scsi_unblock_requests(ioa_cfg->host);
6900 	spin_lock(ioa_cfg->host->host_lock);
6901 
6902 	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6903 		scsi_block_requests(ioa_cfg->host);
6904 
6905 	LEAVE;
6906 	return IPR_RC_JOB_RETURN;
6907 }
6908 
6909 /**
6910  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6911  * @supported_dev:	supported device struct
6912  * @vpids:			vendor product id struct
6913  *
6914  * Return value:
6915  * 	none
6916  **/
6917 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6918 				 struct ipr_std_inq_vpids *vpids)
6919 {
6920 	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6921 	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6922 	supported_dev->num_records = 1;
6923 	supported_dev->data_length =
6924 		cpu_to_be16(sizeof(struct ipr_supported_device));
6925 	supported_dev->reserved = 0;
6926 }
6927 
6928 /**
6929  * ipr_set_supported_devs - Send Set Supported Devices for a device
6930  * @ipr_cmd:	ipr command struct
6931  *
6932  * This function sends a Set Supported Devices to the adapter
6933  *
6934  * Return value:
6935  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6936  **/
6937 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6938 {
6939 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6940 	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6941 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6942 	struct ipr_resource_entry *res = ipr_cmd->u.res;
6943 
6944 	ipr_cmd->job_step = ipr_ioa_reset_done;
6945 
6946 	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6947 		if (!ipr_is_scsi_disk(res))
6948 			continue;
6949 
6950 		ipr_cmd->u.res = res;
6951 		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6952 
6953 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6954 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6955 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6956 
6957 		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6958 		ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6959 		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6960 		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6961 
6962 		ipr_init_ioadl(ipr_cmd,
6963 			       ioa_cfg->vpd_cbs_dma +
6964 				 offsetof(struct ipr_misc_cbs, supp_dev),
6965 			       sizeof(struct ipr_supported_device),
6966 			       IPR_IOADL_FLAGS_WRITE_LAST);
6967 
6968 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6969 			   IPR_SET_SUP_DEVICE_TIMEOUT);
6970 
6971 		if (!ioa_cfg->sis64)
6972 			ipr_cmd->job_step = ipr_set_supported_devs;
6973 		LEAVE;
6974 		return IPR_RC_JOB_RETURN;
6975 	}
6976 
6977 	LEAVE;
6978 	return IPR_RC_JOB_CONTINUE;
6979 }
6980 
6981 /**
6982  * ipr_get_mode_page - Locate specified mode page
6983  * @mode_pages:	mode page buffer
6984  * @page_code:	page code to find
6985  * @len:		minimum required length for mode page
6986  *
6987  * Return value:
6988  * 	pointer to mode page / NULL on failure
6989  **/
6990 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6991 			       u32 page_code, u32 len)
6992 {
6993 	struct ipr_mode_page_hdr *mode_hdr;
6994 	u32 page_length;
6995 	u32 length;
6996 
6997 	if (!mode_pages || (mode_pages->hdr.length == 0))
6998 		return NULL;
6999 
7000 	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7001 	mode_hdr = (struct ipr_mode_page_hdr *)
7002 		(mode_pages->data + mode_pages->hdr.block_desc_len);
7003 
7004 	while (length) {
7005 		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7006 			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7007 				return mode_hdr;
7008 			break;
7009 		} else {
7010 			page_length = (sizeof(struct ipr_mode_page_hdr) +
7011 				       mode_hdr->page_length);
7012 			length -= page_length;
7013 			mode_hdr = (struct ipr_mode_page_hdr *)
7014 				((unsigned long)mode_hdr + page_length);
7015 		}
7016 	}
7017 	return NULL;
7018 }
7019 
7020 /**
7021  * ipr_check_term_power - Check for term power errors
7022  * @ioa_cfg:	ioa config struct
7023  * @mode_pages:	IOAFP mode pages buffer
7024  *
7025  * Check the IOAFP's mode page 28 for term power errors
7026  *
7027  * Return value:
7028  * 	nothing
7029  **/
7030 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7031 				 struct ipr_mode_pages *mode_pages)
7032 {
7033 	int i;
7034 	int entry_length;
7035 	struct ipr_dev_bus_entry *bus;
7036 	struct ipr_mode_page28 *mode_page;
7037 
7038 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
7039 				      sizeof(struct ipr_mode_page28));
7040 
7041 	entry_length = mode_page->entry_length;
7042 
7043 	bus = mode_page->bus;
7044 
7045 	for (i = 0; i < mode_page->num_entries; i++) {
7046 		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7047 			dev_err(&ioa_cfg->pdev->dev,
7048 				"Term power is absent on scsi bus %d\n",
7049 				bus->res_addr.bus);
7050 		}
7051 
7052 		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7053 	}
7054 }
7055 
7056 /**
7057  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7058  * @ioa_cfg:	ioa config struct
7059  *
7060  * Looks through the config table checking for SES devices. If
7061  * the SES device is in the SES table indicating a maximum SCSI
7062  * bus speed, the speed is limited for the bus.
7063  *
7064  * Return value:
7065  * 	none
7066  **/
7067 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7068 {
7069 	u32 max_xfer_rate;
7070 	int i;
7071 
7072 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7073 		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7074 						       ioa_cfg->bus_attr[i].bus_width);
7075 
7076 		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7077 			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7078 	}
7079 }
7080 
7081 /**
7082  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7083  * @ioa_cfg:	ioa config struct
7084  * @mode_pages:	mode page 28 buffer
7085  *
7086  * Updates mode page 28 based on driver configuration
7087  *
7088  * Return value:
7089  * 	none
7090  **/
7091 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7092 					  struct ipr_mode_pages *mode_pages)
7093 {
7094 	int i, entry_length;
7095 	struct ipr_dev_bus_entry *bus;
7096 	struct ipr_bus_attributes *bus_attr;
7097 	struct ipr_mode_page28 *mode_page;
7098 
7099 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
7100 				      sizeof(struct ipr_mode_page28));
7101 
7102 	entry_length = mode_page->entry_length;
7103 
7104 	/* Loop for each device bus entry */
7105 	for (i = 0, bus = mode_page->bus;
7106 	     i < mode_page->num_entries;
7107 	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7108 		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7109 			dev_err(&ioa_cfg->pdev->dev,
7110 				"Invalid resource address reported: 0x%08X\n",
7111 				IPR_GET_PHYS_LOC(bus->res_addr));
7112 			continue;
7113 		}
7114 
7115 		bus_attr = &ioa_cfg->bus_attr[i];
7116 		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7117 		bus->bus_width = bus_attr->bus_width;
7118 		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7119 		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7120 		if (bus_attr->qas_enabled)
7121 			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7122 		else
7123 			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7124 	}
7125 }
7126 
7127 /**
7128  * ipr_build_mode_select - Build a mode select command
7129  * @ipr_cmd:	ipr command struct
7130  * @res_handle:	resource handle to send command to
7131  * @parm:		Byte 2 of Mode Sense command
7132  * @dma_addr:	DMA buffer address
7133  * @xfer_len:	data transfer length
7134  *
7135  * Return value:
7136  * 	none
7137  **/
7138 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7139 				  __be32 res_handle, u8 parm,
7140 				  dma_addr_t dma_addr, u8 xfer_len)
7141 {
7142 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7143 
7144 	ioarcb->res_handle = res_handle;
7145 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7146 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7147 	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7148 	ioarcb->cmd_pkt.cdb[1] = parm;
7149 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
7150 
7151 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7152 }
7153 
7154 /**
7155  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7156  * @ipr_cmd:	ipr command struct
7157  *
7158  * This function sets up the SCSI bus attributes and sends
7159  * a Mode Select for Page 28 to activate them.
7160  *
7161  * Return value:
7162  * 	IPR_RC_JOB_RETURN
7163  **/
7164 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7165 {
7166 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7167 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7168 	int length;
7169 
7170 	ENTER;
7171 	ipr_scsi_bus_speed_limit(ioa_cfg);
7172 	ipr_check_term_power(ioa_cfg, mode_pages);
7173 	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7174 	length = mode_pages->hdr.length + 1;
7175 	mode_pages->hdr.length = 0;
7176 
7177 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7178 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7179 			      length);
7180 
7181 	ipr_cmd->job_step = ipr_set_supported_devs;
7182 	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7183 				    struct ipr_resource_entry, queue);
7184 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7185 
7186 	LEAVE;
7187 	return IPR_RC_JOB_RETURN;
7188 }
7189 
7190 /**
7191  * ipr_build_mode_sense - Builds a mode sense command
7192  * @ipr_cmd:	ipr command struct
7193  * @res:		resource entry struct
7194  * @parm:		Byte 2 of mode sense command
7195  * @dma_addr:	DMA address of mode sense buffer
7196  * @xfer_len:	Size of DMA buffer
7197  *
7198  * Return value:
7199  * 	none
7200  **/
7201 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7202 				 __be32 res_handle,
7203 				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7204 {
7205 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7206 
7207 	ioarcb->res_handle = res_handle;
7208 	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7209 	ioarcb->cmd_pkt.cdb[2] = parm;
7210 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
7211 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7212 
7213 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7214 }
7215 
7216 /**
7217  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7218  * @ipr_cmd:	ipr command struct
7219  *
7220  * This function handles the failure of an IOA bringup command.
7221  *
7222  * Return value:
7223  * 	IPR_RC_JOB_RETURN
7224  **/
7225 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7226 {
7227 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7228 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7229 
7230 	dev_err(&ioa_cfg->pdev->dev,
7231 		"0x%02X failed with IOASC: 0x%08X\n",
7232 		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7233 
7234 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7235 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7236 	return IPR_RC_JOB_RETURN;
7237 }
7238 
7239 /**
7240  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7241  * @ipr_cmd:	ipr command struct
7242  *
7243  * This function handles the failure of a Mode Sense to the IOAFP.
7244  * Some adapters do not handle all mode pages.
7245  *
7246  * Return value:
7247  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7248  **/
7249 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7250 {
7251 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7252 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7253 
7254 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7255 		ipr_cmd->job_step = ipr_set_supported_devs;
7256 		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7257 					    struct ipr_resource_entry, queue);
7258 		return IPR_RC_JOB_CONTINUE;
7259 	}
7260 
7261 	return ipr_reset_cmd_failed(ipr_cmd);
7262 }
7263 
7264 /**
7265  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7266  * @ipr_cmd:	ipr command struct
7267  *
7268  * This function send a Page 28 mode sense to the IOA to
7269  * retrieve SCSI bus attributes.
7270  *
7271  * Return value:
7272  * 	IPR_RC_JOB_RETURN
7273  **/
7274 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7275 {
7276 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7277 
7278 	ENTER;
7279 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7280 			     0x28, ioa_cfg->vpd_cbs_dma +
7281 			     offsetof(struct ipr_misc_cbs, mode_pages),
7282 			     sizeof(struct ipr_mode_pages));
7283 
7284 	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7285 	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7286 
7287 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7288 
7289 	LEAVE;
7290 	return IPR_RC_JOB_RETURN;
7291 }
7292 
7293 /**
7294  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7295  * @ipr_cmd:	ipr command struct
7296  *
7297  * This function enables dual IOA RAID support if possible.
7298  *
7299  * Return value:
7300  * 	IPR_RC_JOB_RETURN
7301  **/
7302 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7303 {
7304 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7305 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7306 	struct ipr_mode_page24 *mode_page;
7307 	int length;
7308 
7309 	ENTER;
7310 	mode_page = ipr_get_mode_page(mode_pages, 0x24,
7311 				      sizeof(struct ipr_mode_page24));
7312 
7313 	if (mode_page)
7314 		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7315 
7316 	length = mode_pages->hdr.length + 1;
7317 	mode_pages->hdr.length = 0;
7318 
7319 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7320 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7321 			      length);
7322 
7323 	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7324 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7325 
7326 	LEAVE;
7327 	return IPR_RC_JOB_RETURN;
7328 }
7329 
7330 /**
7331  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7332  * @ipr_cmd:	ipr command struct
7333  *
7334  * This function handles the failure of a Mode Sense to the IOAFP.
7335  * Some adapters do not handle all mode pages.
7336  *
7337  * Return value:
7338  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7339  **/
7340 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7341 {
7342 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7343 
7344 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7345 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7346 		return IPR_RC_JOB_CONTINUE;
7347 	}
7348 
7349 	return ipr_reset_cmd_failed(ipr_cmd);
7350 }
7351 
7352 /**
7353  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7354  * @ipr_cmd:	ipr command struct
7355  *
7356  * This function send a mode sense to the IOA to retrieve
7357  * the IOA Advanced Function Control mode page.
7358  *
7359  * Return value:
7360  * 	IPR_RC_JOB_RETURN
7361  **/
7362 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7363 {
7364 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7365 
7366 	ENTER;
7367 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7368 			     0x24, ioa_cfg->vpd_cbs_dma +
7369 			     offsetof(struct ipr_misc_cbs, mode_pages),
7370 			     sizeof(struct ipr_mode_pages));
7371 
7372 	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7373 	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7374 
7375 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7376 
7377 	LEAVE;
7378 	return IPR_RC_JOB_RETURN;
7379 }
7380 
7381 /**
7382  * ipr_init_res_table - Initialize the resource table
7383  * @ipr_cmd:	ipr command struct
7384  *
7385  * This function looks through the existing resource table, comparing
7386  * it with the config table. This function will take care of old/new
7387  * devices and schedule adding/removing them from the mid-layer
7388  * as appropriate.
7389  *
7390  * Return value:
7391  * 	IPR_RC_JOB_CONTINUE
7392  **/
7393 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7394 {
7395 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7396 	struct ipr_resource_entry *res, *temp;
7397 	struct ipr_config_table_entry_wrapper cfgtew;
7398 	int entries, found, flag, i;
7399 	LIST_HEAD(old_res);
7400 
7401 	ENTER;
7402 	if (ioa_cfg->sis64)
7403 		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7404 	else
7405 		flag = ioa_cfg->u.cfg_table->hdr.flags;
7406 
7407 	if (flag & IPR_UCODE_DOWNLOAD_REQ)
7408 		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7409 
7410 	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7411 		list_move_tail(&res->queue, &old_res);
7412 
7413 	if (ioa_cfg->sis64)
7414 		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7415 	else
7416 		entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7417 
7418 	for (i = 0; i < entries; i++) {
7419 		if (ioa_cfg->sis64)
7420 			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7421 		else
7422 			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7423 		found = 0;
7424 
7425 		list_for_each_entry_safe(res, temp, &old_res, queue) {
7426 			if (ipr_is_same_device(res, &cfgtew)) {
7427 				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7428 				found = 1;
7429 				break;
7430 			}
7431 		}
7432 
7433 		if (!found) {
7434 			if (list_empty(&ioa_cfg->free_res_q)) {
7435 				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7436 				break;
7437 			}
7438 
7439 			found = 1;
7440 			res = list_entry(ioa_cfg->free_res_q.next,
7441 					 struct ipr_resource_entry, queue);
7442 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7443 			ipr_init_res_entry(res, &cfgtew);
7444 			res->add_to_ml = 1;
7445 		} else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7446 			res->sdev->allow_restart = 1;
7447 
7448 		if (found)
7449 			ipr_update_res_entry(res, &cfgtew);
7450 	}
7451 
7452 	list_for_each_entry_safe(res, temp, &old_res, queue) {
7453 		if (res->sdev) {
7454 			res->del_from_ml = 1;
7455 			res->res_handle = IPR_INVALID_RES_HANDLE;
7456 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7457 		}
7458 	}
7459 
7460 	list_for_each_entry_safe(res, temp, &old_res, queue) {
7461 		ipr_clear_res_target(res);
7462 		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7463 	}
7464 
7465 	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7466 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7467 	else
7468 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7469 
7470 	LEAVE;
7471 	return IPR_RC_JOB_CONTINUE;
7472 }
7473 
7474 /**
7475  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7476  * @ipr_cmd:	ipr command struct
7477  *
7478  * This function sends a Query IOA Configuration command
7479  * to the adapter to retrieve the IOA configuration table.
7480  *
7481  * Return value:
7482  * 	IPR_RC_JOB_RETURN
7483  **/
7484 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7485 {
7486 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7487 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7488 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7489 	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7490 
7491 	ENTER;
7492 	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7493 		ioa_cfg->dual_raid = 1;
7494 	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7495 		 ucode_vpd->major_release, ucode_vpd->card_type,
7496 		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7497 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7498 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7499 
7500 	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7501 	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7502 	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7503 	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7504 
7505 	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7506 		       IPR_IOADL_FLAGS_READ_LAST);
7507 
7508 	ipr_cmd->job_step = ipr_init_res_table;
7509 
7510 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7511 
7512 	LEAVE;
7513 	return IPR_RC_JOB_RETURN;
7514 }
7515 
7516 /**
7517  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7518  * @ipr_cmd:	ipr command struct
7519  *
7520  * This utility function sends an inquiry to the adapter.
7521  *
7522  * Return value:
7523  * 	none
7524  **/
7525 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7526 			      dma_addr_t dma_addr, u8 xfer_len)
7527 {
7528 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7529 
7530 	ENTER;
7531 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7532 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7533 
7534 	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7535 	ioarcb->cmd_pkt.cdb[1] = flags;
7536 	ioarcb->cmd_pkt.cdb[2] = page;
7537 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
7538 
7539 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7540 
7541 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7542 	LEAVE;
7543 }
7544 
7545 /**
7546  * ipr_inquiry_page_supported - Is the given inquiry page supported
7547  * @page0:		inquiry page 0 buffer
7548  * @page:		page code.
7549  *
7550  * This function determines if the specified inquiry page is supported.
7551  *
7552  * Return value:
7553  *	1 if page is supported / 0 if not
7554  **/
7555 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7556 {
7557 	int i;
7558 
7559 	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7560 		if (page0->page[i] == page)
7561 			return 1;
7562 
7563 	return 0;
7564 }
7565 
7566 /**
7567  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7568  * @ipr_cmd:	ipr command struct
7569  *
7570  * This function sends a Page 0xD0 inquiry to the adapter
7571  * to retrieve adapter capabilities.
7572  *
7573  * Return value:
7574  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7575  **/
7576 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7577 {
7578 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7579 	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7580 	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7581 
7582 	ENTER;
7583 	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7584 	memset(cap, 0, sizeof(*cap));
7585 
7586 	if (ipr_inquiry_page_supported(page0, 0xD0)) {
7587 		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7588 				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7589 				  sizeof(struct ipr_inquiry_cap));
7590 		return IPR_RC_JOB_RETURN;
7591 	}
7592 
7593 	LEAVE;
7594 	return IPR_RC_JOB_CONTINUE;
7595 }
7596 
7597 /**
7598  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7599  * @ipr_cmd:	ipr command struct
7600  *
7601  * This function sends a Page 3 inquiry to the adapter
7602  * to retrieve software VPD information.
7603  *
7604  * Return value:
7605  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7606  **/
7607 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7608 {
7609 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7610 
7611 	ENTER;
7612 
7613 	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7614 
7615 	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7616 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7617 			  sizeof(struct ipr_inquiry_page3));
7618 
7619 	LEAVE;
7620 	return IPR_RC_JOB_RETURN;
7621 }
7622 
7623 /**
7624  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7625  * @ipr_cmd:	ipr command struct
7626  *
7627  * This function sends a Page 0 inquiry to the adapter
7628  * to retrieve supported inquiry pages.
7629  *
7630  * Return value:
7631  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7632  **/
7633 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7634 {
7635 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7636 	char type[5];
7637 
7638 	ENTER;
7639 
7640 	/* Grab the type out of the VPD and store it away */
7641 	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7642 	type[4] = '\0';
7643 	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7644 
7645 	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7646 
7647 	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7648 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7649 			  sizeof(struct ipr_inquiry_page0));
7650 
7651 	LEAVE;
7652 	return IPR_RC_JOB_RETURN;
7653 }
7654 
7655 /**
7656  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7657  * @ipr_cmd:	ipr command struct
7658  *
7659  * This function sends a standard inquiry to the adapter.
7660  *
7661  * Return value:
7662  * 	IPR_RC_JOB_RETURN
7663  **/
7664 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7665 {
7666 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7667 
7668 	ENTER;
7669 	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7670 
7671 	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7672 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7673 			  sizeof(struct ipr_ioa_vpd));
7674 
7675 	LEAVE;
7676 	return IPR_RC_JOB_RETURN;
7677 }
7678 
7679 /**
7680  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7681  * @ipr_cmd:	ipr command struct
7682  *
7683  * This function send an Identify Host Request Response Queue
7684  * command to establish the HRRQ with the adapter.
7685  *
7686  * Return value:
7687  * 	IPR_RC_JOB_RETURN
7688  **/
7689 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7690 {
7691 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7692 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7693 	struct ipr_hrr_queue *hrrq;
7694 
7695 	ENTER;
7696 	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7697 	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7698 
7699 	if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7700 		hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7701 
7702 		ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7703 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7704 
7705 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7706 		if (ioa_cfg->sis64)
7707 			ioarcb->cmd_pkt.cdb[1] = 0x1;
7708 
7709 		if (ioa_cfg->nvectors == 1)
7710 			ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7711 		else
7712 			ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7713 
7714 		ioarcb->cmd_pkt.cdb[2] =
7715 			((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7716 		ioarcb->cmd_pkt.cdb[3] =
7717 			((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7718 		ioarcb->cmd_pkt.cdb[4] =
7719 			((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7720 		ioarcb->cmd_pkt.cdb[5] =
7721 			((u64) hrrq->host_rrq_dma) & 0xff;
7722 		ioarcb->cmd_pkt.cdb[7] =
7723 			((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7724 		ioarcb->cmd_pkt.cdb[8] =
7725 			(sizeof(u32) * hrrq->size) & 0xff;
7726 
7727 		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7728 			ioarcb->cmd_pkt.cdb[9] =
7729 					ioa_cfg->identify_hrrq_index;
7730 
7731 		if (ioa_cfg->sis64) {
7732 			ioarcb->cmd_pkt.cdb[10] =
7733 				((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7734 			ioarcb->cmd_pkt.cdb[11] =
7735 				((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7736 			ioarcb->cmd_pkt.cdb[12] =
7737 				((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7738 			ioarcb->cmd_pkt.cdb[13] =
7739 				((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7740 		}
7741 
7742 		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7743 			ioarcb->cmd_pkt.cdb[14] =
7744 					ioa_cfg->identify_hrrq_index;
7745 
7746 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7747 			   IPR_INTERNAL_TIMEOUT);
7748 
7749 		if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7750 			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7751 
7752 		LEAVE;
7753 		return IPR_RC_JOB_RETURN;
7754 	}
7755 
7756 	LEAVE;
7757 	return IPR_RC_JOB_CONTINUE;
7758 }
7759 
7760 /**
7761  * ipr_reset_timer_done - Adapter reset timer function
7762  * @ipr_cmd:	ipr command struct
7763  *
7764  * Description: This function is used in adapter reset processing
7765  * for timing events. If the reset_cmd pointer in the IOA
7766  * config struct is not this adapter's we are doing nested
7767  * resets and fail_all_ops will take care of freeing the
7768  * command block.
7769  *
7770  * Return value:
7771  * 	none
7772  **/
7773 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7774 {
7775 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7776 	unsigned long lock_flags = 0;
7777 
7778 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7779 
7780 	if (ioa_cfg->reset_cmd == ipr_cmd) {
7781 		list_del(&ipr_cmd->queue);
7782 		ipr_cmd->done(ipr_cmd);
7783 	}
7784 
7785 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7786 }
7787 
7788 /**
7789  * ipr_reset_start_timer - Start a timer for adapter reset job
7790  * @ipr_cmd:	ipr command struct
7791  * @timeout:	timeout value
7792  *
7793  * Description: This function is used in adapter reset processing
7794  * for timing events. If the reset_cmd pointer in the IOA
7795  * config struct is not this adapter's we are doing nested
7796  * resets and fail_all_ops will take care of freeing the
7797  * command block.
7798  *
7799  * Return value:
7800  * 	none
7801  **/
7802 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7803 				  unsigned long timeout)
7804 {
7805 
7806 	ENTER;
7807 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7808 	ipr_cmd->done = ipr_reset_ioa_job;
7809 
7810 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7811 	ipr_cmd->timer.expires = jiffies + timeout;
7812 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7813 	add_timer(&ipr_cmd->timer);
7814 }
7815 
7816 /**
7817  * ipr_init_ioa_mem - Initialize ioa_cfg control block
7818  * @ioa_cfg:	ioa cfg struct
7819  *
7820  * Return value:
7821  * 	nothing
7822  **/
7823 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7824 {
7825 	struct ipr_hrr_queue *hrrq;
7826 
7827 	for_each_hrrq(hrrq, ioa_cfg) {
7828 		spin_lock(&hrrq->_lock);
7829 		memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7830 
7831 		/* Initialize Host RRQ pointers */
7832 		hrrq->hrrq_start = hrrq->host_rrq;
7833 		hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7834 		hrrq->hrrq_curr = hrrq->hrrq_start;
7835 		hrrq->toggle_bit = 1;
7836 		spin_unlock(&hrrq->_lock);
7837 	}
7838 	wmb();
7839 
7840 	ioa_cfg->identify_hrrq_index = 0;
7841 	if (ioa_cfg->hrrq_num == 1)
7842 		atomic_set(&ioa_cfg->hrrq_index, 0);
7843 	else
7844 		atomic_set(&ioa_cfg->hrrq_index, 1);
7845 
7846 	/* Zero out config table */
7847 	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7848 }
7849 
7850 /**
7851  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7852  * @ipr_cmd:	ipr command struct
7853  *
7854  * Return value:
7855  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7856  **/
7857 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7858 {
7859 	unsigned long stage, stage_time;
7860 	u32 feedback;
7861 	volatile u32 int_reg;
7862 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7863 	u64 maskval = 0;
7864 
7865 	feedback = readl(ioa_cfg->regs.init_feedback_reg);
7866 	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7867 	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7868 
7869 	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7870 
7871 	/* sanity check the stage_time value */
7872 	if (stage_time == 0)
7873 		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7874 	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7875 		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7876 	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7877 		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7878 
7879 	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7880 		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7881 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7882 		stage_time = ioa_cfg->transop_timeout;
7883 		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7884 	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7885 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7886 		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7887 			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7888 			maskval = IPR_PCII_IPL_STAGE_CHANGE;
7889 			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7890 			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7891 			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7892 			return IPR_RC_JOB_CONTINUE;
7893 		}
7894 	}
7895 
7896 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7897 	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7898 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7899 	ipr_cmd->done = ipr_reset_ioa_job;
7900 	add_timer(&ipr_cmd->timer);
7901 
7902 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7903 
7904 	return IPR_RC_JOB_RETURN;
7905 }
7906 
7907 /**
7908  * ipr_reset_enable_ioa - Enable the IOA following a reset.
7909  * @ipr_cmd:	ipr command struct
7910  *
7911  * This function reinitializes some control blocks and
7912  * enables destructive diagnostics on the adapter.
7913  *
7914  * Return value:
7915  * 	IPR_RC_JOB_RETURN
7916  **/
7917 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7918 {
7919 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7920 	volatile u32 int_reg;
7921 	volatile u64 maskval;
7922 	int i;
7923 
7924 	ENTER;
7925 	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7926 	ipr_init_ioa_mem(ioa_cfg);
7927 
7928 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7929 		spin_lock(&ioa_cfg->hrrq[i]._lock);
7930 		ioa_cfg->hrrq[i].allow_interrupts = 1;
7931 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
7932 	}
7933 	wmb();
7934 	if (ioa_cfg->sis64) {
7935 		/* Set the adapter to the correct endian mode. */
7936 		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7937 		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7938 	}
7939 
7940 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7941 
7942 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7943 		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7944 		       ioa_cfg->regs.clr_interrupt_mask_reg32);
7945 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7946 		return IPR_RC_JOB_CONTINUE;
7947 	}
7948 
7949 	/* Enable destructive diagnostics on IOA */
7950 	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7951 
7952 	if (ioa_cfg->sis64) {
7953 		maskval = IPR_PCII_IPL_STAGE_CHANGE;
7954 		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7955 		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7956 	} else
7957 		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7958 
7959 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7960 
7961 	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7962 
7963 	if (ioa_cfg->sis64) {
7964 		ipr_cmd->job_step = ipr_reset_next_stage;
7965 		return IPR_RC_JOB_CONTINUE;
7966 	}
7967 
7968 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7969 	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7970 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7971 	ipr_cmd->done = ipr_reset_ioa_job;
7972 	add_timer(&ipr_cmd->timer);
7973 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7974 
7975 	LEAVE;
7976 	return IPR_RC_JOB_RETURN;
7977 }
7978 
7979 /**
7980  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7981  * @ipr_cmd:	ipr command struct
7982  *
7983  * This function is invoked when an adapter dump has run out
7984  * of processing time.
7985  *
7986  * Return value:
7987  * 	IPR_RC_JOB_CONTINUE
7988  **/
7989 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7990 {
7991 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7992 
7993 	if (ioa_cfg->sdt_state == GET_DUMP)
7994 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7995 	else if (ioa_cfg->sdt_state == READ_DUMP)
7996 		ioa_cfg->sdt_state = ABORT_DUMP;
7997 
7998 	ioa_cfg->dump_timeout = 1;
7999 	ipr_cmd->job_step = ipr_reset_alert;
8000 
8001 	return IPR_RC_JOB_CONTINUE;
8002 }
8003 
8004 /**
8005  * ipr_unit_check_no_data - Log a unit check/no data error log
8006  * @ioa_cfg:		ioa config struct
8007  *
8008  * Logs an error indicating the adapter unit checked, but for some
8009  * reason, we were unable to fetch the unit check buffer.
8010  *
8011  * Return value:
8012  * 	nothing
8013  **/
8014 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8015 {
8016 	ioa_cfg->errors_logged++;
8017 	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8018 }
8019 
8020 /**
8021  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8022  * @ioa_cfg:		ioa config struct
8023  *
8024  * Fetches the unit check buffer from the adapter by clocking the data
8025  * through the mailbox register.
8026  *
8027  * Return value:
8028  * 	nothing
8029  **/
8030 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8031 {
8032 	unsigned long mailbox;
8033 	struct ipr_hostrcb *hostrcb;
8034 	struct ipr_uc_sdt sdt;
8035 	int rc, length;
8036 	u32 ioasc;
8037 
8038 	mailbox = readl(ioa_cfg->ioa_mailbox);
8039 
8040 	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8041 		ipr_unit_check_no_data(ioa_cfg);
8042 		return;
8043 	}
8044 
8045 	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8046 	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8047 					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8048 
8049 	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8050 	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8051 	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8052 		ipr_unit_check_no_data(ioa_cfg);
8053 		return;
8054 	}
8055 
8056 	/* Find length of the first sdt entry (UC buffer) */
8057 	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8058 		length = be32_to_cpu(sdt.entry[0].end_token);
8059 	else
8060 		length = (be32_to_cpu(sdt.entry[0].end_token) -
8061 			  be32_to_cpu(sdt.entry[0].start_token)) &
8062 			  IPR_FMT2_MBX_ADDR_MASK;
8063 
8064 	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8065 			     struct ipr_hostrcb, queue);
8066 	list_del(&hostrcb->queue);
8067 	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8068 
8069 	rc = ipr_get_ldump_data_section(ioa_cfg,
8070 					be32_to_cpu(sdt.entry[0].start_token),
8071 					(__be32 *)&hostrcb->hcam,
8072 					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8073 
8074 	if (!rc) {
8075 		ipr_handle_log_data(ioa_cfg, hostrcb);
8076 		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8077 		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8078 		    ioa_cfg->sdt_state == GET_DUMP)
8079 			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8080 	} else
8081 		ipr_unit_check_no_data(ioa_cfg);
8082 
8083 	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8084 }
8085 
8086 /**
8087  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8088  * @ipr_cmd:	ipr command struct
8089  *
8090  * Description: This function will call to get the unit check buffer.
8091  *
8092  * Return value:
8093  *	IPR_RC_JOB_RETURN
8094  **/
8095 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8096 {
8097 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8098 
8099 	ENTER;
8100 	ioa_cfg->ioa_unit_checked = 0;
8101 	ipr_get_unit_check_buffer(ioa_cfg);
8102 	ipr_cmd->job_step = ipr_reset_alert;
8103 	ipr_reset_start_timer(ipr_cmd, 0);
8104 
8105 	LEAVE;
8106 	return IPR_RC_JOB_RETURN;
8107 }
8108 
8109 /**
8110  * ipr_reset_restore_cfg_space - Restore PCI config space.
8111  * @ipr_cmd:	ipr command struct
8112  *
8113  * Description: This function restores the saved PCI config space of
8114  * the adapter, fails all outstanding ops back to the callers, and
8115  * fetches the dump/unit check if applicable to this reset.
8116  *
8117  * Return value:
8118  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8119  **/
8120 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8121 {
8122 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8123 	u32 int_reg;
8124 
8125 	ENTER;
8126 	ioa_cfg->pdev->state_saved = true;
8127 	pci_restore_state(ioa_cfg->pdev);
8128 
8129 	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8130 		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8131 		return IPR_RC_JOB_CONTINUE;
8132 	}
8133 
8134 	ipr_fail_all_ops(ioa_cfg);
8135 
8136 	if (ioa_cfg->sis64) {
8137 		/* Set the adapter to the correct endian mode. */
8138 		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8139 		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8140 	}
8141 
8142 	if (ioa_cfg->ioa_unit_checked) {
8143 		if (ioa_cfg->sis64) {
8144 			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8145 			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8146 			return IPR_RC_JOB_RETURN;
8147 		} else {
8148 			ioa_cfg->ioa_unit_checked = 0;
8149 			ipr_get_unit_check_buffer(ioa_cfg);
8150 			ipr_cmd->job_step = ipr_reset_alert;
8151 			ipr_reset_start_timer(ipr_cmd, 0);
8152 			return IPR_RC_JOB_RETURN;
8153 		}
8154 	}
8155 
8156 	if (ioa_cfg->in_ioa_bringdown) {
8157 		ipr_cmd->job_step = ipr_ioa_bringdown_done;
8158 	} else {
8159 		ipr_cmd->job_step = ipr_reset_enable_ioa;
8160 
8161 		if (GET_DUMP == ioa_cfg->sdt_state) {
8162 			ioa_cfg->sdt_state = READ_DUMP;
8163 			ioa_cfg->dump_timeout = 0;
8164 			if (ioa_cfg->sis64)
8165 				ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8166 			else
8167 				ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8168 			ipr_cmd->job_step = ipr_reset_wait_for_dump;
8169 			schedule_work(&ioa_cfg->work_q);
8170 			return IPR_RC_JOB_RETURN;
8171 		}
8172 	}
8173 
8174 	LEAVE;
8175 	return IPR_RC_JOB_CONTINUE;
8176 }
8177 
8178 /**
8179  * ipr_reset_bist_done - BIST has completed on the adapter.
8180  * @ipr_cmd:	ipr command struct
8181  *
8182  * Description: Unblock config space and resume the reset process.
8183  *
8184  * Return value:
8185  * 	IPR_RC_JOB_CONTINUE
8186  **/
8187 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8188 {
8189 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8190 
8191 	ENTER;
8192 	if (ioa_cfg->cfg_locked)
8193 		pci_cfg_access_unlock(ioa_cfg->pdev);
8194 	ioa_cfg->cfg_locked = 0;
8195 	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8196 	LEAVE;
8197 	return IPR_RC_JOB_CONTINUE;
8198 }
8199 
8200 /**
8201  * ipr_reset_start_bist - Run BIST on the adapter.
8202  * @ipr_cmd:	ipr command struct
8203  *
8204  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8205  *
8206  * Return value:
8207  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8208  **/
8209 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8210 {
8211 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8212 	int rc = PCIBIOS_SUCCESSFUL;
8213 
8214 	ENTER;
8215 	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8216 		writel(IPR_UPROCI_SIS64_START_BIST,
8217 		       ioa_cfg->regs.set_uproc_interrupt_reg32);
8218 	else
8219 		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8220 
8221 	if (rc == PCIBIOS_SUCCESSFUL) {
8222 		ipr_cmd->job_step = ipr_reset_bist_done;
8223 		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8224 		rc = IPR_RC_JOB_RETURN;
8225 	} else {
8226 		if (ioa_cfg->cfg_locked)
8227 			pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8228 		ioa_cfg->cfg_locked = 0;
8229 		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8230 		rc = IPR_RC_JOB_CONTINUE;
8231 	}
8232 
8233 	LEAVE;
8234 	return rc;
8235 }
8236 
8237 /**
8238  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8239  * @ipr_cmd:	ipr command struct
8240  *
8241  * Description: This clears PCI reset to the adapter and delays two seconds.
8242  *
8243  * Return value:
8244  * 	IPR_RC_JOB_RETURN
8245  **/
8246 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8247 {
8248 	ENTER;
8249 	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8250 	ipr_cmd->job_step = ipr_reset_bist_done;
8251 	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8252 	LEAVE;
8253 	return IPR_RC_JOB_RETURN;
8254 }
8255 
8256 /**
8257  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8258  * @ipr_cmd:	ipr command struct
8259  *
8260  * Description: This asserts PCI reset to the adapter.
8261  *
8262  * Return value:
8263  * 	IPR_RC_JOB_RETURN
8264  **/
8265 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8266 {
8267 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8268 	struct pci_dev *pdev = ioa_cfg->pdev;
8269 
8270 	ENTER;
8271 	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8272 	ipr_cmd->job_step = ipr_reset_slot_reset_done;
8273 	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8274 	LEAVE;
8275 	return IPR_RC_JOB_RETURN;
8276 }
8277 
8278 /**
8279  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8280  * @ipr_cmd:	ipr command struct
8281  *
8282  * Description: This attempts to block config access to the IOA.
8283  *
8284  * Return value:
8285  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8286  **/
8287 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8288 {
8289 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8290 	int rc = IPR_RC_JOB_CONTINUE;
8291 
8292 	if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8293 		ioa_cfg->cfg_locked = 1;
8294 		ipr_cmd->job_step = ioa_cfg->reset;
8295 	} else {
8296 		if (ipr_cmd->u.time_left) {
8297 			rc = IPR_RC_JOB_RETURN;
8298 			ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8299 			ipr_reset_start_timer(ipr_cmd,
8300 					      IPR_CHECK_FOR_RESET_TIMEOUT);
8301 		} else {
8302 			ipr_cmd->job_step = ioa_cfg->reset;
8303 			dev_err(&ioa_cfg->pdev->dev,
8304 				"Timed out waiting to lock config access. Resetting anyway.\n");
8305 		}
8306 	}
8307 
8308 	return rc;
8309 }
8310 
8311 /**
8312  * ipr_reset_block_config_access - Block config access to the IOA
8313  * @ipr_cmd:	ipr command struct
8314  *
8315  * Description: This attempts to block config access to the IOA
8316  *
8317  * Return value:
8318  * 	IPR_RC_JOB_CONTINUE
8319  **/
8320 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8321 {
8322 	ipr_cmd->ioa_cfg->cfg_locked = 0;
8323 	ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8324 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8325 	return IPR_RC_JOB_CONTINUE;
8326 }
8327 
8328 /**
8329  * ipr_reset_allowed - Query whether or not IOA can be reset
8330  * @ioa_cfg:	ioa config struct
8331  *
8332  * Return value:
8333  * 	0 if reset not allowed / non-zero if reset is allowed
8334  **/
8335 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8336 {
8337 	volatile u32 temp_reg;
8338 
8339 	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8340 	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8341 }
8342 
8343 /**
8344  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8345  * @ipr_cmd:	ipr command struct
8346  *
8347  * Description: This function waits for adapter permission to run BIST,
8348  * then runs BIST. If the adapter does not give permission after a
8349  * reasonable time, we will reset the adapter anyway. The impact of
8350  * resetting the adapter without warning the adapter is the risk of
8351  * losing the persistent error log on the adapter. If the adapter is
8352  * reset while it is writing to the flash on the adapter, the flash
8353  * segment will have bad ECC and be zeroed.
8354  *
8355  * Return value:
8356  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8357  **/
8358 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8359 {
8360 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8361 	int rc = IPR_RC_JOB_RETURN;
8362 
8363 	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8364 		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8365 		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8366 	} else {
8367 		ipr_cmd->job_step = ipr_reset_block_config_access;
8368 		rc = IPR_RC_JOB_CONTINUE;
8369 	}
8370 
8371 	return rc;
8372 }
8373 
8374 /**
8375  * ipr_reset_alert - Alert the adapter of a pending reset
8376  * @ipr_cmd:	ipr command struct
8377  *
8378  * Description: This function alerts the adapter that it will be reset.
8379  * If memory space is not currently enabled, proceed directly
8380  * to running BIST on the adapter. The timer must always be started
8381  * so we guarantee we do not run BIST from ipr_isr.
8382  *
8383  * Return value:
8384  * 	IPR_RC_JOB_RETURN
8385  **/
8386 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8387 {
8388 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8389 	u16 cmd_reg;
8390 	int rc;
8391 
8392 	ENTER;
8393 	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8394 
8395 	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8396 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8397 		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8398 		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8399 	} else {
8400 		ipr_cmd->job_step = ipr_reset_block_config_access;
8401 	}
8402 
8403 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8404 	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8405 
8406 	LEAVE;
8407 	return IPR_RC_JOB_RETURN;
8408 }
8409 
8410 /**
8411  * ipr_reset_ucode_download_done - Microcode download completion
8412  * @ipr_cmd:	ipr command struct
8413  *
8414  * Description: This function unmaps the microcode download buffer.
8415  *
8416  * Return value:
8417  * 	IPR_RC_JOB_CONTINUE
8418  **/
8419 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8420 {
8421 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8422 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8423 
8424 	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
8425 		     sglist->num_sg, DMA_TO_DEVICE);
8426 
8427 	ipr_cmd->job_step = ipr_reset_alert;
8428 	return IPR_RC_JOB_CONTINUE;
8429 }
8430 
8431 /**
8432  * ipr_reset_ucode_download - Download microcode to the adapter
8433  * @ipr_cmd:	ipr command struct
8434  *
8435  * Description: This function checks to see if it there is microcode
8436  * to download to the adapter. If there is, a download is performed.
8437  *
8438  * Return value:
8439  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8440  **/
8441 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8442 {
8443 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8444 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8445 
8446 	ENTER;
8447 	ipr_cmd->job_step = ipr_reset_alert;
8448 
8449 	if (!sglist)
8450 		return IPR_RC_JOB_CONTINUE;
8451 
8452 	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8453 	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8454 	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8455 	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8456 	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8457 	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8458 	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8459 
8460 	if (ioa_cfg->sis64)
8461 		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8462 	else
8463 		ipr_build_ucode_ioadl(ipr_cmd, sglist);
8464 	ipr_cmd->job_step = ipr_reset_ucode_download_done;
8465 
8466 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8467 		   IPR_WRITE_BUFFER_TIMEOUT);
8468 
8469 	LEAVE;
8470 	return IPR_RC_JOB_RETURN;
8471 }
8472 
8473 /**
8474  * ipr_reset_shutdown_ioa - Shutdown the adapter
8475  * @ipr_cmd:	ipr command struct
8476  *
8477  * Description: This function issues an adapter shutdown of the
8478  * specified type to the specified adapter as part of the
8479  * adapter reset job.
8480  *
8481  * Return value:
8482  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8483  **/
8484 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8485 {
8486 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8487 	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8488 	unsigned long timeout;
8489 	int rc = IPR_RC_JOB_CONTINUE;
8490 
8491 	ENTER;
8492 	if (shutdown_type != IPR_SHUTDOWN_NONE &&
8493 			!ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8494 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8495 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8496 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8497 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8498 
8499 		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8500 			timeout = IPR_SHUTDOWN_TIMEOUT;
8501 		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8502 			timeout = IPR_INTERNAL_TIMEOUT;
8503 		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8504 			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8505 		else
8506 			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8507 
8508 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8509 
8510 		rc = IPR_RC_JOB_RETURN;
8511 		ipr_cmd->job_step = ipr_reset_ucode_download;
8512 	} else
8513 		ipr_cmd->job_step = ipr_reset_alert;
8514 
8515 	LEAVE;
8516 	return rc;
8517 }
8518 
8519 /**
8520  * ipr_reset_ioa_job - Adapter reset job
8521  * @ipr_cmd:	ipr command struct
8522  *
8523  * Description: This function is the job router for the adapter reset job.
8524  *
8525  * Return value:
8526  * 	none
8527  **/
8528 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8529 {
8530 	u32 rc, ioasc;
8531 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8532 
8533 	do {
8534 		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8535 
8536 		if (ioa_cfg->reset_cmd != ipr_cmd) {
8537 			/*
8538 			 * We are doing nested adapter resets and this is
8539 			 * not the current reset job.
8540 			 */
8541 			list_add_tail(&ipr_cmd->queue,
8542 					&ipr_cmd->hrrq->hrrq_free_q);
8543 			return;
8544 		}
8545 
8546 		if (IPR_IOASC_SENSE_KEY(ioasc)) {
8547 			rc = ipr_cmd->job_step_failed(ipr_cmd);
8548 			if (rc == IPR_RC_JOB_RETURN)
8549 				return;
8550 		}
8551 
8552 		ipr_reinit_ipr_cmnd(ipr_cmd);
8553 		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8554 		rc = ipr_cmd->job_step(ipr_cmd);
8555 	} while (rc == IPR_RC_JOB_CONTINUE);
8556 }
8557 
8558 /**
8559  * _ipr_initiate_ioa_reset - Initiate an adapter reset
8560  * @ioa_cfg:		ioa config struct
8561  * @job_step:		first job step of reset job
8562  * @shutdown_type:	shutdown type
8563  *
8564  * Description: This function will initiate the reset of the given adapter
8565  * starting at the selected job step.
8566  * If the caller needs to wait on the completion of the reset,
8567  * the caller must sleep on the reset_wait_q.
8568  *
8569  * Return value:
8570  * 	none
8571  **/
8572 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8573 				    int (*job_step) (struct ipr_cmnd *),
8574 				    enum ipr_shutdown_type shutdown_type)
8575 {
8576 	struct ipr_cmnd *ipr_cmd;
8577 	int i;
8578 
8579 	ioa_cfg->in_reset_reload = 1;
8580 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8581 		spin_lock(&ioa_cfg->hrrq[i]._lock);
8582 		ioa_cfg->hrrq[i].allow_cmds = 0;
8583 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
8584 	}
8585 	wmb();
8586 	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8587 		scsi_block_requests(ioa_cfg->host);
8588 
8589 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8590 	ioa_cfg->reset_cmd = ipr_cmd;
8591 	ipr_cmd->job_step = job_step;
8592 	ipr_cmd->u.shutdown_type = shutdown_type;
8593 
8594 	ipr_reset_ioa_job(ipr_cmd);
8595 }
8596 
8597 /**
8598  * ipr_initiate_ioa_reset - Initiate an adapter reset
8599  * @ioa_cfg:		ioa config struct
8600  * @shutdown_type:	shutdown type
8601  *
8602  * Description: This function will initiate the reset of the given adapter.
8603  * If the caller needs to wait on the completion of the reset,
8604  * the caller must sleep on the reset_wait_q.
8605  *
8606  * Return value:
8607  * 	none
8608  **/
8609 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8610 				   enum ipr_shutdown_type shutdown_type)
8611 {
8612 	int i;
8613 
8614 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8615 		return;
8616 
8617 	if (ioa_cfg->in_reset_reload) {
8618 		if (ioa_cfg->sdt_state == GET_DUMP)
8619 			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8620 		else if (ioa_cfg->sdt_state == READ_DUMP)
8621 			ioa_cfg->sdt_state = ABORT_DUMP;
8622 	}
8623 
8624 	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8625 		dev_err(&ioa_cfg->pdev->dev,
8626 			"IOA taken offline - error recovery failed\n");
8627 
8628 		ioa_cfg->reset_retries = 0;
8629 		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8630 			spin_lock(&ioa_cfg->hrrq[i]._lock);
8631 			ioa_cfg->hrrq[i].ioa_is_dead = 1;
8632 			spin_unlock(&ioa_cfg->hrrq[i]._lock);
8633 		}
8634 		wmb();
8635 
8636 		if (ioa_cfg->in_ioa_bringdown) {
8637 			ioa_cfg->reset_cmd = NULL;
8638 			ioa_cfg->in_reset_reload = 0;
8639 			ipr_fail_all_ops(ioa_cfg);
8640 			wake_up_all(&ioa_cfg->reset_wait_q);
8641 
8642 			if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8643 				spin_unlock_irq(ioa_cfg->host->host_lock);
8644 				scsi_unblock_requests(ioa_cfg->host);
8645 				spin_lock_irq(ioa_cfg->host->host_lock);
8646 			}
8647 			return;
8648 		} else {
8649 			ioa_cfg->in_ioa_bringdown = 1;
8650 			shutdown_type = IPR_SHUTDOWN_NONE;
8651 		}
8652 	}
8653 
8654 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8655 				shutdown_type);
8656 }
8657 
8658 /**
8659  * ipr_reset_freeze - Hold off all I/O activity
8660  * @ipr_cmd:	ipr command struct
8661  *
8662  * Description: If the PCI slot is frozen, hold off all I/O
8663  * activity; then, as soon as the slot is available again,
8664  * initiate an adapter reset.
8665  */
8666 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8667 {
8668 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8669 	int i;
8670 
8671 	/* Disallow new interrupts, avoid loop */
8672 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8673 		spin_lock(&ioa_cfg->hrrq[i]._lock);
8674 		ioa_cfg->hrrq[i].allow_interrupts = 0;
8675 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
8676 	}
8677 	wmb();
8678 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8679 	ipr_cmd->done = ipr_reset_ioa_job;
8680 	return IPR_RC_JOB_RETURN;
8681 }
8682 
8683 /**
8684  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8685  * @pdev:	PCI device struct
8686  *
8687  * Description: This routine is called to tell us that the MMIO
8688  * access to the IOA has been restored
8689  */
8690 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8691 {
8692 	unsigned long flags = 0;
8693 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8694 
8695 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8696 	if (!ioa_cfg->probe_done)
8697 		pci_save_state(pdev);
8698 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8699 	return PCI_ERS_RESULT_NEED_RESET;
8700 }
8701 
8702 /**
8703  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8704  * @pdev:	PCI device struct
8705  *
8706  * Description: This routine is called to tell us that the PCI bus
8707  * is down. Can't do anything here, except put the device driver
8708  * into a holding pattern, waiting for the PCI bus to come back.
8709  */
8710 static void ipr_pci_frozen(struct pci_dev *pdev)
8711 {
8712 	unsigned long flags = 0;
8713 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8714 
8715 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8716 	if (ioa_cfg->probe_done)
8717 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8718 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8719 }
8720 
8721 /**
8722  * ipr_pci_slot_reset - Called when PCI slot has been reset.
8723  * @pdev:	PCI device struct
8724  *
8725  * Description: This routine is called by the pci error recovery
8726  * code after the PCI slot has been reset, just before we
8727  * should resume normal operations.
8728  */
8729 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8730 {
8731 	unsigned long flags = 0;
8732 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8733 
8734 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8735 	if (ioa_cfg->probe_done) {
8736 		if (ioa_cfg->needs_warm_reset)
8737 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8738 		else
8739 			_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8740 						IPR_SHUTDOWN_NONE);
8741 	} else
8742 		wake_up_all(&ioa_cfg->eeh_wait_q);
8743 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8744 	return PCI_ERS_RESULT_RECOVERED;
8745 }
8746 
8747 /**
8748  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8749  * @pdev:	PCI device struct
8750  *
8751  * Description: This routine is called when the PCI bus has
8752  * permanently failed.
8753  */
8754 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8755 {
8756 	unsigned long flags = 0;
8757 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8758 	int i;
8759 
8760 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8761 	if (ioa_cfg->probe_done) {
8762 		if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8763 			ioa_cfg->sdt_state = ABORT_DUMP;
8764 		ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8765 		ioa_cfg->in_ioa_bringdown = 1;
8766 		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8767 			spin_lock(&ioa_cfg->hrrq[i]._lock);
8768 			ioa_cfg->hrrq[i].allow_cmds = 0;
8769 			spin_unlock(&ioa_cfg->hrrq[i]._lock);
8770 		}
8771 		wmb();
8772 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8773 	} else
8774 		wake_up_all(&ioa_cfg->eeh_wait_q);
8775 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8776 }
8777 
8778 /**
8779  * ipr_pci_error_detected - Called when a PCI error is detected.
8780  * @pdev:	PCI device struct
8781  * @state:	PCI channel state
8782  *
8783  * Description: Called when a PCI error is detected.
8784  *
8785  * Return value:
8786  * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8787  */
8788 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8789 					       pci_channel_state_t state)
8790 {
8791 	switch (state) {
8792 	case pci_channel_io_frozen:
8793 		ipr_pci_frozen(pdev);
8794 		return PCI_ERS_RESULT_CAN_RECOVER;
8795 	case pci_channel_io_perm_failure:
8796 		ipr_pci_perm_failure(pdev);
8797 		return PCI_ERS_RESULT_DISCONNECT;
8798 		break;
8799 	default:
8800 		break;
8801 	}
8802 	return PCI_ERS_RESULT_NEED_RESET;
8803 }
8804 
8805 /**
8806  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8807  * @ioa_cfg:	ioa cfg struct
8808  *
8809  * Description: This is the second phase of adapter intialization
8810  * This function takes care of initilizing the adapter to the point
8811  * where it can accept new commands.
8812 
8813  * Return value:
8814  * 	0 on success / -EIO on failure
8815  **/
8816 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8817 {
8818 	int rc = 0;
8819 	unsigned long host_lock_flags = 0;
8820 
8821 	ENTER;
8822 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8823 	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8824 	ioa_cfg->probe_done = 1;
8825 	if (ioa_cfg->needs_hard_reset) {
8826 		ioa_cfg->needs_hard_reset = 0;
8827 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8828 	} else
8829 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8830 					IPR_SHUTDOWN_NONE);
8831 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8832 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8833 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8834 
8835 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8836 		rc = -EIO;
8837 	} else if (ipr_invalid_adapter(ioa_cfg)) {
8838 		if (!ipr_testmode)
8839 			rc = -EIO;
8840 
8841 		dev_err(&ioa_cfg->pdev->dev,
8842 			"Adapter not supported in this hardware configuration.\n");
8843 	}
8844 
8845 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8846 
8847 	LEAVE;
8848 	return rc;
8849 }
8850 
8851 /**
8852  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8853  * @ioa_cfg:	ioa config struct
8854  *
8855  * Return value:
8856  * 	none
8857  **/
8858 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8859 {
8860 	int i;
8861 
8862 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8863 		if (ioa_cfg->ipr_cmnd_list[i])
8864 			pci_pool_free(ioa_cfg->ipr_cmd_pool,
8865 				      ioa_cfg->ipr_cmnd_list[i],
8866 				      ioa_cfg->ipr_cmnd_list_dma[i]);
8867 
8868 		ioa_cfg->ipr_cmnd_list[i] = NULL;
8869 	}
8870 
8871 	if (ioa_cfg->ipr_cmd_pool)
8872 		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
8873 
8874 	kfree(ioa_cfg->ipr_cmnd_list);
8875 	kfree(ioa_cfg->ipr_cmnd_list_dma);
8876 	ioa_cfg->ipr_cmnd_list = NULL;
8877 	ioa_cfg->ipr_cmnd_list_dma = NULL;
8878 	ioa_cfg->ipr_cmd_pool = NULL;
8879 }
8880 
8881 /**
8882  * ipr_free_mem - Frees memory allocated for an adapter
8883  * @ioa_cfg:	ioa cfg struct
8884  *
8885  * Return value:
8886  * 	nothing
8887  **/
8888 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8889 {
8890 	int i;
8891 
8892 	kfree(ioa_cfg->res_entries);
8893 	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8894 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8895 	ipr_free_cmd_blks(ioa_cfg);
8896 
8897 	for (i = 0; i < ioa_cfg->hrrq_num; i++)
8898 		pci_free_consistent(ioa_cfg->pdev,
8899 					sizeof(u32) * ioa_cfg->hrrq[i].size,
8900 					ioa_cfg->hrrq[i].host_rrq,
8901 					ioa_cfg->hrrq[i].host_rrq_dma);
8902 
8903 	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8904 			    ioa_cfg->u.cfg_table,
8905 			    ioa_cfg->cfg_table_dma);
8906 
8907 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
8908 		pci_free_consistent(ioa_cfg->pdev,
8909 				    sizeof(struct ipr_hostrcb),
8910 				    ioa_cfg->hostrcb[i],
8911 				    ioa_cfg->hostrcb_dma[i]);
8912 	}
8913 
8914 	ipr_free_dump(ioa_cfg);
8915 	kfree(ioa_cfg->trace);
8916 }
8917 
8918 /**
8919  * ipr_free_all_resources - Free all allocated resources for an adapter.
8920  * @ipr_cmd:	ipr command struct
8921  *
8922  * This function frees all allocated resources for the
8923  * specified adapter.
8924  *
8925  * Return value:
8926  * 	none
8927  **/
8928 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8929 {
8930 	struct pci_dev *pdev = ioa_cfg->pdev;
8931 
8932 	ENTER;
8933 	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8934 	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
8935 		int i;
8936 		for (i = 0; i < ioa_cfg->nvectors; i++)
8937 			free_irq(ioa_cfg->vectors_info[i].vec,
8938 				&ioa_cfg->hrrq[i]);
8939 	} else
8940 		free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8941 
8942 	if (ioa_cfg->intr_flag == IPR_USE_MSI) {
8943 		pci_disable_msi(pdev);
8944 		ioa_cfg->intr_flag &= ~IPR_USE_MSI;
8945 	} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
8946 		pci_disable_msix(pdev);
8947 		ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
8948 	}
8949 
8950 	iounmap(ioa_cfg->hdw_dma_regs);
8951 	pci_release_regions(pdev);
8952 	ipr_free_mem(ioa_cfg);
8953 	scsi_host_put(ioa_cfg->host);
8954 	pci_disable_device(pdev);
8955 	LEAVE;
8956 }
8957 
8958 /**
8959  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8960  * @ioa_cfg:	ioa config struct
8961  *
8962  * Return value:
8963  * 	0 on success / -ENOMEM on allocation failure
8964  **/
8965 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8966 {
8967 	struct ipr_cmnd *ipr_cmd;
8968 	struct ipr_ioarcb *ioarcb;
8969 	dma_addr_t dma_addr;
8970 	int i, entries_each_hrrq, hrrq_id = 0;
8971 
8972 	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
8973 						sizeof(struct ipr_cmnd), 512, 0);
8974 
8975 	if (!ioa_cfg->ipr_cmd_pool)
8976 		return -ENOMEM;
8977 
8978 	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8979 	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8980 
8981 	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8982 		ipr_free_cmd_blks(ioa_cfg);
8983 		return -ENOMEM;
8984 	}
8985 
8986 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8987 		if (ioa_cfg->hrrq_num > 1) {
8988 			if (i == 0) {
8989 				entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8990 				ioa_cfg->hrrq[i].min_cmd_id = 0;
8991 					ioa_cfg->hrrq[i].max_cmd_id =
8992 						(entries_each_hrrq - 1);
8993 			} else {
8994 				entries_each_hrrq =
8995 					IPR_NUM_BASE_CMD_BLKS/
8996 					(ioa_cfg->hrrq_num - 1);
8997 				ioa_cfg->hrrq[i].min_cmd_id =
8998 					IPR_NUM_INTERNAL_CMD_BLKS +
8999 					(i - 1) * entries_each_hrrq;
9000 				ioa_cfg->hrrq[i].max_cmd_id =
9001 					(IPR_NUM_INTERNAL_CMD_BLKS +
9002 					i * entries_each_hrrq - 1);
9003 			}
9004 		} else {
9005 			entries_each_hrrq = IPR_NUM_CMD_BLKS;
9006 			ioa_cfg->hrrq[i].min_cmd_id = 0;
9007 			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9008 		}
9009 		ioa_cfg->hrrq[i].size = entries_each_hrrq;
9010 	}
9011 
9012 	BUG_ON(ioa_cfg->hrrq_num == 0);
9013 
9014 	i = IPR_NUM_CMD_BLKS -
9015 		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9016 	if (i > 0) {
9017 		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9018 		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9019 	}
9020 
9021 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9022 		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9023 
9024 		if (!ipr_cmd) {
9025 			ipr_free_cmd_blks(ioa_cfg);
9026 			return -ENOMEM;
9027 		}
9028 
9029 		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9030 		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9031 		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9032 
9033 		ioarcb = &ipr_cmd->ioarcb;
9034 		ipr_cmd->dma_addr = dma_addr;
9035 		if (ioa_cfg->sis64)
9036 			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9037 		else
9038 			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9039 
9040 		ioarcb->host_response_handle = cpu_to_be32(i << 2);
9041 		if (ioa_cfg->sis64) {
9042 			ioarcb->u.sis64_addr_data.data_ioadl_addr =
9043 				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9044 			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9045 				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9046 		} else {
9047 			ioarcb->write_ioadl_addr =
9048 				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9049 			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9050 			ioarcb->ioasa_host_pci_addr =
9051 				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9052 		}
9053 		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9054 		ipr_cmd->cmd_index = i;
9055 		ipr_cmd->ioa_cfg = ioa_cfg;
9056 		ipr_cmd->sense_buffer_dma = dma_addr +
9057 			offsetof(struct ipr_cmnd, sense_buffer);
9058 
9059 		ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9060 		ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9061 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9062 		if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9063 			hrrq_id++;
9064 	}
9065 
9066 	return 0;
9067 }
9068 
9069 /**
9070  * ipr_alloc_mem - Allocate memory for an adapter
9071  * @ioa_cfg:	ioa config struct
9072  *
9073  * Return value:
9074  * 	0 on success / non-zero for error
9075  **/
9076 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9077 {
9078 	struct pci_dev *pdev = ioa_cfg->pdev;
9079 	int i, rc = -ENOMEM;
9080 
9081 	ENTER;
9082 	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9083 				       ioa_cfg->max_devs_supported, GFP_KERNEL);
9084 
9085 	if (!ioa_cfg->res_entries)
9086 		goto out;
9087 
9088 	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9089 		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9090 		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9091 	}
9092 
9093 	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
9094 						sizeof(struct ipr_misc_cbs),
9095 						&ioa_cfg->vpd_cbs_dma);
9096 
9097 	if (!ioa_cfg->vpd_cbs)
9098 		goto out_free_res_entries;
9099 
9100 	if (ipr_alloc_cmd_blks(ioa_cfg))
9101 		goto out_free_vpd_cbs;
9102 
9103 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9104 		ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
9105 					sizeof(u32) * ioa_cfg->hrrq[i].size,
9106 					&ioa_cfg->hrrq[i].host_rrq_dma);
9107 
9108 		if (!ioa_cfg->hrrq[i].host_rrq)  {
9109 			while (--i > 0)
9110 				pci_free_consistent(pdev,
9111 					sizeof(u32) * ioa_cfg->hrrq[i].size,
9112 					ioa_cfg->hrrq[i].host_rrq,
9113 					ioa_cfg->hrrq[i].host_rrq_dma);
9114 			goto out_ipr_free_cmd_blocks;
9115 		}
9116 		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9117 	}
9118 
9119 	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
9120 						    ioa_cfg->cfg_table_size,
9121 						    &ioa_cfg->cfg_table_dma);
9122 
9123 	if (!ioa_cfg->u.cfg_table)
9124 		goto out_free_host_rrq;
9125 
9126 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
9127 		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
9128 							   sizeof(struct ipr_hostrcb),
9129 							   &ioa_cfg->hostrcb_dma[i]);
9130 
9131 		if (!ioa_cfg->hostrcb[i])
9132 			goto out_free_hostrcb_dma;
9133 
9134 		ioa_cfg->hostrcb[i]->hostrcb_dma =
9135 			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9136 		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9137 		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9138 	}
9139 
9140 	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9141 				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9142 
9143 	if (!ioa_cfg->trace)
9144 		goto out_free_hostrcb_dma;
9145 
9146 	rc = 0;
9147 out:
9148 	LEAVE;
9149 	return rc;
9150 
9151 out_free_hostrcb_dma:
9152 	while (i-- > 0) {
9153 		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
9154 				    ioa_cfg->hostrcb[i],
9155 				    ioa_cfg->hostrcb_dma[i]);
9156 	}
9157 	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
9158 			    ioa_cfg->u.cfg_table,
9159 			    ioa_cfg->cfg_table_dma);
9160 out_free_host_rrq:
9161 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9162 		pci_free_consistent(pdev,
9163 				sizeof(u32) * ioa_cfg->hrrq[i].size,
9164 				ioa_cfg->hrrq[i].host_rrq,
9165 				ioa_cfg->hrrq[i].host_rrq_dma);
9166 	}
9167 out_ipr_free_cmd_blocks:
9168 	ipr_free_cmd_blks(ioa_cfg);
9169 out_free_vpd_cbs:
9170 	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
9171 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9172 out_free_res_entries:
9173 	kfree(ioa_cfg->res_entries);
9174 	goto out;
9175 }
9176 
9177 /**
9178  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9179  * @ioa_cfg:	ioa config struct
9180  *
9181  * Return value:
9182  * 	none
9183  **/
9184 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9185 {
9186 	int i;
9187 
9188 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9189 		ioa_cfg->bus_attr[i].bus = i;
9190 		ioa_cfg->bus_attr[i].qas_enabled = 0;
9191 		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9192 		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9193 			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9194 		else
9195 			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9196 	}
9197 }
9198 
9199 /**
9200  * ipr_init_regs - Initialize IOA registers
9201  * @ioa_cfg:	ioa config struct
9202  *
9203  * Return value:
9204  *	none
9205  **/
9206 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9207 {
9208 	const struct ipr_interrupt_offsets *p;
9209 	struct ipr_interrupts *t;
9210 	void __iomem *base;
9211 
9212 	p = &ioa_cfg->chip_cfg->regs;
9213 	t = &ioa_cfg->regs;
9214 	base = ioa_cfg->hdw_dma_regs;
9215 
9216 	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9217 	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9218 	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9219 	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9220 	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9221 	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9222 	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9223 	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9224 	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9225 	t->ioarrin_reg = base + p->ioarrin_reg;
9226 	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9227 	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9228 	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9229 	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9230 	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9231 	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9232 
9233 	if (ioa_cfg->sis64) {
9234 		t->init_feedback_reg = base + p->init_feedback_reg;
9235 		t->dump_addr_reg = base + p->dump_addr_reg;
9236 		t->dump_data_reg = base + p->dump_data_reg;
9237 		t->endian_swap_reg = base + p->endian_swap_reg;
9238 	}
9239 }
9240 
9241 /**
9242  * ipr_init_ioa_cfg - Initialize IOA config struct
9243  * @ioa_cfg:	ioa config struct
9244  * @host:		scsi host struct
9245  * @pdev:		PCI dev struct
9246  *
9247  * Return value:
9248  * 	none
9249  **/
9250 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9251 			     struct Scsi_Host *host, struct pci_dev *pdev)
9252 {
9253 	int i;
9254 
9255 	ioa_cfg->host = host;
9256 	ioa_cfg->pdev = pdev;
9257 	ioa_cfg->log_level = ipr_log_level;
9258 	ioa_cfg->doorbell = IPR_DOORBELL;
9259 	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9260 	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9261 	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9262 	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9263 	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9264 	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9265 
9266 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9267 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9268 	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9269 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9270 	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9271 	init_waitqueue_head(&ioa_cfg->reset_wait_q);
9272 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
9273 	init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9274 	ioa_cfg->sdt_state = INACTIVE;
9275 
9276 	ipr_initialize_bus_attr(ioa_cfg);
9277 	ioa_cfg->max_devs_supported = ipr_max_devs;
9278 
9279 	if (ioa_cfg->sis64) {
9280 		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9281 		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9282 		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9283 			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9284 		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9285 					   + ((sizeof(struct ipr_config_table_entry64)
9286 					       * ioa_cfg->max_devs_supported)));
9287 	} else {
9288 		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9289 		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9290 		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9291 			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9292 		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9293 					   + ((sizeof(struct ipr_config_table_entry)
9294 					       * ioa_cfg->max_devs_supported)));
9295 	}
9296 
9297 	host->max_channel = IPR_MAX_BUS_TO_SCAN;
9298 	host->unique_id = host->host_no;
9299 	host->max_cmd_len = IPR_MAX_CDB_LEN;
9300 	host->can_queue = ioa_cfg->max_cmds;
9301 	pci_set_drvdata(pdev, ioa_cfg);
9302 
9303 	for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9304 		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9305 		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9306 		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9307 		if (i == 0)
9308 			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9309 		else
9310 			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9311 	}
9312 }
9313 
9314 /**
9315  * ipr_get_chip_info - Find adapter chip information
9316  * @dev_id:		PCI device id struct
9317  *
9318  * Return value:
9319  * 	ptr to chip information on success / NULL on failure
9320  **/
9321 static const struct ipr_chip_t *
9322 ipr_get_chip_info(const struct pci_device_id *dev_id)
9323 {
9324 	int i;
9325 
9326 	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9327 		if (ipr_chip[i].vendor == dev_id->vendor &&
9328 		    ipr_chip[i].device == dev_id->device)
9329 			return &ipr_chip[i];
9330 	return NULL;
9331 }
9332 
9333 /**
9334  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9335  *						during probe time
9336  * @ioa_cfg:	ioa config struct
9337  *
9338  * Return value:
9339  * 	None
9340  **/
9341 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9342 {
9343 	struct pci_dev *pdev = ioa_cfg->pdev;
9344 
9345 	if (pci_channel_offline(pdev)) {
9346 		wait_event_timeout(ioa_cfg->eeh_wait_q,
9347 				   !pci_channel_offline(pdev),
9348 				   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9349 		pci_restore_state(pdev);
9350 	}
9351 }
9352 
9353 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9354 {
9355 	struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9356 	int i, vectors;
9357 
9358 	for (i = 0; i < ARRAY_SIZE(entries); ++i)
9359 		entries[i].entry = i;
9360 
9361 	vectors = pci_enable_msix_range(ioa_cfg->pdev,
9362 					entries, 1, ipr_number_of_msix);
9363 	if (vectors < 0) {
9364 		ipr_wait_for_pci_err_recovery(ioa_cfg);
9365 		return vectors;
9366 	}
9367 
9368 	for (i = 0; i < vectors; i++)
9369 		ioa_cfg->vectors_info[i].vec = entries[i].vector;
9370 	ioa_cfg->nvectors = vectors;
9371 
9372 	return 0;
9373 }
9374 
9375 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9376 {
9377 	int i, vectors;
9378 
9379 	vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9380 	if (vectors < 0) {
9381 		ipr_wait_for_pci_err_recovery(ioa_cfg);
9382 		return vectors;
9383 	}
9384 
9385 	for (i = 0; i < vectors; i++)
9386 		ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9387 	ioa_cfg->nvectors = vectors;
9388 
9389 	return 0;
9390 }
9391 
9392 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9393 {
9394 	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9395 
9396 	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9397 		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9398 			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9399 		ioa_cfg->vectors_info[vec_idx].
9400 			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9401 	}
9402 }
9403 
9404 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9405 {
9406 	int i, rc;
9407 
9408 	for (i = 1; i < ioa_cfg->nvectors; i++) {
9409 		rc = request_irq(ioa_cfg->vectors_info[i].vec,
9410 			ipr_isr_mhrrq,
9411 			0,
9412 			ioa_cfg->vectors_info[i].desc,
9413 			&ioa_cfg->hrrq[i]);
9414 		if (rc) {
9415 			while (--i >= 0)
9416 				free_irq(ioa_cfg->vectors_info[i].vec,
9417 					&ioa_cfg->hrrq[i]);
9418 			return rc;
9419 		}
9420 	}
9421 	return 0;
9422 }
9423 
9424 /**
9425  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9426  * @pdev:		PCI device struct
9427  *
9428  * Description: Simply set the msi_received flag to 1 indicating that
9429  * Message Signaled Interrupts are supported.
9430  *
9431  * Return value:
9432  * 	0 on success / non-zero on failure
9433  **/
9434 static irqreturn_t ipr_test_intr(int irq, void *devp)
9435 {
9436 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9437 	unsigned long lock_flags = 0;
9438 	irqreturn_t rc = IRQ_HANDLED;
9439 
9440 	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
9441 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9442 
9443 	ioa_cfg->msi_received = 1;
9444 	wake_up(&ioa_cfg->msi_wait_q);
9445 
9446 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9447 	return rc;
9448 }
9449 
9450 /**
9451  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9452  * @pdev:		PCI device struct
9453  *
9454  * Description: The return value from pci_enable_msi_range() can not always be
9455  * trusted.  This routine sets up and initiates a test interrupt to determine
9456  * if the interrupt is received via the ipr_test_intr() service routine.
9457  * If the tests fails, the driver will fall back to LSI.
9458  *
9459  * Return value:
9460  * 	0 on success / non-zero on failure
9461  **/
9462 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9463 {
9464 	int rc;
9465 	volatile u32 int_reg;
9466 	unsigned long lock_flags = 0;
9467 
9468 	ENTER;
9469 
9470 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9471 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
9472 	ioa_cfg->msi_received = 0;
9473 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9474 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9475 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9476 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9477 
9478 	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9479 		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9480 	else
9481 		rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9482 	if (rc) {
9483 		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9484 		return rc;
9485 	} else if (ipr_debug)
9486 		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9487 
9488 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9489 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9490 	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9491 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9492 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9493 
9494 	if (!ioa_cfg->msi_received) {
9495 		/* MSI test failed */
9496 		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
9497 		rc = -EOPNOTSUPP;
9498 	} else if (ipr_debug)
9499 		dev_info(&pdev->dev, "MSI test succeeded.\n");
9500 
9501 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9502 
9503 	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9504 		free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9505 	else
9506 		free_irq(pdev->irq, ioa_cfg);
9507 
9508 	LEAVE;
9509 
9510 	return rc;
9511 }
9512 
9513  /* ipr_probe_ioa - Allocates memory and does first stage of initialization
9514  * @pdev:		PCI device struct
9515  * @dev_id:		PCI device id struct
9516  *
9517  * Return value:
9518  * 	0 on success / non-zero on failure
9519  **/
9520 static int ipr_probe_ioa(struct pci_dev *pdev,
9521 			 const struct pci_device_id *dev_id)
9522 {
9523 	struct ipr_ioa_cfg *ioa_cfg;
9524 	struct Scsi_Host *host;
9525 	unsigned long ipr_regs_pci;
9526 	void __iomem *ipr_regs;
9527 	int rc = PCIBIOS_SUCCESSFUL;
9528 	volatile u32 mask, uproc, interrupts;
9529 	unsigned long lock_flags, driver_lock_flags;
9530 
9531 	ENTER;
9532 
9533 	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9534 	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9535 
9536 	if (!host) {
9537 		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9538 		rc = -ENOMEM;
9539 		goto out;
9540 	}
9541 
9542 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9543 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9544 	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
9545 
9546 	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9547 
9548 	if (!ioa_cfg->ipr_chip) {
9549 		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9550 			dev_id->vendor, dev_id->device);
9551 		goto out_scsi_host_put;
9552 	}
9553 
9554 	/* set SIS 32 or SIS 64 */
9555 	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9556 	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9557 	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9558 	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
9559 
9560 	if (ipr_transop_timeout)
9561 		ioa_cfg->transop_timeout = ipr_transop_timeout;
9562 	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9563 		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9564 	else
9565 		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9566 
9567 	ioa_cfg->revid = pdev->revision;
9568 
9569 	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9570 
9571 	ipr_regs_pci = pci_resource_start(pdev, 0);
9572 
9573 	rc = pci_request_regions(pdev, IPR_NAME);
9574 	if (rc < 0) {
9575 		dev_err(&pdev->dev,
9576 			"Couldn't register memory range of registers\n");
9577 		goto out_scsi_host_put;
9578 	}
9579 
9580 	rc = pci_enable_device(pdev);
9581 
9582 	if (rc || pci_channel_offline(pdev)) {
9583 		if (pci_channel_offline(pdev)) {
9584 			ipr_wait_for_pci_err_recovery(ioa_cfg);
9585 			rc = pci_enable_device(pdev);
9586 		}
9587 
9588 		if (rc) {
9589 			dev_err(&pdev->dev, "Cannot enable adapter\n");
9590 			ipr_wait_for_pci_err_recovery(ioa_cfg);
9591 			goto out_release_regions;
9592 		}
9593 	}
9594 
9595 	ipr_regs = pci_ioremap_bar(pdev, 0);
9596 
9597 	if (!ipr_regs) {
9598 		dev_err(&pdev->dev,
9599 			"Couldn't map memory range of registers\n");
9600 		rc = -ENOMEM;
9601 		goto out_disable;
9602 	}
9603 
9604 	ioa_cfg->hdw_dma_regs = ipr_regs;
9605 	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9606 	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9607 
9608 	ipr_init_regs(ioa_cfg);
9609 
9610 	if (ioa_cfg->sis64) {
9611 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
9612 		if (rc < 0) {
9613 			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
9614 			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9615 		}
9616 	} else
9617 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9618 
9619 	if (rc < 0) {
9620 		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
9621 		goto cleanup_nomem;
9622 	}
9623 
9624 	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9625 				   ioa_cfg->chip_cfg->cache_line_size);
9626 
9627 	if (rc != PCIBIOS_SUCCESSFUL) {
9628 		dev_err(&pdev->dev, "Write of cache line size failed\n");
9629 		ipr_wait_for_pci_err_recovery(ioa_cfg);
9630 		rc = -EIO;
9631 		goto cleanup_nomem;
9632 	}
9633 
9634 	/* Issue MMIO read to ensure card is not in EEH */
9635 	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9636 	ipr_wait_for_pci_err_recovery(ioa_cfg);
9637 
9638 	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9639 		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9640 			IPR_MAX_MSIX_VECTORS);
9641 		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9642 	}
9643 
9644 	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9645 			ipr_enable_msix(ioa_cfg) == 0)
9646 		ioa_cfg->intr_flag = IPR_USE_MSIX;
9647 	else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9648 			ipr_enable_msi(ioa_cfg) == 0)
9649 		ioa_cfg->intr_flag = IPR_USE_MSI;
9650 	else {
9651 		ioa_cfg->intr_flag = IPR_USE_LSI;
9652 		ioa_cfg->nvectors = 1;
9653 		dev_info(&pdev->dev, "Cannot enable MSI.\n");
9654 	}
9655 
9656 	pci_set_master(pdev);
9657 
9658 	if (pci_channel_offline(pdev)) {
9659 		ipr_wait_for_pci_err_recovery(ioa_cfg);
9660 		pci_set_master(pdev);
9661 		if (pci_channel_offline(pdev)) {
9662 			rc = -EIO;
9663 			goto out_msi_disable;
9664 		}
9665 	}
9666 
9667 	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9668 	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
9669 		rc = ipr_test_msi(ioa_cfg, pdev);
9670 		if (rc == -EOPNOTSUPP) {
9671 			ipr_wait_for_pci_err_recovery(ioa_cfg);
9672 			if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9673 				ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9674 				pci_disable_msi(pdev);
9675 			 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9676 				ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9677 				pci_disable_msix(pdev);
9678 			}
9679 
9680 			ioa_cfg->intr_flag = IPR_USE_LSI;
9681 			ioa_cfg->nvectors = 1;
9682 		}
9683 		else if (rc)
9684 			goto out_msi_disable;
9685 		else {
9686 			if (ioa_cfg->intr_flag == IPR_USE_MSI)
9687 				dev_info(&pdev->dev,
9688 					"Request for %d MSIs succeeded with starting IRQ: %d\n",
9689 					ioa_cfg->nvectors, pdev->irq);
9690 			else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9691 				dev_info(&pdev->dev,
9692 					"Request for %d MSIXs succeeded.",
9693 					ioa_cfg->nvectors);
9694 		}
9695 	}
9696 
9697 	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9698 				(unsigned int)num_online_cpus(),
9699 				(unsigned int)IPR_MAX_HRRQ_NUM);
9700 
9701 	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9702 		goto out_msi_disable;
9703 
9704 	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9705 		goto out_msi_disable;
9706 
9707 	rc = ipr_alloc_mem(ioa_cfg);
9708 	if (rc < 0) {
9709 		dev_err(&pdev->dev,
9710 			"Couldn't allocate enough memory for device driver!\n");
9711 		goto out_msi_disable;
9712 	}
9713 
9714 	/* Save away PCI config space for use following IOA reset */
9715 	rc = pci_save_state(pdev);
9716 
9717 	if (rc != PCIBIOS_SUCCESSFUL) {
9718 		dev_err(&pdev->dev, "Failed to save PCI config space\n");
9719 		rc = -EIO;
9720 		goto cleanup_nolog;
9721 	}
9722 
9723 	/*
9724 	 * If HRRQ updated interrupt is not masked, or reset alert is set,
9725 	 * the card is in an unknown state and needs a hard reset
9726 	 */
9727 	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9728 	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9729 	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9730 	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9731 		ioa_cfg->needs_hard_reset = 1;
9732 	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9733 		ioa_cfg->needs_hard_reset = 1;
9734 	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9735 		ioa_cfg->ioa_unit_checked = 1;
9736 
9737 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9738 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9739 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9740 
9741 	if (ioa_cfg->intr_flag == IPR_USE_MSI
9742 			|| ioa_cfg->intr_flag == IPR_USE_MSIX) {
9743 		name_msi_vectors(ioa_cfg);
9744 		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9745 			0,
9746 			ioa_cfg->vectors_info[0].desc,
9747 			&ioa_cfg->hrrq[0]);
9748 		if (!rc)
9749 			rc = ipr_request_other_msi_irqs(ioa_cfg);
9750 	} else {
9751 		rc = request_irq(pdev->irq, ipr_isr,
9752 			 IRQF_SHARED,
9753 			 IPR_NAME, &ioa_cfg->hrrq[0]);
9754 	}
9755 	if (rc) {
9756 		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9757 			pdev->irq, rc);
9758 		goto cleanup_nolog;
9759 	}
9760 
9761 	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9762 	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9763 		ioa_cfg->needs_warm_reset = 1;
9764 		ioa_cfg->reset = ipr_reset_slot_reset;
9765 	} else
9766 		ioa_cfg->reset = ipr_reset_start_bist;
9767 
9768 	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9769 	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9770 	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9771 
9772 	LEAVE;
9773 out:
9774 	return rc;
9775 
9776 cleanup_nolog:
9777 	ipr_free_mem(ioa_cfg);
9778 out_msi_disable:
9779 	ipr_wait_for_pci_err_recovery(ioa_cfg);
9780 	if (ioa_cfg->intr_flag == IPR_USE_MSI)
9781 		pci_disable_msi(pdev);
9782 	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9783 		pci_disable_msix(pdev);
9784 cleanup_nomem:
9785 	iounmap(ipr_regs);
9786 out_disable:
9787 	pci_disable_device(pdev);
9788 out_release_regions:
9789 	pci_release_regions(pdev);
9790 out_scsi_host_put:
9791 	scsi_host_put(host);
9792 	goto out;
9793 }
9794 
9795 /**
9796  * ipr_scan_vsets - Scans for VSET devices
9797  * @ioa_cfg:	ioa config struct
9798  *
9799  * Description: Since the VSET resources do not follow SAM in that we can have
9800  * sparse LUNs with no LUN 0, we have to scan for these ourselves.
9801  *
9802  * Return value:
9803  * 	none
9804  **/
9805 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
9806 {
9807 	int target, lun;
9808 
9809 	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
9810 		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
9811 			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
9812 }
9813 
9814 /**
9815  * ipr_initiate_ioa_bringdown - Bring down an adapter
9816  * @ioa_cfg:		ioa config struct
9817  * @shutdown_type:	shutdown type
9818  *
9819  * Description: This function will initiate bringing down the adapter.
9820  * This consists of issuing an IOA shutdown to the adapter
9821  * to flush the cache, and running BIST.
9822  * If the caller needs to wait on the completion of the reset,
9823  * the caller must sleep on the reset_wait_q.
9824  *
9825  * Return value:
9826  * 	none
9827  **/
9828 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9829 				       enum ipr_shutdown_type shutdown_type)
9830 {
9831 	ENTER;
9832 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9833 		ioa_cfg->sdt_state = ABORT_DUMP;
9834 	ioa_cfg->reset_retries = 0;
9835 	ioa_cfg->in_ioa_bringdown = 1;
9836 	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9837 	LEAVE;
9838 }
9839 
9840 /**
9841  * __ipr_remove - Remove a single adapter
9842  * @pdev:	pci device struct
9843  *
9844  * Adapter hot plug remove entry point.
9845  *
9846  * Return value:
9847  * 	none
9848  **/
9849 static void __ipr_remove(struct pci_dev *pdev)
9850 {
9851 	unsigned long host_lock_flags = 0;
9852 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9853 	int i;
9854 	unsigned long driver_lock_flags;
9855 	ENTER;
9856 
9857 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9858 	while (ioa_cfg->in_reset_reload) {
9859 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9860 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9861 		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9862 	}
9863 
9864 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9865 		spin_lock(&ioa_cfg->hrrq[i]._lock);
9866 		ioa_cfg->hrrq[i].removing_ioa = 1;
9867 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
9868 	}
9869 	wmb();
9870 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9871 
9872 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9873 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9874 	flush_work(&ioa_cfg->work_q);
9875 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9876 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9877 
9878 	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9879 	list_del(&ioa_cfg->queue);
9880 	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9881 
9882 	if (ioa_cfg->sdt_state == ABORT_DUMP)
9883 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9884 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9885 
9886 	ipr_free_all_resources(ioa_cfg);
9887 
9888 	LEAVE;
9889 }
9890 
9891 /**
9892  * ipr_remove - IOA hot plug remove entry point
9893  * @pdev:	pci device struct
9894  *
9895  * Adapter hot plug remove entry point.
9896  *
9897  * Return value:
9898  * 	none
9899  **/
9900 static void ipr_remove(struct pci_dev *pdev)
9901 {
9902 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9903 
9904 	ENTER;
9905 
9906 	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9907 			      &ipr_trace_attr);
9908 	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9909 			     &ipr_dump_attr);
9910 	scsi_remove_host(ioa_cfg->host);
9911 
9912 	__ipr_remove(pdev);
9913 
9914 	LEAVE;
9915 }
9916 
9917 /**
9918  * ipr_probe - Adapter hot plug add entry point
9919  *
9920  * Return value:
9921  * 	0 on success / non-zero on failure
9922  **/
9923 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
9924 {
9925 	struct ipr_ioa_cfg *ioa_cfg;
9926 	int rc, i;
9927 
9928 	rc = ipr_probe_ioa(pdev, dev_id);
9929 
9930 	if (rc)
9931 		return rc;
9932 
9933 	ioa_cfg = pci_get_drvdata(pdev);
9934 	rc = ipr_probe_ioa_part2(ioa_cfg);
9935 
9936 	if (rc) {
9937 		__ipr_remove(pdev);
9938 		return rc;
9939 	}
9940 
9941 	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9942 
9943 	if (rc) {
9944 		__ipr_remove(pdev);
9945 		return rc;
9946 	}
9947 
9948 	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
9949 				   &ipr_trace_attr);
9950 
9951 	if (rc) {
9952 		scsi_remove_host(ioa_cfg->host);
9953 		__ipr_remove(pdev);
9954 		return rc;
9955 	}
9956 
9957 	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
9958 				   &ipr_dump_attr);
9959 
9960 	if (rc) {
9961 		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9962 				      &ipr_trace_attr);
9963 		scsi_remove_host(ioa_cfg->host);
9964 		__ipr_remove(pdev);
9965 		return rc;
9966 	}
9967 
9968 	scsi_scan_host(ioa_cfg->host);
9969 	ipr_scan_vsets(ioa_cfg);
9970 	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
9971 	ioa_cfg->allow_ml_add_del = 1;
9972 	ioa_cfg->host->max_channel = IPR_VSET_BUS;
9973 	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
9974 
9975 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9976 		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
9977 			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
9978 					ioa_cfg->iopoll_weight, ipr_iopoll);
9979 			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
9980 		}
9981 	}
9982 
9983 	schedule_work(&ioa_cfg->work_q);
9984 	return 0;
9985 }
9986 
9987 /**
9988  * ipr_shutdown - Shutdown handler.
9989  * @pdev:	pci device struct
9990  *
9991  * This function is invoked upon system shutdown/reboot. It will issue
9992  * an adapter shutdown to the adapter to flush the write cache.
9993  *
9994  * Return value:
9995  * 	none
9996  **/
9997 static void ipr_shutdown(struct pci_dev *pdev)
9998 {
9999 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10000 	unsigned long lock_flags = 0;
10001 	int i;
10002 
10003 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10004 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10005 		ioa_cfg->iopoll_weight = 0;
10006 		for (i = 1; i < ioa_cfg->hrrq_num; i++)
10007 			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
10008 	}
10009 
10010 	while (ioa_cfg->in_reset_reload) {
10011 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10012 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10013 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10014 	}
10015 
10016 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10017 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10018 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10019 }
10020 
10021 static struct pci_device_id ipr_pci_table[] = {
10022 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10023 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10024 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10025 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10026 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10027 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10028 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10029 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10030 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10031 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10032 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10033 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10034 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10035 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10036 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10037 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10038 		IPR_USE_LONG_TRANSOP_TIMEOUT },
10039 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10040 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10041 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10042 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10043 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
10044 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10045 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10046 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
10047 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10048 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10049 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10050 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10051 	      IPR_USE_LONG_TRANSOP_TIMEOUT},
10052 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10053 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10054 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
10055 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10056 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10057 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
10058 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10059 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10060 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10061 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10062 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10063 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10064 	      IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10065 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10066 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10067 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10068 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10069 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10070 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10071 		IPR_USE_LONG_TRANSOP_TIMEOUT },
10072 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10073 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10074 		IPR_USE_LONG_TRANSOP_TIMEOUT },
10075 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10076 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10077 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10078 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10079 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10080 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10081 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10082 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10083 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10084 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10085 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10086 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10087 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10088 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10089 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10090 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10091 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10092 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10093 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10094 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10095 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10096 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10097 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10098 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10099 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10100 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10101 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10102 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10103 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10104 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10105 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10106 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10107 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10108 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10109 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10110 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10111 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10112 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10113 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10114 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10115 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10116 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10117 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10118 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10119 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10120 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10121 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10122 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10123 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10124 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10125 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10126 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10127 	{ }
10128 };
10129 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10130 
10131 static const struct pci_error_handlers ipr_err_handler = {
10132 	.error_detected = ipr_pci_error_detected,
10133 	.mmio_enabled = ipr_pci_mmio_enabled,
10134 	.slot_reset = ipr_pci_slot_reset,
10135 };
10136 
10137 static struct pci_driver ipr_driver = {
10138 	.name = IPR_NAME,
10139 	.id_table = ipr_pci_table,
10140 	.probe = ipr_probe,
10141 	.remove = ipr_remove,
10142 	.shutdown = ipr_shutdown,
10143 	.err_handler = &ipr_err_handler,
10144 };
10145 
10146 /**
10147  * ipr_halt_done - Shutdown prepare completion
10148  *
10149  * Return value:
10150  * 	none
10151  **/
10152 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10153 {
10154 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10155 }
10156 
10157 /**
10158  * ipr_halt - Issue shutdown prepare to all adapters
10159  *
10160  * Return value:
10161  * 	NOTIFY_OK on success / NOTIFY_DONE on failure
10162  **/
10163 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10164 {
10165 	struct ipr_cmnd *ipr_cmd;
10166 	struct ipr_ioa_cfg *ioa_cfg;
10167 	unsigned long flags = 0, driver_lock_flags;
10168 
10169 	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10170 		return NOTIFY_DONE;
10171 
10172 	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10173 
10174 	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10175 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10176 		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
10177 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10178 			continue;
10179 		}
10180 
10181 		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10182 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10183 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10184 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10185 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10186 
10187 		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10188 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10189 	}
10190 	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10191 
10192 	return NOTIFY_OK;
10193 }
10194 
10195 static struct notifier_block ipr_notifier = {
10196 	ipr_halt, NULL, 0
10197 };
10198 
10199 /**
10200  * ipr_init - Module entry point
10201  *
10202  * Return value:
10203  * 	0 on success / negative value on failure
10204  **/
10205 static int __init ipr_init(void)
10206 {
10207 	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10208 		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10209 
10210 	register_reboot_notifier(&ipr_notifier);
10211 	return pci_register_driver(&ipr_driver);
10212 }
10213 
10214 /**
10215  * ipr_exit - Module unload
10216  *
10217  * Module unload entry point.
10218  *
10219  * Return value:
10220  * 	none
10221  **/
10222 static void __exit ipr_exit(void)
10223 {
10224 	unregister_reboot_notifier(&ipr_notifier);
10225 	pci_unregister_driver(&ipr_driver);
10226 }
10227 
10228 module_init(ipr_init);
10229 module_exit(ipr_exit);
10230