xref: /openbmc/linux/drivers/scsi/ipr.c (revision 50668633)
1 /*
2  * ipr.c -- driver for IBM Power Linux RAID adapters
3  *
4  * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5  *
6  * Copyright (C) 2003, 2004 IBM Corporation
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23 
24 /*
25  * Notes:
26  *
27  * This driver is used to control the following SCSI adapters:
28  *
29  * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30  *
31  * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32  *              PCI-X Dual Channel Ultra 320 SCSI Adapter
33  *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34  *              Embedded SCSI adapter on p615 and p655 systems
35  *
36  * Supported Hardware Features:
37  *	- Ultra 320 SCSI controller
38  *	- PCI-X host interface
39  *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40  *	- Non-Volatile Write Cache
41  *	- Supports attachment of non-RAID disks, tape, and optical devices
42  *	- RAID Levels 0, 5, 10
43  *	- Hot spare
44  *	- Background Parity Checking
45  *	- Background Data Scrubbing
46  *	- Ability to increase the capacity of an existing RAID 5 disk array
47  *		by adding disks
48  *
49  * Driver Features:
50  *	- Tagged command queuing
51  *	- Adapter microcode download
52  *	- PCI hot plug
53  *	- SCSI device hot plug
54  *
55  */
56 
57 #include <linux/fs.h>
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
79 #include <asm/io.h>
80 #include <asm/irq.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
87 #include "ipr.h"
88 
89 /*
90  *   Global Data
91  */
92 static LIST_HEAD(ipr_ioa_head);
93 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
94 static unsigned int ipr_max_speed = 1;
95 static int ipr_testmode = 0;
96 static unsigned int ipr_fastfail = 0;
97 static unsigned int ipr_transop_timeout = 0;
98 static unsigned int ipr_debug = 0;
99 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
100 static unsigned int ipr_dual_ioa_raid = 1;
101 static unsigned int ipr_number_of_msix = 2;
102 static DEFINE_SPINLOCK(ipr_driver_lock);
103 
104 /* This table describes the differences between DMA controller chips */
105 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
106 	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
107 		.mailbox = 0x0042C,
108 		.max_cmds = 100,
109 		.cache_line_size = 0x20,
110 		.clear_isr = 1,
111 		.iopoll_weight = 0,
112 		{
113 			.set_interrupt_mask_reg = 0x0022C,
114 			.clr_interrupt_mask_reg = 0x00230,
115 			.clr_interrupt_mask_reg32 = 0x00230,
116 			.sense_interrupt_mask_reg = 0x0022C,
117 			.sense_interrupt_mask_reg32 = 0x0022C,
118 			.clr_interrupt_reg = 0x00228,
119 			.clr_interrupt_reg32 = 0x00228,
120 			.sense_interrupt_reg = 0x00224,
121 			.sense_interrupt_reg32 = 0x00224,
122 			.ioarrin_reg = 0x00404,
123 			.sense_uproc_interrupt_reg = 0x00214,
124 			.sense_uproc_interrupt_reg32 = 0x00214,
125 			.set_uproc_interrupt_reg = 0x00214,
126 			.set_uproc_interrupt_reg32 = 0x00214,
127 			.clr_uproc_interrupt_reg = 0x00218,
128 			.clr_uproc_interrupt_reg32 = 0x00218
129 		}
130 	},
131 	{ /* Snipe and Scamp */
132 		.mailbox = 0x0052C,
133 		.max_cmds = 100,
134 		.cache_line_size = 0x20,
135 		.clear_isr = 1,
136 		.iopoll_weight = 0,
137 		{
138 			.set_interrupt_mask_reg = 0x00288,
139 			.clr_interrupt_mask_reg = 0x0028C,
140 			.clr_interrupt_mask_reg32 = 0x0028C,
141 			.sense_interrupt_mask_reg = 0x00288,
142 			.sense_interrupt_mask_reg32 = 0x00288,
143 			.clr_interrupt_reg = 0x00284,
144 			.clr_interrupt_reg32 = 0x00284,
145 			.sense_interrupt_reg = 0x00280,
146 			.sense_interrupt_reg32 = 0x00280,
147 			.ioarrin_reg = 0x00504,
148 			.sense_uproc_interrupt_reg = 0x00290,
149 			.sense_uproc_interrupt_reg32 = 0x00290,
150 			.set_uproc_interrupt_reg = 0x00290,
151 			.set_uproc_interrupt_reg32 = 0x00290,
152 			.clr_uproc_interrupt_reg = 0x00294,
153 			.clr_uproc_interrupt_reg32 = 0x00294
154 		}
155 	},
156 	{ /* CRoC */
157 		.mailbox = 0x00044,
158 		.max_cmds = 1000,
159 		.cache_line_size = 0x20,
160 		.clear_isr = 0,
161 		.iopoll_weight = 64,
162 		{
163 			.set_interrupt_mask_reg = 0x00010,
164 			.clr_interrupt_mask_reg = 0x00018,
165 			.clr_interrupt_mask_reg32 = 0x0001C,
166 			.sense_interrupt_mask_reg = 0x00010,
167 			.sense_interrupt_mask_reg32 = 0x00014,
168 			.clr_interrupt_reg = 0x00008,
169 			.clr_interrupt_reg32 = 0x0000C,
170 			.sense_interrupt_reg = 0x00000,
171 			.sense_interrupt_reg32 = 0x00004,
172 			.ioarrin_reg = 0x00070,
173 			.sense_uproc_interrupt_reg = 0x00020,
174 			.sense_uproc_interrupt_reg32 = 0x00024,
175 			.set_uproc_interrupt_reg = 0x00020,
176 			.set_uproc_interrupt_reg32 = 0x00024,
177 			.clr_uproc_interrupt_reg = 0x00028,
178 			.clr_uproc_interrupt_reg32 = 0x0002C,
179 			.init_feedback_reg = 0x0005C,
180 			.dump_addr_reg = 0x00064,
181 			.dump_data_reg = 0x00068,
182 			.endian_swap_reg = 0x00084
183 		}
184 	},
185 };
186 
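/*
 * Map each supported PCI vendor/device ID to its interrupt mode (IPR_USE_LSI
 * or IPR_USE_MSI), SIS generation (IPR_SIS32 or IPR_SIS64), register access
 * method (IPR_PCI_CFG or IPR_MMIO), and the matching ipr_chip_cfg[] entry above.
 */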
187 static const struct ipr_chip_t ipr_chip[] = {
188 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
189 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
190 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
191 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
192 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
193 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
194 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
195 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
196 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
197 };
198 
199 static int ipr_max_bus_speeds[] = {
200 	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
201 };
202 
203 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
204 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
205 module_param_named(max_speed, ipr_max_speed, uint, 0);
206 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
207 module_param_named(log_level, ipr_log_level, uint, 0);
208 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of the device driver");
209 module_param_named(testmode, ipr_testmode, int, 0);
210 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
211 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
212 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
213 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
214 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for the adapter to become operational (default: 300)");
215 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
216 MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
217 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
218 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
219 module_param_named(max_devs, ipr_max_devs, int, 0);
220 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
221 		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
222 module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
223 MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default: 2)");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(IPR_DRIVER_VERSION);
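/*
 * Illustrative usage of the module parameters above (values are examples
 * only):
 *
 *   modprobe ipr max_speed=2 log_level=2 number_of_msix=4
 *
 * or, with the driver built in, on the kernel command line as
 * ipr.max_speed=2 ipr.log_level=2.
 */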
226 
227 /*  A constant array of IOASCs/URCs/Error Messages */
228 static const
229 struct ipr_error_table_t ipr_error_table[] = {
230 	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
231 	"8155: An unknown error was received"},
232 	{0x00330000, 0, 0,
233 	"Soft underlength error"},
234 	{0x005A0000, 0, 0,
235 	"Command to be cancelled not found"},
236 	{0x00808000, 0, 0,
237 	"Qualified success"},
238 	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
239 	"FFFE: Soft device bus error recovered by the IOA"},
240 	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
241 	"4101: Soft device bus fabric error"},
242 	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
243 	"FFFC: Logical block guard error recovered by the device"},
244 	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
245 	"FFFC: Logical block reference tag error recovered by the device"},
246 	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
247 	"4171: Recovered scatter list tag / sequence number error"},
248 	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
249 	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
250 	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
251 	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
252 	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
253 	"FFFD: Recovered logical block reference tag error detected by the IOA"},
254 	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
255 	"FFFD: Logical block guard error recovered by the IOA"},
256 	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
257 	"FFF9: Device sector reassign successful"},
258 	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
259 	"FFF7: Media error recovered by device rewrite procedures"},
260 	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
261 	"7001: IOA sector reassignment successful"},
262 	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
263 	"FFF9: Soft media error. Sector reassignment recommended"},
264 	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
265 	"FFF7: Media error recovered by IOA rewrite procedures"},
266 	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
267 	"FF3D: Soft PCI bus error recovered by the IOA"},
268 	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
269 	"FFF6: Device hardware error recovered by the IOA"},
270 	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
271 	"FFF6: Device hardware error recovered by the device"},
272 	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
273 	"FF3D: Soft IOA error recovered by the IOA"},
274 	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
275 	"FFFA: Undefined device response recovered by the IOA"},
276 	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
277 	"FFF6: Device bus error, message or command phase"},
278 	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
279 	"FFFE: Task Management Function failed"},
280 	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
281 	"FFF6: Failure prediction threshold exceeded"},
282 	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
283 	"8009: Impending cache battery pack failure"},
284 	{0x02040100, 0, 0,
285 	"Logical Unit in process of becoming ready"},
286 	{0x02040200, 0, 0,
287 	"Initializing command required"},
288 	{0x02040400, 0, 0,
289 	"34FF: Disk device format in progress"},
290 	{0x02040C00, 0, 0,
291 	"Logical unit not accessible, target port in unavailable state"},
292 	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
293 	"9070: IOA requested reset"},
294 	{0x023F0000, 0, 0,
295 	"Synchronization required"},
296 	{0x02408500, 0, 0,
297 	"IOA microcode download required"},
298 	{0x02408600, 0, 0,
299 	"Device bus connection is prohibited by host"},
300 	{0x024E0000, 0, 0,
301 	"Not ready, IOA shutdown"},
302 	{0x025A0000, 0, 0,
303 	"Not ready, IOA has been shutdown"},
304 	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
305 	"3020: Storage subsystem configuration error"},
306 	{0x03110B00, 0, 0,
307 	"FFF5: Medium error, data unreadable, recommend reassign"},
308 	{0x03110C00, 0, 0,
309 	"7000: Medium error, data unreadable, do not reassign"},
310 	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
311 	"FFF3: Disk media format bad"},
312 	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
313 	"3002: Addressed device failed to respond to selection"},
314 	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
315 	"3100: Device bus error"},
316 	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
317 	"3109: IOA timed out a device command"},
318 	{0x04088000, 0, 0,
319 	"3120: SCSI bus is not operational"},
320 	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
321 	"4100: Hard device bus fabric error"},
322 	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
323 	"310C: Logical block guard error detected by the device"},
324 	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
325 	"310C: Logical block reference tag error detected by the device"},
326 	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
327 	"4170: Scatter list tag / sequence number error"},
328 	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
329 	"8150: Logical block CRC error on IOA to Host transfer"},
330 	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
331 	"4170: Logical block sequence number error on IOA to Host transfer"},
332 	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
333 	"310D: Logical block reference tag error detected by the IOA"},
334 	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
335 	"310D: Logical block guard error detected by the IOA"},
336 	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
337 	"9000: IOA reserved area data check"},
338 	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
339 	"9001: IOA reserved area invalid data pattern"},
340 	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
341 	"9002: IOA reserved area LRC error"},
342 	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
343 	"Hardware Error, IOA metadata access error"},
344 	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
345 	"102E: Out of alternate sectors for disk storage"},
346 	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
347 	"FFF4: Data transfer underlength error"},
348 	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
349 	"FFF4: Data transfer overlength error"},
350 	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
351 	"3400: Logical unit failure"},
352 	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
353 	"FFF4: Device microcode is corrupt"},
354 	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
355 	"8150: PCI bus error"},
356 	{0x04430000, 1, 0,
357 	"Unsupported device bus message received"},
358 	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
359 	"FFF4: Disk device problem"},
360 	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
361 	"8150: Permanent IOA failure"},
362 	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
363 	"3010: Disk device returned wrong response to IOA"},
364 	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
365 	"8151: IOA microcode error"},
366 	{0x04448500, 0, 0,
367 	"Device bus status error"},
368 	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
369 	"8157: IOA error requiring IOA reset to recover"},
370 	{0x04448700, 0, 0,
371 	"ATA device status error"},
372 	{0x04490000, 0, 0,
373 	"Message reject received from the device"},
374 	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
375 	"8008: A permanent cache battery pack failure occurred"},
376 	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
377 	"9090: Disk unit has been modified after the last known status"},
378 	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
379 	"9081: IOA detected device error"},
380 	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
381 	"9082: IOA detected device error"},
382 	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
383 	"3110: Device bus error, message or command phase"},
384 	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
385 	"3110: SAS Command / Task Management Function failed"},
386 	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
387 	"9091: Incorrect hardware configuration change has been detected"},
388 	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
389 	"9073: Invalid multi-adapter configuration"},
390 	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
391 	"4010: Incorrect connection between cascaded expanders"},
392 	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
393 	"4020: Connections exceed IOA design limits"},
394 	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
395 	"4030: Incorrect multipath connection"},
396 	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
397 	"4110: Unsupported enclosure function"},
398 	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
399 	"4120: SAS cable VPD cannot be read"},
400 	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
401 	"FFF4: Command to logical unit failed"},
402 	{0x05240000, 1, 0,
403 	"Illegal request, invalid request type or request packet"},
404 	{0x05250000, 0, 0,
405 	"Illegal request, invalid resource handle"},
406 	{0x05258000, 0, 0,
407 	"Illegal request, commands not allowed to this device"},
408 	{0x05258100, 0, 0,
409 	"Illegal request, command not allowed to a secondary adapter"},
410 	{0x05258200, 0, 0,
411 	"Illegal request, command not allowed to a non-optimized resource"},
412 	{0x05260000, 0, 0,
413 	"Illegal request, invalid field in parameter list"},
414 	{0x05260100, 0, 0,
415 	"Illegal request, parameter not supported"},
416 	{0x05260200, 0, 0,
417 	"Illegal request, parameter value invalid"},
418 	{0x052C0000, 0, 0,
419 	"Illegal request, command sequence error"},
420 	{0x052C8000, 1, 0,
421 	"Illegal request, dual adapter support not enabled"},
422 	{0x052C8100, 1, 0,
423 	"Illegal request, another cable connector was physically disabled"},
424 	{0x054E8000, 1, 0,
425 	"Illegal request, inconsistent group id/group count"},
426 	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
427 	"9031: Array protection temporarily suspended, protection resuming"},
428 	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
429 	"9040: Array protection temporarily suspended, protection resuming"},
430 	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
431 	"4080: IOA exceeded maximum operating temperature"},
432 	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
433 	"4085: Service required"},
434 	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
435 	"3140: Device bus not ready to ready transition"},
436 	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
437 	"FFFB: SCSI bus was reset"},
438 	{0x06290500, 0, 0,
439 	"FFFE: SCSI bus transition to single ended"},
440 	{0x06290600, 0, 0,
441 	"FFFE: SCSI bus transition to LVD"},
442 	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
443 	"FFFB: SCSI bus was reset by another initiator"},
444 	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
445 	"3029: A device replacement has occurred"},
446 	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
447 	"4102: Device bus fabric performance degradation"},
448 	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
449 	"9051: IOA cache data exists for a missing or failed device"},
450 	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
451 	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
452 	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
453 	"9025: Disk unit is not supported at its physical location"},
454 	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
455 	"3020: IOA detected a SCSI bus configuration error"},
456 	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
457 	"3150: SCSI bus configuration error"},
458 	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
459 	"9074: Asymmetric advanced function disk configuration"},
460 	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
461 	"4040: Incomplete multipath connection between IOA and enclosure"},
462 	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
463 	"4041: Incomplete multipath connection between enclosure and device"},
464 	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
465 	"9075: Incomplete multipath connection between IOA and remote IOA"},
466 	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
467 	"9076: Configuration error, missing remote IOA"},
468 	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
469 	"4050: Enclosure does not support a required multipath function"},
470 	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
471 	"4121: Configuration error, required cable is missing"},
472 	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
473 	"4122: Cable is not plugged into the correct location on remote IOA"},
474 	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
475 	"4123: Configuration error, invalid cable vital product data"},
476 	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
477 	"4124: Configuration error, both cable ends are plugged into the same IOA"},
478 	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
479 	"4070: Logically bad block written on device"},
480 	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
481 	"9041: Array protection temporarily suspended"},
482 	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
483 	"9042: Corrupt array parity detected on specified device"},
484 	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
485 	"9030: Array no longer protected due to missing or failed disk unit"},
486 	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
487 	"9071: Link operational transition"},
488 	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
489 	"9072: Link not operational transition"},
490 	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
491 	"9032: Array exposed but still protected"},
492 	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
493 	"70DD: Device forced failed by disrupt device command"},
494 	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
495 	"4061: Multipath redundancy level got better"},
496 	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
497 	"4060: Multipath redundancy level got worse"},
498 	{0x07270000, 0, 0,
499 	"Failure due to other device"},
500 	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
501 	"9008: IOA does not support functions expected by devices"},
502 	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
503 	"9010: Cache data associated with attached devices cannot be found"},
504 	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
505 	"9011: Cache data belongs to devices other than those attached"},
506 	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
507 	"9020: Array missing 2 or more devices with only 1 device present"},
508 	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
509 	"9021: Array missing 2 or more devices with 2 or more devices present"},
510 	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
511 	"9022: Exposed array is missing a required device"},
512 	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
513 	"9023: Array member(s) not at required physical locations"},
514 	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
515 	"9024: Array not functional due to present hardware configuration"},
516 	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
517 	"9026: Array not functional due to present hardware configuration"},
518 	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
519 	"9027: Array is missing a device and parity is out of sync"},
520 	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
521 	"9028: Maximum number of arrays already exist"},
522 	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
523 	"9050: Required cache data cannot be located for a disk unit"},
524 	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
525 	"9052: Cache data exists for a device that has been modified"},
526 	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
527 	"9054: IOA resources not available due to previous problems"},
528 	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
529 	"9092: Disk unit requires initialization before use"},
530 	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
531 	"9029: Incorrect hardware configuration change has been detected"},
532 	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
533 	"9060: One or more disk pairs are missing from an array"},
534 	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
535 	"9061: One or more disks are missing from an array"},
536 	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
537 	"9062: One or more disks are missing from an array"},
538 	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
539 	"9063: Maximum number of functional arrays has been exceeded"},
540 	{0x07279A00, 0, 0,
541 	"Data protect, other volume set problem"},
542 	{0x0B260000, 0, 0,
543 	"Aborted command, invalid descriptor"},
544 	{0x0B3F9000, 0, 0,
545 	"Target operating conditions have changed, dual adapter takeover"},
546 	{0x0B530200, 0, 0,
547 	"Aborted command, medium removal prevented"},
548 	{0x0B5A0000, 0, 0,
549 	"Command terminated by host"},
550 	{0x0B5B8000, 0, 0,
551 	"Aborted command, command terminated by host"}
552 };
553 
554 static const struct ipr_ses_table_entry ipr_ses_table[] = {
555 	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
556 	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
557 	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
558 	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
559 	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
560 	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
561 	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
562 	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
563 	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
564 	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
565 	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
566 	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
567 	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
568 };
569 
570 /*
571  *  Function Prototypes
572  */
573 static int ipr_reset_alert(struct ipr_cmnd *);
574 static void ipr_process_ccn(struct ipr_cmnd *);
575 static void ipr_process_error(struct ipr_cmnd *);
576 static void ipr_reset_ioa_job(struct ipr_cmnd *);
577 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
578 				   enum ipr_shutdown_type);
579 
580 #ifdef CONFIG_SCSI_IPR_TRACE
581 /**
582  * ipr_trc_hook - Add a trace entry to the driver trace
583  * @ipr_cmd:	ipr command struct
584  * @type:		trace type
585  * @add_data:	additional data
586  *
587  * Return value:
588  * 	none
589  **/
590 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
591 			 u8 type, u32 add_data)
592 {
593 	struct ipr_trace_entry *trace_entry;
594 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
595 
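	/* Atomically advance the trace index; the modulo wraps it so trace[]
	 * behaves as a circular buffer of IPR_NUM_TRACE_ENTRIES entries. */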
596 	trace_entry = &ioa_cfg->trace[atomic_add_return
597 			(1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
598 	trace_entry->time = jiffies;
599 	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
600 	trace_entry->type = type;
601 	if (ipr_cmd->ioa_cfg->sis64)
602 		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
603 	else
604 		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
605 	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
606 	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
607 	trace_entry->u.add_data = add_data;
608 	wmb();
609 }
610 #else
611 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
612 #endif
613 
614 /**
615  * ipr_lock_and_done - Acquire lock and complete command
616  * @ipr_cmd:	ipr command struct
617  *
618  * Return value:
619  *	none
620  **/
621 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
622 {
623 	unsigned long lock_flags;
624 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
625 
626 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
627 	ipr_cmd->done(ipr_cmd);
628 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
629 }
630 
631 /**
632  * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
633  * @ipr_cmd:	ipr command struct
634  *
635  * Return value:
636  * 	none
637  **/
638 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
639 {
640 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
641 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
642 	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
643 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
644 	int hrrq_id;
645 
646 	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
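	/* The command packet is cleared below; save the hrrq_id first and
	 * restore it so the command stays bound to its HRR queue. */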
647 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
648 	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
649 	ioarcb->data_transfer_length = 0;
650 	ioarcb->read_data_transfer_length = 0;
651 	ioarcb->ioadl_len = 0;
652 	ioarcb->read_ioadl_len = 0;
653 
654 	if (ipr_cmd->ioa_cfg->sis64) {
655 		ioarcb->u.sis64_addr_data.data_ioadl_addr =
656 			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
657 		ioasa64->u.gata.status = 0;
658 	} else {
659 		ioarcb->write_ioadl_addr =
660 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
661 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
662 		ioasa->u.gata.status = 0;
663 	}
664 
665 	ioasa->hdr.ioasc = 0;
666 	ioasa->hdr.residual_data_len = 0;
667 	ipr_cmd->scsi_cmd = NULL;
668 	ipr_cmd->qc = NULL;
669 	ipr_cmd->sense_buffer[0] = 0;
670 	ipr_cmd->dma_use_sg = 0;
671 }
672 
673 /**
674  * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
675  * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
676  *
677  * Return value:
678  * 	none
679  **/
680 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
681 			      void (*fast_done) (struct ipr_cmnd *))
682 {
683 	ipr_reinit_ipr_cmnd(ipr_cmd);
684 	ipr_cmd->u.scratch = 0;
685 	ipr_cmd->sibling = NULL;
686 	ipr_cmd->fast_done = fast_done;
687 	init_timer(&ipr_cmd->timer);
688 }
689 
690 /**
691  * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
692  * @hrrq:	hrr queue
693  *
694  * Return value:
695  * 	pointer to ipr command struct
696  **/
697 static
698 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
699 {
700 	struct ipr_cmnd *ipr_cmd = NULL;
701 
702 	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
703 		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
704 			struct ipr_cmnd, queue);
705 		list_del(&ipr_cmd->queue);
706 	}
707 
709 	return ipr_cmd;
710 }
711 
712 /**
713  * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
714  * @ioa_cfg:	ioa config struct
715  *
716  * Return value:
717  *	pointer to ipr command struct
718  **/
719 static
720 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
721 {
722 	struct ipr_cmnd *ipr_cmd =
723 		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
724 	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
725 	return ipr_cmd;
726 }
727 
728 /**
729  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
730  * @ioa_cfg:	ioa config struct
731  * @clr_ints:     interrupts to clear
732  *
733  * This function masks all interrupts on the adapter, then clears the
734  * interrupts specified in the mask
735  *
736  * Return value:
737  * 	none
738  **/
739 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
740 					  u32 clr_ints)
741 {
742 	volatile u32 int_reg;
743 	int i;
744 
745 	/* Stop new interrupts */
746 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
747 		spin_lock(&ioa_cfg->hrrq[i]._lock);
748 		ioa_cfg->hrrq[i].allow_interrupts = 0;
749 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
750 	}
751 	wmb();
752 
753 	/* Set interrupt mask to stop all new interrupts */
754 	if (ioa_cfg->sis64)
755 		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
756 	else
757 		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
758 
759 	/* Clear any pending interrupts */
760 	if (ioa_cfg->sis64)
761 		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
762 	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
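	/* Read back the sense register to ensure the mask/clear writes above
	 * have been posted to the adapter. */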
763 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
764 }
765 
766 /**
767  * ipr_save_pcix_cmd_reg - Save PCI-X command register
768  * @ioa_cfg:	ioa config struct
769  *
770  * Return value:
771  * 	0 on success / -EIO on failure
772  **/
773 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
774 {
775 	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
776 
777 	if (pcix_cmd_reg == 0)
778 		return 0;
779 
780 	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
781 				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
782 		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
783 		return -EIO;
784 	}
785 
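	/* Make sure data parity error recovery and relaxed ordering are
	 * enabled whenever the saved command register is later restored. */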
786 	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
787 	return 0;
788 }
789 
790 /**
791  * ipr_set_pcix_cmd_reg - Setup PCI-X command register
792  * @ioa_cfg:	ioa config struct
793  *
794  * Return value:
795  * 	0 on success / -EIO on failure
796  **/
797 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
798 {
799 	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
800 
801 	if (pcix_cmd_reg) {
802 		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
803 					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
804 			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
805 			return -EIO;
806 		}
807 	}
808 
809 	return 0;
810 }
811 
812 /**
813  * ipr_sata_eh_done - done function for aborted SATA commands
814  * @ipr_cmd:	ipr command struct
815  *
816  * This function is invoked for ops generated to SATA
817  * devices which are being aborted.
818  *
819  * Return value:
820  * 	none
821  **/
822 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
823 {
824 	struct ata_queued_cmd *qc = ipr_cmd->qc;
825 	struct ipr_sata_port *sata_port = qc->ap->private_data;
826 
827 	qc->err_mask |= AC_ERR_OTHER;
828 	sata_port->ioasa.status |= ATA_BUSY;
829 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
830 	ata_qc_complete(qc);
831 }
832 
833 /**
834  * ipr_scsi_eh_done - mid-layer done function for aborted ops
835  * @ipr_cmd:	ipr command struct
836  *
837  * This function is invoked by the interrupt handler for
838  * ops generated by the SCSI mid-layer which are being aborted.
839  *
840  * Return value:
841  * 	none
842  **/
843 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
844 {
845 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
846 
847 	scsi_cmd->result |= (DID_ERROR << 16);
848 
849 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
850 	scsi_cmd->scsi_done(scsi_cmd);
851 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
852 }
853 
854 /**
855  * ipr_fail_all_ops - Fails all outstanding ops.
856  * @ioa_cfg:	ioa config struct
857  *
858  * This function fails all outstanding ops.
859  *
860  * Return value:
861  * 	none
862  **/
863 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
864 {
865 	struct ipr_cmnd *ipr_cmd, *temp;
866 	struct ipr_hrr_queue *hrrq;
867 
868 	ENTER;
869 	for_each_hrrq(hrrq, ioa_cfg) {
870 		spin_lock(&hrrq->_lock);
871 		list_for_each_entry_safe(ipr_cmd,
872 					temp, &hrrq->hrrq_pending_q, queue) {
873 			list_del(&ipr_cmd->queue);
874 
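			/* Complete the op with an IOA-was-reset IOASC; the eh done
			 * routines set below return it to the SCSI midlayer or libata. */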
875 			ipr_cmd->s.ioasa.hdr.ioasc =
876 				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
877 			ipr_cmd->s.ioasa.hdr.ilid =
878 				cpu_to_be32(IPR_DRIVER_ILID);
879 
880 			if (ipr_cmd->scsi_cmd)
881 				ipr_cmd->done = ipr_scsi_eh_done;
882 			else if (ipr_cmd->qc)
883 				ipr_cmd->done = ipr_sata_eh_done;
884 
885 			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
886 				     IPR_IOASC_IOA_WAS_RESET);
887 			del_timer(&ipr_cmd->timer);
888 			ipr_cmd->done(ipr_cmd);
889 		}
890 		spin_unlock(&hrrq->_lock);
891 	}
892 	LEAVE;
893 }
894 
895 /**
896  * ipr_send_command -  Send driver initiated requests.
897  * @ipr_cmd:		ipr command struct
898  *
899  * This function sends a command to the adapter using the correct write call.
900  * In the case of sis64, calculate the ioarcb size required. Then or in the
901  * appropriate bits.
902  *
903  * Return value:
904  * 	none
905  **/
906 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
907 {
908 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
909 	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
910 
911 	if (ioa_cfg->sis64) {
912 		/* The default size is 256 bytes */
913 		send_dma_addr |= 0x1;
914 
915 		/* If the number of ioadls * size of ioadl > 128 bytes,
916 		   then use a 512 byte ioarcb */
917 		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
918 			send_dma_addr |= 0x4;
919 		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
920 	} else
921 		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
922 }
923 
924 /**
925  * ipr_do_req -  Send driver initiated requests.
926  * @ipr_cmd:		ipr command struct
927  * @done:			done function
928  * @timeout_func:	timeout function
929  * @timeout:		timeout value
930  *
931  * This function sends the specified command to the adapter with the
932  * timeout given. The done function is invoked on command completion.
933  *
934  * Return value:
935  * 	none
936  **/
937 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
938 		       void (*done) (struct ipr_cmnd *),
939 		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
940 {
941 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
942 
943 	ipr_cmd->done = done;
944 
945 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
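	/* Arm a per-command timer so timeout_func runs if the adapter does
	 * not complete the op within the given timeout. */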
946 	ipr_cmd->timer.expires = jiffies + timeout;
947 	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
948 
949 	add_timer(&ipr_cmd->timer);
950 
951 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
952 
953 	ipr_send_command(ipr_cmd);
954 }
955 
956 /**
957  * ipr_internal_cmd_done - Op done function for an internally generated op.
958  * @ipr_cmd:	ipr command struct
959  *
960  * This function is the op done function for an internally generated,
961  * blocking op. It simply wakes the sleeping thread.
962  *
963  * Return value:
964  * 	none
965  **/
966 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
967 {
968 	if (ipr_cmd->sibling)
969 		ipr_cmd->sibling = NULL;
970 	else
971 		complete(&ipr_cmd->completion);
972 }
973 
974 /**
975  * ipr_init_ioadl - initialize the ioadl for the correct SIS type
976  * @ipr_cmd:	ipr command struct
977  * @dma_addr:	dma address
978  * @len:	transfer length
979  * @flags:	ioadl flag value
980  *
981  * This function initializes an ioadl in the case where there is only a single
982  * descriptor.
983  *
984  * Return value:
985  * 	none
986  **/
987 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
988 			   u32 len, int flags)
989 {
990 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
991 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
992 
993 	ipr_cmd->dma_use_sg = 1;
994 
995 	if (ipr_cmd->ioa_cfg->sis64) {
996 		ioadl64->flags = cpu_to_be32(flags);
997 		ioadl64->data_len = cpu_to_be32(len);
998 		ioadl64->address = cpu_to_be64(dma_addr);
999 
1000 		ipr_cmd->ioarcb.ioadl_len =
1001 		       	cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1002 		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1003 	} else {
1004 		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1005 		ioadl->address = cpu_to_be32(dma_addr);
1006 
1007 		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1008 			ipr_cmd->ioarcb.read_ioadl_len =
1009 				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1010 			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1011 		} else {
1012 			ipr_cmd->ioarcb.ioadl_len =
1013 			       	cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1014 			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1015 		}
1016 	}
1017 }
1018 
1019 /**
1020  * ipr_send_blocking_cmd - Send command and sleep on its completion.
1021  * @ipr_cmd:	ipr command struct
1022  * @timeout_func:	function to invoke if command times out
1023  * @timeout:	timeout
1024  *
1025  * Return value:
1026  * 	none
1027  **/
1028 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1029 				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
1030 				  u32 timeout)
1031 {
1032 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1033 
1034 	init_completion(&ipr_cmd->completion);
1035 	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1036 
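	/* Drop the host lock while sleeping; ipr_internal_cmd_done signals
	 * the completion when the op finishes. */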
1037 	spin_unlock_irq(ioa_cfg->host->host_lock);
1038 	wait_for_completion(&ipr_cmd->completion);
1039 	spin_lock_irq(ioa_cfg->host->host_lock);
1040 }
1041 
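/**
 * ipr_get_hrrq_index - Select an HRR queue for a new command
 * @ioa_cfg:	ioa config struct
 *
 * When only one queue is configured, everything uses queue 0. Otherwise
 * queue 0 is left to driver-internal commands and the remaining queues
 * are selected round-robin.
 *
 * Return value:
 * 	index of the HRR queue to use
 **/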
1042 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1043 {
1044 	if (ioa_cfg->hrrq_num == 1)
1045 		return 0;
1046 	else
1047 		return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
1048 }
1049 
1050 /**
1051  * ipr_send_hcam - Send an HCAM to the adapter.
1052  * @ioa_cfg:	ioa config struct
1053  * @type:		HCAM type
1054  * @hostrcb:	hostrcb struct
1055  *
1056  * This function will send a Host Controlled Async command to the adapter.
1057  * If HCAMs are currently not allowed to be issued to the adapter, it will
1058  * place the hostrcb on the free queue.
1059  *
1060  * Return value:
1061  * 	none
1062  **/
1063 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1064 			  struct ipr_hostrcb *hostrcb)
1065 {
1066 	struct ipr_cmnd *ipr_cmd;
1067 	struct ipr_ioarcb *ioarcb;
1068 
1069 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1070 		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1071 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1072 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1073 
1074 		ipr_cmd->u.hostrcb = hostrcb;
1075 		ioarcb = &ipr_cmd->ioarcb;
1076 
1077 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1078 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1079 		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1080 		ioarcb->cmd_pkt.cdb[1] = type;
1081 		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
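		/* CDB bytes 7-8 carry the size of the hostrcb HCAM buffer
		 * (big endian), matching the ioadl length set up below. */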
1082 		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1083 
1084 		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1085 			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1086 
1087 		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1088 			ipr_cmd->done = ipr_process_ccn;
1089 		else
1090 			ipr_cmd->done = ipr_process_error;
1091 
1092 		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1093 
1094 		ipr_send_command(ipr_cmd);
1095 	} else {
1096 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1097 	}
1098 }
1099 
1100 /**
1101  * ipr_update_ata_class - Update the ata class in the resource entry
1102  * @res:	resource entry struct
1103  * @proto:	cfgte device bus protocol value
1104  *
1105  * Return value:
1106  * 	none
1107  **/
1108 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1109 {
1110 	switch (proto) {
1111 	case IPR_PROTO_SATA:
1112 	case IPR_PROTO_SAS_STP:
1113 		res->ata_class = ATA_DEV_ATA;
1114 		break;
1115 	case IPR_PROTO_SATA_ATAPI:
1116 	case IPR_PROTO_SAS_STP_ATAPI:
1117 		res->ata_class = ATA_DEV_ATAPI;
1118 		break;
1119 	default:
1120 		res->ata_class = ATA_DEV_UNKNOWN;
1121 		break;
1122 	}
1123 }
1124 
1125 /**
1126  * ipr_init_res_entry - Initialize a resource entry struct.
1127  * @res:	resource entry struct
1128  * @cfgtew:	config table entry wrapper struct
1129  *
1130  * Return value:
1131  * 	none
1132  **/
1133 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1134 			       struct ipr_config_table_entry_wrapper *cfgtew)
1135 {
1136 	int found = 0;
1137 	unsigned int proto;
1138 	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1139 	struct ipr_resource_entry *gscsi_res = NULL;
1140 
1141 	res->needs_sync_complete = 0;
1142 	res->in_erp = 0;
1143 	res->add_to_ml = 0;
1144 	res->del_from_ml = 0;
1145 	res->resetting_device = 0;
1146 	res->reset_occurred = 0;
1147 	res->sdev = NULL;
1148 	res->sata_port = NULL;
1149 
1150 	if (ioa_cfg->sis64) {
1151 		proto = cfgtew->u.cfgte64->proto;
1152 		res->res_flags = cfgtew->u.cfgte64->res_flags;
1153 		res->qmodel = IPR_QUEUEING_MODEL64(res);
1154 		res->type = cfgtew->u.cfgte64->res_type;
1155 
1156 		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1157 			sizeof(res->res_path));
1158 
1159 		res->bus = 0;
1160 		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1161 			sizeof(res->dev_lun.scsi_lun));
1162 		res->lun = scsilun_to_int(&res->dev_lun);
1163 
1164 		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
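			/* If another resource already reports this dev_id, reuse its
			 * target number; otherwise allocate a new one from the bitmap. */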
1165 			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1166 				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1167 					found = 1;
1168 					res->target = gscsi_res->target;
1169 					break;
1170 				}
1171 			}
1172 			if (!found) {
1173 				res->target = find_first_zero_bit(ioa_cfg->target_ids,
1174 								  ioa_cfg->max_devs_supported);
1175 				set_bit(res->target, ioa_cfg->target_ids);
1176 			}
1177 		} else if (res->type == IPR_RES_TYPE_IOAFP) {
1178 			res->bus = IPR_IOAFP_VIRTUAL_BUS;
1179 			res->target = 0;
1180 		} else if (res->type == IPR_RES_TYPE_ARRAY) {
1181 			res->bus = IPR_ARRAY_VIRTUAL_BUS;
1182 			res->target = find_first_zero_bit(ioa_cfg->array_ids,
1183 							  ioa_cfg->max_devs_supported);
1184 			set_bit(res->target, ioa_cfg->array_ids);
1185 		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1186 			res->bus = IPR_VSET_VIRTUAL_BUS;
1187 			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1188 							  ioa_cfg->max_devs_supported);
1189 			set_bit(res->target, ioa_cfg->vset_ids);
1190 		} else {
1191 			res->target = find_first_zero_bit(ioa_cfg->target_ids,
1192 							  ioa_cfg->max_devs_supported);
1193 			set_bit(res->target, ioa_cfg->target_ids);
1194 		}
1195 	} else {
1196 		proto = cfgtew->u.cfgte->proto;
1197 		res->qmodel = IPR_QUEUEING_MODEL(res);
1198 		res->flags = cfgtew->u.cfgte->flags;
1199 		if (res->flags & IPR_IS_IOA_RESOURCE)
1200 			res->type = IPR_RES_TYPE_IOAFP;
1201 		else
1202 			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1203 
1204 		res->bus = cfgtew->u.cfgte->res_addr.bus;
1205 		res->target = cfgtew->u.cfgte->res_addr.target;
1206 		res->lun = cfgtew->u.cfgte->res_addr.lun;
1207 		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1208 	}
1209 
1210 	ipr_update_ata_class(res, proto);
1211 }
1212 
1213 /**
1214  * ipr_is_same_device - Determine if two devices are the same.
1215  * @res:	resource entry struct
1216  * @cfgtew:	config table entry wrapper struct
1217  *
1218  * Return value:
1219  * 	1 if the devices are the same / 0 otherwise
1220  **/
1221 static int ipr_is_same_device(struct ipr_resource_entry *res,
1222 			      struct ipr_config_table_entry_wrapper *cfgtew)
1223 {
1224 	if (res->ioa_cfg->sis64) {
1225 		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1226 					sizeof(cfgtew->u.cfgte64->dev_id)) &&
1227 			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1228 					sizeof(cfgtew->u.cfgte64->lun))) {
1229 			return 1;
1230 		}
1231 	} else {
1232 		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1233 		    res->target == cfgtew->u.cfgte->res_addr.target &&
1234 		    res->lun == cfgtew->u.cfgte->res_addr.lun)
1235 			return 1;
1236 	}
1237 
1238 	return 0;
1239 }
1240 
1241 /**
1242  * __ipr_format_res_path - Format the resource path for printing.
1243  * @res_path:	resource path
1244  * @buffer:	buffer
1245  * @len:	length of buffer provided
1246  *
1247  * Return value:
1248  * 	pointer to buffer
1249  **/
1250 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1251 {
1252 	int i;
1253 	char *p = buffer;
1254 
1255 	*p = '\0';
1256 	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1257 	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1258 		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1259 
1260 	return buffer;
1261 }
1262 
1263 /**
1264  * ipr_format_res_path - Format the resource path for printing.
1265  * @ioa_cfg:	ioa config struct
1266  * @res_path:	resource path
1267  * @buffer:	buffer
1268  * @len:	length of buffer provided
1269  *
1270  * Return value:
1271  *	pointer to buffer
1272  **/
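 *	(illustrative example: host 0 with path bytes 00 0A 02 is rendered
 *	as "0/00-0A-02")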
1273 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1274 				 u8 *res_path, char *buffer, int len)
1275 {
1276 	char *p = buffer;
1277 
1278 	*p = '\0';
1279 	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1280 	__ipr_format_res_path(res_path, p, len - (buffer - p));
1281 	return buffer;
1282 }
1283 
1284 /**
1285  * ipr_update_res_entry - Update the resource entry.
1286  * @res:	resource entry struct
1287  * @cfgtew:	config table entry wrapper struct
1288  *
1289  * Return value:
1290  *      none
1291  **/
1292 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1293 				 struct ipr_config_table_entry_wrapper *cfgtew)
1294 {
1295 	char buffer[IPR_MAX_RES_PATH_LENGTH];
1296 	unsigned int proto;
1297 	int new_path = 0;
1298 
1299 	if (res->ioa_cfg->sis64) {
1300 		res->flags = cfgtew->u.cfgte64->flags;
1301 		res->res_flags = cfgtew->u.cfgte64->res_flags;
1302 		res->type = cfgtew->u.cfgte64->res_type;
1303 
1304 		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1305 			sizeof(struct ipr_std_inq_data));
1306 
1307 		res->qmodel = IPR_QUEUEING_MODEL64(res);
1308 		proto = cfgtew->u.cfgte64->proto;
1309 		res->res_handle = cfgtew->u.cfgte64->res_handle;
1310 		res->dev_id = cfgtew->u.cfgte64->dev_id;
1311 
1312 		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1313 			sizeof(res->dev_lun.scsi_lun));
1314 
1315 		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1316 					sizeof(res->res_path))) {
1317 			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1318 				sizeof(res->res_path));
1319 			new_path = 1;
1320 		}
1321 
1322 		if (res->sdev && new_path)
1323 			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1324 				    ipr_format_res_path(res->ioa_cfg,
1325 					res->res_path, buffer, sizeof(buffer)));
1326 	} else {
1327 		res->flags = cfgtew->u.cfgte->flags;
1328 		if (res->flags & IPR_IS_IOA_RESOURCE)
1329 			res->type = IPR_RES_TYPE_IOAFP;
1330 		else
1331 			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1332 
1333 		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1334 			sizeof(struct ipr_std_inq_data));
1335 
1336 		res->qmodel = IPR_QUEUEING_MODEL(res);
1337 		proto = cfgtew->u.cfgte->proto;
1338 		res->res_handle = cfgtew->u.cfgte->res_handle;
1339 	}
1340 
1341 	ipr_update_ata_class(res, proto);
1342 }
1343 
1344 /**
1345  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1346  * 			  for the resource.
1347  * @res:	resource entry struct
1349  *
1350  * Return value:
1351  *      none
1352  **/
1353 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1354 {
1355 	struct ipr_resource_entry *gscsi_res = NULL;
1356 	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1357 
1358 	if (!ioa_cfg->sis64)
1359 		return;
1360 
1361 	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1362 		clear_bit(res->target, ioa_cfg->array_ids);
1363 	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1364 		clear_bit(res->target, ioa_cfg->vset_ids);
1365 	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1366 		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1367 			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1368 				return;
1369 		clear_bit(res->target, ioa_cfg->target_ids);
1370 
1371 	} else if (res->bus == 0)
1372 		clear_bit(res->target, ioa_cfg->target_ids);
1373 }
1374 
1375 /**
1376  * ipr_handle_config_change - Handle a config change from the adapter
1377  * @ioa_cfg:	ioa config struct
1378  * @hostrcb:	hostrcb
1379  *
1380  * Return value:
1381  * 	none
1382  **/
1383 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1384 				     struct ipr_hostrcb *hostrcb)
1385 {
1386 	struct ipr_resource_entry *res = NULL;
1387 	struct ipr_config_table_entry_wrapper cfgtew;
1388 	__be32 cc_res_handle;
1389 
1390 	u32 is_ndn = 1;
1391 
1392 	if (ioa_cfg->sis64) {
1393 		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1394 		cc_res_handle = cfgtew.u.cfgte64->res_handle;
1395 	} else {
1396 		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1397 		cc_res_handle = cfgtew.u.cfgte->res_handle;
1398 	}
1399 
1400 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1401 		if (res->res_handle == cc_res_handle) {
1402 			is_ndn = 0;
1403 			break;
1404 		}
1405 	}
1406 
1407 	if (is_ndn) {
1408 		if (list_empty(&ioa_cfg->free_res_q)) {
1409 			ipr_send_hcam(ioa_cfg,
1410 				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1411 				      hostrcb);
1412 			return;
1413 		}
1414 
1415 		res = list_entry(ioa_cfg->free_res_q.next,
1416 				 struct ipr_resource_entry, queue);
1417 
1418 		list_del(&res->queue);
1419 		ipr_init_res_entry(res, &cfgtew);
1420 		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1421 	}
1422 
1423 	ipr_update_res_entry(res, &cfgtew);
1424 
1425 	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1426 		if (res->sdev) {
1427 			res->del_from_ml = 1;
1428 			res->res_handle = IPR_INVALID_RES_HANDLE;
1429 			if (ioa_cfg->allow_ml_add_del)
1430 				schedule_work(&ioa_cfg->work_q);
1431 		} else {
1432 			ipr_clear_res_target(res);
1433 			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1434 		}
1435 	} else if (!res->sdev || res->del_from_ml) {
1436 		res->add_to_ml = 1;
1437 		if (ioa_cfg->allow_ml_add_del)
1438 			schedule_work(&ioa_cfg->work_q);
1439 	}
1440 
1441 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1442 }
1443 
1444 /**
1445  * ipr_process_ccn - Op done function for a CCN.
1446  * @ipr_cmd:	ipr command struct
1447  *
1448  * This function is the op done function for a configuration
1449  * change notification host controlled async from the adapter.
1450  *
1451  * Return value:
1452  * 	none
1453  **/
1454 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1455 {
1456 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1457 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1458 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1459 
1460 	list_del(&hostrcb->queue);
1461 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1462 
1463 	if (ioasc) {
1464 		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1465 			dev_err(&ioa_cfg->pdev->dev,
1466 				"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1467 
1468 		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1469 	} else {
1470 		ipr_handle_config_change(ioa_cfg, hostrcb);
1471 	}
1472 }
1473 
1474 /**
1475  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1476  * @i:		index into buffer
1477  * @buf:		string to modify
1478  *
1479  * This function will strip all trailing whitespace, pad the end
1480  * of the string with a single space, and NULL terminate the string.
1481  *
1482  * Return value:
1483  * 	new length of string
1484  **/
1485 static int strip_and_pad_whitespace(int i, char *buf)
1486 {
1487 	while (i && buf[i] == ' ')
1488 		i--;
1489 	buf[i+1] = ' ';
1490 	buf[i+2] = '\0';
1491 	return i + 2;
1492 }
1493 
1494 /**
1495  * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1496  * @prefix:		string to print at start of printk
1497  * @hostrcb:	hostrcb pointer
1498  * @vpd:		vendor/product id/sn struct
1499  *
1500  * Return value:
1501  * 	none
1502  **/
1503 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1504 				struct ipr_vpd *vpd)
1505 {
1506 	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1507 	int i = 0;
1508 
1509 	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1510 	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1511 
1512 	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1513 	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1514 
1515 	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1516 	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1517 
1518 	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1519 }
1520 
1521 /**
1522  * ipr_log_vpd - Log the passed VPD to the error log.
1523  * @vpd:		vendor/product id/sn struct
1524  *
1525  * Return value:
1526  * 	none
1527  **/
1528 static void ipr_log_vpd(struct ipr_vpd *vpd)
1529 {
1530 	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1531 		    + IPR_SERIAL_NUM_LEN];
1532 
1533 	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1534 	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1535 	       IPR_PROD_ID_LEN);
1536 	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1537 	ipr_err("Vendor/Product ID: %s\n", buffer);
1538 
1539 	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1540 	buffer[IPR_SERIAL_NUM_LEN] = '\0';
1541 	ipr_err("    Serial Number: %s\n", buffer);
1542 }
1543 
1544 /**
1545  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1546  * @prefix:		string to print at start of printk
1547  * @hostrcb:	hostrcb pointer
1548  * @vpd:		vendor/product id/sn/wwn struct
1549  *
1550  * Return value:
1551  * 	none
1552  **/
1553 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1554 				    struct ipr_ext_vpd *vpd)
1555 {
1556 	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1557 	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1558 		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1559 }
1560 
1561 /**
1562  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1563  * @vpd:		vendor/product id/sn/wwn struct
1564  *
1565  * Return value:
1566  * 	none
1567  **/
1568 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1569 {
1570 	ipr_log_vpd(&vpd->vpd);
1571 	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1572 		be32_to_cpu(vpd->wwid[1]));
1573 }
1574 
1575 /**
1576  * ipr_log_enhanced_cache_error - Log a cache error.
1577  * @ioa_cfg:	ioa config struct
1578  * @hostrcb:	hostrcb struct
1579  *
1580  * Return value:
1581  * 	none
1582  **/
1583 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1584 					 struct ipr_hostrcb *hostrcb)
1585 {
1586 	struct ipr_hostrcb_type_12_error *error;
1587 
1588 	if (ioa_cfg->sis64)
1589 		error = &hostrcb->hcam.u.error64.u.type_12_error;
1590 	else
1591 		error = &hostrcb->hcam.u.error.u.type_12_error;
1592 
1593 	ipr_err("-----Current Configuration-----\n");
1594 	ipr_err("Cache Directory Card Information:\n");
1595 	ipr_log_ext_vpd(&error->ioa_vpd);
1596 	ipr_err("Adapter Card Information:\n");
1597 	ipr_log_ext_vpd(&error->cfc_vpd);
1598 
1599 	ipr_err("-----Expected Configuration-----\n");
1600 	ipr_err("Cache Directory Card Information:\n");
1601 	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1602 	ipr_err("Adapter Card Information:\n");
1603 	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1604 
1605 	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1606 		     be32_to_cpu(error->ioa_data[0]),
1607 		     be32_to_cpu(error->ioa_data[1]),
1608 		     be32_to_cpu(error->ioa_data[2]));
1609 }
1610 
1611 /**
1612  * ipr_log_cache_error - Log a cache error.
1613  * @ioa_cfg:	ioa config struct
1614  * @hostrcb:	hostrcb struct
1615  *
1616  * Return value:
1617  * 	none
1618  **/
1619 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1620 				struct ipr_hostrcb *hostrcb)
1621 {
1622 	struct ipr_hostrcb_type_02_error *error =
1623 		&hostrcb->hcam.u.error.u.type_02_error;
1624 
1625 	ipr_err("-----Current Configuration-----\n");
1626 	ipr_err("Cache Directory Card Information:\n");
1627 	ipr_log_vpd(&error->ioa_vpd);
1628 	ipr_err("Adapter Card Information:\n");
1629 	ipr_log_vpd(&error->cfc_vpd);
1630 
1631 	ipr_err("-----Expected Configuration-----\n");
1632 	ipr_err("Cache Directory Card Information:\n");
1633 	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1634 	ipr_err("Adapter Card Information:\n");
1635 	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1636 
1637 	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1638 		     be32_to_cpu(error->ioa_data[0]),
1639 		     be32_to_cpu(error->ioa_data[1]),
1640 		     be32_to_cpu(error->ioa_data[2]));
1641 }
1642 
1643 /**
1644  * ipr_log_enhanced_config_error - Log a configuration error.
1645  * @ioa_cfg:	ioa config struct
1646  * @hostrcb:	hostrcb struct
1647  *
1648  * Return value:
1649  * 	none
1650  **/
1651 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1652 					  struct ipr_hostrcb *hostrcb)
1653 {
1654 	int errors_logged, i;
1655 	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1656 	struct ipr_hostrcb_type_13_error *error;
1657 
1658 	error = &hostrcb->hcam.u.error.u.type_13_error;
1659 	errors_logged = be32_to_cpu(error->errors_logged);
1660 
1661 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1662 		be32_to_cpu(error->errors_detected), errors_logged);
1663 
1664 	dev_entry = error->dev;
1665 
1666 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1667 		ipr_err_separator;
1668 
1669 		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1670 		ipr_log_ext_vpd(&dev_entry->vpd);
1671 
1672 		ipr_err("-----New Device Information-----\n");
1673 		ipr_log_ext_vpd(&dev_entry->new_vpd);
1674 
1675 		ipr_err("Cache Directory Card Information:\n");
1676 		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1677 
1678 		ipr_err("Adapter Card Information:\n");
1679 		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1680 	}
1681 }
1682 
1683 /**
1684  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1685  * @ioa_cfg:	ioa config struct
1686  * @hostrcb:	hostrcb struct
1687  *
1688  * Return value:
1689  * 	none
1690  **/
1691 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1692 				       struct ipr_hostrcb *hostrcb)
1693 {
1694 	int errors_logged, i;
1695 	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1696 	struct ipr_hostrcb_type_23_error *error;
1697 	char buffer[IPR_MAX_RES_PATH_LENGTH];
1698 
1699 	error = &hostrcb->hcam.u.error64.u.type_23_error;
1700 	errors_logged = be32_to_cpu(error->errors_logged);
1701 
1702 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1703 		be32_to_cpu(error->errors_detected), errors_logged);
1704 
1705 	dev_entry = error->dev;
1706 
1707 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1708 		ipr_err_separator;
1709 
1710 		ipr_err("Device %d : %s", i + 1,
1711 			__ipr_format_res_path(dev_entry->res_path,
1712 					      buffer, sizeof(buffer)));
1713 		ipr_log_ext_vpd(&dev_entry->vpd);
1714 
1715 		ipr_err("-----New Device Information-----\n");
1716 		ipr_log_ext_vpd(&dev_entry->new_vpd);
1717 
1718 		ipr_err("Cache Directory Card Information:\n");
1719 		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1720 
1721 		ipr_err("Adapter Card Information:\n");
1722 		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1723 	}
1724 }
1725 
1726 /**
1727  * ipr_log_config_error - Log a configuration error.
1728  * @ioa_cfg:	ioa config struct
1729  * @hostrcb:	hostrcb struct
1730  *
1731  * Return value:
1732  * 	none
1733  **/
1734 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1735 				 struct ipr_hostrcb *hostrcb)
1736 {
1737 	int errors_logged, i;
1738 	struct ipr_hostrcb_device_data_entry *dev_entry;
1739 	struct ipr_hostrcb_type_03_error *error;
1740 
1741 	error = &hostrcb->hcam.u.error.u.type_03_error;
1742 	errors_logged = be32_to_cpu(error->errors_logged);
1743 
1744 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1745 		be32_to_cpu(error->errors_detected), errors_logged);
1746 
1747 	dev_entry = error->dev;
1748 
1749 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1750 		ipr_err_separator;
1751 
1752 		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1753 		ipr_log_vpd(&dev_entry->vpd);
1754 
1755 		ipr_err("-----New Device Information-----\n");
1756 		ipr_log_vpd(&dev_entry->new_vpd);
1757 
1758 		ipr_err("Cache Directory Card Information:\n");
1759 		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1760 
1761 		ipr_err("Adapter Card Information:\n");
1762 		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1763 
1764 		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1765 			be32_to_cpu(dev_entry->ioa_data[0]),
1766 			be32_to_cpu(dev_entry->ioa_data[1]),
1767 			be32_to_cpu(dev_entry->ioa_data[2]),
1768 			be32_to_cpu(dev_entry->ioa_data[3]),
1769 			be32_to_cpu(dev_entry->ioa_data[4]));
1770 	}
1771 }
1772 
1773 /**
1774  * ipr_log_enhanced_array_error - Log an array configuration error.
1775  * @ioa_cfg:	ioa config struct
1776  * @hostrcb:	hostrcb struct
1777  *
1778  * Return value:
1779  * 	none
1780  **/
1781 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1782 					 struct ipr_hostrcb *hostrcb)
1783 {
1784 	int i, num_entries;
1785 	struct ipr_hostrcb_type_14_error *error;
1786 	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1787 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1788 
1789 	error = &hostrcb->hcam.u.error.u.type_14_error;
1790 
1791 	ipr_err_separator;
1792 
1793 	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1794 		error->protection_level,
1795 		ioa_cfg->host->host_no,
1796 		error->last_func_vset_res_addr.bus,
1797 		error->last_func_vset_res_addr.target,
1798 		error->last_func_vset_res_addr.lun);
1799 
1800 	ipr_err_separator;
1801 
1802 	array_entry = error->array_member;
1803 	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1804 			    ARRAY_SIZE(error->array_member));
1805 
1806 	for (i = 0; i < num_entries; i++, array_entry++) {
1807 		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1808 			continue;
1809 
1810 		if (be32_to_cpu(error->exposed_mode_adn) == i)
1811 			ipr_err("Exposed Array Member %d:\n", i);
1812 		else
1813 			ipr_err("Array Member %d:\n", i);
1814 
1815 		ipr_log_ext_vpd(&array_entry->vpd);
1816 		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1817 		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1818 				 "Expected Location");
1819 
1820 		ipr_err_separator;
1821 	}
1822 }
1823 
1824 /**
1825  * ipr_log_array_error - Log an array configuration error.
1826  * @ioa_cfg:	ioa config struct
1827  * @hostrcb:	hostrcb struct
1828  *
1829  * Return value:
1830  * 	none
1831  **/
1832 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1833 				struct ipr_hostrcb *hostrcb)
1834 {
1835 	int i;
1836 	struct ipr_hostrcb_type_04_error *error;
1837 	struct ipr_hostrcb_array_data_entry *array_entry;
1838 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1839 
1840 	error = &hostrcb->hcam.u.error.u.type_04_error;
1841 
1842 	ipr_err_separator;
1843 
1844 	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1845 		error->protection_level,
1846 		ioa_cfg->host->host_no,
1847 		error->last_func_vset_res_addr.bus,
1848 		error->last_func_vset_res_addr.target,
1849 		error->last_func_vset_res_addr.lun);
1850 
1851 	ipr_err_separator;
1852 
1853 	array_entry = error->array_member;
1854 
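	/*
	 * The type 04 overlay describes up to 18 array members split across
	 * two fixed-size arrays: entries 0-9 come from array_member and the
	 * remainder from array_member2 (see the i == 9 switch below).
	 */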
1855 	for (i = 0; i < 18; i++) {
1856 		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1857 			continue;
1858 
1859 		if (be32_to_cpu(error->exposed_mode_adn) == i)
1860 			ipr_err("Exposed Array Member %d:\n", i);
1861 		else
1862 			ipr_err("Array Member %d:\n", i);
1863 
1864 		ipr_log_vpd(&array_entry->vpd);
1865 
1866 		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1867 		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1868 				 "Expected Location");
1869 
1870 		ipr_err_separator;
1871 
1872 		if (i == 9)
1873 			array_entry = error->array_member2;
1874 		else
1875 			array_entry++;
1876 	}
1877 }
1878 
1879 /**
1880  * ipr_log_hex_data - Log additional hex IOA error data.
1881  * @ioa_cfg:	ioa config struct
1882  * @data:		IOA error data
1883  * @len:		data length in bytes
1884  *
1885  * Return value:
1886  * 	none
1887  **/
1888 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1889 {
1890 	int i;
1891 
1892 	if (len == 0)
1893 		return;
1894 
1895 	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1896 		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1897 
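	/*
	 * Print four 32-bit words per line (converted from the adapter's
	 * big-endian format), prefixed with the byte offset of the first
	 * word, e.g.:
	 *   00000000: DEADBEEF 00000001 00000002 00000003
	 * (values shown are illustrative only)
	 */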
1898 	for (i = 0; i < len / 4; i += 4) {
1899 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1900 			be32_to_cpu(data[i]),
1901 			be32_to_cpu(data[i+1]),
1902 			be32_to_cpu(data[i+2]),
1903 			be32_to_cpu(data[i+3]));
1904 	}
1905 }
1906 
1907 /**
1908  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1909  * @ioa_cfg:	ioa config struct
1910  * @hostrcb:	hostrcb struct
1911  *
1912  * Return value:
1913  * 	none
1914  **/
1915 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1916 					    struct ipr_hostrcb *hostrcb)
1917 {
1918 	struct ipr_hostrcb_type_17_error *error;
1919 
1920 	if (ioa_cfg->sis64)
1921 		error = &hostrcb->hcam.u.error64.u.type_17_error;
1922 	else
1923 		error = &hostrcb->hcam.u.error.u.type_17_error;
1924 
1925 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1926 	strim(error->failure_reason);
1927 
1928 	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1929 		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1930 	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1931 	ipr_log_hex_data(ioa_cfg, error->data,
1932 			 be32_to_cpu(hostrcb->hcam.length) -
1933 			 (offsetof(struct ipr_hostrcb_error, u) +
1934 			  offsetof(struct ipr_hostrcb_type_17_error, data)));
1935 }
1936 
1937 /**
1938  * ipr_log_dual_ioa_error - Log a dual adapter error.
1939  * @ioa_cfg:	ioa config struct
1940  * @hostrcb:	hostrcb struct
1941  *
1942  * Return value:
1943  * 	none
1944  **/
1945 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1946 				   struct ipr_hostrcb *hostrcb)
1947 {
1948 	struct ipr_hostrcb_type_07_error *error;
1949 
1950 	error = &hostrcb->hcam.u.error.u.type_07_error;
1951 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1952 	strim(error->failure_reason);
1953 
1954 	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1955 		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1956 	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1957 	ipr_log_hex_data(ioa_cfg, error->data,
1958 			 be32_to_cpu(hostrcb->hcam.length) -
1959 			 (offsetof(struct ipr_hostrcb_error, u) +
1960 			  offsetof(struct ipr_hostrcb_type_07_error, data)));
1961 }
1962 
1963 static const struct {
1964 	u8 active;
1965 	char *desc;
1966 } path_active_desc[] = {
1967 	{ IPR_PATH_NO_INFO, "Path" },
1968 	{ IPR_PATH_ACTIVE, "Active path" },
1969 	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
1970 };
1971 
1972 static const struct {
1973 	u8 state;
1974 	char *desc;
1975 } path_state_desc[] = {
1976 	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1977 	{ IPR_PATH_HEALTHY, "is healthy" },
1978 	{ IPR_PATH_DEGRADED, "is degraded" },
1979 	{ IPR_PATH_FAILED, "is failed" }
1980 };
1981 
1982 /**
1983  * ipr_log_fabric_path - Log a fabric path error
1984  * @hostrcb:	hostrcb struct
1985  * @fabric:		fabric descriptor
1986  *
1987  * Return value:
1988  * 	none
1989  **/
1990 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1991 				struct ipr_hostrcb_fabric_desc *fabric)
1992 {
1993 	int i, j;
1994 	u8 path_state = fabric->path_state;
1995 	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1996 	u8 state = path_state & IPR_PATH_STATE_MASK;
1997 
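	/*
	 * Match the active/state bits against the descriptor tables above and
	 * log a single line, e.g. "Active path is degraded: IOA Port=0, Phy=2".
	 * A cascade or phy value of 0xff is treated as not applicable and is
	 * omitted from the message.
	 */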
1998 	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1999 		if (path_active_desc[i].active != active)
2000 			continue;
2001 
2002 		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2003 			if (path_state_desc[j].state != state)
2004 				continue;
2005 
2006 			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2007 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2008 					     path_active_desc[i].desc, path_state_desc[j].desc,
2009 					     fabric->ioa_port);
2010 			} else if (fabric->cascaded_expander == 0xff) {
2011 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2012 					     path_active_desc[i].desc, path_state_desc[j].desc,
2013 					     fabric->ioa_port, fabric->phy);
2014 			} else if (fabric->phy == 0xff) {
2015 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2016 					     path_active_desc[i].desc, path_state_desc[j].desc,
2017 					     fabric->ioa_port, fabric->cascaded_expander);
2018 			} else {
2019 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2020 					     path_active_desc[i].desc, path_state_desc[j].desc,
2021 					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2022 			}
2023 			return;
2024 		}
2025 	}
2026 
2027 	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2028 		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2029 }
2030 
2031 /**
2032  * ipr_log64_fabric_path - Log a fabric path error
2033  * @hostrcb:	hostrcb struct
2034  * @fabric:		fabric descriptor
2035  *
2036  * Return value:
2037  * 	none
2038  **/
2039 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2040 				  struct ipr_hostrcb64_fabric_desc *fabric)
2041 {
2042 	int i, j;
2043 	u8 path_state = fabric->path_state;
2044 	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2045 	u8 state = path_state & IPR_PATH_STATE_MASK;
2046 	char buffer[IPR_MAX_RES_PATH_LENGTH];
2047 
2048 	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2049 		if (path_active_desc[i].active != active)
2050 			continue;
2051 
2052 		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2053 			if (path_state_desc[j].state != state)
2054 				continue;
2055 
2056 			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2057 				     path_active_desc[i].desc, path_state_desc[j].desc,
2058 				     ipr_format_res_path(hostrcb->ioa_cfg,
2059 						fabric->res_path,
2060 						buffer, sizeof(buffer)));
2061 			return;
2062 		}
2063 	}
2064 
2065 	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2066 		ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2067 				    buffer, sizeof(buffer)));
2068 }
2069 
2070 static const struct {
2071 	u8 type;
2072 	char *desc;
2073 } path_type_desc[] = {
2074 	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
2075 	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
2076 	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2077 	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2078 };
2079 
2080 static const struct {
2081 	u8 status;
2082 	char *desc;
2083 } path_status_desc[] = {
2084 	{ IPR_PATH_CFG_NO_PROB, "Functional" },
2085 	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
2086 	{ IPR_PATH_CFG_FAILED, "Failed" },
2087 	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
2088 	{ IPR_PATH_NOT_DETECTED, "Missing" },
2089 	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2090 };
2091 
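
/*
 * Link rate strings indexed by cfg->link_rate & IPR_PHY_LINK_RATE_MASK;
 * entries 0x8 and 0x9 correspond to the 1.5 and 3.0 Gbps rates.
 */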
2092 static const char *link_rate[] = {
2093 	"unknown",
2094 	"disabled",
2095 	"phy reset problem",
2096 	"spinup hold",
2097 	"port selector",
2098 	"unknown",
2099 	"unknown",
2100 	"unknown",
2101 	"1.5Gbps",
2102 	"3.0Gbps",
2103 	"unknown",
2104 	"unknown",
2105 	"unknown",
2106 	"unknown",
2107 	"unknown",
2108 	"unknown"
2109 };
2110 
2111 /**
2112  * ipr_log_path_elem - Log a fabric path element.
2113  * @hostrcb:	hostrcb struct
2114  * @cfg:		fabric path element struct
2115  *
2116  * Return value:
2117  * 	none
2118  **/
2119 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2120 			      struct ipr_hostrcb_config_element *cfg)
2121 {
2122 	int i, j;
2123 	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2124 	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2125 
2126 	if (type == IPR_PATH_CFG_NOT_EXIST)
2127 		return;
2128 
2129 	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2130 		if (path_type_desc[i].type != type)
2131 			continue;
2132 
2133 		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2134 			if (path_status_desc[j].status != status)
2135 				continue;
2136 
2137 			if (type == IPR_PATH_CFG_IOA_PORT) {
2138 				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2139 					     path_status_desc[j].desc, path_type_desc[i].desc,
2140 					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2141 					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2142 			} else {
2143 				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2144 					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2145 						     path_status_desc[j].desc, path_type_desc[i].desc,
2146 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2147 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2148 				} else if (cfg->cascaded_expander == 0xff) {
2149 					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2150 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2151 						     path_type_desc[i].desc, cfg->phy,
2152 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2153 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2154 				} else if (cfg->phy == 0xff) {
2155 					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2156 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2157 						     path_type_desc[i].desc, cfg->cascaded_expander,
2158 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2159 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2160 				} else {
2161 					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2162 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2163 						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2164 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2165 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2166 				}
2167 			}
2168 			return;
2169 		}
2170 	}
2171 
2172 	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2173 		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2174 		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2175 		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2176 }
2177 
2178 /**
2179  * ipr_log64_path_elem - Log a fabric path element.
2180  * @hostrcb:	hostrcb struct
2181  * @cfg:		fabric path element struct
2182  *
2183  * Return value:
2184  * 	none
2185  **/
2186 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2187 				struct ipr_hostrcb64_config_element *cfg)
2188 {
2189 	int i, j;
2190 	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2191 	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2192 	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2193 	char buffer[IPR_MAX_RES_PATH_LENGTH];
2194 
2195 	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2196 		return;
2197 
2198 	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2199 		if (path_type_desc[i].type != type)
2200 			continue;
2201 
2202 		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2203 			if (path_status_desc[j].status != status)
2204 				continue;
2205 
2206 			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2207 				     path_status_desc[j].desc, path_type_desc[i].desc,
2208 				     ipr_format_res_path(hostrcb->ioa_cfg,
2209 					cfg->res_path, buffer, sizeof(buffer)),
2210 					link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2211 					be32_to_cpu(cfg->wwid[0]),
2212 					be32_to_cpu(cfg->wwid[1]));
2213 			return;
2214 		}
2215 	}
2216 	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2217 		     "WWN=%08X%08X\n", cfg->type_status,
2218 		     ipr_format_res_path(hostrcb->ioa_cfg,
2219 			cfg->res_path, buffer, sizeof(buffer)),
2220 			link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2221 			be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2222 }
2223 
2224 /**
2225  * ipr_log_fabric_error - Log a fabric error.
2226  * @ioa_cfg:	ioa config struct
2227  * @hostrcb:	hostrcb struct
2228  *
2229  * Return value:
2230  * 	none
2231  **/
2232 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2233 				 struct ipr_hostrcb *hostrcb)
2234 {
2235 	struct ipr_hostrcb_type_20_error *error;
2236 	struct ipr_hostrcb_fabric_desc *fabric;
2237 	struct ipr_hostrcb_config_element *cfg;
2238 	int i, add_len;
2239 
2240 	error = &hostrcb->hcam.u.error.u.type_20_error;
2241 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2242 	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2243 
2244 	add_len = be32_to_cpu(hostrcb->hcam.length) -
2245 		(offsetof(struct ipr_hostrcb_error, u) +
2246 		 offsetof(struct ipr_hostrcb_type_20_error, desc));
2247 
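	/*
	 * Fabric descriptors are variable length: log the path, then every
	 * config element within it, and advance by the descriptor's length.
	 * Whatever data remains after the last descriptor is dumped as hex.
	 */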
2248 	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2249 		ipr_log_fabric_path(hostrcb, fabric);
2250 		for_each_fabric_cfg(fabric, cfg)
2251 			ipr_log_path_elem(hostrcb, cfg);
2252 
2253 		add_len -= be16_to_cpu(fabric->length);
2254 		fabric = (struct ipr_hostrcb_fabric_desc *)
2255 			((unsigned long)fabric + be16_to_cpu(fabric->length));
2256 	}
2257 
2258 	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2259 }
2260 
2261 /**
2262  * ipr_log_sis64_array_error - Log a sis64 array error.
2263  * @ioa_cfg:	ioa config struct
2264  * @hostrcb:	hostrcb struct
2265  *
2266  * Return value:
2267  * 	none
2268  **/
2269 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2270 				      struct ipr_hostrcb *hostrcb)
2271 {
2272 	int i, num_entries;
2273 	struct ipr_hostrcb_type_24_error *error;
2274 	struct ipr_hostrcb64_array_data_entry *array_entry;
2275 	char buffer[IPR_MAX_RES_PATH_LENGTH];
2276 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2277 
2278 	error = &hostrcb->hcam.u.error64.u.type_24_error;
2279 
2280 	ipr_err_separator;
2281 
2282 	ipr_err("RAID %s Array Configuration: %s\n",
2283 		error->protection_level,
2284 		ipr_format_res_path(ioa_cfg, error->last_res_path,
2285 			buffer, sizeof(buffer)));
2286 
2287 	ipr_err_separator;
2288 
2289 	array_entry = error->array_member;
2290 	num_entries = min_t(u32, error->num_entries,
2291 			    ARRAY_SIZE(error->array_member));
2292 
2293 	for (i = 0; i < num_entries; i++, array_entry++) {
2294 
2295 		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2296 			continue;
2297 
2298 		if (error->exposed_mode_adn == i)
2299 			ipr_err("Exposed Array Member %d:\n", i);
2300 		else
2301 			ipr_err("Array Member %d:\n", i);
2302 
2304 		ipr_log_ext_vpd(&array_entry->vpd);
2305 		ipr_err("Current Location: %s\n",
2306 			 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2307 				buffer, sizeof(buffer)));
2308 		ipr_err("Expected Location: %s\n",
2309 			 ipr_format_res_path(ioa_cfg,
2310 				array_entry->expected_res_path,
2311 				buffer, sizeof(buffer)));
2312 
2313 		ipr_err_separator;
2314 	}
2315 }
2316 
2317 /**
2318  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2319  * @ioa_cfg:	ioa config struct
2320  * @hostrcb:	hostrcb struct
2321  *
2322  * Return value:
2323  * 	none
2324  **/
2325 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2326 				       struct ipr_hostrcb *hostrcb)
2327 {
2328 	struct ipr_hostrcb_type_30_error *error;
2329 	struct ipr_hostrcb64_fabric_desc *fabric;
2330 	struct ipr_hostrcb64_config_element *cfg;
2331 	int i, add_len;
2332 
2333 	error = &hostrcb->hcam.u.error64.u.type_30_error;
2334 
2335 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2336 	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2337 
2338 	add_len = be32_to_cpu(hostrcb->hcam.length) -
2339 		(offsetof(struct ipr_hostrcb64_error, u) +
2340 		 offsetof(struct ipr_hostrcb_type_30_error, desc));
2341 
2342 	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2343 		ipr_log64_fabric_path(hostrcb, fabric);
2344 		for_each_fabric_cfg(fabric, cfg)
2345 			ipr_log64_path_elem(hostrcb, cfg);
2346 
2347 		add_len -= be16_to_cpu(fabric->length);
2348 		fabric = (struct ipr_hostrcb64_fabric_desc *)
2349 			((unsigned long)fabric + be16_to_cpu(fabric->length));
2350 	}
2351 
2352 	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2353 }
2354 
2355 /**
2356  * ipr_log_generic_error - Log an adapter error.
2357  * @ioa_cfg:	ioa config struct
2358  * @hostrcb:	hostrcb struct
2359  *
2360  * Return value:
2361  * 	none
2362  **/
2363 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2364 				  struct ipr_hostrcb *hostrcb)
2365 {
2366 	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2367 			 be32_to_cpu(hostrcb->hcam.length));
2368 }
2369 
2370 /**
2371  * ipr_log_sis64_device_error - Log a sis64 device error.
2372  * @ioa_cfg:	ioa config struct
2373  * @hostrcb:	hostrcb struct
2374  *
2375  * Return value:
2376  * 	none
2377  **/
2378 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2379 					 struct ipr_hostrcb *hostrcb)
2380 {
2381 	struct ipr_hostrcb_type_21_error *error;
2382 	char buffer[IPR_MAX_RES_PATH_LENGTH];
2383 
2384 	error = &hostrcb->hcam.u.error64.u.type_21_error;
2385 
2386 	ipr_err("-----Failing Device Information-----\n");
2387 	ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2388 		be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2389 		 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2390 	ipr_err("Device Resource Path: %s\n",
2391 		__ipr_format_res_path(error->res_path,
2392 				      buffer, sizeof(buffer)));
2393 	error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2394 	error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2395 	ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2396 	ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2397 	ipr_err("SCSI Sense Data:\n");
2398 	ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2399 	ipr_err("SCSI Command Descriptor Block:\n");
2400 	ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2401 
2402 	ipr_err("Additional IOA Data:\n");
2403 	ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2404 }
2405 
2406 /**
2407  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2408  * @ioasc:	IOASC
2409  *
2410  * This function will return the index into the ipr_error_table
2411  * for the specified IOASC. If the IOASC is not in the table,
2412  * 0 will be returned, which points to the entry used for unknown errors.
2413  *
2414  * Return value:
2415  * 	index into the ipr_error_table
2416  **/
2417 static u32 ipr_get_error(u32 ioasc)
2418 {
2419 	int i;
2420 
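	/* Only the bits covered by IPR_IOASC_IOASC_MASK take part in the match. */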
2421 	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2422 		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2423 			return i;
2424 
2425 	return 0;
2426 }
2427 
2428 /**
2429  * ipr_handle_log_data - Log an adapter error.
2430  * @ioa_cfg:	ioa config struct
2431  * @hostrcb:	hostrcb struct
2432  *
2433  * This function logs an adapter error to the system.
2434  *
2435  * Return value:
2436  * 	none
2437  **/
2438 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2439 				struct ipr_hostrcb *hostrcb)
2440 {
2441 	u32 ioasc;
2442 	int error_index;
2443 	struct ipr_hostrcb_type_21_error *error;
2444 
2445 	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2446 		return;
2447 
2448 	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2449 		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2450 
2451 	if (ioa_cfg->sis64)
2452 		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2453 	else
2454 		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2455 
2456 	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2457 	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2458 		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
2459 		scsi_report_bus_reset(ioa_cfg->host,
2460 				      hostrcb->hcam.u.error.fd_res_addr.bus);
2461 	}
2462 
2463 	error_index = ipr_get_error(ioasc);
2464 
2465 	if (!ipr_error_table[error_index].log_hcam)
2466 		return;
2467 
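	/*
	 * For failed hardware commands reported via a type 21 overlay, skip
	 * logging when the embedded sense key is ILLEGAL_REQUEST and the log
	 * level has not been raised above the default.
	 */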
2468 	if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2469 	    hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2470 		error = &hostrcb->hcam.u.error64.u.type_21_error;
2471 
2472 		if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2473 			ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2474 				return;
2475 	}
2476 
2477 	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2478 
2479 	/* Set indication we have logged an error */
2480 	ioa_cfg->errors_logged++;
2481 
2482 	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2483 		return;
2484 	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2485 		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2486 
2487 	switch (hostrcb->hcam.overlay_id) {
2488 	case IPR_HOST_RCB_OVERLAY_ID_2:
2489 		ipr_log_cache_error(ioa_cfg, hostrcb);
2490 		break;
2491 	case IPR_HOST_RCB_OVERLAY_ID_3:
2492 		ipr_log_config_error(ioa_cfg, hostrcb);
2493 		break;
2494 	case IPR_HOST_RCB_OVERLAY_ID_4:
2495 	case IPR_HOST_RCB_OVERLAY_ID_6:
2496 		ipr_log_array_error(ioa_cfg, hostrcb);
2497 		break;
2498 	case IPR_HOST_RCB_OVERLAY_ID_7:
2499 		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2500 		break;
2501 	case IPR_HOST_RCB_OVERLAY_ID_12:
2502 		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2503 		break;
2504 	case IPR_HOST_RCB_OVERLAY_ID_13:
2505 		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2506 		break;
2507 	case IPR_HOST_RCB_OVERLAY_ID_14:
2508 	case IPR_HOST_RCB_OVERLAY_ID_16:
2509 		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2510 		break;
2511 	case IPR_HOST_RCB_OVERLAY_ID_17:
2512 		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2513 		break;
2514 	case IPR_HOST_RCB_OVERLAY_ID_20:
2515 		ipr_log_fabric_error(ioa_cfg, hostrcb);
2516 		break;
2517 	case IPR_HOST_RCB_OVERLAY_ID_21:
2518 		ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2519 		break;
2520 	case IPR_HOST_RCB_OVERLAY_ID_23:
2521 		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2522 		break;
2523 	case IPR_HOST_RCB_OVERLAY_ID_24:
2524 	case IPR_HOST_RCB_OVERLAY_ID_26:
2525 		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2526 		break;
2527 	case IPR_HOST_RCB_OVERLAY_ID_30:
2528 		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2529 		break;
2530 	case IPR_HOST_RCB_OVERLAY_ID_1:
2531 	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2532 	default:
2533 		ipr_log_generic_error(ioa_cfg, hostrcb);
2534 		break;
2535 	}
2536 }
2537 
2538 /**
2539  * ipr_process_error - Op done function for an adapter error log.
2540  * @ipr_cmd:	ipr command struct
2541  *
2542  * This function is the op done function for an error log host
2543  * controlled async from the adapter. It will log the error and
2544  * send the HCAM back to the adapter.
2545  *
2546  * Return value:
2547  * 	none
2548  **/
2549 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2550 {
2551 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2552 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2553 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2554 	u32 fd_ioasc;
2555 
2556 	if (ioa_cfg->sis64)
2557 		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2558 	else
2559 		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2560 
2561 	list_del(&hostrcb->queue);
2562 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2563 
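	/*
	 * On success, log the error data; if the failing device IOASC says a
	 * reset is required, also kick off an abbreviated adapter reset.
	 * In either case the HCAM is then handed back to the adapter.
	 */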
2564 	if (!ioasc) {
2565 		ipr_handle_log_data(ioa_cfg, hostrcb);
2566 		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2567 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2568 	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2569 		dev_err(&ioa_cfg->pdev->dev,
2570 			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
2571 	}
2572 
2573 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2574 }
2575 
2576 /**
2577  * ipr_timeout -  An internally generated op has timed out.
2578  * @ipr_cmd:	ipr command struct
2579  *
2580  * This function blocks host requests and initiates an
2581  * adapter reset.
2582  *
2583  * Return value:
2584  * 	none
2585  **/
2586 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2587 {
2588 	unsigned long lock_flags = 0;
2589 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2590 
2591 	ENTER;
2592 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2593 
2594 	ioa_cfg->errors_logged++;
2595 	dev_err(&ioa_cfg->pdev->dev,
2596 		"Adapter being reset due to command timeout.\n");
2597 
2598 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2599 		ioa_cfg->sdt_state = GET_DUMP;
2600 
2601 	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2602 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2603 
2604 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2605 	LEAVE;
2606 }
2607 
2608 /**
2609  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2610  * @ipr_cmd:	ipr command struct
2611  *
2612  * This function blocks host requests and initiates an
2613  * adapter reset.
2614  *
2615  * Return value:
2616  * 	none
2617  **/
2618 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2619 {
2620 	unsigned long lock_flags = 0;
2621 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2622 
2623 	ENTER;
2624 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2625 
2626 	ioa_cfg->errors_logged++;
2627 	dev_err(&ioa_cfg->pdev->dev,
2628 		"Adapter timed out transitioning to operational.\n");
2629 
2630 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2631 		ioa_cfg->sdt_state = GET_DUMP;
2632 
2633 	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2634 		if (ipr_fastfail)
2635 			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2636 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2637 	}
2638 
2639 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2640 	LEAVE;
2641 }
2642 
2643 /**
2644  * ipr_find_ses_entry - Find matching SES in SES table
2645  * @res:	resource entry struct of SES
2646  *
2647  * Return value:
2648  * 	pointer to SES table entry / NULL on failure
2649  **/
2650 static const struct ipr_ses_table_entry *
2651 ipr_find_ses_entry(struct ipr_resource_entry *res)
2652 {
2653 	int i, j, matches;
2654 	struct ipr_std_inq_vpids *vpids;
2655 	const struct ipr_ses_table_entry *ste = ipr_ses_table;
2656 
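	/*
	 * Only product ID positions flagged 'X' in compare_product_id_byte
	 * must match exactly; every other position is treated as a wildcard.
	 */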
2657 	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2658 		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2659 			if (ste->compare_product_id_byte[j] == 'X') {
2660 				vpids = &res->std_inq_data.vpids;
2661 				if (vpids->product_id[j] == ste->product_id[j])
2662 					matches++;
2663 				else
2664 					break;
2665 			} else
2666 				matches++;
2667 		}
2668 
2669 		if (matches == IPR_PROD_ID_LEN)
2670 			return ste;
2671 	}
2672 
2673 	return NULL;
2674 }
2675 
2676 /**
2677  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2678  * @ioa_cfg:	ioa config struct
2679  * @bus:		SCSI bus
2680  * @bus_width:	bus width
2681  *
2682  * Return value:
2683  *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2684  *	For a 2-byte wide SCSI bus, the maximum transfer speed is
2685  *	twice the maximum transfer rate (e.g. for a wide enabled bus,
2686  *	max 160MHz = max 320MB/sec).
2687  **/
2688 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2689 {
2690 	struct ipr_resource_entry *res;
2691 	const struct ipr_ses_table_entry *ste;
2692 	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2693 
2694 	/* Loop through each config table entry in the config table buffer */
2695 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2696 		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2697 			continue;
2698 
2699 		if (bus != res->bus)
2700 			continue;
2701 
2702 		if (!(ste = ipr_find_ses_entry(res)))
2703 			continue;
2704 
2705 		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2706 	}
2707 
2708 	return max_xfer_rate;
2709 }
2710 
2711 /**
2712  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2713  * @ioa_cfg:		ioa config struct
2714  * @max_delay:		max delay in micro-seconds to wait
2715  *
2716  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2717  *
2718  * Return value:
2719  * 	0 on success / other on failure
2720  **/
2721 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2722 {
2723 	volatile u32 pcii_reg;
2724 	int delay = 1;
2725 
2726 	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
2727 	while (delay < max_delay) {
2728 		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2729 
2730 		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2731 			return 0;
2732 
2733 		/* udelay cannot be used if delay is more than a few milliseconds */
2734 		if ((delay / 1000) > MAX_UDELAY_MS)
2735 			mdelay(delay / 1000);
2736 		else
2737 			udelay(delay);
2738 
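		/* Exponential backoff: the polling delay doubles on every pass. */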
2739 		delay += delay;
2740 	}
2741 	return -EIO;
2742 }
2743 
2744 /**
2745  * ipr_get_sis64_dump_data_section - Dump IOA memory
2746  * @ioa_cfg:			ioa config struct
2747  * @start_addr:			adapter address to dump
2748  * @dest:			destination kernel buffer
2749  * @length_in_words:		length to dump in 4 byte words
2750  *
2751  * Return value:
2752  * 	0 on success
2753  **/
2754 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2755 					   u32 start_addr,
2756 					   __be32 *dest, u32 length_in_words)
2757 {
2758 	int i;
2759 
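	/*
	 * Fetch each word by writing its adapter address to the dump address
	 * register and reading the value back through the dump data register.
	 */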
2760 	for (i = 0; i < length_in_words; i++) {
2761 		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2762 		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2763 		dest++;
2764 	}
2765 
2766 	return 0;
2767 }
2768 
2769 /**
2770  * ipr_get_ldump_data_section - Dump IOA memory
2771  * @ioa_cfg:			ioa config struct
2772  * @start_addr:			adapter address to dump
2773  * @dest:				destination kernel buffer
2774  * @length_in_words:	length to dump in 4 byte words
2775  *
2776  * Return value:
2777  * 	0 on success / -EIO on failure
2778  **/
2779 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2780 				      u32 start_addr,
2781 				      __be32 *dest, u32 length_in_words)
2782 {
2783 	volatile u32 temp_pcii_reg;
2784 	int i, delay = 0;
2785 
2786 	if (ioa_cfg->sis64)
2787 		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2788 						       dest, length_in_words);
2789 
2790 	/* Write IOA interrupt reg starting LDUMP state  */
2791 	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2792 	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2793 
2794 	/* Wait for IO debug acknowledge */
2795 	if (ipr_wait_iodbg_ack(ioa_cfg,
2796 			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2797 		dev_err(&ioa_cfg->pdev->dev,
2798 			"IOA dump long data transfer timeout\n");
2799 		return -EIO;
2800 	}
2801 
2802 	/* Signal LDUMP interlocked - clear IO debug ack */
2803 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2804 	       ioa_cfg->regs.clr_interrupt_reg);
2805 
2806 	/* Write Mailbox with starting address */
2807 	writel(start_addr, ioa_cfg->ioa_mailbox);
2808 
2809 	/* Signal address valid - clear IOA Reset alert */
2810 	writel(IPR_UPROCI_RESET_ALERT,
2811 	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2812 
2813 	for (i = 0; i < length_in_words; i++) {
2814 		/* Wait for IO debug acknowledge */
2815 		if (ipr_wait_iodbg_ack(ioa_cfg,
2816 				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2817 			dev_err(&ioa_cfg->pdev->dev,
2818 				"IOA dump short data transfer timeout\n");
2819 			return -EIO;
2820 		}
2821 
2822 		/* Read data from mailbox and increment destination pointer */
2823 		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2824 		dest++;
2825 
2826 		/* For all but the last word of data, signal data received */
2827 		if (i < (length_in_words - 1)) {
2828 			/* Signal dump data received - Clear IO debug Ack */
2829 			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2830 			       ioa_cfg->regs.clr_interrupt_reg);
2831 		}
2832 	}
2833 
2834 	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
2835 	writel(IPR_UPROCI_RESET_ALERT,
2836 	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2837 
2838 	writel(IPR_UPROCI_IO_DEBUG_ALERT,
2839 	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2840 
2841 	/* Signal dump data received - Clear IO debug Ack */
2842 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2843 	       ioa_cfg->regs.clr_interrupt_reg);
2844 
2845 	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2846 	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2847 		temp_pcii_reg =
2848 		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2849 
2850 		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2851 			return 0;
2852 
2853 		udelay(10);
2854 		delay += 10;
2855 	}
2856 
2857 	return 0;
2858 }
2859 
2860 #ifdef CONFIG_SCSI_IPR_DUMP
2861 /**
2862  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2863  * @ioa_cfg:		ioa config struct
2864  * @pci_address:	adapter address
2865  * @length:			length of data to copy
2866  *
2867  * Copy data from PCI adapter to kernel buffer.
2868  * Note: length MUST be a 4 byte multiple
2869  * Return value:
2870  * 	0 on success / other on failure
2871  **/
2872 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2873 			unsigned long pci_address, u32 length)
2874 {
2875 	int bytes_copied = 0;
2876 	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2877 	__be32 *page;
2878 	unsigned long lock_flags = 0;
2879 	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2880 
2881 	if (ioa_cfg->sis64)
2882 		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2883 	else
2884 		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2885 
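	/*
	 * Copy the section a page at a time, allocating a fresh page whenever
	 * the current one fills up.  The host lock is held only around each
	 * ldump read, with a schedule() between chunks.
	 */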
2886 	while (bytes_copied < length &&
2887 	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2888 		if (ioa_dump->page_offset >= PAGE_SIZE ||
2889 		    ioa_dump->page_offset == 0) {
2890 			page = (__be32 *)__get_free_page(GFP_ATOMIC);
2891 
2892 			if (!page) {
2893 				ipr_trace;
2894 				return bytes_copied;
2895 			}
2896 
2897 			ioa_dump->page_offset = 0;
2898 			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2899 			ioa_dump->next_page_index++;
2900 		} else
2901 			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2902 
2903 		rem_len = length - bytes_copied;
2904 		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2905 		cur_len = min(rem_len, rem_page_len);
2906 
2907 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2908 		if (ioa_cfg->sdt_state == ABORT_DUMP) {
2909 			rc = -EIO;
2910 		} else {
2911 			rc = ipr_get_ldump_data_section(ioa_cfg,
2912 							pci_address + bytes_copied,
2913 							&page[ioa_dump->page_offset / 4],
2914 							(cur_len / sizeof(u32)));
2915 		}
2916 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2917 
2918 		if (!rc) {
2919 			ioa_dump->page_offset += cur_len;
2920 			bytes_copied += cur_len;
2921 		} else {
2922 			ipr_trace;
2923 			break;
2924 		}
2925 		schedule();
2926 	}
2927 
2928 	return bytes_copied;
2929 }
2930 
2931 /**
2932  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2933  * @hdr:	dump entry header struct
2934  *
2935  * Return value:
2936  * 	nothing
2937  **/
2938 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2939 {
2940 	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2941 	hdr->num_elems = 1;
2942 	hdr->offset = sizeof(*hdr);
2943 	hdr->status = IPR_DUMP_STATUS_SUCCESS;
2944 }
2945 
2946 /**
2947  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2948  * @ioa_cfg:	ioa config struct
2949  * @driver_dump:	driver dump struct
2950  *
2951  * Return value:
2952  * 	nothing
2953  **/
2954 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2955 				   struct ipr_driver_dump *driver_dump)
2956 {
2957 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2958 
2959 	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2960 	driver_dump->ioa_type_entry.hdr.len =
2961 		sizeof(struct ipr_dump_ioa_type_entry) -
2962 		sizeof(struct ipr_dump_entry_header);
2963 	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2964 	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2965 	driver_dump->ioa_type_entry.type = ioa_cfg->type;
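	/* Firmware level packed as major | card type | minor[0] | minor[1], one byte each. */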
2966 	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2967 		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2968 		ucode_vpd->minor_release[1];
2969 	driver_dump->hdr.num_entries++;
2970 }
2971 
2972 /**
2973  * ipr_dump_version_data - Fill in the driver version in the dump.
2974  * @ioa_cfg:	ioa config struct
2975  * @driver_dump:	driver dump struct
2976  *
2977  * Return value:
2978  * 	nothing
2979  **/
2980 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2981 				  struct ipr_driver_dump *driver_dump)
2982 {
2983 	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2984 	driver_dump->version_entry.hdr.len =
2985 		sizeof(struct ipr_dump_version_entry) -
2986 		sizeof(struct ipr_dump_entry_header);
2987 	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2988 	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2989 	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2990 	driver_dump->hdr.num_entries++;
2991 }
2992 
2993 /**
2994  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2995  * @ioa_cfg:	ioa config struct
2996  * @driver_dump:	driver dump struct
2997  *
2998  * Return value:
2999  * 	nothing
3000  **/
3001 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3002 				   struct ipr_driver_dump *driver_dump)
3003 {
3004 	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3005 	driver_dump->trace_entry.hdr.len =
3006 		sizeof(struct ipr_dump_trace_entry) -
3007 		sizeof(struct ipr_dump_entry_header);
3008 	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3009 	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3010 	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3011 	driver_dump->hdr.num_entries++;
3012 }
3013 
3014 /**
3015  * ipr_dump_location_data - Fill in the IOA location in the dump.
3016  * @ioa_cfg:	ioa config struct
3017  * @driver_dump:	driver dump struct
3018  *
3019  * Return value:
3020  * 	nothing
3021  **/
3022 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3023 				   struct ipr_driver_dump *driver_dump)
3024 {
3025 	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3026 	driver_dump->location_entry.hdr.len =
3027 		sizeof(struct ipr_dump_location_entry) -
3028 		sizeof(struct ipr_dump_entry_header);
3029 	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3030 	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3031 	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3032 	driver_dump->hdr.num_entries++;
3033 }
3034 
3035 /**
3036  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3037  * @ioa_cfg:	ioa config struct
3038  * @dump:		dump struct
3039  *
3040  * Return value:
3041  * 	nothing
3042  **/
3043 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3044 {
3045 	unsigned long start_addr, sdt_word;
3046 	unsigned long lock_flags = 0;
3047 	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3048 	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3049 	u32 num_entries, max_num_entries, start_off, end_off;
3050 	u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3051 	struct ipr_sdt *sdt;
3052 	int valid = 1;
3053 	int i;
3054 
3055 	ENTER;
3056 
3057 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3058 
3059 	if (ioa_cfg->sdt_state != READ_DUMP) {
3060 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3061 		return;
3062 	}
3063 
3064 	if (ioa_cfg->sis64) {
3065 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3066 		ssleep(IPR_DUMP_DELAY_SECONDS);
3067 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3068 	}
3069 
3070 	start_addr = readl(ioa_cfg->ioa_mailbox);
3071 
3072 	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3073 		dev_err(&ioa_cfg->pdev->dev,
3074 			"Invalid dump table format: %lx\n", start_addr);
3075 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3076 		return;
3077 	}
3078 
3079 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3080 
3081 	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3082 
3083 	/* Initialize the overall dump header */
3084 	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3085 	driver_dump->hdr.num_entries = 1;
3086 	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3087 	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3088 	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3089 	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3090 
3091 	ipr_dump_version_data(ioa_cfg, driver_dump);
3092 	ipr_dump_location_data(ioa_cfg, driver_dump);
3093 	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3094 	ipr_dump_trace_data(ioa_cfg, driver_dump);
3095 
3096 	/* Update dump_header */
3097 	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3098 
3099 	/* IOA Dump entry */
3100 	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3101 	ioa_dump->hdr.len = 0;
3102 	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3103 	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3104 
3105 	/* First entries in sdt are actually a list of dump addresses and
3106 	 * lengths to gather the real dump data.  sdt represents the pointer
3107 	 * to the ioa generated dump table.  Dump data will be extracted based
3108 	 * on entries in this table. */
3109 	sdt = &ioa_dump->sdt;
3110 
3111 	if (ioa_cfg->sis64) {
3112 		max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3113 		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3114 	} else {
3115 		max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3116 		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3117 	}
3118 
3119 	bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3120 			(max_num_entries * sizeof(struct ipr_sdt_entry));
3121 	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3122 					bytes_to_copy / sizeof(__be32));
3123 
3124 	/* Smart Dump table is ready to use and the first entry is valid */
3125 	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3126 	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3127 		dev_err(&ioa_cfg->pdev->dev,
3128 			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
3129 			rc, be32_to_cpu(sdt->hdr.state));
3130 		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3131 		ioa_cfg->sdt_state = DUMP_OBTAINED;
3132 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3133 		return;
3134 	}
3135 
3136 	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3137 
3138 	if (num_entries > max_num_entries)
3139 		num_entries = max_num_entries;
3140 
3141 	/* Update dump length to the actual data to be copied */
3142 	dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3143 	if (ioa_cfg->sis64)
3144 		dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3145 	else
3146 		dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3147 
3148 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3149 
3150 	for (i = 0; i < num_entries; i++) {
3151 		if (ioa_dump->hdr.len > max_dump_size) {
3152 			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3153 			break;
3154 		}
3155 
3156 		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3157 			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3158 			if (ioa_cfg->sis64)
3159 				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3160 			else {
3161 				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3162 				end_off = be32_to_cpu(sdt->entry[i].end_token);
3163 
3164 				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3165 					bytes_to_copy = end_off - start_off;
3166 				else
3167 					valid = 0;
3168 			}
3169 			if (valid) {
3170 				if (bytes_to_copy > max_dump_size) {
3171 					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3172 					continue;
3173 				}
3174 
3175 				/* Copy data from adapter to driver buffers */
3176 				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3177 							    bytes_to_copy);
3178 
3179 				ioa_dump->hdr.len += bytes_copied;
3180 
3181 				if (bytes_copied != bytes_to_copy) {
3182 					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3183 					break;
3184 				}
3185 			}
3186 		}
3187 	}
3188 
3189 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3190 
3191 	/* Update dump_header */
3192 	driver_dump->hdr.len += ioa_dump->hdr.len;
3193 	wmb();
3194 	ioa_cfg->sdt_state = DUMP_OBTAINED;
3195 	LEAVE;
3196 }
3197 
3198 #else
3199 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3200 #endif
3201 
3202 /**
3203  * ipr_release_dump - Free adapter dump memory
3204  * @kref:	kref struct
3205  *
3206  * Return value:
3207  *	nothing
3208  **/
3209 static void ipr_release_dump(struct kref *kref)
3210 {
3211 	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3212 	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3213 	unsigned long lock_flags = 0;
3214 	int i;
3215 
3216 	ENTER;
3217 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3218 	ioa_cfg->dump = NULL;
3219 	ioa_cfg->sdt_state = INACTIVE;
3220 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3221 
3222 	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3223 		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3224 
3225 	vfree(dump->ioa_dump.ioa_data);
3226 	kfree(dump);
3227 	LEAVE;
3228 }
3229 
3230 /**
3231  * ipr_worker_thread - Worker thread
3232  * @work:		work struct
3233  *
3234  * Called at task level from a work thread. This function takes care
3235  * of adding and removing devices from the mid-layer as configuration
3236  * changes are detected by the adapter.
3237  *
3238  * Return value:
3239  * 	nothing
3240  **/
3241 static void ipr_worker_thread(struct work_struct *work)
3242 {
3243 	unsigned long lock_flags;
3244 	struct ipr_resource_entry *res;
3245 	struct scsi_device *sdev;
3246 	struct ipr_dump *dump;
3247 	struct ipr_ioa_cfg *ioa_cfg =
3248 		container_of(work, struct ipr_ioa_cfg, work_q);
3249 	u8 bus, target, lun;
3250 	int did_work;
3251 
3252 	ENTER;
3253 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3254 
3255 	if (ioa_cfg->sdt_state == READ_DUMP) {
3256 		dump = ioa_cfg->dump;
3257 		if (!dump) {
3258 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3259 			return;
3260 		}
3261 		kref_get(&dump->kref);
3262 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3263 		ipr_get_ioa_dump(ioa_cfg, dump);
3264 		kref_put(&dump->kref, ipr_release_dump);
3265 
3266 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3267 		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3268 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3269 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3270 		return;
3271 	}
3272 
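	/*
	 * Removals are handled first, one device per pass: the host lock is
	 * dropped around scsi_remove_device(), so the list walk restarts
	 * after each removal. The add loop below jumps back to restart for
	 * the same reason after each scsi_add_device().
	 */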
3273 restart:
3274 	do {
3275 		did_work = 0;
3276 		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3277 		    !ioa_cfg->allow_ml_add_del) {
3278 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3279 			return;
3280 		}
3281 
3282 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3283 			if (res->del_from_ml && res->sdev) {
3284 				did_work = 1;
3285 				sdev = res->sdev;
3286 				if (!scsi_device_get(sdev)) {
3287 					if (!res->add_to_ml)
3288 						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3289 					else
3290 						res->del_from_ml = 0;
3291 					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3292 					scsi_remove_device(sdev);
3293 					scsi_device_put(sdev);
3294 					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3295 				}
3296 				break;
3297 			}
3298 		}
3299 	} while (did_work);
3300 
3301 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3302 		if (res->add_to_ml) {
3303 			bus = res->bus;
3304 			target = res->target;
3305 			lun = res->lun;
3306 			res->add_to_ml = 0;
3307 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3308 			scsi_add_device(ioa_cfg->host, bus, target, lun);
3309 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3310 			goto restart;
3311 		}
3312 	}
3313 
3314 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3315 	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3316 	LEAVE;
3317 }
3318 
3319 #ifdef CONFIG_SCSI_IPR_TRACE
3320 /**
3321  * ipr_read_trace - Dump the adapter trace
3322  * @filp:		open sysfs file
3323  * @kobj:		kobject struct
3324  * @bin_attr:		bin_attribute struct
3325  * @buf:		buffer
3326  * @off:		offset
3327  * @count:		buffer size
3328  *
3329  * Return value:
3330  *	number of bytes read
3331  **/
3332 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3333 			      struct bin_attribute *bin_attr,
3334 			      char *buf, loff_t off, size_t count)
3335 {
3336 	struct device *dev = container_of(kobj, struct device, kobj);
3337 	struct Scsi_Host *shost = class_to_shost(dev);
3338 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3339 	unsigned long lock_flags = 0;
3340 	ssize_t ret;
3341 
3342 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3343 	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3344 				IPR_TRACE_SIZE);
3345 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3346 
3347 	return ret;
3348 }
3349 
3350 static struct bin_attribute ipr_trace_attr = {
3351 	.attr =	{
3352 		.name = "trace",
3353 		.mode = S_IRUGO,
3354 	},
3355 	.size = 0,
3356 	.read = ipr_read_trace,
3357 };
3358 #endif
3359 
3360 /**
3361  * ipr_show_fw_version - Show the firmware version
3362  * @dev:	class device struct
3363  * @buf:	buffer
3364  *
3365  * Return value:
3366  *	number of bytes printed to buffer
3367  **/
3368 static ssize_t ipr_show_fw_version(struct device *dev,
3369 				   struct device_attribute *attr, char *buf)
3370 {
3371 	struct Scsi_Host *shost = class_to_shost(dev);
3372 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3373 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3374 	unsigned long lock_flags = 0;
3375 	int len;
3376 
3377 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3378 	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3379 		       ucode_vpd->major_release, ucode_vpd->card_type,
3380 		       ucode_vpd->minor_release[0],
3381 		       ucode_vpd->minor_release[1]);
3382 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3383 	return len;
3384 }
3385 
3386 static struct device_attribute ipr_fw_version_attr = {
3387 	.attr = {
3388 		.name =		"fw_version",
3389 		.mode =		S_IRUGO,
3390 	},
3391 	.show = ipr_show_fw_version,
3392 };
3393 
3394 /**
3395  * ipr_show_log_level - Show the adapter's error logging level
3396  * @dev:	class device struct
3397  * @buf:	buffer
3398  *
3399  * Return value:
3400  * 	number of bytes printed to buffer
3401  **/
3402 static ssize_t ipr_show_log_level(struct device *dev,
3403 				   struct device_attribute *attr, char *buf)
3404 {
3405 	struct Scsi_Host *shost = class_to_shost(dev);
3406 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3407 	unsigned long lock_flags = 0;
3408 	int len;
3409 
3410 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3411 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3412 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3413 	return len;
3414 }
3415 
3416 /**
3417  * ipr_store_log_level - Change the adapter's error logging level
3418  * @dev:	class device struct
3419  * @buf:	buffer
3420  *
3421  * Return value:
3422  * 	count on success / other on failure
3423  **/
3424 static ssize_t ipr_store_log_level(struct device *dev,
3425 				   struct device_attribute *attr,
3426 				   const char *buf, size_t count)
3427 {
3428 	struct Scsi_Host *shost = class_to_shost(dev);
3429 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3430 	unsigned long lock_flags = 0;
3431 
3432 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3433 	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3434 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3435 	return strlen(buf);
3436 }
3437 
3438 static struct device_attribute ipr_log_level_attr = {
3439 	.attr = {
3440 		.name =		"log_level",
3441 		.mode =		S_IRUGO | S_IWUSR,
3442 	},
3443 	.show = ipr_show_log_level,
3444 	.store = ipr_store_log_level
3445 };
3446 
3447 /**
3448  * ipr_store_diagnostics - IOA Diagnostics interface
3449  * @dev:	device struct
3450  * @buf:	buffer
3451  * @count:	buffer size
3452  *
3453  * This function will reset the adapter and wait a reasonable
3454  * amount of time for any errors that the adapter might log.
3455  *
3456  * Return value:
3457  * 	count on success / other on failure
3458  **/
3459 static ssize_t ipr_store_diagnostics(struct device *dev,
3460 				     struct device_attribute *attr,
3461 				     const char *buf, size_t count)
3462 {
3463 	struct Scsi_Host *shost = class_to_shost(dev);
3464 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3465 	unsigned long lock_flags = 0;
3466 	int rc = count;
3467 
3468 	if (!capable(CAP_SYS_ADMIN))
3469 		return -EACCES;
3470 
3471 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3472 	while (ioa_cfg->in_reset_reload) {
3473 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3474 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3475 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3476 	}
3477 
3478 	ioa_cfg->errors_logged = 0;
3479 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3480 
3481 	if (ioa_cfg->in_reset_reload) {
3482 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3483 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3484 
3485 		/* Wait for a second for any errors to be logged */
3486 		msleep(1000);
3487 	} else {
3488 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3489 		return -EIO;
3490 	}
3491 
3492 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3493 	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3494 		rc = -EIO;
3495 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3496 
3497 	return rc;
3498 }
3499 
3500 static struct device_attribute ipr_diagnostics_attr = {
3501 	.attr = {
3502 		.name =		"run_diagnostics",
3503 		.mode =		S_IWUSR,
3504 	},
3505 	.store = ipr_store_diagnostics
3506 };
3507 
3508 /**
3509  * ipr_show_adapter_state - Show the adapter's state
3510  * @dev:	device struct
3511  * @buf:	buffer
3512  *
3513  * Return value:
3514  * 	number of bytes printed to buffer
3515  **/
3516 static ssize_t ipr_show_adapter_state(struct device *dev,
3517 				      struct device_attribute *attr, char *buf)
3518 {
3519 	struct Scsi_Host *shost = class_to_shost(dev);
3520 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3521 	unsigned long lock_flags = 0;
3522 	int len;
3523 
3524 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3525 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3526 		len = snprintf(buf, PAGE_SIZE, "offline\n");
3527 	else
3528 		len = snprintf(buf, PAGE_SIZE, "online\n");
3529 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3530 	return len;
3531 }
3532 
3533 /**
3534  * ipr_store_adapter_state - Change adapter state
3535  * @dev:	device struct
3536  * @buf:	buffer
3537  * @count:	buffer size
3538  *
3539  * This function will change the adapter's state.
3540  *
3541  * Return value:
3542  * 	count on success / other on failure
3543  **/
3544 static ssize_t ipr_store_adapter_state(struct device *dev,
3545 				       struct device_attribute *attr,
3546 				       const char *buf, size_t count)
3547 {
3548 	struct Scsi_Host *shost = class_to_shost(dev);
3549 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3550 	unsigned long lock_flags;
3551 	int result = count, i;
3552 
3553 	if (!capable(CAP_SYS_ADMIN))
3554 		return -EACCES;
3555 
3556 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3557 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3558 	    !strncmp(buf, "online", 6)) {
3559 		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3560 			spin_lock(&ioa_cfg->hrrq[i]._lock);
3561 			ioa_cfg->hrrq[i].ioa_is_dead = 0;
3562 			spin_unlock(&ioa_cfg->hrrq[i]._lock);
3563 		}
3564 		wmb();
3565 		ioa_cfg->reset_retries = 0;
3566 		ioa_cfg->in_ioa_bringdown = 0;
3567 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3568 	}
3569 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3570 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3571 
3572 	return result;
3573 }
3574 
3575 static struct device_attribute ipr_ioa_state_attr = {
3576 	.attr = {
3577 		.name =		"online_state",
3578 		.mode =		S_IRUGO | S_IWUSR,
3579 	},
3580 	.show = ipr_show_adapter_state,
3581 	.store = ipr_store_adapter_state
3582 };
3583 
3584 /**
3585  * ipr_store_reset_adapter - Reset the adapter
3586  * @dev:	device struct
3587  * @buf:	buffer
3588  * @count:	buffer size
3589  *
3590  * This function will reset the adapter.
3591  *
3592  * Return value:
3593  * 	count on success / other on failure
3594  **/
3595 static ssize_t ipr_store_reset_adapter(struct device *dev,
3596 				       struct device_attribute *attr,
3597 				       const char *buf, size_t count)
3598 {
3599 	struct Scsi_Host *shost = class_to_shost(dev);
3600 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3601 	unsigned long lock_flags;
3602 	int result = count;
3603 
3604 	if (!capable(CAP_SYS_ADMIN))
3605 		return -EACCES;
3606 
3607 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3608 	if (!ioa_cfg->in_reset_reload)
3609 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3610 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3611 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3612 
3613 	return result;
3614 }
3615 
3616 static struct device_attribute ipr_ioa_reset_attr = {
3617 	.attr = {
3618 		.name =		"reset_host",
3619 		.mode =		S_IWUSR,
3620 	},
3621 	.store = ipr_store_reset_adapter
3622 };
3623 
3624 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3625 /**
3626  * ipr_show_iopoll_weight - Show ipr polling mode
3627  * @dev:	class device struct
3628  * @buf:	buffer
3629  *
3630  * Return value:
3631  *	number of bytes printed to buffer
3632  **/
3633 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3634 				   struct device_attribute *attr, char *buf)
3635 {
3636 	struct Scsi_Host *shost = class_to_shost(dev);
3637 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3638 	unsigned long lock_flags = 0;
3639 	int len;
3640 
3641 	spin_lock_irqsave(shost->host_lock, lock_flags);
3642 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3643 	spin_unlock_irqrestore(shost->host_lock, lock_flags);
3644 
3645 	return len;
3646 }
3647 
3648 /**
3649  * ipr_store_iopoll_weight - Change the adapter's polling mode
3650  * @dev:	class device struct
3651  * @buf:	buffer
3652  *
3653  * Return value:
3654  *	count on success / other on failure
3655  **/
3656 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3657 					struct device_attribute *attr,
3658 					const char *buf, size_t count)
3659 {
3660 	struct Scsi_Host *shost = class_to_shost(dev);
3661 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3662 	unsigned long user_iopoll_weight;
3663 	unsigned long lock_flags = 0;
3664 	int i;
3665 
3666 	if (!ioa_cfg->sis64) {
3667 		dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3668 		return -EINVAL;
3669 	}
3670 	if (kstrtoul(buf, 10, &user_iopoll_weight))
3671 		return -EINVAL;
3672 
3673 	if (user_iopoll_weight > 256) {
3674 		dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be 256 or less\n");
3675 		return -EINVAL;
3676 	}
3677 
3678 	if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3679 		dev_info(&ioa_cfg->pdev->dev, "blk-iopoll weight is already set to that value\n");
3680 		return strlen(buf);
3681 	}
3682 
3683 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3684 		for (i = 1; i < ioa_cfg->hrrq_num; i++)
3685 			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3686 	}
3687 
3688 	spin_lock_irqsave(shost->host_lock, lock_flags);
3689 	ioa_cfg->iopoll_weight = user_iopoll_weight;
3690 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3691 		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3692 			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3693 					ioa_cfg->iopoll_weight, ipr_iopoll);
3694 			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3695 		}
3696 	}
3697 	spin_unlock_irqrestore(shost->host_lock, lock_flags);
3698 
3699 	return strlen(buf);
3700 }
3701 
3702 static struct device_attribute ipr_iopoll_weight_attr = {
3703 	.attr = {
3704 		.name =		"iopoll_weight",
3705 		.mode =		S_IRUGO | S_IWUSR,
3706 	},
3707 	.show = ipr_show_iopoll_weight,
3708 	.store = ipr_store_iopoll_weight
3709 };
3710 
3711 /**
3712  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3713  * @buf_len:		buffer length
3714  *
3715  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3716  * list to use for microcode download
3717  *
3718  * Return value:
3719  * 	pointer to sglist / NULL on failure
3720  **/
3721 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3722 {
3723 	int sg_size, order, bsize_elem, num_elem, i, j;
3724 	struct ipr_sglist *sglist;
3725 	struct scatterlist *scatterlist;
3726 	struct page *page;
3727 
3728 	/* Get the minimum size per scatter/gather element */
3729 	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3730 
3731 	/* Get the actual size per element */
3732 	order = get_order(sg_size);
3733 
3734 	/* Determine the actual number of bytes per element */
3735 	bsize_elem = PAGE_SIZE * (1 << order);
3736 
3737 	/* Determine the actual number of sg entries needed */
3738 	if (buf_len % bsize_elem)
3739 		num_elem = (buf_len / bsize_elem) + 1;
3740 	else
3741 		num_elem = buf_len / bsize_elem;
3742 
3743 	/* Allocate a scatter/gather list for the DMA */
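	/*
	 * struct ipr_sglist already provides the first scatterlist entry,
	 * hence the (num_elem - 1) in the allocation size.
	 */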
3744 	sglist = kzalloc(sizeof(struct ipr_sglist) +
3745 			 (sizeof(struct scatterlist) * (num_elem - 1)),
3746 			 GFP_KERNEL);
3747 
3748 	if (sglist == NULL) {
3749 		ipr_trace;
3750 		return NULL;
3751 	}
3752 
3753 	scatterlist = sglist->scatterlist;
3754 	sg_init_table(scatterlist, num_elem);
3755 
3756 	sglist->order = order;
3757 	sglist->num_sg = num_elem;
3758 
3759 	/* Allocate a bunch of sg elements */
3760 	for (i = 0; i < num_elem; i++) {
3761 		page = alloc_pages(GFP_KERNEL, order);
3762 		if (!page) {
3763 			ipr_trace;
3764 
3765 			/* Free up what we already allocated */
3766 			for (j = i - 1; j >= 0; j--)
3767 				__free_pages(sg_page(&scatterlist[j]), order);
3768 			kfree(sglist);
3769 			return NULL;
3770 		}
3771 
3772 		sg_set_page(&scatterlist[i], page, 0, 0);
3773 	}
3774 
3775 	return sglist;
3776 }
3777 
3778 /**
3779  * ipr_free_ucode_buffer - Frees a microcode download buffer
3780  * @sglist:		scatter/gather list pointer
3781  *
3782  * Free a DMA'able ucode download buffer previously allocated with
3783  * ipr_alloc_ucode_buffer
3784  *
3785  * Return value:
3786  * 	nothing
3787  **/
3788 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3789 {
3790 	int i;
3791 
3792 	for (i = 0; i < sglist->num_sg; i++)
3793 		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3794 
3795 	kfree(sglist);
3796 }
3797 
3798 /**
3799  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3800  * @sglist:		scatter/gather list pointer
3801  * @buffer:		buffer pointer
3802  * @len:		buffer length
3803  *
3804  * Copy a microcode image from a user buffer into a buffer allocated by
3805  * ipr_alloc_ucode_buffer
3806  *
3807  * Return value:
3808  * 	0 on success / other on failure
3809  **/
3810 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3811 				 u8 *buffer, u32 len)
3812 {
3813 	int bsize_elem, i, result = 0;
3814 	struct scatterlist *scatterlist;
3815 	void *kaddr;
3816 
3817 	/* Determine the actual number of bytes per element */
3818 	bsize_elem = PAGE_SIZE * (1 << sglist->order);
3819 
3820 	scatterlist = sglist->scatterlist;
3821 
3822 	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3823 		struct page *page = sg_page(&scatterlist[i]);
3824 
3825 		kaddr = kmap(page);
3826 		memcpy(kaddr, buffer, bsize_elem);
3827 		kunmap(page);
3828 
3829 		scatterlist[i].length = bsize_elem;
3830 
3831 		if (result != 0) {
3832 			ipr_trace;
3833 			return result;
3834 		}
3835 	}
3836 
3837 	if (len % bsize_elem) {
3838 		struct page *page = sg_page(&scatterlist[i]);
3839 
3840 		kaddr = kmap(page);
3841 		memcpy(kaddr, buffer, len % bsize_elem);
3842 		kunmap(page);
3843 
3844 		scatterlist[i].length = len % bsize_elem;
3845 	}
3846 
3847 	sglist->buffer_len = len;
3848 	return result;
3849 }
3850 
3851 /**
3852  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3853  * @ipr_cmd:		ipr command struct
3854  * @sglist:		scatter/gather list
3855  *
3856  * Builds a microcode download IOA data list (IOADL).
3857  *
3858  **/
3859 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3860 				    struct ipr_sglist *sglist)
3861 {
3862 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3863 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3864 	struct scatterlist *scatterlist = sglist->scatterlist;
3865 	int i;
3866 
3867 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3868 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3869 	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3870 
3871 	ioarcb->ioadl_len =
3872 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3873 	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3874 		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3875 		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3876 		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3877 	}
3878 
3879 	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3880 }
3881 
3882 /**
3883  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3884  * @ipr_cmd:	ipr command struct
3885  * @sglist:		scatter/gather list
3886  *
3887  * Builds a microcode download IOA data list (IOADL).
3888  *
3889  **/
3890 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3891 				  struct ipr_sglist *sglist)
3892 {
3893 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3894 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3895 	struct scatterlist *scatterlist = sglist->scatterlist;
3896 	int i;
3897 
3898 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3899 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3900 	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3901 
3902 	ioarcb->ioadl_len =
3903 		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3904 
3905 	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3906 		ioadl[i].flags_and_data_len =
3907 			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3908 		ioadl[i].address =
3909 			cpu_to_be32(sg_dma_address(&scatterlist[i]));
3910 	}
3911 
3912 	ioadl[i-1].flags_and_data_len |=
3913 		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3914 }
3915 
3916 /**
3917  * ipr_update_ioa_ucode - Update IOA's microcode
3918  * @ioa_cfg:	ioa config struct
3919  * @sglist:		scatter/gather list
3920  *
3921  * Initiate an adapter reset to update the IOA's microcode
3922  *
3923  * Return value:
3924  * 	0 on success / -EIO on failure
3925  **/
3926 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3927 				struct ipr_sglist *sglist)
3928 {
3929 	unsigned long lock_flags;
3930 
3931 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3932 	while (ioa_cfg->in_reset_reload) {
3933 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3934 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3935 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3936 	}
3937 
3938 	if (ioa_cfg->ucode_sglist) {
3939 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3940 		dev_err(&ioa_cfg->pdev->dev,
3941 			"Microcode download already in progress\n");
3942 		return -EIO;
3943 	}
3944 
3945 	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3946 					sglist->num_sg, DMA_TO_DEVICE);
3947 
3948 	if (!sglist->num_dma_sg) {
3949 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3950 		dev_err(&ioa_cfg->pdev->dev,
3951 			"Failed to map microcode download buffer!\n");
3952 		return -EIO;
3953 	}
3954 
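	/*
	 * Hand the mapped image off via ucode_sglist; the adapter reset
	 * initiated below performs the actual microcode download.
	 */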
3955 	ioa_cfg->ucode_sglist = sglist;
3956 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3957 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3958 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3959 
3960 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3961 	ioa_cfg->ucode_sglist = NULL;
3962 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3963 	return 0;
3964 }
3965 
3966 /**
3967  * ipr_store_update_fw - Update the firmware on the adapter
3968  * @dev:	device struct
3969  * @buf:	buffer
3970  * @count:	buffer size
3971  *
3972  * This function will update the firmware on the adapter.
3973  *
3974  * Return value:
3975  * 	count on success / other on failure
3976  **/
3977 static ssize_t ipr_store_update_fw(struct device *dev,
3978 				   struct device_attribute *attr,
3979 				   const char *buf, size_t count)
3980 {
3981 	struct Scsi_Host *shost = class_to_shost(dev);
3982 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3983 	struct ipr_ucode_image_header *image_hdr;
3984 	const struct firmware *fw_entry;
3985 	struct ipr_sglist *sglist;
3986 	char fname[100];
3987 	char *src;
3988 	int len, result, dnld_size;
3989 
3990 	if (!capable(CAP_SYS_ADMIN))
3991 		return -EACCES;
3992 
3993 	len = snprintf(fname, 99, "%s", buf);
3994 	fname[len-1] = '\0';
3995 
3996 	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3997 		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3998 		return -EIO;
3999 	}
4000 
4001 	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4002 
4003 	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4004 	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4005 	sglist = ipr_alloc_ucode_buffer(dnld_size);
4006 
4007 	if (!sglist) {
4008 		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4009 		release_firmware(fw_entry);
4010 		return -ENOMEM;
4011 	}
4012 
4013 	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4014 
4015 	if (result) {
4016 		dev_err(&ioa_cfg->pdev->dev,
4017 			"Microcode buffer copy to DMA buffer failed\n");
4018 		goto out;
4019 	}
4020 
4021 	ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4022 
4023 	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4024 
4025 	if (!result)
4026 		result = count;
4027 out:
4028 	ipr_free_ucode_buffer(sglist);
4029 	release_firmware(fw_entry);
4030 	return result;
4031 }
4032 
4033 static struct device_attribute ipr_update_fw_attr = {
4034 	.attr = {
4035 		.name =		"update_fw",
4036 		.mode =		S_IWUSR,
4037 	},
4038 	.store = ipr_store_update_fw
4039 };
4040 
4041 /**
4042  * ipr_show_fw_type - Show the adapter's firmware type.
4043  * @dev:	class device struct
4044  * @buf:	buffer
4045  *
4046  * Return value:
4047  *	number of bytes printed to buffer
4048  **/
4049 static ssize_t ipr_show_fw_type(struct device *dev,
4050 				struct device_attribute *attr, char *buf)
4051 {
4052 	struct Scsi_Host *shost = class_to_shost(dev);
4053 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4054 	unsigned long lock_flags = 0;
4055 	int len;
4056 
4057 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4058 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4059 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4060 	return len;
4061 }
4062 
4063 static struct device_attribute ipr_ioa_fw_type_attr = {
4064 	.attr = {
4065 		.name =		"fw_type",
4066 		.mode =		S_IRUGO,
4067 	},
4068 	.show = ipr_show_fw_type
4069 };
4070 
4071 static struct device_attribute *ipr_ioa_attrs[] = {
4072 	&ipr_fw_version_attr,
4073 	&ipr_log_level_attr,
4074 	&ipr_diagnostics_attr,
4075 	&ipr_ioa_state_attr,
4076 	&ipr_ioa_reset_attr,
4077 	&ipr_update_fw_attr,
4078 	&ipr_ioa_fw_type_attr,
4079 	&ipr_iopoll_weight_attr,
4080 	NULL,
4081 };
4082 
4083 #ifdef CONFIG_SCSI_IPR_DUMP
4084 /**
4085  * ipr_read_dump - Dump the adapter
4086  * @filp:		open sysfs file
4087  * @kobj:		kobject struct
4088  * @bin_attr:		bin_attribute struct
4089  * @buf:		buffer
4090  * @off:		offset
4091  * @count:		buffer size
4092  *
4093  * Return value:
4094  *	number of bytes read
4095  **/
4096 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4097 			     struct bin_attribute *bin_attr,
4098 			     char *buf, loff_t off, size_t count)
4099 {
4100 	struct device *cdev = container_of(kobj, struct device, kobj);
4101 	struct Scsi_Host *shost = class_to_shost(cdev);
4102 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4103 	struct ipr_dump *dump;
4104 	unsigned long lock_flags = 0;
4105 	char *src;
4106 	int len, sdt_end;
4107 	size_t rc = count;
4108 
4109 	if (!capable(CAP_SYS_ADMIN))
4110 		return -EACCES;
4111 
4112 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4113 	dump = ioa_cfg->dump;
4114 
4115 	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4116 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4117 		return 0;
4118 	}
4119 	kref_get(&dump->kref);
4120 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4121 
4122 	if (off > dump->driver_dump.hdr.len) {
4123 		kref_put(&dump->kref, ipr_release_dump);
4124 		return 0;
4125 	}
4126 
4127 	if (off + count > dump->driver_dump.hdr.len) {
4128 		count = dump->driver_dump.hdr.len - off;
4129 		rc = count;
4130 	}
4131 
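	/*
	 * The dump is read back as three consecutive regions: the driver
	 * dump header, the IOA dump/SDT area, and finally the raw data
	 * pages collected in ioa_data[].
	 */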
4132 	if (count && off < sizeof(dump->driver_dump)) {
4133 		if (off + count > sizeof(dump->driver_dump))
4134 			len = sizeof(dump->driver_dump) - off;
4135 		else
4136 			len = count;
4137 		src = (u8 *)&dump->driver_dump + off;
4138 		memcpy(buf, src, len);
4139 		buf += len;
4140 		off += len;
4141 		count -= len;
4142 	}
4143 
4144 	off -= sizeof(dump->driver_dump);
4145 
4146 	if (ioa_cfg->sis64)
4147 		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4148 			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4149 			   sizeof(struct ipr_sdt_entry));
4150 	else
4151 		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4152 			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4153 
4154 	if (count && off < sdt_end) {
4155 		if (off + count > sdt_end)
4156 			len = sdt_end - off;
4157 		else
4158 			len = count;
4159 		src = (u8 *)&dump->ioa_dump + off;
4160 		memcpy(buf, src, len);
4161 		buf += len;
4162 		off += len;
4163 		count -= len;
4164 	}
4165 
4166 	off -= sdt_end;
4167 
4168 	while (count) {
4169 		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4170 			len = PAGE_ALIGN(off) - off;
4171 		else
4172 			len = count;
4173 		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4174 		src += off & ~PAGE_MASK;
4175 		memcpy(buf, src, len);
4176 		buf += len;
4177 		off += len;
4178 		count -= len;
4179 	}
4180 
4181 	kref_put(&dump->kref, ipr_release_dump);
4182 	return rc;
4183 }
4184 
4185 /**
4186  * ipr_alloc_dump - Prepare for adapter dump
4187  * @ioa_cfg:	ioa config struct
4188  *
4189  * Return value:
4190  *	0 on success / other on failure
4191  **/
4192 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4193 {
4194 	struct ipr_dump *dump;
4195 	__be32 **ioa_data;
4196 	unsigned long lock_flags = 0;
4197 
4198 	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4199 
4200 	if (!dump) {
4201 		ipr_err("Dump memory allocation failed\n");
4202 		return -ENOMEM;
4203 	}
4204 
4205 	if (ioa_cfg->sis64)
4206 		ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4207 	else
4208 		ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4209 
4210 	if (!ioa_data) {
4211 		ipr_err("Dump memory allocation failed\n");
4212 		kfree(dump);
4213 		return -ENOMEM;
4214 	}
4215 
4216 	dump->ioa_dump.ioa_data = ioa_data;
4217 
4218 	kref_init(&dump->kref);
4219 	dump->ioa_cfg = ioa_cfg;
4220 
4221 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4222 
4223 	if (INACTIVE != ioa_cfg->sdt_state) {
4224 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4225 		vfree(dump->ioa_dump.ioa_data);
4226 		kfree(dump);
4227 		return 0;
4228 	}
4229 
4230 	ioa_cfg->dump = dump;
4231 	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4232 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4233 		ioa_cfg->dump_taken = 1;
4234 		schedule_work(&ioa_cfg->work_q);
4235 	}
4236 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4237 
4238 	return 0;
4239 }
4240 
4241 /**
4242  * ipr_free_dump - Free adapter dump memory
4243  * @ioa_cfg:	ioa config struct
4244  *
4245  * Return value:
4246  *	0 on success / other on failure
4247  **/
4248 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4249 {
4250 	struct ipr_dump *dump;
4251 	unsigned long lock_flags = 0;
4252 
4253 	ENTER;
4254 
4255 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4256 	dump = ioa_cfg->dump;
4257 	if (!dump) {
4258 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4259 		return 0;
4260 	}
4261 
4262 	ioa_cfg->dump = NULL;
4263 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4264 
4265 	kref_put(&dump->kref, ipr_release_dump);
4266 
4267 	LEAVE;
4268 	return 0;
4269 }
4270 
4271 /**
4272  * ipr_write_dump - Setup dump state of adapter
4273  * @filp:		open sysfs file
4274  * @kobj:		kobject struct
4275  * @bin_attr:		bin_attribute struct
4276  * @buf:		buffer
4277  * @off:		offset
4278  * @count:		buffer size
4279  *
4280  * Return value:
4281  *	count on success / other on failure
4282  **/
4283 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4284 			      struct bin_attribute *bin_attr,
4285 			      char *buf, loff_t off, size_t count)
4286 {
4287 	struct device *cdev = container_of(kobj, struct device, kobj);
4288 	struct Scsi_Host *shost = class_to_shost(cdev);
4289 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4290 	int rc;
4291 
4292 	if (!capable(CAP_SYS_ADMIN))
4293 		return -EACCES;
4294 
4295 	if (buf[0] == '1')
4296 		rc = ipr_alloc_dump(ioa_cfg);
4297 	else if (buf[0] == '0')
4298 		rc = ipr_free_dump(ioa_cfg);
4299 	else
4300 		return -EINVAL;
4301 
4302 	if (rc)
4303 		return rc;
4304 	else
4305 		return count;
4306 }
4307 
4308 static struct bin_attribute ipr_dump_attr = {
4309 	.attr =	{
4310 		.name = "dump",
4311 		.mode = S_IRUSR | S_IWUSR,
4312 	},
4313 	.size = 0,
4314 	.read = ipr_read_dump,
4315 	.write = ipr_write_dump
4316 };
4317 #else
4318 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4319 #endif
4320 
4321 /**
4322  * ipr_change_queue_depth - Change the device's queue depth
4323  * @sdev:	scsi device struct
4324  * @qdepth:	depth to set
4325  * @reason:	calling context
4326  *
4327  * Return value:
4328  * 	actual depth set
4329  **/
4330 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4331 				  int reason)
4332 {
4333 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4334 	struct ipr_resource_entry *res;
4335 	unsigned long lock_flags = 0;
4336 
4337 	if (reason != SCSI_QDEPTH_DEFAULT)
4338 		return -EOPNOTSUPP;
4339 
4340 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4341 	res = (struct ipr_resource_entry *)sdev->hostdata;
4342 
4343 	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4344 		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4345 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4346 
4347 	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4348 	return sdev->queue_depth;
4349 }
4350 
4351 /**
4352  * ipr_change_queue_type - Change the device's queue type
4353  * @sdev:		scsi device struct
4354  * @tag_type:	type of tags to use
4355  *
4356  * Return value:
4357  * 	actual queue type set
4358  **/
4359 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4360 {
4361 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4362 	struct ipr_resource_entry *res;
4363 	unsigned long lock_flags = 0;
4364 
4365 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4366 	res = (struct ipr_resource_entry *)sdev->hostdata;
4367 	if (res && ipr_is_gscsi(res))
4368 		tag_type = scsi_change_queue_type(sdev, tag_type);
4369 	else
4370 		tag_type = 0;
4371 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4372 	return tag_type;
4373 }
4374 
4375 /**
4376  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4377  * @dev:	device struct
4378  * @attr:	device attribute structure
4379  * @buf:	buffer
4380  *
4381  * Return value:
4382  * 	number of bytes printed to buffer
4383  **/
4384 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4385 {
4386 	struct scsi_device *sdev = to_scsi_device(dev);
4387 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4388 	struct ipr_resource_entry *res;
4389 	unsigned long lock_flags = 0;
4390 	ssize_t len = -ENXIO;
4391 
4392 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4393 	res = (struct ipr_resource_entry *)sdev->hostdata;
4394 	if (res)
4395 		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4396 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4397 	return len;
4398 }
4399 
4400 static struct device_attribute ipr_adapter_handle_attr = {
4401 	.attr = {
4402 		.name = 	"adapter_handle",
4403 		.mode =		S_IRUSR,
4404 	},
4405 	.show = ipr_show_adapter_handle
4406 };
4407 
4408 /**
4409  * ipr_show_resource_path - Show the resource path or the resource address for
4410  *			    this device.
4411  * @dev:	device struct
4412  * @attr:	device attribute structure
4413  * @buf:	buffer
4414  *
4415  * Return value:
4416  * 	number of bytes printed to buffer
4417  **/
4418 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4419 {
4420 	struct scsi_device *sdev = to_scsi_device(dev);
4421 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4422 	struct ipr_resource_entry *res;
4423 	unsigned long lock_flags = 0;
4424 	ssize_t len = -ENXIO;
4425 	char buffer[IPR_MAX_RES_PATH_LENGTH];
4426 
4427 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4428 	res = (struct ipr_resource_entry *)sdev->hostdata;
4429 	if (res && ioa_cfg->sis64)
4430 		len = snprintf(buf, PAGE_SIZE, "%s\n",
4431 			       __ipr_format_res_path(res->res_path, buffer,
4432 						     sizeof(buffer)));
4433 	else if (res)
4434 		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4435 			       res->bus, res->target, res->lun);
4436 
4437 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4438 	return len;
4439 }
4440 
4441 static struct device_attribute ipr_resource_path_attr = {
4442 	.attr = {
4443 		.name = 	"resource_path",
4444 		.mode =		S_IRUGO,
4445 	},
4446 	.show = ipr_show_resource_path
4447 };
4448 
4449 /**
4450  * ipr_show_device_id - Show the device_id for this device.
4451  * @dev:	device struct
4452  * @attr:	device attribute structure
4453  * @buf:	buffer
4454  *
4455  * Return value:
4456  *	number of bytes printed to buffer
4457  **/
4458 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4459 {
4460 	struct scsi_device *sdev = to_scsi_device(dev);
4461 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4462 	struct ipr_resource_entry *res;
4463 	unsigned long lock_flags = 0;
4464 	ssize_t len = -ENXIO;
4465 
4466 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4467 	res = (struct ipr_resource_entry *)sdev->hostdata;
4468 	if (res && ioa_cfg->sis64)
4469 		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4470 	else if (res)
4471 		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4472 
4473 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4474 	return len;
4475 }
4476 
4477 static struct device_attribute ipr_device_id_attr = {
4478 	.attr = {
4479 		.name =		"device_id",
4480 		.mode =		S_IRUGO,
4481 	},
4482 	.show = ipr_show_device_id
4483 };
4484 
4485 /**
4486  * ipr_show_resource_type - Show the resource type for this device.
4487  * @dev:	device struct
4488  * @attr:	device attribute structure
4489  * @buf:	buffer
4490  *
4491  * Return value:
4492  *	number of bytes printed to buffer
4493  **/
4494 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4495 {
4496 	struct scsi_device *sdev = to_scsi_device(dev);
4497 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4498 	struct ipr_resource_entry *res;
4499 	unsigned long lock_flags = 0;
4500 	ssize_t len = -ENXIO;
4501 
4502 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4503 	res = (struct ipr_resource_entry *)sdev->hostdata;
4504 
4505 	if (res)
4506 		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4507 
4508 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4509 	return len;
4510 }
4511 
4512 static struct device_attribute ipr_resource_type_attr = {
4513 	.attr = {
4514 		.name =		"resource_type",
4515 		.mode =		S_IRUGO,
4516 	},
4517 	.show = ipr_show_resource_type
4518 };
4519 
4520 static struct device_attribute *ipr_dev_attrs[] = {
4521 	&ipr_adapter_handle_attr,
4522 	&ipr_resource_path_attr,
4523 	&ipr_device_id_attr,
4524 	&ipr_resource_type_attr,
4525 	NULL,
4526 };
4527 
4528 /**
4529  * ipr_biosparam - Return the HSC mapping
4530  * @sdev:			scsi device struct
4531  * @block_device:	block device pointer
4532  * @capacity:		capacity of the device
4533  * @parm:			Array containing returned HSC values.
4534  *
4535  * This function generates the HSC parms that fdisk uses.
4536  * We want to make sure we return something that places partitions
4537  * on 4k boundaries for best performance with the IOA.
4538  *
4539  * Return value:
4540  * 	0 on success
4541  **/
4542 static int ipr_biosparam(struct scsi_device *sdev,
4543 			 struct block_device *block_device,
4544 			 sector_t capacity, int *parm)
4545 {
4546 	int heads, sectors;
4547 	sector_t cylinders;
4548 
4549 	heads = 128;
4550 	sectors = 32;
4551 
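	/*
	 * 128 heads * 32 sectors = 4096 sectors per cylinder, so partitions
	 * aligned to cylinder boundaries always start on a 4k boundary.
	 */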
4552 	cylinders = capacity;
4553 	sector_div(cylinders, (128 * 32));
4554 
4555 	/* return result */
4556 	parm[0] = heads;
4557 	parm[1] = sectors;
4558 	parm[2] = cylinders;
4559 
4560 	return 0;
4561 }
4562 
4563 /**
4564  * ipr_find_starget - Find target based on bus/target.
4565  * @starget:	scsi target struct
4566  *
4567  * Return value:
4568  * 	resource entry pointer if found / NULL if not found
4569  **/
4570 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4571 {
4572 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4573 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4574 	struct ipr_resource_entry *res;
4575 
4576 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4577 		if ((res->bus == starget->channel) &&
4578 		    (res->target == starget->id)) {
4579 			return res;
4580 		}
4581 	}
4582 
4583 	return NULL;
4584 }
4585 
4586 static struct ata_port_info sata_port_info;
4587 
4588 /**
4589  * ipr_target_alloc - Prepare for commands to a SCSI target
4590  * @starget:	scsi target struct
4591  *
4592  * If the device is a SATA device, this function allocates an
4593  * ATA port with libata, else it does nothing.
4594  *
4595  * Return value:
4596  * 	0 on success / non-0 on failure
4597  **/
4598 static int ipr_target_alloc(struct scsi_target *starget)
4599 {
4600 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4601 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4602 	struct ipr_sata_port *sata_port;
4603 	struct ata_port *ap;
4604 	struct ipr_resource_entry *res;
4605 	unsigned long lock_flags;
4606 
4607 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4608 	res = ipr_find_starget(starget);
4609 	starget->hostdata = NULL;
4610 
4611 	if (res && ipr_is_gata(res)) {
4612 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4613 		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4614 		if (!sata_port)
4615 			return -ENOMEM;
4616 
4617 		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4618 		if (ap) {
4619 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4620 			sata_port->ioa_cfg = ioa_cfg;
4621 			sata_port->ap = ap;
4622 			sata_port->res = res;
4623 
4624 			res->sata_port = sata_port;
4625 			ap->private_data = sata_port;
4626 			starget->hostdata = sata_port;
4627 		} else {
4628 			kfree(sata_port);
4629 			return -ENOMEM;
4630 		}
4631 	}
4632 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4633 
4634 	return 0;
4635 }
4636 
4637 /**
4638  * ipr_target_destroy - Destroy a SCSI target
4639  * @starget:	scsi target struct
4640  *
4641  * If the device was a SATA device, this function frees the libata
4642  * ATA port, else it does nothing.
4643  *
4644  **/
4645 static void ipr_target_destroy(struct scsi_target *starget)
4646 {
4647 	struct ipr_sata_port *sata_port = starget->hostdata;
4648 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4649 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4650 
4651 	if (ioa_cfg->sis64) {
4652 		if (!ipr_find_starget(starget)) {
4653 			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4654 				clear_bit(starget->id, ioa_cfg->array_ids);
4655 			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4656 				clear_bit(starget->id, ioa_cfg->vset_ids);
4657 			else if (starget->channel == 0)
4658 				clear_bit(starget->id, ioa_cfg->target_ids);
4659 		}
4660 	}
4661 
4662 	if (sata_port) {
4663 		starget->hostdata = NULL;
4664 		ata_sas_port_destroy(sata_port->ap);
4665 		kfree(sata_port);
4666 	}
4667 }
4668 
4669 /**
4670  * ipr_find_sdev - Find device based on bus/target/lun.
4671  * @sdev:	scsi device struct
4672  *
4673  * Return value:
4674  * 	resource entry pointer if found / NULL if not found
4675  **/
4676 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4677 {
4678 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4679 	struct ipr_resource_entry *res;
4680 
4681 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4682 		if ((res->bus == sdev->channel) &&
4683 		    (res->target == sdev->id) &&
4684 		    (res->lun == sdev->lun))
4685 			return res;
4686 	}
4687 
4688 	return NULL;
4689 }
4690 
4691 /**
4692  * ipr_slave_destroy - Unconfigure a SCSI device
4693  * @sdev:	scsi device struct
4694  *
4695  * Return value:
4696  * 	nothing
4697  **/
4698 static void ipr_slave_destroy(struct scsi_device *sdev)
4699 {
4700 	struct ipr_resource_entry *res;
4701 	struct ipr_ioa_cfg *ioa_cfg;
4702 	unsigned long lock_flags = 0;
4703 
4704 	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4705 
4706 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4707 	res = (struct ipr_resource_entry *) sdev->hostdata;
4708 	if (res) {
4709 		if (res->sata_port)
4710 			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4711 		sdev->hostdata = NULL;
4712 		res->sdev = NULL;
4713 		res->sata_port = NULL;
4714 	}
4715 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4716 }
4717 
4718 /**
4719  * ipr_slave_configure - Configure a SCSI device
4720  * @sdev:	scsi device struct
4721  *
4722  * This function configures the specified scsi device.
4723  *
4724  * Return value:
4725  * 	0 on success
4726  **/
4727 static int ipr_slave_configure(struct scsi_device *sdev)
4728 {
4729 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4730 	struct ipr_resource_entry *res;
4731 	struct ata_port *ap = NULL;
4732 	unsigned long lock_flags = 0;
4733 	char buffer[IPR_MAX_RES_PATH_LENGTH];
4734 
4735 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4736 	res = sdev->hostdata;
4737 	if (res) {
4738 		if (ipr_is_af_dasd_device(res))
4739 			sdev->type = TYPE_RAID;
4740 		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4741 			sdev->scsi_level = 4;
4742 			sdev->no_uld_attach = 1;
4743 		}
4744 		if (ipr_is_vset_device(res)) {
4745 			blk_queue_rq_timeout(sdev->request_queue,
4746 					     IPR_VSET_RW_TIMEOUT);
4747 			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4748 		}
4749 		if (ipr_is_gata(res) && res->sata_port)
4750 			ap = res->sata_port->ap;
4751 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4752 
4753 		if (ap) {
4754 			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4755 			ata_sas_slave_configure(sdev, ap);
4756 		} else
4757 			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4758 		if (ioa_cfg->sis64)
4759 			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4760 				    ipr_format_res_path(ioa_cfg,
4761 				res->res_path, buffer, sizeof(buffer)));
4762 		return 0;
4763 	}
4764 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4765 	return 0;
4766 }
4767 
4768 /**
4769  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4770  * @sdev:	scsi device struct
4771  *
4772  * This function initializes an ATA port so that future commands
4773  * sent through queuecommand will work.
4774  *
4775  * Return value:
4776  * 	0 on success
4777  **/
4778 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4779 {
4780 	struct ipr_sata_port *sata_port = NULL;
4781 	int rc = -ENXIO;
4782 
4783 	ENTER;
4784 	if (sdev->sdev_target)
4785 		sata_port = sdev->sdev_target->hostdata;
4786 	if (sata_port) {
4787 		rc = ata_sas_port_init(sata_port->ap);
4788 		if (rc == 0)
4789 			rc = ata_sas_sync_probe(sata_port->ap);
4790 	}
4791 
4792 	if (rc)
4793 		ipr_slave_destroy(sdev);
4794 
4795 	LEAVE;
4796 	return rc;
4797 }
4798 
4799 /**
4800  * ipr_slave_alloc - Prepare for commands to a device.
4801  * @sdev:	scsi device struct
4802  *
4803  * This function saves a pointer to the resource entry
4804  * in the scsi device struct if the device exists. We
4805  * can then use this pointer in ipr_queuecommand when
4806  * handling new commands.
4807  *
4808  * Return value:
4809  * 	0 on success / -ENXIO if device does not exist
4810  **/
4811 static int ipr_slave_alloc(struct scsi_device *sdev)
4812 {
4813 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4814 	struct ipr_resource_entry *res;
4815 	unsigned long lock_flags;
4816 	int rc = -ENXIO;
4817 
4818 	sdev->hostdata = NULL;
4819 
4820 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4821 
4822 	res = ipr_find_sdev(sdev);
4823 	if (res) {
4824 		res->sdev = sdev;
4825 		res->add_to_ml = 0;
4826 		res->in_erp = 0;
4827 		sdev->hostdata = res;
4828 		if (!ipr_is_naca_model(res))
4829 			res->needs_sync_complete = 1;
4830 		rc = 0;
4831 		if (ipr_is_gata(res)) {
4832 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4833 			return ipr_ata_slave_alloc(sdev);
4834 		}
4835 	}
4836 
4837 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4838 
4839 	return rc;
4840 }
4841 
4842 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4843 {
4844 	struct ipr_ioa_cfg *ioa_cfg;
4845 	unsigned long lock_flags = 0;
4846 	int rc = SUCCESS;
4847 
4848 	ENTER;
4849 	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4850 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4851 
4852 	if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4853 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4854 		dev_err(&ioa_cfg->pdev->dev,
4855 			"Adapter being reset as a result of error recovery.\n");
4856 
4857 		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4858 			ioa_cfg->sdt_state = GET_DUMP;
4859 	}
4860 
4861 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4862 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4863 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4864 
4865 	/* If we got hit with a host reset while we were already resetting
4866 	 * the adapter for some reason and that reset failed, fail this one too. */
4867 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4868 		ipr_trace;
4869 		rc = FAILED;
4870 	}
4871 
4872 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4873 	LEAVE;
4874 	return rc;
4875 }
4876 
4877 /**
4878  * ipr_device_reset - Reset the device
4879  * @ioa_cfg:	ioa config struct
4880  * @res:		resource entry struct
4881  *
4882  * This function issues a device reset to the affected device.
4883  * If the device is a SCSI device, a LUN reset will be sent
4884  * to the device first. If that does not work, a target reset
4885  * will be sent. If the device is a SATA device, a PHY reset will
4886  * be sent.
4887  *
4888  * Return value:
4889  *	0 on success / non-zero on failure
4890  **/
4891 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4892 			    struct ipr_resource_entry *res)
4893 {
4894 	struct ipr_cmnd *ipr_cmd;
4895 	struct ipr_ioarcb *ioarcb;
4896 	struct ipr_cmd_pkt *cmd_pkt;
4897 	struct ipr_ioarcb_ata_regs *regs;
4898 	u32 ioasc;
4899 
4900 	ENTER;
4901 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4902 	ioarcb = &ipr_cmd->ioarcb;
4903 	cmd_pkt = &ioarcb->cmd_pkt;
4904 
4905 	if (ipr_cmd->ioa_cfg->sis64) {
4906 		regs = &ipr_cmd->i.ata_ioadl.regs;
4907 		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4908 	} else
4909 		regs = &ioarcb->u.add_data.u.regs;
4910 
4911 	ioarcb->res_handle = res->res_handle;
4912 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4913 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4914 	if (ipr_is_gata(res)) {
4915 		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4916 		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4917 		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4918 	}
4919 
4920 	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4921 	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4922 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
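	/* For SATA devices, save the returned ATA status so libata can see it */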
4923 	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4924 		if (ipr_cmd->ioa_cfg->sis64)
4925 			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4926 			       sizeof(struct ipr_ioasa_gata));
4927 		else
4928 			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4929 			       sizeof(struct ipr_ioasa_gata));
4930 	}
4931 
4932 	LEAVE;
4933 	return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
4934 }
4935 
4936 /**
4937  * ipr_sata_reset - Reset the SATA port
4938  * @link:	SATA link to reset
4939  * @classes:	class of the attached device
4940  *
4941  * This function issues a SATA phy reset to the affected ATA link.
4942  *
4943  * Return value:
4944  *	0 on success / non-zero on failure
4945  **/
4946 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4947 				unsigned long deadline)
4948 {
4949 	struct ipr_sata_port *sata_port = link->ap->private_data;
4950 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4951 	struct ipr_resource_entry *res;
4952 	unsigned long lock_flags = 0;
4953 	int rc = -ENXIO;
4954 
4955 	ENTER;
4956 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4957 	while (ioa_cfg->in_reset_reload) {
4958 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4959 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4960 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4961 	}
4962 
4963 	res = sata_port->res;
4964 	if (res) {
4965 		rc = ipr_device_reset(ioa_cfg, res);
4966 		*classes = res->ata_class;
4967 	}
4968 
4969 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4970 	LEAVE;
4971 	return rc;
4972 }
4973 
4974 /**
4975  * ipr_eh_dev_reset - Reset the device
4976  * @scsi_cmd:	scsi command struct
4977  *
4978  * This function issues a device reset to the affected device.
4979  * A LUN reset will be sent to the device first. If that does
4980  * not work, a target reset will be sent.
4981  *
4982  * Return value:
4983  *	SUCCESS / FAILED
4984  **/
4985 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
4986 {
4987 	struct ipr_cmnd *ipr_cmd;
4988 	struct ipr_ioa_cfg *ioa_cfg;
4989 	struct ipr_resource_entry *res;
4990 	struct ata_port *ap;
4991 	int rc = 0;
4992 	struct ipr_hrr_queue *hrrq;
4993 
4994 	ENTER;
4995 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4996 	res = scsi_cmd->device->hostdata;
4997 
4998 	if (!res)
4999 		return FAILED;
5000 
5001 	/*
5002 	 * If we are currently going through reset/reload, return failed. This will force the
5003 	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5004 	 * reset to complete
5005 	 */
5006 	if (ioa_cfg->in_reset_reload)
5007 		return FAILED;
5008 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5009 		return FAILED;
5010 
5011 	for_each_hrrq(hrrq, ioa_cfg) {
5012 		spin_lock(&hrrq->_lock);
5013 		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5014 			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5015 				if (ipr_cmd->scsi_cmd)
5016 					ipr_cmd->done = ipr_scsi_eh_done;
5017 				if (ipr_cmd->qc)
5018 					ipr_cmd->done = ipr_sata_eh_done;
5019 				if (ipr_cmd->qc &&
5020 				    !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5021 					ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5022 					ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5023 				}
5024 			}
5025 		}
5026 		spin_unlock(&hrrq->_lock);
5027 	}
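	/*
	 * Commands already outstanding for this device will now complete
	 * through the eh done handlers set above rather than the normal
	 * completion path.
	 */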
5028 	res->resetting_device = 1;
5029 	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5030 
5031 	if (ipr_is_gata(res) && res->sata_port) {
5032 		ap = res->sata_port->ap;
5033 		spin_unlock_irq(scsi_cmd->device->host->host_lock);
5034 		ata_std_error_handler(ap);
5035 		spin_lock_irq(scsi_cmd->device->host->host_lock);
5036 
5037 		for_each_hrrq(hrrq, ioa_cfg) {
5038 			spin_lock(&hrrq->_lock);
5039 			list_for_each_entry(ipr_cmd,
5040 					    &hrrq->hrrq_pending_q, queue) {
5041 				if (ipr_cmd->ioarcb.res_handle ==
5042 				    res->res_handle) {
5043 					rc = -EIO;
5044 					break;
5045 				}
5046 			}
5047 			spin_unlock(&hrrq->_lock);
5048 		}
5049 	} else
5050 		rc = ipr_device_reset(ioa_cfg, res);
5051 	res->resetting_device = 0;
5052 	res->reset_occurred = 1;
5053 
5054 	LEAVE;
5055 	return rc ? FAILED : SUCCESS;
5056 }
5057 
5058 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5059 {
5060 	int rc;
5061 
5062 	spin_lock_irq(cmd->device->host->host_lock);
5063 	rc = __ipr_eh_dev_reset(cmd);
5064 	spin_unlock_irq(cmd->device->host->host_lock);
5065 
5066 	return rc;
5067 }
5068 
5069 /**
5070  * ipr_bus_reset_done - Op done function for bus reset.
5071  * @ipr_cmd:	ipr command struct
5072  *
5073  * This function is the op done function for a bus reset
5074  *
5075  * Return value:
5076  * 	none
5077  **/
5078 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5079 {
5080 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5081 	struct ipr_resource_entry *res;
5082 
5083 	ENTER;
5084 	if (!ioa_cfg->sis64)
5085 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5086 			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5087 				scsi_report_bus_reset(ioa_cfg->host, res->bus);
5088 				break;
5089 			}
5090 		}
5091 
5092 	/*
5093 	 * If abort has not completed, indicate the reset has, else call the
5094 	 * abort's done function to wake the sleeping eh thread
5095 	 */
5096 	if (ipr_cmd->sibling->sibling)
5097 		ipr_cmd->sibling->sibling = NULL;
5098 	else
5099 		ipr_cmd->sibling->done(ipr_cmd->sibling);
5100 
5101 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5102 	LEAVE;
5103 }
5104 
5105 /**
5106  * ipr_abort_timeout - An abort task has timed out
5107  * @ipr_cmd:	ipr command struct
5108  *
5109  * This function handles when an abort task times out. If this
5110  * happens we issue a bus reset since we have resources tied
5111  * up that must be freed before returning to the midlayer.
5112  *
5113  * Return value:
5114  *	none
5115  **/
5116 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5117 {
5118 	struct ipr_cmnd *reset_cmd;
5119 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5120 	struct ipr_cmd_pkt *cmd_pkt;
5121 	unsigned long lock_flags = 0;
5122 
5123 	ENTER;
5124 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5125 	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5126 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5127 		return;
5128 	}
5129 
5130 	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5131 	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5132 	ipr_cmd->sibling = reset_cmd;
5133 	reset_cmd->sibling = ipr_cmd;
5134 	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5135 	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5136 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5137 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5138 	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5139 
5140 	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5141 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5142 	LEAVE;
5143 }
5144 
5145 /**
5146  * ipr_cancel_op - Cancel specified op
5147  * @scsi_cmd:	scsi command struct
5148  *
5149  * This function cancels specified op.
5150  *
5151  * Return value:
5152  *	SUCCESS / FAILED
5153  **/
5154 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5155 {
5156 	struct ipr_cmnd *ipr_cmd;
5157 	struct ipr_ioa_cfg *ioa_cfg;
5158 	struct ipr_resource_entry *res;
5159 	struct ipr_cmd_pkt *cmd_pkt;
5160 	u32 ioasc, int_reg;
5161 	int op_found = 0;
5162 	struct ipr_hrr_queue *hrrq;
5163 
5164 	ENTER;
5165 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5166 	res = scsi_cmd->device->hostdata;
5167 
5168 	/* If we are currently going through reset/reload, return failed.
5169 	 * This will force the mid-layer to call ipr_eh_host_reset,
5170 	 * which will then go to sleep and wait for the reset to complete
5171 	 */
5172 	if (ioa_cfg->in_reset_reload ||
5173 	    ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5174 		return FAILED;
5175 	if (!res)
5176 		return FAILED;
5177 
5178 	/*
5179 	 * If we are aborting a timed out op, chances are that the timeout was caused
5180 	 * by an as-yet-undetected EEH error. In such cases, reading a register will
5181 	 * trigger the EEH recovery infrastructure.
5182 	 */
5183 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5184 
5185 	if (!ipr_is_gscsi(res))
5186 		return FAILED;
5187 
5188 	for_each_hrrq(hrrq, ioa_cfg) {
5189 		spin_lock(&hrrq->_lock);
5190 		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5191 			if (ipr_cmd->scsi_cmd == scsi_cmd) {
5192 				ipr_cmd->done = ipr_scsi_eh_done;
5193 				op_found = 1;
5194 				break;
5195 			}
5196 		}
5197 		spin_unlock(&hrrq->_lock);
5198 	}
5199 
5200 	if (!op_found)
5201 		return SUCCESS;
5202 
5203 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5204 	ipr_cmd->ioarcb.res_handle = res->res_handle;
5205 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5206 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5207 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5208 	ipr_cmd->u.sdev = scsi_cmd->device;
5209 
5210 	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5211 		    scsi_cmd->cmnd[0]);
5212 	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5213 	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5214 
5215 	/*
5216 	 * If the abort task timed out and we sent a bus reset, we will get
5217 	 * one the following responses to the abort
5218 	 */
5219 	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5220 		ioasc = 0;
5221 		ipr_trace;
5222 	}
5223 
5224 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5225 	if (!ipr_is_naca_model(res))
5226 		res->needs_sync_complete = 1;
5227 
5228 	LEAVE;
5229 	return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5230 }
5231 
5232 /**
5233  * ipr_eh_abort - Abort a single op
5234  * @scsi_cmd:	scsi command struct
5235  *
5236  * Return value:
5237  * 	SUCCESS / FAILED
5238  **/
5239 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5240 {
5241 	unsigned long flags;
5242 	int rc;
5243 
5244 	ENTER;
5245 
5246 	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5247 	rc = ipr_cancel_op(scsi_cmd);
5248 	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5249 
5250 	LEAVE;
5251 	return rc;
5252 }
5253 
5254 /**
5255  * ipr_handle_other_interrupt - Handle "other" interrupts
5256  * @ioa_cfg:	ioa config struct
5257  * @int_reg:	interrupt register
5258  *
5259  * Return value:
5260  * 	IRQ_NONE / IRQ_HANDLED
5261  **/
5262 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5263 					      u32 int_reg)
5264 {
5265 	irqreturn_t rc = IRQ_HANDLED;
5266 	u32 int_mask_reg;
5267 
5268 	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5269 	int_reg &= ~int_mask_reg;
5270 
5271 	/* If no operational interrupt is pending on the adapter, ignore it,
5272 	 * except on SIS64, where we still check for a stage change interrupt.
5273 	 */
5274 	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5275 		if (ioa_cfg->sis64) {
5276 			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5277 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5278 			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5279 
5280 				/* clear stage change */
5281 				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5282 				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5283 				list_del(&ioa_cfg->reset_cmd->queue);
5284 				del_timer(&ioa_cfg->reset_cmd->timer);
5285 				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5286 				return IRQ_HANDLED;
5287 			}
5288 		}
5289 
5290 		return IRQ_NONE;
5291 	}
5292 
5293 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5294 		/* Mask the interrupt */
5295 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5296 
5297 		/* Clear the interrupt */
5298 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5299 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5300 
5301 		list_del(&ioa_cfg->reset_cmd->queue);
5302 		del_timer(&ioa_cfg->reset_cmd->timer);
5303 		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5304 	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5305 		if (ioa_cfg->clear_isr) {
5306 			if (ipr_debug && printk_ratelimit())
5307 				dev_err(&ioa_cfg->pdev->dev,
5308 					"Spurious interrupt detected. 0x%08X\n", int_reg);
5309 			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5310 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5311 			return IRQ_NONE;
5312 		}
5313 	} else {
5314 		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5315 			ioa_cfg->ioa_unit_checked = 1;
5316 		else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5317 			dev_err(&ioa_cfg->pdev->dev,
5318 				"No Host RRQ. 0x%08X\n", int_reg);
5319 		else
5320 			dev_err(&ioa_cfg->pdev->dev,
5321 				"Permanent IOA failure. 0x%08X\n", int_reg);
5322 
5323 		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5324 			ioa_cfg->sdt_state = GET_DUMP;
5325 
5326 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5327 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5328 	}
5329 
5330 	return rc;
5331 }
5332 
5333 /**
5334  * ipr_isr_eh - Interrupt service routine error handler
5335  * @ioa_cfg:	ioa config struct
5336  * @msg:	message to log
 * @number:	number to log with the message
5337  *
5338  * Return value:
5339  * 	none
5340  **/
5341 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5342 {
5343 	ioa_cfg->errors_logged++;
5344 	dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5345 
5346 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5347 		ioa_cfg->sdt_state = GET_DUMP;
5348 
5349 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5350 }
5351 
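/**
 * ipr_process_hrrq - Process responses on an HRR queue
 * @hrr_queue:	hrr queue to process
 * @budget:	maximum number of responses to process; a non-positive
 *		value drains the queue
 * @doneq:	list to which completed commands are moved
 *
 * This function walks the host RRQ and moves each completed command
 * to @doneq until no valid entries remain or @budget is reached.
 *
 * Return value:
 *	number of responses processed
 **/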
5352 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5353 						struct list_head *doneq)
5354 {
5355 	u32 ioasc;
5356 	u16 cmd_index;
5357 	struct ipr_cmnd *ipr_cmd;
5358 	struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5359 	int num_hrrq = 0;
5360 
5361 	/* If interrupts are disabled, ignore the interrupt */
5362 	if (!hrr_queue->allow_interrupts)
5363 		return 0;
5364 
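	/*
	 * An HRRQ entry is valid while its toggle bit matches the queue's
	 * current toggle state; the state flips each time the queue wraps.
	 */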
5365 	while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5366 	       hrr_queue->toggle_bit) {
5367 
5368 		cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5369 			     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5370 			     IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5371 
5372 		if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5373 			     cmd_index < hrr_queue->min_cmd_id)) {
5374 			ipr_isr_eh(ioa_cfg,
5375 				"Invalid response handle from IOA: ",
5376 				cmd_index);
5377 			break;
5378 		}
5379 
5380 		ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5381 		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5382 
5383 		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5384 
5385 		list_move_tail(&ipr_cmd->queue, doneq);
5386 
5387 		if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5388 			hrr_queue->hrrq_curr++;
5389 		} else {
5390 			hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5391 			hrr_queue->toggle_bit ^= 1u;
5392 		}
5393 		num_hrrq++;
5394 		if (budget > 0 && num_hrrq >= budget)
5395 			break;
5396 	}
5397 
5398 	return num_hrrq;
5399 }
5400 
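/**
 * ipr_iopoll - blk_iopoll callback for HRR queue completion processing
 * @iop:	blk_iopoll structure embedded in the hrr queue
 * @budget:	maximum number of completions to process
 *
 * This function processes up to @budget completed commands and signals
 * blk_iopoll_complete when fewer than @budget were found.
 *
 * Return value:
 *	number of completed operations processed
 **/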
5401 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5402 {
5403 	struct ipr_ioa_cfg *ioa_cfg;
5404 	struct ipr_hrr_queue *hrrq;
5405 	struct ipr_cmnd *ipr_cmd, *temp;
5406 	unsigned long hrrq_flags;
5407 	int completed_ops;
5408 	LIST_HEAD(doneq);
5409 
5410 	hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5411 	ioa_cfg = hrrq->ioa_cfg;
5412 
5413 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
5414 	completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5415 
5416 	if (completed_ops < budget)
5417 		blk_iopoll_complete(iop);
5418 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5419 
5420 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5421 		list_del(&ipr_cmd->queue);
5422 		del_timer(&ipr_cmd->timer);
5423 		ipr_cmd->fast_done(ipr_cmd);
5424 	}
5425 
5426 	return completed_ops;
5427 }
5428 
5429 /**
5430  * ipr_isr - Interrupt service routine
5431  * @irq:	irq number
5432  * @devp:	pointer to hrr queue struct
5433  *
5434  * Return value:
5435  * 	IRQ_NONE / IRQ_HANDLED
5436  **/
5437 static irqreturn_t ipr_isr(int irq, void *devp)
5438 {
5439 	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5440 	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5441 	unsigned long hrrq_flags = 0;
5442 	u32 int_reg = 0;
5443 	int num_hrrq = 0;
5444 	int irq_none = 0;
5445 	struct ipr_cmnd *ipr_cmd, *temp;
5446 	irqreturn_t rc = IRQ_NONE;
5447 	LIST_HEAD(doneq);
5448 
5449 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
5450 	/* If interrupts are disabled, ignore the interrupt */
5451 	if (!hrrq->allow_interrupts) {
5452 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5453 		return IRQ_NONE;
5454 	}
5455 
5456 	while (1) {
5457 		if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5458 			rc =  IRQ_HANDLED;
5459 
5460 			if (!ioa_cfg->clear_isr)
5461 				break;
5462 
5463 			/* Clear the PCI interrupt */
5464 			num_hrrq = 0;
5465 			do {
5466 				writel(IPR_PCII_HRRQ_UPDATED,
5467 				     ioa_cfg->regs.clr_interrupt_reg32);
5468 				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5469 			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5470 				num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5471 
5472 		} else if (rc == IRQ_NONE && irq_none == 0) {
5473 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5474 			irq_none++;
5475 		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5476 			   int_reg & IPR_PCII_HRRQ_UPDATED) {
5477 			ipr_isr_eh(ioa_cfg,
5478 				"Error clearing HRRQ: ", num_hrrq);
5479 			rc = IRQ_HANDLED;
5480 			break;
5481 		} else
5482 			break;
5483 	}
5484 
5485 	if (unlikely(rc == IRQ_NONE))
5486 		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5487 
5488 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5489 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5490 		list_del(&ipr_cmd->queue);
5491 		del_timer(&ipr_cmd->timer);
5492 		ipr_cmd->fast_done(ipr_cmd);
5493 	}
5494 	return rc;
5495 }
5496 
5497 /**
5498  * ipr_isr_mhrrq - Interrupt service routine
5499  * @irq:	irq number
5500  * @devp:	pointer to hrr queue struct
5501  *
5502  * Return value:
5503  *	IRQ_NONE / IRQ_HANDLED
5504  **/
5505 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5506 {
5507 	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5508 	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5509 	unsigned long hrrq_flags = 0;
5510 	struct ipr_cmnd *ipr_cmd, *temp;
5511 	irqreturn_t rc = IRQ_NONE;
5512 	LIST_HEAD(doneq);
5513 
5514 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
5515 
5516 	/* If interrupts are disabled, ignore the interrupt */
5517 	if (!hrrq->allow_interrupts) {
5518 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5519 		return IRQ_NONE;
5520 	}
5521 
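	/*
	 * When iopoll is enabled (SIS64 with multiple vectors), defer
	 * completion processing to blk_iopoll; otherwise process the
	 * queue directly in the interrupt handler.
	 */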
5522 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5523 		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5524 		       hrrq->toggle_bit) {
5525 			if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5526 				blk_iopoll_sched(&hrrq->iopoll);
5527 			spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5528 			return IRQ_HANDLED;
5529 		}
5530 	} else {
5531 		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5532 			hrrq->toggle_bit)
5533 
5534 			if (ipr_process_hrrq(hrrq, -1, &doneq))
5535 				rc =  IRQ_HANDLED;
5536 	}
5537 
5538 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5539 
5540 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5541 		list_del(&ipr_cmd->queue);
5542 		del_timer(&ipr_cmd->timer);
5543 		ipr_cmd->fast_done(ipr_cmd);
5544 	}
5545 	return rc;
5546 }
5547 
5548 /**
5549  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5550  * @ioa_cfg:	ioa config struct
5551  * @ipr_cmd:	ipr command struct
5552  *
5553  * Return value:
5554  * 	0 on success / -1 on failure
5555  **/
5556 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5557 			     struct ipr_cmnd *ipr_cmd)
5558 {
5559 	int i, nseg;
5560 	struct scatterlist *sg;
5561 	u32 length;
5562 	u32 ioadl_flags = 0;
5563 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5564 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5565 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5566 
5567 	length = scsi_bufflen(scsi_cmd);
5568 	if (!length)
5569 		return 0;
5570 
5571 	nseg = scsi_dma_map(scsi_cmd);
5572 	if (nseg < 0) {
5573 		if (printk_ratelimit())
5574 			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5575 		return -1;
5576 	}
5577 
5578 	ipr_cmd->dma_use_sg = nseg;
5579 
5580 	ioarcb->data_transfer_length = cpu_to_be32(length);
5581 	ioarcb->ioadl_len =
5582 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5583 
5584 	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5585 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5586 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5587 	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5588 		ioadl_flags = IPR_IOADL_FLAGS_READ;
5589 
5590 	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5591 		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5592 		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5593 		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5594 	}
5595 
5596 	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5597 	return 0;
5598 }
5599 
5600 /**
5601  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5602  * @ioa_cfg:	ioa config struct
5603  * @ipr_cmd:	ipr command struct
5604  *
5605  * Return value:
5606  * 	0 on success / -1 on failure
5607  **/
5608 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5609 			   struct ipr_cmnd *ipr_cmd)
5610 {
5611 	int i, nseg;
5612 	struct scatterlist *sg;
5613 	u32 length;
5614 	u32 ioadl_flags = 0;
5615 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5616 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5617 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5618 
5619 	length = scsi_bufflen(scsi_cmd);
5620 	if (!length)
5621 		return 0;
5622 
5623 	nseg = scsi_dma_map(scsi_cmd);
5624 	if (nseg < 0) {
5625 		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5626 		return -1;
5627 	}
5628 
5629 	ipr_cmd->dma_use_sg = nseg;
5630 
5631 	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5632 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5633 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5634 		ioarcb->data_transfer_length = cpu_to_be32(length);
5635 		ioarcb->ioadl_len =
5636 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5637 	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5638 		ioadl_flags = IPR_IOADL_FLAGS_READ;
5639 		ioarcb->read_data_transfer_length = cpu_to_be32(length);
5640 		ioarcb->read_ioadl_len =
5641 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5642 	}
5643 
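	/* Use the IOADL space embedded in the IOARCB if the S/G list is small enough to fit */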
5644 	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5645 		ioadl = ioarcb->u.add_data.u.ioadl;
5646 		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5647 				    offsetof(struct ipr_ioarcb, u.add_data));
5648 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5649 	}
5650 
5651 	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5652 		ioadl[i].flags_and_data_len =
5653 			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5654 		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5655 	}
5656 
5657 	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5658 	return 0;
5659 }
5660 
5661 /**
5662  * ipr_erp_done - Process completion of ERP for a device
5663  * @ipr_cmd:		ipr command struct
5664  *
5665  * This function copies the sense buffer into the scsi_cmd
5666  * struct and calls the scsi_done function.
5667  *
5668  * Return value:
5669  * 	nothing
5670  **/
5671 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5672 {
5673 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5674 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5675 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5676 
5677 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5678 		scsi_cmd->result |= (DID_ERROR << 16);
5679 		scmd_printk(KERN_ERR, scsi_cmd,
5680 			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5681 	} else {
5682 		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5683 		       SCSI_SENSE_BUFFERSIZE);
5684 	}
5685 
5686 	if (res) {
5687 		if (!ipr_is_naca_model(res))
5688 			res->needs_sync_complete = 1;
5689 		res->in_erp = 0;
5690 	}
5691 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
5692 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5693 	scsi_cmd->scsi_done(scsi_cmd);
5694 }
5695 
5696 /**
5697  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5698  * @ipr_cmd:	ipr command struct
5699  *
5700  * Return value:
5701  * 	none
5702  **/
5703 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5704 {
5705 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5706 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5707 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
5708 
5709 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5710 	ioarcb->data_transfer_length = 0;
5711 	ioarcb->read_data_transfer_length = 0;
5712 	ioarcb->ioadl_len = 0;
5713 	ioarcb->read_ioadl_len = 0;
5714 	ioasa->hdr.ioasc = 0;
5715 	ioasa->hdr.residual_data_len = 0;
5716 
5717 	if (ipr_cmd->ioa_cfg->sis64)
5718 		ioarcb->u.sis64_addr_data.data_ioadl_addr =
5719 			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5720 	else {
5721 		ioarcb->write_ioadl_addr =
5722 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5723 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5724 	}
5725 }
5726 
5727 /**
5728  * ipr_erp_request_sense - Send request sense to a device
5729  * @ipr_cmd:	ipr command struct
5730  *
5731  * This function sends a request sense to a device as a result
5732  * of a check condition.
5733  *
5734  * Return value:
5735  * 	nothing
5736  **/
5737 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5738 {
5739 	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5740 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5741 
5742 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5743 		ipr_erp_done(ipr_cmd);
5744 		return;
5745 	}
5746 
5747 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5748 
5749 	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5750 	cmd_pkt->cdb[0] = REQUEST_SENSE;
5751 	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5752 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5753 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5754 	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5755 
5756 	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5757 		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5758 
5759 	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5760 		   IPR_REQUEST_SENSE_TIMEOUT * 2);
5761 }
5762 
5763 /**
5764  * ipr_erp_cancel_all - Send cancel all to a device
5765  * @ipr_cmd:	ipr command struct
5766  *
5767  * This function sends a cancel all to a device to clear the
5768  * queue. If we are running TCQ on the device, QERR is set to 1,
5769  * which means all outstanding ops have been dropped on the floor.
5770  * Cancel all will return them to us.
5771  *
5772  * Return value:
5773  * 	nothing
5774  **/
5775 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5776 {
5777 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5778 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5779 	struct ipr_cmd_pkt *cmd_pkt;
5780 
5781 	res->in_erp = 1;
5782 
5783 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5784 
5785 	if (!scsi_get_tag_type(scsi_cmd->device)) {
5786 		ipr_erp_request_sense(ipr_cmd);
5787 		return;
5788 	}
5789 
5790 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5791 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5792 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5793 
5794 	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5795 		   IPR_CANCEL_ALL_TIMEOUT);
5796 }
5797 
5798 /**
5799  * ipr_dump_ioasa - Dump contents of IOASA
5800  * @ioa_cfg:	ioa config struct
5801  * @ipr_cmd:	ipr command struct
5802  * @res:		resource entry struct
5803  *
5804  * This function is invoked by the interrupt handler when ops
5805  * fail. It will log the IOASA if appropriate. Only called
5806  * for GPDD ops.
5807  *
5808  * Return value:
5809  * 	none
5810  **/
5811 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5812 			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5813 {
5814 	int i;
5815 	u16 data_len;
5816 	u32 ioasc, fd_ioasc;
5817 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5818 	__be32 *ioasa_data = (__be32 *)ioasa;
5819 	int error_index;
5820 
5821 	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5822 	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5823 
5824 	if (0 == ioasc)
5825 		return;
5826 
5827 	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5828 		return;
5829 
5830 	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5831 		error_index = ipr_get_error(fd_ioasc);
5832 	else
5833 		error_index = ipr_get_error(ioasc);
5834 
5835 	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5836 		/* Don't log an error if the IOA already logged one */
5837 		if (ioasa->hdr.ilid != 0)
5838 			return;
5839 
5840 		if (!ipr_is_gscsi(res))
5841 			return;
5842 
5843 		if (ipr_error_table[error_index].log_ioasa == 0)
5844 			return;
5845 	}
5846 
5847 	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5848 
5849 	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5850 	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5851 		data_len = sizeof(struct ipr_ioasa64);
5852 	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5853 		data_len = sizeof(struct ipr_ioasa);
5854 
5855 	ipr_err("IOASA Dump:\n");
5856 
5857 	for (i = 0; i < data_len / 4; i += 4) {
5858 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5859 			be32_to_cpu(ioasa_data[i]),
5860 			be32_to_cpu(ioasa_data[i+1]),
5861 			be32_to_cpu(ioasa_data[i+2]),
5862 			be32_to_cpu(ioasa_data[i+3]));
5863 	}
5864 }
5865 
5866 /**
5867  * ipr_gen_sense - Generate SCSI sense data from an IOASA
5868  * @ipr_cmd:	ipr command struct
5870  *
5871  * Return value:
5872  * 	none
5873  **/
5874 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5875 {
5876 	u32 failing_lba;
5877 	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5878 	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5879 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5880 	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5881 
5882 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5883 
5884 	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5885 		return;
5886 
5887 	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5888 
5889 	if (ipr_is_vset_device(res) &&
5890 	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5891 	    ioasa->u.vset.failing_lba_hi != 0) {
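		/* Descriptor format sense data (0x72) to carry a 64-bit failing LBA */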
5892 		sense_buf[0] = 0x72;
5893 		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5894 		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5895 		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5896 
5897 		sense_buf[7] = 12;
5898 		sense_buf[8] = 0;
5899 		sense_buf[9] = 0x0A;
5900 		sense_buf[10] = 0x80;
5901 
5902 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5903 
5904 		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5905 		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5906 		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5907 		sense_buf[15] = failing_lba & 0x000000ff;
5908 
5909 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5910 
5911 		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5912 		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5913 		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5914 		sense_buf[19] = failing_lba & 0x000000ff;
5915 	} else {
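		/* Fixed format sense data (0x70) */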
5916 		sense_buf[0] = 0x70;
5917 		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5918 		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5919 		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5920 
5921 		/* Illegal request */
5922 		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5923 		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5924 			sense_buf[7] = 10;	/* additional length */
5925 
5926 			/* IOARCB was in error */
5927 			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5928 				sense_buf[15] = 0xC0;
5929 			else	/* Parameter data was invalid */
5930 				sense_buf[15] = 0x80;
5931 
5932 			sense_buf[16] =
5933 			    ((IPR_FIELD_POINTER_MASK &
5934 			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5935 			sense_buf[17] =
5936 			    (IPR_FIELD_POINTER_MASK &
5937 			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5938 		} else {
5939 			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5940 				if (ipr_is_vset_device(res))
5941 					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5942 				else
5943 					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5944 
5945 				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
5946 				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5947 				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5948 				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5949 				sense_buf[6] = failing_lba & 0x000000ff;
5950 			}
5951 
5952 			sense_buf[7] = 6;	/* additional length */
5953 		}
5954 	}
5955 }
5956 
5957 /**
5958  * ipr_get_autosense - Copy autosense data to sense buffer
5959  * @ipr_cmd:	ipr command struct
5960  *
5961  * This function copies the autosense buffer to the buffer
5962  * in the scsi_cmd, if there is autosense available.
5963  *
5964  * Return value:
5965  *	1 if autosense was available / 0 if not
5966  **/
5967 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5968 {
5969 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5970 	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
5971 
5972 	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5973 		return 0;
5974 
5975 	if (ipr_cmd->ioa_cfg->sis64)
5976 		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5977 		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5978 			   SCSI_SENSE_BUFFERSIZE));
5979 	else
5980 		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5981 		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5982 			   SCSI_SENSE_BUFFERSIZE));
5983 	return 1;
5984 }
5985 
5986 /**
5987  * ipr_erp_start - Process an error response for a SCSI op
5988  * @ioa_cfg:	ioa config struct
5989  * @ipr_cmd:	ipr command struct
5990  *
5991  * This function determines whether or not to initiate ERP
5992  * on the affected device.
5993  *
5994  * Return value:
5995  * 	nothing
5996  **/
5997 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5998 			      struct ipr_cmnd *ipr_cmd)
5999 {
6000 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6001 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6002 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6003 	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6004 
6005 	if (!res) {
6006 		ipr_scsi_eh_done(ipr_cmd);
6007 		return;
6008 	}
6009 
6010 	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6011 		ipr_gen_sense(ipr_cmd);
6012 
6013 	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6014 
6015 	switch (masked_ioasc) {
6016 	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6017 		if (ipr_is_naca_model(res))
6018 			scsi_cmd->result |= (DID_ABORT << 16);
6019 		else
6020 			scsi_cmd->result |= (DID_IMM_RETRY << 16);
6021 		break;
6022 	case IPR_IOASC_IR_RESOURCE_HANDLE:
6023 	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6024 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
6025 		break;
6026 	case IPR_IOASC_HW_SEL_TIMEOUT:
6027 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
6028 		if (!ipr_is_naca_model(res))
6029 			res->needs_sync_complete = 1;
6030 		break;
6031 	case IPR_IOASC_SYNC_REQUIRED:
6032 		if (!res->in_erp)
6033 			res->needs_sync_complete = 1;
6034 		scsi_cmd->result |= (DID_IMM_RETRY << 16);
6035 		break;
6036 	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6037 	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6038 		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6039 		break;
6040 	case IPR_IOASC_BUS_WAS_RESET:
6041 	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6042 		/*
6043 		 * Report the bus reset and ask for a retry. The device
6044 		 * will return CC/UA on the next command.
6045 		 */
6046 		if (!res->resetting_device)
6047 			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6048 		scsi_cmd->result |= (DID_ERROR << 16);
6049 		if (!ipr_is_naca_model(res))
6050 			res->needs_sync_complete = 1;
6051 		break;
6052 	case IPR_IOASC_HW_DEV_BUS_STATUS:
6053 		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6054 		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6055 			if (!ipr_get_autosense(ipr_cmd)) {
6056 				if (!ipr_is_naca_model(res)) {
6057 					ipr_erp_cancel_all(ipr_cmd);
6058 					return;
6059 				}
6060 			}
6061 		}
6062 		if (!ipr_is_naca_model(res))
6063 			res->needs_sync_complete = 1;
6064 		break;
6065 	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6066 		break;
6067 	default:
6068 		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6069 			scsi_cmd->result |= (DID_ERROR << 16);
6070 		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6071 			res->needs_sync_complete = 1;
6072 		break;
6073 	}
6074 
6075 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
6076 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6077 	scsi_cmd->scsi_done(scsi_cmd);
6078 }
6079 
6080 /**
6081  * ipr_scsi_done - mid-layer done function
6082  * @ipr_cmd:	ipr command struct
6083  *
6084  * This function is invoked by the interrupt handler for
6085  * ops generated by the SCSI mid-layer
6086  *
6087  * Return value:
6088  * 	none
6089  **/
6090 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6091 {
6092 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6093 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6094 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6095 	unsigned long hrrq_flags;
6096 
6097 	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6098 
6099 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6100 		scsi_dma_unmap(scsi_cmd);
6101 
6102 		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6103 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6104 		scsi_cmd->scsi_done(scsi_cmd);
6105 		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6106 	} else {
6107 		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6108 		ipr_erp_start(ioa_cfg, ipr_cmd);
6109 		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6110 	}
6111 }
6112 
6113 /**
6114  * ipr_queuecommand - Queue a mid-layer request
6115  * @shost:		scsi host struct
6116  * @scsi_cmd:	scsi command struct
6117  *
6118  * This function queues a request generated by the mid-layer.
6119  *
6120  * Return value:
6121  *	0 on success
6122  *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6123  *	SCSI_MLQUEUE_HOST_BUSY if host is busy
6124  **/
6125 static int ipr_queuecommand(struct Scsi_Host *shost,
6126 			    struct scsi_cmnd *scsi_cmd)
6127 {
6128 	struct ipr_ioa_cfg *ioa_cfg;
6129 	struct ipr_resource_entry *res;
6130 	struct ipr_ioarcb *ioarcb;
6131 	struct ipr_cmnd *ipr_cmd;
6132 	unsigned long hrrq_flags, lock_flags;
6133 	int rc;
6134 	struct ipr_hrr_queue *hrrq;
6135 	int hrrq_id;
6136 
6137 	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6138 
6139 	scsi_cmd->result = (DID_OK << 16);
6140 	res = scsi_cmd->device->hostdata;
6141 
6142 	if (ipr_is_gata(res) && res->sata_port) {
6143 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6144 		rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6145 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6146 		return rc;
6147 	}
6148 
6149 	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6150 	hrrq = &ioa_cfg->hrrq[hrrq_id];
6151 
6152 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
6153 	/*
6154 	 * We are currently blocking all devices due to a host reset
6155 	 * We have told the host to stop giving us new requests, but
6156 	 * ERP ops don't count. FIXME
6157 	 */
6158 	if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6159 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6160 		return SCSI_MLQUEUE_HOST_BUSY;
6161 	}
6162 
6163 	/*
6164 	 * FIXME - Create scsi_set_host_offline interface
6165 	 *  and the ioa_is_dead check can be removed
6166 	 */
6167 	if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6168 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6169 		goto err_nodev;
6170 	}
6171 
6172 	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6173 	if (ipr_cmd == NULL) {
6174 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6175 		return SCSI_MLQUEUE_HOST_BUSY;
6176 	}
6177 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6178 
6179 	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6180 	ioarcb = &ipr_cmd->ioarcb;
6181 
6182 	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6183 	ipr_cmd->scsi_cmd = scsi_cmd;
6184 	ipr_cmd->done = ipr_scsi_eh_done;
6185 
6186 	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6187 		if (scsi_cmd->underflow == 0)
6188 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6189 
6190 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6191 		if (ipr_is_gscsi(res) && res->reset_occurred) {
6192 			res->reset_occurred = 0;
6193 			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6194 		}
6195 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6196 		if (scsi_cmd->flags & SCMD_TAGGED)
6197 			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6198 		else
6199 			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6200 	}
6201 
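	/*
	 * Vendor unique CDBs (opcodes 0xC0 and above) are issued as IOA
	 * commands unless the target is a generic SCSI device, in which
	 * case only IPR_QUERY_RSRC_STATE is treated this way.
	 */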
6202 	if (scsi_cmd->cmnd[0] >= 0xC0 &&
6203 	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6204 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6205 	}
6206 
6207 	if (ioa_cfg->sis64)
6208 		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6209 	else
6210 		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6211 
6212 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
6213 	if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6214 		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6215 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6216 		if (!rc)
6217 			scsi_dma_unmap(scsi_cmd);
6218 		return SCSI_MLQUEUE_HOST_BUSY;
6219 	}
6220 
6221 	if (unlikely(hrrq->ioa_is_dead)) {
6222 		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6223 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6224 		scsi_dma_unmap(scsi_cmd);
6225 		goto err_nodev;
6226 	}
6227 
6228 	ioarcb->res_handle = res->res_handle;
6229 	if (res->needs_sync_complete) {
6230 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6231 		res->needs_sync_complete = 0;
6232 	}
6233 	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6234 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6235 	ipr_send_command(ipr_cmd);
6236 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6237 	return 0;
6238 
6239 err_nodev:
6240 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
6241 	memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6242 	scsi_cmd->result = (DID_NO_CONNECT << 16);
6243 	scsi_cmd->scsi_done(scsi_cmd);
6244 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6245 	return 0;
6246 }
6247 
6248 /**
6249  * ipr_ioctl - IOCTL handler
6250  * @sdev:	scsi device struct
6251  * @cmd:	IOCTL cmd
6252  * @arg:	IOCTL arg
6253  *
6254  * Return value:
6255  * 	0 on success / other on failure
6256  **/
6257 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6258 {
6259 	struct ipr_resource_entry *res;
6260 
6261 	res = (struct ipr_resource_entry *)sdev->hostdata;
6262 	if (res && ipr_is_gata(res)) {
6263 		if (cmd == HDIO_GET_IDENTITY)
6264 			return -ENOTTY;
6265 		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6266 	}
6267 
6268 	return -EINVAL;
6269 }
6270 
6271 /**
6272  * ipr_ioa_info - Get information about the card/driver
6273  * @host:	scsi host struct
6274  *
6275  * Return value:
6276  * 	pointer to buffer with description string
6277  **/
6278 static const char *ipr_ioa_info(struct Scsi_Host *host)
6279 {
6280 	static char buffer[512];
6281 	struct ipr_ioa_cfg *ioa_cfg;
6282 	unsigned long lock_flags = 0;
6283 
6284 	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6285 
6286 	spin_lock_irqsave(host->host_lock, lock_flags);
6287 	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6288 	spin_unlock_irqrestore(host->host_lock, lock_flags);
6289 
6290 	return buffer;
6291 }
6292 
6293 static struct scsi_host_template driver_template = {
6294 	.module = THIS_MODULE,
6295 	.name = "IPR",
6296 	.info = ipr_ioa_info,
6297 	.ioctl = ipr_ioctl,
6298 	.queuecommand = ipr_queuecommand,
6299 	.eh_abort_handler = ipr_eh_abort,
6300 	.eh_device_reset_handler = ipr_eh_dev_reset,
6301 	.eh_host_reset_handler = ipr_eh_host_reset,
6302 	.slave_alloc = ipr_slave_alloc,
6303 	.slave_configure = ipr_slave_configure,
6304 	.slave_destroy = ipr_slave_destroy,
6305 	.target_alloc = ipr_target_alloc,
6306 	.target_destroy = ipr_target_destroy,
6307 	.change_queue_depth = ipr_change_queue_depth,
6308 	.change_queue_type = ipr_change_queue_type,
6309 	.bios_param = ipr_biosparam,
6310 	.can_queue = IPR_MAX_COMMANDS,
6311 	.this_id = -1,
6312 	.sg_tablesize = IPR_MAX_SGLIST,
6313 	.max_sectors = IPR_IOA_MAX_SECTORS,
6314 	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6315 	.use_clustering = ENABLE_CLUSTERING,
6316 	.shost_attrs = ipr_ioa_attrs,
6317 	.sdev_attrs = ipr_dev_attrs,
6318 	.proc_name = IPR_NAME,
6319 	.no_write_same = 1,
6320 };
6321 
6322 /**
6323  * ipr_ata_phy_reset - libata phy_reset handler
6324  * @ap:		ata port to reset
6325  *
6326  **/
6327 static void ipr_ata_phy_reset(struct ata_port *ap)
6328 {
6329 	unsigned long flags;
6330 	struct ipr_sata_port *sata_port = ap->private_data;
6331 	struct ipr_resource_entry *res = sata_port->res;
6332 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6333 	int rc;
6334 
6335 	ENTER;
6336 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6337 	while (ioa_cfg->in_reset_reload) {
6338 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6339 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6340 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6341 	}
6342 
6343 	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6344 		goto out_unlock;
6345 
6346 	rc = ipr_device_reset(ioa_cfg, res);
6347 
6348 	if (rc) {
6349 		ap->link.device[0].class = ATA_DEV_NONE;
6350 		goto out_unlock;
6351 	}
6352 
6353 	ap->link.device[0].class = res->ata_class;
6354 	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6355 		ap->link.device[0].class = ATA_DEV_NONE;
6356 
6357 out_unlock:
6358 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6359 	LEAVE;
6360 }
6361 
6362 /**
6363  * ipr_ata_post_internal - Cleanup after an internal command
6364  * @qc:	ATA queued command
6365  *
6366  * Return value:
6367  * 	none
6368  **/
6369 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6370 {
6371 	struct ipr_sata_port *sata_port = qc->ap->private_data;
6372 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6373 	struct ipr_cmnd *ipr_cmd;
6374 	struct ipr_hrr_queue *hrrq;
6375 	unsigned long flags;
6376 
6377 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6378 	while (ioa_cfg->in_reset_reload) {
6379 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6380 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6381 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6382 	}
6383 
6384 	for_each_hrrq(hrrq, ioa_cfg) {
6385 		spin_lock(&hrrq->_lock);
6386 		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6387 			if (ipr_cmd->qc == qc) {
6388 				ipr_device_reset(ioa_cfg, sata_port->res);
6389 				break;
6390 			}
6391 		}
6392 		spin_unlock(&hrrq->_lock);
6393 	}
6394 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6395 }
6396 
6397 /**
6398  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6399  * @regs:	destination
6400  * @tf:	source ATA taskfile
6401  *
6402  * Return value:
6403  * 	none
6404  **/
6405 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6406 			     struct ata_taskfile *tf)
6407 {
6408 	regs->feature = tf->feature;
6409 	regs->nsect = tf->nsect;
6410 	regs->lbal = tf->lbal;
6411 	regs->lbam = tf->lbam;
6412 	regs->lbah = tf->lbah;
6413 	regs->device = tf->device;
6414 	regs->command = tf->command;
6415 	regs->hob_feature = tf->hob_feature;
6416 	regs->hob_nsect = tf->hob_nsect;
6417 	regs->hob_lbal = tf->hob_lbal;
6418 	regs->hob_lbam = tf->hob_lbam;
6419 	regs->hob_lbah = tf->hob_lbah;
6420 	regs->ctl = tf->ctl;
6421 }
6422 
6423 /**
6424  * ipr_sata_done - done function for SATA commands
6425  * @ipr_cmd:	ipr command struct
6426  *
6427  * This function is invoked by the interrupt handler for
6428  * ops generated by the SCSI mid-layer to SATA devices
6429  *
6430  * Return value:
6431  * 	none
6432  **/
6433 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6434 {
6435 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6436 	struct ata_queued_cmd *qc = ipr_cmd->qc;
6437 	struct ipr_sata_port *sata_port = qc->ap->private_data;
6438 	struct ipr_resource_entry *res = sata_port->res;
6439 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6440 
6441 	spin_lock(&ipr_cmd->hrrq->_lock);
6442 	if (ipr_cmd->ioa_cfg->sis64)
6443 		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6444 		       sizeof(struct ipr_ioasa_gata));
6445 	else
6446 		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6447 		       sizeof(struct ipr_ioasa_gata));
6448 	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6449 
6450 	if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6451 		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6452 
6453 	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6454 		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6455 	else
6456 		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6457 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6458 	spin_unlock(&ipr_cmd->hrrq->_lock);
6459 	ata_qc_complete(qc);
6460 }
6461 
6462 /**
6463  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6464  * @ipr_cmd:	ipr command struct
6465  * @qc:		ATA queued command
6466  *
6467  **/
6468 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6469 				  struct ata_queued_cmd *qc)
6470 {
6471 	u32 ioadl_flags = 0;
6472 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6473 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6474 	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6475 	int len = qc->nbytes;
6476 	struct scatterlist *sg;
6477 	unsigned int si;
6478 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
6479 
6480 	if (len == 0)
6481 		return;
6482 
6483 	if (qc->dma_dir == DMA_TO_DEVICE) {
6484 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6485 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6486 	} else if (qc->dma_dir == DMA_FROM_DEVICE)
6487 		ioadl_flags = IPR_IOADL_FLAGS_READ;
6488 
6489 	ioarcb->data_transfer_length = cpu_to_be32(len);
6490 	ioarcb->ioadl_len =
6491 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6492 	ioarcb->u.sis64_addr_data.data_ioadl_addr =
6493 		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6494 
6495 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
6496 		ioadl64->flags = cpu_to_be32(ioadl_flags);
6497 		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6498 		ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6499 
6500 		last_ioadl64 = ioadl64;
6501 		ioadl64++;
6502 	}
6503 
6504 	if (likely(last_ioadl64))
6505 		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6506 }
6507 
6508 /**
6509  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6510  * @ipr_cmd:	ipr command struct
6511  * @qc:		ATA queued command
6512  *
6513  **/
6514 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6515 				struct ata_queued_cmd *qc)
6516 {
6517 	u32 ioadl_flags = 0;
6518 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6519 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6520 	struct ipr_ioadl_desc *last_ioadl = NULL;
6521 	int len = qc->nbytes;
6522 	struct scatterlist *sg;
6523 	unsigned int si;
6524 
6525 	if (len == 0)
6526 		return;
6527 
6528 	if (qc->dma_dir == DMA_TO_DEVICE) {
6529 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6530 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6531 		ioarcb->data_transfer_length = cpu_to_be32(len);
6532 		ioarcb->ioadl_len =
6533 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6534 	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
6535 		ioadl_flags = IPR_IOADL_FLAGS_READ;
6536 		ioarcb->read_data_transfer_length = cpu_to_be32(len);
6537 		ioarcb->read_ioadl_len =
6538 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6539 	}
6540 
6541 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
6542 		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6543 		ioadl->address = cpu_to_be32(sg_dma_address(sg));
6544 
6545 		last_ioadl = ioadl;
6546 		ioadl++;
6547 	}
6548 
6549 	if (likely(last_ioadl))
6550 		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6551 }
6552 
6553 /**
6554  * ipr_qc_defer - Get a free ipr_cmd
6555  * @qc:	queued command
6556  *
6557  * Return value:
6558  *	0 on success / ATA_DEFER_LINK if the command must be deferred
6559  **/
6560 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6561 {
6562 	struct ata_port *ap = qc->ap;
6563 	struct ipr_sata_port *sata_port = ap->private_data;
6564 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6565 	struct ipr_cmnd *ipr_cmd;
6566 	struct ipr_hrr_queue *hrrq;
6567 	int hrrq_id;
6568 
6569 	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6570 	hrrq = &ioa_cfg->hrrq[hrrq_id];
6571 
6572 	qc->lldd_task = NULL;
6573 	spin_lock(&hrrq->_lock);
6574 	if (unlikely(hrrq->ioa_is_dead)) {
6575 		spin_unlock(&hrrq->_lock);
6576 		return 0;
6577 	}
6578 
6579 	if (unlikely(!hrrq->allow_cmds)) {
6580 		spin_unlock(&hrrq->_lock);
6581 		return ATA_DEFER_LINK;
6582 	}
6583 
6584 	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6585 	if (ipr_cmd == NULL) {
6586 		spin_unlock(&hrrq->_lock);
6587 		return ATA_DEFER_LINK;
6588 	}
6589 
6590 	qc->lldd_task = ipr_cmd;
6591 	spin_unlock(&hrrq->_lock);
6592 	return 0;
6593 }
6594 
6595 /**
6596  * ipr_qc_issue - Issue a SATA qc to a device
6597  * @qc:	queued command
6598  *
6599  * Return value:
6600  * 	0 on success / AC_ERR_* value on failure
6601  **/
6602 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6603 {
6604 	struct ata_port *ap = qc->ap;
6605 	struct ipr_sata_port *sata_port = ap->private_data;
6606 	struct ipr_resource_entry *res = sata_port->res;
6607 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6608 	struct ipr_cmnd *ipr_cmd;
6609 	struct ipr_ioarcb *ioarcb;
6610 	struct ipr_ioarcb_ata_regs *regs;
6611 
6612 	if (qc->lldd_task == NULL)
6613 		ipr_qc_defer(qc);
6614 
6615 	ipr_cmd = qc->lldd_task;
6616 	if (ipr_cmd == NULL)
6617 		return AC_ERR_SYSTEM;
6618 
6619 	qc->lldd_task = NULL;
6620 	spin_lock(&ipr_cmd->hrrq->_lock);
6621 	if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6622 			ipr_cmd->hrrq->ioa_is_dead)) {
6623 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6624 		spin_unlock(&ipr_cmd->hrrq->_lock);
6625 		return AC_ERR_SYSTEM;
6626 	}
6627 
6628 	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6629 	ioarcb = &ipr_cmd->ioarcb;
6630 
6631 	if (ioa_cfg->sis64) {
6632 		regs = &ipr_cmd->i.ata_ioadl.regs;
6633 		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6634 	} else
6635 		regs = &ioarcb->u.add_data.u.regs;
6636 
6637 	memset(regs, 0, sizeof(*regs));
6638 	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6639 
6640 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6641 	ipr_cmd->qc = qc;
6642 	ipr_cmd->done = ipr_sata_done;
6643 	ipr_cmd->ioarcb.res_handle = res->res_handle;
6644 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6645 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6646 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6647 	ipr_cmd->dma_use_sg = qc->n_elem;
6648 
6649 	if (ioa_cfg->sis64)
6650 		ipr_build_ata_ioadl64(ipr_cmd, qc);
6651 	else
6652 		ipr_build_ata_ioadl(ipr_cmd, qc);
6653 
6654 	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6655 	ipr_copy_sata_tf(regs, &qc->tf);
6656 	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6657 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6658 
6659 	switch (qc->tf.protocol) {
6660 	case ATA_PROT_NODATA:
6661 	case ATA_PROT_PIO:
6662 		break;
6663 
6664 	case ATA_PROT_DMA:
6665 		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6666 		break;
6667 
6668 	case ATAPI_PROT_PIO:
6669 	case ATAPI_PROT_NODATA:
6670 		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6671 		break;
6672 
6673 	case ATAPI_PROT_DMA:
6674 		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6675 		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6676 		break;
6677 
6678 	default:
6679 		WARN_ON(1);
6680 		spin_unlock(&ipr_cmd->hrrq->_lock);
6681 		return AC_ERR_INVALID;
6682 	}
6683 
6684 	ipr_send_command(ipr_cmd);
6685 	spin_unlock(&ipr_cmd->hrrq->_lock);
6686 
6687 	return 0;
6688 }
6689 
6690 /**
6691  * ipr_qc_fill_rtf - Read result TF
6692  * @qc: ATA queued command
6693  *
6694  * Return value:
6695  * 	true
6696  **/
6697 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6698 {
6699 	struct ipr_sata_port *sata_port = qc->ap->private_data;
6700 	struct ipr_ioasa_gata *g = &sata_port->ioasa;
6701 	struct ata_taskfile *tf = &qc->result_tf;
6702 
6703 	tf->feature = g->error;
6704 	tf->nsect = g->nsect;
6705 	tf->lbal = g->lbal;
6706 	tf->lbam = g->lbam;
6707 	tf->lbah = g->lbah;
6708 	tf->device = g->device;
6709 	tf->command = g->status;
6710 	tf->hob_nsect = g->hob_nsect;
6711 	tf->hob_lbal = g->hob_lbal;
6712 	tf->hob_lbam = g->hob_lbam;
6713 	tf->hob_lbah = g->hob_lbah;
6714 
6715 	return true;
6716 }
6717 
6718 static struct ata_port_operations ipr_sata_ops = {
6719 	.phy_reset = ipr_ata_phy_reset,
6720 	.hardreset = ipr_sata_reset,
6721 	.post_internal_cmd = ipr_ata_post_internal,
6722 	.qc_prep = ata_noop_qc_prep,
6723 	.qc_defer = ipr_qc_defer,
6724 	.qc_issue = ipr_qc_issue,
6725 	.qc_fill_rtf = ipr_qc_fill_rtf,
6726 	.port_start = ata_sas_port_start,
6727 	.port_stop = ata_sas_port_stop
6728 };
6729 
6730 static struct ata_port_info sata_port_info = {
6731 	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6732 	.pio_mask	= ATA_PIO4_ONLY,
6733 	.mwdma_mask	= ATA_MWDMA2,
6734 	.udma_mask	= ATA_UDMA6,
6735 	.port_ops	= &ipr_sata_ops
6736 };
6737 
6738 #ifdef CONFIG_PPC_PSERIES
6739 static const u16 ipr_blocked_processors[] = {
6740 	PVR_NORTHSTAR,
6741 	PVR_PULSAR,
6742 	PVR_POWER4,
6743 	PVR_ICESTAR,
6744 	PVR_SSTAR,
6745 	PVR_POWER4p,
6746 	PVR_630,
6747 	PVR_630p
6748 };
6749 
6750 /**
6751  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6752  * @ioa_cfg:	ioa cfg struct
6753  *
6754  * Adapters that use Gemstone revision < 3.1 do not work reliably on
6755  * certain pSeries hardware. This function determines if the given
6756  * adapter is in one of these configurations or not.
6757  *
6758  * Return value:
6759  * 	1 if adapter is not supported / 0 if adapter is supported
6760  **/
6761 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6762 {
6763 	int i;
6764 
6765 	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6766 		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6767 			if (pvr_version_is(ipr_blocked_processors[i]))
6768 				return 1;
6769 		}
6770 	}
6771 	return 0;
6772 }
6773 #else
6774 #define ipr_invalid_adapter(ioa_cfg) 0
6775 #endif
6776 
6777 /**
6778  * ipr_ioa_bringdown_done - IOA bring down completion.
6779  * @ipr_cmd:	ipr command struct
6780  *
6781  * This function processes the completion of an adapter bring down.
6782  * It wakes any reset sleepers.
6783  *
6784  * Return value:
6785  * 	IPR_RC_JOB_RETURN
6786  **/
6787 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6788 {
6789 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6790 	int i;
6791 
6792 	ENTER;
6793 	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6794 		ipr_trace;
6795 		spin_unlock_irq(ioa_cfg->host->host_lock);
6796 		scsi_unblock_requests(ioa_cfg->host);
6797 		spin_lock_irq(ioa_cfg->host->host_lock);
6798 	}
6799 
6800 	ioa_cfg->in_reset_reload = 0;
6801 	ioa_cfg->reset_retries = 0;
6802 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6803 		spin_lock(&ioa_cfg->hrrq[i]._lock);
6804 		ioa_cfg->hrrq[i].ioa_is_dead = 1;
6805 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
6806 	}
6807 	wmb();
6808 
6809 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6810 	wake_up_all(&ioa_cfg->reset_wait_q);
6811 	LEAVE;
6812 
6813 	return IPR_RC_JOB_RETURN;
6814 }
6815 
6816 /**
6817  * ipr_ioa_reset_done - IOA reset completion.
6818  * @ipr_cmd:	ipr command struct
6819  *
6820  * This function processes the completion of an adapter reset.
6821  * It schedules any necessary mid-layer add/removes and
6822  * wakes any reset sleepers.
6823  *
6824  * Return value:
6825  * 	IPR_RC_JOB_RETURN
6826  **/
6827 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6828 {
6829 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6830 	struct ipr_resource_entry *res;
6831 	struct ipr_hostrcb *hostrcb, *temp;
6832 	int i = 0, j;
6833 
6834 	ENTER;
6835 	ioa_cfg->in_reset_reload = 0;
6836 	for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6837 		spin_lock(&ioa_cfg->hrrq[j]._lock);
6838 		ioa_cfg->hrrq[j].allow_cmds = 1;
6839 		spin_unlock(&ioa_cfg->hrrq[j]._lock);
6840 	}
6841 	wmb();
6842 	ioa_cfg->reset_cmd = NULL;
6843 	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6844 
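	/*
	 * One matching resource is enough to know the worker has mid-layer
	 * adds/removes to process; the worker is scheduled below either way.
	 */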
6845 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6846 		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6847 			ipr_trace;
6848 			break;
6849 		}
6850 	}
6851 	schedule_work(&ioa_cfg->work_q);
6852 
6853 	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6854 		list_del(&hostrcb->queue);
6855 		if (i++ < IPR_NUM_LOG_HCAMS)
6856 			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6857 		else
6858 			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6859 	}
6860 
6861 	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6862 	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6863 
6864 	ioa_cfg->reset_retries = 0;
6865 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6866 	wake_up_all(&ioa_cfg->reset_wait_q);
6867 
6868 	spin_unlock(ioa_cfg->host->host_lock);
6869 	scsi_unblock_requests(ioa_cfg->host);
6870 	spin_lock(ioa_cfg->host->host_lock);
6871 
6872 	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6873 		scsi_block_requests(ioa_cfg->host);
6874 
6875 	LEAVE;
6876 	return IPR_RC_JOB_RETURN;
6877 }
6878 
6879 /**
6880  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6881  * @supported_dev:	supported device struct
6882  * @vpids:			vendor product id struct
6883  *
6884  * Return value:
6885  * 	none
6886  **/
6887 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6888 				 struct ipr_std_inq_vpids *vpids)
6889 {
6890 	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6891 	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6892 	supported_dev->num_records = 1;
6893 	supported_dev->data_length =
6894 		cpu_to_be16(sizeof(struct ipr_supported_device));
6895 	supported_dev->reserved = 0;
6896 }
6897 
6898 /**
6899  * ipr_set_supported_devs - Send Set Supported Devices for a device
6900  * @ipr_cmd:	ipr command struct
6901  *
6902  * This function sends a Set Supported Devices to the adapter
6903  *
6904  * Return value:
6905  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6906  **/
6907 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6908 {
6909 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6910 	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6911 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6912 	struct ipr_resource_entry *res = ipr_cmd->u.res;
6913 
6914 	ipr_cmd->job_step = ipr_ioa_reset_done;
6915 
6916 	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6917 		if (!ipr_is_scsi_disk(res))
6918 			continue;
6919 
6920 		ipr_cmd->u.res = res;
6921 		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6922 
6923 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6924 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6925 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6926 
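		/*
		 * CDB bytes 7-8 carry the big-endian length of the supported
		 * device record that is DMA'd to the adapter below.
		 */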
6927 		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6928 		ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6929 		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6930 		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6931 
6932 		ipr_init_ioadl(ipr_cmd,
6933 			       ioa_cfg->vpd_cbs_dma +
6934 				 offsetof(struct ipr_misc_cbs, supp_dev),
6935 			       sizeof(struct ipr_supported_device),
6936 			       IPR_IOADL_FLAGS_WRITE_LAST);
6937 
6938 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6939 			   IPR_SET_SUP_DEVICE_TIMEOUT);
6940 
6941 		if (!ioa_cfg->sis64)
6942 			ipr_cmd->job_step = ipr_set_supported_devs;
6943 		LEAVE;
6944 		return IPR_RC_JOB_RETURN;
6945 	}
6946 
6947 	LEAVE;
6948 	return IPR_RC_JOB_CONTINUE;
6949 }
6950 
6951 /**
6952  * ipr_get_mode_page - Locate specified mode page
6953  * @mode_pages:	mode page buffer
6954  * @page_code:	page code to find
6955  * @len:		minimum required length for mode page
6956  *
6957  * Return value:
6958  * 	pointer to mode page / NULL on failure
6959  **/
6960 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6961 			       u32 page_code, u32 len)
6962 {
6963 	struct ipr_mode_page_hdr *mode_hdr;
6964 	u32 page_length;
6965 	u32 length;
6966 
6967 	if (!mode_pages || (mode_pages->hdr.length == 0))
6968 		return NULL;
6969 
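	/*
	 * hdr.length does not include the length byte itself, so add one to
	 * get the full mode data length, then subtract the 4-byte parameter
	 * header and any block descriptors to leave just the mode page data.
	 */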
6970 	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6971 	mode_hdr = (struct ipr_mode_page_hdr *)
6972 		(mode_pages->data + mode_pages->hdr.block_desc_len);
6973 
6974 	while (length) {
6975 		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6976 			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6977 				return mode_hdr;
6978 			break;
6979 		} else {
6980 			page_length = (sizeof(struct ipr_mode_page_hdr) +
6981 				       mode_hdr->page_length);
6982 			length -= page_length;
6983 			mode_hdr = (struct ipr_mode_page_hdr *)
6984 				((unsigned long)mode_hdr + page_length);
6985 		}
6986 	}
6987 	return NULL;
6988 }
6989 
6990 /**
6991  * ipr_check_term_power - Check for term power errors
6992  * @ioa_cfg:	ioa config struct
6993  * @mode_pages:	IOAFP mode pages buffer
6994  *
6995  * Check the IOAFP's mode page 28 for term power errors
6996  *
6997  * Return value:
6998  * 	nothing
6999  **/
7000 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7001 				 struct ipr_mode_pages *mode_pages)
7002 {
7003 	int i;
7004 	int entry_length;
7005 	struct ipr_dev_bus_entry *bus;
7006 	struct ipr_mode_page28 *mode_page;
7007 
7008 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
7009 				      sizeof(struct ipr_mode_page28));
7010 
7011 	entry_length = mode_page->entry_length;
7012 
7013 	bus = mode_page->bus;
7014 
7015 	for (i = 0; i < mode_page->num_entries; i++) {
7016 		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7017 			dev_err(&ioa_cfg->pdev->dev,
7018 				"Term power is absent on scsi bus %d\n",
7019 				bus->res_addr.bus);
7020 		}
7021 
7022 		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7023 	}
7024 }
7025 
7026 /**
7027  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7028  * @ioa_cfg:	ioa config struct
7029  *
7030  * Looks through the config table for SES devices. If an SES
7031  * device appears in the SES table with a maximum SCSI bus
7032  * speed, that bus is limited to the indicated speed.
7033  *
7034  * Return value:
7035  * 	none
7036  **/
7037 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7038 {
7039 	u32 max_xfer_rate;
7040 	int i;
7041 
7042 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7043 		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7044 						       ioa_cfg->bus_attr[i].bus_width);
7045 
7046 		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7047 			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7048 	}
7049 }
7050 
7051 /**
7052  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7053  * @ioa_cfg:	ioa config struct
7054  * @mode_pages:	mode page 28 buffer
7055  *
7056  * Updates mode page 28 based on driver configuration
7057  *
7058  * Return value:
7059  * 	none
7060  **/
7061 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7062 					  struct ipr_mode_pages *mode_pages)
7063 {
7064 	int i, entry_length;
7065 	struct ipr_dev_bus_entry *bus;
7066 	struct ipr_bus_attributes *bus_attr;
7067 	struct ipr_mode_page28 *mode_page;
7068 
7069 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
7070 				      sizeof(struct ipr_mode_page28));
7071 
7072 	entry_length = mode_page->entry_length;
7073 
7074 	/* Loop for each device bus entry */
7075 	for (i = 0, bus = mode_page->bus;
7076 	     i < mode_page->num_entries;
7077 	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7078 		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7079 			dev_err(&ioa_cfg->pdev->dev,
7080 				"Invalid resource address reported: 0x%08X\n",
7081 				IPR_GET_PHYS_LOC(bus->res_addr));
7082 			continue;
7083 		}
7084 
7085 		bus_attr = &ioa_cfg->bus_attr[i];
7086 		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7087 		bus->bus_width = bus_attr->bus_width;
7088 		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7089 		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7090 		if (bus_attr->qas_enabled)
7091 			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7092 		else
7093 			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7094 	}
7095 }
7096 
7097 /**
7098  * ipr_build_mode_select - Build a mode select command
7099  * @ipr_cmd:	ipr command struct
7100  * @res_handle:	resource handle to send command to
7101  * @parm:		Byte 2 of the Mode Select command
7102  * @dma_addr:	DMA buffer address
7103  * @xfer_len:	data transfer length
7104  *
7105  * Return value:
7106  * 	none
7107  **/
7108 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7109 				  __be32 res_handle, u8 parm,
7110 				  dma_addr_t dma_addr, u8 xfer_len)
7111 {
7112 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7113 
7114 	ioarcb->res_handle = res_handle;
7115 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7116 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7117 	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7118 	ioarcb->cmd_pkt.cdb[1] = parm;
7119 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
7120 
7121 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7122 }
7123 
7124 /**
7125  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7126  * @ipr_cmd:	ipr command struct
7127  *
7128  * This function sets up the SCSI bus attributes and sends
7129  * a Mode Select for Page 28 to activate them.
7130  *
7131  * Return value:
7132  * 	IPR_RC_JOB_RETURN
7133  **/
7134 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7135 {
7136 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7137 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7138 	int length;
7139 
7140 	ENTER;
7141 	ipr_scsi_bus_speed_limit(ioa_cfg);
7142 	ipr_check_term_power(ioa_cfg, mode_pages);
7143 	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
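	/*
	 * The length sent back includes the mode data length byte itself;
	 * that field is reserved on MODE SELECT, so clear it before handing
	 * the same buffer back to the adapter.
	 */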
7144 	length = mode_pages->hdr.length + 1;
7145 	mode_pages->hdr.length = 0;
7146 
7147 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7148 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7149 			      length);
7150 
7151 	ipr_cmd->job_step = ipr_set_supported_devs;
7152 	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7153 				    struct ipr_resource_entry, queue);
7154 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7155 
7156 	LEAVE;
7157 	return IPR_RC_JOB_RETURN;
7158 }
7159 
7160 /**
7161  * ipr_build_mode_sense - Builds a mode sense command
7162  * @ipr_cmd:	ipr command struct
7163  * @res_handle:	resource handle to send command to
7164  * @parm:		Byte 2 of mode sense command
7165  * @dma_addr:	DMA address of mode sense buffer
7166  * @xfer_len:	Size of DMA buffer
7167  *
7168  * Return value:
7169  * 	none
7170  **/
7171 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7172 				 __be32 res_handle,
7173 				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7174 {
7175 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7176 
7177 	ioarcb->res_handle = res_handle;
7178 	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7179 	ioarcb->cmd_pkt.cdb[2] = parm;
7180 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
7181 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7182 
7183 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7184 }
7185 
7186 /**
7187  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7188  * @ipr_cmd:	ipr command struct
7189  *
7190  * This function handles the failure of an IOA bringup command.
7191  *
7192  * Return value:
7193  * 	IPR_RC_JOB_RETURN
7194  **/
7195 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7196 {
7197 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7198 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7199 
7200 	dev_err(&ioa_cfg->pdev->dev,
7201 		"0x%02X failed with IOASC: 0x%08X\n",
7202 		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7203 
7204 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7205 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7206 	return IPR_RC_JOB_RETURN;
7207 }
7208 
7209 /**
7210  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7211  * @ipr_cmd:	ipr command struct
7212  *
7213  * This function handles the failure of a Mode Sense to the IOAFP.
7214  * Some adapters do not handle all mode pages.
7215  *
7216  * Return value:
7217  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7218  **/
7219 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7220 {
7221 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7222 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7223 
7224 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7225 		ipr_cmd->job_step = ipr_set_supported_devs;
7226 		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7227 					    struct ipr_resource_entry, queue);
7228 		return IPR_RC_JOB_CONTINUE;
7229 	}
7230 
7231 	return ipr_reset_cmd_failed(ipr_cmd);
7232 }
7233 
7234 /**
7235  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7236  * @ipr_cmd:	ipr command struct
7237  *
7238  * This function sends a Page 28 Mode Sense to the IOA to
7239  * retrieve SCSI bus attributes.
7240  *
7241  * Return value:
7242  * 	IPR_RC_JOB_RETURN
7243  **/
7244 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7245 {
7246 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7247 
7248 	ENTER;
7249 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7250 			     0x28, ioa_cfg->vpd_cbs_dma +
7251 			     offsetof(struct ipr_misc_cbs, mode_pages),
7252 			     sizeof(struct ipr_mode_pages));
7253 
7254 	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7255 	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7256 
7257 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7258 
7259 	LEAVE;
7260 	return IPR_RC_JOB_RETURN;
7261 }
7262 
7263 /**
7264  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7265  * @ipr_cmd:	ipr command struct
7266  *
7267  * This function enables dual IOA RAID support if possible.
7268  *
7269  * Return value:
7270  * 	IPR_RC_JOB_RETURN
7271  **/
7272 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7273 {
7274 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7275 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7276 	struct ipr_mode_page24 *mode_page;
7277 	int length;
7278 
7279 	ENTER;
7280 	mode_page = ipr_get_mode_page(mode_pages, 0x24,
7281 				      sizeof(struct ipr_mode_page24));
7282 
7283 	if (mode_page)
7284 		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7285 
7286 	length = mode_pages->hdr.length + 1;
7287 	mode_pages->hdr.length = 0;
7288 
7289 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7290 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7291 			      length);
7292 
7293 	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7294 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7295 
7296 	LEAVE;
7297 	return IPR_RC_JOB_RETURN;
7298 }
7299 
7300 /**
7301  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7302  * @ipr_cmd:	ipr command struct
7303  *
7304  * This function handles the failure of a Mode Sense to the IOAFP.
7305  * Some adapters do not handle all mode pages.
7306  *
7307  * Return value:
7308  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7309  **/
7310 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7311 {
7312 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7313 
7314 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7315 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7316 		return IPR_RC_JOB_CONTINUE;
7317 	}
7318 
7319 	return ipr_reset_cmd_failed(ipr_cmd);
7320 }
7321 
7322 /**
7323  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7324  * @ipr_cmd:	ipr command struct
7325  *
7326  * This function sends a Mode Sense to the IOA to retrieve
7327  * the IOA Advanced Function Control mode page.
7328  *
7329  * Return value:
7330  * 	IPR_RC_JOB_RETURN
7331  **/
7332 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7333 {
7334 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7335 
7336 	ENTER;
7337 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7338 			     0x24, ioa_cfg->vpd_cbs_dma +
7339 			     offsetof(struct ipr_misc_cbs, mode_pages),
7340 			     sizeof(struct ipr_mode_pages));
7341 
7342 	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7343 	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7344 
7345 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7346 
7347 	LEAVE;
7348 	return IPR_RC_JOB_RETURN;
7349 }
7350 
7351 /**
7352  * ipr_init_res_table - Initialize the resource table
7353  * @ipr_cmd:	ipr command struct
7354  *
7355  * This function looks through the existing resource table, comparing
7356  * it with the config table. This function will take care of old/new
7357  * devices and schedule adding/removing them from the mid-layer
7358  * as appropriate.
7359  *
7360  * Return value:
7361  * 	IPR_RC_JOB_CONTINUE
7362  **/
7363 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7364 {
7365 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7366 	struct ipr_resource_entry *res, *temp;
7367 	struct ipr_config_table_entry_wrapper cfgtew;
7368 	int entries, found, flag, i;
7369 	LIST_HEAD(old_res);
7370 
7371 	ENTER;
7372 	if (ioa_cfg->sis64)
7373 		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7374 	else
7375 		flag = ioa_cfg->u.cfg_table->hdr.flags;
7376 
7377 	if (flag & IPR_UCODE_DOWNLOAD_REQ)
7378 		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7379 
7380 	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
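	/*
	 * Park every currently known resource on a temporary list and
	 * re-match it against the freshly read config table; whatever is
	 * still on old_res afterwards is no longer reported by the adapter.
	 */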
7381 		list_move_tail(&res->queue, &old_res);
7382 
7383 	if (ioa_cfg->sis64)
7384 		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7385 	else
7386 		entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7387 
7388 	for (i = 0; i < entries; i++) {
7389 		if (ioa_cfg->sis64)
7390 			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7391 		else
7392 			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7393 		found = 0;
7394 
7395 		list_for_each_entry_safe(res, temp, &old_res, queue) {
7396 			if (ipr_is_same_device(res, &cfgtew)) {
7397 				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7398 				found = 1;
7399 				break;
7400 			}
7401 		}
7402 
7403 		if (!found) {
7404 			if (list_empty(&ioa_cfg->free_res_q)) {
7405 				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7406 				break;
7407 			}
7408 
7409 			found = 1;
7410 			res = list_entry(ioa_cfg->free_res_q.next,
7411 					 struct ipr_resource_entry, queue);
7412 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7413 			ipr_init_res_entry(res, &cfgtew);
7414 			res->add_to_ml = 1;
7415 		} else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7416 			res->sdev->allow_restart = 1;
7417 
7418 		if (found)
7419 			ipr_update_res_entry(res, &cfgtew);
7420 	}
7421 
7422 	list_for_each_entry_safe(res, temp, &old_res, queue) {
7423 		if (res->sdev) {
7424 			res->del_from_ml = 1;
7425 			res->res_handle = IPR_INVALID_RES_HANDLE;
7426 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7427 		}
7428 	}
7429 
7430 	list_for_each_entry_safe(res, temp, &old_res, queue) {
7431 		ipr_clear_res_target(res);
7432 		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7433 	}
7434 
7435 	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7436 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7437 	else
7438 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7439 
7440 	LEAVE;
7441 	return IPR_RC_JOB_CONTINUE;
7442 }
7443 
7444 /**
7445  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7446  * @ipr_cmd:	ipr command struct
7447  *
7448  * This function sends a Query IOA Configuration command
7449  * to the adapter to retrieve the IOA configuration table.
7450  *
7451  * Return value:
7452  * 	IPR_RC_JOB_RETURN
7453  **/
7454 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7455 {
7456 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7457 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7458 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7459 	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7460 
7461 	ENTER;
7462 	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7463 		ioa_cfg->dual_raid = 1;
7464 	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7465 		 ucode_vpd->major_release, ucode_vpd->card_type,
7466 		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7467 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7468 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7469 
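	/*
	 * CDB bytes 6-8 carry the 24-bit allocation length for the config
	 * table, most significant byte first.
	 */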
7470 	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7471 	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7472 	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7473 	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7474 
7475 	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7476 		       IPR_IOADL_FLAGS_READ_LAST);
7477 
7478 	ipr_cmd->job_step = ipr_init_res_table;
7479 
7480 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7481 
7482 	LEAVE;
7483 	return IPR_RC_JOB_RETURN;
7484 }
7485 
7486 /**
7487  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7488  * @ipr_cmd:	ipr command struct
7489  *
7490  * This utility function sends an inquiry to the adapter.
7491  *
7492  * Return value:
7493  * 	none
7494  **/
7495 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7496 			      dma_addr_t dma_addr, u8 xfer_len)
7497 {
7498 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7499 
7500 	ENTER;
7501 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7502 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7503 
7504 	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7505 	ioarcb->cmd_pkt.cdb[1] = flags;
7506 	ioarcb->cmd_pkt.cdb[2] = page;
7507 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
7508 
7509 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7510 
7511 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7512 	LEAVE;
7513 }
7514 
7515 /**
7516  * ipr_inquiry_page_supported - Is the given inquiry page supported
7517  * @page0:		inquiry page 0 buffer
7518  * @page:		page code.
7519  *
7520  * This function determines if the specified inquiry page is supported.
7521  *
7522  * Return value:
7523  *	1 if page is supported / 0 if not
7524  **/
7525 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7526 {
7527 	int i;
7528 
7529 	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7530 		if (page0->page[i] == page)
7531 			return 1;
7532 
7533 	return 0;
7534 }
7535 
7536 /**
7537  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7538  * @ipr_cmd:	ipr command struct
7539  *
7540  * This function sends a Page 0xD0 inquiry to the adapter
7541  * to retrieve adapter capabilities.
7542  *
7543  * Return value:
7544  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7545  **/
7546 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7547 {
7548 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7549 	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7550 	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7551 
7552 	ENTER;
7553 	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7554 	memset(cap, 0, sizeof(*cap));
7555 
7556 	if (ipr_inquiry_page_supported(page0, 0xD0)) {
7557 		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7558 				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7559 				  sizeof(struct ipr_inquiry_cap));
7560 		return IPR_RC_JOB_RETURN;
7561 	}
7562 
7563 	LEAVE;
7564 	return IPR_RC_JOB_CONTINUE;
7565 }
7566 
7567 /**
7568  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7569  * @ipr_cmd:	ipr command struct
7570  *
7571  * This function sends a Page 3 inquiry to the adapter
7572  * to retrieve software VPD information.
7573  *
7574  * Return value:
7575  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7576  **/
7577 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7578 {
7579 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7580 
7581 	ENTER;
7582 
7583 	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7584 
7585 	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7586 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7587 			  sizeof(struct ipr_inquiry_page3));
7588 
7589 	LEAVE;
7590 	return IPR_RC_JOB_RETURN;
7591 }
7592 
7593 /**
7594  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7595  * @ipr_cmd:	ipr command struct
7596  *
7597  * This function sends a Page 0 inquiry to the adapter
7598  * to retrieve supported inquiry pages.
7599  *
7600  * Return value:
7601  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7602  **/
7603 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7604 {
7605 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7606 	char type[5];
7607 
7608 	ENTER;
7609 
7610 	/* Grab the type out of the VPD and store it away */
7611 	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7612 	type[4] = '\0';
7613 	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7614 
7615 	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7616 
7617 	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7618 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7619 			  sizeof(struct ipr_inquiry_page0));
7620 
7621 	LEAVE;
7622 	return IPR_RC_JOB_RETURN;
7623 }
7624 
7625 /**
7626  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7627  * @ipr_cmd:	ipr command struct
7628  *
7629  * This function sends a standard inquiry to the adapter.
7630  *
7631  * Return value:
7632  * 	IPR_RC_JOB_RETURN
7633  **/
7634 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7635 {
7636 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7637 
7638 	ENTER;
7639 	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7640 
7641 	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7642 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7643 			  sizeof(struct ipr_ioa_vpd));
7644 
7645 	LEAVE;
7646 	return IPR_RC_JOB_RETURN;
7647 }
7648 
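/*
 * IOA bring-up job chain (each step sets ->job_step to the next one):
 *   ipr_ioafp_identify_hrrq -> ipr_ioafp_std_inquiry ->
 *   ipr_ioafp_page0_inquiry -> ipr_ioafp_page3_inquiry ->
 *   ipr_ioafp_cap_inquiry -> ipr_ioafp_query_ioa_cfg ->
 *   ipr_init_res_table -> mode sense/select (pages 24/28) ->
 *   ipr_set_supported_devs -> ipr_ioa_reset_done
 */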
7649 /**
7650  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7651  * @ipr_cmd:	ipr command struct
7652  *
7653  * This function sends an Identify Host Request Response Queue
7654  * command to establish the HRRQ with the adapter.
7655  *
7656  * Return value:
7657  * 	IPR_RC_JOB_RETURN
7658  **/
7659 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7660 {
7661 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7662 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7663 	struct ipr_hrr_queue *hrrq;
7664 
7665 	ENTER;
7666 	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7667 	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7668 
7669 	if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7670 		hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7671 
7672 		ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7673 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7674 
7675 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7676 		if (ioa_cfg->sis64)
7677 			ioarcb->cmd_pkt.cdb[1] = 0x1;
7678 
7679 		if (ioa_cfg->nvectors == 1)
7680 			ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7681 		else
7682 			ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7683 
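		/*
		 * CDB bytes 2-5 carry the low 32 bits of the host RRQ DMA
		 * address (MSB first) and bytes 7-8 its size in bytes; on
		 * SIS-64 adapters bytes 10-13 below supply the upper 32 bits
		 * of the address.
		 */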
7684 		ioarcb->cmd_pkt.cdb[2] =
7685 			((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7686 		ioarcb->cmd_pkt.cdb[3] =
7687 			((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7688 		ioarcb->cmd_pkt.cdb[4] =
7689 			((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7690 		ioarcb->cmd_pkt.cdb[5] =
7691 			((u64) hrrq->host_rrq_dma) & 0xff;
7692 		ioarcb->cmd_pkt.cdb[7] =
7693 			((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7694 		ioarcb->cmd_pkt.cdb[8] =
7695 			(sizeof(u32) * hrrq->size) & 0xff;
7696 
7697 		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7698 			ioarcb->cmd_pkt.cdb[9] =
7699 					ioa_cfg->identify_hrrq_index;
7700 
7701 		if (ioa_cfg->sis64) {
7702 			ioarcb->cmd_pkt.cdb[10] =
7703 				((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7704 			ioarcb->cmd_pkt.cdb[11] =
7705 				((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7706 			ioarcb->cmd_pkt.cdb[12] =
7707 				((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7708 			ioarcb->cmd_pkt.cdb[13] =
7709 				((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7710 		}
7711 
7712 		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7713 			ioarcb->cmd_pkt.cdb[14] =
7714 					ioa_cfg->identify_hrrq_index;
7715 
7716 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7717 			   IPR_INTERNAL_TIMEOUT);
7718 
7719 		if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7720 			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7721 
7722 		LEAVE;
7723 		return IPR_RC_JOB_RETURN;
7724 	}
7725 
7726 	LEAVE;
7727 	return IPR_RC_JOB_CONTINUE;
7728 }
7729 
7730 /**
7731  * ipr_reset_timer_done - Adapter reset timer function
7732  * @ipr_cmd:	ipr command struct
7733  *
7734  * Description: This function is used in adapter reset processing
7735  * for timing events. If the reset_cmd pointer in the IOA
7736  * config struct is not this adapter's we are doing nested
7737  * config struct is not this adapter's, we are doing nested
7738  * command block.
7739  *
7740  * Return value:
7741  * 	none
7742  **/
7743 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7744 {
7745 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7746 	unsigned long lock_flags = 0;
7747 
7748 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7749 
7750 	if (ioa_cfg->reset_cmd == ipr_cmd) {
7751 		list_del(&ipr_cmd->queue);
7752 		ipr_cmd->done(ipr_cmd);
7753 	}
7754 
7755 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7756 }
7757 
7758 /**
7759  * ipr_reset_start_timer - Start a timer for adapter reset job
7760  * @ipr_cmd:	ipr command struct
7761  * @timeout:	timeout value
7762  *
7763  * Description: This function is used in adapter reset processing
7764  * for timing events. If the reset_cmd pointer in the IOA
7765  * config struct is not this adapter's we are doing nested
7766  * config struct is not this adapter's, we are doing nested
7767  * command block.
7768  *
7769  * Return value:
7770  * 	none
7771  **/
7772 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7773 				  unsigned long timeout)
7774 {
7775 
7776 	ENTER;
7777 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7778 	ipr_cmd->done = ipr_reset_ioa_job;
7779 
7780 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7781 	ipr_cmd->timer.expires = jiffies + timeout;
7782 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7783 	add_timer(&ipr_cmd->timer);
7784 }
7785 
7786 /**
7787  * ipr_init_ioa_mem - Initialize ioa_cfg control block
7788  * @ioa_cfg:	ioa cfg struct
7789  *
7790  * Return value:
7791  * 	nothing
7792  **/
7793 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7794 {
7795 	struct ipr_hrr_queue *hrrq;
7796 
7797 	for_each_hrrq(hrrq, ioa_cfg) {
7798 		spin_lock(&hrrq->_lock);
7799 		memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7800 
7801 		/* Initialize Host RRQ pointers */
7802 		hrrq->hrrq_start = hrrq->host_rrq;
7803 		hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7804 		hrrq->hrrq_curr = hrrq->hrrq_start;
7805 		hrrq->toggle_bit = 1;
7806 		spin_unlock(&hrrq->_lock);
7807 	}
7808 	wmb();
7809 
7810 	ioa_cfg->identify_hrrq_index = 0;
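	/*
	 * With more than one HRRQ, the round-robin index starts at 1 so
	 * that queue 0 (IPR_INIT_HRRQ) stays available for internal adapter
	 * commands.
	 */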
7811 	if (ioa_cfg->hrrq_num == 1)
7812 		atomic_set(&ioa_cfg->hrrq_index, 0);
7813 	else
7814 		atomic_set(&ioa_cfg->hrrq_index, 1);
7815 
7816 	/* Zero out config table */
7817 	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7818 }
7819 
7820 /**
7821  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7822  * @ipr_cmd:	ipr command struct
7823  *
7824  * Return value:
7825  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7826  **/
7827 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7828 {
7829 	unsigned long stage, stage_time;
7830 	u32 feedback;
7831 	volatile u32 int_reg;
7832 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7833 	u64 maskval = 0;
7834 
7835 	feedback = readl(ioa_cfg->regs.init_feedback_reg);
7836 	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7837 	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7838 
7839 	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7840 
7841 	/* sanity check the stage_time value */
7842 	if (stage_time == 0)
7843 		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7844 	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7845 		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7846 	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7847 		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7848 
7849 	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7850 		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7851 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7852 		stage_time = ioa_cfg->transop_timeout;
7853 		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7854 	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7855 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7856 		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7857 			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
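			/*
			 * The adapter already transitioned to operational:
			 * mask both the IPL stage change and the
			 * trans-to-operational interrupts with a single
			 * 64-bit write and go straight to identifying the
			 * HRRQs.
			 */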
7858 			maskval = IPR_PCII_IPL_STAGE_CHANGE;
7859 			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7860 			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7861 			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7862 			return IPR_RC_JOB_CONTINUE;
7863 		}
7864 	}
7865 
7866 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7867 	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7868 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7869 	ipr_cmd->done = ipr_reset_ioa_job;
7870 	add_timer(&ipr_cmd->timer);
7871 
7872 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7873 
7874 	return IPR_RC_JOB_RETURN;
7875 }
7876 
7877 /**
7878  * ipr_reset_enable_ioa - Enable the IOA following a reset.
7879  * @ipr_cmd:	ipr command struct
7880  *
7881  * This function reinitializes some control blocks and
7882  * enables destructive diagnostics on the adapter.
7883  *
7884  * Return value:
7885  * 	IPR_RC_JOB_RETURN
7886  **/
7887 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7888 {
7889 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7890 	volatile u32 int_reg;
7891 	volatile u64 maskval;
7892 	int i;
7893 
7894 	ENTER;
7895 	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7896 	ipr_init_ioa_mem(ioa_cfg);
7897 
7898 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7899 		spin_lock(&ioa_cfg->hrrq[i]._lock);
7900 		ioa_cfg->hrrq[i].allow_interrupts = 1;
7901 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
7902 	}
7903 	wmb();
7904 	if (ioa_cfg->sis64) {
7905 		/* Set the adapter to the correct endian mode. */
7906 		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7907 		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7908 	}
7909 
7910 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7911 
7912 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7913 		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7914 		       ioa_cfg->regs.clr_interrupt_mask_reg32);
7915 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7916 		return IPR_RC_JOB_CONTINUE;
7917 	}
7918 
7919 	/* Enable destructive diagnostics on IOA */
7920 	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7921 
7922 	if (ioa_cfg->sis64) {
7923 		maskval = IPR_PCII_IPL_STAGE_CHANGE;
7924 		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7925 		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7926 	} else
7927 		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7928 
7929 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7930 
7931 	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7932 
7933 	if (ioa_cfg->sis64) {
7934 		ipr_cmd->job_step = ipr_reset_next_stage;
7935 		return IPR_RC_JOB_CONTINUE;
7936 	}
7937 
7938 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7939 	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7940 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7941 	ipr_cmd->done = ipr_reset_ioa_job;
7942 	add_timer(&ipr_cmd->timer);
7943 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7944 
7945 	LEAVE;
7946 	return IPR_RC_JOB_RETURN;
7947 }
7948 
7949 /**
7950  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7951  * @ipr_cmd:	ipr command struct
7952  *
7953  * This function is invoked when an adapter dump has run out
7954  * of processing time.
7955  *
7956  * Return value:
7957  * 	IPR_RC_JOB_CONTINUE
7958  **/
7959 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7960 {
7961 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7962 
7963 	if (ioa_cfg->sdt_state == GET_DUMP)
7964 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7965 	else if (ioa_cfg->sdt_state == READ_DUMP)
7966 		ioa_cfg->sdt_state = ABORT_DUMP;
7967 
7968 	ioa_cfg->dump_timeout = 1;
7969 	ipr_cmd->job_step = ipr_reset_alert;
7970 
7971 	return IPR_RC_JOB_CONTINUE;
7972 }
7973 
7974 /**
7975  * ipr_unit_check_no_data - Log a unit check/no data error log
7976  * @ioa_cfg:		ioa config struct
7977  *
7978  * Logs an error indicating the adapter unit checked, but for some
7979  * reason, we were unable to fetch the unit check buffer.
7980  *
7981  * Return value:
7982  * 	nothing
7983  **/
7984 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7985 {
7986 	ioa_cfg->errors_logged++;
7987 	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7988 }
7989 
7990 /**
7991  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7992  * @ioa_cfg:		ioa config struct
7993  *
7994  * Fetches the unit check buffer from the adapter by clocking the data
7995  * through the mailbox register.
7996  *
7997  * Return value:
7998  * 	nothing
7999  **/
8000 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8001 {
8002 	unsigned long mailbox;
8003 	struct ipr_hostrcb *hostrcb;
8004 	struct ipr_uc_sdt sdt;
8005 	int rc, length;
8006 	u32 ioasc;
8007 
8008 	mailbox = readl(ioa_cfg->ioa_mailbox);
8009 
8010 	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8011 		ipr_unit_check_no_data(ioa_cfg);
8012 		return;
8013 	}
8014 
8015 	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8016 	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8017 					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8018 
8019 	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8020 	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8021 	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8022 		ipr_unit_check_no_data(ioa_cfg);
8023 		return;
8024 	}
8025 
8026 	/* Find length of the first sdt entry (UC buffer) */
8027 	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8028 		length = be32_to_cpu(sdt.entry[0].end_token);
8029 	else
8030 		length = (be32_to_cpu(sdt.entry[0].end_token) -
8031 			  be32_to_cpu(sdt.entry[0].start_token)) &
8032 			  IPR_FMT2_MBX_ADDR_MASK;
8033 
8034 	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8035 			     struct ipr_hostrcb, queue);
8036 	list_del(&hostrcb->queue);
8037 	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8038 
8039 	rc = ipr_get_ldump_data_section(ioa_cfg,
8040 					be32_to_cpu(sdt.entry[0].start_token),
8041 					(__be32 *)&hostrcb->hcam,
8042 					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8043 
8044 	if (!rc) {
8045 		ipr_handle_log_data(ioa_cfg, hostrcb);
8046 		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8047 		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8048 		    ioa_cfg->sdt_state == GET_DUMP)
8049 			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8050 	} else
8051 		ipr_unit_check_no_data(ioa_cfg);
8052 
8053 	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8054 }
8055 
8056 /**
8057  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8058  * @ipr_cmd:	ipr command struct
8059  *
8060  * Description: This function fetches the unit check buffer from the adapter.
8061  *
8062  * Return value:
8063  *	IPR_RC_JOB_RETURN
8064  **/
8065 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8066 {
8067 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8068 
8069 	ENTER;
8070 	ioa_cfg->ioa_unit_checked = 0;
8071 	ipr_get_unit_check_buffer(ioa_cfg);
8072 	ipr_cmd->job_step = ipr_reset_alert;
8073 	ipr_reset_start_timer(ipr_cmd, 0);
8074 
8075 	LEAVE;
8076 	return IPR_RC_JOB_RETURN;
8077 }
8078 
8079 /**
8080  * ipr_reset_restore_cfg_space - Restore PCI config space.
8081  * @ipr_cmd:	ipr command struct
8082  *
8083  * Description: This function restores the saved PCI config space of
8084  * the adapter, fails all outstanding ops back to the callers, and
8085  * fetches the dump/unit check if applicable to this reset.
8086  *
8087  * Return value:
8088  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8089  **/
8090 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8091 {
8092 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8093 	u32 int_reg;
8094 
8095 	ENTER;
8096 	ioa_cfg->pdev->state_saved = true;
8097 	pci_restore_state(ioa_cfg->pdev);
8098 
8099 	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8100 		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8101 		return IPR_RC_JOB_CONTINUE;
8102 	}
8103 
8104 	ipr_fail_all_ops(ioa_cfg);
8105 
8106 	if (ioa_cfg->sis64) {
8107 		/* Set the adapter to the correct endian mode. */
8108 		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8109 		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8110 	}
8111 
8112 	if (ioa_cfg->ioa_unit_checked) {
8113 		if (ioa_cfg->sis64) {
8114 			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8115 			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8116 			return IPR_RC_JOB_RETURN;
8117 		} else {
8118 			ioa_cfg->ioa_unit_checked = 0;
8119 			ipr_get_unit_check_buffer(ioa_cfg);
8120 			ipr_cmd->job_step = ipr_reset_alert;
8121 			ipr_reset_start_timer(ipr_cmd, 0);
8122 			return IPR_RC_JOB_RETURN;
8123 		}
8124 	}
8125 
8126 	if (ioa_cfg->in_ioa_bringdown) {
8127 		ipr_cmd->job_step = ipr_ioa_bringdown_done;
8128 	} else {
8129 		ipr_cmd->job_step = ipr_reset_enable_ioa;
8130 
8131 		if (GET_DUMP == ioa_cfg->sdt_state) {
8132 			ioa_cfg->sdt_state = READ_DUMP;
8133 			ioa_cfg->dump_timeout = 0;
8134 			if (ioa_cfg->sis64)
8135 				ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8136 			else
8137 				ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8138 			ipr_cmd->job_step = ipr_reset_wait_for_dump;
8139 			schedule_work(&ioa_cfg->work_q);
8140 			return IPR_RC_JOB_RETURN;
8141 		}
8142 	}
8143 
8144 	LEAVE;
8145 	return IPR_RC_JOB_CONTINUE;
8146 }
8147 
8148 /**
8149  * ipr_reset_bist_done - BIST has completed on the adapter.
8150  * @ipr_cmd:	ipr command struct
8151  *
8152  * Description: Unblock config space and resume the reset process.
8153  *
8154  * Return value:
8155  * 	IPR_RC_JOB_CONTINUE
8156  **/
8157 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8158 {
8159 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8160 
8161 	ENTER;
8162 	if (ioa_cfg->cfg_locked)
8163 		pci_cfg_access_unlock(ioa_cfg->pdev);
8164 	ioa_cfg->cfg_locked = 0;
8165 	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8166 	LEAVE;
8167 	return IPR_RC_JOB_CONTINUE;
8168 }
8169 
8170 /**
8171  * ipr_reset_start_bist - Run BIST on the adapter.
8172  * @ipr_cmd:	ipr command struct
8173  *
8174  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8175  *
8176  * Return value:
8177  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8178  **/
8179 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8180 {
8181 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8182 	int rc = PCIBIOS_SUCCESSFUL;
8183 
8184 	ENTER;
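	/*
	 * Newer (SIS-64 style) chips start BIST through an MMIO uproc
	 * interrupt register; older chips use the standard PCI config space
	 * BIST register.
	 */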
8185 	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8186 		writel(IPR_UPROCI_SIS64_START_BIST,
8187 		       ioa_cfg->regs.set_uproc_interrupt_reg32);
8188 	else
8189 		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8190 
8191 	if (rc == PCIBIOS_SUCCESSFUL) {
8192 		ipr_cmd->job_step = ipr_reset_bist_done;
8193 		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8194 		rc = IPR_RC_JOB_RETURN;
8195 	} else {
8196 		if (ioa_cfg->cfg_locked)
8197 			pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8198 		ioa_cfg->cfg_locked = 0;
8199 		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8200 		rc = IPR_RC_JOB_CONTINUE;
8201 	}
8202 
8203 	LEAVE;
8204 	return rc;
8205 }
8206 
8207 /**
8208  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8209  * @ipr_cmd:	ipr command struct
8210  *
8211  * Description: This clears PCI reset to the adapter and delays two seconds.
8212  *
8213  * Return value:
8214  * 	IPR_RC_JOB_RETURN
8215  **/
8216 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8217 {
8218 	ENTER;
8219 	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8220 	ipr_cmd->job_step = ipr_reset_bist_done;
8221 	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8222 	LEAVE;
8223 	return IPR_RC_JOB_RETURN;
8224 }
8225 
8226 /**
8227  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8228  * @ipr_cmd:	ipr command struct
8229  *
8230  * Description: This asserts PCI reset to the adapter.
8231  *
8232  * Return value:
8233  * 	IPR_RC_JOB_RETURN
8234  **/
8235 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8236 {
8237 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8238 	struct pci_dev *pdev = ioa_cfg->pdev;
8239 
8240 	ENTER;
8241 	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8242 	ipr_cmd->job_step = ipr_reset_slot_reset_done;
8243 	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8244 	LEAVE;
8245 	return IPR_RC_JOB_RETURN;
8246 }
8247 
8248 /**
8249  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8250  * @ipr_cmd:	ipr command struct
8251  *
8252  * Description: This attempts to block config access to the IOA.
8253  *
8254  * Return value:
8255  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8256  **/
8257 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8258 {
8259 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8260 	int rc = IPR_RC_JOB_CONTINUE;
8261 
8262 	if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8263 		ioa_cfg->cfg_locked = 1;
8264 		ipr_cmd->job_step = ioa_cfg->reset;
8265 	} else {
8266 		if (ipr_cmd->u.time_left) {
8267 			rc = IPR_RC_JOB_RETURN;
8268 			ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8269 			ipr_reset_start_timer(ipr_cmd,
8270 					      IPR_CHECK_FOR_RESET_TIMEOUT);
8271 		} else {
8272 			ipr_cmd->job_step = ioa_cfg->reset;
8273 			dev_err(&ioa_cfg->pdev->dev,
8274 				"Timed out waiting to lock config access. Resetting anyway.\n");
8275 		}
8276 	}
8277 
8278 	return rc;
8279 }
8280 
8281 /**
8282  * ipr_reset_block_config_access - Block config access to the IOA
8283  * @ipr_cmd:	ipr command struct
8284  *
8285  * Description: This attempts to block config access to the IOA.
8286  *
8287  * Return value:
8288  * 	IPR_RC_JOB_CONTINUE
8289  **/
8290 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8291 {
8292 	ipr_cmd->ioa_cfg->cfg_locked = 0;
8293 	ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8294 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8295 	return IPR_RC_JOB_CONTINUE;
8296 }
8297 
8298 /**
8299  * ipr_reset_allowed - Query whether or not IOA can be reset
8300  * @ioa_cfg:	ioa config struct
8301  *
8302  * Return value:
8303  * 	0 if reset not allowed / non-zero if reset is allowed
8304  **/
8305 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8306 {
8307 	volatile u32 temp_reg;
8308 
8309 	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8310 	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8311 }
8312 
8313 /**
8314  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8315  * @ipr_cmd:	ipr command struct
8316  *
8317  * Description: This function waits for adapter permission to run BIST,
8318  * then runs BIST. If the adapter does not give permission after a
8319  * reasonable time, we will reset the adapter anyway. The impact of
8320  * resetting the adapter without warning it is the risk of
8321  * losing the persistent error log on the adapter. If the adapter is
8322  * reset while it is writing to the flash on the adapter, the flash
8323  * segment will have bad ECC and be zeroed.
8324  *
8325  * Return value:
8326  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8327  **/
8328 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8329 {
8330 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8331 	int rc = IPR_RC_JOB_RETURN;
8332 
8333 	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8334 		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8335 		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8336 	} else {
8337 		ipr_cmd->job_step = ipr_reset_block_config_access;
8338 		rc = IPR_RC_JOB_CONTINUE;
8339 	}
8340 
8341 	return rc;
8342 }
8343 
8344 /**
8345  * ipr_reset_alert - Alert the adapter of a pending reset
8346  * @ipr_cmd:	ipr command struct
8347  *
8348  * Description: This function alerts the adapter that it will be reset.
8349  * If memory space is not currently enabled, proceed directly
8350  * to running BIST on the adapter. The timer must always be started
8351  * so we guarantee we do not run BIST from ipr_isr.
8352  *
8353  * Return value:
8354  * 	IPR_RC_JOB_RETURN
8355  **/
8356 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8357 {
8358 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8359 	u16 cmd_reg;
8360 	int rc;
8361 
8362 	ENTER;
8363 	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8364 
8365 	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8366 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8367 		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8368 		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8369 	} else {
8370 		ipr_cmd->job_step = ipr_reset_block_config_access;
8371 	}
8372 
8373 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8374 	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8375 
8376 	LEAVE;
8377 	return IPR_RC_JOB_RETURN;
8378 }
8379 
8380 /**
8381  * ipr_reset_ucode_download_done - Microcode download completion
8382  * @ipr_cmd:	ipr command struct
8383  *
8384  * Description: This function unmaps the microcode download buffer.
8385  *
8386  * Return value:
8387  * 	IPR_RC_JOB_CONTINUE
8388  **/
8389 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8390 {
8391 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8392 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8393 
8394 	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
8395 		     sglist->num_sg, DMA_TO_DEVICE);
8396 
8397 	ipr_cmd->job_step = ipr_reset_alert;
8398 	return IPR_RC_JOB_CONTINUE;
8399 }
8400 
8401 /**
8402  * ipr_reset_ucode_download - Download microcode to the adapter
8403  * @ipr_cmd:	ipr command struct
8404  *
8405  * Description: This function checks to see if there is microcode
8406  * to download to the adapter. If there is, a download is performed.
8407  *
8408  * Return value:
8409  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8410  **/
8411 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8412 {
8413 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8414 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8415 
8416 	ENTER;
8417 	ipr_cmd->job_step = ipr_reset_alert;
8418 
8419 	if (!sglist)
8420 		return IPR_RC_JOB_CONTINUE;
8421 
8422 	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8423 	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8424 	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8425 	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
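	/*
	 * WRITE BUFFER bytes 6-8 hold the 24-bit parameter list length of
	 * the microcode image, most significant byte first.
	 */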
8426 	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8427 	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8428 	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8429 
8430 	if (ioa_cfg->sis64)
8431 		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8432 	else
8433 		ipr_build_ucode_ioadl(ipr_cmd, sglist);
8434 	ipr_cmd->job_step = ipr_reset_ucode_download_done;
8435 
8436 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8437 		   IPR_WRITE_BUFFER_TIMEOUT);
8438 
8439 	LEAVE;
8440 	return IPR_RC_JOB_RETURN;
8441 }
8442 
8443 /**
8444  * ipr_reset_shutdown_ioa - Shutdown the adapter
8445  * @ipr_cmd:	ipr command struct
8446  *
8447  * Description: This function issues an adapter shutdown of the
8448  * specified type to the specified adapter as part of the
8449  * adapter reset job.
8450  *
8451  * Return value:
8452  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8453  **/
8454 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8455 {
8456 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8457 	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8458 	unsigned long timeout;
8459 	int rc = IPR_RC_JOB_CONTINUE;
8460 
8461 	ENTER;
8462 	if (shutdown_type != IPR_SHUTDOWN_NONE &&
8463 			!ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8464 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8465 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8466 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8467 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8468 
8469 		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8470 			timeout = IPR_SHUTDOWN_TIMEOUT;
8471 		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8472 			timeout = IPR_INTERNAL_TIMEOUT;
8473 		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8474 			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8475 		else
8476 			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8477 
8478 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8479 
8480 		rc = IPR_RC_JOB_RETURN;
8481 		ipr_cmd->job_step = ipr_reset_ucode_download;
8482 	} else
8483 		ipr_cmd->job_step = ipr_reset_alert;
8484 
8485 	LEAVE;
8486 	return rc;
8487 }
8488 
8489 /**
8490  * ipr_reset_ioa_job - Adapter reset job
8491  * @ipr_cmd:	ipr command struct
8492  *
8493  * Description: This function is the job router for the adapter reset job.
8494  *
8495  * Return value:
8496  * 	none
8497  **/
8498 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8499 {
8500 	u32 rc, ioasc;
8501 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8502 
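	/*
	 * Run job steps back to back: IPR_RC_JOB_CONTINUE lets the next
	 * step run immediately, while IPR_RC_JOB_RETURN means the step
	 * queued asynchronous work and this routine will be re-entered
	 * when that work completes.
	 */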
8503 	do {
8504 		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8505 
8506 		if (ioa_cfg->reset_cmd != ipr_cmd) {
8507 			/*
8508 			 * We are doing nested adapter resets and this is
8509 			 * not the current reset job.
8510 			 */
8511 			list_add_tail(&ipr_cmd->queue,
8512 					&ipr_cmd->hrrq->hrrq_free_q);
8513 			return;
8514 		}
8515 
8516 		if (IPR_IOASC_SENSE_KEY(ioasc)) {
8517 			rc = ipr_cmd->job_step_failed(ipr_cmd);
8518 			if (rc == IPR_RC_JOB_RETURN)
8519 				return;
8520 		}
8521 
8522 		ipr_reinit_ipr_cmnd(ipr_cmd);
8523 		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8524 		rc = ipr_cmd->job_step(ipr_cmd);
8525 	} while (rc == IPR_RC_JOB_CONTINUE);
8526 }
8527 
8528 /**
8529  * _ipr_initiate_ioa_reset - Initiate an adapter reset
8530  * @ioa_cfg:		ioa config struct
8531  * @job_step:		first job step of reset job
8532  * @shutdown_type:	shutdown type
8533  *
8534  * Description: This function will initiate the reset of the given adapter
8535  * starting at the selected job step.
8536  * If the caller needs to wait on the completion of the reset,
8537  * the caller must sleep on the reset_wait_q.
8538  *
8539  * Return value:
8540  * 	none
8541  **/
8542 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8543 				    int (*job_step) (struct ipr_cmnd *),
8544 				    enum ipr_shutdown_type shutdown_type)
8545 {
8546 	struct ipr_cmnd *ipr_cmd;
8547 	int i;
8548 
8549 	ioa_cfg->in_reset_reload = 1;
8550 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8551 		spin_lock(&ioa_cfg->hrrq[i]._lock);
8552 		ioa_cfg->hrrq[i].allow_cmds = 0;
8553 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
8554 	}
8555 	wmb();
8556 	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8557 		scsi_block_requests(ioa_cfg->host);
8558 
8559 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8560 	ioa_cfg->reset_cmd = ipr_cmd;
8561 	ipr_cmd->job_step = job_step;
8562 	ipr_cmd->u.shutdown_type = shutdown_type;
8563 
8564 	ipr_reset_ioa_job(ipr_cmd);
8565 }
8566 
8567 /**
8568  * ipr_initiate_ioa_reset - Initiate an adapter reset
8569  * @ioa_cfg:		ioa config struct
8570  * @shutdown_type:	shutdown type
8571  *
8572  * Description: This function will initiate the reset of the given adapter.
8573  * If the caller needs to wait on the completion of the reset,
8574  * the caller must sleep on the reset_wait_q.
8575  *
8576  * Return value:
8577  * 	none
8578  **/
8579 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8580 				   enum ipr_shutdown_type shutdown_type)
8581 {
8582 	int i;
8583 
8584 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8585 		return;
8586 
8587 	if (ioa_cfg->in_reset_reload) {
8588 		if (ioa_cfg->sdt_state == GET_DUMP)
8589 			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8590 		else if (ioa_cfg->sdt_state == READ_DUMP)
8591 			ioa_cfg->sdt_state = ABORT_DUMP;
8592 	}
8593 
8594 	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8595 		dev_err(&ioa_cfg->pdev->dev,
8596 			"IOA taken offline - error recovery failed\n");
8597 
8598 		ioa_cfg->reset_retries = 0;
8599 		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8600 			spin_lock(&ioa_cfg->hrrq[i]._lock);
8601 			ioa_cfg->hrrq[i].ioa_is_dead = 1;
8602 			spin_unlock(&ioa_cfg->hrrq[i]._lock);
8603 		}
8604 		wmb();
8605 
8606 		if (ioa_cfg->in_ioa_bringdown) {
8607 			ioa_cfg->reset_cmd = NULL;
8608 			ioa_cfg->in_reset_reload = 0;
8609 			ipr_fail_all_ops(ioa_cfg);
8610 			wake_up_all(&ioa_cfg->reset_wait_q);
8611 
8612 			if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8613 				spin_unlock_irq(ioa_cfg->host->host_lock);
8614 				scsi_unblock_requests(ioa_cfg->host);
8615 				spin_lock_irq(ioa_cfg->host->host_lock);
8616 			}
8617 			return;
8618 		} else {
8619 			ioa_cfg->in_ioa_bringdown = 1;
8620 			shutdown_type = IPR_SHUTDOWN_NONE;
8621 		}
8622 	}
8623 
8624 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8625 				shutdown_type);
8626 }
8627 
8628 /**
8629  * ipr_reset_freeze - Hold off all I/O activity
8630  * @ipr_cmd:	ipr command struct
8631  *
8632  * Description: If the PCI slot is frozen, hold off all I/O
8633  * activity; then, as soon as the slot is available again,
8634  * initiate an adapter reset.
8635  */
8636 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8637 {
8638 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8639 	int i;
8640 
8641 	/* Disallow new interrupts to avoid looping on interrupts while the slot is frozen */
8642 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8643 		spin_lock(&ioa_cfg->hrrq[i]._lock);
8644 		ioa_cfg->hrrq[i].allow_interrupts = 0;
8645 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
8646 	}
8647 	wmb();
8648 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8649 	ipr_cmd->done = ipr_reset_ioa_job;
8650 	return IPR_RC_JOB_RETURN;
8651 }
8652 
8653 /**
8654  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8655  * @pdev:	PCI device struct
8656  *
8657  * Description: This routine is called to tell us that the MMIO
8658  * access to the IOA has been restored
8659  */
8660 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8661 {
8662 	unsigned long flags = 0;
8663 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8664 
8665 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8666 	if (!ioa_cfg->probe_done)
8667 		pci_save_state(pdev);
8668 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8669 	return PCI_ERS_RESULT_NEED_RESET;
8670 }
8671 
8672 /**
8673  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8674  * @pdev:	PCI device struct
8675  *
8676  * Description: This routine is called to tell us that the PCI bus
8677  * is down. Can't do anything here, except put the device driver
8678  * into a holding pattern, waiting for the PCI bus to come back.
8679  */
8680 static void ipr_pci_frozen(struct pci_dev *pdev)
8681 {
8682 	unsigned long flags = 0;
8683 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8684 
8685 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8686 	if (ioa_cfg->probe_done)
8687 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8688 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8689 }
8690 
8691 /**
8692  * ipr_pci_slot_reset - Called when PCI slot has been reset.
8693  * @pdev:	PCI device struct
8694  *
8695  * Description: This routine is called by the pci error recovery
8696  * code after the PCI slot has been reset, just before we
8697  * should resume normal operations.
8698  */
8699 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8700 {
8701 	unsigned long flags = 0;
8702 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8703 
8704 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8705 	if (ioa_cfg->probe_done) {
8706 		if (ioa_cfg->needs_warm_reset)
8707 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8708 		else
8709 			_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8710 						IPR_SHUTDOWN_NONE);
8711 	} else
8712 		wake_up_all(&ioa_cfg->eeh_wait_q);
8713 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8714 	return PCI_ERS_RESULT_RECOVERED;
8715 }
8716 
8717 /**
8718  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8719  * @pdev:	PCI device struct
8720  *
8721  * Description: This routine is called when the PCI bus has
8722  * permanently failed.
8723  */
8724 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8725 {
8726 	unsigned long flags = 0;
8727 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8728 	int i;
8729 
8730 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8731 	if (ioa_cfg->probe_done) {
8732 		if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8733 			ioa_cfg->sdt_state = ABORT_DUMP;
8734 		ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8735 		ioa_cfg->in_ioa_bringdown = 1;
8736 		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8737 			spin_lock(&ioa_cfg->hrrq[i]._lock);
8738 			ioa_cfg->hrrq[i].allow_cmds = 0;
8739 			spin_unlock(&ioa_cfg->hrrq[i]._lock);
8740 		}
8741 		wmb();
8742 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8743 	} else
8744 		wake_up_all(&ioa_cfg->eeh_wait_q);
8745 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8746 }
8747 
8748 /**
8749  * ipr_pci_error_detected - Called when a PCI error is detected.
8750  * @pdev:	PCI device struct
8751  * @state:	PCI channel state
8752  *
8753  * Description: Called when a PCI error is detected.
8754  *
8755  * Return value:
8756  * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8757  */
8758 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8759 					       pci_channel_state_t state)
8760 {
8761 	switch (state) {
8762 	case pci_channel_io_frozen:
8763 		ipr_pci_frozen(pdev);
8764 		return PCI_ERS_RESULT_CAN_RECOVER;
8765 	case pci_channel_io_perm_failure:
8766 		ipr_pci_perm_failure(pdev);
8767 		return PCI_ERS_RESULT_DISCONNECT;
8769 	default:
8770 		break;
8771 	}
8772 	return PCI_ERS_RESULT_NEED_RESET;
8773 }
8774 
8775 /**
8776  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8777  * @ioa_cfg:	ioa cfg struct
8778  *
8779  * Description: This is the second phase of adapter initialization.
8780  * This function takes care of initializing the adapter to the point
8781  * where it can accept new commands.
8782  *
8783  * Return value:
8784  * 	0 on success / -EIO on failure
8785  **/
8786 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8787 {
8788 	int rc = 0;
8789 	unsigned long host_lock_flags = 0;
8790 
8791 	ENTER;
8792 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8793 	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg addr: 0x%p\n", ioa_cfg);
8794 	ioa_cfg->probe_done = 1;
8795 	if (ioa_cfg->needs_hard_reset) {
8796 		ioa_cfg->needs_hard_reset = 0;
8797 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8798 	} else
8799 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8800 					IPR_SHUTDOWN_NONE);
8801 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8802 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8803 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8804 
8805 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8806 		rc = -EIO;
8807 	} else if (ipr_invalid_adapter(ioa_cfg)) {
8808 		if (!ipr_testmode)
8809 			rc = -EIO;
8810 
8811 		dev_err(&ioa_cfg->pdev->dev,
8812 			"Adapter not supported in this hardware configuration.\n");
8813 	}
8814 
8815 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8816 
8817 	LEAVE;
8818 	return rc;
8819 }
8820 
8821 /**
8822  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8823  * @ioa_cfg:	ioa config struct
8824  *
8825  * Return value:
8826  * 	none
8827  **/
8828 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8829 {
8830 	int i;
8831 
8832 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8833 		if (ioa_cfg->ipr_cmnd_list[i])
8834 			pci_pool_free(ioa_cfg->ipr_cmd_pool,
8835 				      ioa_cfg->ipr_cmnd_list[i],
8836 				      ioa_cfg->ipr_cmnd_list_dma[i]);
8837 
8838 		ioa_cfg->ipr_cmnd_list[i] = NULL;
8839 	}
8840 
8841 	if (ioa_cfg->ipr_cmd_pool)
8842 		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
8843 
8844 	kfree(ioa_cfg->ipr_cmnd_list);
8845 	kfree(ioa_cfg->ipr_cmnd_list_dma);
8846 	ioa_cfg->ipr_cmnd_list = NULL;
8847 	ioa_cfg->ipr_cmnd_list_dma = NULL;
8848 	ioa_cfg->ipr_cmd_pool = NULL;
8849 }
8850 
8851 /**
8852  * ipr_free_mem - Frees memory allocated for an adapter
8853  * @ioa_cfg:	ioa cfg struct
8854  *
8855  * Return value:
8856  * 	nothing
8857  **/
8858 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8859 {
8860 	int i;
8861 
8862 	kfree(ioa_cfg->res_entries);
8863 	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8864 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8865 	ipr_free_cmd_blks(ioa_cfg);
8866 
8867 	for (i = 0; i < ioa_cfg->hrrq_num; i++)
8868 		pci_free_consistent(ioa_cfg->pdev,
8869 					sizeof(u32) * ioa_cfg->hrrq[i].size,
8870 					ioa_cfg->hrrq[i].host_rrq,
8871 					ioa_cfg->hrrq[i].host_rrq_dma);
8872 
8873 	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8874 			    ioa_cfg->u.cfg_table,
8875 			    ioa_cfg->cfg_table_dma);
8876 
8877 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
8878 		pci_free_consistent(ioa_cfg->pdev,
8879 				    sizeof(struct ipr_hostrcb),
8880 				    ioa_cfg->hostrcb[i],
8881 				    ioa_cfg->hostrcb_dma[i]);
8882 	}
8883 
8884 	ipr_free_dump(ioa_cfg);
8885 	kfree(ioa_cfg->trace);
8886 }
8887 
8888 /**
8889  * ipr_free_all_resources - Free all allocated resources for an adapter.
8890  * @ioa_cfg:	ioa config struct
8891  *
8892  * This function frees all allocated resources for the
8893  * specified adapter.
8894  *
8895  * Return value:
8896  * 	none
8897  **/
8898 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8899 {
8900 	struct pci_dev *pdev = ioa_cfg->pdev;
8901 
8902 	ENTER;
8903 	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8904 	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
8905 		int i;
8906 		for (i = 0; i < ioa_cfg->nvectors; i++)
8907 			free_irq(ioa_cfg->vectors_info[i].vec,
8908 				&ioa_cfg->hrrq[i]);
8909 	} else
8910 		free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8911 
8912 	if (ioa_cfg->intr_flag == IPR_USE_MSI) {
8913 		pci_disable_msi(pdev);
8914 		ioa_cfg->intr_flag &= ~IPR_USE_MSI;
8915 	} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
8916 		pci_disable_msix(pdev);
8917 		ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
8918 	}
8919 
8920 	iounmap(ioa_cfg->hdw_dma_regs);
8921 	pci_release_regions(pdev);
8922 	ipr_free_mem(ioa_cfg);
8923 	scsi_host_put(ioa_cfg->host);
8924 	pci_disable_device(pdev);
8925 	LEAVE;
8926 }
8927 
8928 /**
8929  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8930  * @ioa_cfg:	ioa config struct
8931  *
8932  * Return value:
8933  * 	0 on success / -ENOMEM on allocation failure
8934  **/
8935 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8936 {
8937 	struct ipr_cmnd *ipr_cmd;
8938 	struct ipr_ioarcb *ioarcb;
8939 	dma_addr_t dma_addr;
8940 	int i, entries_each_hrrq, hrrq_id = 0;
8941 
8942 	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
8943 						sizeof(struct ipr_cmnd), 512, 0);
8944 
8945 	if (!ioa_cfg->ipr_cmd_pool)
8946 		return -ENOMEM;
8947 
8948 	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8949 	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8950 
8951 	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8952 		ipr_free_cmd_blks(ioa_cfg);
8953 		return -ENOMEM;
8954 	}
8955 
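	/*
	 * Carve the command blocks up among the HRRQs.  With multiple
	 * queues, queue 0 is reserved for internal commands and the
	 * remaining blocks are split evenly across the other queues;
	 * with a single queue it gets every command block.
	 */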
8956 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8957 		if (ioa_cfg->hrrq_num > 1) {
8958 			if (i == 0) {
8959 				entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8960 				ioa_cfg->hrrq[i].min_cmd_id = 0;
8961 					ioa_cfg->hrrq[i].max_cmd_id =
8962 						(entries_each_hrrq - 1);
8963 			} else {
8964 				entries_each_hrrq =
8965 					IPR_NUM_BASE_CMD_BLKS/
8966 					(ioa_cfg->hrrq_num - 1);
8967 				ioa_cfg->hrrq[i].min_cmd_id =
8968 					IPR_NUM_INTERNAL_CMD_BLKS +
8969 					(i - 1) * entries_each_hrrq;
8970 				ioa_cfg->hrrq[i].max_cmd_id =
8971 					(IPR_NUM_INTERNAL_CMD_BLKS +
8972 					i * entries_each_hrrq - 1);
8973 			}
8974 		} else {
8975 			entries_each_hrrq = IPR_NUM_CMD_BLKS;
8976 			ioa_cfg->hrrq[i].min_cmd_id = 0;
8977 			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8978 		}
8979 		ioa_cfg->hrrq[i].size = entries_each_hrrq;
8980 	}
8981 
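	/*
	 * Sanity check the queue count, then hand any command blocks
	 * left over by the integer division above to the last HRRQ.
	 */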
8982 	BUG_ON(ioa_cfg->hrrq_num == 0);
8983 
8984 	i = IPR_NUM_CMD_BLKS -
8985 		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
8986 	if (i > 0) {
8987 		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
8988 		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
8989 	}
8990 
8991 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8992 		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8993 
8994 		if (!ipr_cmd) {
8995 			ipr_free_cmd_blks(ioa_cfg);
8996 			return -ENOMEM;
8997 		}
8998 
8999 		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9000 		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9001 		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9002 
9003 		ioarcb = &ipr_cmd->ioarcb;
9004 		ipr_cmd->dma_addr = dma_addr;
9005 		if (ioa_cfg->sis64)
9006 			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9007 		else
9008 			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9009 
9010 		ioarcb->host_response_handle = cpu_to_be32(i << 2);
9011 		if (ioa_cfg->sis64) {
9012 			ioarcb->u.sis64_addr_data.data_ioadl_addr =
9013 				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9014 			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9015 				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9016 		} else {
9017 			ioarcb->write_ioadl_addr =
9018 				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9019 			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9020 			ioarcb->ioasa_host_pci_addr =
9021 				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9022 		}
9023 		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9024 		ipr_cmd->cmd_index = i;
9025 		ipr_cmd->ioa_cfg = ioa_cfg;
9026 		ipr_cmd->sense_buffer_dma = dma_addr +
9027 			offsetof(struct ipr_cmnd, sense_buffer);
9028 
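		/*
		 * Assign this command block to the current HRRQ and move
		 * on to the next queue once its max_cmd_id is reached.
		 */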
9029 		ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9030 		ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9031 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9032 		if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9033 			hrrq_id++;
9034 	}
9035 
9036 	return 0;
9037 }
9038 
9039 /**
9040  * ipr_alloc_mem - Allocate memory for an adapter
9041  * @ioa_cfg:	ioa config struct
9042  *
9043  * Return value:
9044  * 	0 on success / non-zero for error
9045  **/
9046 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9047 {
9048 	struct pci_dev *pdev = ioa_cfg->pdev;
9049 	int i, rc = -ENOMEM;
9050 
9051 	ENTER;
9052 	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9053 				       ioa_cfg->max_devs_supported, GFP_KERNEL);
9054 
9055 	if (!ioa_cfg->res_entries)
9056 		goto out;
9057 
9058 	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9059 		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9060 		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9061 	}
9062 
9063 	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
9064 						sizeof(struct ipr_misc_cbs),
9065 						&ioa_cfg->vpd_cbs_dma);
9066 
9067 	if (!ioa_cfg->vpd_cbs)
9068 		goto out_free_res_entries;
9069 
9070 	if (ipr_alloc_cmd_blks(ioa_cfg))
9071 		goto out_free_vpd_cbs;
9072 
9073 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9074 		ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
9075 					sizeof(u32) * ioa_cfg->hrrq[i].size,
9076 					&ioa_cfg->hrrq[i].host_rrq_dma);
9077 
9078 		if (!ioa_cfg->hrrq[i].host_rrq)  {
9079 			while (--i >= 0)
9080 				pci_free_consistent(pdev,
9081 					sizeof(u32) * ioa_cfg->hrrq[i].size,
9082 					ioa_cfg->hrrq[i].host_rrq,
9083 					ioa_cfg->hrrq[i].host_rrq_dma);
9084 			goto out_ipr_free_cmd_blocks;
9085 		}
9086 		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9087 	}
9088 
9089 	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
9090 						    ioa_cfg->cfg_table_size,
9091 						    &ioa_cfg->cfg_table_dma);
9092 
9093 	if (!ioa_cfg->u.cfg_table)
9094 		goto out_free_host_rrq;
9095 
9096 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
9097 		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
9098 							   sizeof(struct ipr_hostrcb),
9099 							   &ioa_cfg->hostrcb_dma[i]);
9100 
9101 		if (!ioa_cfg->hostrcb[i])
9102 			goto out_free_hostrcb_dma;
9103 
9104 		ioa_cfg->hostrcb[i]->hostrcb_dma =
9105 			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9106 		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9107 		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9108 	}
9109 
9110 	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9111 				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9112 
9113 	if (!ioa_cfg->trace)
9114 		goto out_free_hostrcb_dma;
9115 
9116 	rc = 0;
9117 out:
9118 	LEAVE;
9119 	return rc;
9120 
9121 out_free_hostrcb_dma:
9122 	while (i-- > 0) {
9123 		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
9124 				    ioa_cfg->hostrcb[i],
9125 				    ioa_cfg->hostrcb_dma[i]);
9126 	}
9127 	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
9128 			    ioa_cfg->u.cfg_table,
9129 			    ioa_cfg->cfg_table_dma);
9130 out_free_host_rrq:
9131 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9132 		pci_free_consistent(pdev,
9133 				sizeof(u32) * ioa_cfg->hrrq[i].size,
9134 				ioa_cfg->hrrq[i].host_rrq,
9135 				ioa_cfg->hrrq[i].host_rrq_dma);
9136 	}
9137 out_ipr_free_cmd_blocks:
9138 	ipr_free_cmd_blks(ioa_cfg);
9139 out_free_vpd_cbs:
9140 	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
9141 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9142 out_free_res_entries:
9143 	kfree(ioa_cfg->res_entries);
9144 	goto out;
9145 }
9146 
9147 /**
9148  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9149  * @ioa_cfg:	ioa config struct
9150  *
9151  * Return value:
9152  * 	none
9153  **/
9154 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9155 {
9156 	int i;
9157 
9158 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9159 		ioa_cfg->bus_attr[i].bus = i;
9160 		ioa_cfg->bus_attr[i].qas_enabled = 0;
9161 		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9162 		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9163 			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9164 		else
9165 			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9166 	}
9167 }
9168 
9169 /**
9170  * ipr_init_regs - Initialize IOA registers
9171  * @ioa_cfg:	ioa config struct
9172  *
9173  * Return value:
9174  *	none
9175  **/
9176 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9177 {
9178 	const struct ipr_interrupt_offsets *p;
9179 	struct ipr_interrupts *t;
9180 	void __iomem *base;
9181 
9182 	p = &ioa_cfg->chip_cfg->regs;
9183 	t = &ioa_cfg->regs;
9184 	base = ioa_cfg->hdw_dma_regs;
9185 
9186 	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9187 	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9188 	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9189 	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9190 	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9191 	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9192 	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9193 	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9194 	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9195 	t->ioarrin_reg = base + p->ioarrin_reg;
9196 	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9197 	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9198 	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9199 	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9200 	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9201 	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9202 
9203 	if (ioa_cfg->sis64) {
9204 		t->init_feedback_reg = base + p->init_feedback_reg;
9205 		t->dump_addr_reg = base + p->dump_addr_reg;
9206 		t->dump_data_reg = base + p->dump_data_reg;
9207 		t->endian_swap_reg = base + p->endian_swap_reg;
9208 	}
9209 }
9210 
9211 /**
9212  * ipr_init_ioa_cfg - Initialize IOA config struct
9213  * @ioa_cfg:	ioa config struct
9214  * @host:		scsi host struct
9215  * @pdev:		PCI dev struct
9216  *
9217  * Return value:
9218  * 	none
9219  **/
9220 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9221 			     struct Scsi_Host *host, struct pci_dev *pdev)
9222 {
9223 	int i;
9224 
9225 	ioa_cfg->host = host;
9226 	ioa_cfg->pdev = pdev;
9227 	ioa_cfg->log_level = ipr_log_level;
9228 	ioa_cfg->doorbell = IPR_DOORBELL;
9229 	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9230 	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9231 	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9232 	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9233 	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9234 	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9235 
9236 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9237 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9238 	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9239 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9240 	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9241 	init_waitqueue_head(&ioa_cfg->reset_wait_q);
9242 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
9243 	init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9244 	ioa_cfg->sdt_state = INACTIVE;
9245 
9246 	ipr_initialize_bus_attr(ioa_cfg);
9247 	ioa_cfg->max_devs_supported = ipr_max_devs;
9248 
9249 	if (ioa_cfg->sis64) {
9250 		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9251 		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9252 		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9253 			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9254 		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9255 					   + ((sizeof(struct ipr_config_table_entry64)
9256 					       * ioa_cfg->max_devs_supported)));
9257 	} else {
9258 		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9259 		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9260 		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9261 			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9262 		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9263 					   + ((sizeof(struct ipr_config_table_entry)
9264 					       * ioa_cfg->max_devs_supported)));
9265 	}
9266 
9267 	host->max_channel = IPR_MAX_BUS_TO_SCAN;
9268 	host->unique_id = host->host_no;
9269 	host->max_cmd_len = IPR_MAX_CDB_LEN;
9270 	host->can_queue = ioa_cfg->max_cmds;
9271 	pci_set_drvdata(pdev, ioa_cfg);
9272 
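	/*
	 * Initialize every possible HRRQ.  Queue 0 shares the SCSI host
	 * lock; any additional queues use their own per-queue locks.
	 */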
9273 	for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9274 		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9275 		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9276 		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9277 		if (i == 0)
9278 			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9279 		else
9280 			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9281 	}
9282 }
9283 
9284 /**
9285  * ipr_get_chip_info - Find adapter chip information
9286  * @dev_id:		PCI device id struct
9287  *
9288  * Return value:
9289  * 	ptr to chip information on success / NULL on failure
9290  **/
9291 static const struct ipr_chip_t *
9292 ipr_get_chip_info(const struct pci_device_id *dev_id)
9293 {
9294 	int i;
9295 
9296 	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9297 		if (ipr_chip[i].vendor == dev_id->vendor &&
9298 		    ipr_chip[i].device == dev_id->device)
9299 			return &ipr_chip[i];
9300 	return NULL;
9301 }
9302 
9303 /**
9304  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9305  *						during probe time
9306  * @ioa_cfg:	ioa config struct
9307  *
9308  * Return value:
9309  * 	None
9310  **/
9311 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9312 {
9313 	struct pci_dev *pdev = ioa_cfg->pdev;
9314 
9315 	if (pci_channel_offline(pdev)) {
9316 		wait_event_timeout(ioa_cfg->eeh_wait_q,
9317 				   !pci_channel_offline(pdev),
9318 				   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9319 		pci_restore_state(pdev);
9320 	}
9321 }
9322 
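/**
 * ipr_enable_msix - Enable MSI-X interrupts
 * @ioa_cfg:	ioa config struct
 *
 * Description: Allocate between 1 and ipr_number_of_msix MSI-X vectors
 * and record them in vectors_info.
 *
 * Return value:
 * 	0 on success / negative errno on failure
 **/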
9323 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9324 {
9325 	struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9326 	int i, vectors;
9327 
9328 	for (i = 0; i < ARRAY_SIZE(entries); ++i)
9329 		entries[i].entry = i;
9330 
9331 	vectors = pci_enable_msix_range(ioa_cfg->pdev,
9332 					entries, 1, ipr_number_of_msix);
9333 	if (vectors < 0) {
9334 		ipr_wait_for_pci_err_recovery(ioa_cfg);
9335 		return vectors;
9336 	}
9337 
9338 	for (i = 0; i < vectors; i++)
9339 		ioa_cfg->vectors_info[i].vec = entries[i].vector;
9340 	ioa_cfg->nvectors = vectors;
9341 
9342 	return 0;
9343 }
9344 
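/**
 * ipr_enable_msi - Enable MSI interrupts
 * @ioa_cfg:	ioa config struct
 *
 * Description: Allocate between 1 and ipr_number_of_msix MSI vectors
 * and record the resulting IRQ numbers in vectors_info.
 *
 * Return value:
 * 	0 on success / negative errno on failure
 **/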
9345 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9346 {
9347 	int i, vectors;
9348 
9349 	vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9350 	if (vectors < 0) {
9351 		ipr_wait_for_pci_err_recovery(ioa_cfg);
9352 		return vectors;
9353 	}
9354 
9355 	for (i = 0; i < vectors; i++)
9356 		ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9357 	ioa_cfg->nvectors = vectors;
9358 
9359 	return 0;
9360 }
9361 
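/**
 * name_msi_vectors - Build per-vector interrupt names
 * @ioa_cfg:	ioa config struct
 *
 * Description: Generate a "host<N>-<vector>" description string for
 * each allocated vector, used when the IRQs are requested.
 *
 * Return value:
 * 	none
 **/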
9362 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9363 {
9364 	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9365 
9366 	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9367 		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9368 			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9369 		ioa_cfg->vectors_info[vec_idx].
9370 			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9371 	}
9372 }
9373 
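/**
 * ipr_request_other_msi_irqs - Request IRQs for the secondary HRRQ vectors
 * @ioa_cfg:	ioa config struct
 *
 * Description: Request an IRQ for each vector beyond the first, freeing
 * any already requested vectors if a request fails.
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/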
9374 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9375 {
9376 	int i, rc;
9377 
9378 	for (i = 1; i < ioa_cfg->nvectors; i++) {
9379 		rc = request_irq(ioa_cfg->vectors_info[i].vec,
9380 			ipr_isr_mhrrq,
9381 			0,
9382 			ioa_cfg->vectors_info[i].desc,
9383 			&ioa_cfg->hrrq[i]);
9384 		if (rc) {
9385 			while (--i >= 0)
9386 				free_irq(ioa_cfg->vectors_info[i].vec,
9387 					&ioa_cfg->hrrq[i]);
9388 			return rc;
9389 		}
9390 	}
9391 	return 0;
9392 }
9393 
9394 /**
9395  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9396  * @irq:		interrupt number
 * @devp:		pointer to the ioa config struct
9397  *
9398  * Description: Simply set the msi_received flag to 1 indicating that
9399  * Message Signaled Interrupts are supported.
9400  *
9401  * Return value:
9402  * 	IRQ_HANDLED
9403  **/
9404 static irqreturn_t ipr_test_intr(int irq, void *devp)
9405 {
9406 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9407 	unsigned long lock_flags = 0;
9408 	irqreturn_t rc = IRQ_HANDLED;
9409 
9410 	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
9411 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9412 
9413 	ioa_cfg->msi_received = 1;
9414 	wake_up(&ioa_cfg->msi_wait_q);
9415 
9416 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9417 	return rc;
9418 }
9419 
9420 /**
9421  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9422  * @pdev:		PCI device struct
9423  *
9424  * Description: The return value from pci_enable_msi_range() cannot always be
9425  * trusted.  This routine sets up and initiates a test interrupt to determine
9426  * whether the interrupt is received via the ipr_test_intr() service routine.
9427  * If the test fails, the driver will fall back to LSI.
9428  *
9429  * Return value:
9430  * 	0 on success / non-zero on failure
9431  **/
9432 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9433 {
9434 	int rc;
9435 	volatile u32 int_reg;
9436 	unsigned long lock_flags = 0;
9437 
9438 	ENTER;
9439 
9440 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9441 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
9442 	ioa_cfg->msi_received = 0;
9443 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9444 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9445 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9446 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9447 
9448 	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9449 		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9450 	else
9451 		rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9452 	if (rc) {
9453 		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9454 		return rc;
9455 	} else if (ipr_debug)
9456 		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9457 
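	/*
	 * Trigger the test interrupt, then wait up to a second for
	 * ipr_test_intr() to flag that it was received.
	 */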
9458 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9459 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9460 	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9461 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9462 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9463 
9464 	if (!ioa_cfg->msi_received) {
9465 		/* MSI test failed */
9466 		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
9467 		rc = -EOPNOTSUPP;
9468 	} else if (ipr_debug)
9469 		dev_info(&pdev->dev, "MSI test succeeded.\n");
9470 
9471 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9472 
9473 	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9474 		free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9475 	else
9476 		free_irq(pdev->irq, ioa_cfg);
9477 
9478 	LEAVE;
9479 
9480 	return rc;
9481 }
9482 
9483 /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
9484  * @pdev:		PCI device struct
9485  * @dev_id:		PCI device id struct
9486  *
9487  * Return value:
9488  * 	0 on success / non-zero on failure
9489  **/
9490 static int ipr_probe_ioa(struct pci_dev *pdev,
9491 			 const struct pci_device_id *dev_id)
9492 {
9493 	struct ipr_ioa_cfg *ioa_cfg;
9494 	struct Scsi_Host *host;
9495 	unsigned long ipr_regs_pci;
9496 	void __iomem *ipr_regs;
9497 	int rc = PCIBIOS_SUCCESSFUL;
9498 	volatile u32 mask, uproc, interrupts;
9499 	unsigned long lock_flags, driver_lock_flags;
9500 
9501 	ENTER;
9502 
9503 	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9504 	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9505 
9506 	if (!host) {
9507 		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9508 		rc = -ENOMEM;
9509 		goto out;
9510 	}
9511 
9512 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9513 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9514 	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
9515 
9516 	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9517 
9518 	if (!ioa_cfg->ipr_chip) {
9519 		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9520 			dev_id->vendor, dev_id->device);
 		rc = -ENODEV;
9521 		goto out_scsi_host_put;
9522 	}
9523 
9524 	/* set SIS 32 or SIS 64 */
9525 	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9526 	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9527 	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9528 	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
9529 
9530 	if (ipr_transop_timeout)
9531 		ioa_cfg->transop_timeout = ipr_transop_timeout;
9532 	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9533 		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9534 	else
9535 		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9536 
9537 	ioa_cfg->revid = pdev->revision;
9538 
9539 	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9540 
9541 	ipr_regs_pci = pci_resource_start(pdev, 0);
9542 
9543 	rc = pci_request_regions(pdev, IPR_NAME);
9544 	if (rc < 0) {
9545 		dev_err(&pdev->dev,
9546 			"Couldn't register memory range of registers\n");
9547 		goto out_scsi_host_put;
9548 	}
9549 
9550 	rc = pci_enable_device(pdev);
9551 
9552 	if (rc || pci_channel_offline(pdev)) {
9553 		if (pci_channel_offline(pdev)) {
9554 			ipr_wait_for_pci_err_recovery(ioa_cfg);
9555 			rc = pci_enable_device(pdev);
9556 		}
9557 
9558 		if (rc) {
9559 			dev_err(&pdev->dev, "Cannot enable adapter\n");
9560 			ipr_wait_for_pci_err_recovery(ioa_cfg);
9561 			goto out_release_regions;
9562 		}
9563 	}
9564 
9565 	ipr_regs = pci_ioremap_bar(pdev, 0);
9566 
9567 	if (!ipr_regs) {
9568 		dev_err(&pdev->dev,
9569 			"Couldn't map memory range of registers\n");
9570 		rc = -ENOMEM;
9571 		goto out_disable;
9572 	}
9573 
9574 	ioa_cfg->hdw_dma_regs = ipr_regs;
9575 	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9576 	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9577 
9578 	ipr_init_regs(ioa_cfg);
9579 
9580 	if (ioa_cfg->sis64) {
9581 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
9582 		if (rc < 0) {
9583 			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
9584 			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9585 		}
9586 	} else
9587 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9588 
9589 	if (rc < 0) {
9590 		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
9591 		goto cleanup_nomem;
9592 	}
9593 
9594 	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9595 				   ioa_cfg->chip_cfg->cache_line_size);
9596 
9597 	if (rc != PCIBIOS_SUCCESSFUL) {
9598 		dev_err(&pdev->dev, "Write of cache line size failed\n");
9599 		ipr_wait_for_pci_err_recovery(ioa_cfg);
9600 		rc = -EIO;
9601 		goto cleanup_nomem;
9602 	}
9603 
9604 	/* Issue MMIO read to ensure card is not in EEH */
9605 	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9606 	ipr_wait_for_pci_err_recovery(ioa_cfg);
9607 
9608 	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9609 		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9610 			IPR_MAX_MSIX_VECTORS);
9611 		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9612 	}
9613 
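	/* Prefer MSI-X, fall back to MSI, and finally to legacy interrupts */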
9614 	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9615 			ipr_enable_msix(ioa_cfg) == 0)
9616 		ioa_cfg->intr_flag = IPR_USE_MSIX;
9617 	else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9618 			ipr_enable_msi(ioa_cfg) == 0)
9619 		ioa_cfg->intr_flag = IPR_USE_MSI;
9620 	else {
9621 		ioa_cfg->intr_flag = IPR_USE_LSI;
9622 		ioa_cfg->nvectors = 1;
9623 		dev_info(&pdev->dev, "Cannot enable MSI.\n");
9624 	}
9625 
9626 	pci_set_master(pdev);
9627 
9628 	if (pci_channel_offline(pdev)) {
9629 		ipr_wait_for_pci_err_recovery(ioa_cfg);
9630 		pci_set_master(pdev);
9631 		if (pci_channel_offline(pdev)) {
9632 			rc = -EIO;
9633 			goto out_msi_disable;
9634 		}
9635 	}
9636 
9637 	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9638 	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
9639 		rc = ipr_test_msi(ioa_cfg, pdev);
9640 		if (rc == -EOPNOTSUPP) {
9641 			ipr_wait_for_pci_err_recovery(ioa_cfg);
9642 			if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9643 				ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9644 				pci_disable_msi(pdev);
9645 			 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9646 				ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9647 				pci_disable_msix(pdev);
9648 			}
9649 
9650 			ioa_cfg->intr_flag = IPR_USE_LSI;
9651 			ioa_cfg->nvectors = 1;
9652 		}
9653 		else if (rc)
9654 			goto out_msi_disable;
9655 		else {
9656 			if (ioa_cfg->intr_flag == IPR_USE_MSI)
9657 				dev_info(&pdev->dev,
9658 					"Request for %d MSIs succeeded with starting IRQ: %d\n",
9659 					ioa_cfg->nvectors, pdev->irq);
9660 			else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9661 				dev_info(&pdev->dev,
9662 					"Request for %d MSIXs succeeded.\n",
9663 					ioa_cfg->nvectors);
9664 		}
9665 	}
9666 
9667 	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9668 				(unsigned int)num_online_cpus(),
9669 				(unsigned int)IPR_MAX_HRRQ_NUM);
9670 
9671 	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9672 		goto out_msi_disable;
9673 
9674 	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9675 		goto out_msi_disable;
9676 
9677 	rc = ipr_alloc_mem(ioa_cfg);
9678 	if (rc < 0) {
9679 		dev_err(&pdev->dev,
9680 			"Couldn't allocate enough memory for device driver!\n");
9681 		goto out_msi_disable;
9682 	}
9683 
9684 	/* Save away PCI config space for use following IOA reset */
9685 	rc = pci_save_state(pdev);
9686 
9687 	if (rc != PCIBIOS_SUCCESSFUL) {
9688 		dev_err(&pdev->dev, "Failed to save PCI config space\n");
9689 		rc = -EIO;
9690 		goto cleanup_nolog;
9691 	}
9692 
9693 	/*
9694 	 * If HRRQ updated interrupt is not masked, or reset alert is set,
9695 	 * the card is in an unknown state and needs a hard reset
9696 	 */
9697 	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9698 	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9699 	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9700 	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9701 		ioa_cfg->needs_hard_reset = 1;
9702 	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9703 		ioa_cfg->needs_hard_reset = 1;
9704 	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9705 		ioa_cfg->ioa_unit_checked = 1;
9706 
9707 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9708 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9709 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9710 
9711 	if (ioa_cfg->intr_flag == IPR_USE_MSI
9712 			|| ioa_cfg->intr_flag == IPR_USE_MSIX) {
9713 		name_msi_vectors(ioa_cfg);
9714 		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9715 			0,
9716 			ioa_cfg->vectors_info[0].desc,
9717 			&ioa_cfg->hrrq[0]);
9718 		if (!rc)
9719 			rc = ipr_request_other_msi_irqs(ioa_cfg);
9720 	} else {
9721 		rc = request_irq(pdev->irq, ipr_isr,
9722 			 IRQF_SHARED,
9723 			 IPR_NAME, &ioa_cfg->hrrq[0]);
9724 	}
9725 	if (rc) {
9726 		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9727 			pdev->irq, rc);
9728 		goto cleanup_nolog;
9729 	}
9730 
9731 	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9732 	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9733 		ioa_cfg->needs_warm_reset = 1;
9734 		ioa_cfg->reset = ipr_reset_slot_reset;
9735 	} else
9736 		ioa_cfg->reset = ipr_reset_start_bist;
9737 
9738 	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9739 	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9740 	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9741 
9742 	LEAVE;
9743 out:
9744 	return rc;
9745 
9746 cleanup_nolog:
9747 	ipr_free_mem(ioa_cfg);
9748 out_msi_disable:
9749 	ipr_wait_for_pci_err_recovery(ioa_cfg);
9750 	if (ioa_cfg->intr_flag == IPR_USE_MSI)
9751 		pci_disable_msi(pdev);
9752 	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9753 		pci_disable_msix(pdev);
9754 cleanup_nomem:
9755 	iounmap(ipr_regs);
9756 out_disable:
9757 	pci_disable_device(pdev);
9758 out_release_regions:
9759 	pci_release_regions(pdev);
9760 out_scsi_host_put:
9761 	scsi_host_put(host);
9762 	goto out;
9763 }
9764 
9765 /**
9766  * ipr_scan_vsets - Scans for VSET devices
9767  * @ioa_cfg:	ioa config struct
9768  *
9769  * Description: Since VSET resources do not follow SAM (we can have
9770  * sparse LUNs with no LUN 0), we have to scan for them ourselves.
9771  *
9772  * Return value:
9773  * 	none
9774  **/
9775 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
9776 {
9777 	int target, lun;
9778 
9779 	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
9780 		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
9781 			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
9782 }
9783 
9784 /**
9785  * ipr_initiate_ioa_bringdown - Bring down an adapter
9786  * @ioa_cfg:		ioa config struct
9787  * @shutdown_type:	shutdown type
9788  *
9789  * Description: This function will initiate bringing down the adapter.
9790  * This consists of issuing an IOA shutdown to the adapter
9791  * to flush the cache, and running BIST.
9792  * If the caller needs to wait on the completion of the reset,
9793  * the caller must sleep on the reset_wait_q.
9794  *
9795  * Return value:
9796  * 	none
9797  **/
9798 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9799 				       enum ipr_shutdown_type shutdown_type)
9800 {
9801 	ENTER;
9802 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9803 		ioa_cfg->sdt_state = ABORT_DUMP;
9804 	ioa_cfg->reset_retries = 0;
9805 	ioa_cfg->in_ioa_bringdown = 1;
9806 	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9807 	LEAVE;
9808 }
9809 
9810 /**
9811  * __ipr_remove - Remove a single adapter
9812  * @pdev:	pci device struct
9813  *
9814  * Adapter hot plug remove entry point.
9815  *
9816  * Return value:
9817  * 	none
9818  **/
9819 static void __ipr_remove(struct pci_dev *pdev)
9820 {
9821 	unsigned long host_lock_flags = 0;
9822 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9823 	int i;
9824 	unsigned long driver_lock_flags;
9825 	ENTER;
9826 
9827 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9828 	while (ioa_cfg->in_reset_reload) {
9829 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9830 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9831 		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9832 	}
9833 
9834 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9835 		spin_lock(&ioa_cfg->hrrq[i]._lock);
9836 		ioa_cfg->hrrq[i].removing_ioa = 1;
9837 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
9838 	}
9839 	wmb();
9840 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9841 
9842 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9843 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9844 	flush_work(&ioa_cfg->work_q);
9845 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9846 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9847 
9848 	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9849 	list_del(&ioa_cfg->queue);
9850 	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9851 
9852 	if (ioa_cfg->sdt_state == ABORT_DUMP)
9853 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9854 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9855 
9856 	ipr_free_all_resources(ioa_cfg);
9857 
9858 	LEAVE;
9859 }
9860 
9861 /**
9862  * ipr_remove - IOA hot plug remove entry point
9863  * @pdev:	pci device struct
9864  *
9865  * Adapter hot plug remove entry point.
9866  *
9867  * Return value:
9868  * 	none
9869  **/
9870 static void ipr_remove(struct pci_dev *pdev)
9871 {
9872 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9873 
9874 	ENTER;
9875 
9876 	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9877 			      &ipr_trace_attr);
9878 	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9879 			     &ipr_dump_attr);
9880 	scsi_remove_host(ioa_cfg->host);
9881 
9882 	__ipr_remove(pdev);
9883 
9884 	LEAVE;
9885 }
9886 
9887 /**
9888  * ipr_probe - Adapter hot plug add entry point
9889  * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
9890  * Return value:
9891  * 	0 on success / non-zero on failure
9892  **/
9893 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
9894 {
9895 	struct ipr_ioa_cfg *ioa_cfg;
9896 	int rc, i;
9897 
9898 	rc = ipr_probe_ioa(pdev, dev_id);
9899 
9900 	if (rc)
9901 		return rc;
9902 
9903 	ioa_cfg = pci_get_drvdata(pdev);
9904 	rc = ipr_probe_ioa_part2(ioa_cfg);
9905 
9906 	if (rc) {
9907 		__ipr_remove(pdev);
9908 		return rc;
9909 	}
9910 
9911 	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9912 
9913 	if (rc) {
9914 		__ipr_remove(pdev);
9915 		return rc;
9916 	}
9917 
9918 	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
9919 				   &ipr_trace_attr);
9920 
9921 	if (rc) {
9922 		scsi_remove_host(ioa_cfg->host);
9923 		__ipr_remove(pdev);
9924 		return rc;
9925 	}
9926 
9927 	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
9928 				   &ipr_dump_attr);
9929 
9930 	if (rc) {
9931 		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9932 				      &ipr_trace_attr);
9933 		scsi_remove_host(ioa_cfg->host);
9934 		__ipr_remove(pdev);
9935 		return rc;
9936 	}
9937 
9938 	scsi_scan_host(ioa_cfg->host);
9939 	ipr_scan_vsets(ioa_cfg);
9940 	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
9941 	ioa_cfg->allow_ml_add_del = 1;
9942 	ioa_cfg->host->max_channel = IPR_VSET_BUS;
9943 	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
9944 
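	/*
	 * On SIS-64 adapters with more than one vector, enable blk-iopoll
	 * on the secondary HRRQs; HRRQ 0 is never polled.
	 */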
9945 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9946 		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
9947 			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
9948 					ioa_cfg->iopoll_weight, ipr_iopoll);
9949 			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
9950 		}
9951 	}
9952 
9953 	schedule_work(&ioa_cfg->work_q);
9954 	return 0;
9955 }
9956 
9957 /**
9958  * ipr_shutdown - Shutdown handler.
9959  * @pdev:	pci device struct
9960  *
9961  * This function is invoked upon system shutdown/reboot. It will issue
9962  * an adapter shutdown to the adapter to flush the write cache.
9963  *
9964  * Return value:
9965  * 	none
9966  **/
9967 static void ipr_shutdown(struct pci_dev *pdev)
9968 {
9969 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9970 	unsigned long lock_flags = 0;
9971 	int i;
9972 
9973 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9974 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9975 		ioa_cfg->iopoll_weight = 0;
9976 		for (i = 1; i < ioa_cfg->hrrq_num; i++)
9977 			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
9978 	}
9979 
9980 	while (ioa_cfg->in_reset_reload) {
9981 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9982 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9983 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9984 	}
9985 
9986 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9987 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9988 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9989 }
9990 
9991 static struct pci_device_id ipr_pci_table[] = {
9992 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9993 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
9994 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9995 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
9996 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9997 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
9998 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9999 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10000 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10001 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10002 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10003 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10004 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10005 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10006 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10007 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10008 		IPR_USE_LONG_TRANSOP_TIMEOUT },
10009 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10010 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10011 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10012 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10013 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
10014 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10015 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10016 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
10017 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10018 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10019 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10020 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10021 	      IPR_USE_LONG_TRANSOP_TIMEOUT},
10022 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10023 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10024 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
10025 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10026 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10027 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
10028 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10029 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10030 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10031 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10032 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10033 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10034 	      IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10035 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10036 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10037 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10038 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10039 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10040 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10041 		IPR_USE_LONG_TRANSOP_TIMEOUT },
10042 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10043 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10044 		IPR_USE_LONG_TRANSOP_TIMEOUT },
10045 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10046 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10047 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10048 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10049 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10050 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10051 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10052 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10053 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10054 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10055 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10056 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10057 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10058 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10059 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10060 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10061 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10062 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10063 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10064 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10065 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10066 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10067 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10068 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10069 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10070 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10071 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10072 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10073 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10074 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10075 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10076 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10077 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10078 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10079 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10080 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10081 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10082 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10083 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10084 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10085 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10086 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10087 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10088 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10089 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10090 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10091 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10092 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10093 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10094 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10095 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10096 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10097 	{ }
10098 };
10099 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10100 
10101 static const struct pci_error_handlers ipr_err_handler = {
10102 	.error_detected = ipr_pci_error_detected,
10103 	.mmio_enabled = ipr_pci_mmio_enabled,
10104 	.slot_reset = ipr_pci_slot_reset,
10105 };
10106 
10107 static struct pci_driver ipr_driver = {
10108 	.name = IPR_NAME,
10109 	.id_table = ipr_pci_table,
10110 	.probe = ipr_probe,
10111 	.remove = ipr_remove,
10112 	.shutdown = ipr_shutdown,
10113 	.err_handler = &ipr_err_handler,
10114 };
10115 
10116 /**
10117  * ipr_halt_done - Shutdown prepare completion
10118  *
10119  * Return value:
10120  * 	none
10121  **/
10122 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10123 {
10124 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10125 }
10126 
10127 /**
10128  * ipr_halt - Issue shutdown prepare to all adapters
10129  *
10130  * Return value:
10131  * 	NOTIFY_OK on success / NOTIFY_DONE on failure
10132  **/
10133 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10134 {
10135 	struct ipr_cmnd *ipr_cmd;
10136 	struct ipr_ioa_cfg *ioa_cfg;
10137 	unsigned long flags = 0, driver_lock_flags;
10138 
10139 	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10140 		return NOTIFY_DONE;
10141 
10142 	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10143 
10144 	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10145 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10146 		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
10147 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10148 			continue;
10149 		}
10150 
10151 		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10152 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10153 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10154 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10155 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10156 
10157 		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10158 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10159 	}
10160 	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10161 
10162 	return NOTIFY_OK;
10163 }
10164 
10165 static struct notifier_block ipr_notifier = {
10166 	.notifier_call = ipr_halt,
10167 };
10168 
10169 /**
10170  * ipr_init - Module entry point
10171  *
10172  * Return value:
10173  * 	0 on success / negative value on failure
10174  **/
10175 static int __init ipr_init(void)
10176 {
10177 	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10178 		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10179 
10180 	register_reboot_notifier(&ipr_notifier);
10181 	return pci_register_driver(&ipr_driver);
10182 }
10183 
10184 /**
10185  * ipr_exit - Module unload
10186  *
10187  * Module unload entry point.
10188  *
10189  * Return value:
10190  * 	none
10191  **/
10192 static void __exit ipr_exit(void)
10193 {
10194 	unregister_reboot_notifier(&ipr_notifier);
10195 	pci_unregister_driver(&ipr_driver);
10196 }
10197 
10198 module_init(ipr_init);
10199 module_exit(ipr_exit);
10200