xref: /openbmc/linux/drivers/scsi/ipr.c (revision 6a108a14)
1 /*
2  * ipr.c -- driver for IBM Power Linux RAID adapters
3  *
4  * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5  *
6  * Copyright (C) 2003, 2004 IBM Corporation
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23 
24 /*
25  * Notes:
26  *
27  * This driver is used to control the following SCSI adapters:
28  *
29  * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30  *
31  * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32  *              PCI-X Dual Channel Ultra 320 SCSI Adapter
33  *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34  *              Embedded SCSI adapter on p615 and p655 systems
35  *
36  * Supported Hardware Features:
37  *	- Ultra 320 SCSI controller
38  *	- PCI-X host interface
39  *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40  *	- Non-Volatile Write Cache
41  *	- Supports attachment of non-RAID disks, tape, and optical devices
42  *	- RAID Levels 0, 5, 10
43  *	- Hot spare
44  *	- Background Parity Checking
45  *	- Background Data Scrubbing
46  *	- Ability to increase the capacity of an existing RAID 5 disk array
47  *		by adding disks
48  *
49  * Driver Features:
50  *	- Tagged command queuing
51  *	- Adapter microcode download
52  *	- PCI hot plug
53  *	- SCSI device hot plug
54  *
55  */
56 
57 #include <linux/fs.h>
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/ioport.h>
64 #include <linux/delay.h>
65 #include <linux/pci.h>
66 #include <linux/wait.h>
67 #include <linux/spinlock.h>
68 #include <linux/sched.h>
69 #include <linux/interrupt.h>
70 #include <linux/blkdev.h>
71 #include <linux/firmware.h>
72 #include <linux/module.h>
73 #include <linux/moduleparam.h>
74 #include <linux/libata.h>
75 #include <linux/hdreg.h>
76 #include <linux/reboot.h>
77 #include <linux/stringify.h>
78 #include <asm/io.h>
79 #include <asm/irq.h>
80 #include <asm/processor.h>
81 #include <scsi/scsi.h>
82 #include <scsi/scsi_host.h>
83 #include <scsi/scsi_tcq.h>
84 #include <scsi/scsi_eh.h>
85 #include <scsi/scsi_cmnd.h>
86 #include "ipr.h"
87 
88 /*
89  *   Global Data
90  */
91 static LIST_HEAD(ipr_ioa_head);
92 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
93 static unsigned int ipr_max_speed = 1;
94 static int ipr_testmode = 0;
95 static unsigned int ipr_fastfail = 0;
96 static unsigned int ipr_transop_timeout = 0;
97 static unsigned int ipr_debug = 0;
98 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
99 static unsigned int ipr_dual_ioa_raid = 1;
100 static DEFINE_SPINLOCK(ipr_driver_lock);
101 
102 /* This table describes the differences between DMA controller chips */
103 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
104 	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
105 		.mailbox = 0x0042C,
106 		.cache_line_size = 0x20,
107 		{
108 			.set_interrupt_mask_reg = 0x0022C,
109 			.clr_interrupt_mask_reg = 0x00230,
110 			.clr_interrupt_mask_reg32 = 0x00230,
111 			.sense_interrupt_mask_reg = 0x0022C,
112 			.sense_interrupt_mask_reg32 = 0x0022C,
113 			.clr_interrupt_reg = 0x00228,
114 			.clr_interrupt_reg32 = 0x00228,
115 			.sense_interrupt_reg = 0x00224,
116 			.sense_interrupt_reg32 = 0x00224,
117 			.ioarrin_reg = 0x00404,
118 			.sense_uproc_interrupt_reg = 0x00214,
119 			.sense_uproc_interrupt_reg32 = 0x00214,
120 			.set_uproc_interrupt_reg = 0x00214,
121 			.set_uproc_interrupt_reg32 = 0x00214,
122 			.clr_uproc_interrupt_reg = 0x00218,
123 			.clr_uproc_interrupt_reg32 = 0x00218
124 		}
125 	},
126 	{ /* Snipe and Scamp */
127 		.mailbox = 0x0052C,
128 		.cache_line_size = 0x20,
129 		{
130 			.set_interrupt_mask_reg = 0x00288,
131 			.clr_interrupt_mask_reg = 0x0028C,
132 			.clr_interrupt_mask_reg32 = 0x0028C,
133 			.sense_interrupt_mask_reg = 0x00288,
134 			.sense_interrupt_mask_reg32 = 0x00288,
135 			.clr_interrupt_reg = 0x00284,
136 			.clr_interrupt_reg32 = 0x00284,
137 			.sense_interrupt_reg = 0x00280,
138 			.sense_interrupt_reg32 = 0x00280,
139 			.ioarrin_reg = 0x00504,
140 			.sense_uproc_interrupt_reg = 0x00290,
141 			.sense_uproc_interrupt_reg32 = 0x00290,
142 			.set_uproc_interrupt_reg = 0x00290,
143 			.set_uproc_interrupt_reg32 = 0x00290,
144 			.clr_uproc_interrupt_reg = 0x00294,
145 			.clr_uproc_interrupt_reg32 = 0x00294
146 		}
147 	},
148 	{ /* CRoC */
149 		.mailbox = 0x00044,
150 		.cache_line_size = 0x20,
151 		{
152 			.set_interrupt_mask_reg = 0x00010,
153 			.clr_interrupt_mask_reg = 0x00018,
154 			.clr_interrupt_mask_reg32 = 0x0001C,
155 			.sense_interrupt_mask_reg = 0x00010,
156 			.sense_interrupt_mask_reg32 = 0x00014,
157 			.clr_interrupt_reg = 0x00008,
158 			.clr_interrupt_reg32 = 0x0000C,
159 			.sense_interrupt_reg = 0x00000,
160 			.sense_interrupt_reg32 = 0x00004,
161 			.ioarrin_reg = 0x00070,
162 			.sense_uproc_interrupt_reg = 0x00020,
163 			.sense_uproc_interrupt_reg32 = 0x00024,
164 			.set_uproc_interrupt_reg = 0x00020,
165 			.set_uproc_interrupt_reg32 = 0x00024,
166 			.clr_uproc_interrupt_reg = 0x00028,
167 			.clr_uproc_interrupt_reg32 = 0x0002C,
168 			.init_feedback_reg = 0x0005C,
169 			.dump_addr_reg = 0x00064,
170 			.dump_data_reg = 0x00068,
171 			.endian_swap_reg = 0x00084
172 		}
173 	},
174 };
175 
176 static const struct ipr_chip_t ipr_chip[] = {
177 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
178 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
179 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
180 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
181 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
182 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
183 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
184 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
185 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
186 };
187 
188 static int ipr_max_bus_speeds [] = {
189 	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
190 };
191 
192 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
193 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
194 module_param_named(max_speed, ipr_max_speed, uint, 0);
195 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
196 module_param_named(log_level, ipr_log_level, uint, 0);
197 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
198 module_param_named(testmode, ipr_testmode, int, 0);
199 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
200 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
201 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
202 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
203 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
204 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
205 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
206 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
207 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
208 module_param_named(max_devs, ipr_max_devs, int, 0);
209 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
210 		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
211 MODULE_LICENSE("GPL");
212 MODULE_VERSION(IPR_DRIVER_VERSION);
213 
214 /*  A constant array of IOASCs/URCs/Error Messages */
215 static const
216 struct ipr_error_table_t ipr_error_table[] = {
217 	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
218 	"8155: An unknown error was received"},
219 	{0x00330000, 0, 0,
220 	"Soft underlength error"},
221 	{0x005A0000, 0, 0,
222 	"Command to be cancelled not found"},
223 	{0x00808000, 0, 0,
224 	"Qualified success"},
225 	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
226 	"FFFE: Soft device bus error recovered by the IOA"},
227 	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
228 	"4101: Soft device bus fabric error"},
229 	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
230 	"FFFC: Logical block guard error recovered by the device"},
231 	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
232 	"FFFC: Logical block reference tag error recovered by the device"},
233 	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
234 	"4171: Recovered scatter list tag / sequence number error"},
235 	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
236 	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
237 	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
238 	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
239 	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
240 	"FFFD: Recovered logical block reference tag error detected by the IOA"},
241 	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
242 	"FFFD: Logical block guard error recovered by the IOA"},
243 	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
244 	"FFF9: Device sector reassign successful"},
245 	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
246 	"FFF7: Media error recovered by device rewrite procedures"},
247 	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
248 	"7001: IOA sector reassignment successful"},
249 	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
250 	"FFF9: Soft media error. Sector reassignment recommended"},
251 	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
252 	"FFF7: Media error recovered by IOA rewrite procedures"},
253 	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
254 	"FF3D: Soft PCI bus error recovered by the IOA"},
255 	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
256 	"FFF6: Device hardware error recovered by the IOA"},
257 	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
258 	"FFF6: Device hardware error recovered by the device"},
259 	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
260 	"FF3D: Soft IOA error recovered by the IOA"},
261 	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
262 	"FFFA: Undefined device response recovered by the IOA"},
263 	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
264 	"FFF6: Device bus error, message or command phase"},
265 	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
266 	"FFFE: Task Management Function failed"},
267 	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
268 	"FFF6: Failure prediction threshold exceeded"},
269 	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
270 	"8009: Impending cache battery pack failure"},
271 	{0x02040400, 0, 0,
272 	"34FF: Disk device format in progress"},
273 	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
274 	"9070: IOA requested reset"},
275 	{0x023F0000, 0, 0,
276 	"Synchronization required"},
277 	{0x024E0000, 0, 0,
278 	"No ready, IOA shutdown"},
279 	{0x025A0000, 0, 0,
280 	"Not ready, IOA has been shutdown"},
281 	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
282 	"3020: Storage subsystem configuration error"},
283 	{0x03110B00, 0, 0,
284 	"FFF5: Medium error, data unreadable, recommend reassign"},
285 	{0x03110C00, 0, 0,
286 	"7000: Medium error, data unreadable, do not reassign"},
287 	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
288 	"FFF3: Disk media format bad"},
289 	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
290 	"3002: Addressed device failed to respond to selection"},
291 	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
292 	"3100: Device bus error"},
293 	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
294 	"3109: IOA timed out a device command"},
295 	{0x04088000, 0, 0,
296 	"3120: SCSI bus is not operational"},
297 	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
298 	"4100: Hard device bus fabric error"},
299 	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
300 	"310C: Logical block guard error detected by the device"},
301 	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
302 	"310C: Logical block reference tag error detected by the device"},
303 	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
304 	"4170: Scatter list tag / sequence number error"},
305 	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
306 	"8150: Logical block CRC error on IOA to Host transfer"},
307 	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
308 	"4170: Logical block sequence number error on IOA to Host transfer"},
309 	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
310 	"310D: Logical block reference tag error detected by the IOA"},
311 	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
312 	"310D: Logical block guard error detected by the IOA"},
313 	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
314 	"9000: IOA reserved area data check"},
315 	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
316 	"9001: IOA reserved area invalid data pattern"},
317 	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
318 	"9002: IOA reserved area LRC error"},
319 	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
320 	"Hardware Error, IOA metadata access error"},
321 	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
322 	"102E: Out of alternate sectors for disk storage"},
323 	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
324 	"FFF4: Data transfer underlength error"},
325 	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
326 	"FFF4: Data transfer overlength error"},
327 	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
328 	"3400: Logical unit failure"},
329 	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
330 	"FFF4: Device microcode is corrupt"},
331 	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
332 	"8150: PCI bus error"},
333 	{0x04430000, 1, 0,
334 	"Unsupported device bus message received"},
335 	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
336 	"FFF4: Disk device problem"},
337 	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
338 	"8150: Permanent IOA failure"},
339 	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
340 	"3010: Disk device returned wrong response to IOA"},
341 	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
342 	"8151: IOA microcode error"},
343 	{0x04448500, 0, 0,
344 	"Device bus status error"},
345 	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
346 	"8157: IOA error requiring IOA reset to recover"},
347 	{0x04448700, 0, 0,
348 	"ATA device status error"},
349 	{0x04490000, 0, 0,
350 	"Message reject received from the device"},
351 	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
352 	"8008: A permanent cache battery pack failure occurred"},
353 	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
354 	"9090: Disk unit has been modified after the last known status"},
355 	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
356 	"9081: IOA detected device error"},
357 	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
358 	"9082: IOA detected device error"},
359 	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
360 	"3110: Device bus error, message or command phase"},
361 	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
362 	"3110: SAS Command / Task Management Function failed"},
363 	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
364 	"9091: Incorrect hardware configuration change has been detected"},
365 	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
366 	"9073: Invalid multi-adapter configuration"},
367 	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
368 	"4010: Incorrect connection between cascaded expanders"},
369 	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
370 	"4020: Connections exceed IOA design limits"},
371 	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
372 	"4030: Incorrect multipath connection"},
373 	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
374 	"4110: Unsupported enclosure function"},
375 	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
376 	"FFF4: Command to logical unit failed"},
377 	{0x05240000, 1, 0,
378 	"Illegal request, invalid request type or request packet"},
379 	{0x05250000, 0, 0,
380 	"Illegal request, invalid resource handle"},
381 	{0x05258000, 0, 0,
382 	"Illegal request, commands not allowed to this device"},
383 	{0x05258100, 0, 0,
384 	"Illegal request, command not allowed to a secondary adapter"},
385 	{0x05258200, 0, 0,
386 	"Illegal request, command not allowed to a non-optimized resource"},
387 	{0x05260000, 0, 0,
388 	"Illegal request, invalid field in parameter list"},
389 	{0x05260100, 0, 0,
390 	"Illegal request, parameter not supported"},
391 	{0x05260200, 0, 0,
392 	"Illegal request, parameter value invalid"},
393 	{0x052C0000, 0, 0,
394 	"Illegal request, command sequence error"},
395 	{0x052C8000, 1, 0,
396 	"Illegal request, dual adapter support not enabled"},
397 	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
398 	"9031: Array protection temporarily suspended, protection resuming"},
399 	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
400 	"9040: Array protection temporarily suspended, protection resuming"},
401 	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
402 	"3140: Device bus not ready to ready transition"},
403 	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
404 	"FFFB: SCSI bus was reset"},
405 	{0x06290500, 0, 0,
406 	"FFFE: SCSI bus transition to single ended"},
407 	{0x06290600, 0, 0,
408 	"FFFE: SCSI bus transition to LVD"},
409 	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
410 	"FFFB: SCSI bus was reset by another initiator"},
411 	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
412 	"3029: A device replacement has occurred"},
413 	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
414 	"9051: IOA cache data exists for a missing or failed device"},
415 	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
416 	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
417 	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
418 	"9025: Disk unit is not supported at its physical location"},
419 	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
420 	"3020: IOA detected a SCSI bus configuration error"},
421 	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
422 	"3150: SCSI bus configuration error"},
423 	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
424 	"9074: Asymmetric advanced function disk configuration"},
425 	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
426 	"4040: Incomplete multipath connection between IOA and enclosure"},
427 	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
428 	"4041: Incomplete multipath connection between enclosure and device"},
429 	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
430 	"9075: Incomplete multipath connection between IOA and remote IOA"},
431 	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
432 	"9076: Configuration error, missing remote IOA"},
433 	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
434 	"4050: Enclosure does not support a required multipath function"},
435 	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
436 	"4070: Logically bad block written on device"},
437 	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
438 	"9041: Array protection temporarily suspended"},
439 	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
440 	"9042: Corrupt array parity detected on specified device"},
441 	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
442 	"9030: Array no longer protected due to missing or failed disk unit"},
443 	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
444 	"9071: Link operational transition"},
445 	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
446 	"9072: Link not operational transition"},
447 	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
448 	"9032: Array exposed but still protected"},
449 	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
450 	"70DD: Device forced failed by disrupt device command"},
451 	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
452 	"4061: Multipath redundancy level got better"},
453 	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
454 	"4060: Multipath redundancy level got worse"},
455 	{0x07270000, 0, 0,
456 	"Failure due to other device"},
457 	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
458 	"9008: IOA does not support functions expected by devices"},
459 	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
460 	"9010: Cache data associated with attached devices cannot be found"},
461 	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
462 	"9011: Cache data belongs to devices other than those attached"},
463 	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
464 	"9020: Array missing 2 or more devices with only 1 device present"},
465 	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
466 	"9021: Array missing 2 or more devices with 2 or more devices present"},
467 	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
468 	"9022: Exposed array is missing a required device"},
469 	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
470 	"9023: Array member(s) not at required physical locations"},
471 	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
472 	"9024: Array not functional due to present hardware configuration"},
473 	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
474 	"9026: Array not functional due to present hardware configuration"},
475 	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
476 	"9027: Array is missing a device and parity is out of sync"},
477 	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
478 	"9028: Maximum number of arrays already exist"},
479 	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
480 	"9050: Required cache data cannot be located for a disk unit"},
481 	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
482 	"9052: Cache data exists for a device that has been modified"},
483 	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
484 	"9054: IOA resources not available due to previous problems"},
485 	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
486 	"9092: Disk unit requires initialization before use"},
487 	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
488 	"9029: Incorrect hardware configuration change has been detected"},
489 	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
490 	"9060: One or more disk pairs are missing from an array"},
491 	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
492 	"9061: One or more disks are missing from an array"},
493 	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
494 	"9062: One or more disks are missing from an array"},
495 	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
496 	"9063: Maximum number of functional arrays has been exceeded"},
497 	{0x0B260000, 0, 0,
498 	"Aborted command, invalid descriptor"},
499 	{0x0B5A0000, 0, 0,
500 	"Command terminated by host"}
501 };
502 
503 static const struct ipr_ses_table_entry ipr_ses_table[] = {
504 	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
505 	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
506 	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
507 	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
508 	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
509 	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
510 	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
511 	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
512 	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
513 	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
514 	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
515 	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
516 	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
517 };
518 
519 /*
520  *  Function Prototypes
521  */
522 static int ipr_reset_alert(struct ipr_cmnd *);
523 static void ipr_process_ccn(struct ipr_cmnd *);
524 static void ipr_process_error(struct ipr_cmnd *);
525 static void ipr_reset_ioa_job(struct ipr_cmnd *);
526 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
527 				   enum ipr_shutdown_type);
528 
529 #ifdef CONFIG_SCSI_IPR_TRACE
530 /**
531  * ipr_trc_hook - Add a trace entry to the driver trace
532  * @ipr_cmd:	ipr command struct
533  * @type:		trace type
534  * @add_data:	additional data
535  *
536  * Return value:
537  * 	none
538  **/
539 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
540 			 u8 type, u32 add_data)
541 {
542 	struct ipr_trace_entry *trace_entry;
543 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
544 
545 	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
546 	trace_entry->time = jiffies;
547 	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
548 	trace_entry->type = type;
549 	if (ipr_cmd->ioa_cfg->sis64)
550 		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
551 	else
552 		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
553 	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
554 	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
555 	trace_entry->u.add_data = add_data;
556 }
557 #else
558 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
559 #endif
560 
561 /**
562  * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
563  * @ipr_cmd:	ipr command struct
564  *
565  * Return value:
566  * 	none
567  **/
568 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
569 {
570 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
571 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
572 	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
573 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
574 
575 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
576 	ioarcb->data_transfer_length = 0;
577 	ioarcb->read_data_transfer_length = 0;
578 	ioarcb->ioadl_len = 0;
579 	ioarcb->read_ioadl_len = 0;
580 
581 	if (ipr_cmd->ioa_cfg->sis64) {
582 		ioarcb->u.sis64_addr_data.data_ioadl_addr =
583 			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
584 		ioasa64->u.gata.status = 0;
585 	} else {
586 		ioarcb->write_ioadl_addr =
587 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
588 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
589 		ioasa->u.gata.status = 0;
590 	}
591 
592 	ioasa->hdr.ioasc = 0;
593 	ioasa->hdr.residual_data_len = 0;
594 	ipr_cmd->scsi_cmd = NULL;
595 	ipr_cmd->qc = NULL;
596 	ipr_cmd->sense_buffer[0] = 0;
597 	ipr_cmd->dma_use_sg = 0;
598 }
599 
600 /**
601  * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
602  * @ipr_cmd:	ipr command struct
603  *
604  * Return value:
605  * 	none
606  **/
607 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
608 {
609 	ipr_reinit_ipr_cmnd(ipr_cmd);
610 	ipr_cmd->u.scratch = 0;
611 	ipr_cmd->sibling = NULL;
612 	init_timer(&ipr_cmd->timer);
613 }
614 
615 /**
616  * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
617  * @ioa_cfg:	ioa config struct
618  *
619  * Return value:
620  * 	pointer to ipr command struct
621  **/
622 static
623 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
624 {
625 	struct ipr_cmnd *ipr_cmd;
626 
627 	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
628 	list_del(&ipr_cmd->queue);
629 	ipr_init_ipr_cmnd(ipr_cmd);
630 
631 	return ipr_cmd;
632 }
633 
634 /**
635  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
636  * @ioa_cfg:	ioa config struct
637  * @clr_ints:     interrupts to clear
638  *
639  * This function masks all interrupts on the adapter, then clears the
640  * interrupts specified in the mask
641  *
642  * Return value:
643  * 	none
644  **/
645 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
646 					  u32 clr_ints)
647 {
648 	volatile u32 int_reg;
649 
650 	/* Stop new interrupts */
651 	ioa_cfg->allow_interrupts = 0;
652 
653 	/* Set interrupt mask to stop all new interrupts */
654 	if (ioa_cfg->sis64)
655 		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
656 	else
657 		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
658 
659 	/* Clear any pending interrupts */
660 	if (ioa_cfg->sis64)
661 		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
662 	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
663 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
664 }
665 
666 /**
667  * ipr_save_pcix_cmd_reg - Save PCI-X command register
668  * @ioa_cfg:	ioa config struct
669  *
670  * Return value:
671  * 	0 on success / -EIO on failure
672  **/
673 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
674 {
675 	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
676 
677 	if (pcix_cmd_reg == 0)
678 		return 0;
679 
680 	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
681 				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
682 		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
683 		return -EIO;
684 	}
685 
686 	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
687 	return 0;
688 }
689 
690 /**
691  * ipr_set_pcix_cmd_reg - Setup PCI-X command register
692  * @ioa_cfg:	ioa config struct
693  *
694  * Return value:
695  * 	0 on success / -EIO on failure
696  **/
697 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
698 {
699 	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
700 
701 	if (pcix_cmd_reg) {
702 		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
703 					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
704 			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
705 			return -EIO;
706 		}
707 	}
708 
709 	return 0;
710 }
711 
712 /**
713  * ipr_sata_eh_done - done function for aborted SATA commands
714  * @ipr_cmd:	ipr command struct
715  *
716  * This function is invoked for ops generated to SATA
717  * devices which are being aborted.
718  *
719  * Return value:
720  * 	none
721  **/
722 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
723 {
724 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
725 	struct ata_queued_cmd *qc = ipr_cmd->qc;
726 	struct ipr_sata_port *sata_port = qc->ap->private_data;
727 
728 	qc->err_mask |= AC_ERR_OTHER;
729 	sata_port->ioasa.status |= ATA_BUSY;
730 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
731 	ata_qc_complete(qc);
732 }
733 
734 /**
735  * ipr_scsi_eh_done - mid-layer done function for aborted ops
736  * @ipr_cmd:	ipr command struct
737  *
738  * This function is invoked by the interrupt handler for
739  * ops generated by the SCSI mid-layer which are being aborted.
740  *
741  * Return value:
742  * 	none
743  **/
744 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
745 {
746 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
747 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
748 
749 	scsi_cmd->result |= (DID_ERROR << 16);
750 
751 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
752 	scsi_cmd->scsi_done(scsi_cmd);
753 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
754 }
755 
756 /**
757  * ipr_fail_all_ops - Fails all outstanding ops.
758  * @ioa_cfg:	ioa config struct
759  *
760  * This function fails all outstanding ops.
761  *
762  * Return value:
763  * 	none
764  **/
765 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
766 {
767 	struct ipr_cmnd *ipr_cmd, *temp;
768 
769 	ENTER;
770 	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
771 		list_del(&ipr_cmd->queue);
772 
773 		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
774 		ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);
775 
776 		if (ipr_cmd->scsi_cmd)
777 			ipr_cmd->done = ipr_scsi_eh_done;
778 		else if (ipr_cmd->qc)
779 			ipr_cmd->done = ipr_sata_eh_done;
780 
781 		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
782 		del_timer(&ipr_cmd->timer);
783 		ipr_cmd->done(ipr_cmd);
784 	}
785 
786 	LEAVE;
787 }
788 
789 /**
790  * ipr_send_command -  Send driver initiated requests.
791  * @ipr_cmd:		ipr command struct
792  *
793  * This function sends a command to the adapter using the correct write call.
794  * In the case of sis64, calculate the ioarcb size required. Then or in the
795  * appropriate bits.
796  *
797  * Return value:
798  * 	none
799  **/
800 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
801 {
802 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
803 	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
804 
805 	if (ioa_cfg->sis64) {
806 		/* The default size is 256 bytes */
807 		send_dma_addr |= 0x1;
808 
809 		/* If the number of ioadls * size of ioadl > 128 bytes,
810 		   then use a 512 byte ioarcb */
811 		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
812 			send_dma_addr |= 0x4;
813 		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
814 	} else
815 		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
816 }
817 
818 /**
819  * ipr_do_req -  Send driver initiated requests.
820  * @ipr_cmd:		ipr command struct
821  * @done:			done function
822  * @timeout_func:	timeout function
823  * @timeout:		timeout value
824  *
825  * This function sends the specified command to the adapter with the
826  * timeout given. The done function is invoked on command completion.
827  *
828  * Return value:
829  * 	none
830  **/
831 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
832 		       void (*done) (struct ipr_cmnd *),
833 		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
834 {
835 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
836 
837 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
838 
839 	ipr_cmd->done = done;
840 
841 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
842 	ipr_cmd->timer.expires = jiffies + timeout;
843 	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
844 
845 	add_timer(&ipr_cmd->timer);
846 
847 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
848 
849 	mb();
850 
851 	ipr_send_command(ipr_cmd);
852 }
853 
854 /**
855  * ipr_internal_cmd_done - Op done function for an internally generated op.
856  * @ipr_cmd:	ipr command struct
857  *
858  * This function is the op done function for an internally generated,
859  * blocking op. It simply wakes the sleeping thread.
860  *
861  * Return value:
862  * 	none
863  **/
864 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
865 {
866 	if (ipr_cmd->sibling)
867 		ipr_cmd->sibling = NULL;
868 	else
869 		complete(&ipr_cmd->completion);
870 }
871 
872 /**
873  * ipr_init_ioadl - initialize the ioadl for the correct SIS type
874  * @ipr_cmd:	ipr command struct
875  * @dma_addr:	dma address
876  * @len:	transfer length
877  * @flags:	ioadl flag value
878  *
879  * This function initializes an ioadl in the case where there is only a single
880  * descriptor.
881  *
882  * Return value:
883  * 	nothing
884  **/
885 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
886 			   u32 len, int flags)
887 {
888 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
889 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
890 
891 	ipr_cmd->dma_use_sg = 1;
892 
893 	if (ipr_cmd->ioa_cfg->sis64) {
894 		ioadl64->flags = cpu_to_be32(flags);
895 		ioadl64->data_len = cpu_to_be32(len);
896 		ioadl64->address = cpu_to_be64(dma_addr);
897 
898 		ipr_cmd->ioarcb.ioadl_len =
899 		       	cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
900 		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
901 	} else {
902 		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
903 		ioadl->address = cpu_to_be32(dma_addr);
904 
905 		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
906 			ipr_cmd->ioarcb.read_ioadl_len =
907 				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
908 			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
909 		} else {
910 			ipr_cmd->ioarcb.ioadl_len =
911 			       	cpu_to_be32(sizeof(struct ipr_ioadl_desc));
912 			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
913 		}
914 	}
915 }
916 
917 /**
918  * ipr_send_blocking_cmd - Send command and sleep on its completion.
919  * @ipr_cmd:	ipr command struct
920  * @timeout_func:	function to invoke if command times out
921  * @timeout:	timeout
922  *
923  * Return value:
924  * 	none
925  **/
926 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
927 				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
928 				  u32 timeout)
929 {
930 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
931 
932 	init_completion(&ipr_cmd->completion);
933 	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
934 
935 	spin_unlock_irq(ioa_cfg->host->host_lock);
936 	wait_for_completion(&ipr_cmd->completion);
937 	spin_lock_irq(ioa_cfg->host->host_lock);
938 }
939 
940 /**
941  * ipr_send_hcam - Send an HCAM to the adapter.
942  * @ioa_cfg:	ioa config struct
943  * @type:		HCAM type
944  * @hostrcb:	hostrcb struct
945  *
946  * This function will send a Host Controlled Async command to the adapter.
947  * If HCAMs are currently not allowed to be issued to the adapter, it will
948  * place the hostrcb on the free queue.
949  *
950  * Return value:
951  * 	none
952  **/
953 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
954 			  struct ipr_hostrcb *hostrcb)
955 {
956 	struct ipr_cmnd *ipr_cmd;
957 	struct ipr_ioarcb *ioarcb;
958 
959 	if (ioa_cfg->allow_cmds) {
960 		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
961 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
962 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
963 
964 		ipr_cmd->u.hostrcb = hostrcb;
965 		ioarcb = &ipr_cmd->ioarcb;
966 
967 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
968 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
969 		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
970 		ioarcb->cmd_pkt.cdb[1] = type;
971 		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
972 		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
973 
974 		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
975 			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
976 
977 		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
978 			ipr_cmd->done = ipr_process_ccn;
979 		else
980 			ipr_cmd->done = ipr_process_error;
981 
982 		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
983 
984 		mb();
985 
986 		ipr_send_command(ipr_cmd);
987 	} else {
988 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
989 	}
990 }
991 
992 /**
993  * ipr_update_ata_class - Update the ata class in the resource entry
994  * @res:	resource entry struct
995  * @proto:	cfgte device bus protocol value
996  *
997  * Return value:
998  * 	none
999  **/
1000 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1001 {
1002 	switch(proto) {
1003 	case IPR_PROTO_SATA:
1004 	case IPR_PROTO_SAS_STP:
1005 		res->ata_class = ATA_DEV_ATA;
1006 		break;
1007 	case IPR_PROTO_SATA_ATAPI:
1008 	case IPR_PROTO_SAS_STP_ATAPI:
1009 		res->ata_class = ATA_DEV_ATAPI;
1010 		break;
1011 	default:
1012 		res->ata_class = ATA_DEV_UNKNOWN;
1013 		break;
1014 	};
1015 }
1016 
1017 /**
1018  * ipr_init_res_entry - Initialize a resource entry struct.
1019  * @res:	resource entry struct
1020  * @cfgtew:	config table entry wrapper struct
1021  *
1022  * Return value:
1023  * 	none
1024  **/
1025 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1026 			       struct ipr_config_table_entry_wrapper *cfgtew)
1027 {
1028 	int found = 0;
1029 	unsigned int proto;
1030 	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1031 	struct ipr_resource_entry *gscsi_res = NULL;
1032 
1033 	res->needs_sync_complete = 0;
1034 	res->in_erp = 0;
1035 	res->add_to_ml = 0;
1036 	res->del_from_ml = 0;
1037 	res->resetting_device = 0;
1038 	res->sdev = NULL;
1039 	res->sata_port = NULL;
1040 
1041 	if (ioa_cfg->sis64) {
1042 		proto = cfgtew->u.cfgte64->proto;
1043 		res->res_flags = cfgtew->u.cfgte64->res_flags;
1044 		res->qmodel = IPR_QUEUEING_MODEL64(res);
1045 		res->type = cfgtew->u.cfgte64->res_type;
1046 
1047 		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1048 			sizeof(res->res_path));
1049 
1050 		res->bus = 0;
1051 		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1052 			sizeof(res->dev_lun.scsi_lun));
1053 		res->lun = scsilun_to_int(&res->dev_lun);
1054 
1055 		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1056 			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1057 				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1058 					found = 1;
1059 					res->target = gscsi_res->target;
1060 					break;
1061 				}
1062 			}
1063 			if (!found) {
1064 				res->target = find_first_zero_bit(ioa_cfg->target_ids,
1065 								  ioa_cfg->max_devs_supported);
1066 				set_bit(res->target, ioa_cfg->target_ids);
1067 			}
1068 		} else if (res->type == IPR_RES_TYPE_IOAFP) {
1069 			res->bus = IPR_IOAFP_VIRTUAL_BUS;
1070 			res->target = 0;
1071 		} else if (res->type == IPR_RES_TYPE_ARRAY) {
1072 			res->bus = IPR_ARRAY_VIRTUAL_BUS;
1073 			res->target = find_first_zero_bit(ioa_cfg->array_ids,
1074 							  ioa_cfg->max_devs_supported);
1075 			set_bit(res->target, ioa_cfg->array_ids);
1076 		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1077 			res->bus = IPR_VSET_VIRTUAL_BUS;
1078 			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1079 							  ioa_cfg->max_devs_supported);
1080 			set_bit(res->target, ioa_cfg->vset_ids);
1081 		} else {
1082 			res->target = find_first_zero_bit(ioa_cfg->target_ids,
1083 							  ioa_cfg->max_devs_supported);
1084 			set_bit(res->target, ioa_cfg->target_ids);
1085 		}
1086 	} else {
1087 		proto = cfgtew->u.cfgte->proto;
1088 		res->qmodel = IPR_QUEUEING_MODEL(res);
1089 		res->flags = cfgtew->u.cfgte->flags;
1090 		if (res->flags & IPR_IS_IOA_RESOURCE)
1091 			res->type = IPR_RES_TYPE_IOAFP;
1092 		else
1093 			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1094 
1095 		res->bus = cfgtew->u.cfgte->res_addr.bus;
1096 		res->target = cfgtew->u.cfgte->res_addr.target;
1097 		res->lun = cfgtew->u.cfgte->res_addr.lun;
1098 		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1099 	}
1100 
1101 	ipr_update_ata_class(res, proto);
1102 }
1103 
1104 /**
1105  * ipr_is_same_device - Determine if two devices are the same.
1106  * @res:	resource entry struct
1107  * @cfgtew:	config table entry wrapper struct
1108  *
1109  * Return value:
1110  * 	1 if the devices are the same / 0 otherwise
1111  **/
1112 static int ipr_is_same_device(struct ipr_resource_entry *res,
1113 			      struct ipr_config_table_entry_wrapper *cfgtew)
1114 {
1115 	if (res->ioa_cfg->sis64) {
1116 		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1117 					sizeof(cfgtew->u.cfgte64->dev_id)) &&
1118 			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1119 					sizeof(cfgtew->u.cfgte64->lun))) {
1120 			return 1;
1121 		}
1122 	} else {
1123 		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1124 		    res->target == cfgtew->u.cfgte->res_addr.target &&
1125 		    res->lun == cfgtew->u.cfgte->res_addr.lun)
1126 			return 1;
1127 	}
1128 
1129 	return 0;
1130 }
1131 
1132 /**
1133  * ipr_format_res_path - Format the resource path for printing.
1134  * @res_path:	resource path
1135  * @buf:	buffer
1136  *
1137  * Return value:
1138  * 	pointer to buffer
1139  **/
1140 static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
1141 {
1142 	int i;
1143 	char *p = buffer;
1144 
1145 	*p = '\0';
1146 	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1147 	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1148 		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1149 
1150 	return buffer;
1151 }
1152 
1153 /**
1154  * ipr_update_res_entry - Update the resource entry.
1155  * @res:	resource entry struct
1156  * @cfgtew:	config table entry wrapper struct
1157  *
1158  * Return value:
1159  *      none
1160  **/
1161 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1162 				 struct ipr_config_table_entry_wrapper *cfgtew)
1163 {
1164 	char buffer[IPR_MAX_RES_PATH_LENGTH];
1165 	unsigned int proto;
1166 	int new_path = 0;
1167 
1168 	if (res->ioa_cfg->sis64) {
1169 		res->flags = cfgtew->u.cfgte64->flags;
1170 		res->res_flags = cfgtew->u.cfgte64->res_flags;
1171 		res->type = cfgtew->u.cfgte64->res_type;
1172 
1173 		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1174 			sizeof(struct ipr_std_inq_data));
1175 
1176 		res->qmodel = IPR_QUEUEING_MODEL64(res);
1177 		proto = cfgtew->u.cfgte64->proto;
1178 		res->res_handle = cfgtew->u.cfgte64->res_handle;
1179 		res->dev_id = cfgtew->u.cfgte64->dev_id;
1180 
1181 		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1182 			sizeof(res->dev_lun.scsi_lun));
1183 
1184 		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1185 					sizeof(res->res_path))) {
1186 			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1187 				sizeof(res->res_path));
1188 			new_path = 1;
1189 		}
1190 
1191 		if (res->sdev && new_path)
1192 			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1193 				    ipr_format_res_path(res->res_path, buffer,
1194 							sizeof(buffer)));
1195 	} else {
1196 		res->flags = cfgtew->u.cfgte->flags;
1197 		if (res->flags & IPR_IS_IOA_RESOURCE)
1198 			res->type = IPR_RES_TYPE_IOAFP;
1199 		else
1200 			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1201 
1202 		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1203 			sizeof(struct ipr_std_inq_data));
1204 
1205 		res->qmodel = IPR_QUEUEING_MODEL(res);
1206 		proto = cfgtew->u.cfgte->proto;
1207 		res->res_handle = cfgtew->u.cfgte->res_handle;
1208 	}
1209 
1210 	ipr_update_ata_class(res, proto);
1211 }
1212 
1213 /**
1214  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1215  * 			  for the resource.
1216  * @res:	resource entry struct
1217  * @cfgtew:	config table entry wrapper struct
1218  *
1219  * Return value:
1220  *      none
1221  **/
1222 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1223 {
1224 	struct ipr_resource_entry *gscsi_res = NULL;
1225 	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1226 
1227 	if (!ioa_cfg->sis64)
1228 		return;
1229 
1230 	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1231 		clear_bit(res->target, ioa_cfg->array_ids);
1232 	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1233 		clear_bit(res->target, ioa_cfg->vset_ids);
1234 	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1235 		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1236 			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1237 				return;
1238 		clear_bit(res->target, ioa_cfg->target_ids);
1239 
1240 	} else if (res->bus == 0)
1241 		clear_bit(res->target, ioa_cfg->target_ids);
1242 }
1243 
1244 /**
1245  * ipr_handle_config_change - Handle a config change from the adapter
1246  * @ioa_cfg:	ioa config struct
1247  * @hostrcb:	hostrcb
1248  *
1249  * Return value:
1250  * 	none
1251  **/
1252 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1253 				     struct ipr_hostrcb *hostrcb)
1254 {
1255 	struct ipr_resource_entry *res = NULL;
1256 	struct ipr_config_table_entry_wrapper cfgtew;
1257 	__be32 cc_res_handle;
1258 
1259 	u32 is_ndn = 1;
1260 
1261 	if (ioa_cfg->sis64) {
1262 		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1263 		cc_res_handle = cfgtew.u.cfgte64->res_handle;
1264 	} else {
1265 		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1266 		cc_res_handle = cfgtew.u.cfgte->res_handle;
1267 	}
1268 
1269 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1270 		if (res->res_handle == cc_res_handle) {
1271 			is_ndn = 0;
1272 			break;
1273 		}
1274 	}
1275 
1276 	if (is_ndn) {
1277 		if (list_empty(&ioa_cfg->free_res_q)) {
1278 			ipr_send_hcam(ioa_cfg,
1279 				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1280 				      hostrcb);
1281 			return;
1282 		}
1283 
1284 		res = list_entry(ioa_cfg->free_res_q.next,
1285 				 struct ipr_resource_entry, queue);
1286 
1287 		list_del(&res->queue);
1288 		ipr_init_res_entry(res, &cfgtew);
1289 		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1290 	}
1291 
1292 	ipr_update_res_entry(res, &cfgtew);
1293 
1294 	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1295 		if (res->sdev) {
1296 			res->del_from_ml = 1;
1297 			res->res_handle = IPR_INVALID_RES_HANDLE;
1298 			if (ioa_cfg->allow_ml_add_del)
1299 				schedule_work(&ioa_cfg->work_q);
1300 		} else {
1301 			ipr_clear_res_target(res);
1302 			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1303 		}
1304 	} else if (!res->sdev) {
1305 		res->add_to_ml = 1;
1306 		if (ioa_cfg->allow_ml_add_del)
1307 			schedule_work(&ioa_cfg->work_q);
1308 	}
1309 
1310 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1311 }
1312 
1313 /**
1314  * ipr_process_ccn - Op done function for a CCN.
1315  * @ipr_cmd:	ipr command struct
1316  *
1317  * This function is the op done function for a configuration
1318  * change notification host controlled async from the adapter.
1319  *
1320  * Return value:
1321  * 	none
1322  **/
1323 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1324 {
1325 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1326 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1327 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1328 
1329 	list_del(&hostrcb->queue);
1330 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1331 
1332 	if (ioasc) {
1333 		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1334 			dev_err(&ioa_cfg->pdev->dev,
1335 				"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1336 
1337 		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1338 	} else {
1339 		ipr_handle_config_change(ioa_cfg, hostrcb);
1340 	}
1341 }
1342 
1343 /**
1344  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1345  * @i:		index into buffer
1346  * @buf:		string to modify
1347  *
1348  * This function will strip all trailing whitespace, pad the end
1349  * of the string with a single space, and NULL terminate the string.
1350  *
1351  * Return value:
1352  * 	new length of string
1353  **/
1354 static int strip_and_pad_whitespace(int i, char *buf)
1355 {
1356 	while (i && buf[i] == ' ')
1357 		i--;
1358 	buf[i+1] = ' ';
1359 	buf[i+2] = '\0';
1360 	return i + 2;
1361 }
1362 
1363 /**
1364  * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1365  * @prefix:		string to print at start of printk
1366  * @hostrcb:	hostrcb pointer
1367  * @vpd:		vendor/product id/sn struct
1368  *
1369  * Return value:
1370  * 	none
1371  **/
1372 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1373 				struct ipr_vpd *vpd)
1374 {
1375 	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1376 	int i = 0;
1377 
1378 	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1379 	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1380 
1381 	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1382 	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1383 
1384 	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1385 	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1386 
1387 	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1388 }
1389 
1390 /**
1391  * ipr_log_vpd - Log the passed VPD to the error log.
1392  * @vpd:		vendor/product id/sn struct
1393  *
1394  * Return value:
1395  * 	none
1396  **/
1397 static void ipr_log_vpd(struct ipr_vpd *vpd)
1398 {
1399 	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1400 		    + IPR_SERIAL_NUM_LEN];
1401 
1402 	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1403 	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1404 	       IPR_PROD_ID_LEN);
1405 	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1406 	ipr_err("Vendor/Product ID: %s\n", buffer);
1407 
1408 	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1409 	buffer[IPR_SERIAL_NUM_LEN] = '\0';
1410 	ipr_err("    Serial Number: %s\n", buffer);
1411 }
1412 
1413 /**
1414  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1415  * @prefix:		string to print at start of printk
1416  * @hostrcb:	hostrcb pointer
1417  * @vpd:		vendor/product id/sn/wwn struct
1418  *
1419  * Return value:
1420  * 	none
1421  **/
1422 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1423 				    struct ipr_ext_vpd *vpd)
1424 {
1425 	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1426 	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1427 		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1428 }
1429 
1430 /**
1431  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1432  * @vpd:		vendor/product id/sn/wwn struct
1433  *
1434  * Return value:
1435  * 	none
1436  **/
1437 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1438 {
1439 	ipr_log_vpd(&vpd->vpd);
1440 	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1441 		be32_to_cpu(vpd->wwid[1]));
1442 }
1443 
1444 /**
1445  * ipr_log_enhanced_cache_error - Log a cache error.
1446  * @ioa_cfg:	ioa config struct
1447  * @hostrcb:	hostrcb struct
1448  *
1449  * Return value:
1450  * 	none
1451  **/
1452 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1453 					 struct ipr_hostrcb *hostrcb)
1454 {
1455 	struct ipr_hostrcb_type_12_error *error;
1456 
1457 	if (ioa_cfg->sis64)
1458 		error = &hostrcb->hcam.u.error64.u.type_12_error;
1459 	else
1460 		error = &hostrcb->hcam.u.error.u.type_12_error;
1461 
1462 	ipr_err("-----Current Configuration-----\n");
1463 	ipr_err("Cache Directory Card Information:\n");
1464 	ipr_log_ext_vpd(&error->ioa_vpd);
1465 	ipr_err("Adapter Card Information:\n");
1466 	ipr_log_ext_vpd(&error->cfc_vpd);
1467 
1468 	ipr_err("-----Expected Configuration-----\n");
1469 	ipr_err("Cache Directory Card Information:\n");
1470 	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1471 	ipr_err("Adapter Card Information:\n");
1472 	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1473 
1474 	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1475 		     be32_to_cpu(error->ioa_data[0]),
1476 		     be32_to_cpu(error->ioa_data[1]),
1477 		     be32_to_cpu(error->ioa_data[2]));
1478 }
1479 
1480 /**
1481  * ipr_log_cache_error - Log a cache error.
1482  * @ioa_cfg:	ioa config struct
1483  * @hostrcb:	hostrcb struct
1484  *
1485  * Return value:
1486  * 	none
1487  **/
1488 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1489 				struct ipr_hostrcb *hostrcb)
1490 {
1491 	struct ipr_hostrcb_type_02_error *error =
1492 		&hostrcb->hcam.u.error.u.type_02_error;
1493 
1494 	ipr_err("-----Current Configuration-----\n");
1495 	ipr_err("Cache Directory Card Information:\n");
1496 	ipr_log_vpd(&error->ioa_vpd);
1497 	ipr_err("Adapter Card Information:\n");
1498 	ipr_log_vpd(&error->cfc_vpd);
1499 
1500 	ipr_err("-----Expected Configuration-----\n");
1501 	ipr_err("Cache Directory Card Information:\n");
1502 	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1503 	ipr_err("Adapter Card Information:\n");
1504 	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1505 
1506 	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1507 		     be32_to_cpu(error->ioa_data[0]),
1508 		     be32_to_cpu(error->ioa_data[1]),
1509 		     be32_to_cpu(error->ioa_data[2]));
1510 }
1511 
1512 /**
1513  * ipr_log_enhanced_config_error - Log a configuration error.
1514  * @ioa_cfg:	ioa config struct
1515  * @hostrcb:	hostrcb struct
1516  *
1517  * Return value:
1518  * 	none
1519  **/
1520 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1521 					  struct ipr_hostrcb *hostrcb)
1522 {
1523 	int errors_logged, i;
1524 	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1525 	struct ipr_hostrcb_type_13_error *error;
1526 
1527 	error = &hostrcb->hcam.u.error.u.type_13_error;
1528 	errors_logged = be32_to_cpu(error->errors_logged);
1529 
1530 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1531 		be32_to_cpu(error->errors_detected), errors_logged);
1532 
1533 	dev_entry = error->dev;
1534 
1535 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1536 		ipr_err_separator;
1537 
1538 		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1539 		ipr_log_ext_vpd(&dev_entry->vpd);
1540 
1541 		ipr_err("-----New Device Information-----\n");
1542 		ipr_log_ext_vpd(&dev_entry->new_vpd);
1543 
1544 		ipr_err("Cache Directory Card Information:\n");
1545 		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1546 
1547 		ipr_err("Adapter Card Information:\n");
1548 		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1549 	}
1550 }
1551 
1552 /**
1553  * ipr_log_sis64_config_error - Log a device error.
1554  * @ioa_cfg:	ioa config struct
1555  * @hostrcb:	hostrcb struct
1556  *
1557  * Return value:
1558  * 	none
1559  **/
1560 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1561 				       struct ipr_hostrcb *hostrcb)
1562 {
1563 	int errors_logged, i;
1564 	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1565 	struct ipr_hostrcb_type_23_error *error;
1566 	char buffer[IPR_MAX_RES_PATH_LENGTH];
1567 
1568 	error = &hostrcb->hcam.u.error64.u.type_23_error;
1569 	errors_logged = be32_to_cpu(error->errors_logged);
1570 
1571 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1572 		be32_to_cpu(error->errors_detected), errors_logged);
1573 
1574 	dev_entry = error->dev;
1575 
1576 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1577 		ipr_err_separator;
1578 
1579 		ipr_err("Device %d : %s", i + 1,
1580 			 ipr_format_res_path(dev_entry->res_path, buffer,
1581 					     sizeof(buffer)));
1582 		ipr_log_ext_vpd(&dev_entry->vpd);
1583 
1584 		ipr_err("-----New Device Information-----\n");
1585 		ipr_log_ext_vpd(&dev_entry->new_vpd);
1586 
1587 		ipr_err("Cache Directory Card Information:\n");
1588 		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1589 
1590 		ipr_err("Adapter Card Information:\n");
1591 		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1592 	}
1593 }
1594 
1595 /**
1596  * ipr_log_config_error - Log a configuration error.
1597  * @ioa_cfg:	ioa config struct
1598  * @hostrcb:	hostrcb struct
1599  *
1600  * Return value:
1601  * 	none
1602  **/
1603 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1604 				 struct ipr_hostrcb *hostrcb)
1605 {
1606 	int errors_logged, i;
1607 	struct ipr_hostrcb_device_data_entry *dev_entry;
1608 	struct ipr_hostrcb_type_03_error *error;
1609 
1610 	error = &hostrcb->hcam.u.error.u.type_03_error;
1611 	errors_logged = be32_to_cpu(error->errors_logged);
1612 
1613 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1614 		be32_to_cpu(error->errors_detected), errors_logged);
1615 
1616 	dev_entry = error->dev;
1617 
1618 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1619 		ipr_err_separator;
1620 
1621 		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1622 		ipr_log_vpd(&dev_entry->vpd);
1623 
1624 		ipr_err("-----New Device Information-----\n");
1625 		ipr_log_vpd(&dev_entry->new_vpd);
1626 
1627 		ipr_err("Cache Directory Card Information:\n");
1628 		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1629 
1630 		ipr_err("Adapter Card Information:\n");
1631 		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1632 
1633 		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1634 			be32_to_cpu(dev_entry->ioa_data[0]),
1635 			be32_to_cpu(dev_entry->ioa_data[1]),
1636 			be32_to_cpu(dev_entry->ioa_data[2]),
1637 			be32_to_cpu(dev_entry->ioa_data[3]),
1638 			be32_to_cpu(dev_entry->ioa_data[4]));
1639 	}
1640 }
1641 
1642 /**
1643  * ipr_log_enhanced_array_error - Log an array configuration error.
1644  * @ioa_cfg:	ioa config struct
1645  * @hostrcb:	hostrcb struct
1646  *
1647  * Return value:
1648  * 	none
1649  **/
1650 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1651 					 struct ipr_hostrcb *hostrcb)
1652 {
1653 	int i, num_entries;
1654 	struct ipr_hostrcb_type_14_error *error;
1655 	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1656 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1657 
1658 	error = &hostrcb->hcam.u.error.u.type_14_error;
1659 
1660 	ipr_err_separator;
1661 
1662 	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1663 		error->protection_level,
1664 		ioa_cfg->host->host_no,
1665 		error->last_func_vset_res_addr.bus,
1666 		error->last_func_vset_res_addr.target,
1667 		error->last_func_vset_res_addr.lun);
1668 
1669 	ipr_err_separator;
1670 
1671 	array_entry = error->array_member;
1672 	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1673 			    ARRAY_SIZE(error->array_member));
1674 
1675 	for (i = 0; i < num_entries; i++, array_entry++) {
1676 		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1677 			continue;
1678 
1679 		if (be32_to_cpu(error->exposed_mode_adn) == i)
1680 			ipr_err("Exposed Array Member %d:\n", i);
1681 		else
1682 			ipr_err("Array Member %d:\n", i);
1683 
1684 		ipr_log_ext_vpd(&array_entry->vpd);
1685 		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1686 		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1687 				 "Expected Location");
1688 
1689 		ipr_err_separator;
1690 	}
1691 }
1692 
1693 /**
1694  * ipr_log_array_error - Log an array configuration error.
1695  * @ioa_cfg:	ioa config struct
1696  * @hostrcb:	hostrcb struct
1697  *
1698  * Return value:
1699  * 	none
1700  **/
1701 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1702 				struct ipr_hostrcb *hostrcb)
1703 {
1704 	int i;
1705 	struct ipr_hostrcb_type_04_error *error;
1706 	struct ipr_hostrcb_array_data_entry *array_entry;
1707 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1708 
1709 	error = &hostrcb->hcam.u.error.u.type_04_error;
1710 
1711 	ipr_err_separator;
1712 
1713 	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1714 		error->protection_level,
1715 		ioa_cfg->host->host_no,
1716 		error->last_func_vset_res_addr.bus,
1717 		error->last_func_vset_res_addr.target,
1718 		error->last_func_vset_res_addr.lun);
1719 
1720 	ipr_err_separator;
1721 
1722 	array_entry = error->array_member;
1723 
1724 	for (i = 0; i < 18; i++) {
1725 		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1726 			continue;
1727 
1728 		if (be32_to_cpu(error->exposed_mode_adn) == i)
1729 			ipr_err("Exposed Array Member %d:\n", i);
1730 		else
1731 			ipr_err("Array Member %d:\n", i);
1732 
1733 		ipr_log_vpd(&array_entry->vpd);
1734 
1735 		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1736 		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1737 				 "Expected Location");
1738 
1739 		ipr_err_separator;
1740 
1741 		if (i == 9)
1742 			array_entry = error->array_member2;
1743 		else
1744 			array_entry++;
1745 	}
1746 }
1747 
1748 /**
1749  * ipr_log_hex_data - Log additional hex IOA error data.
1750  * @ioa_cfg:	ioa config struct
1751  * @data:		IOA error data
1752  * @len:		data length
1753  *
1754  * Return value:
1755  * 	none
1756  **/
1757 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1758 {
1759 	int i;
1760 
1761 	if (len == 0)
1762 		return;
1763 
1764 	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1765 		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1766 
1767 	for (i = 0; i < len / 4; i += 4) {
1768 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1769 			be32_to_cpu(data[i]),
1770 			be32_to_cpu(data[i+1]),
1771 			be32_to_cpu(data[i+2]),
1772 			be32_to_cpu(data[i+3]));
1773 	}
1774 }
1775 
1776 /**
1777  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1778  * @ioa_cfg:	ioa config struct
1779  * @hostrcb:	hostrcb struct
1780  *
1781  * Return value:
1782  * 	none
1783  **/
1784 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1785 					    struct ipr_hostrcb *hostrcb)
1786 {
1787 	struct ipr_hostrcb_type_17_error *error;
1788 
1789 	if (ioa_cfg->sis64)
1790 		error = &hostrcb->hcam.u.error64.u.type_17_error;
1791 	else
1792 		error = &hostrcb->hcam.u.error.u.type_17_error;
1793 
1794 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1795 	strim(error->failure_reason);
1796 
1797 	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1798 		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1799 	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1800 	ipr_log_hex_data(ioa_cfg, error->data,
1801 			 be32_to_cpu(hostrcb->hcam.length) -
1802 			 (offsetof(struct ipr_hostrcb_error, u) +
1803 			  offsetof(struct ipr_hostrcb_type_17_error, data)));
1804 }
1805 
1806 /**
1807  * ipr_log_dual_ioa_error - Log a dual adapter error.
1808  * @ioa_cfg:	ioa config struct
1809  * @hostrcb:	hostrcb struct
1810  *
1811  * Return value:
1812  * 	none
1813  **/
1814 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1815 				   struct ipr_hostrcb *hostrcb)
1816 {
1817 	struct ipr_hostrcb_type_07_error *error;
1818 
1819 	error = &hostrcb->hcam.u.error.u.type_07_error;
1820 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1821 	strim(error->failure_reason);
1822 
1823 	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1824 		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1825 	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1826 	ipr_log_hex_data(ioa_cfg, error->data,
1827 			 be32_to_cpu(hostrcb->hcam.length) -
1828 			 (offsetof(struct ipr_hostrcb_error, u) +
1829 			  offsetof(struct ipr_hostrcb_type_07_error, data)));
1830 }
1831 
1832 static const struct {
1833 	u8 active;
1834 	char *desc;
1835 } path_active_desc[] = {
1836 	{ IPR_PATH_NO_INFO, "Path" },
1837 	{ IPR_PATH_ACTIVE, "Active path" },
1838 	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
1839 };
1840 
1841 static const struct {
1842 	u8 state;
1843 	char *desc;
1844 } path_state_desc[] = {
1845 	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1846 	{ IPR_PATH_HEALTHY, "is healthy" },
1847 	{ IPR_PATH_DEGRADED, "is degraded" },
1848 	{ IPR_PATH_FAILED, "is failed" }
1849 };
1850 
1851 /**
1852  * ipr_log_fabric_path - Log a fabric path error
1853  * @hostrcb:	hostrcb struct
1854  * @fabric:		fabric descriptor
1855  *
1856  * Return value:
1857  * 	none
1858  **/
1859 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1860 				struct ipr_hostrcb_fabric_desc *fabric)
1861 {
1862 	int i, j;
1863 	u8 path_state = fabric->path_state;
1864 	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1865 	u8 state = path_state & IPR_PATH_STATE_MASK;
1866 
1867 	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1868 		if (path_active_desc[i].active != active)
1869 			continue;
1870 
1871 		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1872 			if (path_state_desc[j].state != state)
1873 				continue;
1874 
1875 			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1876 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1877 					     path_active_desc[i].desc, path_state_desc[j].desc,
1878 					     fabric->ioa_port);
1879 			} else if (fabric->cascaded_expander == 0xff) {
1880 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1881 					     path_active_desc[i].desc, path_state_desc[j].desc,
1882 					     fabric->ioa_port, fabric->phy);
1883 			} else if (fabric->phy == 0xff) {
1884 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1885 					     path_active_desc[i].desc, path_state_desc[j].desc,
1886 					     fabric->ioa_port, fabric->cascaded_expander);
1887 			} else {
1888 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1889 					     path_active_desc[i].desc, path_state_desc[j].desc,
1890 					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1891 			}
1892 			return;
1893 		}
1894 	}
1895 
1896 	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1897 		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1898 }
1899 
1900 /**
1901  * ipr_log64_fabric_path - Log a fabric path error
1902  * @hostrcb:	hostrcb struct
1903  * @fabric:		fabric descriptor
1904  *
1905  * Return value:
1906  * 	none
1907  **/
1908 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1909 				  struct ipr_hostrcb64_fabric_desc *fabric)
1910 {
1911 	int i, j;
1912 	u8 path_state = fabric->path_state;
1913 	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1914 	u8 state = path_state & IPR_PATH_STATE_MASK;
1915 	char buffer[IPR_MAX_RES_PATH_LENGTH];
1916 
1917 	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1918 		if (path_active_desc[i].active != active)
1919 			continue;
1920 
1921 		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1922 			if (path_state_desc[j].state != state)
1923 				continue;
1924 
1925 			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1926 				     path_active_desc[i].desc, path_state_desc[j].desc,
1927 				     ipr_format_res_path(fabric->res_path, buffer,
1928 							 sizeof(buffer)));
1929 			return;
1930 		}
1931 	}
1932 
1933 	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1934 		ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
1935 }
1936 
1937 static const struct {
1938 	u8 type;
1939 	char *desc;
1940 } path_type_desc[] = {
1941 	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
1942 	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
1943 	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1944 	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1945 };
1946 
1947 static const struct {
1948 	u8 status;
1949 	char *desc;
1950 } path_status_desc[] = {
1951 	{ IPR_PATH_CFG_NO_PROB, "Functional" },
1952 	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
1953 	{ IPR_PATH_CFG_FAILED, "Failed" },
1954 	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
1955 	{ IPR_PATH_NOT_DETECTED, "Missing" },
1956 	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1957 };
1958 
1959 static const char *link_rate[] = {
1960 	"unknown",
1961 	"disabled",
1962 	"phy reset problem",
1963 	"spinup hold",
1964 	"port selector",
1965 	"unknown",
1966 	"unknown",
1967 	"unknown",
1968 	"1.5Gbps",
1969 	"3.0Gbps",
1970 	"unknown",
1971 	"unknown",
1972 	"unknown",
1973 	"unknown",
1974 	"unknown",
1975 	"unknown"
1976 };
1977 
1978 /**
1979  * ipr_log_path_elem - Log a fabric path element.
1980  * @hostrcb:	hostrcb struct
1981  * @cfg:		fabric path element struct
1982  *
1983  * Return value:
1984  * 	none
1985  **/
1986 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1987 			      struct ipr_hostrcb_config_element *cfg)
1988 {
1989 	int i, j;
1990 	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1991 	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1992 
1993 	if (type == IPR_PATH_CFG_NOT_EXIST)
1994 		return;
1995 
1996 	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1997 		if (path_type_desc[i].type != type)
1998 			continue;
1999 
2000 		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2001 			if (path_status_desc[j].status != status)
2002 				continue;
2003 
2004 			if (type == IPR_PATH_CFG_IOA_PORT) {
2005 				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2006 					     path_status_desc[j].desc, path_type_desc[i].desc,
2007 					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2008 					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2009 			} else {
2010 				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2011 					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2012 						     path_status_desc[j].desc, path_type_desc[i].desc,
2013 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2014 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2015 				} else if (cfg->cascaded_expander == 0xff) {
2016 					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2017 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2018 						     path_type_desc[i].desc, cfg->phy,
2019 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2020 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2021 				} else if (cfg->phy == 0xff) {
2022 					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2023 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2024 						     path_type_desc[i].desc, cfg->cascaded_expander,
2025 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2026 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2027 				} else {
2028 					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2029 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2030 						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2031 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2032 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2033 				}
2034 			}
2035 			return;
2036 		}
2037 	}
2038 
2039 	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2040 		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2041 		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2042 		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2043 }
2044 
2045 /**
2046  * ipr_log64_path_elem - Log a fabric path element.
2047  * @hostrcb:	hostrcb struct
2048  * @cfg:		fabric path element struct
2049  *
2050  * Return value:
2051  * 	none
2052  **/
2053 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2054 				struct ipr_hostrcb64_config_element *cfg)
2055 {
2056 	int i, j;
2057 	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2058 	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2059 	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2060 	char buffer[IPR_MAX_RES_PATH_LENGTH];
2061 
2062 	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2063 		return;
2064 
2065 	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2066 		if (path_type_desc[i].type != type)
2067 			continue;
2068 
2069 		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2070 			if (path_status_desc[j].status != status)
2071 				continue;
2072 
2073 			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2074 				     path_status_desc[j].desc, path_type_desc[i].desc,
2075 				     ipr_format_res_path(cfg->res_path, buffer,
2076 							 sizeof(buffer)),
2077 				     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2078 				     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2079 			return;
2080 		}
2081 	}
2082 	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2083 		     "WWN=%08X%08X\n", cfg->type_status,
2084 		     ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
2085 		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2086 		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2087 }
2088 
2089 /**
2090  * ipr_log_fabric_error - Log a fabric error.
2091  * @ioa_cfg:	ioa config struct
2092  * @hostrcb:	hostrcb struct
2093  *
2094  * Return value:
2095  * 	none
2096  **/
2097 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2098 				 struct ipr_hostrcb *hostrcb)
2099 {
2100 	struct ipr_hostrcb_type_20_error *error;
2101 	struct ipr_hostrcb_fabric_desc *fabric;
2102 	struct ipr_hostrcb_config_element *cfg;
2103 	int i, add_len;
2104 
2105 	error = &hostrcb->hcam.u.error.u.type_20_error;
2106 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2107 	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2108 
2109 	add_len = be32_to_cpu(hostrcb->hcam.length) -
2110 		(offsetof(struct ipr_hostrcb_error, u) +
2111 		 offsetof(struct ipr_hostrcb_type_20_error, desc));
2112 
2113 	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2114 		ipr_log_fabric_path(hostrcb, fabric);
2115 		for_each_fabric_cfg(fabric, cfg)
2116 			ipr_log_path_elem(hostrcb, cfg);
2117 
2118 		add_len -= be16_to_cpu(fabric->length);
2119 		fabric = (struct ipr_hostrcb_fabric_desc *)
2120 			((unsigned long)fabric + be16_to_cpu(fabric->length));
2121 	}
2122 
2123 	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2124 }
2125 
2126 /**
2127  * ipr_log_sis64_array_error - Log a sis64 array error.
2128  * @ioa_cfg:	ioa config struct
2129  * @hostrcb:	hostrcb struct
2130  *
2131  * Return value:
2132  * 	none
2133  **/
2134 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2135 				      struct ipr_hostrcb *hostrcb)
2136 {
2137 	int i, num_entries;
2138 	struct ipr_hostrcb_type_24_error *error;
2139 	struct ipr_hostrcb64_array_data_entry *array_entry;
2140 	char buffer[IPR_MAX_RES_PATH_LENGTH];
2141 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2142 
2143 	error = &hostrcb->hcam.u.error64.u.type_24_error;
2144 
2145 	ipr_err_separator;
2146 
2147 	ipr_err("RAID %s Array Configuration: %s\n",
2148 		error->protection_level,
2149 		ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
2150 
2151 	ipr_err_separator;
2152 
2153 	array_entry = error->array_member;
2154 	num_entries = min_t(u32, error->num_entries,
2155 			    ARRAY_SIZE(error->array_member));
2156 
2157 	for (i = 0; i < num_entries; i++, array_entry++) {
2158 
2159 		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2160 			continue;
2161 
2162 		if (error->exposed_mode_adn == i)
2163 			ipr_err("Exposed Array Member %d:\n", i);
2164 		else
2165 			ipr_err("Array Member %d:\n", i);
2166 
2167 		ipr_err("Array Member %d:\n", i);
2168 		ipr_log_ext_vpd(&array_entry->vpd);
2169 		ipr_err("Current Location: %s\n",
2170 			 ipr_format_res_path(array_entry->res_path, buffer,
2171 					     sizeof(buffer)));
2172 		ipr_err("Expected Location: %s\n",
2173 			 ipr_format_res_path(array_entry->expected_res_path,
2174 					     buffer, sizeof(buffer)));
2175 
2176 		ipr_err_separator;
2177 	}
2178 }
2179 
2180 /**
2181  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2182  * @ioa_cfg:	ioa config struct
2183  * @hostrcb:	hostrcb struct
2184  *
2185  * Return value:
2186  * 	none
2187  **/
2188 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2189 				       struct ipr_hostrcb *hostrcb)
2190 {
2191 	struct ipr_hostrcb_type_30_error *error;
2192 	struct ipr_hostrcb64_fabric_desc *fabric;
2193 	struct ipr_hostrcb64_config_element *cfg;
2194 	int i, add_len;
2195 
2196 	error = &hostrcb->hcam.u.error64.u.type_30_error;
2197 
2198 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2199 	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2200 
2201 	add_len = be32_to_cpu(hostrcb->hcam.length) -
2202 		(offsetof(struct ipr_hostrcb64_error, u) +
2203 		 offsetof(struct ipr_hostrcb_type_30_error, desc));
2204 
2205 	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2206 		ipr_log64_fabric_path(hostrcb, fabric);
2207 		for_each_fabric_cfg(fabric, cfg)
2208 			ipr_log64_path_elem(hostrcb, cfg);
2209 
2210 		add_len -= be16_to_cpu(fabric->length);
2211 		fabric = (struct ipr_hostrcb64_fabric_desc *)
2212 			((unsigned long)fabric + be16_to_cpu(fabric->length));
2213 	}
2214 
2215 	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2216 }
2217 
2218 /**
2219  * ipr_log_generic_error - Log an adapter error.
2220  * @ioa_cfg:	ioa config struct
2221  * @hostrcb:	hostrcb struct
2222  *
2223  * Return value:
2224  * 	none
2225  **/
2226 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2227 				  struct ipr_hostrcb *hostrcb)
2228 {
2229 	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2230 			 be32_to_cpu(hostrcb->hcam.length));
2231 }
2232 
2233 /**
2234  * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2235  * @ioasc:	IOASC
2236  *
2237  * This function will return the index of into the ipr_error_table
2238  * for the specified IOASC. If the IOASC is not in the table,
2239  * 0 will be returned, which points to the entry used for unknown errors.
2240  *
2241  * Return value:
2242  * 	index into the ipr_error_table
2243  **/
2244 static u32 ipr_get_error(u32 ioasc)
2245 {
2246 	int i;
2247 
2248 	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2249 		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2250 			return i;
2251 
2252 	return 0;
2253 }
2254 
2255 /**
2256  * ipr_handle_log_data - Log an adapter error.
2257  * @ioa_cfg:	ioa config struct
2258  * @hostrcb:	hostrcb struct
2259  *
2260  * This function logs an adapter error to the system.
2261  *
2262  * Return value:
2263  * 	none
2264  **/
2265 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2266 				struct ipr_hostrcb *hostrcb)
2267 {
2268 	u32 ioasc;
2269 	int error_index;
2270 
2271 	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2272 		return;
2273 
2274 	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2275 		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2276 
2277 	if (ioa_cfg->sis64)
2278 		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2279 	else
2280 		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2281 
2282 	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2283 	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2284 		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
2285 		scsi_report_bus_reset(ioa_cfg->host,
2286 				      hostrcb->hcam.u.error.fd_res_addr.bus);
2287 	}
2288 
2289 	error_index = ipr_get_error(ioasc);
2290 
2291 	if (!ipr_error_table[error_index].log_hcam)
2292 		return;
2293 
2294 	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2295 
2296 	/* Set indication we have logged an error */
2297 	ioa_cfg->errors_logged++;
2298 
2299 	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2300 		return;
2301 	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2302 		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2303 
2304 	switch (hostrcb->hcam.overlay_id) {
2305 	case IPR_HOST_RCB_OVERLAY_ID_2:
2306 		ipr_log_cache_error(ioa_cfg, hostrcb);
2307 		break;
2308 	case IPR_HOST_RCB_OVERLAY_ID_3:
2309 		ipr_log_config_error(ioa_cfg, hostrcb);
2310 		break;
2311 	case IPR_HOST_RCB_OVERLAY_ID_4:
2312 	case IPR_HOST_RCB_OVERLAY_ID_6:
2313 		ipr_log_array_error(ioa_cfg, hostrcb);
2314 		break;
2315 	case IPR_HOST_RCB_OVERLAY_ID_7:
2316 		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2317 		break;
2318 	case IPR_HOST_RCB_OVERLAY_ID_12:
2319 		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2320 		break;
2321 	case IPR_HOST_RCB_OVERLAY_ID_13:
2322 		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2323 		break;
2324 	case IPR_HOST_RCB_OVERLAY_ID_14:
2325 	case IPR_HOST_RCB_OVERLAY_ID_16:
2326 		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2327 		break;
2328 	case IPR_HOST_RCB_OVERLAY_ID_17:
2329 		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2330 		break;
2331 	case IPR_HOST_RCB_OVERLAY_ID_20:
2332 		ipr_log_fabric_error(ioa_cfg, hostrcb);
2333 		break;
2334 	case IPR_HOST_RCB_OVERLAY_ID_23:
2335 		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2336 		break;
2337 	case IPR_HOST_RCB_OVERLAY_ID_24:
2338 	case IPR_HOST_RCB_OVERLAY_ID_26:
2339 		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2340 		break;
2341 	case IPR_HOST_RCB_OVERLAY_ID_30:
2342 		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2343 		break;
2344 	case IPR_HOST_RCB_OVERLAY_ID_1:
2345 	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2346 	default:
2347 		ipr_log_generic_error(ioa_cfg, hostrcb);
2348 		break;
2349 	}
2350 }
2351 
2352 /**
2353  * ipr_process_error - Op done function for an adapter error log.
2354  * @ipr_cmd:	ipr command struct
2355  *
2356  * This function is the op done function for an error log host
2357  * controlled async from the adapter. It will log the error and
2358  * send the HCAM back to the adapter.
2359  *
2360  * Return value:
2361  * 	none
2362  **/
2363 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2364 {
2365 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2366 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2367 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2368 	u32 fd_ioasc;
2369 
2370 	if (ioa_cfg->sis64)
2371 		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2372 	else
2373 		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2374 
2375 	list_del(&hostrcb->queue);
2376 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2377 
2378 	if (!ioasc) {
2379 		ipr_handle_log_data(ioa_cfg, hostrcb);
2380 		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2381 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2382 	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2383 		dev_err(&ioa_cfg->pdev->dev,
2384 			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
2385 	}
2386 
2387 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2388 }
2389 
2390 /**
2391  * ipr_timeout -  An internally generated op has timed out.
2392  * @ipr_cmd:	ipr command struct
2393  *
2394  * This function blocks host requests and initiates an
2395  * adapter reset.
2396  *
2397  * Return value:
2398  * 	none
2399  **/
2400 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2401 {
2402 	unsigned long lock_flags = 0;
2403 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2404 
2405 	ENTER;
2406 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2407 
2408 	ioa_cfg->errors_logged++;
2409 	dev_err(&ioa_cfg->pdev->dev,
2410 		"Adapter being reset due to command timeout.\n");
2411 
2412 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2413 		ioa_cfg->sdt_state = GET_DUMP;
2414 
2415 	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2416 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2417 
2418 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2419 	LEAVE;
2420 }
2421 
2422 /**
2423  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2424  * @ipr_cmd:	ipr command struct
2425  *
2426  * This function blocks host requests and initiates an
2427  * adapter reset.
2428  *
2429  * Return value:
2430  * 	none
2431  **/
2432 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2433 {
2434 	unsigned long lock_flags = 0;
2435 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2436 
2437 	ENTER;
2438 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2439 
2440 	ioa_cfg->errors_logged++;
2441 	dev_err(&ioa_cfg->pdev->dev,
2442 		"Adapter timed out transitioning to operational.\n");
2443 
2444 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2445 		ioa_cfg->sdt_state = GET_DUMP;
2446 
2447 	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2448 		if (ipr_fastfail)
2449 			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2450 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2451 	}
2452 
2453 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2454 	LEAVE;
2455 }
2456 
2457 /**
2458  * ipr_reset_reload - Reset/Reload the IOA
2459  * @ioa_cfg:		ioa config struct
2460  * @shutdown_type:	shutdown type
2461  *
2462  * This function resets the adapter and re-initializes it.
2463  * This function assumes that all new host commands have been stopped.
2464  * Return value:
2465  * 	SUCCESS / FAILED
2466  **/
2467 static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2468 			    enum ipr_shutdown_type shutdown_type)
2469 {
2470 	if (!ioa_cfg->in_reset_reload)
2471 		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2472 
2473 	spin_unlock_irq(ioa_cfg->host->host_lock);
2474 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2475 	spin_lock_irq(ioa_cfg->host->host_lock);
2476 
2477 	/* If we got hit with a host reset while we were already resetting
2478 	 the adapter for some reason, and the reset failed. */
2479 	if (ioa_cfg->ioa_is_dead) {
2480 		ipr_trace;
2481 		return FAILED;
2482 	}
2483 
2484 	return SUCCESS;
2485 }
2486 
2487 /**
2488  * ipr_find_ses_entry - Find matching SES in SES table
2489  * @res:	resource entry struct of SES
2490  *
2491  * Return value:
2492  * 	pointer to SES table entry / NULL on failure
2493  **/
2494 static const struct ipr_ses_table_entry *
2495 ipr_find_ses_entry(struct ipr_resource_entry *res)
2496 {
2497 	int i, j, matches;
2498 	struct ipr_std_inq_vpids *vpids;
2499 	const struct ipr_ses_table_entry *ste = ipr_ses_table;
2500 
2501 	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2502 		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2503 			if (ste->compare_product_id_byte[j] == 'X') {
2504 				vpids = &res->std_inq_data.vpids;
2505 				if (vpids->product_id[j] == ste->product_id[j])
2506 					matches++;
2507 				else
2508 					break;
2509 			} else
2510 				matches++;
2511 		}
2512 
2513 		if (matches == IPR_PROD_ID_LEN)
2514 			return ste;
2515 	}
2516 
2517 	return NULL;
2518 }
2519 
2520 /**
2521  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2522  * @ioa_cfg:	ioa config struct
2523  * @bus:		SCSI bus
2524  * @bus_width:	bus width
2525  *
2526  * Return value:
2527  *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2528  *	For a 2-byte wide SCSI bus, the maximum transfer speed is
2529  *	twice the maximum transfer rate (e.g. for a wide enabled bus,
2530  *	max 160MHz = max 320MB/sec).
2531  **/
2532 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2533 {
2534 	struct ipr_resource_entry *res;
2535 	const struct ipr_ses_table_entry *ste;
2536 	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2537 
2538 	/* Loop through each config table entry in the config table buffer */
2539 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2540 		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2541 			continue;
2542 
2543 		if (bus != res->bus)
2544 			continue;
2545 
2546 		if (!(ste = ipr_find_ses_entry(res)))
2547 			continue;
2548 
2549 		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2550 	}
2551 
2552 	return max_xfer_rate;
2553 }
2554 
2555 /**
2556  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2557  * @ioa_cfg:		ioa config struct
2558  * @max_delay:		max delay in micro-seconds to wait
2559  *
2560  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2561  *
2562  * Return value:
2563  * 	0 on success / other on failure
2564  **/
2565 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2566 {
2567 	volatile u32 pcii_reg;
2568 	int delay = 1;
2569 
2570 	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
2571 	while (delay < max_delay) {
2572 		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2573 
2574 		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2575 			return 0;
2576 
2577 		/* udelay cannot be used if delay is more than a few milliseconds */
2578 		if ((delay / 1000) > MAX_UDELAY_MS)
2579 			mdelay(delay / 1000);
2580 		else
2581 			udelay(delay);
2582 
2583 		delay += delay;
2584 	}
2585 	return -EIO;
2586 }
2587 
2588 /**
2589  * ipr_get_sis64_dump_data_section - Dump IOA memory
2590  * @ioa_cfg:			ioa config struct
2591  * @start_addr:			adapter address to dump
2592  * @dest:			destination kernel buffer
2593  * @length_in_words:		length to dump in 4 byte words
2594  *
2595  * Return value:
2596  * 	0 on success
2597  **/
2598 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2599 					   u32 start_addr,
2600 					   __be32 *dest, u32 length_in_words)
2601 {
2602 	int i;
2603 
2604 	for (i = 0; i < length_in_words; i++) {
2605 		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2606 		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2607 		dest++;
2608 	}
2609 
2610 	return 0;
2611 }
2612 
2613 /**
2614  * ipr_get_ldump_data_section - Dump IOA memory
2615  * @ioa_cfg:			ioa config struct
2616  * @start_addr:			adapter address to dump
2617  * @dest:				destination kernel buffer
2618  * @length_in_words:	length to dump in 4 byte words
2619  *
2620  * Return value:
2621  * 	0 on success / -EIO on failure
2622  **/
2623 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2624 				      u32 start_addr,
2625 				      __be32 *dest, u32 length_in_words)
2626 {
2627 	volatile u32 temp_pcii_reg;
2628 	int i, delay = 0;
2629 
2630 	if (ioa_cfg->sis64)
2631 		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2632 						       dest, length_in_words);
2633 
2634 	/* Write IOA interrupt reg starting LDUMP state  */
2635 	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2636 	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2637 
2638 	/* Wait for IO debug acknowledge */
2639 	if (ipr_wait_iodbg_ack(ioa_cfg,
2640 			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2641 		dev_err(&ioa_cfg->pdev->dev,
2642 			"IOA dump long data transfer timeout\n");
2643 		return -EIO;
2644 	}
2645 
2646 	/* Signal LDUMP interlocked - clear IO debug ack */
2647 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2648 	       ioa_cfg->regs.clr_interrupt_reg);
2649 
2650 	/* Write Mailbox with starting address */
2651 	writel(start_addr, ioa_cfg->ioa_mailbox);
2652 
2653 	/* Signal address valid - clear IOA Reset alert */
2654 	writel(IPR_UPROCI_RESET_ALERT,
2655 	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2656 
2657 	for (i = 0; i < length_in_words; i++) {
2658 		/* Wait for IO debug acknowledge */
2659 		if (ipr_wait_iodbg_ack(ioa_cfg,
2660 				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2661 			dev_err(&ioa_cfg->pdev->dev,
2662 				"IOA dump short data transfer timeout\n");
2663 			return -EIO;
2664 		}
2665 
2666 		/* Read data from mailbox and increment destination pointer */
2667 		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2668 		dest++;
2669 
2670 		/* For all but the last word of data, signal data received */
2671 		if (i < (length_in_words - 1)) {
2672 			/* Signal dump data received - Clear IO debug Ack */
2673 			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2674 			       ioa_cfg->regs.clr_interrupt_reg);
2675 		}
2676 	}
2677 
2678 	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
2679 	writel(IPR_UPROCI_RESET_ALERT,
2680 	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2681 
2682 	writel(IPR_UPROCI_IO_DEBUG_ALERT,
2683 	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2684 
2685 	/* Signal dump data received - Clear IO debug Ack */
2686 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2687 	       ioa_cfg->regs.clr_interrupt_reg);
2688 
2689 	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2690 	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2691 		temp_pcii_reg =
2692 		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2693 
2694 		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2695 			return 0;
2696 
2697 		udelay(10);
2698 		delay += 10;
2699 	}
2700 
2701 	return 0;
2702 }
2703 
2704 #ifdef CONFIG_SCSI_IPR_DUMP
2705 /**
2706  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2707  * @ioa_cfg:		ioa config struct
2708  * @pci_address:	adapter address
2709  * @length:			length of data to copy
2710  *
2711  * Copy data from PCI adapter to kernel buffer.
2712  * Note: length MUST be a 4 byte multiple
2713  * Return value:
2714  * 	0 on success / other on failure
2715  **/
2716 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2717 			unsigned long pci_address, u32 length)
2718 {
2719 	int bytes_copied = 0;
2720 	int cur_len, rc, rem_len, rem_page_len;
2721 	__be32 *page;
2722 	unsigned long lock_flags = 0;
2723 	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2724 
2725 	while (bytes_copied < length &&
2726 	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
2727 		if (ioa_dump->page_offset >= PAGE_SIZE ||
2728 		    ioa_dump->page_offset == 0) {
2729 			page = (__be32 *)__get_free_page(GFP_ATOMIC);
2730 
2731 			if (!page) {
2732 				ipr_trace;
2733 				return bytes_copied;
2734 			}
2735 
2736 			ioa_dump->page_offset = 0;
2737 			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2738 			ioa_dump->next_page_index++;
2739 		} else
2740 			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2741 
2742 		rem_len = length - bytes_copied;
2743 		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2744 		cur_len = min(rem_len, rem_page_len);
2745 
2746 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2747 		if (ioa_cfg->sdt_state == ABORT_DUMP) {
2748 			rc = -EIO;
2749 		} else {
2750 			rc = ipr_get_ldump_data_section(ioa_cfg,
2751 							pci_address + bytes_copied,
2752 							&page[ioa_dump->page_offset / 4],
2753 							(cur_len / sizeof(u32)));
2754 		}
2755 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2756 
2757 		if (!rc) {
2758 			ioa_dump->page_offset += cur_len;
2759 			bytes_copied += cur_len;
2760 		} else {
2761 			ipr_trace;
2762 			break;
2763 		}
2764 		schedule();
2765 	}
2766 
2767 	return bytes_copied;
2768 }
2769 
2770 /**
2771  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2772  * @hdr:	dump entry header struct
2773  *
2774  * Return value:
2775  * 	nothing
2776  **/
2777 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2778 {
2779 	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2780 	hdr->num_elems = 1;
2781 	hdr->offset = sizeof(*hdr);
2782 	hdr->status = IPR_DUMP_STATUS_SUCCESS;
2783 }
2784 
2785 /**
2786  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2787  * @ioa_cfg:	ioa config struct
2788  * @driver_dump:	driver dump struct
2789  *
2790  * Return value:
2791  * 	nothing
2792  **/
2793 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2794 				   struct ipr_driver_dump *driver_dump)
2795 {
2796 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2797 
2798 	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2799 	driver_dump->ioa_type_entry.hdr.len =
2800 		sizeof(struct ipr_dump_ioa_type_entry) -
2801 		sizeof(struct ipr_dump_entry_header);
2802 	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2803 	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2804 	driver_dump->ioa_type_entry.type = ioa_cfg->type;
2805 	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2806 		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2807 		ucode_vpd->minor_release[1];
2808 	driver_dump->hdr.num_entries++;
2809 }
2810 
2811 /**
2812  * ipr_dump_version_data - Fill in the driver version in the dump.
2813  * @ioa_cfg:	ioa config struct
2814  * @driver_dump:	driver dump struct
2815  *
2816  * Return value:
2817  * 	nothing
2818  **/
2819 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2820 				  struct ipr_driver_dump *driver_dump)
2821 {
2822 	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2823 	driver_dump->version_entry.hdr.len =
2824 		sizeof(struct ipr_dump_version_entry) -
2825 		sizeof(struct ipr_dump_entry_header);
2826 	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2827 	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2828 	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2829 	driver_dump->hdr.num_entries++;
2830 }
2831 
2832 /**
2833  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2834  * @ioa_cfg:	ioa config struct
2835  * @driver_dump:	driver dump struct
2836  *
2837  * Return value:
2838  * 	nothing
2839  **/
2840 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2841 				   struct ipr_driver_dump *driver_dump)
2842 {
2843 	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2844 	driver_dump->trace_entry.hdr.len =
2845 		sizeof(struct ipr_dump_trace_entry) -
2846 		sizeof(struct ipr_dump_entry_header);
2847 	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2848 	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2849 	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2850 	driver_dump->hdr.num_entries++;
2851 }
2852 
2853 /**
2854  * ipr_dump_location_data - Fill in the IOA location in the dump.
2855  * @ioa_cfg:	ioa config struct
2856  * @driver_dump:	driver dump struct
2857  *
2858  * Return value:
2859  * 	nothing
2860  **/
2861 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2862 				   struct ipr_driver_dump *driver_dump)
2863 {
2864 	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2865 	driver_dump->location_entry.hdr.len =
2866 		sizeof(struct ipr_dump_location_entry) -
2867 		sizeof(struct ipr_dump_entry_header);
2868 	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2869 	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2870 	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2871 	driver_dump->hdr.num_entries++;
2872 }
2873 
2874 /**
2875  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2876  * @ioa_cfg:	ioa config struct
2877  * @dump:		dump struct
2878  *
2879  * Return value:
2880  * 	nothing
2881  **/
2882 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2883 {
2884 	unsigned long start_addr, sdt_word;
2885 	unsigned long lock_flags = 0;
2886 	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2887 	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2888 	u32 num_entries, start_off, end_off;
2889 	u32 bytes_to_copy, bytes_copied, rc;
2890 	struct ipr_sdt *sdt;
2891 	int valid = 1;
2892 	int i;
2893 
2894 	ENTER;
2895 
2896 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2897 
2898 	if (ioa_cfg->sdt_state != GET_DUMP) {
2899 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2900 		return;
2901 	}
2902 
2903 	if (ioa_cfg->sis64) {
2904 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2905 		ssleep(IPR_DUMP_DELAY_SECONDS);
2906 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2907 	}
2908 
2909 	start_addr = readl(ioa_cfg->ioa_mailbox);
2910 
2911 	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2912 		dev_err(&ioa_cfg->pdev->dev,
2913 			"Invalid dump table format: %lx\n", start_addr);
2914 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2915 		return;
2916 	}
2917 
2918 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2919 
2920 	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2921 
2922 	/* Initialize the overall dump header */
2923 	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2924 	driver_dump->hdr.num_entries = 1;
2925 	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2926 	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2927 	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2928 	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2929 
2930 	ipr_dump_version_data(ioa_cfg, driver_dump);
2931 	ipr_dump_location_data(ioa_cfg, driver_dump);
2932 	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2933 	ipr_dump_trace_data(ioa_cfg, driver_dump);
2934 
2935 	/* Update dump_header */
2936 	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2937 
2938 	/* IOA Dump entry */
2939 	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2940 	ioa_dump->hdr.len = 0;
2941 	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2942 	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2943 
2944 	/* First entries in sdt are actually a list of dump addresses and
2945 	 lengths to gather the real dump data.  sdt represents the pointer
2946 	 to the ioa generated dump table.  Dump data will be extracted based
2947 	 on entries in this table */
2948 	sdt = &ioa_dump->sdt;
2949 
2950 	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2951 					sizeof(struct ipr_sdt) / sizeof(__be32));
2952 
2953 	/* Smart Dump table is ready to use and the first entry is valid */
2954 	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
2955 	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
2956 		dev_err(&ioa_cfg->pdev->dev,
2957 			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
2958 			rc, be32_to_cpu(sdt->hdr.state));
2959 		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2960 		ioa_cfg->sdt_state = DUMP_OBTAINED;
2961 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2962 		return;
2963 	}
2964 
2965 	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2966 
2967 	if (num_entries > IPR_NUM_SDT_ENTRIES)
2968 		num_entries = IPR_NUM_SDT_ENTRIES;
2969 
2970 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2971 
2972 	for (i = 0; i < num_entries; i++) {
2973 		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2974 			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2975 			break;
2976 		}
2977 
2978 		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2979 			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
2980 			if (ioa_cfg->sis64)
2981 				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
2982 			else {
2983 				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2984 				end_off = be32_to_cpu(sdt->entry[i].end_token);
2985 
2986 				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
2987 					bytes_to_copy = end_off - start_off;
2988 				else
2989 					valid = 0;
2990 			}
2991 			if (valid) {
2992 				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2993 					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2994 					continue;
2995 				}
2996 
2997 				/* Copy data from adapter to driver buffers */
2998 				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2999 							    bytes_to_copy);
3000 
3001 				ioa_dump->hdr.len += bytes_copied;
3002 
3003 				if (bytes_copied != bytes_to_copy) {
3004 					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3005 					break;
3006 				}
3007 			}
3008 		}
3009 	}
3010 
3011 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3012 
3013 	/* Update dump_header */
3014 	driver_dump->hdr.len += ioa_dump->hdr.len;
3015 	wmb();
3016 	ioa_cfg->sdt_state = DUMP_OBTAINED;
3017 	LEAVE;
3018 }
3019 
3020 #else
3021 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
3022 #endif
3023 
3024 /**
3025  * ipr_release_dump - Free adapter dump memory
3026  * @kref:	kref struct
3027  *
3028  * Return value:
3029  *	nothing
3030  **/
3031 static void ipr_release_dump(struct kref *kref)
3032 {
3033 	struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
3034 	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3035 	unsigned long lock_flags = 0;
3036 	int i;
3037 
3038 	ENTER;
3039 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3040 	ioa_cfg->dump = NULL;
3041 	ioa_cfg->sdt_state = INACTIVE;
3042 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3043 
3044 	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3045 		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3046 
3047 	kfree(dump);
3048 	LEAVE;
3049 }
3050 
3051 /**
3052  * ipr_worker_thread - Worker thread
3053  * @work:		ioa config struct
3054  *
3055  * Called at task level from a work thread. This function takes care
3056  * of adding and removing device from the mid-layer as configuration
3057  * changes are detected by the adapter.
3058  *
3059  * Return value:
3060  * 	nothing
3061  **/
3062 static void ipr_worker_thread(struct work_struct *work)
3063 {
3064 	unsigned long lock_flags;
3065 	struct ipr_resource_entry *res;
3066 	struct scsi_device *sdev;
3067 	struct ipr_dump *dump;
3068 	struct ipr_ioa_cfg *ioa_cfg =
3069 		container_of(work, struct ipr_ioa_cfg, work_q);
3070 	u8 bus, target, lun;
3071 	int did_work;
3072 
3073 	ENTER;
3074 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3075 
3076 	if (ioa_cfg->sdt_state == GET_DUMP) {
3077 		dump = ioa_cfg->dump;
3078 		if (!dump) {
3079 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3080 			return;
3081 		}
3082 		kref_get(&dump->kref);
3083 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3084 		ipr_get_ioa_dump(ioa_cfg, dump);
3085 		kref_put(&dump->kref, ipr_release_dump);
3086 
3087 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3088 		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
3089 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3090 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3091 		return;
3092 	}
3093 
3094 restart:
3095 	do {
3096 		did_work = 0;
3097 		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3098 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3099 			return;
3100 		}
3101 
3102 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3103 			if (res->del_from_ml && res->sdev) {
3104 				did_work = 1;
3105 				sdev = res->sdev;
3106 				if (!scsi_device_get(sdev)) {
3107 					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3108 					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3109 					scsi_remove_device(sdev);
3110 					scsi_device_put(sdev);
3111 					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3112 				}
3113 				break;
3114 			}
3115 		}
3116 	} while(did_work);
3117 
3118 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3119 		if (res->add_to_ml) {
3120 			bus = res->bus;
3121 			target = res->target;
3122 			lun = res->lun;
3123 			res->add_to_ml = 0;
3124 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3125 			scsi_add_device(ioa_cfg->host, bus, target, lun);
3126 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3127 			goto restart;
3128 		}
3129 	}
3130 
3131 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3132 	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3133 	LEAVE;
3134 }
3135 
3136 #ifdef CONFIG_SCSI_IPR_TRACE
3137 /**
3138  * ipr_read_trace - Dump the adapter trace
3139  * @filp:		open sysfs file
3140  * @kobj:		kobject struct
3141  * @bin_attr:		bin_attribute struct
3142  * @buf:		buffer
3143  * @off:		offset
3144  * @count:		buffer size
3145  *
3146  * Return value:
3147  *	number of bytes printed to buffer
3148  **/
3149 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3150 			      struct bin_attribute *bin_attr,
3151 			      char *buf, loff_t off, size_t count)
3152 {
3153 	struct device *dev = container_of(kobj, struct device, kobj);
3154 	struct Scsi_Host *shost = class_to_shost(dev);
3155 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3156 	unsigned long lock_flags = 0;
3157 	ssize_t ret;
3158 
3159 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3160 	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3161 				IPR_TRACE_SIZE);
3162 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3163 
3164 	return ret;
3165 }
3166 
3167 static struct bin_attribute ipr_trace_attr = {
3168 	.attr =	{
3169 		.name = "trace",
3170 		.mode = S_IRUGO,
3171 	},
3172 	.size = 0,
3173 	.read = ipr_read_trace,
3174 };
3175 #endif
3176 
3177 /**
3178  * ipr_show_fw_version - Show the firmware version
3179  * @dev:	class device struct
3180  * @buf:	buffer
3181  *
3182  * Return value:
3183  *	number of bytes printed to buffer
3184  **/
3185 static ssize_t ipr_show_fw_version(struct device *dev,
3186 				   struct device_attribute *attr, char *buf)
3187 {
3188 	struct Scsi_Host *shost = class_to_shost(dev);
3189 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3190 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3191 	unsigned long lock_flags = 0;
3192 	int len;
3193 
3194 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3195 	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3196 		       ucode_vpd->major_release, ucode_vpd->card_type,
3197 		       ucode_vpd->minor_release[0],
3198 		       ucode_vpd->minor_release[1]);
3199 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3200 	return len;
3201 }
3202 
3203 static struct device_attribute ipr_fw_version_attr = {
3204 	.attr = {
3205 		.name =		"fw_version",
3206 		.mode =		S_IRUGO,
3207 	},
3208 	.show = ipr_show_fw_version,
3209 };
3210 
3211 /**
3212  * ipr_show_log_level - Show the adapter's error logging level
3213  * @dev:	class device struct
3214  * @buf:	buffer
3215  *
3216  * Return value:
3217  * 	number of bytes printed to buffer
3218  **/
3219 static ssize_t ipr_show_log_level(struct device *dev,
3220 				   struct device_attribute *attr, char *buf)
3221 {
3222 	struct Scsi_Host *shost = class_to_shost(dev);
3223 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3224 	unsigned long lock_flags = 0;
3225 	int len;
3226 
3227 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3228 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3229 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3230 	return len;
3231 }
3232 
3233 /**
3234  * ipr_store_log_level - Change the adapter's error logging level
3235  * @dev:	class device struct
3236  * @buf:	buffer
3237  *
3238  * Return value:
3239  * 	number of bytes printed to buffer
3240  **/
3241 static ssize_t ipr_store_log_level(struct device *dev,
3242 			           struct device_attribute *attr,
3243 				   const char *buf, size_t count)
3244 {
3245 	struct Scsi_Host *shost = class_to_shost(dev);
3246 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3247 	unsigned long lock_flags = 0;
3248 
3249 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3250 	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3251 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3252 	return strlen(buf);
3253 }
3254 
3255 static struct device_attribute ipr_log_level_attr = {
3256 	.attr = {
3257 		.name =		"log_level",
3258 		.mode =		S_IRUGO | S_IWUSR,
3259 	},
3260 	.show = ipr_show_log_level,
3261 	.store = ipr_store_log_level
3262 };
3263 
3264 /**
3265  * ipr_store_diagnostics - IOA Diagnostics interface
3266  * @dev:	device struct
3267  * @buf:	buffer
3268  * @count:	buffer size
3269  *
3270  * This function will reset the adapter and wait a reasonable
3271  * amount of time for any errors that the adapter might log.
3272  *
3273  * Return value:
3274  * 	count on success / other on failure
3275  **/
3276 static ssize_t ipr_store_diagnostics(struct device *dev,
3277 				     struct device_attribute *attr,
3278 				     const char *buf, size_t count)
3279 {
3280 	struct Scsi_Host *shost = class_to_shost(dev);
3281 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3282 	unsigned long lock_flags = 0;
3283 	int rc = count;
3284 
3285 	if (!capable(CAP_SYS_ADMIN))
3286 		return -EACCES;
3287 
3288 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3289 	while(ioa_cfg->in_reset_reload) {
3290 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3291 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3292 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3293 	}
3294 
3295 	ioa_cfg->errors_logged = 0;
3296 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3297 
3298 	if (ioa_cfg->in_reset_reload) {
3299 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3300 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3301 
3302 		/* Wait for a second for any errors to be logged */
3303 		msleep(1000);
3304 	} else {
3305 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3306 		return -EIO;
3307 	}
3308 
3309 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3310 	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3311 		rc = -EIO;
3312 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3313 
3314 	return rc;
3315 }
3316 
3317 static struct device_attribute ipr_diagnostics_attr = {
3318 	.attr = {
3319 		.name =		"run_diagnostics",
3320 		.mode =		S_IWUSR,
3321 	},
3322 	.store = ipr_store_diagnostics
3323 };
3324 
3325 /**
3326  * ipr_show_adapter_state - Show the adapter's state
3327  * @class_dev:	device struct
3328  * @buf:	buffer
3329  *
3330  * Return value:
3331  * 	number of bytes printed to buffer
3332  **/
3333 static ssize_t ipr_show_adapter_state(struct device *dev,
3334 				      struct device_attribute *attr, char *buf)
3335 {
3336 	struct Scsi_Host *shost = class_to_shost(dev);
3337 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3338 	unsigned long lock_flags = 0;
3339 	int len;
3340 
3341 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3342 	if (ioa_cfg->ioa_is_dead)
3343 		len = snprintf(buf, PAGE_SIZE, "offline\n");
3344 	else
3345 		len = snprintf(buf, PAGE_SIZE, "online\n");
3346 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3347 	return len;
3348 }
3349 
3350 /**
3351  * ipr_store_adapter_state - Change adapter state
3352  * @dev:	device struct
3353  * @buf:	buffer
3354  * @count:	buffer size
3355  *
3356  * This function will change the adapter's state.
3357  *
3358  * Return value:
3359  * 	count on success / other on failure
3360  **/
3361 static ssize_t ipr_store_adapter_state(struct device *dev,
3362 				       struct device_attribute *attr,
3363 				       const char *buf, size_t count)
3364 {
3365 	struct Scsi_Host *shost = class_to_shost(dev);
3366 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3367 	unsigned long lock_flags;
3368 	int result = count;
3369 
3370 	if (!capable(CAP_SYS_ADMIN))
3371 		return -EACCES;
3372 
3373 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3374 	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3375 		ioa_cfg->ioa_is_dead = 0;
3376 		ioa_cfg->reset_retries = 0;
3377 		ioa_cfg->in_ioa_bringdown = 0;
3378 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3379 	}
3380 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3381 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3382 
3383 	return result;
3384 }
3385 
3386 static struct device_attribute ipr_ioa_state_attr = {
3387 	.attr = {
3388 		.name =		"online_state",
3389 		.mode =		S_IRUGO | S_IWUSR,
3390 	},
3391 	.show = ipr_show_adapter_state,
3392 	.store = ipr_store_adapter_state
3393 };
3394 
3395 /**
3396  * ipr_store_reset_adapter - Reset the adapter
3397  * @dev:	device struct
3398  * @buf:	buffer
3399  * @count:	buffer size
3400  *
3401  * This function will reset the adapter.
3402  *
3403  * Return value:
3404  * 	count on success / other on failure
3405  **/
3406 static ssize_t ipr_store_reset_adapter(struct device *dev,
3407 				       struct device_attribute *attr,
3408 				       const char *buf, size_t count)
3409 {
3410 	struct Scsi_Host *shost = class_to_shost(dev);
3411 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3412 	unsigned long lock_flags;
3413 	int result = count;
3414 
3415 	if (!capable(CAP_SYS_ADMIN))
3416 		return -EACCES;
3417 
3418 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3419 	if (!ioa_cfg->in_reset_reload)
3420 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3421 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3422 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3423 
3424 	return result;
3425 }
3426 
3427 static struct device_attribute ipr_ioa_reset_attr = {
3428 	.attr = {
3429 		.name =		"reset_host",
3430 		.mode =		S_IWUSR,
3431 	},
3432 	.store = ipr_store_reset_adapter
3433 };
3434 
3435 /**
3436  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3437  * @buf_len:		buffer length
3438  *
3439  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3440  * list to use for microcode download
3441  *
3442  * Return value:
3443  * 	pointer to sglist / NULL on failure
3444  **/
3445 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3446 {
3447 	int sg_size, order, bsize_elem, num_elem, i, j;
3448 	struct ipr_sglist *sglist;
3449 	struct scatterlist *scatterlist;
3450 	struct page *page;
3451 
3452 	/* Get the minimum size per scatter/gather element */
3453 	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3454 
3455 	/* Get the actual size per element */
3456 	order = get_order(sg_size);
3457 
3458 	/* Determine the actual number of bytes per element */
3459 	bsize_elem = PAGE_SIZE * (1 << order);
3460 
3461 	/* Determine the actual number of sg entries needed */
3462 	if (buf_len % bsize_elem)
3463 		num_elem = (buf_len / bsize_elem) + 1;
3464 	else
3465 		num_elem = buf_len / bsize_elem;
3466 
3467 	/* Allocate a scatter/gather list for the DMA */
3468 	sglist = kzalloc(sizeof(struct ipr_sglist) +
3469 			 (sizeof(struct scatterlist) * (num_elem - 1)),
3470 			 GFP_KERNEL);
3471 
3472 	if (sglist == NULL) {
3473 		ipr_trace;
3474 		return NULL;
3475 	}
3476 
3477 	scatterlist = sglist->scatterlist;
3478 	sg_init_table(scatterlist, num_elem);
3479 
3480 	sglist->order = order;
3481 	sglist->num_sg = num_elem;
3482 
3483 	/* Allocate a bunch of sg elements */
3484 	for (i = 0; i < num_elem; i++) {
3485 		page = alloc_pages(GFP_KERNEL, order);
3486 		if (!page) {
3487 			ipr_trace;
3488 
3489 			/* Free up what we already allocated */
3490 			for (j = i - 1; j >= 0; j--)
3491 				__free_pages(sg_page(&scatterlist[j]), order);
3492 			kfree(sglist);
3493 			return NULL;
3494 		}
3495 
3496 		sg_set_page(&scatterlist[i], page, 0, 0);
3497 	}
3498 
3499 	return sglist;
3500 }
3501 
3502 /**
3503  * ipr_free_ucode_buffer - Frees a microcode download buffer
3504  * @p_dnld:		scatter/gather list pointer
3505  *
3506  * Free a DMA'able ucode download buffer previously allocated with
3507  * ipr_alloc_ucode_buffer
3508  *
3509  * Return value:
3510  * 	nothing
3511  **/
3512 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3513 {
3514 	int i;
3515 
3516 	for (i = 0; i < sglist->num_sg; i++)
3517 		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3518 
3519 	kfree(sglist);
3520 }
3521 
3522 /**
3523  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3524  * @sglist:		scatter/gather list pointer
3525  * @buffer:		buffer pointer
3526  * @len:		buffer length
3527  *
3528  * Copy a microcode image from a user buffer into a buffer allocated by
3529  * ipr_alloc_ucode_buffer
3530  *
3531  * Return value:
3532  * 	0 on success / other on failure
3533  **/
3534 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3535 				 u8 *buffer, u32 len)
3536 {
3537 	int bsize_elem, i, result = 0;
3538 	struct scatterlist *scatterlist;
3539 	void *kaddr;
3540 
3541 	/* Determine the actual number of bytes per element */
3542 	bsize_elem = PAGE_SIZE * (1 << sglist->order);
3543 
3544 	scatterlist = sglist->scatterlist;
3545 
3546 	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3547 		struct page *page = sg_page(&scatterlist[i]);
3548 
3549 		kaddr = kmap(page);
3550 		memcpy(kaddr, buffer, bsize_elem);
3551 		kunmap(page);
3552 
3553 		scatterlist[i].length = bsize_elem;
3554 
3555 		if (result != 0) {
3556 			ipr_trace;
3557 			return result;
3558 		}
3559 	}
3560 
3561 	if (len % bsize_elem) {
3562 		struct page *page = sg_page(&scatterlist[i]);
3563 
3564 		kaddr = kmap(page);
3565 		memcpy(kaddr, buffer, len % bsize_elem);
3566 		kunmap(page);
3567 
3568 		scatterlist[i].length = len % bsize_elem;
3569 	}
3570 
3571 	sglist->buffer_len = len;
3572 	return result;
3573 }
3574 
3575 /**
3576  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3577  * @ipr_cmd:		ipr command struct
3578  * @sglist:		scatter/gather list
3579  *
3580  * Builds a microcode download IOA data list (IOADL).
3581  *
3582  **/
3583 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3584 				    struct ipr_sglist *sglist)
3585 {
3586 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3587 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3588 	struct scatterlist *scatterlist = sglist->scatterlist;
3589 	int i;
3590 
3591 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3592 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3593 	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3594 
3595 	ioarcb->ioadl_len =
3596 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3597 	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3598 		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3599 		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3600 		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3601 	}
3602 
3603 	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3604 }
3605 
3606 /**
3607  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3608  * @ipr_cmd:	ipr command struct
3609  * @sglist:		scatter/gather list
3610  *
3611  * Builds a microcode download IOA data list (IOADL).
3612  *
3613  **/
3614 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3615 				  struct ipr_sglist *sglist)
3616 {
3617 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3618 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3619 	struct scatterlist *scatterlist = sglist->scatterlist;
3620 	int i;
3621 
3622 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3623 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3624 	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3625 
3626 	ioarcb->ioadl_len =
3627 		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3628 
3629 	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3630 		ioadl[i].flags_and_data_len =
3631 			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3632 		ioadl[i].address =
3633 			cpu_to_be32(sg_dma_address(&scatterlist[i]));
3634 	}
3635 
3636 	ioadl[i-1].flags_and_data_len |=
3637 		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3638 }
3639 
3640 /**
3641  * ipr_update_ioa_ucode - Update IOA's microcode
3642  * @ioa_cfg:	ioa config struct
3643  * @sglist:		scatter/gather list
3644  *
3645  * Initiate an adapter reset to update the IOA's microcode
3646  *
3647  * Return value:
3648  * 	0 on success / -EIO on failure
3649  **/
3650 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3651 				struct ipr_sglist *sglist)
3652 {
3653 	unsigned long lock_flags;
3654 
3655 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3656 	while(ioa_cfg->in_reset_reload) {
3657 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3658 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3659 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3660 	}
3661 
3662 	if (ioa_cfg->ucode_sglist) {
3663 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3664 		dev_err(&ioa_cfg->pdev->dev,
3665 			"Microcode download already in progress\n");
3666 		return -EIO;
3667 	}
3668 
3669 	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3670 					sglist->num_sg, DMA_TO_DEVICE);
3671 
3672 	if (!sglist->num_dma_sg) {
3673 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3674 		dev_err(&ioa_cfg->pdev->dev,
3675 			"Failed to map microcode download buffer!\n");
3676 		return -EIO;
3677 	}
3678 
3679 	ioa_cfg->ucode_sglist = sglist;
3680 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3681 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3682 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3683 
3684 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3685 	ioa_cfg->ucode_sglist = NULL;
3686 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3687 	return 0;
3688 }
3689 
3690 /**
3691  * ipr_store_update_fw - Update the firmware on the adapter
3692  * @class_dev:	device struct
3693  * @buf:	buffer
3694  * @count:	buffer size
3695  *
3696  * This function will update the firmware on the adapter.
3697  *
3698  * Return value:
3699  * 	count on success / other on failure
3700  **/
3701 static ssize_t ipr_store_update_fw(struct device *dev,
3702 				   struct device_attribute *attr,
3703 				   const char *buf, size_t count)
3704 {
3705 	struct Scsi_Host *shost = class_to_shost(dev);
3706 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3707 	struct ipr_ucode_image_header *image_hdr;
3708 	const struct firmware *fw_entry;
3709 	struct ipr_sglist *sglist;
3710 	char fname[100];
3711 	char *src;
3712 	int len, result, dnld_size;
3713 
3714 	if (!capable(CAP_SYS_ADMIN))
3715 		return -EACCES;
3716 
3717 	len = snprintf(fname, 99, "%s", buf);
3718 	fname[len-1] = '\0';
3719 
3720 	if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3721 		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3722 		return -EIO;
3723 	}
3724 
3725 	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3726 
3727 	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3728 	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
3729 	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3730 		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3731 		release_firmware(fw_entry);
3732 		return -EINVAL;
3733 	}
3734 
3735 	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3736 	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3737 	sglist = ipr_alloc_ucode_buffer(dnld_size);
3738 
3739 	if (!sglist) {
3740 		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3741 		release_firmware(fw_entry);
3742 		return -ENOMEM;
3743 	}
3744 
3745 	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3746 
3747 	if (result) {
3748 		dev_err(&ioa_cfg->pdev->dev,
3749 			"Microcode buffer copy to DMA buffer failed\n");
3750 		goto out;
3751 	}
3752 
3753 	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3754 
3755 	if (!result)
3756 		result = count;
3757 out:
3758 	ipr_free_ucode_buffer(sglist);
3759 	release_firmware(fw_entry);
3760 	return result;
3761 }
3762 
3763 static struct device_attribute ipr_update_fw_attr = {
3764 	.attr = {
3765 		.name =		"update_fw",
3766 		.mode =		S_IWUSR,
3767 	},
3768 	.store = ipr_store_update_fw
3769 };
3770 
3771 /**
3772  * ipr_show_fw_type - Show the adapter's firmware type.
3773  * @dev:	class device struct
3774  * @buf:	buffer
3775  *
3776  * Return value:
3777  *	number of bytes printed to buffer
3778  **/
3779 static ssize_t ipr_show_fw_type(struct device *dev,
3780 				struct device_attribute *attr, char *buf)
3781 {
3782 	struct Scsi_Host *shost = class_to_shost(dev);
3783 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3784 	unsigned long lock_flags = 0;
3785 	int len;
3786 
3787 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3788 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
3789 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3790 	return len;
3791 }
3792 
3793 static struct device_attribute ipr_ioa_fw_type_attr = {
3794 	.attr = {
3795 		.name =		"fw_type",
3796 		.mode =		S_IRUGO,
3797 	},
3798 	.show = ipr_show_fw_type
3799 };
3800 
3801 static struct device_attribute *ipr_ioa_attrs[] = {
3802 	&ipr_fw_version_attr,
3803 	&ipr_log_level_attr,
3804 	&ipr_diagnostics_attr,
3805 	&ipr_ioa_state_attr,
3806 	&ipr_ioa_reset_attr,
3807 	&ipr_update_fw_attr,
3808 	&ipr_ioa_fw_type_attr,
3809 	NULL,
3810 };
3811 
3812 #ifdef CONFIG_SCSI_IPR_DUMP
3813 /**
3814  * ipr_read_dump - Dump the adapter
3815  * @filp:		open sysfs file
3816  * @kobj:		kobject struct
3817  * @bin_attr:		bin_attribute struct
3818  * @buf:		buffer
3819  * @off:		offset
3820  * @count:		buffer size
3821  *
3822  * Return value:
3823  *	number of bytes printed to buffer
3824  **/
3825 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
3826 			     struct bin_attribute *bin_attr,
3827 			     char *buf, loff_t off, size_t count)
3828 {
3829 	struct device *cdev = container_of(kobj, struct device, kobj);
3830 	struct Scsi_Host *shost = class_to_shost(cdev);
3831 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3832 	struct ipr_dump *dump;
3833 	unsigned long lock_flags = 0;
3834 	char *src;
3835 	int len;
3836 	size_t rc = count;
3837 
3838 	if (!capable(CAP_SYS_ADMIN))
3839 		return -EACCES;
3840 
3841 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3842 	dump = ioa_cfg->dump;
3843 
3844 	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3845 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3846 		return 0;
3847 	}
3848 	kref_get(&dump->kref);
3849 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3850 
3851 	if (off > dump->driver_dump.hdr.len) {
3852 		kref_put(&dump->kref, ipr_release_dump);
3853 		return 0;
3854 	}
3855 
3856 	if (off + count > dump->driver_dump.hdr.len) {
3857 		count = dump->driver_dump.hdr.len - off;
3858 		rc = count;
3859 	}
3860 
3861 	if (count && off < sizeof(dump->driver_dump)) {
3862 		if (off + count > sizeof(dump->driver_dump))
3863 			len = sizeof(dump->driver_dump) - off;
3864 		else
3865 			len = count;
3866 		src = (u8 *)&dump->driver_dump + off;
3867 		memcpy(buf, src, len);
3868 		buf += len;
3869 		off += len;
3870 		count -= len;
3871 	}
3872 
3873 	off -= sizeof(dump->driver_dump);
3874 
3875 	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3876 		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3877 			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3878 		else
3879 			len = count;
3880 		src = (u8 *)&dump->ioa_dump + off;
3881 		memcpy(buf, src, len);
3882 		buf += len;
3883 		off += len;
3884 		count -= len;
3885 	}
3886 
3887 	off -= offsetof(struct ipr_ioa_dump, ioa_data);
3888 
3889 	while (count) {
3890 		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3891 			len = PAGE_ALIGN(off) - off;
3892 		else
3893 			len = count;
3894 		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3895 		src += off & ~PAGE_MASK;
3896 		memcpy(buf, src, len);
3897 		buf += len;
3898 		off += len;
3899 		count -= len;
3900 	}
3901 
3902 	kref_put(&dump->kref, ipr_release_dump);
3903 	return rc;
3904 }
3905 
3906 /**
3907  * ipr_alloc_dump - Prepare for adapter dump
3908  * @ioa_cfg:	ioa config struct
3909  *
3910  * Return value:
3911  *	0 on success / other on failure
3912  **/
3913 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3914 {
3915 	struct ipr_dump *dump;
3916 	unsigned long lock_flags = 0;
3917 
3918 	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3919 
3920 	if (!dump) {
3921 		ipr_err("Dump memory allocation failed\n");
3922 		return -ENOMEM;
3923 	}
3924 
3925 	kref_init(&dump->kref);
3926 	dump->ioa_cfg = ioa_cfg;
3927 
3928 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3929 
3930 	if (INACTIVE != ioa_cfg->sdt_state) {
3931 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3932 		kfree(dump);
3933 		return 0;
3934 	}
3935 
3936 	ioa_cfg->dump = dump;
3937 	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3938 	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3939 		ioa_cfg->dump_taken = 1;
3940 		schedule_work(&ioa_cfg->work_q);
3941 	}
3942 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3943 
3944 	return 0;
3945 }
3946 
3947 /**
3948  * ipr_free_dump - Free adapter dump memory
3949  * @ioa_cfg:	ioa config struct
3950  *
3951  * Return value:
3952  *	0 on success / other on failure
3953  **/
3954 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3955 {
3956 	struct ipr_dump *dump;
3957 	unsigned long lock_flags = 0;
3958 
3959 	ENTER;
3960 
3961 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3962 	dump = ioa_cfg->dump;
3963 	if (!dump) {
3964 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3965 		return 0;
3966 	}
3967 
3968 	ioa_cfg->dump = NULL;
3969 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3970 
3971 	kref_put(&dump->kref, ipr_release_dump);
3972 
3973 	LEAVE;
3974 	return 0;
3975 }
3976 
3977 /**
3978  * ipr_write_dump - Setup dump state of adapter
3979  * @filp:		open sysfs file
3980  * @kobj:		kobject struct
3981  * @bin_attr:		bin_attribute struct
3982  * @buf:		buffer
3983  * @off:		offset
3984  * @count:		buffer size
3985  *
3986  * Return value:
3987  *	number of bytes printed to buffer
3988  **/
3989 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
3990 			      struct bin_attribute *bin_attr,
3991 			      char *buf, loff_t off, size_t count)
3992 {
3993 	struct device *cdev = container_of(kobj, struct device, kobj);
3994 	struct Scsi_Host *shost = class_to_shost(cdev);
3995 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3996 	int rc;
3997 
3998 	if (!capable(CAP_SYS_ADMIN))
3999 		return -EACCES;
4000 
4001 	if (buf[0] == '1')
4002 		rc = ipr_alloc_dump(ioa_cfg);
4003 	else if (buf[0] == '0')
4004 		rc = ipr_free_dump(ioa_cfg);
4005 	else
4006 		return -EINVAL;
4007 
4008 	if (rc)
4009 		return rc;
4010 	else
4011 		return count;
4012 }
4013 
4014 static struct bin_attribute ipr_dump_attr = {
4015 	.attr =	{
4016 		.name = "dump",
4017 		.mode = S_IRUSR | S_IWUSR,
4018 	},
4019 	.size = 0,
4020 	.read = ipr_read_dump,
4021 	.write = ipr_write_dump
4022 };
4023 #else
4024 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4025 #endif
4026 
4027 /**
4028  * ipr_change_queue_depth - Change the device's queue depth
4029  * @sdev:	scsi device struct
4030  * @qdepth:	depth to set
4031  * @reason:	calling context
4032  *
4033  * Return value:
4034  * 	actual depth set
4035  **/
4036 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4037 				  int reason)
4038 {
4039 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4040 	struct ipr_resource_entry *res;
4041 	unsigned long lock_flags = 0;
4042 
4043 	if (reason != SCSI_QDEPTH_DEFAULT)
4044 		return -EOPNOTSUPP;
4045 
4046 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4047 	res = (struct ipr_resource_entry *)sdev->hostdata;
4048 
4049 	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4050 		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4051 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4052 
4053 	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4054 	return sdev->queue_depth;
4055 }
4056 
4057 /**
4058  * ipr_change_queue_type - Change the device's queue type
4059  * @dsev:		scsi device struct
4060  * @tag_type:	type of tags to use
4061  *
4062  * Return value:
4063  * 	actual queue type set
4064  **/
4065 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4066 {
4067 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4068 	struct ipr_resource_entry *res;
4069 	unsigned long lock_flags = 0;
4070 
4071 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4072 	res = (struct ipr_resource_entry *)sdev->hostdata;
4073 
4074 	if (res) {
4075 		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4076 			/*
4077 			 * We don't bother quiescing the device here since the
4078 			 * adapter firmware does it for us.
4079 			 */
4080 			scsi_set_tag_type(sdev, tag_type);
4081 
4082 			if (tag_type)
4083 				scsi_activate_tcq(sdev, sdev->queue_depth);
4084 			else
4085 				scsi_deactivate_tcq(sdev, sdev->queue_depth);
4086 		} else
4087 			tag_type = 0;
4088 	} else
4089 		tag_type = 0;
4090 
4091 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4092 	return tag_type;
4093 }
4094 
4095 /**
4096  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4097  * @dev:	device struct
4098  * @attr:	device attribute structure
4099  * @buf:	buffer
4100  *
4101  * Return value:
4102  * 	number of bytes printed to buffer
4103  **/
4104 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4105 {
4106 	struct scsi_device *sdev = to_scsi_device(dev);
4107 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4108 	struct ipr_resource_entry *res;
4109 	unsigned long lock_flags = 0;
4110 	ssize_t len = -ENXIO;
4111 
4112 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4113 	res = (struct ipr_resource_entry *)sdev->hostdata;
4114 	if (res)
4115 		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4116 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4117 	return len;
4118 }
4119 
4120 static struct device_attribute ipr_adapter_handle_attr = {
4121 	.attr = {
4122 		.name = 	"adapter_handle",
4123 		.mode =		S_IRUSR,
4124 	},
4125 	.show = ipr_show_adapter_handle
4126 };
4127 
4128 /**
4129  * ipr_show_resource_path - Show the resource path or the resource address for
4130  *			    this device.
4131  * @dev:	device struct
4132  * @attr:	device attribute structure
4133  * @buf:	buffer
4134  *
4135  * Return value:
4136  * 	number of bytes printed to buffer
4137  **/
4138 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4139 {
4140 	struct scsi_device *sdev = to_scsi_device(dev);
4141 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4142 	struct ipr_resource_entry *res;
4143 	unsigned long lock_flags = 0;
4144 	ssize_t len = -ENXIO;
4145 	char buffer[IPR_MAX_RES_PATH_LENGTH];
4146 
4147 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4148 	res = (struct ipr_resource_entry *)sdev->hostdata;
4149 	if (res && ioa_cfg->sis64)
4150 		len = snprintf(buf, PAGE_SIZE, "%s\n",
4151 			       ipr_format_res_path(res->res_path, buffer,
4152 						   sizeof(buffer)));
4153 	else if (res)
4154 		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4155 			       res->bus, res->target, res->lun);
4156 
4157 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4158 	return len;
4159 }
4160 
4161 static struct device_attribute ipr_resource_path_attr = {
4162 	.attr = {
4163 		.name = 	"resource_path",
4164 		.mode =		S_IRUGO,
4165 	},
4166 	.show = ipr_show_resource_path
4167 };
4168 
4169 /**
4170  * ipr_show_device_id - Show the device_id for this device.
4171  * @dev:	device struct
4172  * @attr:	device attribute structure
4173  * @buf:	buffer
4174  *
4175  * Return value:
4176  *	number of bytes printed to buffer
4177  **/
4178 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4179 {
4180 	struct scsi_device *sdev = to_scsi_device(dev);
4181 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4182 	struct ipr_resource_entry *res;
4183 	unsigned long lock_flags = 0;
4184 	ssize_t len = -ENXIO;
4185 
4186 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4187 	res = (struct ipr_resource_entry *)sdev->hostdata;
4188 	if (res && ioa_cfg->sis64)
4189 		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4190 	else if (res)
4191 		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4192 
4193 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4194 	return len;
4195 }
4196 
4197 static struct device_attribute ipr_device_id_attr = {
4198 	.attr = {
4199 		.name =		"device_id",
4200 		.mode =		S_IRUGO,
4201 	},
4202 	.show = ipr_show_device_id
4203 };
4204 
4205 /**
4206  * ipr_show_resource_type - Show the resource type for this device.
4207  * @dev:	device struct
4208  * @attr:	device attribute structure
4209  * @buf:	buffer
4210  *
4211  * Return value:
4212  *	number of bytes printed to buffer
4213  **/
4214 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4215 {
4216 	struct scsi_device *sdev = to_scsi_device(dev);
4217 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4218 	struct ipr_resource_entry *res;
4219 	unsigned long lock_flags = 0;
4220 	ssize_t len = -ENXIO;
4221 
4222 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4223 	res = (struct ipr_resource_entry *)sdev->hostdata;
4224 
4225 	if (res)
4226 		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4227 
4228 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4229 	return len;
4230 }
4231 
4232 static struct device_attribute ipr_resource_type_attr = {
4233 	.attr = {
4234 		.name =		"resource_type",
4235 		.mode =		S_IRUGO,
4236 	},
4237 	.show = ipr_show_resource_type
4238 };
4239 
4240 static struct device_attribute *ipr_dev_attrs[] = {
4241 	&ipr_adapter_handle_attr,
4242 	&ipr_resource_path_attr,
4243 	&ipr_device_id_attr,
4244 	&ipr_resource_type_attr,
4245 	NULL,
4246 };
4247 
4248 /**
4249  * ipr_biosparam - Return the HSC mapping
4250  * @sdev:			scsi device struct
4251  * @block_device:	block device pointer
4252  * @capacity:		capacity of the device
4253  * @parm:			Array containing returned HSC values.
4254  *
4255  * This function generates the HSC parms that fdisk uses.
4256  * We want to make sure we return something that places partitions
4257  * on 4k boundaries for best performance with the IOA.
4258  *
4259  * Return value:
4260  * 	0 on success
4261  **/
4262 static int ipr_biosparam(struct scsi_device *sdev,
4263 			 struct block_device *block_device,
4264 			 sector_t capacity, int *parm)
4265 {
4266 	int heads, sectors;
4267 	sector_t cylinders;
4268 
4269 	heads = 128;
4270 	sectors = 32;
4271 
4272 	cylinders = capacity;
4273 	sector_div(cylinders, (128 * 32));
4274 
4275 	/* return result */
4276 	parm[0] = heads;
4277 	parm[1] = sectors;
4278 	parm[2] = cylinders;
4279 
4280 	return 0;
4281 }
4282 
4283 /**
4284  * ipr_find_starget - Find target based on bus/target.
4285  * @starget:	scsi target struct
4286  *
4287  * Return value:
4288  * 	resource entry pointer if found / NULL if not found
4289  **/
4290 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4291 {
4292 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4293 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4294 	struct ipr_resource_entry *res;
4295 
4296 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4297 		if ((res->bus == starget->channel) &&
4298 		    (res->target == starget->id) &&
4299 		    (res->lun == 0)) {
4300 			return res;
4301 		}
4302 	}
4303 
4304 	return NULL;
4305 }
4306 
4307 static struct ata_port_info sata_port_info;
4308 
4309 /**
4310  * ipr_target_alloc - Prepare for commands to a SCSI target
4311  * @starget:	scsi target struct
4312  *
4313  * If the device is a SATA device, this function allocates an
4314  * ATA port with libata, else it does nothing.
4315  *
4316  * Return value:
4317  * 	0 on success / non-0 on failure
4318  **/
4319 static int ipr_target_alloc(struct scsi_target *starget)
4320 {
4321 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4322 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4323 	struct ipr_sata_port *sata_port;
4324 	struct ata_port *ap;
4325 	struct ipr_resource_entry *res;
4326 	unsigned long lock_flags;
4327 
4328 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4329 	res = ipr_find_starget(starget);
4330 	starget->hostdata = NULL;
4331 
4332 	if (res && ipr_is_gata(res)) {
4333 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4334 		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4335 		if (!sata_port)
4336 			return -ENOMEM;
4337 
4338 		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4339 		if (ap) {
4340 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4341 			sata_port->ioa_cfg = ioa_cfg;
4342 			sata_port->ap = ap;
4343 			sata_port->res = res;
4344 
4345 			res->sata_port = sata_port;
4346 			ap->private_data = sata_port;
4347 			starget->hostdata = sata_port;
4348 		} else {
4349 			kfree(sata_port);
4350 			return -ENOMEM;
4351 		}
4352 	}
4353 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4354 
4355 	return 0;
4356 }
4357 
4358 /**
4359  * ipr_target_destroy - Destroy a SCSI target
4360  * @starget:	scsi target struct
4361  *
4362  * If the device was a SATA device, this function frees the libata
4363  * ATA port, else it does nothing.
4364  *
4365  **/
4366 static void ipr_target_destroy(struct scsi_target *starget)
4367 {
4368 	struct ipr_sata_port *sata_port = starget->hostdata;
4369 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4370 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4371 
4372 	if (ioa_cfg->sis64) {
4373 		if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4374 			clear_bit(starget->id, ioa_cfg->array_ids);
4375 		else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4376 			clear_bit(starget->id, ioa_cfg->vset_ids);
4377 		else if (starget->channel == 0)
4378 			clear_bit(starget->id, ioa_cfg->target_ids);
4379 	}
4380 
4381 	if (sata_port) {
4382 		starget->hostdata = NULL;
4383 		ata_sas_port_destroy(sata_port->ap);
4384 		kfree(sata_port);
4385 	}
4386 }
4387 
4388 /**
4389  * ipr_find_sdev - Find device based on bus/target/lun.
4390  * @sdev:	scsi device struct
4391  *
4392  * Return value:
4393  * 	resource entry pointer if found / NULL if not found
4394  **/
4395 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4396 {
4397 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4398 	struct ipr_resource_entry *res;
4399 
4400 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4401 		if ((res->bus == sdev->channel) &&
4402 		    (res->target == sdev->id) &&
4403 		    (res->lun == sdev->lun))
4404 			return res;
4405 	}
4406 
4407 	return NULL;
4408 }
4409 
4410 /**
4411  * ipr_slave_destroy - Unconfigure a SCSI device
4412  * @sdev:	scsi device struct
4413  *
4414  * Return value:
4415  * 	nothing
4416  **/
4417 static void ipr_slave_destroy(struct scsi_device *sdev)
4418 {
4419 	struct ipr_resource_entry *res;
4420 	struct ipr_ioa_cfg *ioa_cfg;
4421 	unsigned long lock_flags = 0;
4422 
4423 	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4424 
4425 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4426 	res = (struct ipr_resource_entry *) sdev->hostdata;
4427 	if (res) {
4428 		if (res->sata_port)
4429 			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4430 		sdev->hostdata = NULL;
4431 		res->sdev = NULL;
4432 		res->sata_port = NULL;
4433 	}
4434 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4435 }
4436 
4437 /**
4438  * ipr_slave_configure - Configure a SCSI device
4439  * @sdev:	scsi device struct
4440  *
4441  * This function configures the specified scsi device.
4442  *
4443  * Return value:
4444  * 	0 on success
4445  **/
4446 static int ipr_slave_configure(struct scsi_device *sdev)
4447 {
4448 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4449 	struct ipr_resource_entry *res;
4450 	struct ata_port *ap = NULL;
4451 	unsigned long lock_flags = 0;
4452 	char buffer[IPR_MAX_RES_PATH_LENGTH];
4453 
4454 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4455 	res = sdev->hostdata;
4456 	if (res) {
4457 		if (ipr_is_af_dasd_device(res))
4458 			sdev->type = TYPE_RAID;
4459 		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4460 			sdev->scsi_level = 4;
4461 			sdev->no_uld_attach = 1;
4462 		}
4463 		if (ipr_is_vset_device(res)) {
4464 			blk_queue_rq_timeout(sdev->request_queue,
4465 					     IPR_VSET_RW_TIMEOUT);
4466 			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4467 		}
4468 		if (ipr_is_gata(res) && res->sata_port)
4469 			ap = res->sata_port->ap;
4470 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4471 
4472 		if (ap) {
4473 			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4474 			ata_sas_slave_configure(sdev, ap);
4475 		} else
4476 			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4477 		if (ioa_cfg->sis64)
4478 			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4479 				    ipr_format_res_path(res->res_path, buffer,
4480 							sizeof(buffer)));
4481 		return 0;
4482 	}
4483 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4484 	return 0;
4485 }
4486 
4487 /**
4488  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4489  * @sdev:	scsi device struct
4490  *
4491  * This function initializes an ATA port so that future commands
4492  * sent through queuecommand will work.
4493  *
4494  * Return value:
4495  * 	0 on success
4496  **/
4497 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4498 {
4499 	struct ipr_sata_port *sata_port = NULL;
4500 	int rc = -ENXIO;
4501 
4502 	ENTER;
4503 	if (sdev->sdev_target)
4504 		sata_port = sdev->sdev_target->hostdata;
4505 	if (sata_port)
4506 		rc = ata_sas_port_init(sata_port->ap);
4507 	if (rc)
4508 		ipr_slave_destroy(sdev);
4509 
4510 	LEAVE;
4511 	return rc;
4512 }
4513 
4514 /**
4515  * ipr_slave_alloc - Prepare for commands to a device.
4516  * @sdev:	scsi device struct
4517  *
4518  * This function saves a pointer to the resource entry
4519  * in the scsi device struct if the device exists. We
4520  * can then use this pointer in ipr_queuecommand when
4521  * handling new commands.
4522  *
4523  * Return value:
4524  * 	0 on success / -ENXIO if device does not exist
4525  **/
4526 static int ipr_slave_alloc(struct scsi_device *sdev)
4527 {
4528 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4529 	struct ipr_resource_entry *res;
4530 	unsigned long lock_flags;
4531 	int rc = -ENXIO;
4532 
4533 	sdev->hostdata = NULL;
4534 
4535 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4536 
4537 	res = ipr_find_sdev(sdev);
4538 	if (res) {
4539 		res->sdev = sdev;
4540 		res->add_to_ml = 0;
4541 		res->in_erp = 0;
4542 		sdev->hostdata = res;
4543 		if (!ipr_is_naca_model(res))
4544 			res->needs_sync_complete = 1;
4545 		rc = 0;
4546 		if (ipr_is_gata(res)) {
4547 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4548 			return ipr_ata_slave_alloc(sdev);
4549 		}
4550 	}
4551 
4552 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4553 
4554 	return rc;
4555 }
4556 
4557 /**
4558  * ipr_eh_host_reset - Reset the host adapter
4559  * @scsi_cmd:	scsi command struct
4560  *
4561  * Return value:
4562  * 	SUCCESS / FAILED
4563  **/
4564 static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
4565 {
4566 	struct ipr_ioa_cfg *ioa_cfg;
4567 	int rc;
4568 
4569 	ENTER;
4570 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4571 
4572 	dev_err(&ioa_cfg->pdev->dev,
4573 		"Adapter being reset as a result of error recovery.\n");
4574 
4575 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4576 		ioa_cfg->sdt_state = GET_DUMP;
4577 
4578 	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4579 
4580 	LEAVE;
4581 	return rc;
4582 }
4583 
4584 static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
4585 {
4586 	int rc;
4587 
4588 	spin_lock_irq(cmd->device->host->host_lock);
4589 	rc = __ipr_eh_host_reset(cmd);
4590 	spin_unlock_irq(cmd->device->host->host_lock);
4591 
4592 	return rc;
4593 }
4594 
4595 /**
4596  * ipr_device_reset - Reset the device
4597  * @ioa_cfg:	ioa config struct
4598  * @res:		resource entry struct
4599  *
4600  * This function issues a device reset to the affected device.
4601  * If the device is a SCSI device, a LUN reset will be sent
4602  * to the device first. If that does not work, a target reset
4603  * will be sent. If the device is a SATA device, a PHY reset will
4604  * be sent.
4605  *
4606  * Return value:
4607  *	0 on success / non-zero on failure
4608  **/
4609 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4610 			    struct ipr_resource_entry *res)
4611 {
4612 	struct ipr_cmnd *ipr_cmd;
4613 	struct ipr_ioarcb *ioarcb;
4614 	struct ipr_cmd_pkt *cmd_pkt;
4615 	struct ipr_ioarcb_ata_regs *regs;
4616 	u32 ioasc;
4617 
4618 	ENTER;
4619 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4620 	ioarcb = &ipr_cmd->ioarcb;
4621 	cmd_pkt = &ioarcb->cmd_pkt;
4622 
4623 	if (ipr_cmd->ioa_cfg->sis64) {
4624 		regs = &ipr_cmd->i.ata_ioadl.regs;
4625 		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4626 	} else
4627 		regs = &ioarcb->u.add_data.u.regs;
4628 
4629 	ioarcb->res_handle = res->res_handle;
4630 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4631 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4632 	if (ipr_is_gata(res)) {
4633 		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4634 		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4635 		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4636 	}
4637 
4638 	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4639 	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4640 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4641 	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4642 		if (ipr_cmd->ioa_cfg->sis64)
4643 			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4644 			       sizeof(struct ipr_ioasa_gata));
4645 		else
4646 			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4647 			       sizeof(struct ipr_ioasa_gata));
4648 	}
4649 
4650 	LEAVE;
4651 	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
4652 }
4653 
4654 /**
4655  * ipr_sata_reset - Reset the SATA port
4656  * @link:	SATA link to reset
4657  * @classes:	class of the attached device
4658  *
4659  * This function issues a SATA phy reset to the affected ATA link.
4660  *
4661  * Return value:
4662  *	0 on success / non-zero on failure
4663  **/
4664 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4665 				unsigned long deadline)
4666 {
4667 	struct ipr_sata_port *sata_port = link->ap->private_data;
4668 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4669 	struct ipr_resource_entry *res;
4670 	unsigned long lock_flags = 0;
4671 	int rc = -ENXIO;
4672 
4673 	ENTER;
4674 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4675 	while(ioa_cfg->in_reset_reload) {
4676 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4677 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4678 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4679 	}
4680 
4681 	res = sata_port->res;
4682 	if (res) {
4683 		rc = ipr_device_reset(ioa_cfg, res);
4684 		*classes = res->ata_class;
4685 	}
4686 
4687 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4688 	LEAVE;
4689 	return rc;
4690 }
4691 
4692 /**
4693  * ipr_eh_dev_reset - Reset the device
4694  * @scsi_cmd:	scsi command struct
4695  *
4696  * This function issues a device reset to the affected device.
4697  * A LUN reset will be sent to the device first. If that does
4698  * not work, a target reset will be sent.
4699  *
4700  * Return value:
4701  *	SUCCESS / FAILED
4702  **/
4703 static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
4704 {
4705 	struct ipr_cmnd *ipr_cmd;
4706 	struct ipr_ioa_cfg *ioa_cfg;
4707 	struct ipr_resource_entry *res;
4708 	struct ata_port *ap;
4709 	int rc = 0;
4710 
4711 	ENTER;
4712 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4713 	res = scsi_cmd->device->hostdata;
4714 
4715 	if (!res)
4716 		return FAILED;
4717 
4718 	/*
4719 	 * If we are currently going through reset/reload, return failed. This will force the
4720 	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4721 	 * reset to complete
4722 	 */
4723 	if (ioa_cfg->in_reset_reload)
4724 		return FAILED;
4725 	if (ioa_cfg->ioa_is_dead)
4726 		return FAILED;
4727 
4728 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4729 		if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4730 			if (ipr_cmd->scsi_cmd)
4731 				ipr_cmd->done = ipr_scsi_eh_done;
4732 			if (ipr_cmd->qc)
4733 				ipr_cmd->done = ipr_sata_eh_done;
4734 			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4735 				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4736 				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4737 			}
4738 		}
4739 	}
4740 
4741 	res->resetting_device = 1;
4742 	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
4743 
4744 	if (ipr_is_gata(res) && res->sata_port) {
4745 		ap = res->sata_port->ap;
4746 		spin_unlock_irq(scsi_cmd->device->host->host_lock);
4747 		ata_std_error_handler(ap);
4748 		spin_lock_irq(scsi_cmd->device->host->host_lock);
4749 
4750 		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4751 			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4752 				rc = -EIO;
4753 				break;
4754 			}
4755 		}
4756 	} else
4757 		rc = ipr_device_reset(ioa_cfg, res);
4758 	res->resetting_device = 0;
4759 
4760 	LEAVE;
4761 	return (rc ? FAILED : SUCCESS);
4762 }
4763 
4764 static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
4765 {
4766 	int rc;
4767 
4768 	spin_lock_irq(cmd->device->host->host_lock);
4769 	rc = __ipr_eh_dev_reset(cmd);
4770 	spin_unlock_irq(cmd->device->host->host_lock);
4771 
4772 	return rc;
4773 }
4774 
4775 /**
4776  * ipr_bus_reset_done - Op done function for bus reset.
4777  * @ipr_cmd:	ipr command struct
4778  *
4779  * This function is the op done function for a bus reset
4780  *
4781  * Return value:
4782  * 	none
4783  **/
4784 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4785 {
4786 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4787 	struct ipr_resource_entry *res;
4788 
4789 	ENTER;
4790 	if (!ioa_cfg->sis64)
4791 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4792 			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4793 				scsi_report_bus_reset(ioa_cfg->host, res->bus);
4794 				break;
4795 			}
4796 		}
4797 
4798 	/*
4799 	 * If abort has not completed, indicate the reset has, else call the
4800 	 * abort's done function to wake the sleeping eh thread
4801 	 */
4802 	if (ipr_cmd->sibling->sibling)
4803 		ipr_cmd->sibling->sibling = NULL;
4804 	else
4805 		ipr_cmd->sibling->done(ipr_cmd->sibling);
4806 
4807 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4808 	LEAVE;
4809 }
4810 
4811 /**
4812  * ipr_abort_timeout - An abort task has timed out
4813  * @ipr_cmd:	ipr command struct
4814  *
4815  * This function handles when an abort task times out. If this
4816  * happens we issue a bus reset since we have resources tied
4817  * up that must be freed before returning to the midlayer.
4818  *
4819  * Return value:
4820  *	none
4821  **/
4822 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4823 {
4824 	struct ipr_cmnd *reset_cmd;
4825 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4826 	struct ipr_cmd_pkt *cmd_pkt;
4827 	unsigned long lock_flags = 0;
4828 
4829 	ENTER;
4830 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4831 	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4832 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4833 		return;
4834 	}
4835 
4836 	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4837 	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4838 	ipr_cmd->sibling = reset_cmd;
4839 	reset_cmd->sibling = ipr_cmd;
4840 	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4841 	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4842 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4843 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4844 	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4845 
4846 	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4847 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4848 	LEAVE;
4849 }
4850 
4851 /**
4852  * ipr_cancel_op - Cancel specified op
4853  * @scsi_cmd:	scsi command struct
4854  *
4855  * This function cancels specified op.
4856  *
4857  * Return value:
4858  *	SUCCESS / FAILED
4859  **/
4860 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4861 {
4862 	struct ipr_cmnd *ipr_cmd;
4863 	struct ipr_ioa_cfg *ioa_cfg;
4864 	struct ipr_resource_entry *res;
4865 	struct ipr_cmd_pkt *cmd_pkt;
4866 	u32 ioasc;
4867 	int op_found = 0;
4868 
4869 	ENTER;
4870 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4871 	res = scsi_cmd->device->hostdata;
4872 
4873 	/* If we are currently going through reset/reload, return failed.
4874 	 * This will force the mid-layer to call ipr_eh_host_reset,
4875 	 * which will then go to sleep and wait for the reset to complete
4876 	 */
4877 	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4878 		return FAILED;
4879 	if (!res || !ipr_is_gscsi(res))
4880 		return FAILED;
4881 
4882 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4883 		if (ipr_cmd->scsi_cmd == scsi_cmd) {
4884 			ipr_cmd->done = ipr_scsi_eh_done;
4885 			op_found = 1;
4886 			break;
4887 		}
4888 	}
4889 
4890 	if (!op_found)
4891 		return SUCCESS;
4892 
4893 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4894 	ipr_cmd->ioarcb.res_handle = res->res_handle;
4895 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4896 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4897 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4898 	ipr_cmd->u.sdev = scsi_cmd->device;
4899 
4900 	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4901 		    scsi_cmd->cmnd[0]);
4902 	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4903 	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4904 
4905 	/*
4906 	 * If the abort task timed out and we sent a bus reset, we will get
4907 	 * one the following responses to the abort
4908 	 */
4909 	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4910 		ioasc = 0;
4911 		ipr_trace;
4912 	}
4913 
4914 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4915 	if (!ipr_is_naca_model(res))
4916 		res->needs_sync_complete = 1;
4917 
4918 	LEAVE;
4919 	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4920 }
4921 
4922 /**
4923  * ipr_eh_abort - Abort a single op
4924  * @scsi_cmd:	scsi command struct
4925  *
4926  * Return value:
4927  * 	SUCCESS / FAILED
4928  **/
4929 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4930 {
4931 	unsigned long flags;
4932 	int rc;
4933 
4934 	ENTER;
4935 
4936 	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4937 	rc = ipr_cancel_op(scsi_cmd);
4938 	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4939 
4940 	LEAVE;
4941 	return rc;
4942 }
4943 
4944 /**
4945  * ipr_handle_other_interrupt - Handle "other" interrupts
4946  * @ioa_cfg:	ioa config struct
4947  * @int_reg:	interrupt register
4948  *
4949  * Return value:
4950  * 	IRQ_NONE / IRQ_HANDLED
4951  **/
4952 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4953 					      volatile u32 int_reg)
4954 {
4955 	irqreturn_t rc = IRQ_HANDLED;
4956 
4957 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4958 		/* Mask the interrupt */
4959 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4960 
4961 		/* Clear the interrupt */
4962 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4963 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4964 
4965 		list_del(&ioa_cfg->reset_cmd->queue);
4966 		del_timer(&ioa_cfg->reset_cmd->timer);
4967 		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4968 	} else {
4969 		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4970 			ioa_cfg->ioa_unit_checked = 1;
4971 		else
4972 			dev_err(&ioa_cfg->pdev->dev,
4973 				"Permanent IOA failure. 0x%08X\n", int_reg);
4974 
4975 		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4976 			ioa_cfg->sdt_state = GET_DUMP;
4977 
4978 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4979 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4980 	}
4981 
4982 	return rc;
4983 }
4984 
4985 /**
4986  * ipr_isr_eh - Interrupt service routine error handler
4987  * @ioa_cfg:	ioa config struct
4988  * @msg:	message to log
4989  *
4990  * Return value:
4991  * 	none
4992  **/
4993 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
4994 {
4995 	ioa_cfg->errors_logged++;
4996 	dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
4997 
4998 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4999 		ioa_cfg->sdt_state = GET_DUMP;
5000 
5001 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5002 }
5003 
5004 /**
5005  * ipr_isr - Interrupt service routine
5006  * @irq:	irq number
5007  * @devp:	pointer to ioa config struct
5008  *
5009  * Return value:
5010  * 	IRQ_NONE / IRQ_HANDLED
5011  **/
5012 static irqreturn_t ipr_isr(int irq, void *devp)
5013 {
5014 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
5015 	unsigned long lock_flags = 0;
5016 	volatile u32 int_reg, int_mask_reg;
5017 	u32 ioasc;
5018 	u16 cmd_index;
5019 	int num_hrrq = 0;
5020 	struct ipr_cmnd *ipr_cmd;
5021 	irqreturn_t rc = IRQ_NONE;
5022 
5023 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5024 
5025 	/* If interrupts are disabled, ignore the interrupt */
5026 	if (!ioa_cfg->allow_interrupts) {
5027 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5028 		return IRQ_NONE;
5029 	}
5030 
5031 	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5032 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
5033 
5034 	/* If an interrupt on the adapter did not occur, ignore it.
5035 	 * Or in the case of SIS 64, check for a stage change interrupt.
5036 	 */
5037 	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
5038 		if (ioa_cfg->sis64) {
5039 			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5040 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5041 			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5042 
5043 				/* clear stage change */
5044 				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5045 				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5046 				list_del(&ioa_cfg->reset_cmd->queue);
5047 				del_timer(&ioa_cfg->reset_cmd->timer);
5048 				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5049 				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5050 				return IRQ_HANDLED;
5051 			}
5052 		}
5053 
5054 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5055 		return IRQ_NONE;
5056 	}
5057 
5058 	while (1) {
5059 		ipr_cmd = NULL;
5060 
5061 		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5062 		       ioa_cfg->toggle_bit) {
5063 
5064 			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
5065 				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5066 
5067 			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
5068 				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
5069 				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5070 				return IRQ_HANDLED;
5071 			}
5072 
5073 			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5074 
5075 			ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5076 
5077 			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5078 
5079 			list_del(&ipr_cmd->queue);
5080 			del_timer(&ipr_cmd->timer);
5081 			ipr_cmd->done(ipr_cmd);
5082 
5083 			rc = IRQ_HANDLED;
5084 
5085 			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
5086 				ioa_cfg->hrrq_curr++;
5087 			} else {
5088 				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5089 				ioa_cfg->toggle_bit ^= 1u;
5090 			}
5091 		}
5092 
5093 		if (ipr_cmd != NULL) {
5094 			/* Clear the PCI interrupt */
5095 			do {
5096 				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5097 				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
5098 			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5099 					num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5100 
5101 			if (int_reg & IPR_PCII_HRRQ_UPDATED) {
5102 				ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
5103 				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5104 				return IRQ_HANDLED;
5105 			}
5106 
5107 		} else
5108 			break;
5109 	}
5110 
5111 	if (unlikely(rc == IRQ_NONE))
5112 		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5113 
5114 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5115 	return rc;
5116 }
5117 
5118 /**
5119  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5120  * @ioa_cfg:	ioa config struct
5121  * @ipr_cmd:	ipr command struct
5122  *
5123  * Return value:
5124  * 	0 on success / -1 on failure
5125  **/
5126 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5127 			     struct ipr_cmnd *ipr_cmd)
5128 {
5129 	int i, nseg;
5130 	struct scatterlist *sg;
5131 	u32 length;
5132 	u32 ioadl_flags = 0;
5133 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5134 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5135 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5136 
5137 	length = scsi_bufflen(scsi_cmd);
5138 	if (!length)
5139 		return 0;
5140 
5141 	nseg = scsi_dma_map(scsi_cmd);
5142 	if (nseg < 0) {
5143 		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5144 		return -1;
5145 	}
5146 
5147 	ipr_cmd->dma_use_sg = nseg;
5148 
5149 	ioarcb->data_transfer_length = cpu_to_be32(length);
5150 	ioarcb->ioadl_len =
5151 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5152 
5153 	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5154 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5155 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5156 	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5157 		ioadl_flags = IPR_IOADL_FLAGS_READ;
5158 
5159 	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5160 		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5161 		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5162 		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5163 	}
5164 
5165 	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5166 	return 0;
5167 }
5168 
5169 /**
5170  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5171  * @ioa_cfg:	ioa config struct
5172  * @ipr_cmd:	ipr command struct
5173  *
5174  * Return value:
5175  * 	0 on success / -1 on failure
5176  **/
5177 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5178 			   struct ipr_cmnd *ipr_cmd)
5179 {
5180 	int i, nseg;
5181 	struct scatterlist *sg;
5182 	u32 length;
5183 	u32 ioadl_flags = 0;
5184 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5185 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5186 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5187 
5188 	length = scsi_bufflen(scsi_cmd);
5189 	if (!length)
5190 		return 0;
5191 
5192 	nseg = scsi_dma_map(scsi_cmd);
5193 	if (nseg < 0) {
5194 		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5195 		return -1;
5196 	}
5197 
5198 	ipr_cmd->dma_use_sg = nseg;
5199 
5200 	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5201 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5202 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5203 		ioarcb->data_transfer_length = cpu_to_be32(length);
5204 		ioarcb->ioadl_len =
5205 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5206 	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5207 		ioadl_flags = IPR_IOADL_FLAGS_READ;
5208 		ioarcb->read_data_transfer_length = cpu_to_be32(length);
5209 		ioarcb->read_ioadl_len =
5210 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5211 	}
5212 
5213 	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5214 		ioadl = ioarcb->u.add_data.u.ioadl;
5215 		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5216 				    offsetof(struct ipr_ioarcb, u.add_data));
5217 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5218 	}
5219 
5220 	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5221 		ioadl[i].flags_and_data_len =
5222 			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5223 		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5224 	}
5225 
5226 	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5227 	return 0;
5228 }
5229 
5230 /**
5231  * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5232  * @scsi_cmd:	scsi command struct
5233  *
5234  * Return value:
5235  * 	task attributes
5236  **/
5237 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5238 {
5239 	u8 tag[2];
5240 	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5241 
5242 	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5243 		switch (tag[0]) {
5244 		case MSG_SIMPLE_TAG:
5245 			rc = IPR_FLAGS_LO_SIMPLE_TASK;
5246 			break;
5247 		case MSG_HEAD_TAG:
5248 			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5249 			break;
5250 		case MSG_ORDERED_TAG:
5251 			rc = IPR_FLAGS_LO_ORDERED_TASK;
5252 			break;
5253 		};
5254 	}
5255 
5256 	return rc;
5257 }
5258 
5259 /**
5260  * ipr_erp_done - Process completion of ERP for a device
5261  * @ipr_cmd:		ipr command struct
5262  *
5263  * This function copies the sense buffer into the scsi_cmd
5264  * struct and pushes the scsi_done function.
5265  *
5266  * Return value:
5267  * 	nothing
5268  **/
5269 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5270 {
5271 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5272 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5273 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5274 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5275 
5276 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5277 		scsi_cmd->result |= (DID_ERROR << 16);
5278 		scmd_printk(KERN_ERR, scsi_cmd,
5279 			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5280 	} else {
5281 		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5282 		       SCSI_SENSE_BUFFERSIZE);
5283 	}
5284 
5285 	if (res) {
5286 		if (!ipr_is_naca_model(res))
5287 			res->needs_sync_complete = 1;
5288 		res->in_erp = 0;
5289 	}
5290 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
5291 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5292 	scsi_cmd->scsi_done(scsi_cmd);
5293 }
5294 
5295 /**
5296  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5297  * @ipr_cmd:	ipr command struct
5298  *
5299  * Return value:
5300  * 	none
5301  **/
5302 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5303 {
5304 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5305 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5306 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
5307 
5308 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5309 	ioarcb->data_transfer_length = 0;
5310 	ioarcb->read_data_transfer_length = 0;
5311 	ioarcb->ioadl_len = 0;
5312 	ioarcb->read_ioadl_len = 0;
5313 	ioasa->hdr.ioasc = 0;
5314 	ioasa->hdr.residual_data_len = 0;
5315 
5316 	if (ipr_cmd->ioa_cfg->sis64)
5317 		ioarcb->u.sis64_addr_data.data_ioadl_addr =
5318 			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5319 	else {
5320 		ioarcb->write_ioadl_addr =
5321 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5322 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5323 	}
5324 }
5325 
5326 /**
5327  * ipr_erp_request_sense - Send request sense to a device
5328  * @ipr_cmd:	ipr command struct
5329  *
5330  * This function sends a request sense to a device as a result
5331  * of a check condition.
5332  *
5333  * Return value:
5334  * 	nothing
5335  **/
5336 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5337 {
5338 	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5339 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5340 
5341 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5342 		ipr_erp_done(ipr_cmd);
5343 		return;
5344 	}
5345 
5346 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5347 
5348 	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5349 	cmd_pkt->cdb[0] = REQUEST_SENSE;
5350 	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5351 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5352 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5353 	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5354 
5355 	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5356 		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5357 
5358 	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5359 		   IPR_REQUEST_SENSE_TIMEOUT * 2);
5360 }
5361 
5362 /**
5363  * ipr_erp_cancel_all - Send cancel all to a device
5364  * @ipr_cmd:	ipr command struct
5365  *
5366  * This function sends a cancel all to a device to clear the
5367  * queue. If we are running TCQ on the device, QERR is set to 1,
5368  * which means all outstanding ops have been dropped on the floor.
5369  * Cancel all will return them to us.
5370  *
5371  * Return value:
5372  * 	nothing
5373  **/
5374 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5375 {
5376 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5377 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5378 	struct ipr_cmd_pkt *cmd_pkt;
5379 
5380 	res->in_erp = 1;
5381 
5382 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5383 
5384 	if (!scsi_get_tag_type(scsi_cmd->device)) {
5385 		ipr_erp_request_sense(ipr_cmd);
5386 		return;
5387 	}
5388 
5389 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5390 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5391 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5392 
5393 	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5394 		   IPR_CANCEL_ALL_TIMEOUT);
5395 }
5396 
5397 /**
5398  * ipr_dump_ioasa - Dump contents of IOASA
5399  * @ioa_cfg:	ioa config struct
5400  * @ipr_cmd:	ipr command struct
5401  * @res:		resource entry struct
5402  *
5403  * This function is invoked by the interrupt handler when ops
5404  * fail. It will log the IOASA if appropriate. Only called
5405  * for GPDD ops.
5406  *
5407  * Return value:
5408  * 	none
5409  **/
5410 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5411 			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5412 {
5413 	int i;
5414 	u16 data_len;
5415 	u32 ioasc, fd_ioasc;
5416 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5417 	__be32 *ioasa_data = (__be32 *)ioasa;
5418 	int error_index;
5419 
5420 	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5421 	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5422 
5423 	if (0 == ioasc)
5424 		return;
5425 
5426 	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5427 		return;
5428 
5429 	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5430 		error_index = ipr_get_error(fd_ioasc);
5431 	else
5432 		error_index = ipr_get_error(ioasc);
5433 
5434 	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5435 		/* Don't log an error if the IOA already logged one */
5436 		if (ioasa->hdr.ilid != 0)
5437 			return;
5438 
5439 		if (!ipr_is_gscsi(res))
5440 			return;
5441 
5442 		if (ipr_error_table[error_index].log_ioasa == 0)
5443 			return;
5444 	}
5445 
5446 	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5447 
5448 	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5449 	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5450 		data_len = sizeof(struct ipr_ioasa64);
5451 	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5452 		data_len = sizeof(struct ipr_ioasa);
5453 
5454 	ipr_err("IOASA Dump:\n");
5455 
5456 	for (i = 0; i < data_len / 4; i += 4) {
5457 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5458 			be32_to_cpu(ioasa_data[i]),
5459 			be32_to_cpu(ioasa_data[i+1]),
5460 			be32_to_cpu(ioasa_data[i+2]),
5461 			be32_to_cpu(ioasa_data[i+3]));
5462 	}
5463 }
5464 
5465 /**
5466  * ipr_gen_sense - Generate SCSI sense data from an IOASA
5467  * @ioasa:		IOASA
5468  * @sense_buf:	sense data buffer
5469  *
5470  * Return value:
5471  * 	none
5472  **/
5473 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5474 {
5475 	u32 failing_lba;
5476 	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5477 	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5478 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5479 	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5480 
5481 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5482 
5483 	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5484 		return;
5485 
5486 	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5487 
5488 	if (ipr_is_vset_device(res) &&
5489 	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5490 	    ioasa->u.vset.failing_lba_hi != 0) {
5491 		sense_buf[0] = 0x72;
5492 		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5493 		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5494 		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5495 
5496 		sense_buf[7] = 12;
5497 		sense_buf[8] = 0;
5498 		sense_buf[9] = 0x0A;
5499 		sense_buf[10] = 0x80;
5500 
5501 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5502 
5503 		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5504 		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5505 		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5506 		sense_buf[15] = failing_lba & 0x000000ff;
5507 
5508 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5509 
5510 		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5511 		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5512 		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5513 		sense_buf[19] = failing_lba & 0x000000ff;
5514 	} else {
5515 		sense_buf[0] = 0x70;
5516 		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5517 		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5518 		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5519 
5520 		/* Illegal request */
5521 		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5522 		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5523 			sense_buf[7] = 10;	/* additional length */
5524 
5525 			/* IOARCB was in error */
5526 			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5527 				sense_buf[15] = 0xC0;
5528 			else	/* Parameter data was invalid */
5529 				sense_buf[15] = 0x80;
5530 
5531 			sense_buf[16] =
5532 			    ((IPR_FIELD_POINTER_MASK &
5533 			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5534 			sense_buf[17] =
5535 			    (IPR_FIELD_POINTER_MASK &
5536 			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5537 		} else {
5538 			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5539 				if (ipr_is_vset_device(res))
5540 					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5541 				else
5542 					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5543 
5544 				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
5545 				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5546 				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5547 				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5548 				sense_buf[6] = failing_lba & 0x000000ff;
5549 			}
5550 
5551 			sense_buf[7] = 6;	/* additional length */
5552 		}
5553 	}
5554 }
5555 
5556 /**
5557  * ipr_get_autosense - Copy autosense data to sense buffer
5558  * @ipr_cmd:	ipr command struct
5559  *
5560  * This function copies the autosense buffer to the buffer
5561  * in the scsi_cmd, if there is autosense available.
5562  *
5563  * Return value:
5564  *	1 if autosense was available / 0 if not
5565  **/
5566 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5567 {
5568 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5569 	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
5570 
5571 	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5572 		return 0;
5573 
5574 	if (ipr_cmd->ioa_cfg->sis64)
5575 		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5576 		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5577 			   SCSI_SENSE_BUFFERSIZE));
5578 	else
5579 		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5580 		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5581 			   SCSI_SENSE_BUFFERSIZE));
5582 	return 1;
5583 }
5584 
5585 /**
5586  * ipr_erp_start - Process an error response for a SCSI op
5587  * @ioa_cfg:	ioa config struct
5588  * @ipr_cmd:	ipr command struct
5589  *
5590  * This function determines whether or not to initiate ERP
5591  * on the affected device.
5592  *
5593  * Return value:
5594  * 	nothing
5595  **/
5596 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5597 			      struct ipr_cmnd *ipr_cmd)
5598 {
5599 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5600 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5601 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5602 	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
5603 
5604 	if (!res) {
5605 		ipr_scsi_eh_done(ipr_cmd);
5606 		return;
5607 	}
5608 
5609 	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
5610 		ipr_gen_sense(ipr_cmd);
5611 
5612 	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5613 
5614 	switch (masked_ioasc) {
5615 	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
5616 		if (ipr_is_naca_model(res))
5617 			scsi_cmd->result |= (DID_ABORT << 16);
5618 		else
5619 			scsi_cmd->result |= (DID_IMM_RETRY << 16);
5620 		break;
5621 	case IPR_IOASC_IR_RESOURCE_HANDLE:
5622 	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
5623 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
5624 		break;
5625 	case IPR_IOASC_HW_SEL_TIMEOUT:
5626 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
5627 		if (!ipr_is_naca_model(res))
5628 			res->needs_sync_complete = 1;
5629 		break;
5630 	case IPR_IOASC_SYNC_REQUIRED:
5631 		if (!res->in_erp)
5632 			res->needs_sync_complete = 1;
5633 		scsi_cmd->result |= (DID_IMM_RETRY << 16);
5634 		break;
5635 	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
5636 	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
5637 		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5638 		break;
5639 	case IPR_IOASC_BUS_WAS_RESET:
5640 	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5641 		/*
5642 		 * Report the bus reset and ask for a retry. The device
5643 		 * will give CC/UA the next command.
5644 		 */
5645 		if (!res->resetting_device)
5646 			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5647 		scsi_cmd->result |= (DID_ERROR << 16);
5648 		if (!ipr_is_naca_model(res))
5649 			res->needs_sync_complete = 1;
5650 		break;
5651 	case IPR_IOASC_HW_DEV_BUS_STATUS:
5652 		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5653 		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
5654 			if (!ipr_get_autosense(ipr_cmd)) {
5655 				if (!ipr_is_naca_model(res)) {
5656 					ipr_erp_cancel_all(ipr_cmd);
5657 					return;
5658 				}
5659 			}
5660 		}
5661 		if (!ipr_is_naca_model(res))
5662 			res->needs_sync_complete = 1;
5663 		break;
5664 	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5665 		break;
5666 	default:
5667 		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5668 			scsi_cmd->result |= (DID_ERROR << 16);
5669 		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
5670 			res->needs_sync_complete = 1;
5671 		break;
5672 	}
5673 
5674 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
5675 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5676 	scsi_cmd->scsi_done(scsi_cmd);
5677 }
5678 
5679 /**
5680  * ipr_scsi_done - mid-layer done function
5681  * @ipr_cmd:	ipr command struct
5682  *
5683  * This function is invoked by the interrupt handler for
5684  * ops generated by the SCSI mid-layer
5685  *
5686  * Return value:
5687  * 	none
5688  **/
5689 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5690 {
5691 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5692 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5693 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5694 
5695 	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
5696 
5697 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
5698 		scsi_dma_unmap(ipr_cmd->scsi_cmd);
5699 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5700 		scsi_cmd->scsi_done(scsi_cmd);
5701 	} else
5702 		ipr_erp_start(ioa_cfg, ipr_cmd);
5703 }
5704 
5705 /**
5706  * ipr_queuecommand - Queue a mid-layer request
5707  * @scsi_cmd:	scsi command struct
5708  * @done:		done function
5709  *
5710  * This function queues a request generated by the mid-layer.
5711  *
5712  * Return value:
5713  *	0 on success
5714  *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5715  *	SCSI_MLQUEUE_HOST_BUSY if host is busy
5716  **/
5717 static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
5718 			    void (*done) (struct scsi_cmnd *))
5719 {
5720 	struct ipr_ioa_cfg *ioa_cfg;
5721 	struct ipr_resource_entry *res;
5722 	struct ipr_ioarcb *ioarcb;
5723 	struct ipr_cmnd *ipr_cmd;
5724 	int rc = 0;
5725 
5726 	scsi_cmd->scsi_done = done;
5727 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5728 	res = scsi_cmd->device->hostdata;
5729 	scsi_cmd->result = (DID_OK << 16);
5730 
5731 	/*
5732 	 * We are currently blocking all devices due to a host reset
5733 	 * We have told the host to stop giving us new requests, but
5734 	 * ERP ops don't count. FIXME
5735 	 */
5736 	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5737 		return SCSI_MLQUEUE_HOST_BUSY;
5738 
5739 	/*
5740 	 * FIXME - Create scsi_set_host_offline interface
5741 	 *  and the ioa_is_dead check can be removed
5742 	 */
5743 	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5744 		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5745 		scsi_cmd->result = (DID_NO_CONNECT << 16);
5746 		scsi_cmd->scsi_done(scsi_cmd);
5747 		return 0;
5748 	}
5749 
5750 	if (ipr_is_gata(res) && res->sata_port)
5751 		return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
5752 
5753 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5754 	ioarcb = &ipr_cmd->ioarcb;
5755 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5756 
5757 	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5758 	ipr_cmd->scsi_cmd = scsi_cmd;
5759 	ioarcb->res_handle = res->res_handle;
5760 	ipr_cmd->done = ipr_scsi_done;
5761 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5762 
5763 	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5764 		if (scsi_cmd->underflow == 0)
5765 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5766 
5767 		if (res->needs_sync_complete) {
5768 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5769 			res->needs_sync_complete = 0;
5770 		}
5771 
5772 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5773 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5774 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5775 		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5776 	}
5777 
5778 	if (scsi_cmd->cmnd[0] >= 0xC0 &&
5779 	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5780 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5781 
5782 	if (likely(rc == 0)) {
5783 		if (ioa_cfg->sis64)
5784 			rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5785 		else
5786 			rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5787 	}
5788 
5789 	if (likely(rc == 0)) {
5790 		mb();
5791 		ipr_send_command(ipr_cmd);
5792 	} else {
5793 		 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5794 		 return SCSI_MLQUEUE_HOST_BUSY;
5795 	}
5796 
5797 	return 0;
5798 }
5799 
5800 static DEF_SCSI_QCMD(ipr_queuecommand)
5801 
5802 /**
5803  * ipr_ioctl - IOCTL handler
5804  * @sdev:	scsi device struct
5805  * @cmd:	IOCTL cmd
5806  * @arg:	IOCTL arg
5807  *
5808  * Return value:
5809  * 	0 on success / other on failure
5810  **/
5811 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5812 {
5813 	struct ipr_resource_entry *res;
5814 
5815 	res = (struct ipr_resource_entry *)sdev->hostdata;
5816 	if (res && ipr_is_gata(res)) {
5817 		if (cmd == HDIO_GET_IDENTITY)
5818 			return -ENOTTY;
5819 		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
5820 	}
5821 
5822 	return -EINVAL;
5823 }
5824 
5825 /**
5826  * ipr_info - Get information about the card/driver
5827  * @scsi_host:	scsi host struct
5828  *
5829  * Return value:
5830  * 	pointer to buffer with description string
5831  **/
5832 static const char * ipr_ioa_info(struct Scsi_Host *host)
5833 {
5834 	static char buffer[512];
5835 	struct ipr_ioa_cfg *ioa_cfg;
5836 	unsigned long lock_flags = 0;
5837 
5838 	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5839 
5840 	spin_lock_irqsave(host->host_lock, lock_flags);
5841 	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5842 	spin_unlock_irqrestore(host->host_lock, lock_flags);
5843 
5844 	return buffer;
5845 }
5846 
5847 static struct scsi_host_template driver_template = {
5848 	.module = THIS_MODULE,
5849 	.name = "IPR",
5850 	.info = ipr_ioa_info,
5851 	.ioctl = ipr_ioctl,
5852 	.queuecommand = ipr_queuecommand,
5853 	.eh_abort_handler = ipr_eh_abort,
5854 	.eh_device_reset_handler = ipr_eh_dev_reset,
5855 	.eh_host_reset_handler = ipr_eh_host_reset,
5856 	.slave_alloc = ipr_slave_alloc,
5857 	.slave_configure = ipr_slave_configure,
5858 	.slave_destroy = ipr_slave_destroy,
5859 	.target_alloc = ipr_target_alloc,
5860 	.target_destroy = ipr_target_destroy,
5861 	.change_queue_depth = ipr_change_queue_depth,
5862 	.change_queue_type = ipr_change_queue_type,
5863 	.bios_param = ipr_biosparam,
5864 	.can_queue = IPR_MAX_COMMANDS,
5865 	.this_id = -1,
5866 	.sg_tablesize = IPR_MAX_SGLIST,
5867 	.max_sectors = IPR_IOA_MAX_SECTORS,
5868 	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5869 	.use_clustering = ENABLE_CLUSTERING,
5870 	.shost_attrs = ipr_ioa_attrs,
5871 	.sdev_attrs = ipr_dev_attrs,
5872 	.proc_name = IPR_NAME
5873 };
5874 
5875 /**
5876  * ipr_ata_phy_reset - libata phy_reset handler
5877  * @ap:		ata port to reset
5878  *
5879  **/
5880 static void ipr_ata_phy_reset(struct ata_port *ap)
5881 {
5882 	unsigned long flags;
5883 	struct ipr_sata_port *sata_port = ap->private_data;
5884 	struct ipr_resource_entry *res = sata_port->res;
5885 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5886 	int rc;
5887 
5888 	ENTER;
5889 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5890 	while(ioa_cfg->in_reset_reload) {
5891 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5892 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5893 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5894 	}
5895 
5896 	if (!ioa_cfg->allow_cmds)
5897 		goto out_unlock;
5898 
5899 	rc = ipr_device_reset(ioa_cfg, res);
5900 
5901 	if (rc) {
5902 		ap->link.device[0].class = ATA_DEV_NONE;
5903 		goto out_unlock;
5904 	}
5905 
5906 	ap->link.device[0].class = res->ata_class;
5907 	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
5908 		ap->link.device[0].class = ATA_DEV_NONE;
5909 
5910 out_unlock:
5911 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5912 	LEAVE;
5913 }
5914 
5915 /**
5916  * ipr_ata_post_internal - Cleanup after an internal command
5917  * @qc:	ATA queued command
5918  *
5919  * Return value:
5920  * 	none
5921  **/
5922 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5923 {
5924 	struct ipr_sata_port *sata_port = qc->ap->private_data;
5925 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5926 	struct ipr_cmnd *ipr_cmd;
5927 	unsigned long flags;
5928 
5929 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5930 	while(ioa_cfg->in_reset_reload) {
5931 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5932 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5933 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5934 	}
5935 
5936 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5937 		if (ipr_cmd->qc == qc) {
5938 			ipr_device_reset(ioa_cfg, sata_port->res);
5939 			break;
5940 		}
5941 	}
5942 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5943 }
5944 
5945 /**
5946  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5947  * @regs:	destination
5948  * @tf:	source ATA taskfile
5949  *
5950  * Return value:
5951  * 	none
5952  **/
5953 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5954 			     struct ata_taskfile *tf)
5955 {
5956 	regs->feature = tf->feature;
5957 	regs->nsect = tf->nsect;
5958 	regs->lbal = tf->lbal;
5959 	regs->lbam = tf->lbam;
5960 	regs->lbah = tf->lbah;
5961 	regs->device = tf->device;
5962 	regs->command = tf->command;
5963 	regs->hob_feature = tf->hob_feature;
5964 	regs->hob_nsect = tf->hob_nsect;
5965 	regs->hob_lbal = tf->hob_lbal;
5966 	regs->hob_lbam = tf->hob_lbam;
5967 	regs->hob_lbah = tf->hob_lbah;
5968 	regs->ctl = tf->ctl;
5969 }
5970 
5971 /**
5972  * ipr_sata_done - done function for SATA commands
5973  * @ipr_cmd:	ipr command struct
5974  *
5975  * This function is invoked by the interrupt handler for
5976  * ops generated by the SCSI mid-layer to SATA devices
5977  *
5978  * Return value:
5979  * 	none
5980  **/
5981 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5982 {
5983 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5984 	struct ata_queued_cmd *qc = ipr_cmd->qc;
5985 	struct ipr_sata_port *sata_port = qc->ap->private_data;
5986 	struct ipr_resource_entry *res = sata_port->res;
5987 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5988 
5989 	if (ipr_cmd->ioa_cfg->sis64)
5990 		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5991 		       sizeof(struct ipr_ioasa_gata));
5992 	else
5993 		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5994 		       sizeof(struct ipr_ioasa_gata));
5995 	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5996 
5997 	if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5998 		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
5999 
6000 	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6001 		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6002 	else
6003 		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6004 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6005 	ata_qc_complete(qc);
6006 }
6007 
6008 /**
6009  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6010  * @ipr_cmd:	ipr command struct
6011  * @qc:		ATA queued command
6012  *
6013  **/
6014 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6015 				  struct ata_queued_cmd *qc)
6016 {
6017 	u32 ioadl_flags = 0;
6018 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6019 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
6020 	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6021 	int len = qc->nbytes;
6022 	struct scatterlist *sg;
6023 	unsigned int si;
6024 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
6025 
6026 	if (len == 0)
6027 		return;
6028 
6029 	if (qc->dma_dir == DMA_TO_DEVICE) {
6030 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6031 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6032 	} else if (qc->dma_dir == DMA_FROM_DEVICE)
6033 		ioadl_flags = IPR_IOADL_FLAGS_READ;
6034 
6035 	ioarcb->data_transfer_length = cpu_to_be32(len);
6036 	ioarcb->ioadl_len =
6037 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6038 	ioarcb->u.sis64_addr_data.data_ioadl_addr =
6039 		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
6040 
6041 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
6042 		ioadl64->flags = cpu_to_be32(ioadl_flags);
6043 		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6044 		ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6045 
6046 		last_ioadl64 = ioadl64;
6047 		ioadl64++;
6048 	}
6049 
6050 	if (likely(last_ioadl64))
6051 		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6052 }
6053 
6054 /**
6055  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6056  * @ipr_cmd:	ipr command struct
6057  * @qc:		ATA queued command
6058  *
6059  **/
6060 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6061 				struct ata_queued_cmd *qc)
6062 {
6063 	u32 ioadl_flags = 0;
6064 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6065 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6066 	struct ipr_ioadl_desc *last_ioadl = NULL;
6067 	int len = qc->nbytes;
6068 	struct scatterlist *sg;
6069 	unsigned int si;
6070 
6071 	if (len == 0)
6072 		return;
6073 
6074 	if (qc->dma_dir == DMA_TO_DEVICE) {
6075 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6076 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6077 		ioarcb->data_transfer_length = cpu_to_be32(len);
6078 		ioarcb->ioadl_len =
6079 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6080 	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
6081 		ioadl_flags = IPR_IOADL_FLAGS_READ;
6082 		ioarcb->read_data_transfer_length = cpu_to_be32(len);
6083 		ioarcb->read_ioadl_len =
6084 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6085 	}
6086 
6087 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
6088 		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6089 		ioadl->address = cpu_to_be32(sg_dma_address(sg));
6090 
6091 		last_ioadl = ioadl;
6092 		ioadl++;
6093 	}
6094 
6095 	if (likely(last_ioadl))
6096 		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6097 }
6098 
6099 /**
6100  * ipr_qc_issue - Issue a SATA qc to a device
6101  * @qc:	queued command
6102  *
6103  * Return value:
6104  * 	0 if success
6105  **/
6106 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6107 {
6108 	struct ata_port *ap = qc->ap;
6109 	struct ipr_sata_port *sata_port = ap->private_data;
6110 	struct ipr_resource_entry *res = sata_port->res;
6111 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6112 	struct ipr_cmnd *ipr_cmd;
6113 	struct ipr_ioarcb *ioarcb;
6114 	struct ipr_ioarcb_ata_regs *regs;
6115 
6116 	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
6117 		return AC_ERR_SYSTEM;
6118 
6119 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6120 	ioarcb = &ipr_cmd->ioarcb;
6121 
6122 	if (ioa_cfg->sis64) {
6123 		regs = &ipr_cmd->i.ata_ioadl.regs;
6124 		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6125 	} else
6126 		regs = &ioarcb->u.add_data.u.regs;
6127 
6128 	memset(regs, 0, sizeof(*regs));
6129 	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6130 
6131 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6132 	ipr_cmd->qc = qc;
6133 	ipr_cmd->done = ipr_sata_done;
6134 	ipr_cmd->ioarcb.res_handle = res->res_handle;
6135 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6136 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6137 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6138 	ipr_cmd->dma_use_sg = qc->n_elem;
6139 
6140 	if (ioa_cfg->sis64)
6141 		ipr_build_ata_ioadl64(ipr_cmd, qc);
6142 	else
6143 		ipr_build_ata_ioadl(ipr_cmd, qc);
6144 
6145 	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6146 	ipr_copy_sata_tf(regs, &qc->tf);
6147 	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6148 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6149 
6150 	switch (qc->tf.protocol) {
6151 	case ATA_PROT_NODATA:
6152 	case ATA_PROT_PIO:
6153 		break;
6154 
6155 	case ATA_PROT_DMA:
6156 		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6157 		break;
6158 
6159 	case ATAPI_PROT_PIO:
6160 	case ATAPI_PROT_NODATA:
6161 		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6162 		break;
6163 
6164 	case ATAPI_PROT_DMA:
6165 		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6166 		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6167 		break;
6168 
6169 	default:
6170 		WARN_ON(1);
6171 		return AC_ERR_INVALID;
6172 	}
6173 
6174 	mb();
6175 
6176 	ipr_send_command(ipr_cmd);
6177 
6178 	return 0;
6179 }
6180 
6181 /**
6182  * ipr_qc_fill_rtf - Read result TF
6183  * @qc: ATA queued command
6184  *
6185  * Return value:
6186  * 	true
6187  **/
6188 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6189 {
6190 	struct ipr_sata_port *sata_port = qc->ap->private_data;
6191 	struct ipr_ioasa_gata *g = &sata_port->ioasa;
6192 	struct ata_taskfile *tf = &qc->result_tf;
6193 
6194 	tf->feature = g->error;
6195 	tf->nsect = g->nsect;
6196 	tf->lbal = g->lbal;
6197 	tf->lbam = g->lbam;
6198 	tf->lbah = g->lbah;
6199 	tf->device = g->device;
6200 	tf->command = g->status;
6201 	tf->hob_nsect = g->hob_nsect;
6202 	tf->hob_lbal = g->hob_lbal;
6203 	tf->hob_lbam = g->hob_lbam;
6204 	tf->hob_lbah = g->hob_lbah;
6205 	tf->ctl = g->alt_status;
6206 
6207 	return true;
6208 }
6209 
6210 static struct ata_port_operations ipr_sata_ops = {
6211 	.phy_reset = ipr_ata_phy_reset,
6212 	.hardreset = ipr_sata_reset,
6213 	.post_internal_cmd = ipr_ata_post_internal,
6214 	.qc_prep = ata_noop_qc_prep,
6215 	.qc_issue = ipr_qc_issue,
6216 	.qc_fill_rtf = ipr_qc_fill_rtf,
6217 	.port_start = ata_sas_port_start,
6218 	.port_stop = ata_sas_port_stop
6219 };
6220 
6221 static struct ata_port_info sata_port_info = {
6222 	.flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
6223 	ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
6224 	.pio_mask	= 0x10, /* pio4 */
6225 	.mwdma_mask = 0x07,
6226 	.udma_mask	= 0x7f, /* udma0-6 */
6227 	.port_ops	= &ipr_sata_ops
6228 };
6229 
6230 #ifdef CONFIG_PPC_PSERIES
6231 static const u16 ipr_blocked_processors[] = {
6232 	PV_NORTHSTAR,
6233 	PV_PULSAR,
6234 	PV_POWER4,
6235 	PV_ICESTAR,
6236 	PV_SSTAR,
6237 	PV_POWER4p,
6238 	PV_630,
6239 	PV_630p
6240 };
6241 
6242 /**
6243  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6244  * @ioa_cfg:	ioa cfg struct
6245  *
6246  * Adapters that use Gemstone revision < 3.1 do not work reliably on
6247  * certain pSeries hardware. This function determines if the given
6248  * adapter is in one of these confgurations or not.
6249  *
6250  * Return value:
6251  * 	1 if adapter is not supported / 0 if adapter is supported
6252  **/
6253 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6254 {
6255 	int i;
6256 
6257 	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6258 		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
6259 			if (__is_processor(ipr_blocked_processors[i]))
6260 				return 1;
6261 		}
6262 	}
6263 	return 0;
6264 }
6265 #else
6266 #define ipr_invalid_adapter(ioa_cfg) 0
6267 #endif
6268 
6269 /**
6270  * ipr_ioa_bringdown_done - IOA bring down completion.
6271  * @ipr_cmd:	ipr command struct
6272  *
6273  * This function processes the completion of an adapter bring down.
6274  * It wakes any reset sleepers.
6275  *
6276  * Return value:
6277  * 	IPR_RC_JOB_RETURN
6278  **/
6279 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6280 {
6281 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6282 
6283 	ENTER;
6284 	ioa_cfg->in_reset_reload = 0;
6285 	ioa_cfg->reset_retries = 0;
6286 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6287 	wake_up_all(&ioa_cfg->reset_wait_q);
6288 
6289 	spin_unlock_irq(ioa_cfg->host->host_lock);
6290 	scsi_unblock_requests(ioa_cfg->host);
6291 	spin_lock_irq(ioa_cfg->host->host_lock);
6292 	LEAVE;
6293 
6294 	return IPR_RC_JOB_RETURN;
6295 }
6296 
6297 /**
6298  * ipr_ioa_reset_done - IOA reset completion.
6299  * @ipr_cmd:	ipr command struct
6300  *
6301  * This function processes the completion of an adapter reset.
6302  * It schedules any necessary mid-layer add/removes and
6303  * wakes any reset sleepers.
6304  *
6305  * Return value:
6306  * 	IPR_RC_JOB_RETURN
6307  **/
6308 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6309 {
6310 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6311 	struct ipr_resource_entry *res;
6312 	struct ipr_hostrcb *hostrcb, *temp;
6313 	int i = 0;
6314 
6315 	ENTER;
6316 	ioa_cfg->in_reset_reload = 0;
6317 	ioa_cfg->allow_cmds = 1;
6318 	ioa_cfg->reset_cmd = NULL;
6319 	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6320 
6321 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6322 		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6323 			ipr_trace;
6324 			break;
6325 		}
6326 	}
6327 	schedule_work(&ioa_cfg->work_q);
6328 
6329 	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6330 		list_del(&hostrcb->queue);
6331 		if (i++ < IPR_NUM_LOG_HCAMS)
6332 			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6333 		else
6334 			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6335 	}
6336 
6337 	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6338 	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6339 
6340 	ioa_cfg->reset_retries = 0;
6341 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6342 	wake_up_all(&ioa_cfg->reset_wait_q);
6343 
6344 	spin_unlock(ioa_cfg->host->host_lock);
6345 	scsi_unblock_requests(ioa_cfg->host);
6346 	spin_lock(ioa_cfg->host->host_lock);
6347 
6348 	if (!ioa_cfg->allow_cmds)
6349 		scsi_block_requests(ioa_cfg->host);
6350 
6351 	LEAVE;
6352 	return IPR_RC_JOB_RETURN;
6353 }
6354 
6355 /**
6356  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6357  * @supported_dev:	supported device struct
6358  * @vpids:			vendor product id struct
6359  *
6360  * Return value:
6361  * 	none
6362  **/
6363 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6364 				 struct ipr_std_inq_vpids *vpids)
6365 {
6366 	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6367 	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6368 	supported_dev->num_records = 1;
6369 	supported_dev->data_length =
6370 		cpu_to_be16(sizeof(struct ipr_supported_device));
6371 	supported_dev->reserved = 0;
6372 }
6373 
6374 /**
6375  * ipr_set_supported_devs - Send Set Supported Devices for a device
6376  * @ipr_cmd:	ipr command struct
6377  *
6378  * This function sends a Set Supported Devices to the adapter
6379  *
6380  * Return value:
6381  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6382  **/
6383 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6384 {
6385 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6386 	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6387 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6388 	struct ipr_resource_entry *res = ipr_cmd->u.res;
6389 
6390 	ipr_cmd->job_step = ipr_ioa_reset_done;
6391 
6392 	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6393 		if (!ipr_is_scsi_disk(res))
6394 			continue;
6395 
6396 		ipr_cmd->u.res = res;
6397 		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6398 
6399 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6400 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6401 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6402 
6403 		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6404 		ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6405 		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6406 		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6407 
6408 		ipr_init_ioadl(ipr_cmd,
6409 			       ioa_cfg->vpd_cbs_dma +
6410 				 offsetof(struct ipr_misc_cbs, supp_dev),
6411 			       sizeof(struct ipr_supported_device),
6412 			       IPR_IOADL_FLAGS_WRITE_LAST);
6413 
6414 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6415 			   IPR_SET_SUP_DEVICE_TIMEOUT);
6416 
6417 		if (!ioa_cfg->sis64)
6418 			ipr_cmd->job_step = ipr_set_supported_devs;
6419 		return IPR_RC_JOB_RETURN;
6420 	}
6421 
6422 	return IPR_RC_JOB_CONTINUE;
6423 }
6424 
6425 /**
6426  * ipr_get_mode_page - Locate specified mode page
6427  * @mode_pages:	mode page buffer
6428  * @page_code:	page code to find
6429  * @len:		minimum required length for mode page
6430  *
6431  * Return value:
6432  * 	pointer to mode page / NULL on failure
6433  **/
6434 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6435 			       u32 page_code, u32 len)
6436 {
6437 	struct ipr_mode_page_hdr *mode_hdr;
6438 	u32 page_length;
6439 	u32 length;
6440 
6441 	if (!mode_pages || (mode_pages->hdr.length == 0))
6442 		return NULL;
6443 
6444 	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6445 	mode_hdr = (struct ipr_mode_page_hdr *)
6446 		(mode_pages->data + mode_pages->hdr.block_desc_len);
6447 
6448 	while (length) {
6449 		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6450 			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6451 				return mode_hdr;
6452 			break;
6453 		} else {
6454 			page_length = (sizeof(struct ipr_mode_page_hdr) +
6455 				       mode_hdr->page_length);
6456 			length -= page_length;
6457 			mode_hdr = (struct ipr_mode_page_hdr *)
6458 				((unsigned long)mode_hdr + page_length);
6459 		}
6460 	}
6461 	return NULL;
6462 }
6463 
6464 /**
6465  * ipr_check_term_power - Check for term power errors
6466  * @ioa_cfg:	ioa config struct
6467  * @mode_pages:	IOAFP mode pages buffer
6468  *
6469  * Check the IOAFP's mode page 28 for term power errors
6470  *
6471  * Return value:
6472  * 	nothing
6473  **/
6474 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6475 				 struct ipr_mode_pages *mode_pages)
6476 {
6477 	int i;
6478 	int entry_length;
6479 	struct ipr_dev_bus_entry *bus;
6480 	struct ipr_mode_page28 *mode_page;
6481 
6482 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
6483 				      sizeof(struct ipr_mode_page28));
6484 
6485 	entry_length = mode_page->entry_length;
6486 
6487 	bus = mode_page->bus;
6488 
6489 	for (i = 0; i < mode_page->num_entries; i++) {
6490 		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6491 			dev_err(&ioa_cfg->pdev->dev,
6492 				"Term power is absent on scsi bus %d\n",
6493 				bus->res_addr.bus);
6494 		}
6495 
6496 		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6497 	}
6498 }
6499 
6500 /**
6501  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6502  * @ioa_cfg:	ioa config struct
6503  *
6504  * Looks through the config table checking for SES devices. If
6505  * the SES device is in the SES table indicating a maximum SCSI
6506  * bus speed, the speed is limited for the bus.
6507  *
6508  * Return value:
6509  * 	none
6510  **/
6511 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6512 {
6513 	u32 max_xfer_rate;
6514 	int i;
6515 
6516 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6517 		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6518 						       ioa_cfg->bus_attr[i].bus_width);
6519 
6520 		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6521 			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6522 	}
6523 }
6524 
6525 /**
6526  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6527  * @ioa_cfg:	ioa config struct
6528  * @mode_pages:	mode page 28 buffer
6529  *
6530  * Updates mode page 28 based on driver configuration
6531  *
6532  * Return value:
6533  * 	none
6534  **/
6535 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6536 					  	struct ipr_mode_pages *mode_pages)
6537 {
6538 	int i, entry_length;
6539 	struct ipr_dev_bus_entry *bus;
6540 	struct ipr_bus_attributes *bus_attr;
6541 	struct ipr_mode_page28 *mode_page;
6542 
6543 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
6544 				      sizeof(struct ipr_mode_page28));
6545 
6546 	entry_length = mode_page->entry_length;
6547 
6548 	/* Loop for each device bus entry */
6549 	for (i = 0, bus = mode_page->bus;
6550 	     i < mode_page->num_entries;
6551 	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6552 		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6553 			dev_err(&ioa_cfg->pdev->dev,
6554 				"Invalid resource address reported: 0x%08X\n",
6555 				IPR_GET_PHYS_LOC(bus->res_addr));
6556 			continue;
6557 		}
6558 
6559 		bus_attr = &ioa_cfg->bus_attr[i];
6560 		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6561 		bus->bus_width = bus_attr->bus_width;
6562 		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6563 		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6564 		if (bus_attr->qas_enabled)
6565 			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6566 		else
6567 			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6568 	}
6569 }
6570 
6571 /**
6572  * ipr_build_mode_select - Build a mode select command
6573  * @ipr_cmd:	ipr command struct
6574  * @res_handle:	resource handle to send command to
6575  * @parm:		Byte 2 of Mode Sense command
6576  * @dma_addr:	DMA buffer address
6577  * @xfer_len:	data transfer length
6578  *
6579  * Return value:
6580  * 	none
6581  **/
6582 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
6583 				  __be32 res_handle, u8 parm,
6584 				  dma_addr_t dma_addr, u8 xfer_len)
6585 {
6586 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6587 
6588 	ioarcb->res_handle = res_handle;
6589 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6590 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6591 	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6592 	ioarcb->cmd_pkt.cdb[1] = parm;
6593 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6594 
6595 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
6596 }
6597 
6598 /**
6599  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6600  * @ipr_cmd:	ipr command struct
6601  *
6602  * This function sets up the SCSI bus attributes and sends
6603  * a Mode Select for Page 28 to activate them.
6604  *
6605  * Return value:
6606  * 	IPR_RC_JOB_RETURN
6607  **/
6608 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6609 {
6610 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6611 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6612 	int length;
6613 
6614 	ENTER;
6615 	ipr_scsi_bus_speed_limit(ioa_cfg);
6616 	ipr_check_term_power(ioa_cfg, mode_pages);
6617 	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6618 	length = mode_pages->hdr.length + 1;
6619 	mode_pages->hdr.length = 0;
6620 
6621 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6622 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6623 			      length);
6624 
6625 	ipr_cmd->job_step = ipr_set_supported_devs;
6626 	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6627 				    struct ipr_resource_entry, queue);
6628 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6629 
6630 	LEAVE;
6631 	return IPR_RC_JOB_RETURN;
6632 }
6633 
6634 /**
6635  * ipr_build_mode_sense - Builds a mode sense command
6636  * @ipr_cmd:	ipr command struct
6637  * @res:		resource entry struct
6638  * @parm:		Byte 2 of mode sense command
6639  * @dma_addr:	DMA address of mode sense buffer
6640  * @xfer_len:	Size of DMA buffer
6641  *
6642  * Return value:
6643  * 	none
6644  **/
6645 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6646 				 __be32 res_handle,
6647 				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
6648 {
6649 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6650 
6651 	ioarcb->res_handle = res_handle;
6652 	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6653 	ioarcb->cmd_pkt.cdb[2] = parm;
6654 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6655 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6656 
6657 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6658 }
6659 
6660 /**
6661  * ipr_reset_cmd_failed - Handle failure of IOA reset command
6662  * @ipr_cmd:	ipr command struct
6663  *
6664  * This function handles the failure of an IOA bringup command.
6665  *
6666  * Return value:
6667  * 	IPR_RC_JOB_RETURN
6668  **/
6669 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6670 {
6671 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6672 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6673 
6674 	dev_err(&ioa_cfg->pdev->dev,
6675 		"0x%02X failed with IOASC: 0x%08X\n",
6676 		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6677 
6678 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6679 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6680 	return IPR_RC_JOB_RETURN;
6681 }
6682 
6683 /**
6684  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6685  * @ipr_cmd:	ipr command struct
6686  *
6687  * This function handles the failure of a Mode Sense to the IOAFP.
6688  * Some adapters do not handle all mode pages.
6689  *
6690  * Return value:
6691  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6692  **/
6693 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6694 {
6695 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6696 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6697 
6698 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6699 		ipr_cmd->job_step = ipr_set_supported_devs;
6700 		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6701 					    struct ipr_resource_entry, queue);
6702 		return IPR_RC_JOB_CONTINUE;
6703 	}
6704 
6705 	return ipr_reset_cmd_failed(ipr_cmd);
6706 }
6707 
6708 /**
6709  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6710  * @ipr_cmd:	ipr command struct
6711  *
6712  * This function send a Page 28 mode sense to the IOA to
6713  * retrieve SCSI bus attributes.
6714  *
6715  * Return value:
6716  * 	IPR_RC_JOB_RETURN
6717  **/
6718 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6719 {
6720 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6721 
6722 	ENTER;
6723 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6724 			     0x28, ioa_cfg->vpd_cbs_dma +
6725 			     offsetof(struct ipr_misc_cbs, mode_pages),
6726 			     sizeof(struct ipr_mode_pages));
6727 
6728 	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
6729 	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
6730 
6731 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6732 
6733 	LEAVE;
6734 	return IPR_RC_JOB_RETURN;
6735 }
6736 
6737 /**
6738  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6739  * @ipr_cmd:	ipr command struct
6740  *
6741  * This function enables dual IOA RAID support if possible.
6742  *
6743  * Return value:
6744  * 	IPR_RC_JOB_RETURN
6745  **/
6746 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6747 {
6748 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6749 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6750 	struct ipr_mode_page24 *mode_page;
6751 	int length;
6752 
6753 	ENTER;
6754 	mode_page = ipr_get_mode_page(mode_pages, 0x24,
6755 				      sizeof(struct ipr_mode_page24));
6756 
6757 	if (mode_page)
6758 		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6759 
6760 	length = mode_pages->hdr.length + 1;
6761 	mode_pages->hdr.length = 0;
6762 
6763 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6764 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6765 			      length);
6766 
6767 	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6768 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6769 
6770 	LEAVE;
6771 	return IPR_RC_JOB_RETURN;
6772 }
6773 
6774 /**
6775  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6776  * @ipr_cmd:	ipr command struct
6777  *
6778  * This function handles the failure of a Mode Sense to the IOAFP.
6779  * Some adapters do not handle all mode pages.
6780  *
6781  * Return value:
6782  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6783  **/
6784 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6785 {
6786 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6787 
6788 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6789 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6790 		return IPR_RC_JOB_CONTINUE;
6791 	}
6792 
6793 	return ipr_reset_cmd_failed(ipr_cmd);
6794 }
6795 
6796 /**
6797  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6798  * @ipr_cmd:	ipr command struct
6799  *
6800  * This function send a mode sense to the IOA to retrieve
6801  * the IOA Advanced Function Control mode page.
6802  *
6803  * Return value:
6804  * 	IPR_RC_JOB_RETURN
6805  **/
6806 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6807 {
6808 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6809 
6810 	ENTER;
6811 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6812 			     0x24, ioa_cfg->vpd_cbs_dma +
6813 			     offsetof(struct ipr_misc_cbs, mode_pages),
6814 			     sizeof(struct ipr_mode_pages));
6815 
6816 	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6817 	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6818 
6819 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6820 
6821 	LEAVE;
6822 	return IPR_RC_JOB_RETURN;
6823 }
6824 
6825 /**
6826  * ipr_init_res_table - Initialize the resource table
6827  * @ipr_cmd:	ipr command struct
6828  *
6829  * This function looks through the existing resource table, comparing
6830  * it with the config table. This function will take care of old/new
6831  * devices and schedule adding/removing them from the mid-layer
6832  * as appropriate.
6833  *
6834  * Return value:
6835  * 	IPR_RC_JOB_CONTINUE
6836  **/
6837 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6838 {
6839 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6840 	struct ipr_resource_entry *res, *temp;
6841 	struct ipr_config_table_entry_wrapper cfgtew;
6842 	int entries, found, flag, i;
6843 	LIST_HEAD(old_res);
6844 
6845 	ENTER;
6846 	if (ioa_cfg->sis64)
6847 		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6848 	else
6849 		flag = ioa_cfg->u.cfg_table->hdr.flags;
6850 
6851 	if (flag & IPR_UCODE_DOWNLOAD_REQ)
6852 		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6853 
6854 	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6855 		list_move_tail(&res->queue, &old_res);
6856 
6857 	if (ioa_cfg->sis64)
6858 		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
6859 	else
6860 		entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6861 
6862 	for (i = 0; i < entries; i++) {
6863 		if (ioa_cfg->sis64)
6864 			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6865 		else
6866 			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
6867 		found = 0;
6868 
6869 		list_for_each_entry_safe(res, temp, &old_res, queue) {
6870 			if (ipr_is_same_device(res, &cfgtew)) {
6871 				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6872 				found = 1;
6873 				break;
6874 			}
6875 		}
6876 
6877 		if (!found) {
6878 			if (list_empty(&ioa_cfg->free_res_q)) {
6879 				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6880 				break;
6881 			}
6882 
6883 			found = 1;
6884 			res = list_entry(ioa_cfg->free_res_q.next,
6885 					 struct ipr_resource_entry, queue);
6886 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6887 			ipr_init_res_entry(res, &cfgtew);
6888 			res->add_to_ml = 1;
6889 		} else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
6890 			res->sdev->allow_restart = 1;
6891 
6892 		if (found)
6893 			ipr_update_res_entry(res, &cfgtew);
6894 	}
6895 
6896 	list_for_each_entry_safe(res, temp, &old_res, queue) {
6897 		if (res->sdev) {
6898 			res->del_from_ml = 1;
6899 			res->res_handle = IPR_INVALID_RES_HANDLE;
6900 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6901 		}
6902 	}
6903 
6904 	list_for_each_entry_safe(res, temp, &old_res, queue) {
6905 		ipr_clear_res_target(res);
6906 		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6907 	}
6908 
6909 	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6910 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6911 	else
6912 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6913 
6914 	LEAVE;
6915 	return IPR_RC_JOB_CONTINUE;
6916 }
6917 
6918 /**
6919  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6920  * @ipr_cmd:	ipr command struct
6921  *
6922  * This function sends a Query IOA Configuration command
6923  * to the adapter to retrieve the IOA configuration table.
6924  *
6925  * Return value:
6926  * 	IPR_RC_JOB_RETURN
6927  **/
6928 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6929 {
6930 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6931 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6932 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
6933 	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6934 
6935 	ENTER;
6936 	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6937 		ioa_cfg->dual_raid = 1;
6938 	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6939 		 ucode_vpd->major_release, ucode_vpd->card_type,
6940 		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6941 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6942 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6943 
6944 	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
6945 	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
6946 	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6947 	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
6948 
6949 	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
6950 		       IPR_IOADL_FLAGS_READ_LAST);
6951 
6952 	ipr_cmd->job_step = ipr_init_res_table;
6953 
6954 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6955 
6956 	LEAVE;
6957 	return IPR_RC_JOB_RETURN;
6958 }
6959 
6960 /**
6961  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6962  * @ipr_cmd:	ipr command struct
6963  *
6964  * This utility function sends an inquiry to the adapter.
6965  *
6966  * Return value:
6967  * 	none
6968  **/
6969 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6970 			      dma_addr_t dma_addr, u8 xfer_len)
6971 {
6972 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6973 
6974 	ENTER;
6975 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6976 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6977 
6978 	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6979 	ioarcb->cmd_pkt.cdb[1] = flags;
6980 	ioarcb->cmd_pkt.cdb[2] = page;
6981 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6982 
6983 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6984 
6985 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6986 	LEAVE;
6987 }
6988 
6989 /**
6990  * ipr_inquiry_page_supported - Is the given inquiry page supported
6991  * @page0:		inquiry page 0 buffer
6992  * @page:		page code.
6993  *
6994  * This function determines if the specified inquiry page is supported.
6995  *
6996  * Return value:
6997  *	1 if page is supported / 0 if not
6998  **/
6999 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7000 {
7001 	int i;
7002 
7003 	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7004 		if (page0->page[i] == page)
7005 			return 1;
7006 
7007 	return 0;
7008 }
7009 
7010 /**
7011  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7012  * @ipr_cmd:	ipr command struct
7013  *
7014  * This function sends a Page 0xD0 inquiry to the adapter
7015  * to retrieve adapter capabilities.
7016  *
7017  * Return value:
7018  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7019  **/
7020 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7021 {
7022 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7023 	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7024 	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7025 
7026 	ENTER;
7027 	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7028 	memset(cap, 0, sizeof(*cap));
7029 
7030 	if (ipr_inquiry_page_supported(page0, 0xD0)) {
7031 		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7032 				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7033 				  sizeof(struct ipr_inquiry_cap));
7034 		return IPR_RC_JOB_RETURN;
7035 	}
7036 
7037 	LEAVE;
7038 	return IPR_RC_JOB_CONTINUE;
7039 }
7040 
7041 /**
7042  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7043  * @ipr_cmd:	ipr command struct
7044  *
7045  * This function sends a Page 3 inquiry to the adapter
7046  * to retrieve software VPD information.
7047  *
7048  * Return value:
7049  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7050  **/
7051 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7052 {
7053 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7054 
7055 	ENTER;
7056 
7057 	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7058 
7059 	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7060 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7061 			  sizeof(struct ipr_inquiry_page3));
7062 
7063 	LEAVE;
7064 	return IPR_RC_JOB_RETURN;
7065 }
7066 
7067 /**
7068  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7069  * @ipr_cmd:	ipr command struct
7070  *
7071  * This function sends a Page 0 inquiry to the adapter
7072  * to retrieve supported inquiry pages.
7073  *
7074  * Return value:
7075  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7076  **/
7077 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7078 {
7079 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7080 	char type[5];
7081 
7082 	ENTER;
7083 
7084 	/* Grab the type out of the VPD and store it away */
7085 	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7086 	type[4] = '\0';
7087 	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7088 
7089 	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7090 
7091 	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7092 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7093 			  sizeof(struct ipr_inquiry_page0));
7094 
7095 	LEAVE;
7096 	return IPR_RC_JOB_RETURN;
7097 }
7098 
7099 /**
7100  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7101  * @ipr_cmd:	ipr command struct
7102  *
7103  * This function sends a standard inquiry to the adapter.
7104  *
7105  * Return value:
7106  * 	IPR_RC_JOB_RETURN
7107  **/
7108 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7109 {
7110 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7111 
7112 	ENTER;
7113 	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7114 
7115 	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7116 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7117 			  sizeof(struct ipr_ioa_vpd));
7118 
7119 	LEAVE;
7120 	return IPR_RC_JOB_RETURN;
7121 }
7122 
7123 /**
7124  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7125  * @ipr_cmd:	ipr command struct
7126  *
7127  * This function send an Identify Host Request Response Queue
7128  * command to establish the HRRQ with the adapter.
7129  *
7130  * Return value:
7131  * 	IPR_RC_JOB_RETURN
7132  **/
7133 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7134 {
7135 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7136 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7137 
7138 	ENTER;
7139 	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7140 
7141 	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7142 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7143 
7144 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7145 	if (ioa_cfg->sis64)
7146 		ioarcb->cmd_pkt.cdb[1] = 0x1;
7147 	ioarcb->cmd_pkt.cdb[2] =
7148 		((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
7149 	ioarcb->cmd_pkt.cdb[3] =
7150 		((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
7151 	ioarcb->cmd_pkt.cdb[4] =
7152 		((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
7153 	ioarcb->cmd_pkt.cdb[5] =
7154 		((u64) ioa_cfg->host_rrq_dma) & 0xff;
7155 	ioarcb->cmd_pkt.cdb[7] =
7156 		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
7157 	ioarcb->cmd_pkt.cdb[8] =
7158 		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
7159 
7160 	if (ioa_cfg->sis64) {
7161 		ioarcb->cmd_pkt.cdb[10] =
7162 			((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7163 		ioarcb->cmd_pkt.cdb[11] =
7164 			((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7165 		ioarcb->cmd_pkt.cdb[12] =
7166 			((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7167 		ioarcb->cmd_pkt.cdb[13] =
7168 			((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7169 	}
7170 
7171 	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7172 
7173 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7174 
7175 	LEAVE;
7176 	return IPR_RC_JOB_RETURN;
7177 }
7178 
7179 /**
7180  * ipr_reset_timer_done - Adapter reset timer function
7181  * @ipr_cmd:	ipr command struct
7182  *
7183  * Description: This function is used in adapter reset processing
7184  * for timing events. If the reset_cmd pointer in the IOA
7185  * config struct is not this adapter's we are doing nested
7186  * resets and fail_all_ops will take care of freeing the
7187  * command block.
7188  *
7189  * Return value:
7190  * 	none
7191  **/
7192 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7193 {
7194 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7195 	unsigned long lock_flags = 0;
7196 
7197 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7198 
7199 	if (ioa_cfg->reset_cmd == ipr_cmd) {
7200 		list_del(&ipr_cmd->queue);
7201 		ipr_cmd->done(ipr_cmd);
7202 	}
7203 
7204 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7205 }
7206 
7207 /**
7208  * ipr_reset_start_timer - Start a timer for adapter reset job
7209  * @ipr_cmd:	ipr command struct
7210  * @timeout:	timeout value
7211  *
7212  * Description: This function is used in adapter reset processing
7213  * for timing events. If the reset_cmd pointer in the IOA
7214  * config struct is not this adapter's we are doing nested
7215  * resets and fail_all_ops will take care of freeing the
7216  * command block.
7217  *
7218  * Return value:
7219  * 	none
7220  **/
7221 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7222 				  unsigned long timeout)
7223 {
7224 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7225 	ipr_cmd->done = ipr_reset_ioa_job;
7226 
7227 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7228 	ipr_cmd->timer.expires = jiffies + timeout;
7229 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7230 	add_timer(&ipr_cmd->timer);
7231 }
7232 
7233 /**
7234  * ipr_init_ioa_mem - Initialize ioa_cfg control block
7235  * @ioa_cfg:	ioa cfg struct
7236  *
7237  * Return value:
7238  * 	nothing
7239  **/
7240 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7241 {
7242 	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7243 
7244 	/* Initialize Host RRQ pointers */
7245 	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7246 	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7247 	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
7248 	ioa_cfg->toggle_bit = 1;
7249 
7250 	/* Zero out config table */
7251 	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7252 }
7253 
7254 /**
7255  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7256  * @ipr_cmd:	ipr command struct
7257  *
7258  * Return value:
7259  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7260  **/
7261 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7262 {
7263 	unsigned long stage, stage_time;
7264 	u32 feedback;
7265 	volatile u32 int_reg;
7266 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7267 	u64 maskval = 0;
7268 
7269 	feedback = readl(ioa_cfg->regs.init_feedback_reg);
7270 	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7271 	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7272 
7273 	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7274 
7275 	/* sanity check the stage_time value */
7276 	if (stage_time == 0)
7277 		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7278 	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7279 		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7280 	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7281 		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7282 
7283 	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7284 		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7285 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7286 		stage_time = ioa_cfg->transop_timeout;
7287 		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7288 	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7289 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7290 		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7291 			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7292 			maskval = IPR_PCII_IPL_STAGE_CHANGE;
7293 			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7294 			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7295 			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7296 			return IPR_RC_JOB_CONTINUE;
7297 		}
7298 	}
7299 
7300 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7301 	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7302 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7303 	ipr_cmd->done = ipr_reset_ioa_job;
7304 	add_timer(&ipr_cmd->timer);
7305 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7306 
7307 	return IPR_RC_JOB_RETURN;
7308 }
7309 
7310 /**
7311  * ipr_reset_enable_ioa - Enable the IOA following a reset.
7312  * @ipr_cmd:	ipr command struct
7313  *
7314  * This function reinitializes some control blocks and
7315  * enables destructive diagnostics on the adapter.
7316  *
7317  * Return value:
7318  * 	IPR_RC_JOB_RETURN
7319  **/
7320 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7321 {
7322 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7323 	volatile u32 int_reg;
7324 	volatile u64 maskval;
7325 
7326 	ENTER;
7327 	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7328 	ipr_init_ioa_mem(ioa_cfg);
7329 
7330 	ioa_cfg->allow_interrupts = 1;
7331 	if (ioa_cfg->sis64) {
7332 		/* Set the adapter to the correct endian mode. */
7333 		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7334 		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7335 	}
7336 
7337 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7338 
7339 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7340 		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7341 		       ioa_cfg->regs.clr_interrupt_mask_reg32);
7342 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7343 		return IPR_RC_JOB_CONTINUE;
7344 	}
7345 
7346 	/* Enable destructive diagnostics on IOA */
7347 	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7348 
7349 	if (ioa_cfg->sis64) {
7350 		maskval = IPR_PCII_IPL_STAGE_CHANGE;
7351 		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7352 		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7353 	} else
7354 		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7355 
7356 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7357 
7358 	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7359 
7360 	if (ioa_cfg->sis64) {
7361 		ipr_cmd->job_step = ipr_reset_next_stage;
7362 		return IPR_RC_JOB_CONTINUE;
7363 	}
7364 
7365 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7366 	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7367 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7368 	ipr_cmd->done = ipr_reset_ioa_job;
7369 	add_timer(&ipr_cmd->timer);
7370 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7371 
7372 	LEAVE;
7373 	return IPR_RC_JOB_RETURN;
7374 }
7375 
7376 /**
7377  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7378  * @ipr_cmd:	ipr command struct
7379  *
7380  * This function is invoked when an adapter dump has run out
7381  * of processing time.
7382  *
7383  * Return value:
7384  * 	IPR_RC_JOB_CONTINUE
7385  **/
7386 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7387 {
7388 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7389 
7390 	if (ioa_cfg->sdt_state == GET_DUMP)
7391 		ioa_cfg->sdt_state = ABORT_DUMP;
7392 
7393 	ipr_cmd->job_step = ipr_reset_alert;
7394 
7395 	return IPR_RC_JOB_CONTINUE;
7396 }
7397 
7398 /**
7399  * ipr_unit_check_no_data - Log a unit check/no data error log
7400  * @ioa_cfg:		ioa config struct
7401  *
7402  * Logs an error indicating the adapter unit checked, but for some
7403  * reason, we were unable to fetch the unit check buffer.
7404  *
7405  * Return value:
7406  * 	nothing
7407  **/
7408 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7409 {
7410 	ioa_cfg->errors_logged++;
7411 	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7412 }
7413 
7414 /**
7415  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7416  * @ioa_cfg:		ioa config struct
7417  *
7418  * Fetches the unit check buffer from the adapter by clocking the data
7419  * through the mailbox register.
7420  *
7421  * Return value:
7422  * 	nothing
7423  **/
7424 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7425 {
7426 	unsigned long mailbox;
7427 	struct ipr_hostrcb *hostrcb;
7428 	struct ipr_uc_sdt sdt;
7429 	int rc, length;
7430 	u32 ioasc;
7431 
7432 	mailbox = readl(ioa_cfg->ioa_mailbox);
7433 
7434 	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7435 		ipr_unit_check_no_data(ioa_cfg);
7436 		return;
7437 	}
7438 
7439 	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7440 	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7441 					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7442 
7443 	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7444 	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7445 	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7446 		ipr_unit_check_no_data(ioa_cfg);
7447 		return;
7448 	}
7449 
7450 	/* Find length of the first sdt entry (UC buffer) */
7451 	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7452 		length = be32_to_cpu(sdt.entry[0].end_token);
7453 	else
7454 		length = (be32_to_cpu(sdt.entry[0].end_token) -
7455 			  be32_to_cpu(sdt.entry[0].start_token)) &
7456 			  IPR_FMT2_MBX_ADDR_MASK;
7457 
7458 	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7459 			     struct ipr_hostrcb, queue);
7460 	list_del(&hostrcb->queue);
7461 	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7462 
7463 	rc = ipr_get_ldump_data_section(ioa_cfg,
7464 					be32_to_cpu(sdt.entry[0].start_token),
7465 					(__be32 *)&hostrcb->hcam,
7466 					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7467 
7468 	if (!rc) {
7469 		ipr_handle_log_data(ioa_cfg, hostrcb);
7470 		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7471 		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7472 		    ioa_cfg->sdt_state == GET_DUMP)
7473 			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7474 	} else
7475 		ipr_unit_check_no_data(ioa_cfg);
7476 
7477 	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7478 }
7479 
7480 /**
7481  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
7482  * @ipr_cmd:	ipr command struct
7483  *
7484  * Description: This function will call to get the unit check buffer.
7485  *
7486  * Return value:
7487  *	IPR_RC_JOB_RETURN
7488  **/
7489 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
7490 {
7491 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7492 
7493 	ENTER;
7494 	ioa_cfg->ioa_unit_checked = 0;
7495 	ipr_get_unit_check_buffer(ioa_cfg);
7496 	ipr_cmd->job_step = ipr_reset_alert;
7497 	ipr_reset_start_timer(ipr_cmd, 0);
7498 
7499 	LEAVE;
7500 	return IPR_RC_JOB_RETURN;
7501 }
7502 
7503 /**
7504  * ipr_reset_restore_cfg_space - Restore PCI config space.
7505  * @ipr_cmd:	ipr command struct
7506  *
7507  * Description: This function restores the saved PCI config space of
7508  * the adapter, fails all outstanding ops back to the callers, and
7509  * fetches the dump/unit check if applicable to this reset.
7510  *
7511  * Return value:
7512  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7513  **/
7514 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7515 {
7516 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7517 	volatile u32 int_reg;
7518 
7519 	ENTER;
7520 	ioa_cfg->pdev->state_saved = true;
7521 	pci_restore_state(ioa_cfg->pdev);
7522 
7523 	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7524 		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7525 		return IPR_RC_JOB_CONTINUE;
7526 	}
7527 
7528 	ipr_fail_all_ops(ioa_cfg);
7529 
7530 	if (ioa_cfg->sis64) {
7531 		/* Set the adapter to the correct endian mode. */
7532 		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7533 		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7534 	}
7535 
7536 	if (ioa_cfg->ioa_unit_checked) {
7537 		if (ioa_cfg->sis64) {
7538 			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
7539 			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
7540 			return IPR_RC_JOB_RETURN;
7541 		} else {
7542 			ioa_cfg->ioa_unit_checked = 0;
7543 			ipr_get_unit_check_buffer(ioa_cfg);
7544 			ipr_cmd->job_step = ipr_reset_alert;
7545 			ipr_reset_start_timer(ipr_cmd, 0);
7546 			return IPR_RC_JOB_RETURN;
7547 		}
7548 	}
7549 
7550 	if (ioa_cfg->in_ioa_bringdown) {
7551 		ipr_cmd->job_step = ipr_ioa_bringdown_done;
7552 	} else {
7553 		ipr_cmd->job_step = ipr_reset_enable_ioa;
7554 
7555 		if (GET_DUMP == ioa_cfg->sdt_state) {
7556 			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
7557 			ipr_cmd->job_step = ipr_reset_wait_for_dump;
7558 			schedule_work(&ioa_cfg->work_q);
7559 			return IPR_RC_JOB_RETURN;
7560 		}
7561 	}
7562 
7563 	LEAVE;
7564 	return IPR_RC_JOB_CONTINUE;
7565 }
7566 
7567 /**
7568  * ipr_reset_bist_done - BIST has completed on the adapter.
7569  * @ipr_cmd:	ipr command struct
7570  *
7571  * Description: Unblock config space and resume the reset process.
7572  *
7573  * Return value:
7574  * 	IPR_RC_JOB_CONTINUE
7575  **/
7576 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7577 {
7578 	ENTER;
7579 	pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7580 	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7581 	LEAVE;
7582 	return IPR_RC_JOB_CONTINUE;
7583 }
7584 
7585 /**
7586  * ipr_reset_start_bist - Run BIST on the adapter.
7587  * @ipr_cmd:	ipr command struct
7588  *
7589  * Description: This function runs BIST on the adapter, then delays 2 seconds.
7590  *
7591  * Return value:
7592  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7593  **/
7594 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7595 {
7596 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7597 	int rc = PCIBIOS_SUCCESSFUL;
7598 
7599 	ENTER;
7600 	pci_block_user_cfg_access(ioa_cfg->pdev);
7601 
7602 	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
7603 		writel(IPR_UPROCI_SIS64_START_BIST,
7604 		       ioa_cfg->regs.set_uproc_interrupt_reg32);
7605 	else
7606 		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7607 
7608 	if (rc == PCIBIOS_SUCCESSFUL) {
7609 		ipr_cmd->job_step = ipr_reset_bist_done;
7610 		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7611 		rc = IPR_RC_JOB_RETURN;
7612 	} else {
7613 		pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7614 		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7615 		rc = IPR_RC_JOB_CONTINUE;
7616 	}
7617 
7618 	LEAVE;
7619 	return rc;
7620 }
7621 
7622 /**
7623  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7624  * @ipr_cmd:	ipr command struct
7625  *
7626  * Description: This clears PCI reset to the adapter and delays two seconds.
7627  *
7628  * Return value:
7629  * 	IPR_RC_JOB_RETURN
7630  **/
7631 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7632 {
7633 	ENTER;
7634 	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7635 	ipr_cmd->job_step = ipr_reset_bist_done;
7636 	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7637 	LEAVE;
7638 	return IPR_RC_JOB_RETURN;
7639 }
7640 
7641 /**
7642  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
7643  * @ipr_cmd:	ipr command struct
7644  *
7645  * Description: This asserts PCI reset to the adapter.
7646  *
7647  * Return value:
7648  * 	IPR_RC_JOB_RETURN
7649  **/
7650 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7651 {
7652 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7653 	struct pci_dev *pdev = ioa_cfg->pdev;
7654 
7655 	ENTER;
7656 	pci_block_user_cfg_access(pdev);
7657 	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7658 	ipr_cmd->job_step = ipr_reset_slot_reset_done;
7659 	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7660 	LEAVE;
7661 	return IPR_RC_JOB_RETURN;
7662 }
7663 
7664 /**
7665  * ipr_reset_allowed - Query whether or not IOA can be reset
7666  * @ioa_cfg:	ioa config struct
7667  *
7668  * Return value:
7669  * 	0 if reset not allowed / non-zero if reset is allowed
7670  **/
7671 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7672 {
7673 	volatile u32 temp_reg;
7674 
7675 	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7676 	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7677 }
7678 
7679 /**
7680  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7681  * @ipr_cmd:	ipr command struct
7682  *
7683  * Description: This function waits for adapter permission to run BIST,
7684  * then runs BIST. If the adapter does not give permission after a
7685  * reasonable time, we will reset the adapter anyway. The impact of
7686  * resetting the adapter without warning the adapter is the risk of
7687  * losing the persistent error log on the adapter. If the adapter is
7688  * reset while it is writing to the flash on the adapter, the flash
7689  * segment will have bad ECC and be zeroed.
7690  *
7691  * Return value:
7692  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7693  **/
7694 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7695 {
7696 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7697 	int rc = IPR_RC_JOB_RETURN;
7698 
7699 	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7700 		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7701 		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7702 	} else {
7703 		ipr_cmd->job_step = ioa_cfg->reset;
7704 		rc = IPR_RC_JOB_CONTINUE;
7705 	}
7706 
7707 	return rc;
7708 }
7709 
7710 /**
7711  * ipr_reset_alert - Alert the adapter of a pending reset
7712  * @ipr_cmd:	ipr command struct
7713  *
7714  * Description: This function alerts the adapter that it will be reset.
7715  * If memory space is not currently enabled, proceed directly
7716  * to running BIST on the adapter. The timer must always be started
7717  * so we guarantee we do not run BIST from ipr_isr.
7718  *
7719  * Return value:
7720  * 	IPR_RC_JOB_RETURN
7721  **/
7722 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
7723 {
7724 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7725 	u16 cmd_reg;
7726 	int rc;
7727 
7728 	ENTER;
7729 	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7730 
7731 	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
7732 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
7733 		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
7734 		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
7735 	} else {
7736 		ipr_cmd->job_step = ioa_cfg->reset;
7737 	}
7738 
7739 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7740 	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7741 
7742 	LEAVE;
7743 	return IPR_RC_JOB_RETURN;
7744 }
7745 
7746 /**
7747  * ipr_reset_ucode_download_done - Microcode download completion
7748  * @ipr_cmd:	ipr command struct
7749  *
7750  * Description: This function unmaps the microcode download buffer.
7751  *
7752  * Return value:
7753  * 	IPR_RC_JOB_CONTINUE
7754  **/
7755 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7756 {
7757 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7758 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7759 
7760 	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7761 		     sglist->num_sg, DMA_TO_DEVICE);
7762 
7763 	ipr_cmd->job_step = ipr_reset_alert;
7764 	return IPR_RC_JOB_CONTINUE;
7765 }
7766 
7767 /**
7768  * ipr_reset_ucode_download - Download microcode to the adapter
7769  * @ipr_cmd:	ipr command struct
7770  *
7771  * Description: This function checks to see if it there is microcode
7772  * to download to the adapter. If there is, a download is performed.
7773  *
7774  * Return value:
7775  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7776  **/
7777 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
7778 {
7779 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7780 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7781 
7782 	ENTER;
7783 	ipr_cmd->job_step = ipr_reset_alert;
7784 
7785 	if (!sglist)
7786 		return IPR_RC_JOB_CONTINUE;
7787 
7788 	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7789 	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7790 	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
7791 	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
7792 	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
7793 	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
7794 	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
7795 
7796 	if (ioa_cfg->sis64)
7797 		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7798 	else
7799 		ipr_build_ucode_ioadl(ipr_cmd, sglist);
7800 	ipr_cmd->job_step = ipr_reset_ucode_download_done;
7801 
7802 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7803 		   IPR_WRITE_BUFFER_TIMEOUT);
7804 
7805 	LEAVE;
7806 	return IPR_RC_JOB_RETURN;
7807 }
7808 
7809 /**
7810  * ipr_reset_shutdown_ioa - Shutdown the adapter
7811  * @ipr_cmd:	ipr command struct
7812  *
7813  * Description: This function issues an adapter shutdown of the
7814  * specified type to the specified adapter as part of the
7815  * adapter reset job.
7816  *
7817  * Return value:
7818  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7819  **/
7820 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
7821 {
7822 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7823 	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
7824 	unsigned long timeout;
7825 	int rc = IPR_RC_JOB_CONTINUE;
7826 
7827 	ENTER;
7828 	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7829 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7830 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7831 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
7832 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
7833 
7834 		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
7835 			timeout = IPR_SHUTDOWN_TIMEOUT;
7836 		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
7837 			timeout = IPR_INTERNAL_TIMEOUT;
7838 		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7839 			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
7840 		else
7841 			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
7842 
7843 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
7844 
7845 		rc = IPR_RC_JOB_RETURN;
7846 		ipr_cmd->job_step = ipr_reset_ucode_download;
7847 	} else
7848 		ipr_cmd->job_step = ipr_reset_alert;
7849 
7850 	LEAVE;
7851 	return rc;
7852 }
7853 
7854 /**
7855  * ipr_reset_ioa_job - Adapter reset job
7856  * @ipr_cmd:	ipr command struct
7857  *
7858  * Description: This function is the job router for the adapter reset job.
7859  *
7860  * Return value:
7861  * 	none
7862  **/
7863 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
7864 {
7865 	u32 rc, ioasc;
7866 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7867 
7868 	do {
7869 		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7870 
7871 		if (ioa_cfg->reset_cmd != ipr_cmd) {
7872 			/*
7873 			 * We are doing nested adapter resets and this is
7874 			 * not the current reset job.
7875 			 */
7876 			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7877 			return;
7878 		}
7879 
7880 		if (IPR_IOASC_SENSE_KEY(ioasc)) {
7881 			rc = ipr_cmd->job_step_failed(ipr_cmd);
7882 			if (rc == IPR_RC_JOB_RETURN)
7883 				return;
7884 		}
7885 
7886 		ipr_reinit_ipr_cmnd(ipr_cmd);
7887 		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
7888 		rc = ipr_cmd->job_step(ipr_cmd);
7889 	} while(rc == IPR_RC_JOB_CONTINUE);
7890 }
7891 
7892 /**
7893  * _ipr_initiate_ioa_reset - Initiate an adapter reset
7894  * @ioa_cfg:		ioa config struct
7895  * @job_step:		first job step of reset job
7896  * @shutdown_type:	shutdown type
7897  *
7898  * Description: This function will initiate the reset of the given adapter
7899  * starting at the selected job step.
7900  * If the caller needs to wait on the completion of the reset,
7901  * the caller must sleep on the reset_wait_q.
7902  *
7903  * Return value:
7904  * 	none
7905  **/
7906 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7907 				    int (*job_step) (struct ipr_cmnd *),
7908 				    enum ipr_shutdown_type shutdown_type)
7909 {
7910 	struct ipr_cmnd *ipr_cmd;
7911 
7912 	ioa_cfg->in_reset_reload = 1;
7913 	ioa_cfg->allow_cmds = 0;
7914 	scsi_block_requests(ioa_cfg->host);
7915 
7916 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
7917 	ioa_cfg->reset_cmd = ipr_cmd;
7918 	ipr_cmd->job_step = job_step;
7919 	ipr_cmd->u.shutdown_type = shutdown_type;
7920 
7921 	ipr_reset_ioa_job(ipr_cmd);
7922 }
7923 
7924 /**
7925  * ipr_initiate_ioa_reset - Initiate an adapter reset
7926  * @ioa_cfg:		ioa config struct
7927  * @shutdown_type:	shutdown type
7928  *
7929  * Description: This function will initiate the reset of the given adapter.
7930  * If the caller needs to wait on the completion of the reset,
7931  * the caller must sleep on the reset_wait_q.
7932  *
7933  * Return value:
7934  * 	none
7935  **/
7936 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7937 				   enum ipr_shutdown_type shutdown_type)
7938 {
7939 	if (ioa_cfg->ioa_is_dead)
7940 		return;
7941 
7942 	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
7943 		ioa_cfg->sdt_state = ABORT_DUMP;
7944 
7945 	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
7946 		dev_err(&ioa_cfg->pdev->dev,
7947 			"IOA taken offline - error recovery failed\n");
7948 
7949 		ioa_cfg->reset_retries = 0;
7950 		ioa_cfg->ioa_is_dead = 1;
7951 
7952 		if (ioa_cfg->in_ioa_bringdown) {
7953 			ioa_cfg->reset_cmd = NULL;
7954 			ioa_cfg->in_reset_reload = 0;
7955 			ipr_fail_all_ops(ioa_cfg);
7956 			wake_up_all(&ioa_cfg->reset_wait_q);
7957 
7958 			spin_unlock_irq(ioa_cfg->host->host_lock);
7959 			scsi_unblock_requests(ioa_cfg->host);
7960 			spin_lock_irq(ioa_cfg->host->host_lock);
7961 			return;
7962 		} else {
7963 			ioa_cfg->in_ioa_bringdown = 1;
7964 			shutdown_type = IPR_SHUTDOWN_NONE;
7965 		}
7966 	}
7967 
7968 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
7969 				shutdown_type);
7970 }
7971 
7972 /**
7973  * ipr_reset_freeze - Hold off all I/O activity
7974  * @ipr_cmd:	ipr command struct
7975  *
7976  * Description: If the PCI slot is frozen, hold off all I/O
7977  * activity; then, as soon as the slot is available again,
7978  * initiate an adapter reset.
7979  */
7980 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
7981 {
7982 	/* Disallow new interrupts, avoid loop */
7983 	ipr_cmd->ioa_cfg->allow_interrupts = 0;
7984 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7985 	ipr_cmd->done = ipr_reset_ioa_job;
7986 	return IPR_RC_JOB_RETURN;
7987 }
7988 
7989 /**
7990  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
7991  * @pdev:	PCI device struct
7992  *
7993  * Description: This routine is called to tell us that the PCI bus
7994  * is down. Can't do anything here, except put the device driver
7995  * into a holding pattern, waiting for the PCI bus to come back.
7996  */
7997 static void ipr_pci_frozen(struct pci_dev *pdev)
7998 {
7999 	unsigned long flags = 0;
8000 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8001 
8002 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8003 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8004 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8005 }
8006 
8007 /**
8008  * ipr_pci_slot_reset - Called when PCI slot has been reset.
8009  * @pdev:	PCI device struct
8010  *
8011  * Description: This routine is called by the pci error recovery
8012  * code after the PCI slot has been reset, just before we
8013  * should resume normal operations.
8014  */
8015 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8016 {
8017 	unsigned long flags = 0;
8018 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8019 
8020 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8021 	if (ioa_cfg->needs_warm_reset)
8022 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8023 	else
8024 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8025 					IPR_SHUTDOWN_NONE);
8026 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8027 	return PCI_ERS_RESULT_RECOVERED;
8028 }
8029 
8030 /**
8031  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8032  * @pdev:	PCI device struct
8033  *
8034  * Description: This routine is called when the PCI bus has
8035  * permanently failed.
8036  */
8037 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8038 {
8039 	unsigned long flags = 0;
8040 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8041 
8042 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8043 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8044 		ioa_cfg->sdt_state = ABORT_DUMP;
8045 	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
8046 	ioa_cfg->in_ioa_bringdown = 1;
8047 	ioa_cfg->allow_cmds = 0;
8048 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8049 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8050 }
8051 
8052 /**
8053  * ipr_pci_error_detected - Called when a PCI error is detected.
8054  * @pdev:	PCI device struct
8055  * @state:	PCI channel state
8056  *
8057  * Description: Called when a PCI error is detected.
8058  *
8059  * Return value:
8060  * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8061  */
8062 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8063 					       pci_channel_state_t state)
8064 {
8065 	switch (state) {
8066 	case pci_channel_io_frozen:
8067 		ipr_pci_frozen(pdev);
8068 		return PCI_ERS_RESULT_NEED_RESET;
8069 	case pci_channel_io_perm_failure:
8070 		ipr_pci_perm_failure(pdev);
8071 		return PCI_ERS_RESULT_DISCONNECT;
8072 		break;
8073 	default:
8074 		break;
8075 	}
8076 	return PCI_ERS_RESULT_NEED_RESET;
8077 }
8078 
8079 /**
8080  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8081  * @ioa_cfg:	ioa cfg struct
8082  *
8083  * Description: This is the second phase of adapter intialization
8084  * This function takes care of initilizing the adapter to the point
8085  * where it can accept new commands.
8086 
8087  * Return value:
8088  * 	0 on success / -EIO on failure
8089  **/
8090 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8091 {
8092 	int rc = 0;
8093 	unsigned long host_lock_flags = 0;
8094 
8095 	ENTER;
8096 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8097 	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8098 	if (ioa_cfg->needs_hard_reset) {
8099 		ioa_cfg->needs_hard_reset = 0;
8100 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8101 	} else
8102 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8103 					IPR_SHUTDOWN_NONE);
8104 
8105 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8106 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8107 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8108 
8109 	if (ioa_cfg->ioa_is_dead) {
8110 		rc = -EIO;
8111 	} else if (ipr_invalid_adapter(ioa_cfg)) {
8112 		if (!ipr_testmode)
8113 			rc = -EIO;
8114 
8115 		dev_err(&ioa_cfg->pdev->dev,
8116 			"Adapter not supported in this hardware configuration.\n");
8117 	}
8118 
8119 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8120 
8121 	LEAVE;
8122 	return rc;
8123 }
8124 
8125 /**
8126  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8127  * @ioa_cfg:	ioa config struct
8128  *
8129  * Return value:
8130  * 	none
8131  **/
8132 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8133 {
8134 	int i;
8135 
8136 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8137 		if (ioa_cfg->ipr_cmnd_list[i])
8138 			pci_pool_free(ioa_cfg->ipr_cmd_pool,
8139 				      ioa_cfg->ipr_cmnd_list[i],
8140 				      ioa_cfg->ipr_cmnd_list_dma[i]);
8141 
8142 		ioa_cfg->ipr_cmnd_list[i] = NULL;
8143 	}
8144 
8145 	if (ioa_cfg->ipr_cmd_pool)
8146 		pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
8147 
8148 	ioa_cfg->ipr_cmd_pool = NULL;
8149 }
8150 
8151 /**
8152  * ipr_free_mem - Frees memory allocated for an adapter
8153  * @ioa_cfg:	ioa cfg struct
8154  *
8155  * Return value:
8156  * 	nothing
8157  **/
8158 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8159 {
8160 	int i;
8161 
8162 	kfree(ioa_cfg->res_entries);
8163 	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8164 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8165 	ipr_free_cmd_blks(ioa_cfg);
8166 	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8167 			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8168 	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8169 			    ioa_cfg->u.cfg_table,
8170 			    ioa_cfg->cfg_table_dma);
8171 
8172 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
8173 		pci_free_consistent(ioa_cfg->pdev,
8174 				    sizeof(struct ipr_hostrcb),
8175 				    ioa_cfg->hostrcb[i],
8176 				    ioa_cfg->hostrcb_dma[i]);
8177 	}
8178 
8179 	ipr_free_dump(ioa_cfg);
8180 	kfree(ioa_cfg->trace);
8181 }
8182 
8183 /**
8184  * ipr_free_all_resources - Free all allocated resources for an adapter.
8185  * @ipr_cmd:	ipr command struct
8186  *
8187  * This function frees all allocated resources for the
8188  * specified adapter.
8189  *
8190  * Return value:
8191  * 	none
8192  **/
8193 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8194 {
8195 	struct pci_dev *pdev = ioa_cfg->pdev;
8196 
8197 	ENTER;
8198 	free_irq(pdev->irq, ioa_cfg);
8199 	pci_disable_msi(pdev);
8200 	iounmap(ioa_cfg->hdw_dma_regs);
8201 	pci_release_regions(pdev);
8202 	ipr_free_mem(ioa_cfg);
8203 	scsi_host_put(ioa_cfg->host);
8204 	pci_disable_device(pdev);
8205 	LEAVE;
8206 }
8207 
8208 /**
8209  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8210  * @ioa_cfg:	ioa config struct
8211  *
8212  * Return value:
8213  * 	0 on success / -ENOMEM on allocation failure
8214  **/
8215 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8216 {
8217 	struct ipr_cmnd *ipr_cmd;
8218 	struct ipr_ioarcb *ioarcb;
8219 	dma_addr_t dma_addr;
8220 	int i;
8221 
8222 	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
8223 						 sizeof(struct ipr_cmnd), 16, 0);
8224 
8225 	if (!ioa_cfg->ipr_cmd_pool)
8226 		return -ENOMEM;
8227 
8228 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8229 		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8230 
8231 		if (!ipr_cmd) {
8232 			ipr_free_cmd_blks(ioa_cfg);
8233 			return -ENOMEM;
8234 		}
8235 
8236 		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8237 		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8238 		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8239 
8240 		ioarcb = &ipr_cmd->ioarcb;
8241 		ipr_cmd->dma_addr = dma_addr;
8242 		if (ioa_cfg->sis64)
8243 			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8244 		else
8245 			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8246 
8247 		ioarcb->host_response_handle = cpu_to_be32(i << 2);
8248 		if (ioa_cfg->sis64) {
8249 			ioarcb->u.sis64_addr_data.data_ioadl_addr =
8250 				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8251 			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8252 				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
8253 		} else {
8254 			ioarcb->write_ioadl_addr =
8255 				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8256 			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8257 			ioarcb->ioasa_host_pci_addr =
8258 				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
8259 		}
8260 		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8261 		ipr_cmd->cmd_index = i;
8262 		ipr_cmd->ioa_cfg = ioa_cfg;
8263 		ipr_cmd->sense_buffer_dma = dma_addr +
8264 			offsetof(struct ipr_cmnd, sense_buffer);
8265 
8266 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8267 	}
8268 
8269 	return 0;
8270 }
8271 
8272 /**
8273  * ipr_alloc_mem - Allocate memory for an adapter
8274  * @ioa_cfg:	ioa config struct
8275  *
8276  * Return value:
8277  * 	0 on success / non-zero for error
8278  **/
8279 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8280 {
8281 	struct pci_dev *pdev = ioa_cfg->pdev;
8282 	int i, rc = -ENOMEM;
8283 
8284 	ENTER;
8285 	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
8286 				       ioa_cfg->max_devs_supported, GFP_KERNEL);
8287 
8288 	if (!ioa_cfg->res_entries)
8289 		goto out;
8290 
8291 	if (ioa_cfg->sis64) {
8292 		ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8293 					      BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8294 		ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8295 					     BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8296 		ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8297 					    BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8298 	}
8299 
8300 	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8301 		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8302 		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8303 	}
8304 
8305 	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8306 						sizeof(struct ipr_misc_cbs),
8307 						&ioa_cfg->vpd_cbs_dma);
8308 
8309 	if (!ioa_cfg->vpd_cbs)
8310 		goto out_free_res_entries;
8311 
8312 	if (ipr_alloc_cmd_blks(ioa_cfg))
8313 		goto out_free_vpd_cbs;
8314 
8315 	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8316 						 sizeof(u32) * IPR_NUM_CMD_BLKS,
8317 						 &ioa_cfg->host_rrq_dma);
8318 
8319 	if (!ioa_cfg->host_rrq)
8320 		goto out_ipr_free_cmd_blocks;
8321 
8322 	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8323 						    ioa_cfg->cfg_table_size,
8324 						    &ioa_cfg->cfg_table_dma);
8325 
8326 	if (!ioa_cfg->u.cfg_table)
8327 		goto out_free_host_rrq;
8328 
8329 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
8330 		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8331 							   sizeof(struct ipr_hostrcb),
8332 							   &ioa_cfg->hostrcb_dma[i]);
8333 
8334 		if (!ioa_cfg->hostrcb[i])
8335 			goto out_free_hostrcb_dma;
8336 
8337 		ioa_cfg->hostrcb[i]->hostrcb_dma =
8338 			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
8339 		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
8340 		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8341 	}
8342 
8343 	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
8344 				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
8345 
8346 	if (!ioa_cfg->trace)
8347 		goto out_free_hostrcb_dma;
8348 
8349 	rc = 0;
8350 out:
8351 	LEAVE;
8352 	return rc;
8353 
8354 out_free_hostrcb_dma:
8355 	while (i-- > 0) {
8356 		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
8357 				    ioa_cfg->hostrcb[i],
8358 				    ioa_cfg->hostrcb_dma[i]);
8359 	}
8360 	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8361 			    ioa_cfg->u.cfg_table,
8362 			    ioa_cfg->cfg_table_dma);
8363 out_free_host_rrq:
8364 	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8365 			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8366 out_ipr_free_cmd_blocks:
8367 	ipr_free_cmd_blks(ioa_cfg);
8368 out_free_vpd_cbs:
8369 	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
8370 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8371 out_free_res_entries:
8372 	kfree(ioa_cfg->res_entries);
8373 	goto out;
8374 }
8375 
8376 /**
8377  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
8378  * @ioa_cfg:	ioa config struct
8379  *
8380  * Return value:
8381  * 	none
8382  **/
8383 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8384 {
8385 	int i;
8386 
8387 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
8388 		ioa_cfg->bus_attr[i].bus = i;
8389 		ioa_cfg->bus_attr[i].qas_enabled = 0;
8390 		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8391 		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
8392 			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8393 		else
8394 			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8395 	}
8396 }
8397 
8398 /**
8399  * ipr_init_ioa_cfg - Initialize IOA config struct
8400  * @ioa_cfg:	ioa config struct
8401  * @host:		scsi host struct
8402  * @pdev:		PCI dev struct
8403  *
8404  * Return value:
8405  * 	none
8406  **/
8407 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8408 				       struct Scsi_Host *host, struct pci_dev *pdev)
8409 {
8410 	const struct ipr_interrupt_offsets *p;
8411 	struct ipr_interrupts *t;
8412 	void __iomem *base;
8413 
8414 	ioa_cfg->host = host;
8415 	ioa_cfg->pdev = pdev;
8416 	ioa_cfg->log_level = ipr_log_level;
8417 	ioa_cfg->doorbell = IPR_DOORBELL;
8418 	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8419 	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
8420 	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
8421 	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
8422 	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
8423 	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
8424 	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
8425 	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8426 
8427 	INIT_LIST_HEAD(&ioa_cfg->free_q);
8428 	INIT_LIST_HEAD(&ioa_cfg->pending_q);
8429 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8430 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8431 	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8432 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
8433 	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
8434 	init_waitqueue_head(&ioa_cfg->reset_wait_q);
8435 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
8436 	ioa_cfg->sdt_state = INACTIVE;
8437 
8438 	ipr_initialize_bus_attr(ioa_cfg);
8439 	ioa_cfg->max_devs_supported = ipr_max_devs;
8440 
8441 	if (ioa_cfg->sis64) {
8442 		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8443 		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8444 		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8445 			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8446 	} else {
8447 		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8448 		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8449 		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8450 			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8451 	}
8452 	host->max_channel = IPR_MAX_BUS_TO_SCAN;
8453 	host->unique_id = host->host_no;
8454 	host->max_cmd_len = IPR_MAX_CDB_LEN;
8455 	pci_set_drvdata(pdev, ioa_cfg);
8456 
8457 	p = &ioa_cfg->chip_cfg->regs;
8458 	t = &ioa_cfg->regs;
8459 	base = ioa_cfg->hdw_dma_regs;
8460 
8461 	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
8462 	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
8463 	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
8464 	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
8465 	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
8466 	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
8467 	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
8468 	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
8469 	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
8470 	t->ioarrin_reg = base + p->ioarrin_reg;
8471 	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
8472 	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
8473 	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
8474 	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
8475 	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
8476 	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
8477 
8478 	if (ioa_cfg->sis64) {
8479 		t->init_feedback_reg = base + p->init_feedback_reg;
8480 		t->dump_addr_reg = base + p->dump_addr_reg;
8481 		t->dump_data_reg = base + p->dump_data_reg;
8482 		t->endian_swap_reg = base + p->endian_swap_reg;
8483 	}
8484 }
8485 
8486 /**
8487  * ipr_get_chip_info - Find adapter chip information
8488  * @dev_id:		PCI device id struct
8489  *
8490  * Return value:
8491  * 	ptr to chip information on success / NULL on failure
8492  **/
8493 static const struct ipr_chip_t * __devinit
8494 ipr_get_chip_info(const struct pci_device_id *dev_id)
8495 {
8496 	int i;
8497 
8498 	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
8499 		if (ipr_chip[i].vendor == dev_id->vendor &&
8500 		    ipr_chip[i].device == dev_id->device)
8501 			return &ipr_chip[i];
8502 	return NULL;
8503 }
8504 
8505 /**
8506  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
8507  * @pdev:		PCI device struct
8508  *
8509  * Description: Simply set the msi_received flag to 1 indicating that
8510  * Message Signaled Interrupts are supported.
8511  *
8512  * Return value:
8513  * 	0 on success / non-zero on failure
8514  **/
8515 static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
8516 {
8517 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8518 	unsigned long lock_flags = 0;
8519 	irqreturn_t rc = IRQ_HANDLED;
8520 
8521 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8522 
8523 	ioa_cfg->msi_received = 1;
8524 	wake_up(&ioa_cfg->msi_wait_q);
8525 
8526 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8527 	return rc;
8528 }
8529 
8530 /**
8531  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
8532  * @pdev:		PCI device struct
8533  *
8534  * Description: The return value from pci_enable_msi() can not always be
8535  * trusted.  This routine sets up and initiates a test interrupt to determine
8536  * if the interrupt is received via the ipr_test_intr() service routine.
8537  * If the tests fails, the driver will fall back to LSI.
8538  *
8539  * Return value:
8540  * 	0 on success / non-zero on failure
8541  **/
8542 static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
8543 				  struct pci_dev *pdev)
8544 {
8545 	int rc;
8546 	volatile u32 int_reg;
8547 	unsigned long lock_flags = 0;
8548 
8549 	ENTER;
8550 
8551 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8552 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
8553 	ioa_cfg->msi_received = 0;
8554 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8555 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
8556 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8557 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8558 
8559 	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
8560 	if (rc) {
8561 		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
8562 		return rc;
8563 	} else if (ipr_debug)
8564 		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
8565 
8566 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
8567 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8568 	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
8569 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8570 
8571 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8572 	if (!ioa_cfg->msi_received) {
8573 		/* MSI test failed */
8574 		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
8575 		rc = -EOPNOTSUPP;
8576 	} else if (ipr_debug)
8577 		dev_info(&pdev->dev, "MSI test succeeded.\n");
8578 
8579 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8580 
8581 	free_irq(pdev->irq, ioa_cfg);
8582 
8583 	LEAVE;
8584 
8585 	return rc;
8586 }
8587 
8588 /**
8589  * ipr_probe_ioa - Allocates memory and does first stage of initialization
8590  * @pdev:		PCI device struct
8591  * @dev_id:		PCI device id struct
8592  *
8593  * Return value:
8594  * 	0 on success / non-zero on failure
8595  **/
8596 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8597 				   const struct pci_device_id *dev_id)
8598 {
8599 	struct ipr_ioa_cfg *ioa_cfg;
8600 	struct Scsi_Host *host;
8601 	unsigned long ipr_regs_pci;
8602 	void __iomem *ipr_regs;
8603 	int rc = PCIBIOS_SUCCESSFUL;
8604 	volatile u32 mask, uproc, interrupts;
8605 
8606 	ENTER;
8607 
8608 	if ((rc = pci_enable_device(pdev))) {
8609 		dev_err(&pdev->dev, "Cannot enable adapter\n");
8610 		goto out;
8611 	}
8612 
8613 	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
8614 
8615 	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8616 
8617 	if (!host) {
8618 		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
8619 		rc = -ENOMEM;
8620 		goto out_disable;
8621 	}
8622 
8623 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8624 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8625 	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8626 		      sata_port_info.flags, &ipr_sata_ops);
8627 
8628 	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
8629 
8630 	if (!ioa_cfg->ipr_chip) {
8631 		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
8632 			dev_id->vendor, dev_id->device);
8633 		goto out_scsi_host_put;
8634 	}
8635 
8636 	/* set SIS 32 or SIS 64 */
8637 	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
8638 	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8639 
8640 	if (ipr_transop_timeout)
8641 		ioa_cfg->transop_timeout = ipr_transop_timeout;
8642 	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
8643 		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8644 	else
8645 		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8646 
8647 	ioa_cfg->revid = pdev->revision;
8648 
8649 	ipr_regs_pci = pci_resource_start(pdev, 0);
8650 
8651 	rc = pci_request_regions(pdev, IPR_NAME);
8652 	if (rc < 0) {
8653 		dev_err(&pdev->dev,
8654 			"Couldn't register memory range of registers\n");
8655 		goto out_scsi_host_put;
8656 	}
8657 
8658 	ipr_regs = pci_ioremap_bar(pdev, 0);
8659 
8660 	if (!ipr_regs) {
8661 		dev_err(&pdev->dev,
8662 			"Couldn't map memory range of registers\n");
8663 		rc = -ENOMEM;
8664 		goto out_release_regions;
8665 	}
8666 
8667 	ioa_cfg->hdw_dma_regs = ipr_regs;
8668 	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8669 	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8670 
8671 	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8672 
8673 	pci_set_master(pdev);
8674 
8675 	if (ioa_cfg->sis64) {
8676 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8677 		if (rc < 0) {
8678 			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8679 			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8680 		}
8681 
8682 	} else
8683 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8684 
8685 	if (rc < 0) {
8686 		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
8687 		goto cleanup_nomem;
8688 	}
8689 
8690 	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
8691 				   ioa_cfg->chip_cfg->cache_line_size);
8692 
8693 	if (rc != PCIBIOS_SUCCESSFUL) {
8694 		dev_err(&pdev->dev, "Write of cache line size failed\n");
8695 		rc = -EIO;
8696 		goto cleanup_nomem;
8697 	}
8698 
8699 	/* Enable MSI style interrupts if they are supported. */
8700 	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
8701 		rc = ipr_test_msi(ioa_cfg, pdev);
8702 		if (rc == -EOPNOTSUPP)
8703 			pci_disable_msi(pdev);
8704 		else if (rc)
8705 			goto out_msi_disable;
8706 		else
8707 			dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
8708 	} else if (ipr_debug)
8709 		dev_info(&pdev->dev, "Cannot enable MSI.\n");
8710 
8711 	/* Save away PCI config space for use following IOA reset */
8712 	rc = pci_save_state(pdev);
8713 
8714 	if (rc != PCIBIOS_SUCCESSFUL) {
8715 		dev_err(&pdev->dev, "Failed to save PCI config space\n");
8716 		rc = -EIO;
8717 		goto cleanup_nomem;
8718 	}
8719 
8720 	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8721 		goto cleanup_nomem;
8722 
8723 	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8724 		goto cleanup_nomem;
8725 
8726 	if (ioa_cfg->sis64)
8727 		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8728 				+ ((sizeof(struct ipr_config_table_entry64)
8729 				* ioa_cfg->max_devs_supported)));
8730 	else
8731 		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8732 				+ ((sizeof(struct ipr_config_table_entry)
8733 				* ioa_cfg->max_devs_supported)));
8734 
8735 	rc = ipr_alloc_mem(ioa_cfg);
8736 	if (rc < 0) {
8737 		dev_err(&pdev->dev,
8738 			"Couldn't allocate enough memory for device driver!\n");
8739 		goto cleanup_nomem;
8740 	}
8741 
8742 	/*
8743 	 * If HRRQ updated interrupt is not masked, or reset alert is set,
8744 	 * the card is in an unknown state and needs a hard reset
8745 	 */
8746 	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
8747 	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
8748 	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
8749 	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
8750 		ioa_cfg->needs_hard_reset = 1;
8751 	if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
8752 		ioa_cfg->needs_hard_reset = 1;
8753 	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
8754 		ioa_cfg->ioa_unit_checked = 1;
8755 
8756 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8757 	rc = request_irq(pdev->irq, ipr_isr,
8758 			 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
8759 			 IPR_NAME, ioa_cfg);
8760 
8761 	if (rc) {
8762 		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
8763 			pdev->irq, rc);
8764 		goto cleanup_nolog;
8765 	}
8766 
8767 	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
8768 	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
8769 		ioa_cfg->needs_warm_reset = 1;
8770 		ioa_cfg->reset = ipr_reset_slot_reset;
8771 	} else
8772 		ioa_cfg->reset = ipr_reset_start_bist;
8773 
8774 	spin_lock(&ipr_driver_lock);
8775 	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
8776 	spin_unlock(&ipr_driver_lock);
8777 
8778 	LEAVE;
8779 out:
8780 	return rc;
8781 
8782 cleanup_nolog:
8783 	ipr_free_mem(ioa_cfg);
8784 cleanup_nomem:
8785 	iounmap(ipr_regs);
8786 out_msi_disable:
8787 	pci_disable_msi(pdev);
8788 out_release_regions:
8789 	pci_release_regions(pdev);
8790 out_scsi_host_put:
8791 	scsi_host_put(host);
8792 out_disable:
8793 	pci_disable_device(pdev);
8794 	goto out;
8795 }
8796 
8797 /**
8798  * ipr_scan_vsets - Scans for VSET devices
8799  * @ioa_cfg:	ioa config struct
8800  *
8801  * Description: Since the VSET resources do not follow SAM in that we can have
8802  * sparse LUNs with no LUN 0, we have to scan for these ourselves.
8803  *
8804  * Return value:
8805  * 	none
8806  **/
8807 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
8808 {
8809 	int target, lun;
8810 
8811 	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
8812 		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
8813 			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
8814 }
8815 
8816 /**
8817  * ipr_initiate_ioa_bringdown - Bring down an adapter
8818  * @ioa_cfg:		ioa config struct
8819  * @shutdown_type:	shutdown type
8820  *
8821  * Description: This function will initiate bringing down the adapter.
8822  * This consists of issuing an IOA shutdown to the adapter
8823  * to flush the cache, and running BIST.
8824  * If the caller needs to wait on the completion of the reset,
8825  * the caller must sleep on the reset_wait_q.
8826  *
8827  * Return value:
8828  * 	none
8829  **/
8830 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
8831 				       enum ipr_shutdown_type shutdown_type)
8832 {
8833 	ENTER;
8834 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8835 		ioa_cfg->sdt_state = ABORT_DUMP;
8836 	ioa_cfg->reset_retries = 0;
8837 	ioa_cfg->in_ioa_bringdown = 1;
8838 	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
8839 	LEAVE;
8840 }
8841 
8842 /**
8843  * __ipr_remove - Remove a single adapter
8844  * @pdev:	pci device struct
8845  *
8846  * Adapter hot plug remove entry point.
8847  *
8848  * Return value:
8849  * 	none
8850  **/
8851 static void __ipr_remove(struct pci_dev *pdev)
8852 {
8853 	unsigned long host_lock_flags = 0;
8854 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8855 	ENTER;
8856 
8857 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8858 	while(ioa_cfg->in_reset_reload) {
8859 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8860 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8861 		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8862 	}
8863 
8864 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8865 
8866 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8867 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8868 	flush_scheduled_work();
8869 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8870 
8871 	spin_lock(&ipr_driver_lock);
8872 	list_del(&ioa_cfg->queue);
8873 	spin_unlock(&ipr_driver_lock);
8874 
8875 	if (ioa_cfg->sdt_state == ABORT_DUMP)
8876 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8877 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8878 
8879 	ipr_free_all_resources(ioa_cfg);
8880 
8881 	LEAVE;
8882 }
8883 
8884 /**
8885  * ipr_remove - IOA hot plug remove entry point
8886  * @pdev:	pci device struct
8887  *
8888  * Adapter hot plug remove entry point.
8889  *
8890  * Return value:
8891  * 	none
8892  **/
8893 static void __devexit ipr_remove(struct pci_dev *pdev)
8894 {
8895 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8896 
8897 	ENTER;
8898 
8899 	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8900 			      &ipr_trace_attr);
8901 	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
8902 			     &ipr_dump_attr);
8903 	scsi_remove_host(ioa_cfg->host);
8904 
8905 	__ipr_remove(pdev);
8906 
8907 	LEAVE;
8908 }
8909 
8910 /**
8911  * ipr_probe - Adapter hot plug add entry point
8912  *
8913  * Return value:
8914  * 	0 on success / non-zero on failure
8915  **/
8916 static int __devinit ipr_probe(struct pci_dev *pdev,
8917 			       const struct pci_device_id *dev_id)
8918 {
8919 	struct ipr_ioa_cfg *ioa_cfg;
8920 	int rc;
8921 
8922 	rc = ipr_probe_ioa(pdev, dev_id);
8923 
8924 	if (rc)
8925 		return rc;
8926 
8927 	ioa_cfg = pci_get_drvdata(pdev);
8928 	rc = ipr_probe_ioa_part2(ioa_cfg);
8929 
8930 	if (rc) {
8931 		__ipr_remove(pdev);
8932 		return rc;
8933 	}
8934 
8935 	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
8936 
8937 	if (rc) {
8938 		__ipr_remove(pdev);
8939 		return rc;
8940 	}
8941 
8942 	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
8943 				   &ipr_trace_attr);
8944 
8945 	if (rc) {
8946 		scsi_remove_host(ioa_cfg->host);
8947 		__ipr_remove(pdev);
8948 		return rc;
8949 	}
8950 
8951 	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
8952 				   &ipr_dump_attr);
8953 
8954 	if (rc) {
8955 		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8956 				      &ipr_trace_attr);
8957 		scsi_remove_host(ioa_cfg->host);
8958 		__ipr_remove(pdev);
8959 		return rc;
8960 	}
8961 
8962 	scsi_scan_host(ioa_cfg->host);
8963 	ipr_scan_vsets(ioa_cfg);
8964 	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
8965 	ioa_cfg->allow_ml_add_del = 1;
8966 	ioa_cfg->host->max_channel = IPR_VSET_BUS;
8967 	schedule_work(&ioa_cfg->work_q);
8968 	return 0;
8969 }
8970 
8971 /**
8972  * ipr_shutdown - Shutdown handler.
8973  * @pdev:	pci device struct
8974  *
8975  * This function is invoked upon system shutdown/reboot. It will issue
8976  * an adapter shutdown to the adapter to flush the write cache.
8977  *
8978  * Return value:
8979  * 	none
8980  **/
8981 static void ipr_shutdown(struct pci_dev *pdev)
8982 {
8983 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8984 	unsigned long lock_flags = 0;
8985 
8986 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8987 	while(ioa_cfg->in_reset_reload) {
8988 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8989 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8990 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8991 	}
8992 
8993 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8994 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8995 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8996 }
8997 
8998 static struct pci_device_id ipr_pci_table[] __devinitdata = {
8999 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9000 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
9001 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9002 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
9003 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9004 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
9005 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9006 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
9007 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9008 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
9009 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9010 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
9011 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9012 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
9013 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9014 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
9015 		IPR_USE_LONG_TRANSOP_TIMEOUT },
9016 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9017 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9018 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9019 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9020 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9021 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9022 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9023 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9024 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9025 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9026 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9027 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9028 	      IPR_USE_LONG_TRANSOP_TIMEOUT},
9029 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9030 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9031 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9032 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9033 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
9034 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9035 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9036 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
9037 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9038 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
9039 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9040 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
9041 	      IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
9042 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
9043 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
9044 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9045 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
9046 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9047 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
9048 		IPR_USE_LONG_TRANSOP_TIMEOUT },
9049 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9050 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
9051 		IPR_USE_LONG_TRANSOP_TIMEOUT },
9052 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9053 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
9054 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9055 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
9056 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9057 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
9058 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9059 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
9060 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
9061 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
9062 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
9063 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
9064 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
9065 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
9066 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
9067 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
9068 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
9069 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
9070 	{ }
9071 };
9072 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
9073 
9074 static struct pci_error_handlers ipr_err_handler = {
9075 	.error_detected = ipr_pci_error_detected,
9076 	.slot_reset = ipr_pci_slot_reset,
9077 };
9078 
9079 static struct pci_driver ipr_driver = {
9080 	.name = IPR_NAME,
9081 	.id_table = ipr_pci_table,
9082 	.probe = ipr_probe,
9083 	.remove = __devexit_p(ipr_remove),
9084 	.shutdown = ipr_shutdown,
9085 	.err_handler = &ipr_err_handler,
9086 };
9087 
9088 /**
9089  * ipr_halt_done - Shutdown prepare completion
9090  *
9091  * Return value:
9092  * 	none
9093  **/
9094 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
9095 {
9096 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9097 
9098 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
9099 }
9100 
9101 /**
9102  * ipr_halt - Issue shutdown prepare to all adapters
9103  *
9104  * Return value:
9105  * 	NOTIFY_OK on success / NOTIFY_DONE on failure
9106  **/
9107 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
9108 {
9109 	struct ipr_cmnd *ipr_cmd;
9110 	struct ipr_ioa_cfg *ioa_cfg;
9111 	unsigned long flags = 0;
9112 
9113 	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
9114 		return NOTIFY_DONE;
9115 
9116 	spin_lock(&ipr_driver_lock);
9117 
9118 	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
9119 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9120 		if (!ioa_cfg->allow_cmds) {
9121 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9122 			continue;
9123 		}
9124 
9125 		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9126 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9127 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9128 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9129 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
9130 
9131 		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
9132 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9133 	}
9134 	spin_unlock(&ipr_driver_lock);
9135 
9136 	return NOTIFY_OK;
9137 }
9138 
9139 static struct notifier_block ipr_notifier = {
9140 	ipr_halt, NULL, 0
9141 };
9142 
9143 /**
9144  * ipr_init - Module entry point
9145  *
9146  * Return value:
9147  * 	0 on success / negative value on failure
9148  **/
9149 static int __init ipr_init(void)
9150 {
9151 	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
9152 		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
9153 
9154 	register_reboot_notifier(&ipr_notifier);
9155 	return pci_register_driver(&ipr_driver);
9156 }
9157 
9158 /**
9159  * ipr_exit - Module unload
9160  *
9161  * Module unload entry point.
9162  *
9163  * Return value:
9164  * 	none
9165  **/
9166 static void __exit ipr_exit(void)
9167 {
9168 	unregister_reboot_notifier(&ipr_notifier);
9169 	pci_unregister_driver(&ipr_driver);
9170 }
9171 
9172 module_init(ipr_init);
9173 module_exit(ipr_exit);
9174